/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_cqe *);
static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
				       int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
				     uint32_t);

/* Return the IOCB payload embedded in an iocbq object. */
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -ENOMEM;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);

	return 0;
}
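
/*
 * Usage sketch (illustrative, not part of the original file): callers post a
 * WQE while holding hbalock and treat a nonzero return as a queue-full
 * condition. The els_wq member and the IOCB_ERROR return value are
 * assumptions drawn from typical SLI4 submit paths, not from this excerpt:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) {
 *		spin_unlock_irqrestore(&phba->hbalock, iflags);
 *		return IOCB_ERROR;
 *	}
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */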

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}
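
/*
 * Illustrative sketch (not from this excerpt): when a WQE-release CQE
 * arrives, the completion handler advances the WQ's hba index to the index
 * reported by the hardware. The wcqe variable and the lpfc_wcqe_r_wqe_index
 * field are assumptions based on the SLI4 completion path:
 *
 *	lpfc_sli4_wq_release(wq, bf_get(lpfc_wcqe_r_wqe_index, wcqe));
 */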

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
static inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
		(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each event queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
			(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
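
/*
 * Illustrative sketch (not from this excerpt): an interrupt handler
 * typically drains the EQ with lpfc_sli4_eq_get() and then pops the
 * processed entries back to the HBA, rearming the queue in a single
 * doorbell write. The eq and qidx variables are assumptions about the
 * surrounding ISR context:
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq)))
 *		lpfc_sli4_hba_handle_eqe(phba, eqe, qidx);
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
 */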

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = idx;
	return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
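
/*
 * Illustrative sketch (not from this excerpt): a CQ handler consumes CQEs
 * and periodically releases batches without rearming, only rearming once the
 * queue has been drained. The cq and ecount variables and the choice of
 * per-CQE worker are assumptions about the surrounding completion path:
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq))) {
 *		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
 *		if (!(++ecount % cq->entry_repost))
 *			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 *	}
 *	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
 */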

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on the queues this function will
 * return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	put_index = hq->host_index;
	temp_hrqe = hq->qe[hq->host_index].rqe;
	temp_drqe = dq->qe[dq->host_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
		       hq->entry_repost);
		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
	}
	return put_index;
}
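
/*
 * Illustrative sketch (not from this excerpt): a caller posting a receive
 * buffer back to the hardware fills paired header/data RQEs with the DMA
 * addresses of the two buffers and posts them together. The hbq_buffer
 * variable and its hbuf/dbuf members are assumptions about the buffer
 * bookkeeping elsewhere in the driver:
 *
 *	hrqe.address_lo = putPaddrLow(hbq_buffer->hbuf.phys);
 *	hrqe.address_hi = putPaddrHigh(hbq_buffer->hbuf.phys);
 *	drqe.address_lo = putPaddrLow(hbq_buffer->dbuf.phys);
 *	drqe.address_hi = putPaddrHigh(hbq_buffer->dbuf.phys);
 *	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
 *			      &hrqe, &drqe);
 */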

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of both queues to reflect
 * consumption of one Receive Queue Entry by the HBA. When the HBA indicates
 * that it has consumed an entry the host calls this function to update the
 * queue's internal pointers. This routine returns the number of entries that
 * were consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap);
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks whether stop_time (ratov from setting rrq active) has
 * been reached; if it has and the send_rrq flag is set, it
 * will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + HZ * (phba->fc_ratov + 1);
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQs for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If @ndlp is not NULL, then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_cleanup_wt_rrqs - Remove all rrqs from the active list.
 * @phba: Pointer to HBA context object.
 *
 * Remove all rrqs from the phba->active_rrq_list and free them by
 * calling lpfc_clr_rrq_active.
 *
 **/
void
lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + HZ * (phba->fc_ratov * 2);
	list_splice_init(&phba->active_rrq_list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: The xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
			uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 = rrq activated for this xri
 *         < 0 = No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	rrq->send_rrq = send_rrq;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
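
/*
 * Illustrative sketch (not from this excerpt): the RRQ lifecycle ties these
 * helpers together. When an exchange is aborted, the completion path marks
 * the XRI busy; __lpfc_sli_get_sglq() (below) then refuses to hand that XRI
 * out while lpfc_test_rrq_active() is true, and lpfc_clr_rrq_active() frees
 * the tracking object once the RRQ is resolved. The ndlp, lxri, and rxid
 * values here are placeholders:
 *
 *	lpfc_set_rrq_active(phba, ndlp, lxri, rxid, 1);
 *	...
 *	busy = lpfc_test_rrq_active(phba, ndlp, lxri);
 */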

/**
 * __lpfc_sli_get_sglq - Allocates a sglq object from the sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with hbalock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty it returns a pointer to the newly
 * allocated sglq object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->iocb_flag & LPFC_IO_LIBDFC))
		ndlp = piocbq->context_un.ndlp;
	else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) &&
			(piocbq->iocb_flag & LPFC_IO_LIBDFC))
		ndlp = piocbq->context_un.ndlp;
	else
		ndlp = piocbq->context1;

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			return NULL;
		if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_sgl_list, sglq,
						struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	return sglq;
}
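
/*
 * Illustrative sketch (not from this excerpt): the SLI4 issue path, with
 * hbalock held, pairs each ELS iocb with a sglq and records both the logical
 * and physical XRI in the request before building the WQE. The surrounding
 * context and the IOCB_BUSY return are assumptions about the caller:
 *
 *	sglq = __lpfc_sli_get_sglq(phba, piocb);
 *	if (!sglq)
 *		return IOCB_BUSY;
 *	piocb->sli4_lxritag = sglq->sli4_lxritag;
 *	piocb->sli4_xritag = sglq->sli4_xritag;
 */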

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
			(sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					iflag);
			list_add(&sglq->list,
				&phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else {
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				&phba->sli4_hba.lpfc_sgl_list);

			/* Check if TXQ queue needs to be serviced */
			if (pring->txq_cnt)
				lpfc_worker_wake_up(phba);
		}
	}

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels each
 * IOCB on the list by invoking the completion callback function associated
 * with it, with the provided @ulpstatus and @ulpWord4 set in the IOCB
 * command fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);

		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}
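
/*
 * Illustrative sketch (not from this excerpt): teardown paths typically
 * splice pending iocbs onto a local list and fail them all with one call.
 * The IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED status pair is the conventional
 * value used by the driver's abort paths; the pring context is assumed:
 *
 *	LIST_HEAD(completions);
 *	spin_lock_irq(&phba->hbalock);
 *	list_splice_init(&pring->txq, &completions);
 *	pring->txq_cnt = 0;
 *	spin_unlock_irq(&phba->hbalock);
 *	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 *			      IOERR_SLI_ABORTED);
 */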

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by the ring event handler to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return LPFC_UNKNOWN_IOCB;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
				__func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
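
/*
 * Illustrative sketch (not from this excerpt): ring event handlers mask the
 * command code out of the response iocb and dispatch on the returned type.
 * The irsp variable and the CMD_IOCB_MASK usage are assumptions about the
 * driver's fast-path convention:
 *
 *	type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
 *	switch (type) {
 *	case LPFC_SOL_IOCB:
 *		... complete the originating request ...
 *	case LPFC_UNSOL_IOCB:
 *		... hand the sequence to the unsolicited handler ...
 *	}
 */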

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;
	if (pring->txcmplq_cnt > pring->txcmplq_max)
		pring->txcmplq_max = pring->txcmplq_cnt;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		if (!piocb->vport)
			BUG();
		else
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies + HZ * (phba->fc_ratov << 1));
	}

	return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	if (cmd_iocb != NULL)
		pring->txq_cnt--;
	return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is an available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	   (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
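
/*
 * Illustrative sketch (not from this excerpt): resume/submit paths pair this
 * routine with lpfc_sli_ringtx_get(), copying queued iocbs into ring slots
 * while both are available. lpfc_sli_submit_iocb() is assumed from elsewhere
 * in the driver and is not defined in this excerpt:
 *
 *	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
 *	       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
 *		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
 */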
1389
1390 /**
1391  * lpfc_sli_next_iotag - Get an iotag for the iocb
1392  * @phba: Pointer to HBA context object.
1393  * @iocbq: Pointer to driver iocb object.
1394  *
1395  * This function gets an iotag for the iocb. If there is no unused iotag and
1396  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
1397  * array and assigns a new iotag.
1398  * The function returns the allocated iotag if successful, else returns zero.
1399  * Zero is not a valid iotag.
1400  * The caller is not required to hold any lock.
1401  **/
1402 uint16_t
1403 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1404 {
1405         struct lpfc_iocbq **new_arr;
1406         struct lpfc_iocbq **old_arr;
1407         size_t new_len;
1408         struct lpfc_sli *psli = &phba->sli;
1409         uint16_t iotag;
1410
1411         spin_lock_irq(&phba->hbalock);
1412         iotag = psli->last_iotag;
1413         if (++iotag < psli->iocbq_lookup_len) {
1414                 psli->last_iotag = iotag;
1415                 psli->iocbq_lookup[iotag] = iocbq;
1416                 spin_unlock_irq(&phba->hbalock);
1417                 iocbq->iotag = iotag;
1418                 return iotag;
1419         } else if (psli->iocbq_lookup_len < (0xffff
1420                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1421                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1422                 spin_unlock_irq(&phba->hbalock);
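                /*
                 * The allocation below may sleep (GFP_KERNEL), so the
                 * hbalock must be dropped first; the lookup table is
                 * re-validated after the lock is retaken.
                 */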
1423                 new_arr = kzalloc(new_len * sizeof(struct lpfc_iocbq *),
1424                                   GFP_KERNEL);
1425                 if (new_arr) {
1426                         spin_lock_irq(&phba->hbalock);
1427                         old_arr = psli->iocbq_lookup;
1428                         if (new_len <= psli->iocbq_lookup_len) {
1429                                 /* highly improbable case: the table was grown concurrently */
1430                                 kfree(new_arr);
1431                                 iotag = psli->last_iotag;
1432                                 if (++iotag < psli->iocbq_lookup_len) {
1433                                         psli->last_iotag = iotag;
1434                                         psli->iocbq_lookup[iotag] = iocbq;
1435                                         spin_unlock_irq(&phba->hbalock);
1436                                         iocbq->iotag = iotag;
1437                                         return iotag;
1438                                 }
1439                                 spin_unlock_irq(&phba->hbalock);
1440                                 return 0;
1441                         }
1442                         if (psli->iocbq_lookup)
1443                                 memcpy(new_arr, old_arr,
1444                                        ((psli->last_iotag  + 1) *
1445                                         sizeof(struct lpfc_iocbq *)));
1446                         psli->iocbq_lookup = new_arr;
1447                         psli->iocbq_lookup_len = new_len;
1448                         psli->last_iotag = iotag;
1449                         psli->iocbq_lookup[iotag] = iocbq;
1450                         spin_unlock_irq(&phba->hbalock);
1451                         iocbq->iotag = iotag;
1452                         kfree(old_arr);
1453                         return iotag;
1454                 }
1455         } else
1456                 spin_unlock_irq(&phba->hbalock);
1457
1458         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1459                         "0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
1460                         psli->last_iotag);
1461
1462         return 0;
1463 }
1464
1465 /**
1466  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1467  * @phba: Pointer to HBA context object.
1468  * @pring: Pointer to driver SLI ring object.
1469  * @iocb: Pointer to iocb slot in the ring.
1470  * @nextiocb: Pointer to driver iocb object which needs to be
1471  *            posted to firmware.
1472  *
1473  * This function is called with hbalock held to post a new iocb to
1474  * the firmware. This function copies the new iocb to ring iocb slot and
1475  * updates the ring pointers. It adds the new iocb to txcmplq if there is
1476  * a completion call back for this iocb else the function will free the
1477  * iocb object.
1478  **/
1479 static void
1480 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1481                 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1482 {
1483         /*
1484          * Set up an iotag
1485          */
1486         nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1487
1489         if (pring->ringno == LPFC_ELS_RING) {
1490                 lpfc_debugfs_slow_ring_trc(phba,
1491                         "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
1492                         *(((uint32_t *) &nextiocb->iocb) + 4),
1493                         *(((uint32_t *) &nextiocb->iocb) + 6),
1494                         *(((uint32_t *) &nextiocb->iocb) + 7));
1495         }
1496
1497         /*
1498          * Issue iocb command to adapter
1499          */
1500         lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
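        /*
         * Order the iocb copy ahead of the cmdPutInx update below so
         * the HBA never sees the new put index before the iocb
         * contents are visible in host memory.
         */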
1501         wmb();
1502         pring->stats.iocb_cmd++;
1503
1504         /*
1505          * If there is no completion routine to call, we can release the
1506          * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1507          * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1508          */
1509         if (nextiocb->iocb_cmpl)
1510                 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1511         else
1512                 __lpfc_sli_release_iocbq(phba, nextiocb);
1513
1514         /*
1515          * Let the HBA know what IOCB slot will be the next one the
1516          * driver will put a command into.
1517          */
1518         pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1519         writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1520 }
1521
1522 /**
1523  * lpfc_sli_update_full_ring - Update the chip attention register
1524  * @phba: Pointer to HBA context object.
1525  * @pring: Pointer to driver SLI ring object.
1526  *
1527  * The caller is not required to hold any lock for calling this function.
1528  * This function updates the chip attention bits for the ring to inform the
1529  * firmware that there is pending work to be done for this ring and requests an
1530  * interrupt when there is space available in the ring. This function is
1531  * called when the driver is unable to post more iocbs to the ring due
1532  * to unavailability of space in the ring.
1533  **/
1534 static void
1535 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1536 {
1537         int ringno = pring->ringno;
1538
1539         pring->flag |= LPFC_CALL_RING_AVAILABLE;
1540
1541         wmb();
1542
1543         /*
1544          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1545          * The HBA will tell us when an IOCB entry is available.
1546          */
1547         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1548         readl(phba->CAregaddr); /* flush */
1549
1550         pring->stats.iocb_cmd_full++;
1551 }
1552
1553 /**
1554  * lpfc_sli_update_ring - Update chip attention register
1555  * @phba: Pointer to HBA context object.
1556  * @pring: Pointer to driver SLI ring object.
1557  *
1558  * This function updates the chip attention register bit for the
1559  * given ring to inform HBA that there is more work to be done
1560  * in this ring. The caller is not required to hold any lock.
1561  **/
1562 static void
1563 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1564 {
1565         int ringno = pring->ringno;
1566
1567         /*
1568          * Tell the HBA that there is work to do in this ring.
1569          */
1570         if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1571                 wmb();
1572                 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1573                 readl(phba->CAregaddr); /* flush */
1574         }
1575 }
1576
1577 /**
1578  * lpfc_sli_resume_iocb - Process iocbs in the txq
1579  * @phba: Pointer to HBA context object.
1580  * @pring: Pointer to driver SLI ring object.
1581  *
1582  * This function is called with hbalock held to post pending iocbs
1583  * in the txq to the firmware. This function is called when the driver
1584  * detects space available in the ring.
1585  **/
1586 static void
1587 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1588 {
1589         IOCB_t *iocb;
1590         struct lpfc_iocbq *nextiocb;
1591
1592         /*
1593          * Check to see if:
1594          *  (a) there is anything on the txq to send
1595          *  (b) link is up
1596          *  (c) link attention events can be processed (fcp ring only)
1597          *  (d) IOCB processing is not blocked by the outstanding mbox command.
1598          */
1599         if (pring->txq_cnt &&
1600             lpfc_is_link_up(phba) &&
1601             (pring->ringno != phba->sli.fcp_ring ||
1602              phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1603
1604                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1605                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1606                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1607
1608                 if (iocb)
1609                         lpfc_sli_update_ring(phba, pring);
1610                 else
1611                         lpfc_sli_update_full_ring(phba, pring);
1612         }
1613
1614         return;
1615 }
1616
1617 /**
1618  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1619  * @phba: Pointer to HBA context object.
1620  * @hbqno: HBQ number.
1621  *
1622  * This function is called with hbalock held to get the next
1623  * available slot for the given HBQ. If a free slot is
1624  * available for the HBQ, it will return a pointer to the next available
1625  * HBQ entry; otherwise it will return NULL.
1626  **/
1627 static struct lpfc_hbq_entry *
1628 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1629 {
1630         struct hbq_s *hbqp = &phba->hbqs[hbqno];
1631
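        /*
         * Advance next_hbqPutIdx past the slot most recently handed
         * out, wrapping to the start of the HBQ at the end.
         */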
1632         if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1633             ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1634                 hbqp->next_hbqPutIdx = 0;
1635
1636         if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1637                 uint32_t raw_index = phba->hbq_get[hbqno];
1638                 uint32_t getidx = le32_to_cpu(raw_index);
1639
1640                 hbqp->local_hbqGetIdx = getidx;
1641
1642                 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1643                         lpfc_printf_log(phba, KERN_ERR,
1644                                         LOG_SLI | LOG_VPORT,
1645                                         "1802 HBQ %d: local_hbqGetIdx "
1646                                         "%u is greater than hbqp->entry_count %u\n",
1647                                         hbqno, hbqp->local_hbqGetIdx,
1648                                         hbqp->entry_count);
1649
1650                         phba->link_state = LPFC_HBA_ERROR;
1651                         return NULL;
1652                 }
1653
1654                 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1655                         return NULL;
1656         }
1657
1658         return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1659                         hbqp->hbqPutIdx;
1660 }
1661
1662 /**
1663  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1664  * @phba: Pointer to HBA context object.
1665  *
1666  * This function is called with no lock held to free all the
1667  * hbq buffers while uninitializing the SLI interface. It also
1668  * frees the HBQ buffers returned by the firmware but not yet
1669  * processed by the upper layers.
1670  **/
1671 void
1672 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1673 {
1674         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1675         struct hbq_dmabuf *hbq_buf;
1676         unsigned long flags;
1677         int i, hbq_count;
1678         uint32_t hbqno;
1679
1680         hbq_count = lpfc_sli_hbq_count();
1681         /* Return all memory used by all HBQs */
1682         spin_lock_irqsave(&phba->hbalock, flags);
1683         for (i = 0; i < hbq_count; ++i) {
1684                 list_for_each_entry_safe(dmabuf, next_dmabuf,
1685                                 &phba->hbqs[i].hbq_buffer_list, list) {
1686                         hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1687                         list_del(&hbq_buf->dbuf.list);
1688                         (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1689                 }
1690                 phba->hbqs[i].buffer_count = 0;
1691         }
1692         /* Return all HBQ buffers that are in flight */
1693         list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
1694                                  list) {
1695                 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1696                 list_del(&hbq_buf->dbuf.list);
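                /* tag == -1 means no HBQ was assigned; use the ELS HBQ free routine */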
1697                 if (hbq_buf->tag == -1) {
1698                         (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1699                                 (phba, hbq_buf);
1700                 } else {
1701                         hbqno = hbq_buf->tag >> 16;
1702                         if (hbqno >= LPFC_MAX_HBQS)
1703                                 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1704                                         (phba, hbq_buf);
1705                         else
1706                                 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1707                                         hbq_buf);
1708                 }
1709         }
1710
1711         /* Mark the HBQs not in use */
1712         phba->hbq_in_use = 0;
1713         spin_unlock_irqrestore(&phba->hbalock, flags);
1714 }
1715
1716 /**
1717  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
1718  * @phba: Pointer to HBA context object.
1719  * @hbqno: HBQ number.
1720  * @hbq_buf: Pointer to HBQ buffer.
1721  *
1722  * This function is called with the hbalock held to post an
1723  * hbq buffer to the firmware. If the function finds an empty
1724  * slot in the HBQ, it will post the buffer. The function will return
1725  * zero if it successfully posts the buffer; otherwise it will
1726  * return an error.
1727  **/
1728 static int
1729 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
1730                          struct hbq_dmabuf *hbq_buf)
1731 {
1732         return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1733 }
1734
1735 /**
1736  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1737  * @phba: Pointer to HBA context object.
1738  * @hbqno: HBQ number.
1739  * @hbq_buf: Pointer to HBQ buffer.
1740  *
1741  * This function is called with the hbalock held to post an hbq buffer to the
1742  * firmware. If the function finds an empty slot in the HBQ, it will post the
1743  * buffer and place it on the hbq_buffer_list. The function will return zero if
1744  * it successfully posts the buffer; otherwise it will return an error.
1745  **/
1746 static int
1747 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1748                             struct hbq_dmabuf *hbq_buf)
1749 {
1750         struct lpfc_hbq_entry *hbqe;
1751         dma_addr_t physaddr = hbq_buf->dbuf.phys;
1752
1753         /* Get next HBQ entry slot to use */
1754         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1755         if (hbqe) {
1756                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1757
1758                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1759                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
1760                 hbqe->bde.tus.f.bdeSize = hbq_buf->size;
1761                 hbqe->bde.tus.f.bdeFlags = 0;
1762                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1763                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1764                                 /* Sync SLIM */
1765                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1766                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
1767                                 /* flush */
1768                 readl(phba->hbq_put + hbqno);
1769                 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
1770                 return 0;
1771         }
1772         return -ENOMEM;
1773 }
1774
1775 /**
1776  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1777  * @phba: Pointer to HBA context object.
1778  * @hbqno: HBQ number.
1779  * @hbq_buf: Pointer to HBQ buffer.
1780  *
1781  * This function is called with the hbalock held to post an RQE to the SLI4
1782  * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1783  * the hbq_buffer_list and return zero, otherwise it will return an error.
1784  **/
1785 static int
1786 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1787                             struct hbq_dmabuf *hbq_buf)
1788 {
1789         int rc;
1790         struct lpfc_rqe hrqe;
1791         struct lpfc_rqe drqe;
1792
1793         hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1794         hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1795         drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1796         drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
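        /*
         * Post matching header and data RQEs; on success the RQ put
         * routine returns the index of the entry used, which is then
         * saved as the buffer tag.
         */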
1797         rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1798                               &hrqe, &drqe);
1799         if (rc < 0)
1800                 return rc;
1801         hbq_buf->tag = rc;
1802         list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1803         return 0;
1804 }
1805
1806 /* HBQ for ELS and CT traffic. */
1807 static struct lpfc_hbq_init lpfc_els_hbq = {
1808         .rn = 1,
1809         .entry_count = 256,
1810         .mask_count = 0,
1811         .profile = 0,
1812         .ring_mask = (1 << LPFC_ELS_RING),
1813         .buffer_count = 0,
1814         .init_count = 40,
1815         .add_count = 40,
1816 };
1817
1818 /* HBQ for the extra ring if needed */
1819 static struct lpfc_hbq_init lpfc_extra_hbq = {
1820         .rn = 1,
1821         .entry_count = 200,
1822         .mask_count = 0,
1823         .profile = 0,
1824         .ring_mask = (1 << LPFC_EXTRA_RING),
1825         .buffer_count = 0,
1826         .init_count = 0,
1827         .add_count = 5,
1828 };
1829
1830 /* Array of HBQs */
1831 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
1832         &lpfc_els_hbq,
1833         &lpfc_extra_hbq,
1834 };
1835
1836 /**
1837  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
1838  * @phba: Pointer to HBA context object.
1839  * @hbqno: HBQ number.
1840  * @count: Number of HBQ buffers to be posted.
1841  *
1842  * This function is called with no lock held to post more hbq buffers to the
1843  * given HBQ. The function returns the number of HBQ buffers successfully
1844  * posted.
1845  **/
1846 static int
1847 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
1848 {
1849         uint32_t i, posted = 0;
1850         unsigned long flags;
1851         struct hbq_dmabuf *hbq_buffer;
1852         LIST_HEAD(hbq_buf_list);
1853         if (!phba->hbqs[hbqno].hbq_alloc_buffer)
1854                 return 0;
1855
1856         if ((phba->hbqs[hbqno].buffer_count + count) >
1857             lpfc_hbq_defs[hbqno]->entry_count)
1858                 count = lpfc_hbq_defs[hbqno]->entry_count -
1859                                         phba->hbqs[hbqno].buffer_count;
1860         if (!count)
1861                 return 0;
1862         /* Allocate HBQ entries */
1863         for (i = 0; i < count; i++) {
1864                 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1865                 if (!hbq_buffer)
1866                         break;
1867                 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1868         }
1869         /* Check whether HBQ is still in use */
1870         spin_lock_irqsave(&phba->hbalock, flags);
1871         if (!phba->hbq_in_use)
1872                 goto err;
1873         while (!list_empty(&hbq_buf_list)) {
1874                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1875                                  dbuf.list);
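                /* Tag layout: HBQ number in the upper 16 bits, buffer
                 * index within the HBQ in the lower 16 bits.
                 */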
1876                 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1877                                       (hbqno << 16));
1878                 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
1879                         phba->hbqs[hbqno].buffer_count++;
1880                         posted++;
1881                 } else
1882                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1883         }
1884         spin_unlock_irqrestore(&phba->hbalock, flags);
1885         return posted;
1886 err:
1887         spin_unlock_irqrestore(&phba->hbalock, flags);
1888         while (!list_empty(&hbq_buf_list)) {
1889                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1890                                  dbuf.list);
1891                 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1892         }
1893         return 0;
1894 }
1895
1896 /**
1897  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
1898  * @phba: Pointer to HBA context object.
1899  * @qno: HBQ number.
1900  *
1901  * This function posts more buffers to the HBQ. This function
1902  * is called with no lock held. The function returns the number of HBQ entries
1903  * successfully posted.
1904  **/
1905 int
1906 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1907 {
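        /* SLI4 ports post receive buffers through RQs; nothing to add here */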
1908         if (phba->sli_rev == LPFC_SLI_REV4)
1909                 return 0;
1910         else
1911                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1912                                          lpfc_hbq_defs[qno]->add_count);
1913 }
1914
1915 /**
1916  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
1917  * @phba: Pointer to HBA context object.
1918  * @qno:  HBQ queue number.
1919  *
1920  * This function is called from SLI initialization code path with
1921  * no lock held to post initial HBQ buffers to firmware. The
1922  * function returns the number of HBQ entries successfully posted.
1923  **/
1924 static int
1925 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1926 {
1927         if (phba->sli_rev == LPFC_SLI_REV4)
1928                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1929                                         lpfc_hbq_defs[qno]->entry_count);
1930         else
1931                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1932                                          lpfc_hbq_defs[qno]->init_count);
1933 }
1934
1935 /**
1936  * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
1937  * @rb_list: Pointer to an hbq buffer list.
1939  *
1940  * This function removes the first hbq buffer on an hbq list and returns a
1941  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1942  **/
1943 static struct hbq_dmabuf *
1944 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1945 {
1946         struct lpfc_dmabuf *d_buf;
1947
1948         list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1949         if (!d_buf)
1950                 return NULL;
1951         return container_of(d_buf, struct hbq_dmabuf, dbuf);
1952 }
1953
1954 /**
1955  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
1956  * @phba: Pointer to HBA context object.
1957  * @tag: Tag of the hbq buffer.
1958  *
1959  * This function acquires and releases the hbalock internally. It searches
1960  * for the hbq buffer associated with the given tag in the hbq buffer
1961  * list. If it finds the hbq buffer, it returns the hbq_buffer; otherwise
1962  * it returns NULL.
1963  **/
1964 static struct hbq_dmabuf *
1965 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
1966 {
1967         struct lpfc_dmabuf *d_buf;
1968         struct hbq_dmabuf *hbq_buf;
1969         uint32_t hbqno;
1970
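        /* The upper 16 bits of the tag encode the HBQ number */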
1971         hbqno = tag >> 16;
1972         if (hbqno >= LPFC_MAX_HBQS)
1973                 return NULL;
1974
1975         spin_lock_irq(&phba->hbalock);
1976         list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
1977                 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
1978                 if (hbq_buf->tag == tag) {
1979                         spin_unlock_irq(&phba->hbalock);
1980                         return hbq_buf;
1981                 }
1982         }
1983         spin_unlock_irq(&phba->hbalock);
1984         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
1985                         "1803 Bad hbq tag. Data: x%x x%x\n",
1986                         tag, phba->hbqs[tag >> 16].buffer_count);
1987         return NULL;
1988 }
1989
1990 /**
1991  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
1992  * @phba: Pointer to HBA context object.
1993  * @hbq_buffer: Pointer to HBQ buffer.
1994  *
1995  * This function is called with the hbalock held. It gives back
1996  * the hbq buffer to firmware. If the HBQ does not have space to
1997  * post the buffer, it will free the buffer.
1998  **/
1999 void
2000 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2001 {
2002         uint32_t hbqno;
2003
2004         if (hbq_buffer) {
2005                 hbqno = hbq_buffer->tag >> 16;
2006                 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2007                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2008         }
2009 }
2010
2011 /**
2012  * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2013  * @mbxCommand: mailbox command code.
2014  *
2015  * This function is called by the mailbox event handler function to verify
2016  * that the completed mailbox command is a legitimate mailbox command. If the
2017  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2018  * and the mailbox event handler will take the HBA offline.
2019  **/
2020 static int
2021 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2022 {
2023         uint8_t ret;
2024
2025         switch (mbxCommand) {
2026         case MBX_LOAD_SM:
2027         case MBX_READ_NV:
2028         case MBX_WRITE_NV:
2029         case MBX_WRITE_VPARMS:
2030         case MBX_RUN_BIU_DIAG:
2031         case MBX_INIT_LINK:
2032         case MBX_DOWN_LINK:
2033         case MBX_CONFIG_LINK:
2034         case MBX_CONFIG_RING:
2035         case MBX_RESET_RING:
2036         case MBX_READ_CONFIG:
2037         case MBX_READ_RCONFIG:
2038         case MBX_READ_SPARM:
2039         case MBX_READ_STATUS:
2040         case MBX_READ_RPI:
2041         case MBX_READ_XRI:
2042         case MBX_READ_REV:
2043         case MBX_READ_LNK_STAT:
2044         case MBX_REG_LOGIN:
2045         case MBX_UNREG_LOGIN:
2046         case MBX_CLEAR_LA:
2047         case MBX_DUMP_MEMORY:
2048         case MBX_DUMP_CONTEXT:
2049         case MBX_RUN_DIAGS:
2050         case MBX_RESTART:
2051         case MBX_UPDATE_CFG:
2052         case MBX_DOWN_LOAD:
2053         case MBX_DEL_LD_ENTRY:
2054         case MBX_RUN_PROGRAM:
2055         case MBX_SET_MASK:
2056         case MBX_SET_VARIABLE:
2057         case MBX_UNREG_D_ID:
2058         case MBX_KILL_BOARD:
2059         case MBX_CONFIG_FARP:
2060         case MBX_BEACON:
2061         case MBX_LOAD_AREA:
2062         case MBX_RUN_BIU_DIAG64:
2063         case MBX_CONFIG_PORT:
2064         case MBX_READ_SPARM64:
2065         case MBX_READ_RPI64:
2066         case MBX_REG_LOGIN64:
2067         case MBX_READ_TOPOLOGY:
2068         case MBX_WRITE_WWN:
2069         case MBX_SET_DEBUG:
2070         case MBX_LOAD_EXP_ROM:
2071         case MBX_ASYNCEVT_ENABLE:
2072         case MBX_REG_VPI:
2073         case MBX_UNREG_VPI:
2074         case MBX_HEARTBEAT:
2075         case MBX_PORT_CAPABILITIES:
2076         case MBX_PORT_IOV_CONTROL:
2077         case MBX_SLI4_CONFIG:
2078         case MBX_SLI4_REQ_FTRS:
2079         case MBX_REG_FCFI:
2080         case MBX_UNREG_FCFI:
2081         case MBX_REG_VFI:
2082         case MBX_UNREG_VFI:
2083         case MBX_INIT_VPI:
2084         case MBX_INIT_VFI:
2085         case MBX_RESUME_RPI:
2086         case MBX_READ_EVENT_LOG_STATUS:
2087         case MBX_READ_EVENT_LOG:
2088         case MBX_SECURITY_MGMT:
2089         case MBX_AUTH_PORT:
2090         case MBX_ACCESS_VDATA:
2091                 ret = mbxCommand;
2092                 break;
2093         default:
2094                 ret = MBX_SHUTDOWN;
2095                 break;
2096         }
2097         return ret;
2098 }
2099
2100 /**
2101  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2102  * @phba: Pointer to HBA context object.
2103  * @pmboxq: Pointer to mailbox command.
2104  *
2105  * This is the completion handler function for mailbox commands issued from
2106  * the lpfc_sli_issue_mbox_wait function. This function is called by the
2107  * mailbox event handler function with no lock held. This function
2108  * will wake up the thread waiting on the wait queue pointed to by context1
2109  * of the mailbox.
2110  **/
2111 void
2112 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2113 {
2114         wait_queue_head_t *pdone_q;
2115         unsigned long drvr_flag;
2116
2117         /*
2118          * If pdone_q is empty, the driver thread gave up waiting and
2119          * continued running.
2120          */
2121         pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2122         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2123         pdone_q = (wait_queue_head_t *) pmboxq->context1;
2124         if (pdone_q)
2125                 wake_up_interruptible(pdone_q);
2126         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2127         return;
2128 }
2129
2130
2131 /**
2132  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2133  * @phba: Pointer to HBA context object.
2134  * @pmb: Pointer to mailbox object.
2135  *
2136  * This function is the default mailbox completion handler. It
2137  * frees the memory resources associated with the completed mailbox
2138  * command. If the completed command is a REG_LOGIN mailbox command,
2139  * this function will issue an UNREG_LOGIN to reclaim the RPI.
2140  **/
2141 void
2142 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2143 {
2144         struct lpfc_vport  *vport = pmb->vport;
2145         struct lpfc_dmabuf *mp;
2146         struct lpfc_nodelist *ndlp;
2147         struct Scsi_Host *shost;
2148         uint16_t rpi, vpi;
2149         int rc;
2150
2151         mp = (struct lpfc_dmabuf *) (pmb->context1);
2152
2153         if (mp) {
2154                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2155                 kfree(mp);
2156         }
2157
2158         /*
2159          * If a REG_LOGIN succeeded after the node was destroyed or the
2160          * node is in re-discovery, the driver needs to clean up the RPI.
2161          */
2162         if (!(phba->pport->load_flag & FC_UNLOADING) &&
2163             pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2164             !pmb->u.mb.mbxStatus) {
2165                 rpi = pmb->u.mb.un.varWords[0];
2166                 vpi = pmb->u.mb.un.varRegLogin.vpi;
2167                 lpfc_unreg_login(phba, vpi, rpi, pmb);
2168                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2169                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2170                 if (rc != MBX_NOT_FINISHED)
2171                         return;
2172         }
2173
2174         if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2175                 !(phba->pport->load_flag & FC_UNLOADING) &&
2176                 !pmb->u.mb.mbxStatus) {
2177                 shost = lpfc_shost_from_vport(vport);
2178                 spin_lock_irq(shost->host_lock);
2179                 vport->vpi_state |= LPFC_VPI_REGISTERED;
2180                 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2181                 spin_unlock_irq(shost->host_lock);
2182         }
2183
2184         if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2185                 ndlp = (struct lpfc_nodelist *)pmb->context2;
2186                 lpfc_nlp_put(ndlp);
2187                 pmb->context2 = NULL;
2188         }
2189
2190         /* Check security permission status on INIT_LINK mailbox command */
2191         if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2192             (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2193                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2194                                 "2860 SLI authentication is required "
2195                                 "for INIT_LINK but has not been done yet\n");
2196
2197         if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2198                 lpfc_sli4_mbox_cmd_free(phba, pmb);
2199         else
2200                 mempool_free(pmb, phba->mbox_mem_pool);
2201 }
2202
2203 /**
2204  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2205  * @phba: Pointer to HBA context object.
2206  *
2207  * This function is called with no lock held. It processes all
2208  * the completed mailbox commands and gives them to upper layers. The interrupt
2209  * service routine processes the mailbox completion interrupt, adds completed
2210  * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2211  * The worker thread calls lpfc_sli_handle_mb_event, which will return the
2212  * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This
2213  * function returns the mailbox commands to the upper layer by calling the
2214  * completion handler function of each mailbox.
2215  **/
2216 int
2217 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2218 {
2219         MAILBOX_t *pmbox;
2220         LPFC_MBOXQ_t *pmb;
2221         int rc;
2222         LIST_HEAD(cmplq);
2223
2224         phba->sli.slistat.mbox_event++;
2225
2226         /* Get all completed mailbox buffers into the cmplq */
2227         spin_lock_irq(&phba->hbalock);
2228         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2229         spin_unlock_irq(&phba->hbalock);
2230
2231         /* Get a Mailbox buffer to setup mailbox commands for callback */
2232         do {
2233                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2234                 if (pmb == NULL)
2235                         break;
2236
2237                 pmbox = &pmb->u.mb;
2238
2239                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2240                         if (pmb->vport) {
2241                                 lpfc_debugfs_disc_trc(pmb->vport,
2242                                         LPFC_DISC_TRC_MBOX_VPORT,
2243                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2244                                         (uint32_t)pmbox->mbxCommand,
2245                                         pmbox->un.varWords[0],
2246                                         pmbox->un.varWords[1]);
2247                         }
2248                         else {
2249                                 lpfc_debugfs_disc_trc(phba->pport,
2250                                         LPFC_DISC_TRC_MBOX,
2251                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
2252                                         (uint32_t)pmbox->mbxCommand,
2253                                         pmbox->un.varWords[0],
2254                                         pmbox->un.varWords[1]);
2255                         }
2256                 }
2257
2258                 /*
2259                  * It is a fatal error if an unknown mbox command completes.
2260                  */
2261                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2262                     MBX_SHUTDOWN) {
2263                         /* Unknown mailbox command compl */
2264                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2265                                         "(%d):0323 Unknown Mailbox command "
2266                                         "x%x (x%x/x%x) Cmpl\n",
2267                                         pmb->vport ? pmb->vport->vpi : 0,
2268                                         pmbox->mbxCommand,
2269                                         lpfc_sli_config_mbox_subsys_get(phba,
2270                                                                         pmb),
2271                                         lpfc_sli_config_mbox_opcode_get(phba,
2272                                                                         pmb));
2273                         phba->link_state = LPFC_HBA_ERROR;
2274                         phba->work_hs = HS_FFER3;
2275                         lpfc_handle_eratt(phba);
2276                         continue;
2277                 }
2278
2279                 if (pmbox->mbxStatus) {
2280                         phba->sli.slistat.mbox_stat_err++;
2281                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2282                                 /* Mbox cmd cmpl error - RETRYing */
2283                                 lpfc_printf_log(phba, KERN_INFO,
2284                                         LOG_MBOX | LOG_SLI,
2285                                         "(%d):0305 Mbox cmd cmpl "
2286                                         "error - RETRYing Data: x%x "
2287                                         "(x%x/x%x) x%x x%x x%x\n",
2288                                         pmb->vport ? pmb->vport->vpi : 0,
2289                                         pmbox->mbxCommand,
2290                                         lpfc_sli_config_mbox_subsys_get(phba,
2291                                                                         pmb),
2292                                         lpfc_sli_config_mbox_opcode_get(phba,
2293                                                                         pmb),
2294                                         pmbox->mbxStatus,
2295                                         pmbox->un.varWords[0],
2296                                         pmb->vport ? pmb->vport->port_state : 0);
2297                                 pmbox->mbxStatus = 0;
2298                                 pmbox->mbxOwner = OWN_HOST;
2299                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2300                                 if (rc != MBX_NOT_FINISHED)
2301                                         continue;
2302                         }
2303                 }
2304
2305                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2306                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2307                                 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2308                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
2309                                 pmb->vport ? pmb->vport->vpi : 0,
2310                                 pmbox->mbxCommand,
2311                                 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2312                                 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2313                                 pmb->mbox_cmpl,
2314                                 *((uint32_t *) pmbox),
2315                                 pmbox->un.varWords[0],
2316                                 pmbox->un.varWords[1],
2317                                 pmbox->un.varWords[2],
2318                                 pmbox->un.varWords[3],
2319                                 pmbox->un.varWords[4],
2320                                 pmbox->un.varWords[5],
2321                                 pmbox->un.varWords[6],
2322                                 pmbox->un.varWords[7]);
2323
2324                 if (pmb->mbox_cmpl)
2325                         pmb->mbox_cmpl(phba, pmb);
2326         } while (1);
2327         return 0;
2328 }
2329
2330 /**
2331  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2332  * @phba: Pointer to HBA context object.
2333  * @pring: Pointer to driver SLI ring object.
2334  * @tag: buffer tag.
2335  *
2336  * This function is called with no lock held. When QUE_BUFTAG_BIT
2337  * is set in the tag, the buffer was posted for a particular exchange and
2338  * the function will return the buffer without replacing it.
2339  * If the buffer is for unsolicited ELS or CT traffic, this function
2340  * returns the buffer and also posts another buffer to the firmware.
2341  **/
2342 static struct lpfc_dmabuf *
2343 lpfc_sli_get_buff(struct lpfc_hba *phba,
2344                   struct lpfc_sli_ring *pring,
2345                   uint32_t tag)
2346 {
2347         struct hbq_dmabuf *hbq_entry;
2348
2349         if (tag & QUE_BUFTAG_BIT)
2350                 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2351         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2352         if (!hbq_entry)
2353                 return NULL;
2354         return &hbq_entry->dbuf;
2355 }
2356
2357 /**
2358  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2359  * @phba: Pointer to HBA context object.
2360  * @pring: Pointer to driver SLI ring object.
2361  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2362  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2363  * @fch_type: the type for the first frame of the sequence.
2364  *
2365  * This function is called with no lock held. This function uses the r_ctl and
2366  * type of the received sequence to find the correct callback function to call
2367  * to process the sequence.
2368  **/
2369 static int
2370 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2371                          struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2372                          uint32_t fch_type)
2373 {
2374         int i;
2375
2376         /* unSolicited Responses */
2377         if (pring->prt[0].profile) {
2378                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2379                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2380                                                                         saveq);
2381                 return 1;
2382         }
2383         /* We must search, based on rctl / type
2384            for the right routine */
2385         for (i = 0; i < pring->num_mask; i++) {
2386                 if ((pring->prt[i].rctl == fch_r_ctl) &&
2387                     (pring->prt[i].type == fch_type)) {
2388                         if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2389                                 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2390                                                 (phba, pring, saveq);
2391                         return 1;
2392                 }
2393         }
2394         return 0;
2395 }
2396
2397 /**
2398  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2399  * @phba: Pointer to HBA context object.
2400  * @pring: Pointer to driver SLI ring object.
2401  * @saveq: Pointer to the unsolicited iocb.
2402  *
2403  * This function is called with no lock held by the ring event handler
2404  * when there is an unsolicited iocb posted to the response ring by the
2405  * firmware. This function gets the buffer associated with the iocbs
2406  * and calls the event handler for the ring. This function handles both
2407  * qring buffers and hbq buffers.
2408  * When the function returns 1, the caller can free the iocb object; otherwise
2409  * upper layer functions will free the iocb objects.
2410  **/
2411 static int
2412 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2413                             struct lpfc_iocbq *saveq)
2414 {
2415         IOCB_t            *irsp;
2416         WORD5             *w5p;
2417         uint32_t           Rctl, Type;
2418         uint32_t           match;
2419         struct lpfc_iocbq *iocbq;
2420         struct lpfc_dmabuf *dmzbuf;
2421
2422         match = 0;
2423         irsp = &(saveq->iocb);
2424
2425         if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2426                 if (pring->lpfc_sli_rcv_async_status)
2427                         pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2428                 else
2429                         lpfc_printf_log(phba,
2430                                         KERN_WARNING,
2431                                         LOG_SLI,
2432                                         "0316 Ring %d handler: unexpected "
2433                                         "ASYNC_STATUS iocb received evt_code "
2434                                         "0x%x\n",
2435                                         pring->ringno,
2436                                         irsp->un.asyncstat.evt_code);
2437                 return 1;
2438         }
2439
2440         if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2441                 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2442                 if (irsp->ulpBdeCount > 0) {
2443                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2444                                         irsp->un.ulpWord[3]);
2445                         lpfc_in_buf_free(phba, dmzbuf);
2446                 }
2447
2448                 if (irsp->ulpBdeCount > 1) {
2449                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2450                                         irsp->unsli3.sli3Words[3]);
2451                         lpfc_in_buf_free(phba, dmzbuf);
2452                 }
2453
2454                 if (irsp->ulpBdeCount > 2) {
2455                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2456                                 irsp->unsli3.sli3Words[7]);
2457                         lpfc_in_buf_free(phba, dmzbuf);
2458                 }
2459
2460                 return 1;
2461         }
2462
2463         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2464                 if (irsp->ulpBdeCount != 0) {
2465                         saveq->context2 = lpfc_sli_get_buff(phba, pring,
2466                                                 irsp->un.ulpWord[3]);
2467                         if (!saveq->context2)
2468                                 lpfc_printf_log(phba,
2469                                         KERN_ERR,
2470                                         LOG_SLI,
2471                                         "0341 Ring %d Cannot find buffer for "
2472                                         "an unsolicited iocb. tag 0x%x\n",
2473                                         pring->ringno,
2474                                         irsp->un.ulpWord[3]);
2475                 }
2476                 if (irsp->ulpBdeCount == 2) {
2477                         saveq->context3 = lpfc_sli_get_buff(phba, pring,
2478                                                 irsp->unsli3.sli3Words[7]);
2479                         if (!saveq->context3)
2480                                 lpfc_printf_log(phba,
2481                                         KERN_ERR,
2482                                         LOG_SLI,
2483                                         "0342 Ring %d Cannot find buffer for an"
2484                                         " unsolicited iocb. tag 0x%x\n",
2485                                         pring->ringno,
2486                                         irsp->unsli3.sli3Words[7]);
2487                 }
2488                 list_for_each_entry(iocbq, &saveq->list, list) {
2489                         irsp = &(iocbq->iocb);
2490                         if (irsp->ulpBdeCount != 0) {
2491                                 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2492                                                         irsp->un.ulpWord[3]);
2493                                 if (!iocbq->context2)
2494                                         lpfc_printf_log(phba,
2495                                                 KERN_ERR,
2496                                                 LOG_SLI,
2497                                                 "0343 Ring %d Cannot find "
2498                                                 "buffer for an unsolicited iocb"
2499                                                 ". tag 0x%x\n", pring->ringno,
2500                                                 irsp->un.ulpWord[3]);
2501                         }
2502                         if (irsp->ulpBdeCount == 2) {
2503                                 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2504                                                 irsp->unsli3.sli3Words[7]);
2505                                 if (!iocbq->context3)
2506                                         lpfc_printf_log(phba,
2507                                                 KERN_ERR,
2508                                                 LOG_SLI,
2509                                                 "0344 Ring %d Cannot find "
2510                                                 "buffer for an unsolicited "
2511                                                 "iocb. tag 0x%x\n",
2512                                                 pring->ringno,
2513                                                 irsp->unsli3.sli3Words[7]);
2514                         }
2515                 }
2516         }
2517         if (irsp->ulpBdeCount != 0 &&
2518             (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2519              irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2520                 int found = 0;
2521
2522                 /* search continue save q for same XRI */
2523                 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2524                         if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2525                                 saveq->iocb.unsli3.rcvsli3.ox_id) {
2526                                 list_add_tail(&saveq->list, &iocbq->list);
2527                                 found = 1;
2528                                 break;
2529                         }
2530                 }
2531                 if (!found)
2532                         list_add_tail(&saveq->clist,
2533                                       &pring->iocb_continue_saveq);
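                /*
                 * A status other than IOSTAT_INTERMED_RSP marks the
                 * final frame; process the accumulated sequence from
                 * its first iocb rather than this trailing one.
                 */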
2534                 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2535                         list_del_init(&iocbq->clist);
2536                         saveq = iocbq;
2537                         irsp = &(saveq->iocb);
2538                 } else
2539                         return 0;
2540         }
2541         if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2542             (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2543             (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2544                 Rctl = FC_RCTL_ELS_REQ;
2545                 Type = FC_TYPE_ELS;
2546         } else {
2547                 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2548                 Rctl = w5p->hcsw.Rctl;
2549                 Type = w5p->hcsw.Type;
2550
2551                 /* Firmware workaround: treat zero-Rctl ELS ring receives as ELS */
2552                 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2553                         (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2554                          irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2555                         Rctl = FC_RCTL_ELS_REQ;
2556                         Type = FC_TYPE_ELS;
2557                         w5p->hcsw.Rctl = Rctl;
2558                         w5p->hcsw.Type = Type;
2559                 }
2560         }
2561
2562         if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2563                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2564                                 "0313 Ring %d handler: unexpected Rctl x%x "
2565                                 "Type x%x received\n",
2566                                 pring->ringno, Rctl, Type);
2567
2568         return 1;
2569 }
2570
2571 /**
2572  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2573  * @phba: Pointer to HBA context object.
2574  * @pring: Pointer to driver SLI ring object.
2575  * @prspiocb: Pointer to response iocb object.
2576  *
2577  * This function looks up the iocbq_lookup table to get the command iocb
2578  * corresponding to the given response iocb using the iotag of the
2579  * response iocb. This function is called with the hbalock held.
2580  * This function returns the command iocb object if it finds the command
2581  * iocb else returns NULL.
2582  **/
2583 static struct lpfc_iocbq *
2584 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2585                       struct lpfc_sli_ring *pring,
2586                       struct lpfc_iocbq *prspiocb)
2587 {
2588         struct lpfc_iocbq *cmd_iocb = NULL;
2589         uint16_t iotag;
2590
2591         iotag = prspiocb->iocb.ulpIoTag;
2592
2593         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
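        /* iotag 0 is reserved and never maps to a command iocb */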
2594                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2595                 list_del_init(&cmd_iocb->list);
2596                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2597                         pring->txcmplq_cnt--;
2598                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2599                 }
2600                 return cmd_iocb;
2601         }
2602
2603         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2604                         "0317 iotag x%x is out of "
2605                         "range: max iotag x%x wd0 x%x\n",
2606                         iotag, phba->sli.last_iotag,
2607                         *(((uint32_t *) &prspiocb->iocb) + 7));
2608         return NULL;
2609 }
2610
2611 /**
2612  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2613  * @phba: Pointer to HBA context object.
2614  * @pring: Pointer to driver SLI ring object.
2615  * @iotag: IOCB tag.
2616  *
2617  * This function looks up the iocbq_lookup table to get the command iocb
2618  * corresponding to the given iotag. This function is called with the
2619  * hbalock held.
2620  * This function returns the command iocb object if it finds the command
2621  * iocb else returns NULL.
2622  **/
2623 static struct lpfc_iocbq *
2624 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2625                              struct lpfc_sli_ring *pring, uint16_t iotag)
2626 {
2627         struct lpfc_iocbq *cmd_iocb;
2628
2629         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2630                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2631                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2632                         /* remove from txcmpl queue list */
2633                         list_del_init(&cmd_iocb->list);
2634                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2635                         pring->txcmplq_cnt--;
2636                         return cmd_iocb;
2637                 }
2638         }
2639         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2640                         "0372 iotag x%x is out of range: max iotag (x%x)\n",
2641                         iotag, phba->sli.last_iotag);
2642         return NULL;
2643 }
2644
2645 /**
2646  * lpfc_sli_process_sol_iocb - process solicited iocb completion
2647  * @phba: Pointer to HBA context object.
2648  * @pring: Pointer to driver SLI ring object.
2649  * @saveq: Pointer to the response iocb to be processed.
2650  *
2651  * This function is called by the ring event handler for non-fcp
2652  * rings when there is a new response iocb in the response ring.
2653  * The caller is not required to hold any locks. This function
2654  * gets the command iocb associated with the response iocb and
2655  * calls the completion handler for the command iocb. If there
2656  * is no completion handler, the function will free the resources
2657  * associated with the command iocb. If the response iocb is for
2658  * an already aborted command iocb, the status of the completion
2659  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2660  * This function always returns 1.
2661  **/
2662 static int
2663 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2664                           struct lpfc_iocbq *saveq)
2665 {
2666         struct lpfc_iocbq *cmdiocbp;
2667         int rc = 1;
2668         unsigned long iflag;
2669
2670         /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2671         spin_lock_irqsave(&phba->hbalock, iflag);
2672         cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2673         spin_unlock_irqrestore(&phba->hbalock, iflag);
2674
2675         if (cmdiocbp) {
2676                 if (cmdiocbp->iocb_cmpl) {
2677                         /*
2678                          * If an ELS command failed, send an event to the mgmt
2679                          * application.
2680                          */
2681                         if (saveq->iocb.ulpStatus &&
2682                              (pring->ringno == LPFC_ELS_RING) &&
2683                              (cmdiocbp->iocb.ulpCommand ==
2684                                 CMD_ELS_REQUEST64_CR))
2685                                 lpfc_send_els_failure_event(phba,
2686                                         cmdiocbp, saveq);
2687
2688                         /*
2689                          * Post all ELS completions to the worker thread.
2690                          * All others are passed to the completion callback.
2691                          */
2692                         if (pring->ringno == LPFC_ELS_RING) {
2693                                 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2694                                     (cmdiocbp->iocb_flag &
2695                                                         LPFC_DRIVER_ABORTED)) {
2696                                         spin_lock_irqsave(&phba->hbalock,
2697                                                           iflag);
2698                                         cmdiocbp->iocb_flag &=
2699                                                 ~LPFC_DRIVER_ABORTED;
2700                                         spin_unlock_irqrestore(&phba->hbalock,
2701                                                                iflag);
2702                                         saveq->iocb.ulpStatus =
2703                                                 IOSTAT_LOCAL_REJECT;
2704                                         saveq->iocb.un.ulpWord[4] =
2705                                                 IOERR_SLI_ABORTED;
2706
2707                                         /* Firmware could still be in progress
2708                                          * of DMAing the payload, so don't free the
2709                                          * data buffer until after a heartbeat.
2710                                          */
2711                                         spin_lock_irqsave(&phba->hbalock,
2712                                                           iflag);
2713                                         saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2714                                         spin_unlock_irqrestore(&phba->hbalock,
2715                                                                iflag);
2716                                 }
2717                                 if (phba->sli_rev == LPFC_SLI_REV4) {
2718                                         if (saveq->iocb_flag &
2719                                             LPFC_EXCHANGE_BUSY) {
2720                                                 /* Set cmdiocb flag for the
2721                                                  * exchange busy so sgl (xri)
2722                                                  * will not be released until
2723                                                  * the abort xri is received
2724                                                  * from hba.
2725                                                  */
2726                                                 spin_lock_irqsave(
2727                                                         &phba->hbalock, iflag);
2728                                                 cmdiocbp->iocb_flag |=
2729                                                         LPFC_EXCHANGE_BUSY;
2730                                                 spin_unlock_irqrestore(
2731                                                         &phba->hbalock, iflag);
2732                                         }
2733                                         if (cmdiocbp->iocb_flag &
2734                                             LPFC_DRIVER_ABORTED) {
2735                                                 /*
2736                                                  * Clear LPFC_DRIVER_ABORTED
2737                                                  * bit in case it was driver
2738                                                  * initiated abort.
2739                                                  */
2740                                                 spin_lock_irqsave(
2741                                                         &phba->hbalock, iflag);
2742                                                 cmdiocbp->iocb_flag &=
2743                                                         ~LPFC_DRIVER_ABORTED;
2744                                                 spin_unlock_irqrestore(
2745                                                         &phba->hbalock, iflag);
2746                                                 cmdiocbp->iocb.ulpStatus =
2747                                                         IOSTAT_LOCAL_REJECT;
2748                                                 cmdiocbp->iocb.un.ulpWord[4] =
2749                                                         IOERR_ABORT_REQUESTED;
2750                                                 /*
2751                                                  * For SLI4, irsiocb contains
2752                                                  * NO_XRI in sli_xritag, it
2753                                                  * shall not affect releasing
2754                                                  * sgl (xri) process.
2755                                                  */
2756                                                 saveq->iocb.ulpStatus =
2757                                                         IOSTAT_LOCAL_REJECT;
2758                                                 saveq->iocb.un.ulpWord[4] =
2759                                                         IOERR_SLI_ABORTED;
2760                                                 spin_lock_irqsave(
2761                                                         &phba->hbalock, iflag);
2762                                                 saveq->iocb_flag |=
2763                                                         LPFC_DELAY_MEM_FREE;
2764                                                 spin_unlock_irqrestore(
2765                                                         &phba->hbalock, iflag);
2766                                         }
2767                                 }
2768                         }
2769                         (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
2770                 } else
2771                         lpfc_sli_release_iocbq(phba, cmdiocbp);
2772         } else {
2773                 /*
2774                  * Unknown initiating command based on the response iotag.
2775                  * This could be the case on the ELS ring because of
2776                  * lpfc_els_abort().
2777                  */
2778                 if (pring->ringno != LPFC_ELS_RING) {
2779                         /*
2780                          * Ring <ringno> handler: unexpected completion IoTag
2781                          * <IoTag>
2782                          */
2783                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2784                                          "0322 Ring %d handler: "
2785                                          "unexpected completion IoTag x%x "
2786                                          "Data: x%x x%x x%x x%x\n",
2787                                          pring->ringno,
2788                                          saveq->iocb.ulpIoTag,
2789                                          saveq->iocb.ulpStatus,
2790                                          saveq->iocb.un.ulpWord[4],
2791                                          saveq->iocb.ulpCommand,
2792                                          saveq->iocb.ulpContext);
2793                 }
2794         }
2795
2796         return rc;
2797 }
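
/*
 * Editor's sketch (illustrative only): for a command that was already
 * aborted, the completion handed to the original iocb_cmpl callback is
 * normalized above to
 *
 *	saveq->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
 *	saveq->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
 *
 * so a completion handler only needs to recognize this one status pair
 * rather than every abort flavor the hardware can report.
 */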
2798
2799 /**
2800  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
2801  * @phba: Pointer to HBA context object.
2802  * @pring: Pointer to driver SLI ring object.
2803  *
2804  * This function is called from the iocb ring event handlers when the
2805  * put pointer is ahead of the get pointer for a ring. This function signals
2806  * an error attention condition to the worker thread and the worker
2807  * thread will transition the HBA to the offline state.
2808  **/
2809 static void
2810 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2811 {
2812         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2813         /*
2814          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2815          * rsp ring <portRspMax>
2816          */
2817         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2818                         "0312 Ring %d handler: portRspPut %d "
2819                         "is bigger than rsp ring %d\n",
2820                         pring->ringno, le32_to_cpu(pgp->rspPutInx),
2821                         pring->sli.sli3.numRiocb);
2822
2823         phba->link_state = LPFC_HBA_ERROR;
2824
2825         /*
2826          * All error attention handlers are posted to
2827          * worker thread
2828          */
2829         phba->work_ha |= HA_ERATT;
2830         phba->work_hs = HS_FFER3;
2831
2832         lpfc_worker_wake_up(phba);
2833
2834         return;
2835 }
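
/*
 * Editor's note: HA_ERATT/HS_FFER3 are set here so that this software-
 * detected ring corruption is funneled through the same worker-thread
 * error-attention path used for genuine chip errors.
 */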
2836
2837 /**
2838  * lpfc_poll_eratt - Error attention polling timer timeout handler
2839  * @ptr: Pointer to address of HBA context object.
2840  *
2841  * This function is invoked by the Error Attention polling timer when the
2842  * timer times out. It will check the SLI Error Attention register for
2843  * possible attention events. If any are found, it will post an Error
2844  * Attention event and wake up the worker thread to process it. Otherwise,
2845  * it will set up the Error Attention polling timer for the next poll.
2846  **/
2847 void lpfc_poll_eratt(unsigned long ptr)
2848 {
2849         struct lpfc_hba *phba;
2850         uint32_t eratt = 0, rem;
2851         uint64_t sli_intr, cnt;
2852
2853         phba = (struct lpfc_hba *)ptr;
2854
2855         /* Also keep track of the HBA's interrupts per second here */
2856         sli_intr = phba->sli.slistat.sli_intr;
2857
2858         if (phba->sli.slistat.sli_prev_intr > sli_intr)
2859                 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
2860                         sli_intr);
2861         else
2862                 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
2863
2864         /* 64-bit integer division is not supported on 32-bit x86 - use do_div */
2865         rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
2866         phba->sli.slistat.sli_ips = cnt;
2867
2868         phba->sli.slistat.sli_prev_intr = sli_intr;
2869
2870         /* Check chip HA register for error event */
2871         eratt = lpfc_sli_check_eratt(phba);
2872
2873         if (eratt)
2874                 /* Tell the worker thread there is work to do */
2875                 lpfc_worker_wake_up(phba);
2876         else
2877                 /* Restart the timer for next eratt poll */
2878                 mod_timer(&phba->eratt_poll, jiffies +
2879                                         HZ * LPFC_ERATT_POLL_INTERVAL);
2880         return;
2881 }
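
/*
 * Editor's worked example (illustrative): if the previous snapshot was
 * sli_prev_intr = 0xFFFFFFFFFFFFFFF0 and the counter has since wrapped
 * around to sli_intr = 0x10, the wraparound branch above computes
 *
 *	cnt = (0xFFFFFFFFFFFFFFFF - 0xFFFFFFFFFFFFFFF0) + 0x10 = 0x1F
 *
 * and do_div(cnt, LPFC_ERATT_POLL_INTERVAL) then converts the raw delta
 * into the interrupts-per-second figure stored in sli_ips.
 */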
2882
2883
2884 /**
2885  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
2886  * @phba: Pointer to HBA context object.
2887  * @pring: Pointer to driver SLI ring object.
2888  * @mask: Host attention register mask for this ring.
2889  *
2890  * This function is called from the interrupt context when there is a ring
2891  * event for the fcp ring. The caller does not hold any lock.
2892  * The function processes each response iocb in the response ring, chaining
2893  * entries until it reaches the iocb with the LE bit set, which marks the
2894  * end of the sequence. The function calls the completion handler of the
2895  * command iocb if the response iocb indicates a completion for a command
2896  * iocb or an abort completion, and calls lpfc_sli_process_unsol_iocb
2897  * if this is an unsolicited iocb.
2898  * This routine presumes LPFC_FCP_RING handling and doesn't bother
2899  * to check it explicitly.
2900  */
2901 int
2902 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2903                                 struct lpfc_sli_ring *pring, uint32_t mask)
2904 {
2905         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2906         IOCB_t *irsp = NULL;
2907         IOCB_t *entry = NULL;
2908         struct lpfc_iocbq *cmdiocbq = NULL;
2909         struct lpfc_iocbq rspiocbq;
2910         uint32_t status;
2911         uint32_t portRspPut, portRspMax;
2912         int rc = 1;
2913         lpfc_iocb_type type;
2914         unsigned long iflag;
2915         uint32_t rsp_cmpl = 0;
2916
2917         spin_lock_irqsave(&phba->hbalock, iflag);
2918         pring->stats.iocb_event++;
2919
2920         /*
2921          * The next available response entry should never exceed the maximum
2922          * entries.  If it does, treat it as an adapter hardware error.
2923          */
2924         portRspMax = pring->sli.sli3.numRiocb;
2925         portRspPut = le32_to_cpu(pgp->rspPutInx);
2926         if (unlikely(portRspPut >= portRspMax)) {
2927                 lpfc_sli_rsp_pointers_error(phba, pring);
2928                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2929                 return 1;
2930         }
2931         if (phba->fcp_ring_in_use) {
2932                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2933                 return 1;
2934         } else
2935                 phba->fcp_ring_in_use = 1;
2936
2937         rmb();
2938         while (pring->sli.sli3.rspidx != portRspPut) {
2939                 /*
2940                  * Fetch an entry off the ring and copy it into a local data
2941                  * structure.  The copy involves a byte-swap since the
2942                  * network byte order and pci byte orders are different.
2943                  */
2944                 entry = lpfc_resp_iocb(phba, pring);
2945                 phba->last_completion_time = jiffies;
2946
2947                 if (++pring->sli.sli3.rspidx >= portRspMax)
2948                         pring->sli.sli3.rspidx = 0;
2949
2950                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2951                                       (uint32_t *) &rspiocbq.iocb,
2952                                       phba->iocb_rsp_size);
2953                 INIT_LIST_HEAD(&(rspiocbq.list));
2954                 irsp = &rspiocbq.iocb;
2955
2956                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2957                 pring->stats.iocb_rsp++;
2958                 rsp_cmpl++;
2959
2960                 if (unlikely(irsp->ulpStatus)) {
2961                         /*
2962                          * If resource errors reported from HBA, reduce
2963                          * queuedepths of the SCSI device.
2964                          */
2965                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2966                             ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
2967                              IOERR_NO_RESOURCES)) {
2968                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2969                                 phba->lpfc_rampdown_queue_depth(phba);
2970                                 spin_lock_irqsave(&phba->hbalock, iflag);
2971                         }
2972
2973                         /* Rsp ring <ringno> error: IOCB */
2974                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2975                                         "0336 Rsp Ring %d error: IOCB Data: "
2976                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
2977                                         pring->ringno,
2978                                         irsp->un.ulpWord[0],
2979                                         irsp->un.ulpWord[1],
2980                                         irsp->un.ulpWord[2],
2981                                         irsp->un.ulpWord[3],
2982                                         irsp->un.ulpWord[4],
2983                                         irsp->un.ulpWord[5],
2984                                         *(uint32_t *)&irsp->un1,
2985                                         *((uint32_t *)&irsp->un1 + 1));
2986                 }
2987
2988                 switch (type) {
2989                 case LPFC_ABORT_IOCB:
2990                 case LPFC_SOL_IOCB:
2991                         /*
2992                          * Idle exchange closed via ABTS from port.  No iocb
2993                          * resources need to be recovered.
2994                          */
2995                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
2996                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2997                                                 "0333 IOCB cmd 0x%x"
2998                                                 " processed. Skipping"
2999                                                 " completion\n",
3000                                                 irsp->ulpCommand);
3001                                 break;
3002                         }
3003
3004                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3005                                                          &rspiocbq);
3006                         if (unlikely(!cmdiocbq))
3007                                 break;
3008                         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3009                                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3010                         if (cmdiocbq->iocb_cmpl) {
3011                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3012                                 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3013                                                       &rspiocbq);
3014                                 spin_lock_irqsave(&phba->hbalock, iflag);
3015                         }
3016                         break;
3017                 case LPFC_UNSOL_IOCB:
3018                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3019                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3020                         spin_lock_irqsave(&phba->hbalock, iflag);
3021                         break;
3022                 default:
3023                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3024                                 char adaptermsg[LPFC_MAX_ADPTMSG];
3025                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3026                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3027                                        MAX_MSG_DATA);
3028                                 dev_warn(&((phba->pcidev)->dev),
3029                                          "lpfc%d: %s\n",
3030                                          phba->brd_no, adaptermsg);
3031                         } else {
3032                                 /* Unknown IOCB command */
3033                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3034                                                 "0334 Unknown IOCB command "
3035                                                 "Data: x%x, x%x x%x x%x x%x\n",
3036                                                 type, irsp->ulpCommand,
3037                                                 irsp->ulpStatus,
3038                                                 irsp->ulpIoTag,
3039                                                 irsp->ulpContext);
3040                         }
3041                         break;
3042                 }
3043
3044                 /*
3045                  * The response IOCB has been processed.  Update the ring
3046                  * pointer in SLIM.  If the port response put pointer has not
3047                  * been updated, sync the pgp->rspPutInx and fetch the new port
3048                  * response put pointer.
3049                  */
3050                 writel(pring->sli.sli3.rspidx,
3051                         &phba->host_gp[pring->ringno].rspGetInx);
3052
3053                 if (pring->sli.sli3.rspidx == portRspPut)
3054                         portRspPut = le32_to_cpu(pgp->rspPutInx);
3055         }
3056
3057         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3058                 pring->stats.iocb_rsp_full++;
3059                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3060                 writel(status, phba->CAregaddr);
3061                 readl(phba->CAregaddr);
3062         }
3063         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3064                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3065                 pring->stats.iocb_cmd_empty++;
3066
3067                 /* Force update of the local copy of cmdGetInx */
3068                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3069                 lpfc_sli_resume_iocb(phba, pring);
3070
3071                 if ((pring->lpfc_sli_cmd_available))
3072                         (pring->lpfc_sli_cmd_available) (phba, pring);
3073
3074         }
3075
3076         phba->fcp_ring_in_use = 0;
3077         spin_unlock_irqrestore(&phba->hbalock, iflag);
3078         return rc;
3079 }
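
/*
 * Editor's note: phba->fcp_ring_in_use acts as a simple re-entrancy
 * guard taken under hbalock above; a second context that finds the ring
 * busy returns immediately and relies on the current owner to pick up
 * the newly arrived entries, since the owner re-reads pgp->rspPutInx
 * before it drops the flag.
 */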
3080
3081 /**
3082  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3083  * @phba: Pointer to HBA context object.
3084  * @pring: Pointer to driver SLI ring object.
3085  * @rspiocbp: Pointer to driver response IOCB object.
3086  *
3087  * This function is called from the worker thread when there is a slow-path
3088  * response IOCB to process. This function chains all the response iocbs until
3089  * seeing the iocb with the LE bit set. The function will call
3090  * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3091  * completion of a command iocb. The function will call the
3092  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3093  * The function frees the resources or calls the completion handler if this
3094  * iocb is an abort completion. The function returns NULL when the response
3095  * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3096  * this function shall chain the iocb on to the iocb_continueq and return the
3097  * response iocb passed in.
3098  **/
3099 static struct lpfc_iocbq *
3100 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3101                         struct lpfc_iocbq *rspiocbp)
3102 {
3103         struct lpfc_iocbq *saveq;
3104         struct lpfc_iocbq *cmdiocbp;
3105         struct lpfc_iocbq *next_iocb;
3106         IOCB_t *irsp = NULL;
3107         uint32_t free_saveq;
3108         uint8_t iocb_cmd_type;
3109         lpfc_iocb_type type;
3110         unsigned long iflag;
3111         int rc;
3112
3113         spin_lock_irqsave(&phba->hbalock, iflag);
3114         /* First add the response iocb to the continueq list */
3115         list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3116         pring->iocb_continueq_cnt++;
3117
3118         /* Now, determine whether the list is completed for processing */
3119         irsp = &rspiocbp->iocb;
3120         if (irsp->ulpLe) {
3121                 /*
3122                  * By default, the driver expects to free all resources
3123                  * associated with this iocb completion.
3124                  */
3125                 free_saveq = 1;
3126                 saveq = list_get_first(&pring->iocb_continueq,
3127                                        struct lpfc_iocbq, list);
3128                 irsp = &(saveq->iocb);
3129                 list_del_init(&pring->iocb_continueq);
3130                 pring->iocb_continueq_cnt = 0;
3131
3132                 pring->stats.iocb_rsp++;
3133
3134                 /*
3135                  * If resource errors reported from HBA, reduce
3136                  * queuedepths of the SCSI device.
3137                  */
3138                 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3139                     ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3140                      IOERR_NO_RESOURCES)) {
3141                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3142                         phba->lpfc_rampdown_queue_depth(phba);
3143                         spin_lock_irqsave(&phba->hbalock, iflag);
3144                 }
3145
3146                 if (irsp->ulpStatus) {
3147                         /* Rsp ring <ringno> error: IOCB */
3148                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3149                                         "0328 Rsp Ring %d error: "
3150                                         "IOCB Data: "
3151                                         "x%x x%x x%x x%x "
3152                                         "x%x x%x x%x x%x "
3153                                         "x%x x%x x%x x%x "
3154                                         "x%x x%x x%x x%x\n",
3155                                         pring->ringno,
3156                                         irsp->un.ulpWord[0],
3157                                         irsp->un.ulpWord[1],
3158                                         irsp->un.ulpWord[2],
3159                                         irsp->un.ulpWord[3],
3160                                         irsp->un.ulpWord[4],
3161                                         irsp->un.ulpWord[5],
3162                                         *(((uint32_t *) irsp) + 6),
3163                                         *(((uint32_t *) irsp) + 7),
3164                                         *(((uint32_t *) irsp) + 8),
3165                                         *(((uint32_t *) irsp) + 9),
3166                                         *(((uint32_t *) irsp) + 10),
3167                                         *(((uint32_t *) irsp) + 11),
3168                                         *(((uint32_t *) irsp) + 12),
3169                                         *(((uint32_t *) irsp) + 13),
3170                                         *(((uint32_t *) irsp) + 14),
3171                                         *(((uint32_t *) irsp) + 15));
3172                 }
3173
3174                 /*
3175                  * Fetch the IOCB command type and call the correct completion
3176                  * routine. Solicited and Unsolicited IOCBs on the ELS ring
3177                  * get freed back to the lpfc_iocb_list by the discovery
3178                  * kernel thread.
3179                  */
3180                 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3181                 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3182                 switch (type) {
3183                 case LPFC_SOL_IOCB:
3184                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3185                         rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3186                         spin_lock_irqsave(&phba->hbalock, iflag);
3187                         break;
3188
3189                 case LPFC_UNSOL_IOCB:
3190                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3191                         rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3192                         spin_lock_irqsave(&phba->hbalock, iflag);
3193                         if (!rc)
3194                                 free_saveq = 0;
3195                         break;
3196
3197                 case LPFC_ABORT_IOCB:
3198                         cmdiocbp = NULL;
3199                         if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3200                                 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3201                                                                  saveq);
3202                         if (cmdiocbp) {
3203                                 /* Call the specified completion routine */
3204                                 if (cmdiocbp->iocb_cmpl) {
3205                                         spin_unlock_irqrestore(&phba->hbalock,
3206                                                                iflag);
3207                                         (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3208                                                               saveq);
3209                                         spin_lock_irqsave(&phba->hbalock,
3210                                                           iflag);
3211                                 } else
3212                                         __lpfc_sli_release_iocbq(phba,
3213                                                                  cmdiocbp);
3214                         }
3215                         break;
3216
3217                 case LPFC_UNKNOWN_IOCB:
3218                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3219                                 char adaptermsg[LPFC_MAX_ADPTMSG];
3220                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3221                                 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3222                                        MAX_MSG_DATA);
3223                                 dev_warn(&((phba->pcidev)->dev),
3224                                          "lpfc%d: %s\n",
3225                                          phba->brd_no, adaptermsg);
3226                         } else {
3227                                 /* Unknown IOCB command */
3228                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3229                                                 "0335 Unknown IOCB "
3230                                                 "command Data: x%x "
3231                                                 "x%x x%x x%x\n",
3232                                                 irsp->ulpCommand,
3233                                                 irsp->ulpStatus,
3234                                                 irsp->ulpIoTag,
3235                                                 irsp->ulpContext);
3236                         }
3237                         break;
3238                 }
3239
3240                 if (free_saveq) {
3241                         list_for_each_entry_safe(rspiocbp, next_iocb,
3242                                                  &saveq->list, list) {
3243                                 list_del(&rspiocbp->list);
3244                                 __lpfc_sli_release_iocbq(phba, rspiocbp);
3245                         }
3246                         __lpfc_sli_release_iocbq(phba, saveq);
3247                 }
3248                 rspiocbp = NULL;
3249         }
3250         spin_unlock_irqrestore(&phba->hbalock, iflag);
3251         return rspiocbp;
3252 }
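
/*
 * Editor's sketch (illustrative): a three-entry response sequence is
 * consumed by repeated calls to lpfc_sli_sp_handle_rspiocb() as
 *
 *	entry 1 (ulpLe == 0) -> chained on iocb_continueq, entry returned
 *	entry 2 (ulpLe == 0) -> chained, entry returned
 *	entry 3 (ulpLe == 1) -> whole chain processed, NULL returned
 *
 * which matches the return contract described in the kernel-doc above.
 */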
3253
3254 /**
3255  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3256  * @phba: Pointer to HBA context object.
3257  * @pring: Pointer to driver SLI ring object.
3258  * @mask: Host attention register mask for this ring.
3259  *
3260  * This routine wraps the actual slow_ring event process routine from the
3261  * API jump table function pointer from the lpfc_hba struct.
3262  **/
3263 void
3264 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3265                                 struct lpfc_sli_ring *pring, uint32_t mask)
3266 {
3267         phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3268 }
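
/*
 * Editor's sketch (illustrative, assuming the usual lpfc API-table
 * setup): the jump table slot is filled in once per HBA based on the
 * SLI revision, conceptually
 *
 *	phba->lpfc_sli_handle_slow_ring_event =
 *		(phba->sli_rev < LPFC_SLI_REV4) ?
 *			lpfc_sli_handle_slow_ring_event_s3 :
 *			lpfc_sli_handle_slow_ring_event_s4;
 *
 * so callers of the wrapper stay revision-agnostic.
 */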
3269
3270 /**
3271  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3272  * @phba: Pointer to HBA context object.
3273  * @pring: Pointer to driver SLI ring object.
3274  * @mask: Host attention register mask for this ring.
3275  *
3276  * This function is called from the worker thread when there is a ring event
3277  * for non-fcp rings. The caller does not hold any lock. The function
3278  * removes each response iocb from the response ring and calls the handle
3279  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3280  **/
3281 static void
3282 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3283                                    struct lpfc_sli_ring *pring, uint32_t mask)
3284 {
3285         struct lpfc_pgp *pgp;
3286         IOCB_t *entry;
3287         IOCB_t *irsp = NULL;
3288         struct lpfc_iocbq *rspiocbp = NULL;
3289         uint32_t portRspPut, portRspMax;
3290         unsigned long iflag;
3291         uint32_t status;
3292
3293         pgp = &phba->port_gp[pring->ringno];
3294         spin_lock_irqsave(&phba->hbalock, iflag);
3295         pring->stats.iocb_event++;
3296
3297         /*
3298          * The next available response entry should never exceed the maximum
3299          * entries.  If it does, treat it as an adapter hardware error.
3300          */
3301         portRspMax = pring->sli.sli3.numRiocb;
3302         portRspPut = le32_to_cpu(pgp->rspPutInx);
3303         if (portRspPut >= portRspMax) {
3304                 /*
3305                  * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3306                  * rsp ring <portRspMax>
3307                  */
3308                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3309                                 "0303 Ring %d handler: portRspPut %d "
3310                                 "is bigger than rsp ring %d\n",
3311                                 pring->ringno, portRspPut, portRspMax);
3312
3313                 phba->link_state = LPFC_HBA_ERROR;
3314                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3315
3316                 phba->work_hs = HS_FFER3;
3317                 lpfc_handle_eratt(phba);
3318
3319                 return;
3320         }
3321
3322         rmb();
3323         while (pring->sli.sli3.rspidx != portRspPut) {
3324                 /*
3325                  * Build a completion list and call the appropriate handler.
3326                  * The process is to get the next available response iocb, get
3327                  * a free iocb from the list, copy the response data into the
3328                  * free iocb, insert to the continuation list, and update the
3329                  * next response index to slim.  This process makes response
3330          * iocbs in the ring available to DMA as fast as possible but
3331                  * pays a penalty for a copy operation.  Since the iocb is
3332                  * only 32 bytes, this penalty is considered small relative to
3333                  * the PCI reads for register values and a slim write.  When
3334          * the ulpLe field is set, the entire command has been
3335                  * received.
3336                  */
3337                 entry = lpfc_resp_iocb(phba, pring);
3338
3339                 phba->last_completion_time = jiffies;
3340                 rspiocbp = __lpfc_sli_get_iocbq(phba);
3341                 if (rspiocbp == NULL) {
3342                         printk(KERN_ERR "%s: out of buffers! Failing "
3343                                "completion.\n", __func__);
3344                         break;
3345                 }
3346
3347                 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3348                                       phba->iocb_rsp_size);
3349                 irsp = &rspiocbp->iocb;
3350
3351                 if (++pring->sli.sli3.rspidx >= portRspMax)
3352                         pring->sli.sli3.rspidx = 0;
3353
3354                 if (pring->ringno == LPFC_ELS_RING) {
3355                         lpfc_debugfs_slow_ring_trc(phba,
3356                         "IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
3357                                 *(((uint32_t *) irsp) + 4),
3358                                 *(((uint32_t *) irsp) + 6),
3359                                 *(((uint32_t *) irsp) + 7));
3360                 }
3361
3362                 writel(pring->sli.sli3.rspidx,
3363                         &phba->host_gp[pring->ringno].rspGetInx);
3364
3365                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3366                 /* Handle the response IOCB */
3367                 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3368                 spin_lock_irqsave(&phba->hbalock, iflag);
3369
3370                 /*
3371                  * If the port response put pointer has not been updated, sync
3372                  * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3373                  * response put pointer.
3374                  */
3375                 if (pring->sli.sli3.rspidx == portRspPut) {
3376                         portRspPut = le32_to_cpu(pgp->rspPutInx);
3377                 }
3378         } /* while (pring->sli.sli3.rspidx != portRspPut) */
3379
3380         if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3381                 /* At least one response entry has been freed */
3382                 pring->stats.iocb_rsp_full++;
3383                 /* SET RxRE_RSP in Chip Att register */
3384                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3385                 writel(status, phba->CAregaddr);
3386                 readl(phba->CAregaddr); /* flush */
3387         }
3388         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3389                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3390                 pring->stats.iocb_cmd_empty++;
3391
3392                 /* Force update of the local copy of cmdGetInx */
3393                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3394                 lpfc_sli_resume_iocb(phba, pring);
3395
3396                 if ((pring->lpfc_sli_cmd_available))
3397                         (pring->lpfc_sli_cmd_available) (phba, pring);
3398
3399         }
3400
3401         spin_unlock_irqrestore(&phba->hbalock, iflag);
3402         return;
3403 }
3404
3405 /**
3406  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3407  * @phba: Pointer to HBA context object.
3408  * @pring: Pointer to driver SLI ring object.
3409  * @mask: Host attention register mask for this ring.
3410  *
3411  * This function is called from the worker thread when there is a pending
3412  * ELS response iocb on the driver internal slow-path response iocb worker
3413  * queue. The caller does not hold any lock. The function removes each
3414  * response iocb from the response worker queue and calls the handle
3415  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3416  **/
3417 static void
3418 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3419                                    struct lpfc_sli_ring *pring, uint32_t mask)
3420 {
3421         struct lpfc_iocbq *irspiocbq;
3422         struct hbq_dmabuf *dmabuf;
3423         struct lpfc_cq_event *cq_event;
3424         unsigned long iflag;
3425
3426         spin_lock_irqsave(&phba->hbalock, iflag);
3427         phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3428         spin_unlock_irqrestore(&phba->hbalock, iflag);
3429         while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3430                 /* Get the response iocb from the head of work queue */
3431                 spin_lock_irqsave(&phba->hbalock, iflag);
3432                 list_remove_head(&phba->sli4_hba.sp_queue_event,
3433                                  cq_event, struct lpfc_cq_event, list);
3434                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3435
3436                 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3437                 case CQE_CODE_COMPL_WQE:
3438                         irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3439                                                  cq_event);
3440                         /* Translate ELS WCQE to response IOCBQ */
3441                         irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3442                                                                    irspiocbq);
3443                         if (irspiocbq)
3444                                 lpfc_sli_sp_handle_rspiocb(phba, pring,
3445                                                            irspiocbq);
3446                         break;
3447                 case CQE_CODE_RECEIVE:
3448                 case CQE_CODE_RECEIVE_V1:
3449                         dmabuf = container_of(cq_event, struct hbq_dmabuf,
3450                                               cq_event);
3451                         lpfc_sli4_handle_received_buffer(phba, dmabuf);
3452                         break;
3453                 default:
3454                         break;
3455                 }
3456         }
3457 }
3458
3459 /**
3460  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3461  * @phba: Pointer to HBA context object.
3462  * @pring: Pointer to driver SLI ring object.
3463  *
3464  * This function aborts all iocbs in the given ring and frees all the iocb
3465  * objects in txq. This function issues an abort iocb for all the iocb commands
3466  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3467  * the return of this function. The caller is not required to hold any locks.
3468  **/
3469 void
3470 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3471 {
3472         LIST_HEAD(completions);
3473         struct lpfc_iocbq *iocb, *next_iocb;
3474
3475         if (pring->ringno == LPFC_ELS_RING) {
3476                 lpfc_fabric_abort_hba(phba);
3477         }
3478
3479         /* Error everything on txq and txcmplq
3480          * First do the txq.
3481          */
3482         spin_lock_irq(&phba->hbalock);
3483         list_splice_init(&pring->txq, &completions);
3484         pring->txq_cnt = 0;
3485
3486         /* Next issue ABTS for everything on the txcmplq */
3487         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3488                 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3489
3490         spin_unlock_irq(&phba->hbalock);
3491
3492         /* Cancel all the IOCBs from the completions list */
3493         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3494                               IOERR_SLI_ABORTED);
3495 }
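
/*
 * Editor's note: iocbs still sitting on txq were never handed to the
 * HBA, so they can be failed locally in a single list splice; iocbs on
 * txcmplq are already owned by the firmware, so each one gets an
 * explicit abort via lpfc_sli_issue_abort_iotag() and completes
 * asynchronously later, as the kernel-doc above warns.
 */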
3496
3497 /**
3498  * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3499  * @phba: Pointer to HBA context object.
3500  *
3501  * This function flushes all iocbs in the fcp ring and frees all the iocb
3502  * objects in txq and txcmplq. This function will not issue abort iocbs
3503  * for the iocb commands in txcmplq; they will just be returned with
3504  * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3505  * slot has been permanently disabled.
3506  **/
3507 void
3508 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3509 {
3510         LIST_HEAD(txq);
3511         LIST_HEAD(txcmplq);
3512         struct lpfc_sli *psli = &phba->sli;
3513         struct lpfc_sli_ring  *pring;
3514
3515         /* Currently, only one fcp ring */
3516         pring = &psli->ring[psli->fcp_ring];
3517
3518         spin_lock_irq(&phba->hbalock);
3519         /* Retrieve everything on txq */
3520         list_splice_init(&pring->txq, &txq);
3521         pring->txq_cnt = 0;
3522
3523         /* Retrieve everything on the txcmplq */
3524         list_splice_init(&pring->txcmplq, &txcmplq);
3525         pring->txcmplq_cnt = 0;
3526
3527         /* Indicate the I/O queues are flushed */
3528         phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
3529         spin_unlock_irq(&phba->hbalock);
3530
3531         /* Flush the txq */
3532         lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3533                               IOERR_SLI_DOWN);
3534
3535         /* Flush the txcmpq */
3536         lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3537                               IOERR_SLI_DOWN);
3538 }
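
/*
 * Editor's note: unlike lpfc_sli_abort_iocb_ring() above, no abort iocbs
 * are issued here; with the PCI slot permanently disabled the HBA can no
 * longer respond, so both txq and txcmplq are simply failed with
 * IOERR_SLI_DOWN.
 */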
3539
3540 /**
3541  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
3542  * @phba: Pointer to HBA context object.
3543  * @mask: Bit mask to be checked.
3544  *
3545  * This function reads the host status register and compares
3546  * with the provided bit mask to check if HBA completed
3547  * the restart. This function will wait in a loop for the
3548  * HBA to complete the restart. If the HBA does not restart within
3549  * 15 iterations, the function will reset the HBA again. The
3550  * function returns 1 when the HBA fails to restart; otherwise it
3551  * returns zero.
3552  **/
3553 static int
3554 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3555 {
3556         uint32_t status;
3557         int i = 0;
3558         int retval = 0;
3559
3560         /* Read the HBA Host Status Register */
3561         if (lpfc_readl(phba->HSregaddr, &status))
3562                 return 1;
3563
3564         /*
3565          * Check the status register every 10ms for 5 retries, then every
3566          * 500ms for 5, then every 2.5 sec for the remaining retries,
3567          * resetting the board at the 15th iteration.
3568          * Break out of the loop if errors occurred during init.
3569          */
3570         while (((status & mask) != mask) &&
3571                !(status & HS_FFERM) &&
3572                i++ < 20) {
3573
3574                 if (i <= 5)
3575                         msleep(10);
3576                 else if (i <= 10)
3577                         msleep(500);
3578                 else
3579                         msleep(2500);
3580
3581                 if (i == 15) {
3582                                 /* Do post */
3583                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3584                         lpfc_sli_brdrestart(phba);
3585                 }
3586                 /* Read the HBA Host Status Register */
3587                 if (lpfc_readl(phba->HSregaddr, &status)) {
3588                         retval = 1;
3589                         break;
3590                 }
3591         }
3592
3593         /* Check to see if any errors occurred during init */
3594         if ((status & HS_FFERM) || (i >= 20)) {
3595                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3596                                 "2751 Adapter failed to restart, "
3597                                 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3598                                 status,
3599                                 readl(phba->MBslimaddr + 0xa8),
3600                                 readl(phba->MBslimaddr + 0xac));
3601                 phba->link_state = LPFC_HBA_ERROR;
3602                 retval = 1;
3603         }
3604
3605         return retval;
3606 }
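
/*
 * Editor's worked timing (from the loop above): 5 sleeps of 10ms, then
 * 5 of 500ms, then up to 10 of 2.5s with a board restart attempted at
 * iteration 15 - roughly 27.5 seconds in the worst case before the
 * function gives up and reports failure.
 */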
3607
3608 /**
3609  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3610  * @phba: Pointer to HBA context object.
3611  * @mask: Bit mask to be checked.
3612  *
3613  * This function checks the host status register to determine whether the
3614  * HBA is ready. If the HBA is not ready, the function resets the HBA PCI
3615  * function and performs the status check once more. The function returns
3616  * 1 when the HBA fails to become ready;
3617  * otherwise it returns zero.
3618  **/
3619 static int
3620 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3621 {
3622         uint32_t status;
3623         int retval = 0;
3624
3625         /* Read the HBA Host Status Register */
3626         status = lpfc_sli4_post_status_check(phba);
3627
3628         if (status) {
3629                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3630                 lpfc_sli_brdrestart(phba);
3631                 status = lpfc_sli4_post_status_check(phba);
3632         }
3633
3634         /* Check to see if any errors occurred during init */
3635         if (status) {
3636                 phba->link_state = LPFC_HBA_ERROR;
3637                 retval = 1;
3638         } else
3639                 phba->sli4_hba.intr_enable = 0;
3640
3641         return retval;
3642 }
3643
3644 /**
3645  * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3646  * @phba: Pointer to HBA context object.
3647  * @mask: Bit mask to be checked.
3648  *
3649  * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
3650  * from the API jump table function pointer from the lpfc_hba struct.
3651  **/
3652 int
3653 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3654 {
3655         return phba->lpfc_sli_brdready(phba, mask);
3656 }
3657
3658 #define BARRIER_TEST_PATTERN (0xdeadbeef)
3659
3660 /**
3661  * lpfc_reset_barrier - Make HBA ready for HBA reset
3662  * @phba: Pointer to HBA context object.
3663  *
3664  * This function is called before resetting an HBA. It is called with
3665  * hbalock held and requests the HBA to quiesce DMA activity before a reset.
3666  **/
3667 void lpfc_reset_barrier(struct lpfc_hba *phba)
3668 {
3669         uint32_t __iomem *resp_buf;
3670         uint32_t __iomem *mbox_buf;
3671         volatile uint32_t mbox;
3672         uint32_t hc_copy, ha_copy, resp_data;
3673         int  i;
3674         uint8_t hdrtype;
3675
3676         pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3677         if (hdrtype != 0x80 ||
3678             (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3679              FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3680                 return;
3681
3682         /*
3683          * Tell the other part of the chip to suspend temporarily all
3684          * its DMA activity.
3685          */
3686         resp_buf = phba->MBslimaddr;
3687
3688         /* Disable the error attention */
3689         if (lpfc_readl(phba->HCregaddr, &hc_copy))
3690                 return;
3691         writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3692         readl(phba->HCregaddr); /* flush */
3693         phba->link_flag |= LS_IGNORE_ERATT;
3694
3695         if (lpfc_readl(phba->HAregaddr, &ha_copy))
3696                 return;
3697         if (ha_copy & HA_ERATT) {
3698                 /* Clear Chip error bit */
3699                 writel(HA_ERATT, phba->HAregaddr);
3700                 phba->pport->stopped = 1;
3701         }
3702
3703         mbox = 0;
3704         ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
3705         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
3706
3707         writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
3708         mbox_buf = phba->MBslimaddr;
3709         writel(mbox, mbox_buf);
3710
3711         for (i = 0; i < 50; i++) {
3712                 if (lpfc_readl((resp_buf + 1), &resp_data))
3713                         return;
3714                 if (resp_data != ~(BARRIER_TEST_PATTERN))
3715                         mdelay(1);
3716                 else
3717                         break;
3718         }
3719         resp_data = 0;
3720         if (lpfc_readl((resp_buf + 1), &resp_data))
3721                 return;
3722         if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
3723                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
3724                     phba->pport->stopped)
3725                         goto restore_hc;
3726                 else
3727                         goto clear_errat;
3728         }
3729
3730         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
3731         resp_data = 0;
3732         for (i = 0; i < 500; i++) {
3733                 if (lpfc_readl(resp_buf, &resp_data))
3734                         return;
3735                 if (resp_data != mbox)
3736                         mdelay(1);
3737                 else
3738                         break;
3739         }
3740
3741 clear_errat:
3742
3743         while (++i < 500) {
3744                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3745                         return;
3746                 if (!(ha_copy & HA_ERATT))
3747                         mdelay(1);
3748                 else
3749                         break;
3750         }
3751
3752         if (readl(phba->HAregaddr) & HA_ERATT) {
3753                 writel(HA_ERATT, phba->HAregaddr);
3754                 phba->pport->stopped = 1;
3755         }
3756
3757 restore_hc:
3758         phba->link_flag &= ~LS_IGNORE_ERATT;
3759         writel(hc_copy, phba->HCregaddr);
3760         readl(phba->HCregaddr); /* flush */
3761 }
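
/*
 * Editor's note (illustrative reading of the code above): the host
 * writes BARRIER_TEST_PATTERN into SLIM next to the KILL_BOARD mailbox
 * and then polls for its bitwise complement; seeing ~0xdeadbeef come
 * back is taken as the chip's acknowledgement that DMA activity has
 * been quiesced.
 */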
3762
3763 /**
3764  * lpfc_sli_brdkill - Issue a kill_board mailbox command
3765  * @phba: Pointer to HBA context object.
3766  *
3767  * This function issues a kill_board mailbox command and waits for
3768  * the error attention interrupt. This function is called for stopping
3769  * the firmware processing. The caller is not required to hold any
3770  * locks. This function calls lpfc_hba_down_post function to free
3771  * any pending commands after the kill. The function will return 1 when it
3772  * fails to kill the board else will return 0.
3773  **/
3774 int
3775 lpfc_sli_brdkill(struct lpfc_hba *phba)
3776 {
3777         struct lpfc_sli *psli;
3778         LPFC_MBOXQ_t *pmb;
3779         uint32_t status;
3780         uint32_t ha_copy;
3781         int retval;
3782         int i = 0;
3783
3784         psli = &phba->sli;
3785
3786         /* Kill HBA */
3787         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3788                         "0329 Kill HBA Data: x%x x%x\n",
3789                         phba->pport->port_state, psli->sli_flag);
3790
3791         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3792         if (!pmb)
3793                 return 1;
3794
3795         /* Disable the error attention */
3796         spin_lock_irq(&phba->hbalock);
3797         if (lpfc_readl(phba->HCregaddr, &status)) {
3798                 spin_unlock_irq(&phba->hbalock);
3799                 mempool_free(pmb, phba->mbox_mem_pool);
3800                 return 1;
3801         }
3802         status &= ~HC_ERINT_ENA;
3803         writel(status, phba->HCregaddr);
3804         readl(phba->HCregaddr); /* flush */
3805         phba->link_flag |= LS_IGNORE_ERATT;
3806         spin_unlock_irq(&phba->hbalock);
3807
3808         lpfc_kill_board(phba, pmb);
3809         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3810         retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3811
3812         if (retval != MBX_SUCCESS) {
3813                 if (retval != MBX_BUSY)
3814                         mempool_free(pmb, phba->mbox_mem_pool);
3815                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3816                                 "2752 KILL_BOARD command failed retval %d\n",
3817                                 retval);
3818                 spin_lock_irq(&phba->hbalock);
3819                 phba->link_flag &= ~LS_IGNORE_ERATT;
3820                 spin_unlock_irq(&phba->hbalock);
3821                 return 1;
3822         }
3823
3824         spin_lock_irq(&phba->hbalock);
3825         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3826         spin_unlock_irq(&phba->hbalock);
3827
3828         mempool_free(pmb, phba->mbox_mem_pool);
3829
3830         /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
3831          * attention every 100ms for 3 seconds. If we don't get ERATT after
3832          * 3 seconds we still set HBA_ERROR state because the status of the
3833          * board is now undefined.
3834          */
3835         if (lpfc_readl(phba->HAregaddr, &ha_copy))
3836                 return 1;
3837         while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
3838                 mdelay(100);
3839                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3840                         return 1;
3841         }
3842
3843         del_timer_sync(&psli->mbox_tmo);
3844         if (ha_copy & HA_ERATT) {
3845                 writel(HA_ERATT, phba->HAregaddr);
3846                 phba->pport->stopped = 1;
3847         }
3848         spin_lock_irq(&phba->hbalock);
3849         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3850         psli->mbox_active = NULL;
3851         phba->link_flag &= ~LS_IGNORE_ERATT;
3852         spin_unlock_irq(&phba->hbalock);
3853
3854         lpfc_hba_down_post(phba);
3855         phba->link_state = LPFC_HBA_ERROR;
3856
3857         return ha_copy & HA_ERATT ? 0 : 1;
3858 }
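
/*
 * Editor's note: because a KILL_BOARD mailbox command never completes,
 * the 30 x 100ms polling loop above gives the board roughly 3 seconds
 * to raise ERATT, and the return value maps that outcome directly:
 * 0 if the error attention was seen, 1 otherwise.
 */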
3859
3860 /**
3861  * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
3862  * @phba: Pointer to HBA context object.
3863  *
3864  * This function resets the HBA by writing HC_INITFF to the control
3865  * register. After the HBA resets, this function resets all the iocb ring
3866  * indices. This function disables PCI layer parity checking during
3867  * the reset.
3868  * This function returns 0 always.
3869  * The caller is not required to hold any locks.
3870  **/
3871 int
3872 lpfc_sli_brdreset(struct lpfc_hba *phba)
3873 {
3874         struct lpfc_sli *psli;
3875         struct lpfc_sli_ring *pring;
3876         uint16_t cfg_value;
3877         int i;
3878
3879         psli = &phba->sli;
3880
3881         /* Reset HBA */
3882         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3883                         "0325 Reset HBA Data: x%x x%x\n",
3884                         phba->pport->port_state, psli->sli_flag);
3885
3886         /* perform board reset */
3887         phba->fc_eventTag = 0;
3888         phba->link_events = 0;
3889         phba->pport->fc_myDID = 0;
3890         phba->pport->fc_prevDID = 0;
3891
3892         /* Turn off parity checking and serr during the physical reset */
3893         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3894         pci_write_config_word(phba->pcidev, PCI_COMMAND,
3895                               (cfg_value &
3896                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3897
3898         psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3899
3900         /* Now toggle INITFF bit in the Host Control Register */
3901         writel(HC_INITFF, phba->HCregaddr);
3902         mdelay(1);
3903         readl(phba->HCregaddr); /* flush */
3904         writel(0, phba->HCregaddr);
3905         readl(phba->HCregaddr); /* flush */
3906
3907         /* Restore PCI cmd register */
3908         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3909
3910         /* Initialize relevant SLI info */
3911         for (i = 0; i < psli->num_rings; i++) {
3912                 pring = &psli->ring[i];
3913                 pring->flag = 0;
3914                 pring->sli.sli3.rspidx = 0;
3915                 pring->sli.sli3.next_cmdidx  = 0;
3916                 pring->sli.sli3.local_getidx = 0;
3917                 pring->sli.sli3.cmdidx = 0;
3918                 pring->missbufcnt = 0;
3919         }
3920
3921         phba->link_state = LPFC_WARM_START;
3922         return 0;
3923 }
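
/*
 * Illustrative sketch (not part of the driver): the save/mask/restore
 * handling of PCI_COMMAND that brackets the INITFF reset above. Parity
 * and SERR reporting are masked because the chip's PCI interface is
 * briefly undefined during reset, then the saved command word is put
 * back. The reset step itself is elided; only the bracketing is shown.
 */
static void lpfc_example_masked_reset(struct pci_dev *pdev)
{
        uint16_t cmd;

        pci_read_config_word(pdev, PCI_COMMAND, &cmd);
        pci_write_config_word(pdev, PCI_COMMAND,
                              cmd & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));

        /* ... hypothetical device-specific reset sequence here ... */

        pci_write_config_word(pdev, PCI_COMMAND, cmd);  /* restore */
}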
3924
3925 /**
3926  * lpfc_sli4_brdreset - Reset a sli-4 HBA
3927  * @phba: Pointer to HBA context object.
3928  *
3929  * This function resets a SLI4 HBA. It disables PCI layer parity
3930  * checking while it resets the device. The caller is not required
3931  * to hold any locks.
3932  *
3933  * This function returns the status of the PCI function reset.
3934  **/
3935 int
3936 lpfc_sli4_brdreset(struct lpfc_hba *phba)
3937 {
3938         struct lpfc_sli *psli = &phba->sli;
3939         uint16_t cfg_value;
3940         int rc;
3941
3942         /* Reset HBA */
3943         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3944                         "0295 Reset HBA Data: x%x x%x\n",
3945                         phba->pport->port_state, psli->sli_flag);
3946
3947         /* perform board reset */
3948         phba->fc_eventTag = 0;
3949         phba->link_events = 0;
3950         phba->pport->fc_myDID = 0;
3951         phba->pport->fc_prevDID = 0;
3952
3953         spin_lock_irq(&phba->hbalock);
3954         psli->sli_flag &= ~(LPFC_PROCESS_LA);
3955         phba->fcf.fcf_flag = 0;
3956         spin_unlock_irq(&phba->hbalock);
3957
3958         /* Now physically reset the device */
3959         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3960                         "0389 Performing PCI function reset!\n");
3961
3962         /* Turn off parity checking and serr during the physical reset */
3963         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3964         pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
3965                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3966
3967         /* Perform FCoE PCI function reset before freeing queue memory */
3968         rc = lpfc_pci_function_reset(phba);
3969         lpfc_sli4_queue_destroy(phba);
3970
3971         /* Restore PCI cmd register */
3972         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3973
3974         return rc;
3975 }
3976
3977 /**
3978  * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
3979  * @phba: Pointer to HBA context object.
3980  *
3981  * This function is called in the SLI initialization code path to
3982  * restart the HBA. The caller is not required to hold any lock.
3983  * This function writes MBX_RESTART mailbox command to the SLIM and
3984  * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
3985  * function to free any pending commands. The function enables
3986  * POST only during the first initialization. The function returns zero.
3987  * The function does not guarantee that the MBX_RESTART mailbox
3988  * command has completed before it returns.
3989  **/
3990 static int
3991 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
3992 {
3993         MAILBOX_t *mb;
3994         struct lpfc_sli *psli;
3995         volatile uint32_t word0;
3996         void __iomem *to_slim;
3997         uint32_t hba_aer_enabled;
3998
3999         spin_lock_irq(&phba->hbalock);
4000
4001         /* Save the PCIe device Advanced Error Reporting (AER) state */
4002         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4003
4004         psli = &phba->sli;
4005
4006         /* Restart HBA */
4007         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4008                         "0337 Restart HBA Data: x%x x%x\n",
4009                         phba->pport->port_state, psli->sli_flag);
4010
4011         word0 = 0;
4012         mb = (MAILBOX_t *) &word0;
4013         mb->mbxCommand = MBX_RESTART;
4014         mb->mbxHc = 1;
4015
4016         lpfc_reset_barrier(phba);
4017
4018         to_slim = phba->MBslimaddr;
4019         writel(*(uint32_t *) mb, to_slim);
4020         readl(to_slim); /* flush */
4021
4022         /* Only skip post after fc_ffinit is completed */
4023         if (phba->pport->port_state)
4024                 word0 = 1;      /* This is really setting up word1 */
4025         else
4026                 word0 = 0;      /* This is really setting up word1 */
4027         to_slim = phba->MBslimaddr + sizeof (uint32_t);
4028         writel(*(uint32_t *) mb, to_slim);
4029         readl(to_slim); /* flush */
4030
4031         lpfc_sli_brdreset(phba);
4032         phba->pport->stopped = 0;
4033         phba->link_state = LPFC_INIT_START;
4034         phba->hba_flag = 0;
4035         spin_unlock_irq(&phba->hbalock);
4036
4037         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4038         psli->stats_start = get_seconds();
4039
4040         /* Give the INITFF and Post time to settle. */
4041         mdelay(100);
4042
4043         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4044         if (hba_aer_enabled)
4045                 pci_disable_pcie_error_reporting(phba->pcidev);
4046
4047         lpfc_hba_down_post(phba);
4048
4049         return 0;
4050 }
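
/*
 * Illustrative sketch (not part of the driver): the writel()/readl()
 * posting-flush idiom used above when the MBX_RESTART words are written
 * to SLIM. MMIO writes over PCI are posted, so reading back from the
 * same BAR forces the write to reach the adapter before continuing.
 */
static void lpfc_example_slim_write(void __iomem *slim, uint32_t word)
{
        writel(word, slim);
        readl(slim);    /* flush the posted write */
}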
4051
4052 /**
4053  * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4054  * @phba: Pointer to HBA context object.
4055  *
4056  * This function is called in the SLI initialization code path to restart
4057  * a SLI4 HBA. The caller is not required to hold any lock.
4058  * At the end of the function, it calls lpfc_hba_down_post function to
4059  * free any pending commands.
4060  **/
4061 static int
4062 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4063 {
4064         struct lpfc_sli *psli = &phba->sli;
4065         uint32_t hba_aer_enabled;
4066         int rc;
4067
4068         /* Restart HBA */
4069         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4070                         "0296 Restart HBA Data: x%x x%x\n",
4071                         phba->pport->port_state, psli->sli_flag);
4072
4073         /* Save the PCIe device Advanced Error Reporting (AER) state */
4074         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4075
4076         rc = lpfc_sli4_brdreset(phba);
4077
4078         spin_lock_irq(&phba->hbalock);
4079         phba->pport->stopped = 0;
4080         phba->link_state = LPFC_INIT_START;
4081         phba->hba_flag = 0;
4082         spin_unlock_irq(&phba->hbalock);
4083
4084         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4085         psli->stats_start = get_seconds();
4086
4087         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4088         if (hba_aer_enabled)
4089                 pci_disable_pcie_error_reporting(phba->pcidev);
4090
4091         lpfc_hba_down_post(phba);
4092
4093         return rc;
4094 }
4095
4096 /**
4097  * lpfc_sli_brdrestart - Wrapper func for restarting hba
4098  * @phba: Pointer to HBA context object.
4099  *
4100  * This routine wraps the actual SLI3 or SLI4 HBA restart routine through
4101  * the API jump table function pointer in the lpfc_hba struct.
4102 **/
4103 int
4104 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4105 {
4106         return phba->lpfc_sli_brdrestart(phba);
4107 }
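
/*
 * Illustrative sketch (not part of the driver): how a jump-table entry
 * such as lpfc_sli_brdrestart is typically wired. During attach, init
 * code points the per-HBA function pointer at the _s3 or _s4 variant,
 * and the wrapper above simply dispatches through it. This setup
 * function is hypothetical; the real driver fills the table elsewhere.
 */
static void lpfc_example_setup_restart_api(struct lpfc_hba *phba, int sli_rev)
{
        if (sli_rev == LPFC_SLI_REV4)
                phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
        else
                phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
}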
4108
4109 /**
4110  * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4111  * @phba: Pointer to HBA context object.
4112  *
4113  * This function is called after a HBA restart to wait for successful
4114  * restart of the HBA. Successful restart of the HBA is indicated by
4115  * HS_FFRDY and HS_MBRDY bits. If the HBA fails to become ready even
4116  * after 150 iterations (~60 seconds), the function restarts the HBA again.
4117  * The function returns zero on a successful restart, else a negative error code.
4118  **/
4119 static int
4120 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4121 {
4122         uint32_t status, i = 0;
4123
4124         /* Read the HBA Host Status Register */
4125         if (lpfc_readl(phba->HSregaddr, &status))
4126                 return -EIO;
4127
4128         /* Check status register to see what current state is */
4129         i = 0;
4130         while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4131
4132                 /* Check every 10ms for 10 retries, then every 100ms for 90
4133                  * retries, then every 1 sec for 50 retries, for a total of
4134                  * ~60 seconds before resetting the board again and checking
4135                  * every 1 sec for 50 more retries. The up-to-60-second wait
4136                  * for board ready is required for Falcon FIPS zeroization to
4137                  * complete; any board reset in between would restart the
4138                  * zeroization and further delay board readiness.
4139                  */
4140                 if (i++ >= 200) {
4141                         /* Adapter failed to init, timeout, status reg
4142                            <status> */
4143                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4144                                         "0436 Adapter failed to init, "
4145                                         "timeout, status reg x%x, "
4146                                         "FW Data: A8 x%x AC x%x\n", status,
4147                                         readl(phba->MBslimaddr + 0xa8),
4148                                         readl(phba->MBslimaddr + 0xac));
4149                         phba->link_state = LPFC_HBA_ERROR;
4150                         return -ETIMEDOUT;
4151                 }
4152
4153                 /* Check to see if any errors occurred during init */
4154                 if (status & HS_FFERM) {
4155                         /* ERROR: During chipset initialization */
4156                         /* Adapter failed to init, chipset, status reg
4157                            <status> */
4158                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4159                                         "0437 Adapter failed to init, "
4160                                         "chipset, status reg x%x, "
4161                                         "FW Data: A8 x%x AC x%x\n", status,
4162                                         readl(phba->MBslimaddr + 0xa8),
4163                                         readl(phba->MBslimaddr + 0xac));
4164                         phba->link_state = LPFC_HBA_ERROR;
4165                         return -EIO;
4166                 }
4167
4168                 if (i <= 10)
4169                         msleep(10);
4170                 else if (i <= 100)
4171                         msleep(100);
4172                 else
4173                         msleep(1000);
4174
4175                 if (i == 150) {
4176                         /* Do post */
4177                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4178                         lpfc_sli_brdrestart(phba);
4179                 }
4180                 /* Read the HBA Host Status Register */
4181                 if (lpfc_readl(phba->HSregaddr, &status))
4182                         return -EIO;
4183         }
4184
4185         /* Check to see if any errors occurred during init */
4186         if (status & HS_FFERM) {
4187                 /* ERROR: During chipset initialization */
4188                 /* Adapter failed to init, chipset, status reg <status> */
4189                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4190                                 "0438 Adapter failed to init, chipset, "
4191                                 "status reg x%x, "
4192                                 "FW Data: A8 x%x AC x%x\n", status,
4193                                 readl(phba->MBslimaddr + 0xa8),
4194                                 readl(phba->MBslimaddr + 0xac));
4195                 phba->link_state = LPFC_HBA_ERROR;
4196                 return -EIO;
4197         }
4198
4199         /* Clear all interrupt enable conditions */
4200         writel(0, phba->HCregaddr);
4201         readl(phba->HCregaddr); /* flush */
4202
4203         /* setup host attn register */
4204         writel(0xffffffff, phba->HAregaddr);
4205         readl(phba->HAregaddr); /* flush */
4206         return 0;
4207 }
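
/*
 * Illustrative sketch (not part of the driver): the escalating poll
 * schedule described in the loop above (10ms x 10, 100ms x 90, then 1s
 * per attempt), factored into a helper so the ~60-second budget that
 * Falcon FIPS zeroization requires is easy to audit. The helper name
 * is hypothetical.
 */
static unsigned int lpfc_example_poll_interval_ms(int attempt)
{
        if (attempt <= 10)
                return 10;      /* first ~0.1s: poll quickly */
        if (attempt <= 100)
                return 100;     /* next ~9s */
        return 1000;            /* thereafter, once per second */
}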
4208
4209 /**
4210  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4211  *
4212  * This function calculates and returns the number of HBQs required to be
4213  * configured.
4214  **/
4215 int
4216 lpfc_sli_hbq_count(void)
4217 {
4218         return ARRAY_SIZE(lpfc_hbq_defs);
4219 }
4220
4221 /**
4222  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4223  *
4224  * This function adds the number of hbq entries in every HBQ to get
4225  * the total number of hbq entries required for the HBA and returns
4226  * the total count.
4227  **/
4228 static int
4229 lpfc_sli_hbq_entry_count(void)
4230 {
4231         int  hbq_count = lpfc_sli_hbq_count();
4232         int  count = 0;
4233         int  i;
4234
4235         for (i = 0; i < hbq_count; ++i)
4236                 count += lpfc_hbq_defs[i]->entry_count;
4237         return count;
4238 }
4239
4240 /**
4241  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4242  *
4243  * This function calculates amount of memory required for all hbq entries
4244  * to be configured and returns the total memory required.
4245  **/
4246 int
4247 lpfc_sli_hbq_size(void)
4248 {
4249         return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4250 }
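
/*
 * Illustrative sketch (not part of the driver): how the HBQ sizing
 * helpers above compose. The total footprint is the entry counts of
 * all defined HBQs summed and scaled by the entry size, so one region
 * can back every HBQ. The plain kzalloc() here is a simplification;
 * the real driver uses DMA-capable memory for HBQ entries.
 */
static void *lpfc_example_alloc_hbq_region(void)
{
        size_t total = lpfc_sli_hbq_size();    /* sum(entries) * entry size */

        return kzalloc(total, GFP_KERNEL);     /* caller must check NULL */
}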
4251
4252 /**
4253  * lpfc_sli_hbq_setup - configure and initialize HBQs
4254  * @phba: Pointer to HBA context object.
4255  *
4256  * This function is called during the SLI initialization to configure
4257  * all the HBQs and post buffers to the HBQ. The caller is not
4258  * required to hold any locks. This function will return zero if successful
4259  * else it will return negative error code.
4260  **/
4261 static int
4262 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4263 {
4264         int  hbq_count = lpfc_sli_hbq_count();
4265         LPFC_MBOXQ_t *pmb;
4266         MAILBOX_t *pmbox;
4267         uint32_t hbqno;
4268         uint32_t hbq_entry_index;
4269
4270         /* Get a Mailbox buffer to setup mailbox
4271          * commands for HBA initialization
4272          */
4273         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4274
4275         if (!pmb)
4276                 return -ENOMEM;
4277
4278         pmbox = &pmb->u.mb;
4279
4280         /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4281         phba->link_state = LPFC_INIT_MBX_CMDS;
4282         phba->hbq_in_use = 1;
4283
4284         hbq_entry_index = 0;
4285         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4286                 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4287                 phba->hbqs[hbqno].hbqPutIdx      = 0;
4288                 phba->hbqs[hbqno].local_hbqGetIdx   = 0;
4289                 phba->hbqs[hbqno].entry_count =
4290                         lpfc_hbq_defs[hbqno]->entry_count;
4291                 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4292                         hbq_entry_index, pmb);
4293                 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4294
4295                 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4296                         /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4297                            mbxStatus <status>, ring <num> */
4298
4299                         lpfc_printf_log(phba, KERN_ERR,
4300                                         LOG_SLI | LOG_VPORT,
4301                                         "1805 Adapter failed to init. "
4302                                         "Data: x%x x%x x%x\n",
4303                                         pmbox->mbxCommand,
4304                                         pmbox->mbxStatus, hbqno);
4305
4306                         phba->link_state = LPFC_HBA_ERROR;
4307                         mempool_free(pmb, phba->mbox_mem_pool);
4308                         return -ENXIO;
4309                 }
4310         }
4311         phba->hbq_count = hbq_count;
4312
4313         mempool_free(pmb, phba->mbox_mem_pool);
4314
4315         /* Initially populate or replenish the HBQs */
4316         for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4317                 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4318         return 0;
4319 }
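
/*
 * Illustrative sketch (not part of the driver): the polled-mailbox
 * error handling used in the HBQ setup loop above. MBX_POLL commands
 * complete synchronously, so any status other than MBX_SUCCESS during
 * init is treated as fatal and the HBA is marked in error. Building
 * the command itself (e.g. via lpfc_config_hbq()) is elided.
 */
static int lpfc_example_issue_polled(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENXIO;
        }
        return 0;
}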
4320
4321 /**
4322  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4323  * @phba: Pointer to HBA context object.
4324  *
4325  * This function is called during the SLI initialization to configure
4326  * all the HBQs and post buffers to the HBQ. The caller is not
4327  * required to hold any locks. This function will return zero if successful
4328  * else it will return negative error code.
4329  **/
4330 static int
4331 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4332 {
4333         phba->hbq_in_use = 1;
4334         phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
4335         phba->hbq_count = 1;
4336         /* Initially populate or replenish the HBQs */
4337         lpfc_sli_hbqbuf_init_hbqs(phba, 0);
4338         return 0;
4339 }
4340
4341 /**
4342  * lpfc_sli_config_port - Issue config port mailbox command
4343  * @phba: Pointer to HBA context object.
4344  * @sli_mode: sli mode - 2/3
4345  *
4346  * This function is called by the SLI initialization code path
4347  * to issue the config_port mailbox command. This function restarts the
4348  * HBA firmware and issues a config_port mailbox command to configure
4349  * the SLI interface in the SLI mode specified by the sli_mode
4350  * argument. The caller is not required to hold any locks.
4351  * The function returns 0 if successful, else returns negative error
4352  * code.
4353  **/
4354 int
4355 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4356 {
4357         LPFC_MBOXQ_t *pmb;
4358         uint32_t resetcount = 0, rc = 0, done = 0;
4359
4360         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4361         if (!pmb) {
4362                 phba->link_state = LPFC_HBA_ERROR;
4363                 return -ENOMEM;
4364         }
4365
4366         phba->sli_rev = sli_mode;
4367         while (resetcount < 2 && !done) {
4368                 spin_lock_irq(&phba->hbalock);
4369                 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4370                 spin_unlock_irq(&phba->hbalock);
4371                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4372                 lpfc_sli_brdrestart(phba);
4373                 rc = lpfc_sli_chipset_init(phba);
4374                 if (rc)
4375                         break;
4376
4377                 spin_lock_irq(&phba->hbalock);
4378                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4379                 spin_unlock_irq(&phba->hbalock);
4380                 resetcount++;
4381
4382                 /* Call pre CONFIG_PORT mailbox command initialization.  A
4383                  * value of 0 means the call was successful.  Any other
4384                  * nonzero value is a failure, but if ERESTART is returned,
4385                  * the driver may reset the HBA and try again.
4386                  */
4387                 rc = lpfc_config_port_prep(phba);
4388                 if (rc == -ERESTART) {
4389                         phba->link_state = LPFC_LINK_UNKNOWN;
4390                         continue;
4391                 } else if (rc)
4392                         break;
4393
4394                 phba->link_state = LPFC_INIT_MBX_CMDS;
4395                 lpfc_config_port(phba, pmb);
4396                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4397                 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4398                                         LPFC_SLI3_HBQ_ENABLED |
4399                                         LPFC_SLI3_CRP_ENABLED |
4400                                         LPFC_SLI3_BG_ENABLED |
4401                                         LPFC_SLI3_DSS_ENABLED);
4402                 if (rc != MBX_SUCCESS) {
4403                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4404                                 "0442 Adapter failed to init, mbxCmd x%x "
4405                                 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4406                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4407                         spin_lock_irq(&phba->hbalock);
4408                         phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4409                         spin_unlock_irq(&phba->hbalock);
4410                         rc = -ENXIO;
4411                 } else {
4412                         /* Allow asynchronous mailbox command to go through */
4413                         spin_lock_irq(&phba->hbalock);
4414                         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4415                         spin_unlock_irq(&phba->hbalock);
4416                         done = 1;
4417
4418                         if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4419                             (pmb->u.mb.un.varCfgPort.gasabt == 0))
4420                                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4421                                         "3110 Port did not grant ASABT\n");
4422                 }
4423         }
4424         if (!done) {
4425                 rc = -EINVAL;
4426                 goto do_prep_failed;
4427         }
4428         if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4429                 if (!pmb->u.mb.un.varCfgPort.cMA) {
4430                         rc = -ENXIO;
4431                         goto do_prep_failed;
4432                 }
4433                 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
4434                         phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
4435                         phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4436                         phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4437                                 phba->max_vpi : phba->max_vports;
4438
4439                 } else
4440                         phba->max_vpi = 0;
4441                 phba->fips_level = 0;
4442                 phba->fips_spec_rev = 0;
4443                 if (pmb->u.mb.un.varCfgPort.gdss) {
4444                         phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
4445                         phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4446                         phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4447                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4448                                         "2850 Security Crypto Active. FIPS x%d "
4449                                         "(Spec Rev: x%d)",
4450                                         phba->fips_level, phba->fips_spec_rev);
4451                 }
4452                 if (pmb->u.mb.un.varCfgPort.sec_err) {
4453                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4454                                         "2856 Config Port Security Crypto "
4455                                         "Error: x%x ",
4456                                         pmb->u.mb.un.varCfgPort.sec_err);
4457                 }
4458                 if (pmb->u.mb.un.varCfgPort.gerbm)
4459                         phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
4460                 if (pmb->u.mb.un.varCfgPort.gcrp)
4461                         phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
4462
4463                 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4464                 phba->port_gp = phba->mbox->us.s3_pgp.port;
4465
4466                 if (phba->cfg_enable_bg) {
4467                         if (pmb->u.mb.un.varCfgPort.gbg)
4468                                 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4469                         else
4470                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4471                                                 "0443 Adapter did not grant "
4472                                                 "BlockGuard\n");
4473                 }
4474         } else {
4475                 phba->hbq_get = NULL;
4476                 phba->port_gp = phba->mbox->us.s2.port;
4477                 phba->max_vpi = 0;
4478         }
4479 do_prep_failed:
4480         mempool_free(pmb, phba->mbox_mem_pool);
4481         return rc;
4482 }
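
/*
 * Illustrative sketch (not part of the driver): the bounded
 * reset-and-retry shape of lpfc_sli_config_port() above. A prep step
 * returning -ERESTART earns exactly one more pass (after another HBA
 * reset, elided here); any other failure, or exhausting the attempts,
 * ends the loop. The helper name is hypothetical.
 */
static int lpfc_example_config_retry(struct lpfc_hba *phba)
{
        int resets = 0, rc = -EINVAL;

        while (resets++ < 2) {
                /* ... hypothetical HBA reset + chipset wait here ... */
                rc = lpfc_config_port_prep(phba);
                if (rc == -ERESTART)
                        continue;       /* reset and try once more */
                break;                  /* success or hard failure */
        }
        return rc;
}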
4483
4484
4485 /**
4486  * lpfc_sli_hba_setup - SLI initialization function
4487  * @phba: Pointer to HBA context object.
4488  *
4489  * This function is the main SLI initialization function. This function
4490  * is called by the HBA initialization code, HBA reset code and HBA
4491  * error attention handler code. Caller is not required to hold any
4492  * locks. This function issues config_port mailbox command to configure
4493  * the SLI, setup iocb rings and HBQ rings. In the end the function
4494  * calls the config_port_post function to issue init_link mailbox
4495  * command and to start the discovery. The function will return zero
4496  * if successful, else it will return negative error code.
4497  **/
4498 int
4499 lpfc_sli_hba_setup(struct lpfc_hba *phba)
4500 {
4501         uint32_t rc;
4502         int  mode = 3, i;
4503         int longs;
4504
4505         switch (lpfc_sli_mode) {
4506         case 2:
4507                 if (phba->cfg_enable_npiv) {
4508                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4509                                 "1824 NPIV enabled: Override lpfc_sli_mode "
4510                                 "parameter (%d) to auto (0).\n",
4511                                 lpfc_sli_mode);
4512                         break;
4513                 }
4514                 mode = 2;
4515                 break;
4516         case 0:
4517         case 3:
4518                 break;
4519         default:
4520                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4521                                 "1819 Unrecognized lpfc_sli_mode "
4522                                 "parameter: %d.\n", lpfc_sli_mode);
4523
4524                 break;
4525         }
4526
4527         rc = lpfc_sli_config_port(phba, mode);
4528
4529         if (rc && lpfc_sli_mode == 3)
4530                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4531                                 "1820 Unable to select SLI-3.  "
4532                                 "Not supported by adapter.\n");
4533         if (rc && mode != 2)
4534                 rc = lpfc_sli_config_port(phba, 2);
4535         if (rc)
4536                 goto lpfc_sli_hba_setup_error;
4537
4538         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4539         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4540                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4541                 if (!rc) {
4542                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4543                                         "2709 This device supports "
4544                                         "Advanced Error Reporting (AER)\n");
4545                         spin_lock_irq(&phba->hbalock);
4546                         phba->hba_flag |= HBA_AER_ENABLED;
4547                         spin_unlock_irq(&phba->hbalock);
4548                 } else {
4549                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4550                                         "2708 This device does not support "
4551                                         "Advanced Error Reporting (AER)\n");
4552                         phba->cfg_aer_support = 0;
4553                 }
4554         }
4555
4556         if (phba->sli_rev == 3) {
4557                 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4558                 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
4559         } else {
4560                 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4561                 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
4562                 phba->sli3_options = 0;
4563         }
4564
4565         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4566                         "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4567                         phba->sli_rev, phba->max_vpi);
4568         rc = lpfc_sli_ring_map(phba);
4569
4570         if (rc)
4571                 goto lpfc_sli_hba_setup_error;
4572
4573         /* Initialize VPIs. */
4574         if (phba->sli_rev == LPFC_SLI_REV3) {
4575                 /*
4576                  * The VPI bitmask and physical ID array are allocated
4577                  * and initialized once only - at driver load.  A port
4578                  * reset doesn't need to reinitialize this memory.
4579                  */
4580                 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4581                         longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4582                         phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4583                                                   GFP_KERNEL);
4584                         if (!phba->vpi_bmask) {
4585                                 rc = -ENOMEM;
4586                                 goto lpfc_sli_hba_setup_error;
4587                         }
4588
4589                         phba->vpi_ids = kzalloc(
4590                                         (phba->max_vpi+1) * sizeof(uint16_t),
4591                                         GFP_KERNEL);
4592                         if (!phba->vpi_ids) {
4593                                 kfree(phba->vpi_bmask);
4594                                 rc = -ENOMEM;
4595                                 goto lpfc_sli_hba_setup_error;
4596                         }
4597                         for (i = 0; i < phba->max_vpi; i++)
4598                                 phba->vpi_ids[i] = i;
4599                 }
4600         }
4601
4602         /* Init HBQs */
4603         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4604                 rc = lpfc_sli_hbq_setup(phba);
4605                 if (rc)
4606                         goto lpfc_sli_hba_setup_error;
4607         }
4608         spin_lock_irq(&phba->hbalock);
4609         phba->sli.sli_flag |= LPFC_PROCESS_LA;
4610         spin_unlock_irq(&phba->hbalock);
4611
4612         rc = lpfc_config_port_post(phba);
4613         if (rc)
4614                 goto lpfc_sli_hba_setup_error;
4615
4616         return rc;
4617
4618 lpfc_sli_hba_setup_error:
4619         phba->link_state = LPFC_HBA_ERROR;
4620         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4621                         "0445 Firmware initialization failed\n");
4622         return rc;
4623 }
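
/*
 * Illustrative sketch (not part of the driver): handing out a VPI from
 * the vpi_bmask/vpi_ids pair initialized above. The bitmap tracks
 * which slots are in use and the ids array maps a slot to its hardware
 * VPI. This hypothetical helper ignores the locking a real allocator
 * would need.
 */
static int lpfc_example_alloc_vpi(struct lpfc_hba *phba)
{
        int slot;

        slot = find_first_zero_bit(phba->vpi_bmask, phba->max_vpi);
        if (slot >= phba->max_vpi)
                return -ENOSPC;                 /* all VPIs in use */
        set_bit(slot, phba->vpi_bmask);
        return phba->vpi_ids[slot];             /* hardware VPI */
}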
4624
4625 /**
4626  * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4627  * @phba: Pointer to HBA context object.
4628  * @mboxq: mailbox pointer.
4629  * This function issue a dump mailbox command to read config region
4630  * 23 and parse the records in the region and populate driver
4631  * data structure.
4632  **/
4633 static int
4634 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
4635 {
4636         LPFC_MBOXQ_t *mboxq;
4637         struct lpfc_dmabuf *mp;
4638         struct lpfc_mqe *mqe;
4639         uint32_t data_length;
4640         int rc;
4641
4642         /* Program the default value of vlan_id and fc_map */
4643         phba->valid_vlan = 0;
4644         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4645         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4646         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4647
4648         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4649         if (!mboxq)
4650                 return -ENOMEM;
4651
4652         mqe = &mboxq->u.mqe;
4653         if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
4654                 rc = -ENOMEM;
4655                 goto out_free_mboxq;
4656         }
4657
4658         mp = (struct lpfc_dmabuf *) mboxq->context1;
4659         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4660
4661         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4662                         "(%d):2571 Mailbox cmd x%x Status x%x "
4663                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4664                         "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4665                         "CQ: x%x x%x x%x x%x\n",
4666                         mboxq->vport ? mboxq->vport->vpi : 0,
4667                         bf_get(lpfc_mqe_command, mqe),
4668                         bf_get(lpfc_mqe_status, mqe),
4669                         mqe->un.mb_words[0], mqe->un.mb_words[1],
4670                         mqe->un.mb_words[2], mqe->un.mb_words[3],
4671                         mqe->un.mb_words[4], mqe->un.mb_words[5],
4672                         mqe->un.mb_words[6], mqe->un.mb_words[7],
4673                         mqe->un.mb_words[8], mqe->un.mb_words[9],
4674                         mqe->un.mb_words[10], mqe->un.mb_words[11],
4675                         mqe->un.mb_words[12], mqe->un.mb_words[13],
4676                         mqe->un.mb_words[14], mqe->un.mb_words[15],
4677                         mqe->un.mb_words[16], mqe->un.mb_words[50],
4678                         mboxq->mcqe.word0,
4679                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
4680                         mboxq->mcqe.trailer);
4681
4682         if (rc) {
4683                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4684                 kfree(mp);
4685                 rc = -EIO;
4686                 goto out_free_mboxq;
4687         }
4688         data_length = mqe->un.mb_words[5];
4689         if (data_length > DMP_RGN23_SIZE) {
4690                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4691                 kfree(mp);
4692                 rc = -EIO;
4693                 goto out_free_mboxq;
4694         }
4695
4696         lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4697         lpfc_mbuf_free(phba, mp->virt, mp->phys);
4698         kfree(mp);
4699         rc = 0;
4700
4701 out_free_mboxq:
4702         mempool_free(mboxq, phba->mbox_mem_pool);
4703         return rc;
4704 }
4705
4706 /**
4707  * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4708  * @phba: pointer to lpfc hba data structure.
4709  * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4710  * @vpd: pointer to the memory to hold resulting port vpd data.
4711  * @vpd_size: On input, the number of bytes allocated to @vpd.
4712  *            On output, the number of data bytes in @vpd.
4713  *
4714  * This routine executes a READ_REV SLI4 mailbox command.  In
4715  * addition, this routine gets the port vpd data.
4716  *
4717  * Return codes
4718  *      0 - successful
4719  *      -ENOMEM - could not allocate memory.
4720  **/
4721 static int
4722 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4723                     uint8_t *vpd, uint32_t *vpd_size)
4724 {
4725         int rc = 0;
4726         uint32_t dma_size;
4727         struct lpfc_dmabuf *dmabuf;
4728         struct lpfc_mqe *mqe;
4729
4730         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4731         if (!dmabuf)
4732                 return -ENOMEM;
4733
4734         /*
4735          * Get a DMA buffer for the vpd data resulting from the READ_REV
4736          * mailbox command.
4737          */
4738         dma_size = *vpd_size;
4739         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4740                                           dma_size,
4741                                           &dmabuf->phys,
4742                                           GFP_KERNEL);
4743         if (!dmabuf->virt) {
4744                 kfree(dmabuf);
4745                 return -ENOMEM;
4746         }
4747         memset(dmabuf->virt, 0, dma_size);
4748
4749         /*
4750          * The SLI4 implementation of READ_REV conflicts at word1,
4751          * bits 31:16 and SLI4 adds vpd functionality not present
4752          * in SLI3.  This code corrects the conflicts.
4753          */
4754         lpfc_read_rev(phba, mboxq);
4755         mqe = &mboxq->u.mqe;
4756         mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4757         mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4758         mqe->un.read_rev.word1 &= 0x0000FFFF;
4759         bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4760         bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4761
4762         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4763         if (rc) {
4764                 dma_free_coherent(&phba->pcidev->dev, dma_size,
4765                                   dmabuf->virt, dmabuf->phys);
4766                 kfree(dmabuf);
4767                 return -EIO;
4768         }
4769
4770         /*
4771          * The available vpd length cannot be bigger than the
4772          * DMA buffer passed to the port.  Catch the less than
4773          * case and update the caller's size.
4774          */
4775         if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4776                 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4777
4778         memcpy(vpd, dmabuf->virt, *vpd_size);
4779
4780         dma_free_coherent(&phba->pcidev->dev, dma_size,
4781                           dmabuf->virt, dmabuf->phys);
4782         kfree(dmabuf);
4783         return 0;
4784 }
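
/*
 * Illustrative sketch (not part of the driver): the
 * allocate/fill/clamp/copy/free shape of lpfc_sli4_read_rev() above.
 * The copy length is clamped to what the port reports so a short
 * response never overruns the caller's buffer. The mailbox round trip
 * that fills the DMA buffer is elided and hypothetical here.
 */
static int lpfc_example_read_blob(struct lpfc_hba *phba, uint8_t *out,
                                  uint32_t *len)
{
        uint32_t dma_size = *len;       /* remember the allocation size */
        dma_addr_t phys;
        void *virt;

        virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size, &phys,
                                  GFP_KERNEL);
        if (!virt)
                return -ENOMEM;

        /* ... hypothetical mailbox command fills @virt, reports avail ... */
        /* if (avail < *len) *len = avail;   clamp to the port's report  */

        memcpy(out, virt, *len);
        dma_free_coherent(&phba->pcidev->dev, dma_size, virt, phys);
        return 0;
}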
4785
4786 /**
4787  * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
4788  * @phba: pointer to lpfc hba data structure.
4789  *
4790  * This routine retrieves the SLI4 device physical port name that this
4791  * PCI function is attached to.
4792  *
4793  * Return codes
4794  *      0 - successful
4795  *      otherwise - failed to retrieve physical port name
4796  **/
4797 static int
4798 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
4799 {
4800         LPFC_MBOXQ_t *mboxq;
4801         struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
4802         struct lpfc_controller_attribute *cntl_attr;
4803         struct lpfc_mbx_get_port_name *get_port_name;
4804         void *virtaddr = NULL;
4805         uint32_t alloclen, reqlen;
4806         uint32_t shdr_status, shdr_add_status;
4807         union lpfc_sli4_cfg_shdr *shdr;
4808         char cport_name = 0;
4809         int rc;
4810
4811         /* We assume nothing at this point */
4812         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4813         phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
4814
4815         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4816         if (!mboxq)
4817                 return -ENOMEM;
4818         /* obtain link type and link number via READ_CONFIG */
4819         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4820         lpfc_sli4_read_config(phba);
4821         if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
4822                 goto retrieve_ppname;
4823
4824         /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
4825         reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
4826         alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4827                         LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
4828                         LPFC_SLI4_MBX_NEMBED);
4829         if (alloclen < reqlen) {
4830                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4831                                 "3084 Allocated DMA memory size (%d) is "
4832                                 "less than the requested DMA memory size "
4833                                 "(%d)\n", alloclen, reqlen);
4834                 rc = -ENOMEM;
4835                 goto out_free_mboxq;
4836         }
4837         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4838         virtaddr = mboxq->sge_array->addr[0];
4839         mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
4840         shdr = &mbx_cntl_attr->cfg_shdr;
4841         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4842         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4843         if (shdr_status || shdr_add_status || rc) {
4844                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4845                                 "3085 Mailbox x%x (x%x/x%x) failed, "
4846                                 "rc:x%x, status:x%x, add_status:x%x\n",
4847                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4848                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4849                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4850                                 rc, shdr_status, shdr_add_status);
4851                 rc = -ENXIO;
4852                 goto out_free_mboxq;
4853         }
4854         cntl_attr = &mbx_cntl_attr->cntl_attr;
4855         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
4856         phba->sli4_hba.lnk_info.lnk_tp =
4857                 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
4858         phba->sli4_hba.lnk_info.lnk_no =
4859                 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
4860         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4861                         "3086 lnk_type:%d, lnk_numb:%d\n",
4862                         phba->sli4_hba.lnk_info.lnk_tp,
4863                         phba->sli4_hba.lnk_info.lnk_no);
4864
4865 retrieve_ppname:
4866         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4867                 LPFC_MBOX_OPCODE_GET_PORT_NAME,
4868                 sizeof(struct lpfc_mbx_get_port_name) -
4869                 sizeof(struct lpfc_sli4_cfg_mhdr),
4870                 LPFC_SLI4_MBX_EMBED);
4871         get_port_name = &mboxq->u.mqe.un.get_port_name;
4872         shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
4873         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
4874         bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
4875                 phba->sli4_hba.lnk_info.lnk_tp);
4876         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4877         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4878         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4879         if (shdr_status || shdr_add_status || rc) {
4880                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4881                                 "3087 Mailbox x%x (x%x/x%x) failed: "
4882                                 "rc:x%x, status:x%x, add_status:x%x\n",
4883                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4884                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4885                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4886                                 rc, shdr_status, shdr_add_status);
4887                 rc = -ENXIO;
4888                 goto out_free_mboxq;
4889         }
4890         switch (phba->sli4_hba.lnk_info.lnk_no) {
4891         case LPFC_LINK_NUMBER_0:
4892                 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
4893                                 &get_port_name->u.response);
4894                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4895                 break;
4896         case LPFC_LINK_NUMBER_1:
4897                 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
4898                                 &get_port_name->u.response);
4899                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4900                 break;
4901         case LPFC_LINK_NUMBER_2:
4902                 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
4903                                 &get_port_name->u.response);
4904                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4905                 break;
4906         case LPFC_LINK_NUMBER_3:
4907                 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
4908                                 &get_port_name->u.response);
4909                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4910                 break;
4911         default:
4912                 break;
4913         }
4914
4915         if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
4916                 phba->Port[0] = cport_name;
4917                 phba->Port[1] = '\0';
4918                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4919                                 "3091 SLI get port name: %s\n", phba->Port);
4920         }
4921
4922 out_free_mboxq:
4923         if (rc != MBX_TIMEOUT) {
4924                 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
4925                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
4926                 else
4927                         mempool_free(mboxq, phba->mbox_mem_pool);
4928         }
4929         return rc;
4930 }
4931
4932 /**
4933  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4934  * @phba: pointer to lpfc hba data structure.
4935  *
4936  * This routine is called to explicitly arm the SLI4 device's completion and
4937  * event queues.
4938  **/
4939 static void
4940 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4941 {
4942         uint8_t fcp_eqidx;
4943
4944         lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4945         lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4946         fcp_eqidx = 0;
4947         if (phba->sli4_hba.fcp_cq) {
4948                 do {
4949                         lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4950                                              LPFC_QUEUE_REARM);
4951                 } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
4952         }
4953         if (phba->sli4_hba.hba_eq) {
4954                 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
4955                      fcp_eqidx++)
4956                         lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
4957                                              LPFC_QUEUE_REARM);
4958         }
4959 }
4960
4961 /**
4962  * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
4963  * @phba: Pointer to HBA context object.
4964  * @type: The resource extent type.
4965  * @extnt_count: buffer to hold port available extent count.
4966  * @extnt_size: buffer to hold element count per extent.
4967  *
4968  * This function calls the port and retrieves the number of available
4969  * extents and their size for a particular extent type.
4970  *
4971  * Returns: 0 if successful.  Nonzero otherwise.
4972  **/
4973 int
4974 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4975                                uint16_t *extnt_count, uint16_t *extnt_size)
4976 {
4977         int rc = 0;
4978         uint32_t length;
4979         uint32_t mbox_tmo;
4980         struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
4981         LPFC_MBOXQ_t *mbox;
4982
4983         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4984         if (!mbox)
4985                 return -ENOMEM;
4986
4987         /* Find out how many extents are available for this resource type */
4988         length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
4989                   sizeof(struct lpfc_sli4_cfg_mhdr));
4990         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
4991                          LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
4992                          length, LPFC_SLI4_MBX_EMBED);
4993
4994         /* Send an extents count of 0 - the GET doesn't use it. */
4995         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
4996                                         LPFC_SLI4_MBX_EMBED);
4997         if (unlikely(rc)) {
4998                 rc = -EIO;
4999                 goto err_exit;
5000         }
5001
5002         if (!phba->sli4_hba.intr_enable)
5003                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5004         else {
5005                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5006                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5007         }
5008         if (unlikely(rc)) {
5009                 rc = -EIO;
5010                 goto err_exit;
5011         }
5012
5013         rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5014         if (bf_get(lpfc_mbox_hdr_status,
5015                    &rsrc_info->header.cfg_shdr.response)) {
5016                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5017                                 "2930 Failed to get resource extents "
5018                                 "Status 0x%x Add'l Status 0x%x\n",
5019                                 bf_get(lpfc_mbox_hdr_status,
5020                                        &rsrc_info->header.cfg_shdr.response),
5021                                 bf_get(lpfc_mbox_hdr_add_status,
5022                                        &rsrc_info->header.cfg_shdr.response));
5023                 rc = -EIO;
5024                 goto err_exit;
5025         }
5026
5027         *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5028                               &rsrc_info->u.rsp);
5029         *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5030                              &rsrc_info->u.rsp);
5031
5032         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5033                         "3162 Retrieved extents type-%d from port: count:%d, "
5034                         "size:%d\n", type, *extnt_count, *extnt_size);
5035
5036 err_exit:
5037         mempool_free(mbox, phba->mbox_mem_pool);
5038         return rc;
5039 }
5040
5041 /**
5042  * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5043  * @phba: Pointer to HBA context object.
5044  * @type: The extent type to check.
5045  *
5046  * This function reads the current available extents from the port and checks
5047  * if the extent count or extent size has changed since the last access.
5048  * Callers use this routine after a port reset to determine whether
5049  * there is an extent reprovisioning requirement.
5050  *
5051  * Returns:
5052  *   -Error: a negative error code indicates a problem.
5053  *   1: Extent count or size has changed.
5054  *   0: No changes.
5055  **/
5056 static int
5057 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5058 {
5059         uint16_t curr_ext_cnt, rsrc_ext_cnt;
5060         uint16_t size_diff, rsrc_ext_size;
5061         int rc = 0;
5062         struct lpfc_rsrc_blks *rsrc_entry;
5063         struct list_head *rsrc_blk_list = NULL;
5064
5065         size_diff = 0;
5066         curr_ext_cnt = 0;
5067         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5068                                             &rsrc_ext_cnt,
5069                                             &rsrc_ext_size);
5070         if (unlikely(rc))
5071                 return -EIO;
5072
5073         switch (type) {
5074         case LPFC_RSC_TYPE_FCOE_RPI:
5075                 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5076                 break;
5077         case LPFC_RSC_TYPE_FCOE_VPI:
5078                 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5079                 break;
5080         case LPFC_RSC_TYPE_FCOE_XRI:
5081                 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5082                 break;
5083         case LPFC_RSC_TYPE_FCOE_VFI:
5084                 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5085                 break;
5086         default:
5087                 break;
5088         }
5089
5090         list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5091                 curr_ext_cnt++;
5092                 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5093                         size_diff++;
5094         }
5095
5096         if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5097                 rc = 1;
5098
5099         return rc;
5100 }
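
/*
 * Illustrative sketch (not part of the driver): the change-detection
 * walk used above, reduced to its core. The driver's block list is
 * compared with the port's current report; any count or size mismatch
 * means extents must be reprovisioned after the reset. The helper name
 * is hypothetical.
 */
static int lpfc_example_extents_changed(struct list_head *blk_list,
                                        uint16_t port_cnt, uint16_t port_size)
{
        struct lpfc_rsrc_blks *entry;
        uint16_t cnt = 0;

        list_for_each_entry(entry, blk_list, list) {
                cnt++;
                if (entry->rsrc_size != port_size)
                        return 1;               /* extent size changed */
        }
        return cnt != port_cnt;                 /* extent count changed? */
}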
5101
5102 /**
5103  * lpfc_sli4_cfg_post_extnts - Post an SLI4 extent allocation request
5104  * @phba: Pointer to HBA context object.
5105  * @extnt_cnt: number of available extents.
5106  * @type: the extent type (rpi, xri, vfi, vpi).
5107  * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5108  * @mbox: pointer to the caller's allocated mailbox structure.
5109  *
5110  * This function executes the extents allocation request.  It also
5111  * takes care of the amount of memory needed to allocate or get the
5112  * allocated extents. It is the caller's responsibility to evaluate
5113  * the response.
5114  *
5115  * Returns:
5116  *   -Error:  Error value describes the condition found.
5117  *   0: if successful
5118  **/
5119 static int
5120 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5121                           uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5122 {
5123         int rc = 0;
5124         uint32_t req_len;
5125         uint32_t emb_len;
5126         uint32_t alloc_len, mbox_tmo;
5127
5128         /* Calculate the total requested length of the dma memory */
5129         req_len = extnt_cnt * sizeof(uint16_t);
5130
5131         /*
5132          * Calculate the size of an embedded mailbox.  The uint32_t
5133          * accounts for the extents-specific word.
5134          */
5135         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5136                 sizeof(uint32_t);
5137
5138         /*
5139          * Presume the allocation and response will fit into an embedded
5140          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
5141          */
5142         *emb = LPFC_SLI4_MBX_EMBED;
5143         if (req_len > emb_len) {
5144                 req_len = extnt_cnt * sizeof(uint16_t) +
5145                         sizeof(union lpfc_sli4_cfg_shdr) +
5146                         sizeof(uint32_t);
5147                 *emb = LPFC_SLI4_MBX_NEMBED;
5148         }
5149
5150         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5151                                      LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5152                                      req_len, *emb);
5153         if (alloc_len < req_len) {
5154                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5155                         "2982 Allocated DMA memory size (x%x) is "
5156                         "less than the requested DMA memory "
5157                         "size (x%x)\n", alloc_len, req_len);
5158                 return -ENOMEM;
5159         }
5160         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5161         if (unlikely(rc))
5162                 return -EIO;
5163
5164         if (!phba->sli4_hba.intr_enable)
5165                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5166         else {
5167                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5168                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5169         }
5170
5171         if (unlikely(rc))
5172                 rc = -EIO;
5173         return rc;
5174 }
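
/*
 * Illustrative sketch (not part of the driver): the embedded versus
 * non-embedded decision made above. If the extent-id payload fits in
 * what remains of the embedded MAILBOX_t after the header and the
 * extents-specific word, the cheaper embedded form is used; otherwise
 * the request is rebuilt with the SGE-based non-embedded header.
 */
static bool lpfc_example_fits_embedded(uint16_t extnt_cnt)
{
        uint32_t req_len = extnt_cnt * sizeof(uint16_t);
        uint32_t emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
                           sizeof(uint32_t);

        return req_len <= emb_len;      /* true => LPFC_SLI4_MBX_EMBED */
}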
5175
5176 /**
5177  * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5178  * @phba: Pointer to HBA context object.
5179  * @type:  The resource extent type to allocate.
5180  *
5181  * This function allocates the number of elements for the specified
5182  * resource type.
5183  **/
5184 static int
5185 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5186 {
5187         bool emb = false;
5188         uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5189         uint16_t rsrc_id, rsrc_start, j, k;
5190         uint16_t *ids;
5191         int i, rc;
5192         unsigned long longs;
5193         unsigned long *bmask;
5194         struct lpfc_rsrc_blks *rsrc_blks;
5195         LPFC_MBOXQ_t *mbox;
5196         uint32_t length;
5197         struct lpfc_id_range *id_array = NULL;
5198         void *virtaddr = NULL;
5199         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5200         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5201         struct list_head *ext_blk_list;
5202
5203         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5204                                             &rsrc_cnt,
5205                                             &rsrc_size);
5206         if (unlikely(rc))
5207                 return -EIO;
5208
5209         if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5210                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5211                         "3009 No available Resource Extents "
5212                         "for resource type 0x%x: Count: 0x%x, "
5213                         "Size 0x%x\n", type, rsrc_cnt,
5214                         rsrc_size);
5215                 return -ENOMEM;
5216         }
5217
5218         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5219                         "2903 Post resource extents type-0x%x: "
5220                         "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5221
5222         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5223         if (!mbox)
5224                 return -ENOMEM;
5225
5226         rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5227         if (unlikely(rc)) {
5228                 rc = -EIO;
5229                 goto err_exit;
5230         }
5231
5232         /*
5233          * Figure out where the response is located.  Then get local pointers
5234          * to the response data.  The port does not guarantee to respond to
5235          * all extent count requests, so update the local variable with the
5236          * count actually allocated by the port.
5237          */
5238         if (emb == LPFC_SLI4_MBX_EMBED) {
5239                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5240                 id_array = &rsrc_ext->u.rsp.id[0];
5241                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5242         } else {
5243                 virtaddr = mbox->sge_array->addr[0];
5244                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5245                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5246                 id_array = &n_rsrc->id;
5247         }
5248
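        /*
         * Number of unsigned longs needed for a bitmap with one bit per
         * resource id; the "+ BITS_PER_LONG - 1" rounds up, equivalent
         * to DIV_ROUND_UP(rsrc_cnt * rsrc_size, BITS_PER_LONG).
         */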
5249         longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5250         rsrc_id_cnt = rsrc_cnt * rsrc_size;
5251
5252         /*
5253          * Based on the resource size and count, correct the base and max
5254          * resource values.
5255          */
5256         length = sizeof(struct lpfc_rsrc_blks);
5257         switch (type) {
5258         case LPFC_RSC_TYPE_FCOE_RPI:
5259                 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5260                                                    sizeof(unsigned long),
5261                                                    GFP_KERNEL);
5262                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5263                         rc = -ENOMEM;
5264                         goto err_exit;
5265                 }
5266                 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5267                                                  sizeof(uint16_t),
5268                                                  GFP_KERNEL);
5269                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5270                         kfree(phba->sli4_hba.rpi_bmask);
5271                         rc = -ENOMEM;
5272                         goto err_exit;
5273                 }
5274
5275                 /*
5276                  * The next_rpi was initialized with the maximum available
5277                  * count but the port may allocate a smaller number.  Catch
5278                  * that case and update the next_rpi.
5279                  */
5280                 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5281
5282                 /* Initialize local ptrs for common extent processing later. */
5283                 bmask = phba->sli4_hba.rpi_bmask;
5284                 ids = phba->sli4_hba.rpi_ids;
5285                 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5286                 break;
5287         case LPFC_RSC_TYPE_FCOE_VPI:
5288                 phba->vpi_bmask = kzalloc(longs *
5289                                           sizeof(unsigned long),
5290                                           GFP_KERNEL);
5291                 if (unlikely(!phba->vpi_bmask)) {
5292                         rc = -ENOMEM;
5293                         goto err_exit;
5294                 }
5295                 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5296                                          sizeof(uint16_t),
5297                                          GFP_KERNEL);
5298                 if (unlikely(!phba->vpi_ids)) {
5299                         kfree(phba->vpi_bmask);
5300                         rc = -ENOMEM;
5301                         goto err_exit;
5302                 }
5303
5304                 /* Initialize local ptrs for common extent processing later. */
5305                 bmask = phba->vpi_bmask;
5306                 ids = phba->vpi_ids;
5307                 ext_blk_list = &phba->lpfc_vpi_blk_list;
5308                 break;
5309         case LPFC_RSC_TYPE_FCOE_XRI:
5310                 phba->sli4_hba.xri_bmask = kzalloc(longs *
5311                                                    sizeof(unsigned long),
5312                                                    GFP_KERNEL);
5313                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5314                         rc = -ENOMEM;
5315                         goto err_exit;
5316                 }
5317                 phba->sli4_hba.max_cfg_param.xri_used = 0;
5318                 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5319                                                  sizeof(uint16_t),
5320                                                  GFP_KERNEL);
5321                 if (unlikely(!phba->sli4_hba.xri_ids)) {
5322                         kfree(phba->sli4_hba.xri_bmask);
5323                         rc = -ENOMEM;
5324                         goto err_exit;
5325                 }
5326
5327                 /* Initialize local ptrs for common extent processing later. */
5328                 bmask = phba->sli4_hba.xri_bmask;
5329                 ids = phba->sli4_hba.xri_ids;
5330                 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5331                 break;
5332         case LPFC_RSC_TYPE_FCOE_VFI:
5333                 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5334                                                    sizeof(unsigned long),
5335                                                    GFP_KERNEL);
5336                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5337                         rc = -ENOMEM;
5338                         goto err_exit;
5339                 }
5340                 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5341                                                  sizeof(uint16_t),
5342                                                  GFP_KERNEL);
5343                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5344                         kfree(phba->sli4_hba.vfi_bmask);
5345                         rc = -ENOMEM;
5346                         goto err_exit;
5347                 }
5348
5349                 /* Initialize local ptrs for common extent processing later. */
5350                 bmask = phba->sli4_hba.vfi_bmask;
5351                 ids = phba->sli4_hba.vfi_ids;
5352                 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5353                 break;
5354         default:
5355                 /* Unsupported resource type.  Fail the call. */
5356                 id_array = NULL;
5357                 bmask = NULL;
5358                 ids = NULL;
5359                 ext_blk_list = NULL;
                     rc = -EINVAL;
5360                 goto err_exit;
5361         }
5362
5363         /*
5364          * Complete initializing the extent configuration with the
5365          * allocated ids assigned to this function.  The bitmask serves
5366          * as an index into the array and manages the available ids.  The
5367          * array just stores the ids communicated to the port via the wqes.
5368          */
5369         for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5370                 if ((i % 2) == 0)
5371                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5372                                          &id_array[k]);
5373                 else
5374                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5375                                          &id_array[k]);
5376
5377                 rsrc_blks = kzalloc(length, GFP_KERNEL);
5378                 if (unlikely(!rsrc_blks)) {
5379                         rc = -ENOMEM;
5380                         kfree(bmask);
5381                         kfree(ids);
5382                         goto err_exit;
5383                 }
5384                 rsrc_blks->rsrc_start = rsrc_id;
5385                 rsrc_blks->rsrc_size = rsrc_size;
5386                 list_add_tail(&rsrc_blks->list, ext_blk_list);
5387                 rsrc_start = rsrc_id;
5388                 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
5389                         phba->sli4_hba.scsi_xri_start = rsrc_start +
5390                                 lpfc_sli4_get_els_iocb_cnt(phba);
5391
5392                 while (rsrc_id < (rsrc_start + rsrc_size)) {
5393                         ids[j] = rsrc_id;
5394                         rsrc_id++;
5395                         j++;
5396                 }
5397                 /* Entire word processed.  Get next word. */
5398                 if ((i % 2) == 1)
5399                         k++;
5400         }
5401  err_exit:
5402         lpfc_sli4_mbox_cmd_free(phba, mbox);
5403         return rc;
5404 }
5405
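/*
 * Illustrative sketch (not driver code): the completion loop above
 * consumes two 16-bit extent starting ids per 32-bit response word,
 * low half first, then expands each into rsrc_size consecutive ids.
 * unpack_start_ids() and the shift-based unpacking are hypothetical;
 * the driver reads the lpfc_mbx_rsrc_id_word4_0/_1 bit fields instead.
 */
static void unpack_start_ids(const unsigned int *words,
                             unsigned int ext_cnt,
                             unsigned short *start_ids)
{
        unsigned int i, k = 0;

        for (i = 0; i < ext_cnt; i++) {
                if ((i % 2) == 0)
                        start_ids[i] = words[k] & 0xffff;
                else
                        start_ids[i] = (words[k] >> 16) & 0xffff;
                /* Both halves consumed: move to the next word. */
                if ((i % 2) == 1)
                        k++;
        }
}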
5406 /**
5407  * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5408  * @phba: Pointer to HBA context object.
5409  * @type: the extent's type.
5410  *
5411  * This function deallocates all extents of a particular resource type.
5412  * SLI4 does not allow for deallocating a particular extent range.  It
5413  * is the caller's responsibility to release all kernel memory resources.
5414  **/
5415 static int
5416 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5417 {
5418         int rc;
5419         uint32_t length, mbox_tmo = 0;
5420         LPFC_MBOXQ_t *mbox;
5421         struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5422         struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5423
5424         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5425         if (!mbox)
5426                 return -ENOMEM;
5427
5428         /*
5429          * This function sends an embedded mailbox because it only sends
5430          * the resource type.  All extents of this type are released by the
5431          * port.
5432          */
5433         length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5434                   sizeof(struct lpfc_sli4_cfg_mhdr));
5435         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5436                          LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5437                          length, LPFC_SLI4_MBX_EMBED);
5438
5439         /* Send an extents count of 0 - the dealloc doesn't use it. */
5440         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5441                                         LPFC_SLI4_MBX_EMBED);
5442         if (unlikely(rc)) {
5443                 rc = -EIO;
5444                 goto out_free_mbox;
5445         }
5446         if (!phba->sli4_hba.intr_enable)
5447                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5448         else {
5449                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5450                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5451         }
5452         if (unlikely(rc)) {
5453                 rc = -EIO;
5454                 goto out_free_mbox;
5455         }
5456
5457         dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5458         if (bf_get(lpfc_mbox_hdr_status,
5459                    &dealloc_rsrc->header.cfg_shdr.response)) {
5460                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5461                                 "2919 Failed to release resource extents "
5462                                 "for type %d - Status 0x%x Add'l Status 0x%x. "
5463                                 "Resource memory not released.\n",
5464                                 type,
5465                                 bf_get(lpfc_mbox_hdr_status,
5466                                     &dealloc_rsrc->header.cfg_shdr.response),
5467                                 bf_get(lpfc_mbox_hdr_add_status,
5468                                     &dealloc_rsrc->header.cfg_shdr.response));
5469                 rc = -EIO;
5470                 goto out_free_mbox;
5471         }
5472
5473         /* Release kernel memory resources for the specific type. */
5474         switch (type) {
5475         case LPFC_RSC_TYPE_FCOE_VPI:
5476                 kfree(phba->vpi_bmask);
5477                 kfree(phba->vpi_ids);
5478                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5479                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5480                                     &phba->lpfc_vpi_blk_list, list) {
5481                         list_del_init(&rsrc_blk->list);
5482                         kfree(rsrc_blk);
5483                 }
5484                 break;
5485         case LPFC_RSC_TYPE_FCOE_XRI:
5486                 kfree(phba->sli4_hba.xri_bmask);
5487                 kfree(phba->sli4_hba.xri_ids);
5488                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5489                                     &phba->sli4_hba.lpfc_xri_blk_list, list) {
5490                         list_del_init(&rsrc_blk->list);
5491                         kfree(rsrc_blk);
5492                 }
5493                 break;
5494         case LPFC_RSC_TYPE_FCOE_VFI:
5495                 kfree(phba->sli4_hba.vfi_bmask);
5496                 kfree(phba->sli4_hba.vfi_ids);
5497                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5498                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5499                                     &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5500                         list_del_init(&rsrc_blk->list);
5501                         kfree(rsrc_blk);
5502                 }
5503                 break;
5504         case LPFC_RSC_TYPE_FCOE_RPI:
5505                 /* RPI bitmask and physical id array are cleaned up earlier. */
5506                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5507                                     &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5508                         list_del_init(&rsrc_blk->list);
5509                         kfree(rsrc_blk);
5510                 }
5511                 break;
5512         default:
5513                 break;
5514         }
5515
5516         bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5517
5518  out_free_mbox:
5519         mempool_free(mbox, phba->mbox_mem_pool);
5520         return rc;
5521 }
5522
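/*
 * Illustrative sketch (not driver code) of why the teardown above uses
 * list_for_each_entry_safe(): a node's forward pointer must be cached
 * before the node is freed.  struct example_blk and the release
 * callback are hypothetical stand-ins for struct lpfc_rsrc_blks and
 * kfree().
 */
struct example_blk {
        struct example_blk *next;
};

static void example_free_blk_list(struct example_blk *head,
                                  void (*release)(struct example_blk *))
{
        struct example_blk *cur = head;
        struct example_blk *next;

        while (cur) {
                next = cur->next;       /* save before cur is released */
                release(cur);
                cur = next;
        }
}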
5523 /**
5524  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5525  * @phba: Pointer to HBA context object.
5526  *
5527  * This function allocates all SLI4 resource identifiers.
5528  **/
5529 int
5530 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5531 {
5532         int i, rc, error = 0;
5533         uint16_t count, base;
5534         unsigned long longs;
5535
5536         if (!phba->sli4_hba.rpi_hdrs_in_use)
5537                 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
5538         if (phba->sli4_hba.extents_in_use) {
5539                 /*
5540                  * The port supports resource extents. The XRI, VPI, VFI, RPI
5541                  * resource extent count must be read and allocated before
5542                  * provisioning the resource id arrays.
5543                  */
5544                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5545                     LPFC_IDX_RSRC_RDY) {
5546                         /*
5547                          * Extent-based resources are set - the driver could
5548                          * be in a port reset. Figure out if any corrective
5549                          * actions need to be taken.
5550                          */
5551                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5552                                                  LPFC_RSC_TYPE_FCOE_VFI);
5553                         if (rc != 0)
5554                                 error++;
5555                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5556                                                  LPFC_RSC_TYPE_FCOE_VPI);
5557                         if (rc != 0)
5558                                 error++;
5559                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5560                                                  LPFC_RSC_TYPE_FCOE_XRI);
5561                         if (rc != 0)
5562                                 error++;
5563                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5564                                                  LPFC_RSC_TYPE_FCOE_RPI);
5565                         if (rc != 0)
5566                                 error++;
5567
5568                         /*
5569                          * It's possible that the number of resources
5570                          * provided to this port instance changed between
5571                          * resets.  Detect this condition and reallocate
5572                          * resources.  Otherwise, there is no action.
5573                          */
5574                         if (error) {
5575                                 lpfc_printf_log(phba, KERN_INFO,
5576                                                 LOG_MBOX | LOG_INIT,
5577                                                 "2931 Detected extent resource "
5578                                                 "change.  Reallocating all "
5579                                                 "extents.\n");
5580                                 rc = lpfc_sli4_dealloc_extent(phba,
5581                                                  LPFC_RSC_TYPE_FCOE_VFI);
5582                                 rc = lpfc_sli4_dealloc_extent(phba,
5583                                                  LPFC_RSC_TYPE_FCOE_VPI);
5584                                 rc = lpfc_sli4_dealloc_extent(phba,
5585                                                  LPFC_RSC_TYPE_FCOE_XRI);
5586                                 rc = lpfc_sli4_dealloc_extent(phba,
5587                                                  LPFC_RSC_TYPE_FCOE_RPI);
5588                         } else
5589                                 return 0;
5590                 }
5591
5592                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5593                 if (unlikely(rc))
5594                         goto err_exit;
5595
5596                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5597                 if (unlikely(rc))
5598                         goto err_exit;
5599
5600                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5601                 if (unlikely(rc))
5602                         goto err_exit;
5603
5604                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5605                 if (unlikely(rc))
5606                         goto err_exit;
5607                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5608                        LPFC_IDX_RSRC_RDY);
5609                 return rc;
5610         } else {
5611                 /*
5612                  * The port does not support resource extents.  The XRI, VPI,
5613                  * VFI, RPI resource ids were determined from READ_CONFIG.
5614                  * Just allocate the bitmasks and provision the resource id
5615                  * arrays.  After a port reset, tear down the existing
5616                  * provisioning first and rebuild it below.
5617                  */
5618                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5619                     LPFC_IDX_RSRC_RDY) {
5620                         lpfc_sli4_dealloc_resource_identifiers(phba);
5621                         lpfc_sli4_remove_rpis(phba);
5622                 }
5623                 /* RPIs. */
5624                 count = phba->sli4_hba.max_cfg_param.max_rpi;
5625                 base = phba->sli4_hba.max_cfg_param.rpi_base;
5626                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5627                 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5628                                                    sizeof(unsigned long),
5629                                                    GFP_KERNEL);
5630                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5631                         rc = -ENOMEM;
5632                         goto err_exit;
5633                 }
5634                 phba->sli4_hba.rpi_ids = kzalloc(count *
5635                                                  sizeof(uint16_t),
5636                                                  GFP_KERNEL);
5637                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5638                         rc = -ENOMEM;
5639                         goto free_rpi_bmask;
5640                 }
5641
5642                 for (i = 0; i < count; i++)
5643                         phba->sli4_hba.rpi_ids[i] = base + i;
5644
5645                 /* VPIs. */
5646                 count = phba->sli4_hba.max_cfg_param.max_vpi;
5647                 base = phba->sli4_hba.max_cfg_param.vpi_base;
5648                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5649                 phba->vpi_bmask = kzalloc(longs *
5650                                           sizeof(unsigned long),
5651                                           GFP_KERNEL);
5652                 if (unlikely(!phba->vpi_bmask)) {
5653                         rc = -ENOMEM;
5654                         goto free_rpi_ids;
5655                 }
5656                 phba->vpi_ids = kzalloc(count *
5657                                         sizeof(uint16_t),
5658                                         GFP_KERNEL);
5659                 if (unlikely(!phba->vpi_ids)) {
5660                         rc = -ENOMEM;
5661                         goto free_vpi_bmask;
5662                 }
5663
5664                 for (i = 0; i < count; i++)
5665                         phba->vpi_ids[i] = base + i;
5666
5667                 /* XRIs. */
5668                 count = phba->sli4_hba.max_cfg_param.max_xri;
5669                 base = phba->sli4_hba.max_cfg_param.xri_base;
5670                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5671                 phba->sli4_hba.xri_bmask = kzalloc(longs *
5672                                                    sizeof(unsigned long),
5673                                                    GFP_KERNEL);
5674                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5675                         rc = -ENOMEM;
5676                         goto free_vpi_ids;
5677                 }
5678                 phba->sli4_hba.max_cfg_param.xri_used = 0;
5679                 phba->sli4_hba.xri_ids = kzalloc(count *
5680                                                  sizeof(uint16_t),
5681                                                  GFP_KERNEL);
5682                 if (unlikely(!phba->sli4_hba.xri_ids)) {
5683                         rc = -ENOMEM;
5684                         goto free_xri_bmask;
5685                 }
5686
5687                 for (i = 0; i < count; i++)
5688                         phba->sli4_hba.xri_ids[i] = base + i;
5689
5690                 /* VFIs. */
5691                 count = phba->sli4_hba.max_cfg_param.max_vfi;
5692                 base = phba->sli4_hba.max_cfg_param.vfi_base;
5693                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5694                 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5695                                                    sizeof(unsigned long),
5696                                                    GFP_KERNEL);
5697                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5698                         rc = -ENOMEM;
5699                         goto free_xri_ids;
5700                 }
5701                 phba->sli4_hba.vfi_ids = kzalloc(count *
5702                                                  sizeof(uint16_t),
5703                                                  GFP_KERNEL);
5704                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5705                         rc = -ENOMEM;
5706                         goto free_vfi_bmask;
5707                 }
5708
5709                 for (i = 0; i < count; i++)
5710                         phba->sli4_hba.vfi_ids[i] = base + i;
5711
5712                 /*
5713                  * Mark all resources ready.  An HBA reset doesn't need
5714                  * to reset the initialization.
5715                  */
5716                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5717                        LPFC_IDX_RSRC_RDY);
5718                 return 0;
5719         }
5720
5721  free_vfi_bmask:
5722         kfree(phba->sli4_hba.vfi_bmask);
5723  free_xri_ids:
5724         kfree(phba->sli4_hba.xri_ids);
5725  free_xri_bmask:
5726         kfree(phba->sli4_hba.xri_bmask);
5727  free_vpi_ids:
5728         kfree(phba->vpi_ids);
5729  free_vpi_bmask:
5730         kfree(phba->vpi_bmask);
5731  free_rpi_ids:
5732         kfree(phba->sli4_hba.rpi_ids);
5733  free_rpi_bmask:
5734         kfree(phba->sli4_hba.rpi_bmask);
5735  err_exit:
5736         return rc;
5737 }
5738
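/*
 * Illustrative sketch (not driver code) of the non-extent provisioning
 * above: one bitmap bit and one physical id per resource, with index i
 * mapping to id base + i.  example_provision_ids() and bits_per_long
 * are hypothetical; the driver gets zeroed memory from kzalloc() and
 * uses BITS_PER_LONG.
 */
static void example_provision_ids(unsigned short base, unsigned short count,
                                  unsigned short *ids, unsigned long *bmask,
                                  unsigned int bits_per_long)
{
        unsigned int i;
        unsigned int longs = (count + bits_per_long - 1) / bits_per_long;

        /* A cleared bitmap marks every id as available. */
        for (i = 0; i < longs; i++)
                bmask[i] = 0;

        /* The bitmap index i corresponds to physical id base + i. */
        for (i = 0; i < count; i++)
                ids[i] = base + i;
}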
5739 /**
5740  * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
5741  * @phba: Pointer to HBA context object.
5742  *
5743  * This function releases all SLI4 resource identifiers held by the
5744  * driver, either by deallocating extents or by freeing the id arrays.
5745  **/
5746 int
5747 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5748 {
5749         if (phba->sli4_hba.extents_in_use) {
5750                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5751                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5752                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5753                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5754         } else {
5755                 kfree(phba->vpi_bmask);
5756                 kfree(phba->vpi_ids);
5757                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5758                 kfree(phba->sli4_hba.xri_bmask);
5759                 kfree(phba->sli4_hba.xri_ids);
5760                 kfree(phba->sli4_hba.vfi_bmask);
5761                 kfree(phba->sli4_hba.vfi_ids);
5762                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5763                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5764         }
5765
5766         return 0;
5767 }
5768
5769 /**
5770  * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
5771  * @phba: Pointer to HBA context object.
5772  * @type: The resource extent type.
5773  * @extnt_cnt: buffer to hold the port's extent count response.
5774  * @extnt_size: buffer to hold port extent size response.
5775  *
5776  * This function calls the port to read the host-allocated extents
5777  * for a particular type.
5778  **/
5779 int
5780 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5781                                uint16_t *extnt_cnt, uint16_t *extnt_size)
5782 {
5783         bool emb;
5784         int rc = 0;
5785         uint16_t curr_blks = 0;
5786         uint32_t req_len, emb_len;
5787         uint32_t alloc_len, mbox_tmo;
5788         struct list_head *blk_list_head;
5789         struct lpfc_rsrc_blks *rsrc_blk;
5790         LPFC_MBOXQ_t *mbox;
5791         void *virtaddr = NULL;
5792         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5793         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5794         union  lpfc_sli4_cfg_shdr *shdr;
5795
5796         switch (type) {
5797         case LPFC_RSC_TYPE_FCOE_VPI:
5798                 blk_list_head = &phba->lpfc_vpi_blk_list;
5799                 break;
5800         case LPFC_RSC_TYPE_FCOE_XRI:
5801                 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
5802                 break;
5803         case LPFC_RSC_TYPE_FCOE_VFI:
5804                 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
5805                 break;
5806         case LPFC_RSC_TYPE_FCOE_RPI:
5807                 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
5808                 break;
5809         default:
5810                 return -EIO;
5811         }
5812
5813         /* Count the number of extents currently allocated for this type. */
5814         list_for_each_entry(rsrc_blk, blk_list_head, list) {
5815                 if (curr_blks == 0) {
5816                         /*
5817                          * The GET_ALLOCATED mailbox does not return the size,
5818                          * just the count.  All extents of a given type are the
5819                          * same size, so take the size from the first allocated
5820                          * block and set the return value now.
5822                          */
5823                         *extnt_size = rsrc_blk->rsrc_size;
5824                 }
5825                 curr_blks++;
5826         }
5827
5828         /* Calculate the total requested length of the dma memory. */
5829         req_len = curr_blks * sizeof(uint16_t);
5830
5831         /*
5832          * Calculate the size of an embedded mailbox.  The uint32_t
5833          * accounts for the extent-specific word.
5834          */
5835         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5836                 sizeof(uint32_t);
5837
5838         /*
5839          * Presume the allocation and response will fit into an embedded
5840          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
5841          */
5842         emb = LPFC_SLI4_MBX_EMBED;
5844         if (req_len > emb_len) {
5845                 req_len = curr_blks * sizeof(uint16_t) +
5846                         sizeof(union lpfc_sli4_cfg_shdr) +
5847                         sizeof(uint32_t);
5848                 emb = LPFC_SLI4_MBX_NEMBED;
5849         }
5850
5851         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5852         if (!mbox)
5853                 return -ENOMEM;
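        /* mempool_alloc() does not clear the buffer, so zero it before
         * building the GET_ALLOC_RSRC_EXTENT command.
         */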
5854         memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
5855
5856         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5857                                      LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
5858                                      req_len, emb);
5859         if (alloc_len < req_len) {
5860                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5861                         "2983 Allocated DMA memory size (x%x) is "
5862                         "less than the requested DMA memory "
5863                         "size (x%x)\n", alloc_len, req_len);
5864                 rc = -ENOMEM;
5865                 goto err_exit;
5866         }
5867         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
5868         if (unlikely(rc)) {
5869                 rc = -EIO;
5870                 goto err_exit;
5871         }
5872
5873         if (!phba->sli4_hba.intr_enable)
5874                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5875         else {
5876                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5877                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5878         }
5879
5880         if (unlikely(rc)) {
5881                 rc = -EIO;
5882                 goto err_exit;
5883         }
5884
5885         /*
5886          * Figure out where the response is located.  Then get local pointers
5887          * to the response data.  The port does not guarantee to respond to
5888          * all extent count requests, so update the local variable with the
5889          * count actually allocated by the port.
5890          */
5891         if (emb == LPFC_SLI4_MBX_EMBED) {
5892                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5893                 shdr = &rsrc_ext->header.cfg_shdr;
5894                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5895         } else {
5896                 virtaddr = mbox->sge_array->addr[0];
5897                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5898                 shdr = &n_rsrc->cfg_shdr;
5899                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5900         }
5901
5902         if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
5903                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5904                         "2984 Failed to read allocated resources "
5905                         "for type %d - Status 0x%x Add'l Status 0x%x.\n",
5906                         type,
5907                         bf_get(lpfc_mbox_hdr_status, &shdr->response),
5908                         bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
5909                 rc = -EIO;
5910                 goto err_exit;
5911         }
5912  err_exit:
5913         lpfc_sli4_mbox_cmd_free(phba, mbox);
5914         return rc;
5915 }
5916
5917 /**
5918  * lpfc_sli4_repost_els_sgl_list - Repost the els buffer sgls to the port as blocks
5919  * @phba: pointer to lpfc hba data structure.
5920  *
5921  * This routine walks the list of els buffers that have been allocated and
5922  * reposts them to the port using SGL block post. This is needed after a
5923  * pci_function_reset/warm_start or start. It constructs blocks of els
5924  * buffer sgls with contiguous xris and uses the non-embedded SGL block
5925  * post mailbox command to post them to the port. Any single els buffer
5926  * sgl with a non-contiguous xri is posted with the embedded SGL post
5927  * mailbox command instead.
5928  *
5929  * Returns: 0 = success, non-zero failure.
5930  **/
5931 static int
5932 lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
5933 {
5934         struct lpfc_sglq *sglq_entry = NULL;
5935         struct lpfc_sglq *sglq_entry_next = NULL;
5936         struct lpfc_sglq *sglq_entry_first = NULL;
5937         int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
5938         int last_xritag = NO_XRI;
5939         LIST_HEAD(prep_sgl_list);
5940         LIST_HEAD(blck_sgl_list);
5941         LIST_HEAD(allc_sgl_list);
5942         LIST_HEAD(post_sgl_list);
5943         LIST_HEAD(free_sgl_list);
5944
5945         spin_lock(&phba->hbalock);
5946         list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
5947         spin_unlock(&phba->hbalock);
5948
5949         list_for_each_entry_safe(sglq_entry, sglq_entry_next,
5950                                  &allc_sgl_list, list) {
5951                 list_del_init(&sglq_entry->list);
5952                 block_cnt++;
5953                 if ((last_xritag != NO_XRI) &&
5954                     (sglq_entry->sli4_xritag != last_xritag + 1)) {
5955                         /* a hole in xri block, form a sgl posting block */
5956                         list_splice_init(&prep_sgl_list, &blck_sgl_list);
5957                         post_cnt = block_cnt - 1;
5958                         /* prepare list for next posting block */
5959                         list_add_tail(&sglq_entry->list, &prep_sgl_list);
5960                         block_cnt = 1;
5961                 } else {
5962                         /* prepare list for next posting block */
5963                         list_add_tail(&sglq_entry->list, &prep_sgl_list);
5964                         /* enough sgls for non-embed sgl mbox command */
5965                         if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
5966                                 list_splice_init(&prep_sgl_list,
5967                                                  &blck_sgl_list);
5968                                 post_cnt = block_cnt;
5969                                 block_cnt = 0;
5970                         }
5971                 }
5972                 num_posted++;
5973
5974                 /* keep track of last sgl's xritag */
5975                 last_xritag = sglq_entry->sli4_xritag;
5976
5977                 /* end of repost sgl list condition for els buffers */
5978                 if (num_posted == phba->sli4_hba.els_xri_cnt) {
5979                         if (post_cnt == 0) {
5980                                 list_splice_init(&prep_sgl_list,
5981                                                  &blck_sgl_list);
5982                                 post_cnt = block_cnt;
5983                         } else if (block_cnt == 1) {
5984                                 status = lpfc_sli4_post_sgl(phba,
5985                                                 sglq_entry->phys, 0,
5986                                                 sglq_entry->sli4_xritag);
5987                                 if (!status) {
5988                                         /* successful, put sgl to posted list */
5989                                         list_add_tail(&sglq_entry->list,
5990                                                       &post_sgl_list);
5991                                 } else {
5992                                         /* Failure, put sgl to free list */
5993                                         lpfc_printf_log(phba, KERN_WARNING,
5994                                                 LOG_SLI,
5995                                                 "3159 Failed to post els "
5996                                                 "sgl, xritag:x%x\n",
5997                                                 sglq_entry->sli4_xritag);
5998                                         list_add_tail(&sglq_entry->list,
5999                                                       &free_sgl_list);
6000                                         spin_lock_irq(&phba->hbalock);
6001                                         phba->sli4_hba.els_xri_cnt--;
6002                                         spin_unlock_irq(&phba->hbalock);
6003                                 }
6004                         }
6005                 }
6006
6007                 /* continue until a nembed page worth of sgls */
6008                 if (post_cnt == 0)
6009                         continue;
6010
6011                 /* post the els buffer list sgls as a block */
6012                 status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
6013                                                      post_cnt);
6014
6015                 if (!status) {
6016                         /* success, put sgl list to posted sgl list */
6017                         list_splice_init(&blck_sgl_list, &post_sgl_list);
6018                 } else {
6019                         /* Failure, put sgl list to free sgl list */
6020                         sglq_entry_first = list_first_entry(&blck_sgl_list,
6021                                                             struct lpfc_sglq,
6022                                                             list);
6023                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6024                                         "3160 Failed to post els sgl-list, "
6025                                         "xritag:x%x-x%x\n",
6026                                         sglq_entry_first->sli4_xritag,
6027                                         (sglq_entry_first->sli4_xritag +
6028                                          post_cnt - 1));
6029                         list_splice_init(&blck_sgl_list, &free_sgl_list);
6030                         spin_lock_irq(&phba->hbalock);
6031                         phba->sli4_hba.els_xri_cnt -= post_cnt;
6032                         spin_unlock_irq(&phba->hbalock);
6033                 }
6034
6035                 /* don't reset xritag due to hole in xri block */
6036                 if (block_cnt == 0)
6037                         last_xritag = NO_XRI;
6038
6039                 /* reset els sgl post count for next round of posting */
6040                 post_cnt = 0;
6041         }
6042
6043         /* free the els sgls failed to post */
6044         lpfc_free_sgl_list(phba, &free_sgl_list);
6045
6046         /* push the posted els sgls to the available list */
6047         if (!list_empty(&post_sgl_list)) {
6048                 spin_lock(&phba->hbalock);
6049                 list_splice_init(&post_sgl_list,
6050                                  &phba->sli4_hba.lpfc_sgl_list);
6051                 spin_unlock(&phba->hbalock);
6052         } else {
6053                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6054                                 "3161 Failure to post els sgl to port.\n");
6055                 return -EIO;
6056         }
6057         return 0;
6058 }
6059
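/*
 * Illustrative sketch (not driver code) of the grouping policy in
 * lpfc_sli4_repost_els_sgl_list() above: walk the tag sequence and
 * close out a posting block at a hole in the tags or when the block
 * reaches the non-embedded limit.  EXAMPLE_MAX_BLOCK and flush_block
 * are hypothetical stand-ins for LPFC_NEMBED_MBOX_SGL_CNT and the SGL
 * block post.
 */
#define EXAMPLE_MAX_BLOCK 4
#define EXAMPLE_TAG_NONE  (-1)

static void example_chunk_tags(const int *tags, int cnt,
                               void (*flush_block)(const int *first, int len))
{
        int i, start = 0, last = EXAMPLE_TAG_NONE;

        for (i = 0; i < cnt; i++) {
                /* A hole in the tag sequence ends the current block. */
                if (last != EXAMPLE_TAG_NONE && tags[i] != last + 1) {
                        flush_block(&tags[start], i - start);
                        start = i;
                }
                last = tags[i];
                /* A full block is posted even without a hole. */
                if (i - start + 1 == EXAMPLE_MAX_BLOCK) {
                        flush_block(&tags[start], EXAMPLE_MAX_BLOCK);
                        start = i + 1;
                        last = EXAMPLE_TAG_NONE;
                }
        }
        if (start < cnt)        /* post whatever remains */
                flush_block(&tags[start], cnt - start);
}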
6060 /**
6061  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6062  * @phba: Pointer to HBA context object.
6063  *
6064  * This function is the main SLI4 device initialization PCI function. This
6065  * function is called by the HBA initialization code, HBA reset code and
6066  * HBA error attention handler code. Caller is not required to hold any
6067  * locks.
6068  **/
6069 int
6070 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6071 {
6072         int rc;
6073         LPFC_MBOXQ_t *mboxq;
6074         struct lpfc_mqe *mqe;
6075         uint8_t *vpd;
6076         uint32_t vpd_size;
6077         uint32_t ftr_rsp = 0;
6078         struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6079         struct lpfc_vport *vport = phba->pport;
6080         struct lpfc_dmabuf *mp;
6081
6082         /* Perform a PCI function reset to start from clean */
6083         rc = lpfc_pci_function_reset(phba);
6084         if (unlikely(rc))
6085                 return -ENODEV;
6086
6087         /* Check the HBA Host Status Register for readiness */
6088         rc = lpfc_sli4_post_status_check(phba);
6089         if (unlikely(rc))
6090                 return -ENODEV;
6091         else {
6092                 spin_lock_irq(&phba->hbalock);
6093                 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6094                 spin_unlock_irq(&phba->hbalock);
6095         }
6096
6097         /*
6098          * Allocate a single mailbox container for initializing the
6099          * port.
6100          */
6101         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6102         if (!mboxq)
6103                 return -ENOMEM;
6104
6105         /* Issue READ_REV to collect vpd and FW information. */
6106         vpd_size = SLI4_PAGE_SIZE;
6107         vpd = kzalloc(vpd_size, GFP_KERNEL);
6108         if (!vpd) {
6109                 rc = -ENOMEM;
6110                 goto out_free_mbox;
6111         }
6112
6113         rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
6114         if (unlikely(rc)) {
6115                 kfree(vpd);
6116                 goto out_free_mbox;
6117         }
6118         mqe = &mboxq->u.mqe;
6119         phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6120         if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
6121                 phba->hba_flag |= HBA_FCOE_MODE;
6122         else
6123                 phba->hba_flag &= ~HBA_FCOE_MODE;
6124
6125         if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6126                 LPFC_DCBX_CEE_MODE)
6127                 phba->hba_flag |= HBA_FIP_SUPPORT;
6128         else
6129                 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6130
6131         phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6132
6133         if (phba->sli_rev != LPFC_SLI_REV4) {
6134                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6135                         "0376 READ_REV Error. SLI Level %d "
6136                         "FCoE enabled %d\n",
6137                         phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
6138                 rc = -EIO;
6139                 kfree(vpd);
6140                 goto out_free_mbox;
6141         }
6142
6143         /*
6144          * Continue initialization with default values even if driver failed
6145          * to read FCoE param config regions; only read the parameters if
6146          * the board is FCoE.
6147          */
6148         if (phba->hba_flag & HBA_FCOE_MODE &&
6149             lpfc_sli4_read_fcoe_params(phba))
6150                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6151                         "2570 Failed to read FCoE parameters\n");
6152
6153         /*
6154          * Retrieve the sli4 device physical port name; failure to do so
6155          * is considered non-fatal.
6156          */
6157         rc = lpfc_sli4_retrieve_pport_name(phba);
6158         if (!rc)
6159                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6160                                 "3080 Successful retrieving SLI4 device "
6161                                 "physical port name: %s.\n", phba->Port);
6162
6163         /*
6164          * Evaluate the read rev and vpd data. Populate the driver
6165          * state with the results. If this routine fails, the failure
6166          * is not fatal as the driver will use generic values.
6167          */
6168         rc = lpfc_parse_vpd(phba, vpd, vpd_size);
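        /* Note: lpfc_parse_vpd() returns nonzero on success and 0 on
         * failure, so !rc below really is the error path.
         */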
6169         if (unlikely(!rc)) {
6170                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6171                                 "0377 Error %d parsing vpd. "
6172                                 "Using defaults.\n", rc);
6173                 rc = 0;
6174         }
6175         kfree(vpd);
6176
6177         /* Save information as VPD data */
6178         phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6179         phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6180         phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6181         phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6182                                          &mqe->un.read_rev);
6183         phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6184                                        &mqe->un.read_rev);
6185         phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6186                                             &mqe->un.read_rev);
6187         phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6188                                            &mqe->un.read_rev);
6189         phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6190         memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6191         phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6192         memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6193         phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6194         memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6195         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6196                         "(%d):0380 READ_REV Status x%x "
6197                         "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6198                         mboxq->vport ? mboxq->vport->vpi : 0,
6199                         bf_get(lpfc_mqe_status, mqe),
6200                         phba->vpd.rev.opFwName,
6201                         phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6202                         phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
6203
6204         /*
6205          * Discover the port's supported feature set and match it against the
6206          * host's requests.
6207          */
6208         lpfc_request_features(phba, mboxq);
6209         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6210         if (unlikely(rc)) {
6211                 rc = -EIO;
6212                 goto out_free_mbox;
6213         }
6214
6215         /*
6216          * The port must support FCP initiator mode as this is the
6217          * only mode running in the host.
6218          */
6219         if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6220                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6221                                 "0378 No support for fcpi mode.\n");
6222                 ftr_rsp++;
6223         }
6224         if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6225                 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6226         else
6227                 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
6228         /*
6229          * If the port cannot support the host's requested features
6230          * then turn off the global config parameters to disable the
6231          * feature in the driver.  This is not a fatal error.
6232          */
6233         phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6234         if (phba->cfg_enable_bg) {
6235                 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6236                         phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6237                 else
6238                         ftr_rsp++;
6239         }
6240
6241         if (phba->max_vpi && phba->cfg_enable_npiv &&
6242             !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6243                 ftr_rsp++;
6244
6245         if (ftr_rsp) {
6246                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6247                                 "0379 Feature Mismatch Data: x%08x %08x "
6248                                 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6249                                 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6250                                 phba->cfg_enable_npiv, phba->max_vpi);
6251                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6252                         phba->cfg_enable_bg = 0;
6253                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6254                         phba->cfg_enable_npiv = 0;
6255         }
6256
6257         /* These SLI3 features are assumed in SLI4 */
6258         spin_lock_irq(&phba->hbalock);
6259         phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6260         spin_unlock_irq(&phba->hbalock);
6261
6262         /*
6263          * Allocate all resources (xri, rpi, vpi, vfi) now.  Subsequent
6264          * calls depend on these resources to complete port setup.
6265          */
6266         rc = lpfc_sli4_alloc_resource_identifiers(phba);
6267         if (rc) {
6268                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6269                                 "2920 Failed to alloc Resource IDs "
6270                                 "rc = x%x\n", rc);
6271                 goto out_free_mbox;
6272         }
6273
6274         /* Read the port's service parameters. */
6275         rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6276         if (rc) {
6277                 phba->link_state = LPFC_HBA_ERROR;
6278                 rc = -ENOMEM;
6279                 goto out_free_mbox;
6280         }
6281
6282         mboxq->vport = vport;
6283         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6284         mp = (struct lpfc_dmabuf *) mboxq->context1;
6285         if (rc == MBX_SUCCESS) {
6286                 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6287                 rc = 0;
6288         }
6289
6290         /*
6291          * This memory was allocated by the lpfc_read_sparam routine. Release
6292          * it to the mbuf pool.
6293          */
6294         lpfc_mbuf_free(phba, mp->virt, mp->phys);
6295         kfree(mp);
6296         mboxq->context1 = NULL;
6297         if (unlikely(rc)) {
6298                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6299                                 "0382 READ_SPARAM command failed "
6300                                 "status %d, mbxStatus x%x\n",
6301                                 rc, bf_get(lpfc_mqe_status, mqe));
6302                 phba->link_state = LPFC_HBA_ERROR;
6303                 rc = -EIO;
6304                 goto out_free_mbox;
6305         }
6306
6307         lpfc_update_vport_wwn(vport);
6308
6309         /* Update the fc_host data structures with new wwn. */
6310         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6311         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6312
6313         /* update host els and scsi xri-sgl sizes and mappings */
6314         rc = lpfc_sli4_xri_sgl_update(phba);
6315         if (unlikely(rc)) {
6316                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6317                                 "1400 Failed to update xri-sgl size and "
6318                                 "mapping: %d\n", rc);
6319                 goto out_free_mbox;
6320         }
6321
6322         /* register the els sgl pool to the port */
6323         rc = lpfc_sli4_repost_els_sgl_list(phba);
6324         if (unlikely(rc)) {
6325                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6326                                 "0582 Error %d during els sgl post "
6327                                 "operation\n", rc);
6328                 rc = -ENODEV;
6329                 goto out_free_mbox;
6330         }
6331
6332         /* register the allocated scsi sgl pool to the port */
6333         rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6334         if (unlikely(rc)) {
6335                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6336                                 "0383 Error %d during scsi sgl post "
6337                                 "operation\n", rc);
6338                 /* Some Scsi buffers were moved to the abort scsi list */
6339                 /* A pci function reset will repost them */
6340                 rc = -ENODEV;
6341                 goto out_free_mbox;
6342         }
6343
6344         /* Post the rpi header region to the device. */
6345         rc = lpfc_sli4_post_all_rpi_hdrs(phba);
6346         if (unlikely(rc)) {
6347                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6348                                 "0393 Error %d during rpi post operation\n",
6349                                 rc);
6350                 rc = -ENODEV;
6351                 goto out_free_mbox;
6352         }
6353         lpfc_sli4_node_prep(phba);
6354
6355         /* Create all the SLI4 queues */
6356         rc = lpfc_sli4_queue_create(phba);
6357         if (rc) {
6358                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6359                                 "3089 Failed to allocate queues\n");
6360                 rc = -ENODEV;
6361                 goto out_stop_timers;
6362         }
6363         /* Set up all the queues to the device */
6364         rc = lpfc_sli4_queue_setup(phba);
6365         if (unlikely(rc)) {
6366                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6367                                 "0381 Error %d during queue setup.\n ", rc);
6368                 goto out_destroy_queue;
6369         }
6370
6371         /* Arm the CQs and then EQs on device */
6372         lpfc_sli4_arm_cqeq_intr(phba);
6373
6374         /* Indicate device interrupt mode */
6375         phba->sli4_hba.intr_enable = 1;
6376
6377         /* Allow asynchronous mailbox command to go through */
6378         spin_lock_irq(&phba->hbalock);
6379         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6380         spin_unlock_irq(&phba->hbalock);
6381
6382         /* Post receive buffers to the device */
6383         lpfc_sli4_rb_setup(phba);
6384
6385         /* Reset HBA FCF states after HBA reset */
6386         phba->fcf.fcf_flag = 0;
6387         phba->fcf.current_rec.flag = 0;
6388
6389         /* Start the ELS watchdog timer */
6390         mod_timer(&vport->els_tmofunc,
6391                   jiffies + HZ * (phba->fc_ratov * 2));
6392
6393         /* Start heart beat timer */
6394         mod_timer(&phba->hb_tmofunc,
6395                   jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
6396         phba->hb_outstanding = 0;
6397         phba->last_completion_time = jiffies;
6398
6399         /* Start error attention (ERATT) polling timer */
6400         mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
6401
6402         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
6403         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
6404                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
6405                 if (!rc) {
6406                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6407                                         "2829 This device supports "
6408                                         "Advanced Error Reporting (AER)\n");
6409                         spin_lock_irq(&phba->hbalock);
6410                         phba->hba_flag |= HBA_AER_ENABLED;
6411                         spin_unlock_irq(&phba->hbalock);
6412                 } else {
6413                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6414                                         "2830 This device does not support "
6415                                         "Advanced Error Reporting (AER)\n");
6416                         phba->cfg_aer_support = 0;
6417                 }
6418                 rc = 0;
6419         }
6420
6421         if (!(phba->hba_flag & HBA_FCOE_MODE)) {
6422                 /*
6423                  * The FC Port needs to register FCFI (index 0)
6424                  */
6425                 lpfc_reg_fcfi(phba, mboxq);
6426                 mboxq->vport = phba->pport;
6427                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6428                 if (rc != MBX_SUCCESS)
6429                         goto out_unset_queue;
6430                 rc = 0;
6431                 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
6432                                         &mboxq->u.mqe.un.reg_fcfi);
6433
6434                 /* Check if the port is configured to be disabled */
6435                 lpfc_sli_read_link_ste(phba);
6436         }
6437
6438         /*
6439          * The port is ready, set the host's link state to LINK_DOWN
6440          * in preparation for link interrupts.
6441          */
6442         spin_lock_irq(&phba->hbalock);
6443         phba->link_state = LPFC_LINK_DOWN;
6444         spin_unlock_irq(&phba->hbalock);
6445         if (!(phba->hba_flag & HBA_FCOE_MODE) &&
6446             (phba->hba_flag & LINK_DISABLED)) {
6447                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6448                                 "3103 Adapter Link is disabled.\n");
6449                 lpfc_down_link(phba, mboxq);
6450                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6451                 if (rc != MBX_SUCCESS) {
6452                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6453                                         "3104 Adapter failed to issue "
6454                                         "DOWN_LINK mbox cmd, rc:x%x\n", rc);
6455                         goto out_unset_queue;
6456                 }
6457         } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
6458                 /* don't perform init_link on SLI4 FC port loopback test */
6459                 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
6460                         rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
6461                         if (rc)
6462                                 goto out_unset_queue;
6463                 }
6464         }
6465         mempool_free(mboxq, phba->mbox_mem_pool);
6466         return rc;
6467 out_unset_queue:
6468         /* Unset all the queues set up in this routine when error out */
6469         lpfc_sli4_queue_unset(phba);
6470 out_destroy_queue:
6471         lpfc_sli4_queue_destroy(phba);
6472 out_stop_timers:
6473         lpfc_stop_hba_timers(phba);
6474 out_free_mbox:
6475         mempool_free(mboxq, phba->mbox_mem_pool);
6476         return rc;
6477 }
6478
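/*
 * Illustrative sketch (not part of the driver): the routine above uses
 * the kernel's reverse-order goto-unwind idiom for its error paths.
 * Each label undoes only the setup steps that completed before the
 * failure, and the mailbox is always returned to its pool. A minimal
 * form of the same idiom, using a hypothetical helper name:
 */
static int lpfc_example_setup_unwind(struct lpfc_hba *phba)
{
	int rc;

	rc = lpfc_sli4_queue_create(phba);	/* step 1: allocate queues */
	if (rc)
		goto out_stop_timers;		/* undo earlier timer setup */
	rc = lpfc_sli4_queue_setup(phba);	/* step 2: set queues up on device */
	if (rc)
		goto out_destroy_queue;
	return 0;

out_destroy_queue:
	lpfc_sli4_queue_destroy(phba);		/* undo step 1 */
out_stop_timers:
	lpfc_stop_hba_timers(phba);
	return rc;
}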
6479 /**
6480  * lpfc_mbox_timeout - Timeout call back function for mbox timer
6481  * @ptr: context object - pointer to hba structure.
6482  *
6483  * This is the callback function for the mailbox timer. The mailbox
6484  * timer is armed when a new mailbox command is issued and the timer
6485  * is deleted when the mailbox completes. The function is called by
6486  * the kernel timer code when a mailbox does not complete within the
6487  * expected time. This function wakes up the worker thread to
6488  * process the mailbox timeout and returns. All the processing is
6489  * done by the worker thread function lpfc_mbox_timeout_handler.
6490  **/
6491 void
6492 lpfc_mbox_timeout(unsigned long ptr)
6493 {
6494         struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
6495         unsigned long iflag;
6496         uint32_t tmo_posted;
6497
6498         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
6499         tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
6500         if (!tmo_posted)
6501                 phba->pport->work_port_events |= WORKER_MBOX_TMO;
6502         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
6503
6504         if (!tmo_posted)
6505                 lpfc_worker_wake_up(phba);
6506         return;
6507 }
6508
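/*
 * Illustrative sketch (not part of the driver): how the timer that
 * fires lpfc_mbox_timeout() above is typically armed when a command is
 * issued and deleted on completion. The helper name is hypothetical;
 * the mod_timer()/del_timer() calls mirror the real issue and
 * completion paths in this file.
 */
static void lpfc_example_mbox_tmo_arm(struct lpfc_hba *phba,
				      LPFC_MBOXQ_t *pmbox)
{
	/* arm: per-command timeout in seconds, converted to jiffies */
	mod_timer(&phba->sli.mbox_tmo,
		  jiffies + (HZ * lpfc_mbox_tmo_val(phba, pmbox)));

	/* ... the command completes in the interrupt path ... */

	/* cancel: completion deletes the pending timer */
	del_timer(&phba->sli.mbox_tmo);
}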
6509
6510 /**
6511  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
6512  * @phba: Pointer to HBA context object.
6513  *
6514  * This function is called from worker thread when a mailbox command times out.
6515  * The caller is not required to hold any locks. This function will reset the
6516  * HBA and recover all the pending commands.
6517  **/
6518 void
6519 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
6520 {
6521         LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
6522         MAILBOX_t *mb = &pmbox->u.mb;
6523         struct lpfc_sli *psli = &phba->sli;
6524         struct lpfc_sli_ring *pring;
6525
6526         /* Check the pmbox pointer first.  There is a race condition
6527          * between the mbox timeout handler getting executed in the
6528          * worklist and the mailbox actually completing. When this
6529          * race condition occurs, the mbox_active will be NULL.
6530          */
6531         spin_lock_irq(&phba->hbalock);
6532         if (pmbox == NULL) {
6533                 lpfc_printf_log(phba, KERN_WARNING,
6534                                 LOG_MBOX | LOG_SLI,
6535                                 "0353 Active Mailbox cleared - mailbox timeout "
6536                                 "exiting\n");
6537                 spin_unlock_irq(&phba->hbalock);
6538                 return;
6539         }
6540
6541         /* Mbox cmd <mbxCommand> timeout */
6542         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6543                         "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
6544                         mb->mbxCommand,
6545                         phba->pport->port_state,
6546                         phba->sli.sli_flag,
6547                         phba->sli.mbox_active);
6548         spin_unlock_irq(&phba->hbalock);
6549
6550         /* Setting state unknown so lpfc_sli_abort_iocb_ring
6551          * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
6552          * it to fail all outstanding SCSI IO.
6553          */
6554         spin_lock_irq(&phba->pport->work_port_lock);
6555         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
6556         spin_unlock_irq(&phba->pport->work_port_lock);
6557         spin_lock_irq(&phba->hbalock);
6558         phba->link_state = LPFC_LINK_UNKNOWN;
6559         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
6560         spin_unlock_irq(&phba->hbalock);
6561
6562         pring = &psli->ring[psli->fcp_ring];
6563         lpfc_sli_abort_iocb_ring(phba, pring);
6564
6565         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6566                         "0345 Resetting board due to mailbox timeout\n");
6567
6568         /* Reset the HBA device */
6569         lpfc_reset_hba(phba);
6570 }
6571
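/*
 * Illustrative sketch (not part of the driver): the WORKER_MBOX_TMO
 * handshake between lpfc_mbox_timeout() and the worker thread. The
 * callback posts the event at most once under work_port_lock; the
 * worker clears it before acting, so each timeout is handled exactly
 * once. The consumer function name here is hypothetical.
 */
static void lpfc_example_consume_mbox_tmo(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->pport->work_port_lock);
	if (phba->pport->work_port_events & WORKER_MBOX_TMO) {
		phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
		spin_unlock_irq(&phba->pport->work_port_lock);
		lpfc_mbox_timeout_handler(phba);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);
}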
6572 /**
6573  * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
6574  * @phba: Pointer to HBA context object.
6575  * @pmbox: Pointer to mailbox object.
6576  * @flag: Flag indicating how the mailbox needs to be processed.
6577  *
6578  * This function is called by discovery code and HBA management code
6579  * to submit a mailbox command to firmware with SLI-3 interface spec. This
6580  * function gets the hbalock to protect the data structures.
6581  * The mailbox command can be submitted in polling mode, in which case
6582  * this function will wait in a polling loop for the completion of the
6583  * mailbox.
6584  * If the mailbox is submitted in no_wait mode (not polling), the
6585  * function will submit the command and return immediately without waiting
6586  * for the mailbox completion. The no_wait mode is supported only when the
6587  * HBA is in SLI2/SLI3 mode - interrupts are enabled.
6588  * The SLI interface allows only one mailbox pending at a time. If the
6589  * mailbox is issued in polling mode and there is already a mailbox
6590  * pending, then the function will return an error. If the mailbox is issued
6591  * in NO_WAIT mode and there is a mailbox pending already, the function
6592  * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
6593  * The sli layer owns the mailbox object until the completion of the mailbox
6594  * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
6595  * return codes the caller owns the mailbox command after the return of
6596  * the function.
6597  **/
6598 static int
6599 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
6600                        uint32_t flag)
6601 {
6602         MAILBOX_t *mb;
6603         struct lpfc_sli *psli = &phba->sli;
6604         uint32_t status, evtctr;
6605         uint32_t ha_copy, hc_copy;
6606         int i;
6607         unsigned long timeout;
6608         unsigned long drvr_flag = 0;
6609         uint32_t word0, ldata;
6610         void __iomem *to_slim;
6611         int processing_queue = 0;
6612
6613         spin_lock_irqsave(&phba->hbalock, drvr_flag);
6614         if (!pmbox) {
6615                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6616                 /* processing mbox queue from intr_handler */
6617                 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
6618                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6619                         return MBX_SUCCESS;
6620                 }
6621                 processing_queue = 1;
6622                 pmbox = lpfc_mbox_get(phba);
6623                 if (!pmbox) {
6624                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6625                         return MBX_SUCCESS;
6626                 }
6627         }
6628
6629         if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
6630                 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
6631                 if (!pmbox->vport) {
6632                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6633                         lpfc_printf_log(phba, KERN_ERR,
6634                                         LOG_MBOX | LOG_VPORT,
6635                                         "1806 Mbox x%x failed. No vport\n",
6636                                         pmbox->u.mb.mbxCommand);
6637                         dump_stack();
6638                         goto out_not_finished;
6639                 }
6640         }
6641
6642         /* If the PCI channel is in offline state, do not post mbox. */
6643         if (unlikely(pci_channel_offline(phba->pcidev))) {
6644                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6645                 goto out_not_finished;
6646         }
6647
6648         /* If the HBA has a deferred error attention, fail the mailbox command. */
6649         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
6650                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6651                 goto out_not_finished;
6652         }
6653
6656         mb = &pmbox->u.mb;
6657         status = MBX_SUCCESS;
6658
6659         if (phba->link_state == LPFC_HBA_ERROR) {
6660                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6661
6662                 /* Mbox command <mbxCommand> cannot issue */
6663                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6664                                 "(%d):0311 Mailbox command x%x cannot "
6665                                 "issue Data: x%x x%x\n",
6666                                 pmbox->vport ? pmbox->vport->vpi : 0,
6667                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
6668                 goto out_not_finished;
6669         }
6670
6671         if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
6672                 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
6673                         !(hc_copy & HC_MBINT_ENA)) {
6674                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6675                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6676                                 "(%d):2528 Mailbox command x%x cannot "
6677                                 "issue Data: x%x x%x\n",
6678                                 pmbox->vport ? pmbox->vport->vpi : 0,
6679                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
6680                         goto out_not_finished;
6681                 }
6682         }
6683
6684         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6685                 /* Polling for a mbox command when another one is already active
6686                  * is not allowed in SLI. Also, the driver must have established
6687                  * SLI2 mode to queue and process multiple mbox commands.
6688                  */
6689
6690                 if (flag & MBX_POLL) {
6691                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6692
6693                         /* Mbox command <mbxCommand> cannot issue */
6694                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6695                                         "(%d):2529 Mailbox command x%x "
6696                                         "cannot issue Data: x%x x%x\n",
6697                                         pmbox->vport ? pmbox->vport->vpi : 0,
6698                                         pmbox->u.mb.mbxCommand,
6699                                         psli->sli_flag, flag);
6700                         goto out_not_finished;
6701                 }
6702
6703                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
6704                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6705                         /* Mbox command <mbxCommand> cannot issue */
6706                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6707                                         "(%d):2530 Mailbox command x%x "
6708                                         "cannot issue Data: x%x x%x\n",
6709                                         pmbox->vport ? pmbox->vport->vpi : 0,
6710                                         pmbox->u.mb.mbxCommand,
6711                                         psli->sli_flag, flag);
6712                         goto out_not_finished;
6713                 }
6714
6715                 /* Another mailbox command is still being processed, queue this
6716                  * command to be processed later.
6717                  */
6718                 lpfc_mbox_put(phba, pmbox);
6719
6720                 /* Mbox cmd issue - BUSY */
6721                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6722                                 "(%d):0308 Mbox cmd issue - BUSY Data: "
6723                                 "x%x x%x x%x x%x\n",
6724                                 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
6725                                 mb->mbxCommand, phba->pport->port_state,
6726                                 psli->sli_flag, flag);
6727
6728                 psli->slistat.mbox_busy++;
6729                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6730
6731                 if (pmbox->vport) {
6732                         lpfc_debugfs_disc_trc(pmbox->vport,
6733                                 LPFC_DISC_TRC_MBOX_VPORT,
6734                                 "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
6735                                 (uint32_t)mb->mbxCommand,
6736                                 mb->un.varWords[0], mb->un.varWords[1]);
6737                 } else {
6739                         lpfc_debugfs_disc_trc(phba->pport,
6740                                 LPFC_DISC_TRC_MBOX,
6741                                 "MBOX Bsy:        cmd:x%x mb:x%x x%x",
6742                                 (uint32_t)mb->mbxCommand,
6743                                 mb->un.varWords[0], mb->un.varWords[1]);
6744                 }
6745
6746                 return MBX_BUSY;
6747         }
6748
6749         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
6750
6751         /* If we are not polling, we MUST be in SLI2 mode */
6752         if (flag != MBX_POLL) {
6753                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
6754                     (mb->mbxCommand != MBX_KILL_BOARD)) {
6755                         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6756                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6757                         /* Mbox command <mbxCommand> cannot issue */
6758                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6759                                         "(%d):2531 Mailbox command x%x "
6760                                         "cannot issue Data: x%x x%x\n",
6761                                         pmbox->vport ? pmbox->vport->vpi : 0,
6762                                         pmbox->u.mb.mbxCommand,
6763                                         psli->sli_flag, flag);
6764                         goto out_not_finished;
6765                 }
6766                 /* timeout active mbox command */
6767                 mod_timer(&psli->mbox_tmo, (jiffies +
6768                                (HZ * lpfc_mbox_tmo_val(phba, pmbox))));
6769         }
6770
6771         /* Mailbox cmd <cmd> issue */
6772         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6773                         "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
6774                         "x%x\n",
6775                         pmbox->vport ? pmbox->vport->vpi : 0,
6776                         mb->mbxCommand, phba->pport->port_state,
6777                         psli->sli_flag, flag);
6778
6779         if (mb->mbxCommand != MBX_HEARTBEAT) {
6780                 if (pmbox->vport) {
6781                         lpfc_debugfs_disc_trc(pmbox->vport,
6782                                 LPFC_DISC_TRC_MBOX_VPORT,
6783                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
6784                                 (uint32_t)mb->mbxCommand,
6785                                 mb->un.varWords[0], mb->un.varWords[1]);
6786                 } else {
6788                         lpfc_debugfs_disc_trc(phba->pport,
6789                                 LPFC_DISC_TRC_MBOX,
6790                                 "MBOX Send:       cmd:x%x mb:x%x x%x",
6791                                 (uint32_t)mb->mbxCommand,
6792                                 mb->un.varWords[0], mb->un.varWords[1]);
6793                 }
6794         }
6795
6796         psli->slistat.mbox_cmd++;
6797         evtctr = psli->slistat.mbox_event;
6798
6799         /* next set own bit for the adapter and copy over command word */
6800         mb->mbxOwner = OWN_CHIP;
6801
6802         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
6803                 /* Populate mbox extension offset word. */
6804                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
6805                         *(((uint32_t *)mb) + pmbox->mbox_offset_word)
6806                                 = (uint8_t *)phba->mbox_ext
6807                                   - (uint8_t *)phba->mbox;
6808                 }
6809
6810                 /* Copy the mailbox extension data */
6811                 if (pmbox->in_ext_byte_len && pmbox->context2) {
6812                         lpfc_sli_pcimem_bcopy(pmbox->context2,
6813                                 (uint8_t *)phba->mbox_ext,
6814                                 pmbox->in_ext_byte_len);
6815                 }
6816                 /* Copy command data to host SLIM area */
6817                 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
6818         } else {
6819                 /* Populate mbox extension offset word. */
6820                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
6821                         *(((uint32_t *)mb) + pmbox->mbox_offset_word)
6822                                 = MAILBOX_HBA_EXT_OFFSET;
6823
6824                 /* Copy the mailbox extension data */
6825                 if (pmbox->in_ext_byte_len && pmbox->context2) {
6826                         lpfc_memcpy_to_slim(phba->MBslimaddr +
6827                                 MAILBOX_HBA_EXT_OFFSET,
6828                                 pmbox->context2, pmbox->in_ext_byte_len);
6829
6830                 }
6831                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
6832                         /* copy command data into host mbox for cmpl */
6833                         lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
6834                 }
6835
6836                 /* First copy mbox command data to HBA SLIM, skip past first
6837                    word */
6838                 to_slim = phba->MBslimaddr + sizeof (uint32_t);
6839                 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
6840                             MAILBOX_CMD_SIZE - sizeof (uint32_t));
6841
6842                 /* Next copy over first word, with mbxOwner set */
6843                 ldata = *((uint32_t *)mb);
6844                 to_slim = phba->MBslimaddr;
6845                 writel(ldata, to_slim);
6846                 readl(to_slim); /* flush */
6847
6848                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
6849                         /* switch over to host mailbox */
6850                         psli->sli_flag |= LPFC_SLI_ACTIVE;
6851                 }
6852         }
6853
6854         wmb();
6855
6856         switch (flag) {
6857         case MBX_NOWAIT:
6858                 /* Set up reference to mailbox command */
6859                 psli->mbox_active = pmbox;
6860                 /* Interrupt board to do it */
6861                 writel(CA_MBATT, phba->CAregaddr);
6862                 readl(phba->CAregaddr); /* flush */
6863                 /* Don't wait for it to finish, just return */
6864                 break;
6865
6866         case MBX_POLL:
6867                 /* Set up null reference to mailbox command */
6868                 psli->mbox_active = NULL;
6869                 /* Interrupt board to do it */
6870                 writel(CA_MBATT, phba->CAregaddr);
6871                 readl(phba->CAregaddr); /* flush */
6872
6873                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
6874                         /* First read mbox status word */
6875                         word0 = *((uint32_t *)phba->mbox);
6876                         word0 = le32_to_cpu(word0);
6877                 } else {
6878                         /* First read mbox status word */
6879                         if (lpfc_readl(phba->MBslimaddr, &word0)) {
6880                                 spin_unlock_irqrestore(&phba->hbalock,
6881                                                        drvr_flag);
6882                                 goto out_not_finished;
6883                         }
6884                 }
6885
6886                 /* Read the HBA Host Attention Register */
6887                 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
6888                         spin_unlock_irqrestore(&phba->hbalock,
6889                                                        drvr_flag);
6890                         goto out_not_finished;
6891                 }
6892                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
6893                                                         1000) + jiffies;
6894                 i = 0;
6895                 /* Wait for command to complete */
6896                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
6897                        (!(ha_copy & HA_MBATT) &&
6898                         (phba->link_state > LPFC_WARM_START))) {
6899                         if (time_after(jiffies, timeout)) {
6900                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6901                                 spin_unlock_irqrestore(&phba->hbalock,
6902                                                        drvr_flag);
6903                                 goto out_not_finished;
6904                         }
6905
6906                         /* Check if we took a mbox interrupt while we were
6907                            polling */
6908                         if (((word0 & OWN_CHIP) != OWN_CHIP)
6909                             && (evtctr != psli->slistat.mbox_event))
6910                                 break;
6911
6912                         if (i++ > 10) {
6913                                 spin_unlock_irqrestore(&phba->hbalock,
6914                                                        drvr_flag);
6915                                 msleep(1);
6916                                 spin_lock_irqsave(&phba->hbalock, drvr_flag);
6917                         }
6918
6919                         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
6920                                 /* First copy command data */
6921                                 word0 = *((uint32_t *)phba->mbox);
6922                                 word0 = le32_to_cpu(word0);
6923                                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
6924                                         MAILBOX_t *slimmb;
6925                                         uint32_t slimword0;
6926                                         /* Check real SLIM for any errors */
6927                                         slimword0 = readl(phba->MBslimaddr);
6928                                         slimmb = (MAILBOX_t *)&slimword0;
6929                                         if (((slimword0 & OWN_CHIP) != OWN_CHIP)
6930                                             && slimmb->mbxStatus) {
6931                                                 psli->sli_flag &=
6932                                                     ~LPFC_SLI_ACTIVE;
6933                                                 word0 = slimword0;
6934                                         }
6935                                 }
6936                         } else {
6937                                 /* First copy command data */
6938                                 word0 = readl(phba->MBslimaddr);
6939                         }
6940                         /* Read the HBA Host Attention Register */
6941                         if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
6942                                 spin_unlock_irqrestore(&phba->hbalock,
6943                                                        drvr_flag);
6944                                 goto out_not_finished;
6945                         }
6946                 }
6947
6948                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
6949                         /* copy results back to user */
6950                         lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
6951                         /* Copy the mailbox extension data */
6952                         if (pmbox->out_ext_byte_len && pmbox->context2) {
6953                                 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
6954                                                       pmbox->context2,
6955                                                       pmbox->out_ext_byte_len);
6956                         }
6957                 } else {
6958                         /* First copy command data */
6959                         lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
6960                                                         MAILBOX_CMD_SIZE);
6961                         /* Copy the mailbox extension data */
6962                         if (pmbox->out_ext_byte_len && pmbox->context2) {
6963                                 lpfc_memcpy_from_slim(pmbox->context2,
6964                                         phba->MBslimaddr +
6965                                         MAILBOX_HBA_EXT_OFFSET,
6966                                         pmbox->out_ext_byte_len);
6967                         }
6968                 }
6969
6970                 writel(HA_MBATT, phba->HAregaddr);
6971                 readl(phba->HAregaddr); /* flush */
6972
6973                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6974                 status = mb->mbxStatus;
6975         }
6976
6977         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6978         return status;
6979
6980 out_not_finished:
6981         if (processing_queue) {
6982                 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
6983                 lpfc_mbox_cmpl_put(phba, pmbox);
6984         }
6985         return MBX_NOT_FINISHED;
6986 }
6987
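/*
 * Illustrative sketch (not part of the driver): caller-side ownership
 * rules for the routine above, per its header comment. On MBX_BUSY or
 * MBX_SUCCESS the SLI layer owns the mailbox until completion; for any
 * other return the caller must reclaim it. The caller function name is
 * hypothetical; lpfc_heart_beat() just fills in a simple command.
 */
static void lpfc_example_issue_poll(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return;
	lpfc_heart_beat(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc == MBX_SUCCESS)
		/* polled command completed; results are in mboxq->u.mb */
		mempool_free(mboxq, phba->mbox_mem_pool);
	else if (rc != MBX_BUSY)
		/* error return: ownership stays with the caller */
		mempool_free(mboxq, phba->mbox_mem_pool);
}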
6988 /**
6989  * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
6990  * @phba: Pointer to HBA context object.
6991  *
6992  * The function blocks the posting of SLI4 asynchronous mailbox commands from
6993  * the driver internal pending mailbox queue. It will then try to wait out the
6994  * possible outstanding mailbox command before returning.
6995  *
6996  * Returns:
6997  *      0 - the outstanding mailbox command completed;
6998  *      1 - the wait for the outstanding mailbox command timed out.
6999  **/
7000 static int
7001 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
7002 {
7003         struct lpfc_sli *psli = &phba->sli;
7004         int rc = 0;
7005         unsigned long timeout = 0;
7006
7007         /* Mark the asynchronous mailbox command posting as blocked */
7008         spin_lock_irq(&phba->hbalock);
7009         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7010         /* Determine how long we might wait for the active mailbox
7011          * command to be gracefully completed by firmware.
7012          */
7013         if (phba->sli.mbox_active)
7014                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
7015                                                 phba->sli.mbox_active) *
7016                                                 1000) + jiffies;
7017         spin_unlock_irq(&phba->hbalock);
7018
7019         /* Wait for the outstanding mailbox command to complete */
7020         while (phba->sli.mbox_active) {
7021                 /* Check active mailbox complete status every 2ms */
7022                 msleep(2);
7023                 if (time_after(jiffies, timeout)) {
7024                         /* Timeout: mark the outstanding cmd as not complete */
7025                         rc = 1;
7026                         break;
7027                 }
7028         }
7029
7030         /* Cannot cleanly block async mailbox commands, so undo the block */
7031         if (rc) {
7032                 spin_lock_irq(&phba->hbalock);
7033                 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7034                 spin_unlock_irq(&phba->hbalock);
7035         }
7036         return rc;
7037 }
7038
7039 /**
7040  * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
7041  * @phba: Pointer to HBA context object.
7042  *
7043  * The function unblocks and resumes posting of SLI4 asynchronous mailbox
7044  * commands from the driver internal pending mailbox queue. It makes sure
7045  * that there is no outstanding mailbox command before resuming posting
7046  * asynchronous mailbox commands. If, for any reason, there is an outstanding
7047  * mailbox command, it will try to wait it out before resuming asynchronous
7048  * mailbox command posting.
7049  **/
7050 static void
7051 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7052 {
7053         struct lpfc_sli *psli = &phba->sli;
7054
7055         spin_lock_irq(&phba->hbalock);
7056         if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7057                 /* Asynchronous mailbox posting is not blocked, do nothing */
7058                 spin_unlock_irq(&phba->hbalock);
7059                 return;
7060         }
7061
7062         /* Any outstanding synchronous mailbox command is guaranteed to be
7063          * done by now, either successfully or by timeout; a timed-out
7064          * command is always removed. So just unblock async mailbox
7065          * command posting and resume.
7066          */
7067         psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7068         spin_unlock_irq(&phba->hbalock);
7069
7070         /* wake up worker thread to post asynchronous mailbox command */
7071         lpfc_worker_wake_up(phba);
7072 }
7073
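/*
 * Illustrative sketch (not part of the driver): the block/unblock pair
 * above brackets a synchronous bootstrap-mailbox operation so that no
 * asynchronous mailbox command can be posted underneath it, mirroring
 * what lpfc_sli_issue_mbox_s4() does for MBX_POLL. The wrapper name is
 * hypothetical; lpfc_sli4_post_sync_mbox() is defined just below.
 */
static int lpfc_example_sync_window(struct lpfc_hba *phba,
				    LPFC_MBOXQ_t *mboxq)
{
	int rc;

	if (lpfc_sli4_async_mbox_block(phba))
		return MBXERR_ERROR;	/* active command never drained */
	rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
	lpfc_sli4_async_mbox_unblock(phba);
	return rc;
}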
7074 /**
7075  * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
7076  * @phba: Pointer to HBA context object.
7077  * @mboxq: Pointer to mailbox object.
7078  *
7079  * The function waits for the bootstrap mailbox register ready bit from
7080  * the port for twice the regular mailbox command timeout value.
7081  *
7082  * Returns:
7083  *      0 - no timeout waiting for ready; MBXERR_ERROR - the wait timed out.
7084  **/
7085 static int
7086 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7087 {
7088         uint32_t db_ready;
7089         unsigned long timeout;
7090         struct lpfc_register bmbx_reg;
7091
7092         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7093                                    * 1000) + jiffies;
7094
7095         do {
7096                 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7097                 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7098                 if (!db_ready)
7099                         msleep(2);
7100
7101                 if (time_after(jiffies, timeout))
7102                         return MBXERR_ERROR;
7103         } while (!db_ready);
7104
7105         return 0;
7106 }
7107
7108 /**
7109  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7110  * @phba: Pointer to HBA context object.
7111  * @mboxq: Pointer to mailbox object.
7112  *
7113  * The function posts a mailbox to the port.  The mailbox is expected
7114  * to be completely filled in and ready for the port to operate on it.
7115  * This routine executes a synchronous completion operation on the
7116  * mailbox by polling for its completion.
7117  *
7118  * The caller must not be holding any locks when calling this routine.
7119  *
7120  * Returns:
7121  *      MBX_SUCCESS - mailbox posted successfully
7122  *      Any of the MBX error values.
7123  **/
7124 static int
7125 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7126 {
7127         int rc = MBX_SUCCESS;
7128         unsigned long iflag;
7129         uint32_t mcqe_status;
7130         uint32_t mbx_cmnd;
7131         struct lpfc_sli *psli = &phba->sli;
7132         struct lpfc_mqe *mb = &mboxq->u.mqe;
7133         struct lpfc_bmbx_create *mbox_rgn;
7134         struct dma_address *dma_address;
7135
7136         /*
7137          * Only one mailbox can be active to the bootstrap mailbox region
7138          * at a time and there is no queueing provided.
7139          */
7140         spin_lock_irqsave(&phba->hbalock, iflag);
7141         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7142                 spin_unlock_irqrestore(&phba->hbalock, iflag);
7143                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7144                                 "(%d):2532 Mailbox command x%x (x%x/x%x) "
7145                                 "cannot issue Data: x%x x%x\n",
7146                                 mboxq->vport ? mboxq->vport->vpi : 0,
7147                                 mboxq->u.mb.mbxCommand,
7148                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7149                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7150                                 psli->sli_flag, MBX_POLL);
7151                 return MBXERR_ERROR;
7152         }
7153         /* The driver grabs the token and owns it until release */
7154         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7155         phba->sli.mbox_active = mboxq;
7156         spin_unlock_irqrestore(&phba->hbalock, iflag);
7157
7158         /* wait for the bootstrap mbox register to become ready */
7159         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7160         if (rc)
7161                 goto exit;
7162
7163         /*
7164          * Initialize the bootstrap memory region to avoid stale data areas
7165          * in the mailbox post.  Then copy the caller's mailbox contents to
7166          * the bmbx mailbox region.
7167          */
7168         mbx_cmnd = bf_get(lpfc_mqe_command, mb);
7169         memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
7170         lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
7171                               sizeof(struct lpfc_mqe));
7172
7173         /* Post the high mailbox dma address to the port and wait for ready. */
7174         dma_address = &phba->sli4_hba.bmbx.dma_address;
7175         writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7176
7177         /* wait for bootstrap mbox register ready after hi-address write */
7178         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7179         if (rc)
7180                 goto exit;
7181
7182         /* Post the low mailbox dma address to the port. */
7183         writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
7184
7185         /* wait for bootstrap mbox register ready after low-address write */
7186         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7187         if (rc)
7188                 goto exit;
7189
7190         /*
7191          * Read the CQ to ensure the mailbox has completed.
7192          * If so, update the mailbox status so that the upper layers
7193          * can complete the request normally.
7194          */
7195         lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
7196                               sizeof(struct lpfc_mqe));
7197         mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
7198         lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
7199                               sizeof(struct lpfc_mcqe));
7200         mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
7201         /*
7202          * When the CQE status indicates a failure and the mailbox status
7203          * indicates success then copy the CQE status into the mailbox status
7204          * (and prefix it with x4000).
7205          */
7206         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
7207                 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
7208                         bf_set(lpfc_mqe_status, mb,
7209                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
7210                 rc = MBXERR_ERROR;
7211         } else
7212                 lpfc_sli4_swap_str(phba, mboxq);
7213
7214         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7215                         "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
7216                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
7217                         " x%x x%x CQ: x%x x%x x%x x%x\n",
7218                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7219                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7220                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7221                         bf_get(lpfc_mqe_status, mb),
7222                         mb->un.mb_words[0], mb->un.mb_words[1],
7223                         mb->un.mb_words[2], mb->un.mb_words[3],
7224                         mb->un.mb_words[4], mb->un.mb_words[5],
7225                         mb->un.mb_words[6], mb->un.mb_words[7],
7226                         mb->un.mb_words[8], mb->un.mb_words[9],
7227                         mb->un.mb_words[10], mb->un.mb_words[11],
7228                         mb->un.mb_words[12], mboxq->mcqe.word0,
7229                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
7230                         mboxq->mcqe.trailer);
7231 exit:
7232         /* We are holding the token; release it under the lock */
7233         spin_lock_irqsave(&phba->hbalock, iflag);
7234         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7235         phba->sli.mbox_active = NULL;
7236         spin_unlock_irqrestore(&phba->hbalock, iflag);
7237         return rc;
7238 }
7239
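/*
 * Illustrative sketch (not part of the driver): the CQE-to-mailbox
 * status merge performed above. A failing MCQE status of, say, 0x2
 * combined with a successful MQE status yields 0x4002
 * (LPFC_MBX_ERROR_RANGE | 0x2), letting upper layers distinguish a
 * CQE-reported failure from a native mailbox error. Hypothetical
 * helper name.
 */
static uint32_t lpfc_example_merge_status(uint32_t mqe_status,
					  uint32_t mcqe_status)
{
	if (mcqe_status != MB_CQE_STATUS_SUCCESS && mqe_status == MBX_SUCCESS)
		return LPFC_MBX_ERROR_RANGE | mcqe_status;
	return mqe_status;
}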
7240 /**
7241  * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
7242  * @phba: Pointer to HBA context object.
7243  * @mboxq: Pointer to mailbox object.
7244  * @flag: Flag indicating how the mailbox needs to be processed.
7245  *
7246  * This function is called by discovery code and HBA management code to submit
7247  * a mailbox command to firmware with SLI-4 interface spec.
7248  *
7249  * Return codes: the caller owns the mailbox command after the return of
7250  * the function.
7251  **/
7252 static int
7253 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
7254                        uint32_t flag)
7255 {
7256         struct lpfc_sli *psli = &phba->sli;
7257         unsigned long iflags;
7258         int rc;
7259
7260         /* dump the mailbox command at issue time if debugfs dump is set up */
7261         lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
7262
7263         rc = lpfc_mbox_dev_check(phba);
7264         if (unlikely(rc)) {
7265                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7266                                 "(%d):2544 Mailbox command x%x (x%x/x%x) "
7267                                 "cannot issue Data: x%x x%x\n",
7268                                 mboxq->vport ? mboxq->vport->vpi : 0,
7269                                 mboxq->u.mb.mbxCommand,
7270                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7271                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7272                                 psli->sli_flag, flag);
7273                 goto out_not_finished;
7274         }
7275
7276         /* Detect polling mode and jump to a handler */
7277         if (!phba->sli4_hba.intr_enable) {
7278                 if (flag == MBX_POLL)
7279                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7280                 else
7281                         rc = -EIO;
7282                 if (rc != MBX_SUCCESS)
7283                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7284                                         "(%d):2541 Mailbox command x%x "
7285                                         "(x%x/x%x) failure: "
7286                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
7287                                         "Data: x%x x%x\n",
7288                                         mboxq->vport ? mboxq->vport->vpi : 0,
7289                                         mboxq->u.mb.mbxCommand,
7290                                         lpfc_sli_config_mbox_subsys_get(phba,
7291                                                                         mboxq),
7292                                         lpfc_sli_config_mbox_opcode_get(phba,
7293                                                                         mboxq),
7294                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7295                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7296                                         bf_get(lpfc_mcqe_ext_status,
7297                                                &mboxq->mcqe),
7298                                         psli->sli_flag, flag);
7299                 return rc;
7300         } else if (flag == MBX_POLL) {
7301                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7302                                 "(%d):2542 Try to issue mailbox command "
7303                                 "x%x (x%x/x%x) synchronously ahead of async "
7304                                 "mailbox command queue: x%x x%x\n",
7305                                 mboxq->vport ? mboxq->vport->vpi : 0,
7306                                 mboxq->u.mb.mbxCommand,
7307                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7308                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7309                                 psli->sli_flag, flag);
7310                 /* Try to block the asynchronous mailbox posting */
7311                 rc = lpfc_sli4_async_mbox_block(phba);
7312                 if (!rc) {
7313                         /* Successfully blocked, now issue sync mbox cmd */
7314                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7315                         if (rc != MBX_SUCCESS)
7316                                 lpfc_printf_log(phba, KERN_WARNING,
7317                                         LOG_MBOX | LOG_SLI,
7318                                         "(%d):2597 Sync Mailbox command "
7319                                         "x%x (x%x/x%x) failure: "
7320                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
7321                                         "Data: x%x x%x\n",
7322                                         mboxq->vport ? mboxq->vport->vpi : 0,
7323                                         mboxq->u.mb.mbxCommand,
7324                                         lpfc_sli_config_mbox_subsys_get(phba,
7325                                                                         mboxq),
7326                                         lpfc_sli_config_mbox_opcode_get(phba,
7327                                                                         mboxq),
7328                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7329                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7330                                         bf_get(lpfc_mcqe_ext_status,
7331                                                &mboxq->mcqe),
7332                                         psli->sli_flag, flag);
7333                         /* Unblock the async mailbox posting afterward */
7334                         lpfc_sli4_async_mbox_unblock(phba);
7335                 }
7336                 return rc;
7337         }
7338
7339         /* Now, interrupt mode asynchronous mailbox command */
7340         rc = lpfc_mbox_cmd_check(phba, mboxq);
7341         if (rc) {
7342                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7343                                 "(%d):2543 Mailbox command x%x (x%x/x%x) "
7344                                 "cannot issue Data: x%x x%x\n",
7345                                 mboxq->vport ? mboxq->vport->vpi : 0,
7346                                 mboxq->u.mb.mbxCommand,
7347                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7348                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7349                                 psli->sli_flag, flag);
7350                 goto out_not_finished;
7351         }
7352
7353         /* Put the mailbox command to the driver internal FIFO */
7354         psli->slistat.mbox_busy++;
7355         spin_lock_irqsave(&phba->hbalock, iflags);
7356         lpfc_mbox_put(phba, mboxq);
7357         spin_unlock_irqrestore(&phba->hbalock, iflags);
7358         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7359                         "(%d):0354 Mbox cmd issue - Enqueue Data: "
7360                         "x%x (x%x/x%x) x%x x%x x%x\n",
7361                         mboxq->vport ? mboxq->vport->vpi : 0xffffff,
7362                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7363                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7364                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7365                         phba->pport->port_state,
7366                         psli->sli_flag, MBX_NOWAIT);
7367         /* Wake up worker thread to transport mailbox command from head */
7368         lpfc_worker_wake_up(phba);
7369
7370         return MBX_BUSY;
7371
7372 out_not_finished:
7373         return MBX_NOT_FINISHED;
7374 }
7375
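/*
 * Illustrative sketch (not part of the driver): asynchronous issuance
 * through the routine above. For MBX_NOWAIT on SLI4, MBX_BUSY is the
 * normal "queued" outcome and the completion handler runs later; only
 * MBX_NOT_FINISHED leaves the mailbox with the caller. The caller
 * function name is hypothetical.
 */
static int lpfc_example_issue_nowait(struct lpfc_hba *phba,
				     LPFC_MBOXQ_t *mboxq)
{
	int rc;

	mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;	/* default completion */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;	/* SLI layer now owns the mailbox */
}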
7376 /**
7377  * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
7378  * @phba: Pointer to HBA context object.
7379  *
7380  * This function is called by worker thread to send a mailbox command to
7381  * SLI4 HBA firmware.
7382  *
7383  **/
7384 int
7385 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
7386 {
7387         struct lpfc_sli *psli = &phba->sli;
7388         LPFC_MBOXQ_t *mboxq;
7389         int rc = MBX_SUCCESS;
7390         unsigned long iflags;
7391         struct lpfc_mqe *mqe;
7392         uint32_t mbx_cmnd;
7393
7394         /* Check interrupt mode before posting async mailbox command */
7395         if (unlikely(!phba->sli4_hba.intr_enable))
7396                 return MBX_NOT_FINISHED;
7397
7398         /* Check for mailbox command service token */
7399         spin_lock_irqsave(&phba->hbalock, iflags);
7400         if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7401                 spin_unlock_irqrestore(&phba->hbalock, iflags);
7402                 return MBX_NOT_FINISHED;
7403         }
7404         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7405                 spin_unlock_irqrestore(&phba->hbalock, iflags);
7406                 return MBX_NOT_FINISHED;
7407         }
7408         if (unlikely(phba->sli.mbox_active)) {
7409                 spin_unlock_irqrestore(&phba->hbalock, iflags);
7410                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7411                                 "0384 There is pending active mailbox cmd\n");
7412                 return MBX_NOT_FINISHED;
7413         }
7414         /* Take the mailbox command service token */
7415         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7416
7417         /* Get the next mailbox command from head of queue */
7418         mboxq = lpfc_mbox_get(phba);
7419
7420         /* If no mailbox command is waiting for post, we're done */
7421         if (!mboxq) {
7422                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7423                 spin_unlock_irqrestore(&phba->hbalock, iflags);
7424                 return MBX_SUCCESS;
7425         }
7426         phba->sli.mbox_active = mboxq;
7427         spin_unlock_irqrestore(&phba->hbalock, iflags);
7428
7429         /* Check device readiness for posting mailbox command */
7430         rc = lpfc_mbox_dev_check(phba);
7431         if (unlikely(rc))
7432                 /* Driver cleanup routine will clean up the pending mailbox */
7433                 goto out_not_finished;
7434
7435         /* Prepare the mbox command to be posted */
7436         mqe = &mboxq->u.mqe;
7437         mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
7438
7439         /* Start timer for the mbox_tmo and log some mailbox post messages */
7440         mod_timer(&psli->mbox_tmo, (jiffies +
7441                   (HZ * lpfc_mbox_tmo_val(phba, mboxq))));
7442
7443         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7444                         "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
7445                         "x%x x%x\n",
7446                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7447                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7448                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7449                         phba->pport->port_state, psli->sli_flag);
7450
7451         if (mbx_cmnd != MBX_HEARTBEAT) {
7452                 if (mboxq->vport) {
7453                         lpfc_debugfs_disc_trc(mboxq->vport,
7454                                 LPFC_DISC_TRC_MBOX_VPORT,
7455                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7456                                 mbx_cmnd, mqe->un.mb_words[0],
7457                                 mqe->un.mb_words[1]);
7458                 } else {
7459                         lpfc_debugfs_disc_trc(phba->pport,
7460                                 LPFC_DISC_TRC_MBOX,
7461                                 "MBOX Send: cmd:x%x mb:x%x x%x",
7462                                 mbx_cmnd, mqe->un.mb_words[0],
7463                                 mqe->un.mb_words[1]);
7464                 }
7465         }
7466         psli->slistat.mbox_cmd++;
7467
7468         /* Post the mailbox command to the port */
7469         rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
7470         if (rc != MBX_SUCCESS) {
7471                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7472                                 "(%d):2533 Mailbox command x%x (x%x/x%x) "
7473                                 "cannot issue Data: x%x x%x\n",
7474                                 mboxq->vport ? mboxq->vport->vpi : 0,
7475                                 mboxq->u.mb.mbxCommand,
7476                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7477                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7478                                 psli->sli_flag, MBX_NOWAIT);
7479                 goto out_not_finished;
7480         }
7481
7482         return rc;
7483
7484 out_not_finished:
7485         spin_lock_irqsave(&phba->hbalock, iflags);
7486         if (phba->sli.mbox_active) {
7487                 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7488                 __lpfc_mbox_cmpl_put(phba, mboxq);
7489                 /* Release the token */
7490                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7491                 phba->sli.mbox_active = NULL;
7492         }
7493         spin_unlock_irqrestore(&phba->hbalock, iflags);
7494
7495         return MBX_NOT_FINISHED;
7496 }
7497
7498 /**
7499  * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
7500  * @phba: Pointer to HBA context object.
7501  * @pmbox: Pointer to mailbox object.
7502  * @flag: Flag indicating how the mailbox needs to be processed.
7503  *
7504  * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
7505  * the API jump table function pointer from the lpfc_hba struct.
7506  *
7507  * Return codes: the caller owns the mailbox command after the return of
7508  * the function.
7509  **/
7510 int
7511 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
7512 {
7513         return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
7514 }
7515
7516 /**
7517  * lpfc_mbox_api_table_setup - Set up mbox api function jump table
7518  * @phba: The hba struct for which this call is being executed.
7519  * @dev_grp: The HBA PCI-Device group number.
7520  *
7521  * This routine sets up the mbox interface API function jump table in @phba
7522  * struct.
7523  * Returns: 0 - success, -ENODEV - failure.
7524  **/
7525 int
7526 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7527 {
7529         switch (dev_grp) {
7530         case LPFC_PCI_DEV_LP:
7531                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
7532                 phba->lpfc_sli_handle_slow_ring_event =
7533                                 lpfc_sli_handle_slow_ring_event_s3;
7534                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
7535                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
7536                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
7537                 break;
7538         case LPFC_PCI_DEV_OC:
7539                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
7540                 phba->lpfc_sli_handle_slow_ring_event =
7541                                 lpfc_sli_handle_slow_ring_event_s4;
7542                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
7543                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
7544                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
7545                 break;
7546         default:
7547                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7548                                 "1420 Invalid HBA PCI-device group: 0x%x\n",
7549                                 dev_grp);
7550                 return -ENODEV;
7552         }
7553         return 0;
7554 }
7555
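/*
 * Illustrative sketch (not part of the driver): once the jump table
 * above is populated, callers remain SLI-revision agnostic and simply
 * dispatch through the function pointer. Hypothetical example:
 */
static int lpfc_example_dispatch(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox)
{
	/* resolves to lpfc_sli_issue_mbox_s3 or _s4 based on dev_grp */
	return phba->lpfc_sli_issue_mbox(phba, pmbox, MBX_NOWAIT);
}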
7556 /**
7557  * __lpfc_sli_ringtx_put - Add an iocb to the txq
7558  * @phba: Pointer to HBA context object.
7559  * @pring: Pointer to driver SLI ring object.
7560  * @piocb: Pointer to the command iocb to add.
7561  *
7562  * This function is called with hbalock held to add a command
7563  * iocb to the txq when SLI layer cannot submit the command iocb
7564  * to the ring.
7565  **/
7566 void
7567 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7568                     struct lpfc_iocbq *piocb)
7569 {
7570         /* Insert the caller's iocb in the txq tail for later processing. */
7571         list_add_tail(&piocb->list, &pring->txq);
7572         pring->txq_cnt++;
7573 }
7574
7575 /**
7576  * lpfc_sli_next_iocb - Get the next iocb in the txq
7577  * @phba: Pointer to HBA context object.
7578  * @pring: Pointer to driver SLI ring object.
7579  * @piocb: Pointer to address of newly added command iocb.
7580  *
7581  * This function is called with hbalock held before a new
7582  * iocb is submitted to the firmware. It drains the txq so that
7583  * pending iocbs reach the firmware before any new iocb is
7584  * submitted.
7585  * If there are iocbs in the txq which need to be submitted
7586  * to the firmware, lpfc_sli_next_iocb dequeues the first element
7587  * from the txq and returns it.
7588  * If the txq is empty, the function returns *piocb and sets
7589  * *piocb to NULL. The caller checks *piocb to determine whether
7590  * more commands remain to be issued.
7591  **/
7592 static struct lpfc_iocbq *
7593 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7594                    struct lpfc_iocbq **piocb)
7595 {
7596         struct lpfc_iocbq *nextiocb;
7597
7598         nextiocb = lpfc_sli_ringtx_get(phba, pring);
7599         if (!nextiocb) {
7600                 nextiocb = *piocb;
7601                 *piocb = NULL;
7602         }
7603
7604         return nextiocb;
7605 }
7606
7607 /**
7608  * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
7609  * @phba: Pointer to HBA context object.
7610  * @ring_number: SLI ring number to issue iocb on.
7611  * @piocb: Pointer to command iocb.
7612  * @flag: Flag indicating if this command can be put into txq.
7613  *
7614  * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
7615  * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
7616  * recovering from an error state or the HBA has a deferred error attention,
7617  * the function returns IOCB_ERROR. If LPFC_STOP_IOCB_EVENT is set, the iocb
7618  * is held back as busy. When the link is down, only iocbs for posting
7619  * buffers are allowed. This function finds the next available slot in the
7620  * command ring, posts the command to that slot, and writes the port
7621  * attention register to ask the HBA to start processing the new iocb. If no
7622  * slot is available and SLI_IOCB_RET_IOCB is not set in @flag, the new iocb
7623  * is added to the txq; otherwise the function returns IOCB_BUSY.
7624  *
7625  * This function is called with hbalock held. The function will return success
7626  * after it successfully submits the iocb to the firmware or after adding
7627  * it to the txq.
7628  **/
7629 static int
7630 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
7631                     struct lpfc_iocbq *piocb, uint32_t flag)
7632 {
7633         struct lpfc_iocbq *nextiocb;
7634         IOCB_t *iocb;
7635         struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
7636
7637         if (piocb->iocb_cmpl && (!piocb->vport) &&
7638            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
7639            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
7640                 lpfc_printf_log(phba, KERN_ERR,
7641                                 LOG_SLI | LOG_VPORT,
7642                                 "1807 IOCB x%x failed. No vport\n",
7643                                 piocb->iocb.ulpCommand);
7644                 dump_stack();
7645                 return IOCB_ERROR;
7646         }
7647
7649         /* If the PCI channel is in offline state, do not post iocbs. */
7650         if (unlikely(pci_channel_offline(phba->pcidev)))
7651                 return IOCB_ERROR;
7652
7653         /* If HBA has a deferred error attention, fail the iocb. */
7654         if (unlikely(phba->hba_flag & DEFER_ERATT))
7655                 return IOCB_ERROR;
7656
7657         /*
7658          * We should never get an IOCB if we are in a < LINK_DOWN state
7659          */
7660         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
7661                 return IOCB_ERROR;
7662
7663         /*
7664          * Check to see if we are blocking IOCB processing because of an
7665          * outstanding event.
7666          */
7667         if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
7668                 goto iocb_busy;
7669
7670         if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
7671                 /*
7672                  * Only CREATE_XRI, CLOSE_XRI, QUE_RING_BUF, and (in Menlo
7673                  * maintenance mode) GEN_REQUEST64 can be issued while the link is down.
7674                  */
7675                 switch (piocb->iocb.ulpCommand) {
7676                 case CMD_GEN_REQUEST64_CR:
7677                 case CMD_GEN_REQUEST64_CX:
7678                         if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
7679                                 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
7680                                         FC_RCTL_DD_UNSOL_CMD) ||
7681                                 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
7682                                         MENLO_TRANSPORT_TYPE))
7683
7684                                 goto iocb_busy;
7685                         break;
7686                 case CMD_QUE_RING_BUF_CN:
7687                 case CMD_QUE_RING_BUF64_CN:
7688                         /*
7689                          * For IOCBs, like QUE_RING_BUF, that have no rsp ring
7690                          * completion, iocb_cmpl MUST be 0.
7691                          */
7692                         if (piocb->iocb_cmpl)
7693                                 piocb->iocb_cmpl = NULL;
7694                         /*FALLTHROUGH*/
7695                 case CMD_CREATE_XRI_CR:
7696                 case CMD_CLOSE_XRI_CN:
7697                 case CMD_CLOSE_XRI_CX:
7698                         break;
7699                 default:
7700                         goto iocb_busy;
7701                 }
7702
7703         /*
7704          * For FCP commands, we must be in a state where we can process link
7705          * attention events.
7706          */
7707         } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
7708                             !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
7709                 goto iocb_busy;
7710         }
7711
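             /*
              * Drain loop: lpfc_sli_next_iocb() hands back queued txq iocbs
              * first and the caller's piocb last (setting *piocb to NULL), so
              * previously deferred commands keep their original ordering.
              */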
7712         while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
7713                (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
7714                 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
7715
7716         if (iocb)
7717                 lpfc_sli_update_ring(phba, pring);
7718         else
7719                 lpfc_sli_update_full_ring(phba, pring);
7720
7721         if (!piocb)
7722                 return IOCB_SUCCESS;
7723
7724         goto out_busy;
7725
7726  iocb_busy:
7727         pring->stats.iocb_cmd_delay++;
7728
7729  out_busy:
7730
7731         if (!(flag & SLI_IOCB_RET_IOCB)) {
7732                 __lpfc_sli_ringtx_put(phba, pring, piocb);
7733                 return IOCB_SUCCESS;
7734         }
7735
7736         return IOCB_BUSY;
7737 }
7738
7739 /**
7740  * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
7741  * @phba: Pointer to HBA context object.
7742  * @piocb: Pointer to command iocb.
7743  * @sglq: Pointer to the scatter gather queue object.
7744  *
7745  * This routine converts the bpl or bde that is in the IOCB
7746  * to a sgl list for the sli4 hardware. The physical address
7747  * of the bpl/bde is converted back to a virtual address.
7748  * If the IOCB contains a BPL then the list of BDE's is
7749  * converted to sli4_sge's. If the IOCB contains a single
7750  * BDE then it is converted to a single sli4_sge.
7751  * The IOCB is still in CPU endianness so the contents of
7752  * the bpl can be used without byte swapping.
7753  *
7754  * Returns valid XRI = Success, NO_XRI = Failure.
7755 **/
7756 static uint16_t
7757 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
7758                 struct lpfc_sglq *sglq)
7759 {
7760         uint16_t xritag = NO_XRI;
7761         struct ulp_bde64 *bpl = NULL;
7762         struct ulp_bde64 bde;
7763         struct sli4_sge *sgl  = NULL;
7764         struct lpfc_dmabuf *dmabuf;
7765         IOCB_t *icmd;
7766         int numBdes = 0;
7767         int i = 0;
7768         uint32_t offset = 0; /* accumulated offset in the sg request list */
7769         int inbound = 0; /* number of sg reply entries inbound from firmware */
7770
7771         if (!piocbq || !sglq)
7772                 return xritag;
7773
7774         sgl  = (struct sli4_sge *)sglq->sgl;
7775         icmd = &piocbq->iocb;
7776         if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
7777                 return sglq->sli4_xritag;
7778         if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
7779                 numBdes = icmd->un.genreq64.bdl.bdeSize /
7780                                 sizeof(struct ulp_bde64);
7781                 /* The addrHigh and addrLow fields within the IOCB
7782                  * have not been byteswapped yet so there is no
7783                  * need to swap them back.
7784                  */
7785                 if (piocbq->context3)
7786                         dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
7787                 else
7788                         return xritag;
7789
7790                 bpl  = (struct ulp_bde64 *)dmabuf->virt;
7791                 if (!bpl)
7792                         return xritag;
7793
7794                 for (i = 0; i < numBdes; i++) {
7795                         /* Should already be byte swapped. */
7796                         sgl->addr_hi = bpl->addrHigh;
7797                         sgl->addr_lo = bpl->addrLow;
7798
7799                         sgl->word2 = le32_to_cpu(sgl->word2);
7800                         if ((i+1) == numBdes)
7801                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
7802                         else
7803                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
7804                         /* swap the size field back to the cpu so we
7805                          * can assign it to the sgl.
7806                          */
7807                         bde.tus.w = le32_to_cpu(bpl->tus.w);
7808                         sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
7809                         /* The offsets in the sgl need to be accumulated
7810                          * separately for the request and reply lists.
7811                          * The request is always first, the reply follows.
7812                          */
7813                         if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
7814                                 /* add up the reply sg entries */
7815                                 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
7816                                         inbound++;
7817                                 /* first inbound? reset the offset */
7818                                 if (inbound == 1)
7819                                         offset = 0;
7820                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
7821                                 bf_set(lpfc_sli4_sge_type, sgl,
7822                                         LPFC_SGE_TYPE_DATA);
7823                                 offset += bde.tus.f.bdeSize;
7824                         }
7825                         sgl->word2 = cpu_to_le32(sgl->word2);
7826                         bpl++;
7827                         sgl++;
7828                 }
7829         } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
7830                         /* The addrHigh and addrLow fields of the BDE have not
7831                          * been byteswapped yet so they need to be swapped
7832                          * before putting them in the sgl.
7833                          */
7834                         sgl->addr_hi =
7835                                 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
7836                         sgl->addr_lo =
7837                                 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
7838                         sgl->word2 = le32_to_cpu(sgl->word2);
7839                         bf_set(lpfc_sli4_sge_last, sgl, 1);
7840                         sgl->word2 = cpu_to_le32(sgl->word2);
7841                         sgl->sge_len =
7842                                 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
7843         }
7844         return sglq->sli4_xritag;
7845 }
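     /*
      * Mapping note (informational): each ulp_bde64 in the BPL becomes one
      * sli4_sge -- addrHigh/addrLow carry straight over, tus.f.bdeSize
      * becomes sge_len, and only the final entry has the "last" bit set.
      */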
7846
7847 /**
7848  * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
7849  * @phba: Pointer to HBA context object.
7850  *
7851  * This routine performs a round-robin SCSI command to SLI4 FCP WQ index
7852  * distribution.  This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
7853  * held.
7854  *
7855  * Return: index into SLI4 fast-path FCP queue index.
7856  **/
7857 static inline uint32_t
7858 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
7859 {
7860         int i;
7861
7862         if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
7863                 i = smp_processor_id();
7864         else
7865                 i = atomic_add_return(1, &phba->fcp_qidx);
7866
7867         i = (i % phba->cfg_fcp_io_channel);
7868         return i;
7869 }
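     /*
      * Illustrative example (not driver code): with cfg_fcp_io_channel = 4
      * and round-robin scheduling, successive atomic_add_return() values
      * 1, 2, 3, 4, 5, ... map through the modulo to WQ indexes
      * 1, 2, 3, 0, 1, ...; with LPFC_FCP_SCHED_BY_CPU the issuing CPU id
      * is reduced modulo the channel count the same way.
      */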
7870
7871 /**
7872  * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
7873  * @phba: Pointer to HBA context object.
7874  * @iocbq: Pointer to command iocb.
7875  * @wqe: Pointer to the work queue entry.
7876  *
7877  * This routine converts the iocb command to its Work Queue Entry
7878  * equivalent. The wqe pointer should not have any fields set when
7879  * this routine is called because it will memcpy over them.
7880  * This routine does not set the CQ_ID or the WQEC bits in the
7881  * wqe.
7882  *
7883  * Returns: 0 = Success, IOCB_ERROR = Failure.
7884  **/
7885 static int
7886 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7887                 union lpfc_wqe *wqe)
7888 {
7889         uint32_t xmit_len = 0, total_len = 0;
7890         uint8_t ct = 0;
7891         uint32_t fip;
7892         uint32_t abort_tag;
7893         uint8_t command_type = ELS_COMMAND_NON_FIP;
7894         uint8_t cmnd;
7895         uint16_t xritag;
7896         uint16_t abrt_iotag;
7897         struct lpfc_iocbq *abrtiocbq;
7898         struct ulp_bde64 *bpl = NULL;
7899         uint32_t els_id = LPFC_ELS_ID_DEFAULT;
7900         int numBdes, i;
7901         struct ulp_bde64 bde;
7902         struct lpfc_nodelist *ndlp;
7903         uint32_t *pcmd;
7904         uint32_t if_type;
7905
7906         fip = phba->hba_flag & HBA_FIP_SUPPORT;
7907         /* The fcp commands will set command type */
7908         if (iocbq->iocb_flag &  LPFC_IO_FCP)
7909                 command_type = FCP_COMMAND;
7910         else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
7911                 command_type = ELS_COMMAND_FIP;
7912         else
7913                 command_type = ELS_COMMAND_NON_FIP;
7914
7915         /* Some of the fields are in the right position already */
7916         memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
7917         abort_tag = (uint32_t) iocbq->iotag;
7918         xritag = iocbq->sli4_xritag;
7919         wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
7920         /* words0-2 bpl convert bde */
7921         if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
7922                 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
7923                                 sizeof(struct ulp_bde64);
7924                 bpl  = (struct ulp_bde64 *)
7925                         ((struct lpfc_dmabuf *)iocbq->context3)->virt;
7926                 if (!bpl)
7927                         return IOCB_ERROR;
7928
7929                 /* Should already be byte swapped. */
7930                 wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
7931                 wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
7932                 /* swap the size field back to the cpu so we
7933                  * can assign it to the sgl.
7934                  */
7935                 wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
7936                 xmit_len = wqe->generic.bde.tus.f.bdeSize;
7937                 total_len = 0;
7938                 for (i = 0; i < numBdes; i++) {
7939                         bde.tus.w  = le32_to_cpu(bpl[i].tus.w);
7940                         total_len += bde.tus.f.bdeSize;
7941                 }
7942         } else
7943                 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
7944
7945         iocbq->iocb.ulpIoTag = iocbq->iotag;
7946         cmnd = iocbq->iocb.ulpCommand;
7947
7948         switch (iocbq->iocb.ulpCommand) {
7949         case CMD_ELS_REQUEST64_CR:
7950                 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
7951                         ndlp = iocbq->context_un.ndlp;
7952                 else
7953                         ndlp = (struct lpfc_nodelist *)iocbq->context1;
7954                 if (!iocbq->iocb.ulpLe) {
7955                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7956                                 "2007 Only Limited Edition cmd Format"
7957                                 " supported 0x%x\n",
7958                                 iocbq->iocb.ulpCommand);
7959                         return IOCB_ERROR;
7960                 }
7961
7962                 wqe->els_req.payload_len = xmit_len;
7963                 /* Els_request64 has a TMO */
7964                 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
7965                         iocbq->iocb.ulpTimeout);
7966                 /* Need a VF for word 4; set the vf bit */
7967                 bf_set(els_req64_vf, &wqe->els_req, 0);
7968                 /* And a VFID for word 12 */
7969                 bf_set(els_req64_vfid, &wqe->els_req, 0);
7970                 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
7971                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
7972                        iocbq->iocb.ulpContext);
7973                 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
7974                 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
7975                 /* CCP CCPE PV PRI in word10 were set in the memcpy */
7976                 if (command_type == ELS_COMMAND_FIP)
7977                         els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
7978                                         >> LPFC_FIP_ELS_ID_SHIFT);
7979                 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
7980                                         iocbq->context2)->virt);
7981                 if_type = bf_get(lpfc_sli_intf_if_type,
7982                                         &phba->sli4_hba.sli_intf);
7983                 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
7984                         if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
7985                                 *pcmd == ELS_CMD_SCR ||
7986                                 *pcmd == ELS_CMD_FDISC ||
7987                                 *pcmd == ELS_CMD_LOGO ||
7988                                 *pcmd == ELS_CMD_PLOGI)) {
7989                                 bf_set(els_req64_sp, &wqe->els_req, 1);
7990                                 bf_set(els_req64_sid, &wqe->els_req,
7991                                         iocbq->vport->fc_myDID);
7992                                 if ((*pcmd == ELS_CMD_FLOGI) &&
7993                                         !(phba->fc_topology ==
7994                                                 LPFC_TOPOLOGY_LOOP))
7995                                         bf_set(els_req64_sid, &wqe->els_req, 0);
7996                                 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
7997                                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
7998                                         phba->vpi_ids[iocbq->vport->vpi]);
7999                         } else if (pcmd && iocbq->context1) {
8000                                 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
8001                                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8002                                         phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8003                         }
8004                 }
8005                 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
8006                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8007                 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
8008                 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
8009                 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
8010                 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
8011                 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8012                 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
8013                 break;
8014         case CMD_XMIT_SEQUENCE64_CX:
8015                 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
8016                        iocbq->iocb.un.ulpWord[3]);
8017                 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
8018                        iocbq->iocb.unsli3.rcvsli3.ox_id);
8019                 /* The entire sequence is transmitted for this IOCB */
8020                 xmit_len = total_len;
8021                 cmnd = CMD_XMIT_SEQUENCE64_CR;
8022                 if (phba->link_flag & LS_LOOPBACK_MODE)
8023                         bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
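                     /* fall through - remaining WQE setup is shared with CR */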
8024         case CMD_XMIT_SEQUENCE64_CR:
8025                 /* word3 iocb=io_tag32 wqe=reserved */
8026                 wqe->xmit_sequence.rsvd3 = 0;
8027                 /* word4 relative_offset memcpy */
8028                 /* word5 r_ctl/df_ctl memcpy */
8029                 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
8030                 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
8031                 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
8032                        LPFC_WQE_IOD_WRITE);
8033                 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
8034                        LPFC_WQE_LENLOC_WORD12);
8035                 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
8036                 wqe->xmit_sequence.xmit_len = xmit_len;
8037                 command_type = OTHER_COMMAND;
8038                 break;
8039         case CMD_XMIT_BCAST64_CN:
8040                 /* word3 iocb=iotag32 wqe=seq_payload_len */
8041                 wqe->xmit_bcast64.seq_payload_len = xmit_len;
8042                 /* word4 iocb=rsvd wqe=rsvd */
8043                 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
8044                 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
8045                 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
8046                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8047                 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
8048                 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
8049                 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
8050                        LPFC_WQE_LENLOC_WORD3);
8051                 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
8052                 break;
8053         case CMD_FCP_IWRITE64_CR:
8054                 command_type = FCP_COMMAND_DATA_OUT;
8055                 /* word3 iocb=iotag wqe=payload_offset_len */
8056                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8057                 wqe->fcp_iwrite.payload_offset_len =
8058                         xmit_len + sizeof(struct fcp_rsp);
8059                 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8060                 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8061                 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
8062                        iocbq->iocb.ulpFCP2Rcvy);
8063                 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
8064                 /* Always open the exchange */
8065                 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
8066                 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
8067                 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
8068                        LPFC_WQE_LENLOC_WORD4);
8069                 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
8070                 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
8071                 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
8072                 break;
8073         case CMD_FCP_IREAD64_CR:
8074                 /* word3 iocb=iotag wqe=payload_offset_len */
8075                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8076                 wqe->fcp_iread.payload_offset_len =
8077                         xmit_len + sizeof(struct fcp_rsp);
8078                 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8079                 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8080                 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
8081                        iocbq->iocb.ulpFCP2Rcvy);
8082                 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
8083                 /* Always open the exchange */
8084                 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
8085                 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
8086                 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
8087                        LPFC_WQE_LENLOC_WORD4);
8088                 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
8089                 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
8090                 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
8091                 break;
8092         case CMD_FCP_ICMND64_CR:
8093                 /* word3 iocb=IO_TAG wqe=reserved */
8094                 wqe->fcp_icmd.rsrvd3 = 0;
8095                 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
8096                 /* Always open the exchange */
8097                 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
8098                 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
8099                 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
8100                 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
8101                 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8102                        LPFC_WQE_LENLOC_NONE);
8103                 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
8104                 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8105                        iocbq->iocb.ulpFCP2Rcvy);
8106                 break;
8107         case CMD_GEN_REQUEST64_CR:
8108                 /* For this command calculate the xmit length of the
8109                  * request bde.
8110                  */
8111                 xmit_len = 0;
8112                 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8113                         sizeof(struct ulp_bde64);
8114                 for (i = 0; i < numBdes; i++) {
8115                         bde.tus.w = le32_to_cpu(bpl[i].tus.w);
8116                         if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
8117                                 break;
8118                         xmit_len += bde.tus.f.bdeSize;
8119                 }
8120                 /* word3 iocb=IO_TAG wqe=request_payload_len */
8121                 wqe->gen_req.request_payload_len = xmit_len;
8122                 /* word4 iocb=parameter wqe=relative_offset memcpy */
8123                 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
8124                 /* word6 context tag copied in memcpy */
8125                 if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
8126                         ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8127                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8128                                 "2015 Invalid CT %x command 0x%x\n",
8129                                 ct, iocbq->iocb.ulpCommand);
8130                         return IOCB_ERROR;
8131                 }
8132                 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
8133                 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
8134                 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
8135                 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
8136                 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
8137                 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
8138                 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8139                 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
8140                 command_type = OTHER_COMMAND;
8141                 break;
8142         case CMD_XMIT_ELS_RSP64_CX:
8143                 ndlp = (struct lpfc_nodelist *)iocbq->context1;
8144                 /* words0-2 BDE memcpy */
8145                 /* word3 iocb=iotag32 wqe=response_payload_len */
8146                 wqe->xmit_els_rsp.response_payload_len = xmit_len;
8147                 /* word4 */
8148                 wqe->xmit_els_rsp.word4 = 0;
8149                 /* word5 iocb=rsvd wqe=did */
8150                 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
8151                          iocbq->iocb.un.xseq64.xmit_els_remoteID);
8152
8153                 if_type = bf_get(lpfc_sli_intf_if_type,
8154                                         &phba->sli4_hba.sli_intf);
8155                 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8156                         if (iocbq->vport->fc_flag & FC_PT2PT) {
8157                                 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8158                                 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8159                                         iocbq->vport->fc_myDID);
8160                                 if (iocbq->vport->fc_myDID == Fabric_DID) {
8161                                         bf_set(wqe_els_did,
8162                                                 &wqe->xmit_els_rsp.wqe_dest, 0);
8163                                 }
8164                         }
8165                 }
8166                 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
8167                        ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8168                 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
8169                 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
8170                        iocbq->iocb.unsli3.rcvsli3.ox_id);
8171                 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
8172                         bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
8173                                phba->vpi_ids[iocbq->vport->vpi]);
8174                 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
8175                 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
8176                 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
8177                 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
8178                        LPFC_WQE_LENLOC_WORD3);
8179                 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
8180                 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
8181                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8182                 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8183                                         iocbq->context2)->virt);
8184                 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
8185                                 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8186                                 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8187                                         iocbq->vport->fc_myDID);
8188                                 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
8189                                 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
8190                                         phba->vpi_ids[phba->pport->vpi]);
8191                 }
8192                 command_type = OTHER_COMMAND;
8193                 break;
8194         case CMD_CLOSE_XRI_CN:
8195         case CMD_ABORT_XRI_CN:
8196         case CMD_ABORT_XRI_CX:
8197                 /* words 0-2: reserved, left as 0 by the memcpy */
8198                 /* port will send abts */
8199                 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
8200                 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
8201                         abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
8202                         fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
8203                 } else
8204                         fip = 0;
8205
8206                 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
8207                         /*
8208                          * The link is down, or the command was ELS_FIP
8209                          * so the fw does not need to send abts
8210                          * on the wire.
8211                          */
8212                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
8213                 else
8214                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
8215                 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
8216                 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
8217                 wqe->abort_cmd.rsrvd5 = 0;
8218                 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
8219                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8220                 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
8221                 /*
8222                  * The abort handler will send us CMD_ABORT_XRI_CN or
8223                  * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
8224                  */
8225                 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
8226                 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
8227                 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
8228                        LPFC_WQE_LENLOC_NONE);
8229                 cmnd = CMD_ABORT_XRI_CX;
8230                 command_type = OTHER_COMMAND;
8231                 xritag = 0;
8232                 break;
8233         case CMD_XMIT_BLS_RSP64_CX:
8234                 ndlp = (struct lpfc_nodelist *)iocbq->context1;
8235                 /* As BLS ABTS RSP WQE is very different from other WQEs,
8236                  * we re-construct this WQE here based on information in
8237                  * iocbq from scratch.
8238                  */
8239                 memset(wqe, 0, sizeof(union lpfc_wqe));
8240                 /* OX_ID is invariant regardless of who sent the ABTS */
8241                 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
8242                        bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
8243                 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
8244                     LPFC_ABTS_UNSOL_INT) {
8245                         /* ABTS sent by initiator to CT exchange, the
8246                          * RX_ID field will be filled with the newly
8247                          * allocated responder XRI.
8248                          */
8249                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8250                                iocbq->sli4_xritag);
8251                 } else {
8252                         /* ABTS sent by responder to CT exchange, the
8253                          * RX_ID field will be filled with the responder
8254                          * RX_ID from ABTS.
8255                          */
8256                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8257                                bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
8258                 }
8259                 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
8260                 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
8261
8262                 /* Use CT=VPI */
8263                 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
8264                         ndlp->nlp_DID);
8265                 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
8266                         iocbq->iocb.ulpContext);
8267                 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
8268                 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
8269                         phba->vpi_ids[phba->pport->vpi]);
8270                 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
8271                 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
8272                        LPFC_WQE_LENLOC_NONE);
8273                 /* Overwrite the pre-set command type with OTHER_COMMAND */
8274                 command_type = OTHER_COMMAND;
8275                 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
8276                         bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
8277                                bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
8278                         bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
8279                                bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
8280                         bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
8281                                bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
8282                 }
8283
8284                 break;
8285         case CMD_XRI_ABORTED_CX:
8286         case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
8287         case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
8288         case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
8289         case CMD_FCP_TRSP64_CX: /* Target mode rcv */
8290         case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
8291         default:
8292                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8293                                 "2014 Invalid command 0x%x\n",
8294                                 iocbq->iocb.ulpCommand);
8295                 return IOCB_ERROR;
8297         }
8298
8299         if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
8300                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
8301         else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
8302                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
8303         else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
8304                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
8305         iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
8306                               LPFC_IO_DIF_INSERT);
8307         bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
8308         bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
8309         wqe->generic.wqe_com.abort_tag = abort_tag;
8310         bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
8311         bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
8312         bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
8313         bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
8314         return 0;
8315 }
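     /*
      * Informational: whichever branch was taken above, every WQE leaves
      * this routine with the generic words set -- XRI tag, request tag,
      * abort tag, command type, command, class, and the default CQ id.
      */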
8316
8317 /**
8318  * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
8319  * @phba: Pointer to HBA context object.
8320  * @ring_number: SLI ring number to issue iocb on.
8321  * @piocb: Pointer to command iocb.
8322  * @flag: Flag indicating if this command can be put into txq.
8323  *
8324  * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
8325  * an iocb command to an HBA with SLI-4 interface spec.
8326  *
8327  * This function is called with hbalock held. The function will return success
8328  * after it successfully submits the iocb to the firmware or after adding
8329  * it to the txq.
8330  **/
8331 static int
8332 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8333                          struct lpfc_iocbq *piocb, uint32_t flag)
8334 {
8335         struct lpfc_sglq *sglq;
8336         union lpfc_wqe wqe;
8337         struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
8338
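             /*
              * sglq policy (summary of the branches below): aborts need no
              * sglq; a new command must obtain one, or be queued behind an
              * already-backlogged txq to preserve ordering; FCP IOs arrive
              * pre-mapped; CX continuations reuse their active sglq.
              */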
8339         if (piocb->sli4_xritag == NO_XRI) {
8340                 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
8341                     piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
8342                         sglq = NULL;
8343                 else {
8344                         if (pring->txq_cnt) {
8345                                 if (!(flag & SLI_IOCB_RET_IOCB)) {
8346                                         __lpfc_sli_ringtx_put(phba,
8347                                                 pring, piocb);
8348                                         return IOCB_SUCCESS;
8349                                 } else {
8350                                         return IOCB_BUSY;
8351                                 }
8352                         } else {
8353                                 sglq = __lpfc_sli_get_sglq(phba, piocb);
8354                                 if (!sglq) {
8355                                         if (!(flag & SLI_IOCB_RET_IOCB)) {
8356                                                 __lpfc_sli_ringtx_put(phba,
8357                                                                 pring,
8358                                                                 piocb);
8359                                                 return IOCB_SUCCESS;
8360                                         } else
8361                                                 return IOCB_BUSY;
8362                                 }
8363                         }
8364                 }
8365         } else if (piocb->iocb_flag &  LPFC_IO_FCP) {
8366                 /* These IOs already have an XRI and a mapped sgl. */
8367                 sglq = NULL;
8368         } else {
8369                 /*
8370                  * This is a continuation of a command (CX), so this
8371                  * sglq is on the active list
8372                  */
8373                 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
8374                 if (!sglq)
8375                         return IOCB_ERROR;
8376         }
8377
8378         if (sglq) {
8379                 piocb->sli4_lxritag = sglq->sli4_lxritag;
8380                 piocb->sli4_xritag = sglq->sli4_xritag;
8381                 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
8382                         return IOCB_ERROR;
8383         }
8384
8385         if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
8386                 return IOCB_ERROR;
8387
8388         if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8389                 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
8390                 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
8391                                      &wqe))
8392                         return IOCB_ERROR;
8393         } else {
8394                 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
8395                         return IOCB_ERROR;
8396         }
8397         lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
8398
8399         return 0;
8400 }
8401
8402 /**
8403  * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
8404  *
8405  * This routine invokes the revision-specific lockless iocb issue routine
8406  * through the function pointer in the lpfc_hba struct.
8407  *
8408  * Return codes:
8409  *      IOCB_ERROR - Error
8410  *      IOCB_SUCCESS - Success
8411  *      IOCB_BUSY - Busy
8412  **/
8413 int
8414 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8415                 struct lpfc_iocbq *piocb, uint32_t flag)
8416 {
8417         return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8418 }
8419
8420 /**
8421  * lpfc_sli_api_table_setup - Set up sli api function jump table
8422  * @phba: The hba struct for which this call is being executed.
8423  * @dev_grp: The HBA PCI-Device group number.
8424  *
8425  * This routine sets up the SLI interface API function jump table in @phba
8426  * struct.
8427  * Returns: 0 - success, -ENODEV - failure.
8428  **/
8429 int
8430 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8431 {
8432
8433         switch (dev_grp) {
8434         case LPFC_PCI_DEV_LP:
8435                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
8436                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
8437                 break;
8438         case LPFC_PCI_DEV_OC:
8439                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
8440                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
8441                 break;
8442         default:
8443                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8444                                 "1419 Invalid HBA PCI-device group: 0x%x\n",
8445                                 dev_grp);
8446                 return -ENODEV;
8448         }
8449         phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
8450         return 0;
8451 }
8452
8453 /**
8454  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
8455  * @phba: Pointer to HBA context object.
8456  * @ring_number: SLI ring number to issue the iocb on.
8457  * @piocb: Pointer to command iocb.
8458  * @flag: Flag indicating if this command can be put into txq.
8459  *
8460  * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
8461  * function. It takes the appropriate lock (the per-ring lock on SLI4,
8462  * the hbalock on SLI2/3), calls __lpfc_sli_issue_iocb, and returns
8463  * whatever __lpfc_sli_issue_iocb returns. This wrapper is used by
8464  * callers which do not hold the lock themselves.
8465  **/
8466 int
8467 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8468                     struct lpfc_iocbq *piocb, uint32_t flag)
8469 {
8470         struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
8471         struct lpfc_sli_ring *pring;
8472         struct lpfc_queue *fpeq;
8473         struct lpfc_eqe *eqe;
8474         unsigned long iflags;
8475         int rc, idx;
8476
8477         if (phba->sli_rev == LPFC_SLI_REV4) {
8478                 if (piocb->iocb_flag &  LPFC_IO_FCP) {
8479                         if (unlikely(!phba->sli4_hba.fcp_wq))
8480                                 return IOCB_ERROR;
8481                         idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8482                         piocb->fcp_wqidx = idx;
8483                         ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
8484
8485                         pring = &phba->sli.ring[ring_number];
8486                         spin_lock_irqsave(&pring->ring_lock, iflags);
8487                         rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
8488                                 flag);
8489                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
8490
8491                         if (lpfc_fcp_look_ahead) {
8492                                 fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
8493
8494                                 if (atomic_dec_and_test(&fcp_eq_hdl->
8495                                         fcp_eq_in_use)) {
8496
8497                                         /* Get associated EQ with this index */
8498                                         fpeq = phba->sli4_hba.hba_eq[idx];
8499
8500                                         /* Turn off interrupts from this EQ */
8501                                         lpfc_sli4_eq_clr_intr(fpeq);
8502
8503                                         /*
8504                                          * Process all the events on FCP EQ
8505                                          */
8506                                         while ((eqe = lpfc_sli4_eq_get(fpeq))) {
8507                                                 lpfc_sli4_hba_handle_eqe(phba,
8508                                                         eqe, idx);
8509                                                 fpeq->EQ_processed++;
8510                                         }
8511
8512                                         /* Always clear and re-arm the EQ */
8513                                         lpfc_sli4_eq_release(fpeq,
8514                                                 LPFC_QUEUE_REARM);
8515                                 }
8516                                 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
8517                         }
8518                 } else {
8519                         pring = &phba->sli.ring[ring_number];
8520                         spin_lock_irqsave(&pring->ring_lock, iflags);
8521                         rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
8522                                 flag);
8523                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
8524
8525                 }
8526         } else {
8527                 /* For now, SLI2/3 will still use hbalock */
8528                 spin_lock_irqsave(&phba->hbalock, iflags);
8529                 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8530                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8531         }
8532         return rc;
8533 }
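     /*
      * Usage sketch (hypothetical caller, not part of this file): ELS
      * traffic is typically issued through this wrapper without
      * SLI_IOCB_RET_IOCB, so a full ring quietly defers to the txq:
      *
      *     if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
      *         IOCB_ERROR)
      *             lpfc_els_free_iocb(phba, elsiocb);
      */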
8534
8535 /**
8536  * lpfc_extra_ring_setup - Extra ring setup function
8537  * @phba: Pointer to HBA context object.
8538  *
8539  * This function is called while the driver attaches to the
8540  * HBA to set up the extra ring. The extra ring is used
8541  * only when the driver needs to support target mode or
8542  * IP over FC functionality.
8543  *
8544  * This function is called with no lock held.
8545  **/
8546 static int
8547 lpfc_extra_ring_setup(struct lpfc_hba *phba)
8548 {
8549         struct lpfc_sli *psli;
8550         struct lpfc_sli_ring *pring;
8551
8552         psli = &phba->sli;
8553
8554         /* Adjust cmd/rsp ring iocb entries more evenly */
8555
8556         /* Take some away from the FCP ring */
8557         pring = &psli->ring[psli->fcp_ring];
8558         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8559         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8560         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8561         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8562
8563         /* and give them to the extra ring */
8564         pring = &psli->ring[psli->extra_ring];
8565
8566         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8567         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8568         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8569         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8570
8571         /* Setup default profile for this ring */
8572         pring->iotag_max = 4096;
8573         pring->num_mask = 1;
8574         pring->prt[0].profile = 0;      /* Mask 0 */
8575         pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
8576         pring->prt[0].type = phba->cfg_multi_ring_type;
8577         pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
8578         return 0;
8579 }
8580
8581 /**
 * lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port
8582  * @phba: Pointer to HBA context object.
8583  * @iocbq: Pointer to iocb object.
8584  *
8585  * The async_event handler calls this routine when it receives
8586  * an ASYNC_STATUS_CN event from the port.  The port generates
8587  * this event when an Abort Sequence request to an rport fails
8588  * twice in succession.  The abort could be originated by the
8589  * driver or by the port.  The ABTS could have been for an ELS
8590  * or FCP IO.  The port only generates this event when an ABTS
8591  * fails to complete after one retry.
8592  */
8593 static void
8594 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
8595                           struct lpfc_iocbq *iocbq)
8596 {
8597         struct lpfc_nodelist *ndlp = NULL;
8598         uint16_t rpi = 0, vpi = 0;
8599         struct lpfc_vport *vport = NULL;
8600
8601         /* The rpi in the ulpContext is vport-sensitive. */
8602         vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
8603         rpi = iocbq->iocb.ulpContext;
8604
8605         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8606                         "3092 Port generated ABTS async event "
8607                         "on vpi %d rpi %d status 0x%x\n",
8608                         vpi, rpi, iocbq->iocb.ulpStatus);
8609
8610         vport = lpfc_find_vport_by_vpid(phba, vpi);
8611         if (!vport)
8612                 goto err_exit;
8613         ndlp = lpfc_findnode_rpi(vport, rpi);
8614         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
8615                 goto err_exit;
8616
8617         if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
8618                 lpfc_sli_abts_recover_port(vport, ndlp);
8619         return;
8620
8621  err_exit:
8622         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8623                         "3095 Event Context not found, no "
8624                         "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
8625                         vpi, rpi, iocbq->iocb.ulpStatus,
8626                         iocbq->iocb.ulpContext);
8627 }
8628
8629 /**
 * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port
8630  * @phba: pointer to HBA context object.
8631  * @ndlp: nodelist pointer for the impacted rport.
8632  * @axri: pointer to the wcqe containing the failed exchange.
8633  *
8634  * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
8635  * port.  The port generates this event when an abort exchange request to an
8636  * rport fails twice in succession with no reply.  The abort could be originated
8637  * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
8638  */
8639 void
8640 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
8641                            struct lpfc_nodelist *ndlp,
8642                            struct sli4_wcqe_xri_aborted *axri)
8643 {
8644         struct lpfc_vport *vport;
8645         uint32_t ext_status = 0;
8646
8647         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
8648                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8649                                 "3115 Node Context not found, driver "
8650                                 "ignoring abts err event\n");
8651                 return;
8652         }
8653
8654         vport = ndlp->vport;
8655         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8656                         "3116 Port generated FCP XRI ABORT event on "
8657                         "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
8658                         ndlp->vport->vpi, ndlp->nlp_rpi,
8659                         bf_get(lpfc_wcqe_xa_xri, axri),
8660                         bf_get(lpfc_wcqe_xa_status, axri),
8661                         axri->parameter);
8662
8663         /*
8664          * Catch the ABTS protocol failure case.  Older OCe FW releases returned
8665          * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
8666          * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
8667          */
8668         ext_status = axri->parameter & IOERR_PARAM_MASK;
8669         if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
8670             ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
8671                 lpfc_sli_abts_recover_port(vport, ndlp);
8672 }
8673
8674 /**
8675  * lpfc_sli_async_event_handler - ASYNC iocb handler function
8676  * @phba: Pointer to HBA context object.
8677  * @pring: Pointer to driver SLI ring object.
8678  * @iocbq: Pointer to iocb object.
8679  *
8680  * This function is called by the slow ring event handler
8681  * function when there is an ASYNC event iocb in the ring.
8682  * This function is called with no lock held.
8683  * Currently this function handles only temperature related
8684  * ASYNC events. The function decodes the temperature sensor
8685  * event message and posts events for the management applications.
8686  **/
8687 static void
8688 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
8689         struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
8690 {
8691         IOCB_t *icmd;
8692         uint16_t evt_code;
8693         struct temp_event temp_event_data;
8694         struct Scsi_Host *shost;
8695         uint32_t *iocb_w;
8696
8697         icmd = &iocbq->iocb;
8698         evt_code = icmd->un.asyncstat.evt_code;
8699
8700         switch (evt_code) {
8701         case ASYNC_TEMP_WARN:
8702         case ASYNC_TEMP_SAFE:
8703                 temp_event_data.data = (uint32_t) icmd->ulpContext;
8704                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
8705                 if (evt_code == ASYNC_TEMP_WARN) {
8706                         temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
8707                         lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8708                                 "0347 Adapter is very hot, please take "
8709                                 "corrective action. temperature : %d Celsius\n",
8710                                 (uint32_t) icmd->ulpContext);
8711                 } else {
8712                         temp_event_data.event_code = LPFC_NORMAL_TEMP;
8713                         lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8714                                 "0340 Adapter temperature is OK now. "
8715                                 "temperature : %d Celsius\n",
8716                                 (uint32_t) icmd->ulpContext);
8717                 }
8718
8719                 /* Send temperature change event to applications */
8720                 shost = lpfc_shost_from_vport(phba->pport);
8721                 fc_host_post_vendor_event(shost, fc_get_event_number(),
8722                         sizeof(temp_event_data), (char *) &temp_event_data,
8723                         LPFC_NL_VENDOR_ID);
8724                 break;
8725         case ASYNC_STATUS_CN:
8726                 lpfc_sli_abts_err_handler(phba, iocbq);
8727                 break;
8728         default:
8729                 iocb_w = (uint32_t *) icmd;
8730                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8731                         "0346 Ring %d handler: unexpected ASYNC_STATUS"
8732                         " evt_code 0x%x\n"
8733                         "W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
8734                         "W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
8735                         "W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
8736                         "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
8737                         pring->ringno, icmd->un.asyncstat.evt_code,
8738                         iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
8739                         iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
8740                         iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
8741                         iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
8742
8743                 break;
8744         }
8745 }
8746
8747
8748 /**
8749  * lpfc_sli_setup - SLI ring setup function
8750  * @phba: Pointer to HBA context object.
8751  *
8752  * lpfc_sli_setup sets up the rings of the SLI interface with the
8753  * number of iocbs per ring and the iotag ranges. This function is
8754  * called while the driver attaches to the HBA, before interrupts
8755  * are enabled, so no locking is needed.
8756  *
8757  * This function always returns 0.
8758  **/
8759 int
8760 lpfc_sli_setup(struct lpfc_hba *phba)
8761 {
8762         int i, totiocbsize = 0;
8763         struct lpfc_sli *psli = &phba->sli;
8764         struct lpfc_sli_ring *pring;
8765
8766         psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
8767         if (phba->sli_rev == LPFC_SLI_REV4)
8768                 psli->num_rings += phba->cfg_fcp_io_channel;
8769         psli->sli_flag = 0;
8770         psli->fcp_ring = LPFC_FCP_RING;
8771         psli->next_ring = LPFC_FCP_NEXT_RING;
8772         psli->extra_ring = LPFC_EXTRA_RING;
8773
8774         psli->iocbq_lookup = NULL;
8775         psli->iocbq_lookup_len = 0;
8776         psli->last_iotag = 0;
8777
8778         for (i = 0; i < psli->num_rings; i++) {
8779                 pring = &psli->ring[i];
8780                 switch (i) {
8781                 case LPFC_FCP_RING:     /* ring 0 - FCP */
8782                         /* numCiocb and numRiocb are used in config_port */
8783                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
8784                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
8785                         pring->sli.sli3.numCiocb +=
8786                                 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8787                         pring->sli.sli3.numRiocb +=
8788                                 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8789                         pring->sli.sli3.numCiocb +=
8790                                 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8791                         pring->sli.sli3.numRiocb +=
8792                                 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8793                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
8794                                                         SLI3_IOCB_CMD_SIZE :
8795                                                         SLI2_IOCB_CMD_SIZE;
8796                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
8797                                                         SLI3_IOCB_RSP_SIZE :
8798                                                         SLI2_IOCB_RSP_SIZE;
8799                         pring->iotag_ctr = 0;
8800                         pring->iotag_max =
8801                             (phba->cfg_hba_queue_depth * 2);
8802                         pring->fast_iotag = pring->iotag_max;
8803                         pring->num_mask = 0;
8804                         break;
8805                 case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
8806                         /* numCiocb and numRiocb are used in config_port */
8807                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
8808                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
8809                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
8810                                                         SLI3_IOCB_CMD_SIZE :
8811                                                         SLI2_IOCB_CMD_SIZE;
8812                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
8813                                                         SLI3_IOCB_RSP_SIZE :
8814                                                         SLI2_IOCB_RSP_SIZE;
8815                         pring->iotag_max = phba->cfg_hba_queue_depth;
8816                         pring->num_mask = 0;
8817                         break;
8818                 case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
8819                         /* numCiocb and numRiocb are used in config_port */
8820                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
8821                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
8822                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
8823                                                         SLI3_IOCB_CMD_SIZE :
8824                                                         SLI2_IOCB_CMD_SIZE;
8825                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
8826                                                         SLI3_IOCB_RSP_SIZE :
8827                                                         SLI2_IOCB_RSP_SIZE;
8828                         pring->fast_iotag = 0;
8829                         pring->iotag_ctr = 0;
8830                         pring->iotag_max = 4096;
8831                         pring->lpfc_sli_rcv_async_status =
8832                                 lpfc_sli_async_event_handler;
8833                         pring->num_mask = LPFC_MAX_RING_MASK;
8834                         pring->prt[0].profile = 0;      /* Mask 0 */
8835                         pring->prt[0].rctl = FC_RCTL_ELS_REQ;
8836                         pring->prt[0].type = FC_TYPE_ELS;
8837                         pring->prt[0].lpfc_sli_rcv_unsol_event =
8838                             lpfc_els_unsol_event;
8839                         pring->prt[1].profile = 0;      /* Mask 1 */
8840                         pring->prt[1].rctl = FC_RCTL_ELS_REP;
8841                         pring->prt[1].type = FC_TYPE_ELS;
8842                         pring->prt[1].lpfc_sli_rcv_unsol_event =
8843                             lpfc_els_unsol_event;
8844                         pring->prt[2].profile = 0;      /* Mask 2 */
8845                         /* NameServer Inquiry */
8846                         pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
8847                         /* NameServer */
8848                         pring->prt[2].type = FC_TYPE_CT;
8849                         pring->prt[2].lpfc_sli_rcv_unsol_event =
8850                             lpfc_ct_unsol_event;
8851                         pring->prt[3].profile = 0;      /* Mask 3 */
8852                         /* NameServer response */
8853                         pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
8854                         /* NameServer */
8855                         pring->prt[3].type = FC_TYPE_CT;
8856                         pring->prt[3].lpfc_sli_rcv_unsol_event =
8857                             lpfc_ct_unsol_event;
8858                         /* abort unsolicited sequence */
8859                         pring->prt[4].profile = 0;      /* Mask 4 */
8860                         pring->prt[4].rctl = FC_RCTL_BA_ABTS;
8861                         pring->prt[4].type = FC_TYPE_BLS;
8862                         pring->prt[4].lpfc_sli_rcv_unsol_event =
8863                             lpfc_sli4_ct_abort_unsol_event;
8864                         break;
8865                 }
8866                 totiocbsize += (pring->sli.sli3.numCiocb *
8867                         pring->sli.sli3.sizeCiocb) +
8868                         (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
8869         }
8870         if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
8871                 /* Too many cmd / rsp ring entries in SLI2 SLIM */
8872                 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
8873                        "SLI2 SLIM Data: x%x x%lx\n",
8874                        phba->brd_no, totiocbsize,
8875                        (unsigned long) MAX_SLIM_IOCB_SIZE);
8876         }
8877         if (phba->cfg_multi_ring_support == 2)
8878                 lpfc_extra_ring_setup(phba);
8879
8880         return 0;
8881 }
8882
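/*
 * Illustrative sketch (not part of the driver source): the per-ring SLIM
 * footprint that lpfc_sli_setup() accumulates into totiocbsize, restated
 * as a helper. The name is hypothetical; the arithmetic matches the loop
 * above.
 */
#if 0
static int
lpfc_example_ring_slim_bytes(struct lpfc_sli_ring *pring)
{
	/* command entries plus response entries, each times entry size */
	return (pring->sli.sli3.numCiocb * pring->sli.sli3.sizeCiocb) +
	       (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
}
#endif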
8883 /**
8884  * lpfc_sli_queue_setup - Queue initialization function
8885  * @phba: Pointer to HBA context object.
8886  *
8887  * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
8888  * ring. This function also initializes ring indices of each ring.
8889  * This function is called during the initialization of the SLI
8890  * interface of an HBA.
8891  * This function is called with no lock held and always returns
8892  * 1.
8893  **/
8894 int
8895 lpfc_sli_queue_setup(struct lpfc_hba *phba)
8896 {
8897         struct lpfc_sli *psli;
8898         struct lpfc_sli_ring *pring;
8899         int i;
8900
8901         psli = &phba->sli;
8902         spin_lock_irq(&phba->hbalock);
8903         INIT_LIST_HEAD(&psli->mboxq);
8904         INIT_LIST_HEAD(&psli->mboxq_cmpl);
8905         /* Initialize list headers for txq and txcmplq as double linked lists */
8906         for (i = 0; i < psli->num_rings; i++) {
8907                 pring = &psli->ring[i];
8908                 pring->ringno = i;
8909                 pring->sli.sli3.next_cmdidx  = 0;
8910                 pring->sli.sli3.local_getidx = 0;
8911                 pring->sli.sli3.cmdidx = 0;
8912                 INIT_LIST_HEAD(&pring->txq);
8913                 INIT_LIST_HEAD(&pring->txcmplq);
8914                 INIT_LIST_HEAD(&pring->iocb_continueq);
8915                 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
8916                 INIT_LIST_HEAD(&pring->postbufq);
8917                 spin_lock_init(&pring->ring_lock);
8918         }
8919         spin_unlock_irq(&phba->hbalock);
8920         return 1;
8921 }
8922
8923 /**
8924  * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
8925  * @phba: Pointer to HBA context object.
8926  *
8927  * This routine flushes the mailbox command subsystem. It will unconditionally
8928  * flush all the mailbox commands in the three possible stages in the mailbox
8929  * command sub-system: pending mailbox command queue; the outstanding mailbox
8930  * command; and the completed mailbox command queue. It is the caller's responsibility
8931  * to make sure that the driver is in the proper state to flush the mailbox
8932  * command sub-system. Namely, the posting of mailbox commands into the
8933  * pending mailbox command queue from the various clients must be stopped;
8934  * either the HBA is in a state where it will never work on the outstanding
8935  * mailbox command (such as in EEH or ERATT conditions) or the outstanding
8936  * mailbox command has been completed.
8937  **/
8938 static void
8939 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
8940 {
8941         LIST_HEAD(completions);
8942         struct lpfc_sli *psli = &phba->sli;
8943         LPFC_MBOXQ_t *pmb;
8944         unsigned long iflag;
8945
8946         /* Flush all the mailbox commands in the mbox system */
8947         spin_lock_irqsave(&phba->hbalock, iflag);
8948         /* The pending mailbox command queue */
8949         list_splice_init(&phba->sli.mboxq, &completions);
8950         /* The outstanding active mailbox command */
8951         if (psli->mbox_active) {
8952                 list_add_tail(&psli->mbox_active->list, &completions);
8953                 psli->mbox_active = NULL;
8954                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8955         }
8956         /* The completed mailbox command queue */
8957         list_splice_init(&phba->sli.mboxq_cmpl, &completions);
8958         spin_unlock_irqrestore(&phba->hbalock, iflag);
8959
8960         /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
8961         while (!list_empty(&completions)) {
8962                 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
8963                 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
8964                 if (pmb->mbox_cmpl)
8965                         pmb->mbox_cmpl(phba, pmb);
8966         }
8967 }
8968
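/*
 * Illustrative sketch (not part of the driver source): the general form of
 * the flush pattern used by lpfc_sli_mbox_sys_flush() - detach entries from
 * a shared queue while holding the lock, then run the completion handlers
 * with the lock dropped so they may safely reacquire it. The helper name
 * is hypothetical.
 */
#if 0
static void
lpfc_example_drain_mbox_list(struct lpfc_hba *phba, struct list_head *src)
{
	LIST_HEAD(completions);
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;

	/* detach under the lock */
	spin_lock_irqsave(&phba->hbalock, iflag);
	list_splice_init(src, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* complete outside the lock */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}
#endif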
8969 /**
8970  * lpfc_sli_host_down - Vport cleanup function
8971  * @vport: Pointer to virtual port object.
8972  *
8973  * lpfc_sli_host_down is called to clean up the resources
8974  * associated with a vport before destroying virtual
8975  * port data structures.
8976  * This function performs the following operations:
8977  * - Free discovery resources associated with this virtual
8978  *   port.
8979  * - Free iocbs associated with this virtual port in
8980  *   the txq.
8981  * - Send abort for all iocb commands associated with this
8982  *   vport in txcmplq.
8983  *
8984  * This function is called with no lock held and always returns 1.
8985  **/
8986 int
8987 lpfc_sli_host_down(struct lpfc_vport *vport)
8988 {
8989         LIST_HEAD(completions);
8990         struct lpfc_hba *phba = vport->phba;
8991         struct lpfc_sli *psli = &phba->sli;
8992         struct lpfc_sli_ring *pring;
8993         struct lpfc_iocbq *iocb, *next_iocb;
8994         int i;
8995         unsigned long flags = 0;
8996         uint16_t prev_pring_flag;
8997
8998         lpfc_cleanup_discovery_resources(vport);
8999
9000         spin_lock_irqsave(&phba->hbalock, flags);
9001         for (i = 0; i < psli->num_rings; i++) {
9002                 pring = &psli->ring[i];
9003                 prev_pring_flag = pring->flag;
9004                 /* Only slow rings */
9005                 if (pring->ringno == LPFC_ELS_RING) {
9006                         pring->flag |= LPFC_DEFERRED_RING_EVENT;
9007                         /* Set the lpfc data pending flag */
9008                         set_bit(LPFC_DATA_READY, &phba->data_flags);
9009                 }
9010                 /*
9011                  * Error everything on the txq since these iocbs have not been
9012                  * given to the FW yet.
9013                  */
9014                 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
9015                         if (iocb->vport != vport)
9016                                 continue;
9017                         list_move_tail(&iocb->list, &completions);
9018                         pring->txq_cnt--;
9019                 }
9020
9021                 /* Next issue ABTS for everything on the txcmplq */
9022                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
9023                                                                         list) {
9024                         if (iocb->vport != vport)
9025                                 continue;
9026                         lpfc_sli_issue_abort_iotag(phba, pring, iocb);
9027                 }
9028
9029                 pring->flag = prev_pring_flag;
9030         }
9031
9032         spin_unlock_irqrestore(&phba->hbalock, flags);
9033
9034         /* Cancel all the IOCBs from the completions list */
9035         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9036                               IOERR_SLI_DOWN);
9037         return 1;
9038 }
9039
9040 /**
9041  * lpfc_sli_hba_down - Resource cleanup function for the HBA
9042  * @phba: Pointer to HBA context object.
9043  *
9044  * This function cleans up all iocbs, buffers, and mailbox commands
9045  * while shutting down the HBA. This function is called with no
9046  * lock held and always returns 1.
9047  * This function does the following to cleanup driver resources:
9048  * - Free discovery resources for each virtual port
9049  * - Cleanup any pending fabric iocbs
9050  * - Iterate through the iocb txq and free each entry
9051  *   in the list.
9052  * - Free up any buffer posted to the HBA
9053  * - Free mailbox commands in the mailbox queue.
9054  **/
9055 int
9056 lpfc_sli_hba_down(struct lpfc_hba *phba)
9057 {
9058         LIST_HEAD(completions);
9059         struct lpfc_sli *psli = &phba->sli;
9060         struct lpfc_sli_ring *pring;
9061         struct lpfc_dmabuf *buf_ptr;
9062         unsigned long flags = 0;
9063         int i;
9064
9065         /* Shutdown the mailbox command sub-system */
9066         lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
9067
9068         lpfc_hba_down_prep(phba);
9069
9070         lpfc_fabric_abort_hba(phba);
9071
9072         spin_lock_irqsave(&phba->hbalock, flags);
9073         for (i = 0; i < psli->num_rings; i++) {
9074                 pring = &psli->ring[i];
9075                 /* Only slow rings */
9076                 if (pring->ringno == LPFC_ELS_RING) {
9077                         pring->flag |= LPFC_DEFERRED_RING_EVENT;
9078                         /* Set the lpfc data pending flag */
9079                         set_bit(LPFC_DATA_READY, &phba->data_flags);
9080                 }
9081
9082                 /*
9083                  * Error everything on the txq since these iocbs have not been
9084                  * given to the FW yet.
9085                  */
9086                 list_splice_init(&pring->txq, &completions);
9087                 pring->txq_cnt = 0;
9088
9089         }
9090         spin_unlock_irqrestore(&phba->hbalock, flags);
9091
9092         /* Cancel all the IOCBs from the completions list */
9093         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9094                               IOERR_SLI_DOWN);
9095
9096         spin_lock_irqsave(&phba->hbalock, flags);
9097         list_splice_init(&phba->elsbuf, &completions);
9098         phba->elsbuf_cnt = 0;
9099         phba->elsbuf_prev_cnt = 0;
9100         spin_unlock_irqrestore(&phba->hbalock, flags);
9101
9102         while (!list_empty(&completions)) {
9103                 list_remove_head(&completions, buf_ptr,
9104                         struct lpfc_dmabuf, list);
9105                 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
9106                 kfree(buf_ptr);
9107         }
9108
9109         /* Return any active mbox cmds */
9110         del_timer_sync(&psli->mbox_tmo);
9111
9112         spin_lock_irqsave(&phba->pport->work_port_lock, flags);
9113         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9114         spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
9115
9116         return 1;
9117 }
9118
9119 /**
9120  * lpfc_sli_pcimem_bcopy - SLI memory copy function
9121  * @srcp: Source memory pointer.
9122  * @destp: Destination memory pointer.
9123  * @cnt: Number of bytes to be copied (a multiple of sizeof(uint32_t)).
9124  *
9125  * This function is used for copying data between driver memory
9126  * and the SLI memory. This function also changes the endianness
9127  * of each word if native endianness is different from SLI
9128  * endianness. This function can be called with or without
9129  * a lock held.
9130  **/
9131 void
9132 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
9133 {
9134         uint32_t *src = srcp;
9135         uint32_t *dest = destp;
9136         uint32_t ldata;
9137         int i;
9138
9139         for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
9140                 ldata = *src;
9141                 ldata = le32_to_cpu(ldata);
9142                 *dest = ldata;
9143                 src++;
9144                 dest++;
9145         }
9146 }
9147
9148
9149 /**
9150  * lpfc_sli_bemem_bcopy - SLI memory copy function
9151  * @srcp: Source memory pointer.
9152  * @destp: Destination memory pointer.
9153  * @cnt: Number of bytes to be copied (a multiple of sizeof(uint32_t)).
9154  *
9155  * This function is used for copying data from a data structure
9156  * with big-endian representation to native endianness.
9157  * This function can be called with or without a lock held.
9158  **/
9159 void
9160 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
9161 {
9162         uint32_t *src = srcp;
9163         uint32_t *dest = destp;
9164         uint32_t ldata;
9165         int i;
9166
9167         for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
9168                 ldata = *src;
9169                 ldata = be32_to_cpu(ldata);
9170                 *dest = ldata;
9171                 src++;
9172                 dest++;
9173         }
9174 }
9175
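/*
 * Illustrative sketch (not part of the driver source): both copy routines
 * above take a byte count and endian-convert each 32-bit word as it is
 * copied. The helper name and buffer size are arbitrary.
 */
#if 0
static void
lpfc_example_copy_from_slim(void *slim_src)
{
	uint32_t local[8];	/* native-endian destination */

	/* cnt is in bytes and must be a multiple of sizeof(uint32_t) */
	lpfc_sli_pcimem_bcopy(slim_src, local, sizeof(local));
}
#endif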
9176 /**
9177  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
9178  * @phba: Pointer to HBA context object.
9179  * @pring: Pointer to driver SLI ring object.
9180  * @mp: Pointer to driver buffer object.
9181  *
9182  * This function is called with no lock held.
9183  * It always returns zero after adding the buffer to the postbufq
9184  * buffer list.
9185  **/
9186 int
9187 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9188                          struct lpfc_dmabuf *mp)
9189 {
9190         /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
9191            later */
9192         spin_lock_irq(&phba->hbalock);
9193         list_add_tail(&mp->list, &pring->postbufq);
9194         pring->postbufq_cnt++;
9195         spin_unlock_irq(&phba->hbalock);
9196         return 0;
9197 }
9198
9199 /**
9200  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
9201  * @phba: Pointer to HBA context object.
9202  *
9203  * When HBQ is enabled, buffers are searched based on tags. This function
9204  * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
9205  * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
9206  * does not conflict with tags of buffers posted for unsolicited events.
9207  * The function returns the allocated tag. The function is called with
9208  * no locks held.
9209  **/
9210 uint32_t
9211 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
9212 {
9213         spin_lock_irq(&phba->hbalock);
9214         phba->buffer_tag_count++;
9215         /*
9216          * Always set QUE_BUFTAG_BIT to distinguish this tag from
9217          * a tag assigned by the HBQ.
9218          */
9219         phba->buffer_tag_count |= QUE_BUFTAG_BIT;
9220         spin_unlock_irq(&phba->hbalock);
9221         return phba->buffer_tag_count;
9222 }
9223
9224 /**
9225  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
9226  * @phba: Pointer to HBA context object.
9227  * @pring: Pointer to driver SLI ring object.
9228  * @tag: Buffer tag.
9229  *
9230  * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
9231  * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
9232  * iocb is posted to the response ring with the tag of the buffer.
9233  * This function searches the pring->postbufq list using the tag
9234  * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
9235  * iocb. If the buffer is found, its lpfc_dmabuf object is
9236  * returned to the caller; otherwise NULL is returned.
9237  * This function is called with no lock held.
9238  **/
9239 struct lpfc_dmabuf *
9240 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9241                         uint32_t tag)
9242 {
9243         struct lpfc_dmabuf *mp, *next_mp;
9244         struct list_head *slp = &pring->postbufq;
9245
9246         /* Search postbufq, from the beginning, looking for a match on tag */
9247         spin_lock_irq(&phba->hbalock);
9248         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9249                 if (mp->buffer_tag == tag) {
9250                         list_del_init(&mp->list);
9251                         pring->postbufq_cnt--;
9252                         spin_unlock_irq(&phba->hbalock);
9253                         return mp;
9254                 }
9255         }
9256
9257         spin_unlock_irq(&phba->hbalock);
9258         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9259                         "0402 Cannot find virtual addr for buffer tag on "
9260                         "ring %d Data x%lx x%p x%p x%x\n",
9261                         pring->ringno, (unsigned long) tag,
9262                         slp->next, slp->prev, pring->postbufq_cnt);
9263
9264         return NULL;
9265 }
9266
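/*
 * Illustrative sketch (not part of the driver source): the intended round
 * trip for QUE_BUFTAG_BIT tags - allocate a tag, record it in the
 * lpfc_dmabuf, post the buffer on postbufq, and look it up again when the
 * matching CMD_IOCB_RET_XRI64_CX completion arrives. The helper name is
 * hypothetical and the lpfc_dmabuf is assumed to be allocated elsewhere.
 */
#if 0
static void
lpfc_example_tag_round_trip(struct lpfc_hba *phba,
			    struct lpfc_sli_ring *pring,
			    struct lpfc_dmabuf *mp)
{
	struct lpfc_dmabuf *found;

	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
	lpfc_sli_ringpostbuf_put(phba, pring, mp);

	/* ... later, from the response-ring handler ... */
	found = lpfc_sli_ring_taggedbuf_get(phba, pring, mp->buffer_tag);
	/* found == mp on success; NULL if the tag was not on postbufq */
}
#endif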
9267 /**
9268  * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
9269  * @phba: Pointer to HBA context object.
9270  * @pring: Pointer to driver SLI ring object.
9271  * @phys: DMA address of the buffer.
9272  *
9273  * This function searches the buffer list using the dma_address
9274  * of an unsolicited event to find the driver's lpfc_dmabuf object
9275  * corresponding to the dma_address. The function returns the
9276  * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
9277  * This function is called by the ct and els unsolicited event
9278  * handlers to get the buffer associated with the unsolicited
9279  * event.
9280  *
9281  * This function is called with no lock held.
9282  **/
9283 struct lpfc_dmabuf *
9284 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9285                          dma_addr_t phys)
9286 {
9287         struct lpfc_dmabuf *mp, *next_mp;
9288         struct list_head *slp = &pring->postbufq;
9289
9290         /* Search postbufq, from the beginning, looking for a match on phys */
9291         spin_lock_irq(&phba->hbalock);
9292         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9293                 if (mp->phys == phys) {
9294                         list_del_init(&mp->list);
9295                         pring->postbufq_cnt--;
9296                         spin_unlock_irq(&phba->hbalock);
9297                         return mp;
9298                 }
9299         }
9300
9301         spin_unlock_irq(&phba->hbalock);
9302         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9303                         "0410 Cannot find virtual addr for mapped buf on "
9304                         "ring %d Data x%llx x%p x%p x%x\n",
9305                         pring->ringno, (unsigned long long)phys,
9306                         slp->next, slp->prev, pring->postbufq_cnt);
9307         return NULL;
9308 }
9309
9310 /**
9311  * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
9312  * @phba: Pointer to HBA context object.
9313  * @cmdiocb: Pointer to driver command iocb object.
9314  * @rspiocb: Pointer to driver response iocb object.
9315  *
9316  * This function is the completion handler for the abort iocbs for
9317  * ELS commands. This function is called from the ELS ring event
9318  * handler with no lock held. This function frees memory resources
9319  * associated with the abort iocb.
9320  **/
9321 static void
9322 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9323                         struct lpfc_iocbq *rspiocb)
9324 {
9325         IOCB_t *irsp = &rspiocb->iocb;
9326         uint16_t abort_iotag, abort_context;
9327         struct lpfc_iocbq *abort_iocb = NULL;
9328
9329         if (irsp->ulpStatus) {
9330
9331                 /*
9332                  * Assume that the port already completed and returned, or
9333                  * will return the iocb. Just Log the message.
9334                  */
9335                 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
9336                 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
9337
9338                 spin_lock_irq(&phba->hbalock);
9339                 if (phba->sli_rev < LPFC_SLI_REV4) {
9340                         if (abort_iotag != 0 &&
9341                                 abort_iotag <= phba->sli.last_iotag)
9342                                 abort_iocb =
9343                                         phba->sli.iocbq_lookup[abort_iotag];
9344                 } else
9345                         /* For sli4 the abort_tag is the XRI,
9346                          * so the abort routine puts the iotag of the iocb
9347                          * being aborted in the context field of the abort
9348                          * IOCB.
9349                          */
9350                         abort_iocb = phba->sli.iocbq_lookup[abort_context];
9351
9352                 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
9353                                 "0327 Cannot abort els iocb %p "
9354                                 "with tag %x context %x, abort status %x, "
9355                                 "abort code %x\n",
9356                                 abort_iocb, abort_iotag, abort_context,
9357                                 irsp->ulpStatus, irsp->un.ulpWord[4]);
9358
9359                 spin_unlock_irq(&phba->hbalock);
9360         }
9361         lpfc_sli_release_iocbq(phba, cmdiocb);
9362         return;
9363 }
9364
9365 /**
9366  * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
9367  * @phba: Pointer to HBA context object.
9368  * @cmdiocb: Pointer to driver command iocb object.
9369  * @rspiocb: Pointer to driver response iocb object.
9370  *
9371  * The function is called from SLI ring event handler with no
9372  * lock held. This function is the completion handler for ELS commands
9373  * which are aborted. The function frees memory resources used for
9374  * the aborted ELS commands.
9375  **/
9376 static void
9377 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9378                      struct lpfc_iocbq *rspiocb)
9379 {
9380         IOCB_t *irsp = &rspiocb->iocb;
9381
9382         /* ELS cmd tag <ulpIoTag> completes */
9383         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
9384                         "0139 Ignoring ELS cmd tag x%x completion Data: "
9385                         "x%x x%x x%x\n",
9386                         irsp->ulpIoTag, irsp->ulpStatus,
9387                         irsp->un.ulpWord[4], irsp->ulpTimeout);
9388         if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
9389                 lpfc_ct_free_iocb(phba, cmdiocb);
9390         else
9391                 lpfc_els_free_iocb(phba, cmdiocb);
9392         return;
9393 }
9394
9395 /**
9396  * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
9397  * @phba: Pointer to HBA context object.
9398  * @pring: Pointer to driver SLI ring object.
9399  * @cmdiocb: Pointer to driver command iocb object.
9400  *
9401  * This function issues an abort iocb for the provided command iocb down to
9402  * the port. Unless the outstanding command iocb is itself an abort
9403  * request, this function issues the abort unconditionally. This function is
9404  * called with hbalock held. The function returns 0 when it fails due to
9405  * memory allocation failure or when the command iocb is an abort request.
9406  **/
9407 static int
9408 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9409                            struct lpfc_iocbq *cmdiocb)
9410 {
9411         struct lpfc_vport *vport = cmdiocb->vport;
9412         struct lpfc_iocbq *abtsiocbp;
9413         IOCB_t *icmd = NULL;
9414         IOCB_t *iabt = NULL;
9415         int retval;
9416         unsigned long iflags;
9417
9418         /*
9419          * There are certain command types we don't want to abort.  And we
9420          * don't want to abort commands that are already in the process of
9421          * being aborted.
9422          */
9423         icmd = &cmdiocb->iocb;
9424         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9425             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9426             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
9427                 return 0;
9428
9429         /* issue ABTS for this IOCB based on iotag */
9430         abtsiocbp = __lpfc_sli_get_iocbq(phba);
9431         if (abtsiocbp == NULL)
9432                 return 0;
9433
9434         /* Mark the command as driver-aborted so the completion path
9435          * sets the correct status before calling the completion handler.
9436          */
9437         cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
9438
9439         iabt = &abtsiocbp->iocb;
9440         iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
9441         iabt->un.acxri.abortContextTag = icmd->ulpContext;
9442         if (phba->sli_rev == LPFC_SLI_REV4) {
9443                 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
9444                 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
9445         }
9446         else
9447                 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
9448         iabt->ulpLe = 1;
9449         iabt->ulpClass = icmd->ulpClass;
9450
9451         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9452         abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
9453         if (cmdiocb->iocb_flag & LPFC_IO_FCP)
9454                 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
9455
9456         if (phba->link_state >= LPFC_LINK_UP)
9457                 iabt->ulpCommand = CMD_ABORT_XRI_CN;
9458         else
9459                 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
9460
9461         abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
9462
9463         lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
9464                          "0339 Abort xri x%x, original iotag x%x, "
9465                          "abort cmd iotag x%x\n",
9466                          iabt->un.acxri.abortIoTag,
9467                          iabt->un.acxri.abortContextTag,
9468                          abtsiocbp->iotag);
9469
9470         if (phba->sli_rev == LPFC_SLI_REV4) {
9471                 /* Note: both hbalock and ring_lock need to be set here */
9472                 spin_lock_irqsave(&pring->ring_lock, iflags);
9473                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9474                         abtsiocbp, 0);
9475                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
9476         } else {
9477                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9478                         abtsiocbp, 0);
9479         }
9480
9481         if (retval)
9482                 __lpfc_sli_release_iocbq(phba, abtsiocbp);
9483
9484         /*
9485          * Caller to this routine should check for IOCB_ERROR
9486          * and handle it properly.  This routine no longer removes
9487          * iocb off txcmplq and call compl in case of IOCB_ERROR.
9488          */
9489         return retval;
9490 }
9491
9492 /**
9493  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
9494  * @phba: Pointer to HBA context object.
9495  * @pring: Pointer to driver SLI ring object.
9496  * @cmdiocb: Pointer to driver command iocb object.
9497  *
9498  * This function issues an abort iocb for the provided command iocb. In case
9499  * of unloading, the abort iocb will not be issued to commands on the ELS
9500  * ring. Instead, the completion callback of those commands is changed
9501  * so that nothing happens when they finish. This function is called with
9502  * hbalock held. The function returns 0 when the command iocb is an abort
9503  * request.
9504  **/
9505 int
9506 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9507                            struct lpfc_iocbq *cmdiocb)
9508 {
9509         struct lpfc_vport *vport = cmdiocb->vport;
9510         int retval = IOCB_ERROR;
9511         IOCB_t *icmd = NULL;
9512
9513         /*
9514          * There are certain command types we don't want to abort.  And we
9515          * don't want to abort commands that are already in the process of
9516          * being aborted.
9517          */
9518         icmd = &cmdiocb->iocb;
9519         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9520             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9521             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
9522                 return 0;
9523
9524         /*
9525          * If we're unloading, don't abort iocb on the ELS ring, but change
9526          * the callback so that nothing happens when it finishes.
9527          */
9528         if ((vport->load_flag & FC_UNLOADING) &&
9529             (pring->ringno == LPFC_ELS_RING)) {
9530                 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
9531                         cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
9532                 else
9533                         cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
9534                 goto abort_iotag_exit;
9535         }
9536
9537         /* Now, we try to issue the abort to the cmdiocb out */
9538         retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
9539
9540 abort_iotag_exit:
9541         /*
9542          * Caller to this routine should check for IOCB_ERROR
9543          * and handle it properly.  This routine no longer removes
9544          * iocb off txcmplq and call compl in case of IOCB_ERROR.
9545          */
9546         return retval;
9547 }
9548
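/*
 * Illustrative sketch (not part of the driver source): a minimal caller of
 * lpfc_sli_issue_abort_iotag(). The hbalock must already be held, and per
 * the comment above the caller only checks for IOCB_ERROR; on failure the
 * routine neither removes the iocb from the txcmplq nor runs its
 * completion. The helper name is hypothetical.
 */
#if 0
static int
lpfc_example_abort_one(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		       struct lpfc_iocbq *iocb)
{
	/* caller holds phba->hbalock */
	int rc = lpfc_sli_issue_abort_iotag(phba, pring, iocb);

	/* IOCB_ERROR means the abort was not issued; the iocb is still
	 * on the txcmplq and its completion has not been called.
	 */
	return rc;
}
#endif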
9549 /**
9550  * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
9551  * @phba: Pointer to HBA context object.
9552  * @pring: Pointer to driver SLI ring object.
9553  *
9554  * This function aborts all iocbs in the given ring and frees all the iocb
9555  * objects in txq. This function issues abort iocbs unconditionally for all
9556  * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
9557  * to complete before the return of this function. The caller is not required
9558  * to hold any locks.
9559  **/
9560 static void
9561 lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
9562 {
9563         LIST_HEAD(completions);
9564         struct lpfc_iocbq *iocb, *next_iocb;
9565
9566         if (pring->ringno == LPFC_ELS_RING)
9567                 lpfc_fabric_abort_hba(phba);
9568
9569         spin_lock_irq(&phba->hbalock);
9570
9571         /* Take off all the iocbs on txq for cancelling */
9572         list_splice_init(&pring->txq, &completions);
9573         pring->txq_cnt = 0;
9574
9575         /* Next issue ABTS for everything on the txcmplq */
9576         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
9577                 lpfc_sli_abort_iotag_issue(phba, pring, iocb);
9578
9579         spin_unlock_irq(&phba->hbalock);
9580
9581         /* Cancel all the IOCBs from the completions list */
9582         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9583                               IOERR_SLI_ABORTED);
9584 }
9585
9586 /**
9587  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
9588  * @phba: pointer to lpfc HBA data structure.
9589  *
9590  * This routine will abort all pending and outstanding iocbs to an HBA.
9591  **/
9592 void
9593 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
9594 {
9595         struct lpfc_sli *psli = &phba->sli;
9596         struct lpfc_sli_ring *pring;
9597         int i;
9598
9599         for (i = 0; i < psli->num_rings; i++) {
9600                 pring = &psli->ring[i];
9601                 lpfc_sli_iocb_ring_abort(phba, pring);
9602         }
9603 }
9604
9605 /**
9606  * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
9607  * @iocbq: Pointer to driver iocb object.
9608  * @vport: Pointer to driver virtual port object.
9609  * @tgt_id: SCSI ID of the target.
9610  * @lun_id: LUN ID of the scsi device.
9611  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
9612  *
9613  * This function acts as an iocb filter for functions which abort or count
9614  * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
9615  * 0 if the filtering criteria are met for the given iocb and will return
9616  * 1 if they are not.
9617  * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
9618  * given iocb is for the SCSI device specified by vport, tgt_id and
9619  * lun_id parameters.
9620  * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
9621  * given iocb is for the SCSI target specified by vport and tgt_id
9622  * parameters.
9623  * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
9624  * given iocb is for the SCSI host associated with the given vport.
9625  * This function is called with no locks held.
9626  **/
9627 static int
9628 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
9629                            uint16_t tgt_id, uint64_t lun_id,
9630                            lpfc_ctx_cmd ctx_cmd)
9631 {
9632         struct lpfc_scsi_buf *lpfc_cmd;
9633         int rc = 1;
9634
9635         if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
9636                 return rc;
9637
9638         if (iocbq->vport != vport)
9639                 return rc;
9640
9641         lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
9642
9643         if (lpfc_cmd->pCmd == NULL)
9644                 return rc;
9645
9646         switch (ctx_cmd) {
9647         case LPFC_CTX_LUN:
9648                 if ((lpfc_cmd->rdata->pnode) &&
9649                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
9650                     (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
9651                         rc = 0;
9652                 break;
9653         case LPFC_CTX_TGT:
9654                 if ((lpfc_cmd->rdata->pnode) &&
9655                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
9656                         rc = 0;
9657                 break;
9658         case LPFC_CTX_HOST:
9659                 rc = 0;
9660                 break;
9661         default:
9662                 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
9663                         __func__, ctx_cmd);
9664                 break;
9665         }
9666
9667         return rc;
9668 }
9669
9670 /**
9671  * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
9672  * @vport: Pointer to virtual port.
9673  * @tgt_id: SCSI ID of the target.
9674  * @lun_id: LUN ID of the scsi device.
9675  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
9676  *
9677  * This function returns the number of FCP commands pending for the vport.
9678  * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
9679  * commands pending on the vport for the SCSI device specified by the
9680  * tgt_id and lun_id parameters.
9681  * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
9682  * commands pending on the vport for the SCSI target specified by the
9683  * tgt_id parameter.
9684  * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
9685  * commands pending on the vport.
9686  * This function returns the number of iocbs which satisfy the filter.
9687  * This function is called without any lock held.
9688  **/
9689 int
9690 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
9691                   lpfc_ctx_cmd ctx_cmd)
9692 {
9693         struct lpfc_hba *phba = vport->phba;
9694         struct lpfc_iocbq *iocbq;
9695         int sum, i;
9696
9697         for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
9698                 iocbq = phba->sli.iocbq_lookup[i];
9699
9700                 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
9701                                                 ctx_cmd) == 0)
9702                         sum++;
9703         }
9704
9705         return sum;
9706 }
9707
9708 /**
9709  * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
9710  * @phba: Pointer to HBA context object
9711  * @cmdiocb: Pointer to command iocb object.
9712  * @rspiocb: Pointer to response iocb object.
9713  *
9714  * This function is called when an aborted FCP iocb completes. This
9715  * function is called by the ring event handler with no lock held.
9716  * This function frees the iocb.
9717  **/
9718 void
9719 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9720                         struct lpfc_iocbq *rspiocb)
9721 {
9722         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9723                         "3096 ABORT_XRI_CN completing on xri x%x "
9724                         "original iotag x%x, abort cmd iotag x%x "
9725                         "status 0x%x, reason 0x%x\n",
9726                         cmdiocb->iocb.un.acxri.abortContextTag,
9727                         cmdiocb->iocb.un.acxri.abortIoTag,
9728                         cmdiocb->iotag, rspiocb->iocb.ulpStatus,
9729                         rspiocb->iocb.un.ulpWord[4]);
9730         lpfc_sli_release_iocbq(phba, cmdiocb);
9731         return;
9732 }
9733
9734 /**
9735  * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
9736  * @vport: Pointer to virtual port.
9737  * @pring: Pointer to driver SLI ring object.
9738  * @tgt_id: SCSI ID of the target.
9739  * @lun_id: LUN ID of the scsi device.
9740  * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
9741  *
9742  * This function sends an abort command for every SCSI command
9743  * associated with the given virtual port pending on the ring,
9744  * as filtered by the lpfc_sli_validate_fcp_iocb function.
9745  * When abort_cmd == LPFC_CTX_LUN, the function sends an abort only to
9746  * the FCP iocbs associated with the LUN specified by the tgt_id and
9747  * lun_id parameters.
9748  * When abort_cmd == LPFC_CTX_TGT, the function sends an abort only to
9749  * the FCP iocbs associated with the SCSI target specified by tgt_id.
9750  * When abort_cmd == LPFC_CTX_HOST, the function sends an abort to all
9751  * FCP iocbs associated with the virtual port.
9752  * This function returns the number of iocbs it failed to abort.
9753  * This function is called with no locks held.
9754  **/
9755 int
9756 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
9757                     uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
9758 {
9759         struct lpfc_hba *phba = vport->phba;
9760         struct lpfc_iocbq *iocbq;
9761         struct lpfc_iocbq *abtsiocb;
9762         IOCB_t *cmd = NULL;
9763         int errcnt = 0, ret_val = 0;
9764         int i;
9765
9766         for (i = 1; i <= phba->sli.last_iotag; i++) {
9767                 iocbq = phba->sli.iocbq_lookup[i];
9768
9769                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
9770                                                abort_cmd) != 0)
9771                         continue;
9772
9773                 /* issue ABTS for this IOCB based on iotag */
9774                 abtsiocb = lpfc_sli_get_iocbq(phba);
9775                 if (abtsiocb == NULL) {
9776                         errcnt++;
9777                         continue;
9778                 }
9779
9780                 cmd = &iocbq->iocb;
9781                 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
9782                 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
9783                 if (phba->sli_rev == LPFC_SLI_REV4)
9784                         abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
9785                 else
9786                         abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
9787                 abtsiocb->iocb.ulpLe = 1;
9788                 abtsiocb->iocb.ulpClass = cmd->ulpClass;
9789                 abtsiocb->vport = phba->pport;
9790
9791                 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9792                 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
9793                 if (iocbq->iocb_flag & LPFC_IO_FCP)
9794                         abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
9795
9796                 if (lpfc_is_link_up(phba))
9797                         abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
9798                 else
9799                         abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
9800
9801                 /* Setup callback routine and issue the command. */
9802                 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
9803                 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
9804                                               abtsiocb, 0);
9805                 if (ret_val == IOCB_ERROR) {
9806                         lpfc_sli_release_iocbq(phba, abtsiocb);
9807                         errcnt++;
9808                         continue;
9809                 }
9810         }
9811
9812         return errcnt;
9813 }
9814
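/*
 * Illustrative sketch (not part of the driver source): a reset-style
 * sequence built from the two routines above - abort every FCP iocb for
 * one SCSI target, then poll lpfc_sli_sum_iocb() until the aborted
 * commands drain. The helper name, retry bound, and msleep() interval are
 * arbitrary choices for the sketch.
 */
#if 0
static int
lpfc_example_abort_target(struct lpfc_vport *vport,
			  struct lpfc_sli_ring *pring, uint16_t tgt_id)
{
	int failed, pending, retries = 60;

	/* lun_id is ignored by the LPFC_CTX_TGT filter */
	failed = lpfc_sli_abort_iocb(vport, pring, tgt_id, 0, LPFC_CTX_TGT);

	do {
		pending = lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT);
		if (!pending)
			break;
		msleep(100);	/* aborts complete asynchronously */
	} while (--retries);

	return (failed || pending) ? -EIO : 0;
}
#endif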
9815 /**
9816  * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
9817  * @phba: Pointer to HBA context object.
9818  * @cmdiocbq: Pointer to command iocb.
9819  * @rspiocbq: Pointer to response iocb.
9820  *
9821  * This function is the completion handler for iocbs issued using
9822  * lpfc_sli_issue_iocb_wait function. This function is called by the
9823  * ring event handler function without any lock held. This function
9824  * can be called from both worker thread context and interrupt
9825  * context. This function can also be called from another thread which
9826  * cleans up the SLI layer objects.
9827  * This function copies the contents of the response iocb to the
9828  * response iocb memory object provided by the caller of
9829  * lpfc_sli_issue_iocb_wait and then wakes up the thread that
9830  * sleeps waiting for the iocb completion.
9831  **/
9832 static void
9833 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
9834                         struct lpfc_iocbq *cmdiocbq,
9835                         struct lpfc_iocbq *rspiocbq)
9836 {
9837         wait_queue_head_t *pdone_q;
9838         unsigned long iflags;
9839         struct lpfc_scsi_buf *lpfc_cmd;
9840
9841         spin_lock_irqsave(&phba->hbalock, iflags);
9842         cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
9843         if (cmdiocbq->context2 && rspiocbq)
9844                 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
9845                        &rspiocbq->iocb, sizeof(IOCB_t));
9846
9847         /* Set the exchange busy flag for task management commands */
9848         if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
9849                 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
9850                 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
9851                         cur_iocbq);
9852                 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
9853         }
9854
9855         pdone_q = cmdiocbq->context_un.wait_queue;
9856         if (pdone_q)
9857                 wake_up(pdone_q);
9858         spin_unlock_irqrestore(&phba->hbalock, iflags);
9859         return;
9860 }
9861
9862 /**
9863  * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
9864  * @phba: Pointer to HBA context object.
9865  * @piocbq: Pointer to command iocb.
9866  * @flag: Flag to test.
9867  *
9868  * This routine grabs the hbalock and then tests the iocb_flag to
9869  * see if the passed-in flag is set.
9870  * Returns:
9871  * 1 if flag is set.
9872  * 0 if flag is not set.
9873  **/
9874 static int
9875 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
9876                  struct lpfc_iocbq *piocbq, uint32_t flag)
9877 {
9878         unsigned long iflags;
9879         int ret;
9880
9881         spin_lock_irqsave(&phba->hbalock, iflags);
9882         ret = piocbq->iocb_flag & flag;
9883         spin_unlock_irqrestore(&phba->hbalock, iflags);
9884         return ret;
9885
9886 }
9887
9888 /**
9889  * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
9890  * @phba: Pointer to HBA context object.
9891  * @ring_number: Ring number on which to issue the iocb.
9892  * @piocb: Pointer to command iocb.
9893  * @prspiocbq: Pointer to response iocb.
9894  * @timeout: Timeout in number of seconds.
9895  *
9896  * This function issues the iocb to firmware and waits for the
9897  * iocb to complete. If the iocb command is not
9898  * completed within timeout seconds, it returns IOCB_TIMEDOUT.
9899  * Caller should not free the iocb resources if this function
9900  * returns IOCB_TIMEDOUT.
9901  * The function waits for the iocb completion using a
9902  * non-interruptible wait.
9903  * This function will sleep while waiting for the iocb to
9904  * complete, so it must not be called from any context that
9905  * does not allow sleeping, nor with interrupts disabled.
9906  * This function assumes that iocb completions occur while
9907  * it sleeps, so it cannot be called from the thread that
9908  * processes iocb completions for this ring.
9909  * This function clears the LPFC_IO_WAKE flag of the iocb
9910  * object before issuing it; the iocb completion handler
9911  * sets this flag and wakes this thread when the iocb
9912  * completes.
9913  * The contents of the response iocb will be copied to prspiocbq
9914  * by the completion handler when the command completes.
9915  * This function returns IOCB_SUCCESS on success.
9916  * This function is called with no lock held.
9917  **/
9918 int
9919 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
9920                          uint32_t ring_number,
9921                          struct lpfc_iocbq *piocb,
9922                          struct lpfc_iocbq *prspiocbq,
9923                          uint32_t timeout)
9924 {
9925         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
9926         long timeleft, timeout_req = 0;
9927         int retval = IOCB_SUCCESS;
9928         uint32_t creg_val;
9929         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
9930         /*
9931          * If the caller has provided a response iocbq buffer, then context2
9932          * must be NULL; otherwise it is an error.
9933          */
9934         if (prspiocbq) {
9935                 if (piocb->context2)
9936                         return IOCB_ERROR;
9937                 piocb->context2 = prspiocbq;
9938         }
9939
9940         piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
9941         piocb->context_un.wait_queue = &done_q;
9942         piocb->iocb_flag &= ~LPFC_IO_WAKE;
9943
9944         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9945                 if (lpfc_readl(phba->HCregaddr, &creg_val))
9946                         return IOCB_ERROR;
9947                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
9948                 writel(creg_val, phba->HCregaddr);
9949                 readl(phba->HCregaddr); /* flush */
9950         }
9951
9952         retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
9953                                      SLI_IOCB_RET_IOCB);
9954         if (retval == IOCB_SUCCESS) {
9955                 timeout_req = timeout * HZ;
9956                 timeleft = wait_event_timeout(done_q,
9957                                 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
9958                                 timeout_req);
9959
9960                 if (piocb->iocb_flag & LPFC_IO_WAKE) {
9961                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9962                                         "0331 IOCB wake signaled\n");
9963                 } else if (timeleft == 0) {
9964                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9965                                         "0338 IOCB wait timeout error - no "
9966                                         "wake response Data x%x\n", timeout);
9967                         retval = IOCB_TIMEDOUT;
9968                 } else {
9969                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9970                                         "0330 IOCB wake NOT set, "
9971                                         "Data x%x x%lx\n",
9972                                         timeout, (timeleft / jiffies));
9973                         retval = IOCB_TIMEDOUT;
9974                 }
9975         } else if (retval == IOCB_BUSY) {
9976                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9977                         "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
9978                         phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
9979                 return retval;
9980         } else {
9981                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9982                                 "0332 IOCB wait issue failed, Data x%x\n",
9983                                 retval);
9984                 retval = IOCB_ERROR;
9985         }
9986
9987         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9988                 if (lpfc_readl(phba->HCregaddr, &creg_val))
9989                         return IOCB_ERROR;
9990                 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
9991                 writel(creg_val, phba->HCregaddr);
9992                 readl(phba->HCregaddr); /* flush */
9993         }
9994
9995         if (prspiocbq)
9996                 piocb->context2 = NULL;
9997
9998         piocb->context_un.wait_queue = NULL;
9999         piocb->iocb_cmpl = NULL;
10000         return retval;
10001 }
10002
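/*
 * Hypothetical usage sketch (illustration only, not part of the driver):
 * issue a pre-built ELS IOCB synchronously on the ELS ring and map the
 * result to an errno.  The command/response iocbq pair is assumed to have
 * been allocated and initialized by the caller.
 */
static int __maybe_unused
lpfc_example_issue_els_sync(struct lpfc_hba *phba,
                            struct lpfc_iocbq *cmdiocbq,
                            struct lpfc_iocbq *rspiocbq)
{
        int rc;

        /* Wait up to 30 seconds for the ELS command to complete */
        rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
                                      rspiocbq, 30);
        if (rc == IOCB_TIMEDOUT)
                return -ETIMEDOUT; /* iocb is still owned by the handler */
        if (rc != IOCB_SUCCESS)
                return -EIO;

        /* On success the response contents have been copied to rspiocbq */
        return 0;
}
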
10003 /**
10004  * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
10005  * @phba: Pointer to HBA context object.
10006  * @pmboxq: Pointer to driver mailbox object.
10007  * @timeout: Timeout in number of seconds.
10008  *
10009  * This function issues the mailbox to firmware and waits for the
10010  * mailbox command to complete. If the mailbox command is not
10011  * completed within timeout seconds, it returns MBX_TIMEOUT.
10012  * The function waits for the mailbox completion using an
10013  * interruptible wait. If the thread is woken up by a
10014  * signal, MBX_TIMEOUT is returned to the caller. The caller
10015  * must not free the mailbox resources if this function returns
10016  * MBX_TIMEOUT.
10017  * This function will sleep while waiting for mailbox completion.
10018  * So, this function should not be called from any context which
10019  * does not allow sleeping. For the same reason, this function
10020  * cannot be called with interrupts disabled.
10021  * This function assumes that the mailbox completion occurs while
10022  * this function sleeps; therefore, it cannot be called from
10023  * the worker thread which processes mailbox completions.
10024  * This function is called in the context of HBA management
10025  * applications.
10026  * This function returns MBX_SUCCESS when successful.
10027  * This function is called with no lock held.
10028  **/
10029 int
10030 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
10031                          uint32_t timeout)
10032 {
10033         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
10034         int retval;
10035         unsigned long flag;
10036
10037         /* The caller must leave context1 empty. */
10038         if (pmboxq->context1)
10039                 return MBX_NOT_FINISHED;
10040
10041         pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
10042         /* setup wake call as mailbox callback */
10043         pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
10044         /* setup context field to pass wait_queue pointer to wake function  */
10045         pmboxq->context1 = &done_q;
10046
10047         /* now issue the command */
10048         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
10049         if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
10050                 wait_event_interruptible_timeout(done_q,
10051                                 pmboxq->mbox_flag & LPFC_MBX_WAKE,
10052                                 timeout * HZ);
10053
10054                 spin_lock_irqsave(&phba->hbalock, flag);
10055                 pmboxq->context1 = NULL;
10056                 /*
10057                  * if LPFC_MBX_WAKE flag is set the mailbox is completed
10058                  * else do not free the resources.
10059                  */
10060                 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
10061                         retval = MBX_SUCCESS;
10062                         lpfc_sli4_swap_str(phba, pmboxq);
10063                 } else {
10064                         retval = MBX_TIMEOUT;
10065                         pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10066                 }
10067                 spin_unlock_irqrestore(&phba->hbalock, flag);
10068         }
10069
10070         return retval;
10071 }
10072
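/*
 * Hypothetical usage sketch (illustration only, not part of the driver):
 * issue a HEARTBEAT mailbox command synchronously, honoring the ownership
 * rule documented above: on MBX_TIMEOUT the completion handler still owns
 * the mailbox, so it must not be freed here.
 */
static int __maybe_unused
lpfc_example_sync_heartbeat(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *pmboxq;
        int rc;

        pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmboxq)
                return -ENOMEM;

        lpfc_heart_beat(phba, pmboxq);
        rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
        if (rc != MBX_TIMEOUT)
                mempool_free(pmboxq, phba->mbox_mem_pool);

        return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
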
10073 /**
10074  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
10075  * @phba: Pointer to HBA context.
10076  * @mbx_action: LPFC_MBX_WAIT or LPFC_MBX_NO_WAIT.
10077  * This function is called to shutdown the driver's mailbox sub-system.
10078  * It first marks the mailbox sub-system as blocked to prevent further
10079  * asynchronous mailbox commands from being issued off the pending mailbox
10080  * command queue. If the mailbox command sub-system shutdown is due to
10081  * HBA error conditions such as EEH or ERATT, this routine shall invoke
10082  * the mailbox sub-system flush routine to forcefully bring down the
10083  * mailbox sub-system. Otherwise, if it is due to a normal condition (such
10084  * as with offline or HBA function reset), this routine will wait for the
10085  * outstanding mailbox command to complete before invoking the mailbox
10086  * sub-system flush routine to gracefully bring down mailbox sub-system.
10087  **/
10088 void
10089 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
10090 {
10091         struct lpfc_sli *psli = &phba->sli;
10092         unsigned long timeout;
10093
10094         if (mbx_action == LPFC_MBX_NO_WAIT) {
10095                 /* delay 100ms for port state */
10096                 msleep(100);
10097                 lpfc_sli_mbox_sys_flush(phba);
10098                 return;
10099         }
10100         timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
10101
10102         spin_lock_irq(&phba->hbalock);
10103         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
10104
10105         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
10106                 /* Determine how long we might wait for the active mailbox
10107                  * command to be gracefully completed by firmware.
10108                  */
10109                 if (phba->sli.mbox_active)
10110                         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
10111                                                 phba->sli.mbox_active) *
10112                                                 1000) + jiffies;
10113                 spin_unlock_irq(&phba->hbalock);
10114
10115                 while (phba->sli.mbox_active) {
10116                         /* Check active mailbox complete status every 2ms */
10117                         msleep(2);
10118                         if (time_after(jiffies, timeout))
10119                                 /* Timed out; let the mailbox flush routine
10120                                  * forcefully release the active mailbox command
10121                                  */
10122                                 break;
10123                 }
10124         } else
10125                 spin_unlock_irq(&phba->hbalock);
10126
10127         lpfc_sli_mbox_sys_flush(phba);
10128 }
10129
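/*
 * Hypothetical usage sketch (illustration only, not part of the driver):
 * the two shutdown dispositions documented above.  Error paths such as
 * EEH or ERATT cannot wait and flush immediately; a normal offline waits
 * for the active mailbox command first.
 */
static void __maybe_unused
lpfc_example_mbox_shutdown(struct lpfc_hba *phba, bool hba_error)
{
        lpfc_sli_mbox_sys_shutdown(phba, hba_error ? LPFC_MBX_NO_WAIT
                                                   : LPFC_MBX_WAIT);
}
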
10130 /**
10131  * lpfc_sli_eratt_read - read sli-3 error attention events
10132  * @phba: Pointer to HBA context.
10133  *
10134  * This function is called to read the SLI3 device error attention registers
10135  * for possible error attention events. The caller must hold the hbalock
10136  * with spin_lock_irq().
10137  *
10138  * This function returns 1 when there is Error Attention in the Host Attention
10139  * Register and returns 0 otherwise.
10140  **/
10141 static int
10142 lpfc_sli_eratt_read(struct lpfc_hba *phba)
10143 {
10144         uint32_t ha_copy;
10145
10146         /* Read chip Host Attention (HA) register */
10147         if (lpfc_readl(phba->HAregaddr, &ha_copy))
10148                 goto unplug_err;
10149
10150         if (ha_copy & HA_ERATT) {
10151                 /* Read host status register to retrieve error event */
10152                 if (lpfc_sli_read_hs(phba))
10153                         goto unplug_err;
10154
10155                 /* Check if a deferred error condition is active */
10156                 if ((HS_FFER1 & phba->work_hs) &&
10157                     ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
10158                       HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
10159                         phba->hba_flag |= DEFER_ERATT;
10160                         /* Clear all interrupt enable conditions */
10161                         writel(0, phba->HCregaddr);
10162                         readl(phba->HCregaddr);
10163                 }
10164
10165                 /* Set the driver HA work bitmap */
10166                 phba->work_ha |= HA_ERATT;
10167                 /* Indicate polling handles this ERATT */
10168                 phba->hba_flag |= HBA_ERATT_HANDLED;
10169                 return 1;
10170         }
10171         return 0;
10172
10173 unplug_err:
10174         /* Set the driver HS work bitmap */
10175         phba->work_hs |= UNPLUG_ERR;
10176         /* Set the driver HA work bitmap */
10177         phba->work_ha |= HA_ERATT;
10178         /* Indicate polling handles this ERATT */
10179         phba->hba_flag |= HBA_ERATT_HANDLED;
10180         return 1;
10181 }
10182
10183 /**
10184  * lpfc_sli4_eratt_read - read sli-4 error attention events
10185  * @phba: Pointer to HBA context.
10186  *
10187  * This function is called to read the SLI4 device error attention registers
10188  * for possible error attention events. The caller must hold the hbalock
10189  * with spin_lock_irq().
10190  *
10191  * This function returns 1 when there is Error Attention in the Host Attention
10192  * Register and returns 0 otherwise.
10193  **/
10194 static int
10195 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
10196 {
10197         uint32_t uerr_sta_hi, uerr_sta_lo;
10198         uint32_t if_type, portsmphr;
10199         struct lpfc_register portstat_reg;
10200
10201         /*
10202          * For now, use the SLI4 device internal unrecoverable error
10203          * registers for error attention. This can be changed later.
10204          */
10205         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10206         switch (if_type) {
10207         case LPFC_SLI_INTF_IF_TYPE_0:
10208                 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
10209                         &uerr_sta_lo) ||
10210                         lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
10211                         &uerr_sta_hi)) {
10212                         phba->work_hs |= UNPLUG_ERR;
10213                         phba->work_ha |= HA_ERATT;
10214                         phba->hba_flag |= HBA_ERATT_HANDLED;
10215                         return 1;
10216                 }
10217                 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
10218                     (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
10219                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10220                                         "1423 HBA Unrecoverable error: "
10221                                         "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
10222                                         "ue_mask_lo_reg=0x%x, "
10223                                         "ue_mask_hi_reg=0x%x\n",
10224                                         uerr_sta_lo, uerr_sta_hi,
10225                                         phba->sli4_hba.ue_mask_lo,
10226                                         phba->sli4_hba.ue_mask_hi);
10227                         phba->work_status[0] = uerr_sta_lo;
10228                         phba->work_status[1] = uerr_sta_hi;
10229                         phba->work_ha |= HA_ERATT;
10230                         phba->hba_flag |= HBA_ERATT_HANDLED;
10231                         return 1;
10232                 }
10233                 break;
10234         case LPFC_SLI_INTF_IF_TYPE_2:
10235                 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
10236                         &portstat_reg.word0) ||
10237                         lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
10238                         &portsmphr)){
10239                         phba->work_hs |= UNPLUG_ERR;
10240                         phba->work_ha |= HA_ERATT;
10241                         phba->hba_flag |= HBA_ERATT_HANDLED;
10242                         return 1;
10243                 }
10244                 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
10245                         phba->work_status[0] =
10246                                 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
10247                         phba->work_status[1] =
10248                                 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
10249                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10250                                         "2885 Port Status Event: "
10251                                         "port status reg 0x%x, "
10252                                         "port smphr reg 0x%x, "
10253                                         "error 1=0x%x, error 2=0x%x\n",
10254                                         portstat_reg.word0,
10255                                         portsmphr,
10256                                         phba->work_status[0],
10257                                         phba->work_status[1]);
10258                         phba->work_ha |= HA_ERATT;
10259                         phba->hba_flag |= HBA_ERATT_HANDLED;
10260                         return 1;
10261                 }
10262                 break;
10263         case LPFC_SLI_INTF_IF_TYPE_1:
10264         default:
10265                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10266                                 "2886 HBA Error Attention on unsupported "
10267                                 "if type %d.\n", if_type);
10268                 return 1;
10269         }
10270
10271         return 0;
10272 }
10273
10274 /**
10275  * lpfc_sli_check_eratt - check error attention events
10276  * @phba: Pointer to HBA context.
10277  *
10278  * This function is called from timer soft interrupt context to check HBA's
10279  * error attention register bit for error attention events.
10280  *
10281  * This function returns 1 when there is Error Attention in the Host Attention
10282  * Register and returns 0 otherwise.
10283  **/
10284 int
10285 lpfc_sli_check_eratt(struct lpfc_hba *phba)
10286 {
10287         uint32_t ha_copy;
10288
10289         /* If somebody is waiting to handle an eratt, don't process it
10290          * here. The brdkill function will do this.
10291          */
10292         if (phba->link_flag & LS_IGNORE_ERATT)
10293                 return 0;
10294
10295         /* Check if interrupt handler handles this ERATT */
10296         spin_lock_irq(&phba->hbalock);
10297         if (phba->hba_flag & HBA_ERATT_HANDLED) {
10298                 /* Interrupt handler has handled ERATT */
10299                 spin_unlock_irq(&phba->hbalock);
10300                 return 0;
10301         }
10302
10303         /*
10304          * If there is deferred error attention, do not check for error
10305          * attention
10306          */
10307         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
10308                 spin_unlock_irq(&phba->hbalock);
10309                 return 0;
10310         }
10311
10312         /* If PCI channel is offline, don't process it */
10313         if (unlikely(pci_channel_offline(phba->pcidev))) {
10314                 spin_unlock_irq(&phba->hbalock);
10315                 return 0;
10316         }
10317
10318         switch (phba->sli_rev) {
10319         case LPFC_SLI_REV2:
10320         case LPFC_SLI_REV3:
10321                 /* Read chip Host Attention (HA) register */
10322                 ha_copy = lpfc_sli_eratt_read(phba);
10323                 break;
10324         case LPFC_SLI_REV4:
10325                 /* Read device Unrecoverable Error (UERR) registers */
10326                 ha_copy = lpfc_sli4_eratt_read(phba);
10327                 break;
10328         default:
10329                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10330                                 "0299 Invalid SLI revision (%d)\n",
10331                                 phba->sli_rev);
10332                 ha_copy = 0;
10333                 break;
10334         }
10335         spin_unlock_irq(&phba->hbalock);
10336
10337         return ha_copy;
10338 }
10339
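/*
 * Hypothetical usage sketch (illustration only, not part of the driver):
 * a periodic poller consuming lpfc_sli_check_eratt().  The real driver
 * does this from its error-attention poll timer and lets the worker
 * thread service the event.
 */
static void __maybe_unused
lpfc_example_poll_eratt(struct lpfc_hba *phba)
{
        if (lpfc_sli_check_eratt(phba))
                /* Error attention recorded in phba->work_ha */
                lpfc_worker_wake_up(phba);
}
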
10340 /**
10341  * lpfc_intr_state_check - Check device state for interrupt handling
10342  * @phba: Pointer to HBA context.
10343  *
10344  * This inline routine checks whether the device or its PCI slot is in a
10345  * state in which the interrupt should be handled.
10346  *
10347  * This function returns 0 if the device and the PCI slot are in a state in
10348  * which the interrupt should be handled, otherwise -EIO.
10349  */
10350 static inline int
10351 lpfc_intr_state_check(struct lpfc_hba *phba)
10352 {
10353         /* If the pci channel is offline, ignore all the interrupts */
10354         if (unlikely(pci_channel_offline(phba->pcidev)))
10355                 return -EIO;
10356
10357         /* Update device level interrupt statistics */
10358         phba->sli.slistat.sli_intr++;
10359
10360         /* Ignore all interrupts during initialization. */
10361         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10362                 return -EIO;
10363
10364         return 0;
10365 }
10366
10367 /**
10368  * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
10369  * @irq: Interrupt number.
10370  * @dev_id: The device context pointer.
10371  *
10372  * This function is directly called from the PCI layer as an interrupt
10373  * service routine when device with SLI-3 interface spec is enabled with
10374  * MSI-X multi-message interrupt mode and there are slow-path events in
10375  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
10376  * interrupt mode, this function is called as part of the device-level
10377  * interrupt handler. When the PCI slot is in error recovery or the HBA
10378  * is undergoing initialization, the interrupt handler will not process
10379  * the interrupt. The link attention and ELS ring attention events are
10380  * handled by the worker thread. The interrupt handler signals the worker
10381  * thread and returns for these events. This function is called without
10382  * any lock held. It gets the hbalock to access and update SLI data
10383  * structures.
10384  *
10385  * This function returns IRQ_HANDLED when interrupt is handled else it
10386  * returns IRQ_NONE.
10387  **/
10388 irqreturn_t
10389 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
10390 {
10391         struct lpfc_hba  *phba;
10392         uint32_t ha_copy, hc_copy;
10393         uint32_t work_ha_copy;
10394         unsigned long status;
10395         unsigned long iflag;
10396         uint32_t control;
10397
10398         MAILBOX_t *mbox, *pmbox;
10399         struct lpfc_vport *vport;
10400         struct lpfc_nodelist *ndlp;
10401         struct lpfc_dmabuf *mp;
10402         LPFC_MBOXQ_t *pmb;
10403         int rc;
10404
10405         /*
10406          * Get the driver's phba structure from the dev_id and
10407          * assume the HBA is not interrupting.
10408          */
10409         phba = (struct lpfc_hba *)dev_id;
10410
10411         if (unlikely(!phba))
10412                 return IRQ_NONE;
10413
10414         /*
10415          * Extra care is needed when this function is invoked as an
10416          * individual interrupt handler in MSI-X multi-message interrupt mode
10417          */
10418         if (phba->intr_type == MSIX) {
10419                 /* Check device state for handling interrupt */
10420                 if (lpfc_intr_state_check(phba))
10421                         return IRQ_NONE;
10422                 /* Need to read HA REG for slow-path events */
10423                 spin_lock_irqsave(&phba->hbalock, iflag);
10424                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10425                         goto unplug_error;
10426                 /* If somebody is waiting to handle an eratt don't process it
10427                  * here. The brdkill function will do this.
10428                  */
10429                 if (phba->link_flag & LS_IGNORE_ERATT)
10430                         ha_copy &= ~HA_ERATT;
10431                 /* Check the need for handling ERATT in interrupt handler */
10432                 if (ha_copy & HA_ERATT) {
10433                         if (phba->hba_flag & HBA_ERATT_HANDLED)
10434                                 /* ERATT polling has handled ERATT */
10435                                 ha_copy &= ~HA_ERATT;
10436                         else
10437                                 /* Indicate interrupt handler handles ERATT */
10438                                 phba->hba_flag |= HBA_ERATT_HANDLED;
10439                 }
10440
10441                 /*
10442                  * If there is deferred error attention, do not check for any
10443                  * interrupt.
10444                  */
10445                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
10446                         spin_unlock_irqrestore(&phba->hbalock, iflag);
10447                         return IRQ_NONE;
10448                 }
10449
10450                 /* Clear up only attention source related to slow-path */
10451                 if (lpfc_readl(phba->HCregaddr, &hc_copy))
10452                         goto unplug_error;
10453
10454                 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
10455                         HC_LAINT_ENA | HC_ERINT_ENA),
10456                         phba->HCregaddr);
10457                 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
10458                         phba->HAregaddr);
10459                 writel(hc_copy, phba->HCregaddr);
10460                 readl(phba->HAregaddr); /* flush */
10461                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10462         } else
10463                 ha_copy = phba->ha_copy;
10464
10465         work_ha_copy = ha_copy & phba->work_ha_mask;
10466
10467         if (work_ha_copy) {
10468                 if (work_ha_copy & HA_LATT) {
10469                         if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
10470                                 /*
10471                                  * Turn off Link Attention interrupts
10472                                  * until CLEAR_LA done
10473                                  */
10474                                 spin_lock_irqsave(&phba->hbalock, iflag);
10475                                 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
10476                                 if (lpfc_readl(phba->HCregaddr, &control))
10477                                         goto unplug_error;
10478                                 control &= ~HC_LAINT_ENA;
10479                                 writel(control, phba->HCregaddr);
10480                                 readl(phba->HCregaddr); /* flush */
10481                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10482                         }
10483                         else
10484                                 work_ha_copy &= ~HA_LATT;
10485                 }
10486
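                /*
                 * The Host Attention register carries a 4-bit attention
                 * nibble per ring; shifting by (4 * ring number) and
                 * masking with HA_RXMASK isolates one ring's receive
                 * attention bits.
                 */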
10487                 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
10488                         /*
10489                          * Turn off Slow Rings interrupts, LPFC_ELS_RING is
10490                          * the only slow ring.
10491                          */
10492                         status = (work_ha_copy &
10493                                 (HA_RXMASK  << (4*LPFC_ELS_RING)));
10494                         status >>= (4*LPFC_ELS_RING);
10495                         if (status & HA_RXMASK) {
10496                                 spin_lock_irqsave(&phba->hbalock, iflag);
10497                                 if (lpfc_readl(phba->HCregaddr, &control))
10498                                         goto unplug_error;
10499
10500                                 lpfc_debugfs_slow_ring_trc(phba,
10501                                 "ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
10502                                 control, status,
10503                                 (uint32_t)phba->sli.slistat.sli_intr);
10504
10505                                 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
10506                                         lpfc_debugfs_slow_ring_trc(phba,
10507                                                 "ISR Disable ring:"
10508                                                 "pwork:x%x hawork:x%x wait:x%x",
10509                                                 phba->work_ha, work_ha_copy,
10510                                                 (uint32_t)((unsigned long)
10511                                                 &phba->work_waitq));
10512
10513                                         control &=
10514                                             ~(HC_R0INT_ENA << LPFC_ELS_RING);
10515                                         writel(control, phba->HCregaddr);
10516                                         readl(phba->HCregaddr); /* flush */
10517                                 }
10518                                 else {
10519                                         lpfc_debugfs_slow_ring_trc(phba,
10520                                                 "ISR slow ring:   pwork:"
10521                                                 "x%x hawork:x%x wait:x%x",
10522                                                 phba->work_ha, work_ha_copy,
10523                                                 (uint32_t)((unsigned long)
10524                                                 &phba->work_waitq));
10525                                 }
10526                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10527                         }
10528                 }
10529                 spin_lock_irqsave(&phba->hbalock, iflag);
10530                 if (work_ha_copy & HA_ERATT) {
10531                         if (lpfc_sli_read_hs(phba))
10532                                 goto unplug_error;
10533                         /*
10534                          * Check if there is a deferred error condition
10535                          * is active
10536                          */
10537                         if ((HS_FFER1 & phba->work_hs) &&
10538                                 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
10539                                   HS_FFER6 | HS_FFER7 | HS_FFER8) &
10540                                   phba->work_hs)) {
10541                                 phba->hba_flag |= DEFER_ERATT;
10542                                 /* Clear all interrupt enable conditions */
10543                                 writel(0, phba->HCregaddr);
10544                                 readl(phba->HCregaddr);
10545                         }
10546                 }
10547
10548                 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
10549                         pmb = phba->sli.mbox_active;
10550                         pmbox = &pmb->u.mb;
10551                         mbox = phba->mbox;
10552                         vport = pmb->vport;
10553
10554                         /* First check out the status word */
10555                         lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
10556                         if (pmbox->mbxOwner != OWN_HOST) {
10557                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10558                                 /*
10559                                  * Stray Mailbox Interrupt, mbxCommand <cmd>
10560                                  * mbxStatus <status>
10561                                  */
10562                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
10563                                                 LOG_SLI,
10564                                                 "(%d):0304 Stray Mailbox "
10565                                                 "Interrupt mbxCommand x%x "
10566                                                 "mbxStatus x%x\n",
10567                                                 (vport ? vport->vpi : 0),
10568                                                 pmbox->mbxCommand,
10569                                                 pmbox->mbxStatus);
10570                                 /* clear mailbox attention bit */
10571                                 work_ha_copy &= ~HA_MBATT;
10572                         } else {
10573                                 phba->sli.mbox_active = NULL;
10574                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10575                                 phba->last_completion_time = jiffies;
10576                                 del_timer(&phba->sli.mbox_tmo);
10577                                 if (pmb->mbox_cmpl) {
10578                                         lpfc_sli_pcimem_bcopy(mbox, pmbox,
10579                                                         MAILBOX_CMD_SIZE);
10580                                         if (pmb->out_ext_byte_len &&
10581                                                 pmb->context2)
10582                                                 lpfc_sli_pcimem_bcopy(
10583                                                 phba->mbox_ext,
10584                                                 pmb->context2,
10585                                                 pmb->out_ext_byte_len);
10586                                 }
10587                                 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
10588                                         pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
10589
10590                                         lpfc_debugfs_disc_trc(vport,
10591                                                 LPFC_DISC_TRC_MBOX_VPORT,
10592                                                 "MBOX dflt rpi: "
10593                                                 "status:x%x rpi:x%x",
10594                                                 (uint32_t)pmbox->mbxStatus,
10595                                                 pmbox->un.varWords[0], 0);
10596
10597                                         if (!pmbox->mbxStatus) {
10598                                                 mp = (struct lpfc_dmabuf *)
10599                                                         (pmb->context1);
10600                                                 ndlp = (struct lpfc_nodelist *)
10601                                                         pmb->context2;
10602
10603                                                 /* Reg_LOGIN of dflt RPI was
10604                                                  * successful. Now let's get
10605                                                  * rid of the RPI using the
10606                                                  * same mbox buffer.
10607                                                  */
10608                                                 lpfc_unreg_login(phba,
10609                                                         vport->vpi,
10610                                                         pmbox->un.varWords[0],
10611                                                         pmb);
10612                                                 pmb->mbox_cmpl =
10613                                                         lpfc_mbx_cmpl_dflt_rpi;
10614                                                 pmb->context1 = mp;
10615                                                 pmb->context2 = ndlp;
10616                                                 pmb->vport = vport;
10617                                                 rc = lpfc_sli_issue_mbox(phba,
10618                                                                 pmb,
10619                                                                 MBX_NOWAIT);
10620                                                 if (rc != MBX_BUSY)
10621                                                         lpfc_printf_log(phba,
10622                                                         KERN_ERR,
10623                                                         LOG_MBOX | LOG_SLI,
10624                                                         "0350 rc should have "
10625                                                         "been MBX_BUSY\n");
10626                                                 if (rc != MBX_NOT_FINISHED)
10627                                                         goto send_current_mbox;
10628                                         }
10629                                 }
10630                                 spin_lock_irqsave(
10631                                                 &phba->pport->work_port_lock,
10632                                                 iflag);
10633                                 phba->pport->work_port_events &=
10634                                         ~WORKER_MBOX_TMO;
10635                                 spin_unlock_irqrestore(
10636                                                 &phba->pport->work_port_lock,
10637                                                 iflag);
10638                                 lpfc_mbox_cmpl_put(phba, pmb);
10639                         }
10640                 } else
10641                         spin_unlock_irqrestore(&phba->hbalock, iflag);
10642
10643                 if ((work_ha_copy & HA_MBATT) &&
10644                     (phba->sli.mbox_active == NULL)) {
10645 send_current_mbox:
10646                         /* Process next mailbox command if there is one */
10647                         do {
10648                                 rc = lpfc_sli_issue_mbox(phba, NULL,
10649                                                          MBX_NOWAIT);
10650                         } while (rc == MBX_NOT_FINISHED);
10651                         if (rc != MBX_SUCCESS)
10652                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
10653                                                 LOG_SLI, "0349 rc should be "
10654                                                 "MBX_SUCCESS\n");
10655                 }
10656
10657                 spin_lock_irqsave(&phba->hbalock, iflag);
10658                 phba->work_ha |= work_ha_copy;
10659                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10660                 lpfc_worker_wake_up(phba);
10661         }
10662         return IRQ_HANDLED;
10663 unplug_error:
10664         spin_unlock_irqrestore(&phba->hbalock, iflag);
10665         return IRQ_HANDLED;
10666
10667 } /* lpfc_sli_sp_intr_handler */
10668
10669 /**
10670  * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
10671  * @irq: Interrupt number.
10672  * @dev_id: The device context pointer.
10673  *
10674  * This function is directly called from the PCI layer as an interrupt
10675  * service routine when device with SLI-3 interface spec is enabled with
10676  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
10677  * ring event in the HBA. However, when the device is enabled with either
10678  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
10679  * device-level interrupt handler. When the PCI slot is in error recovery
10680  * or the HBA is undergoing initialization, the interrupt handler will not
10681  * process the interrupt. The SCSI FCP fast-path ring events are handled in
10682  * interrupt context. This function is called without any lock held.
10683  * It gets the hbalock to access and update SLI data structures.
10684  *
10685  * This function returns IRQ_HANDLED when interrupt is handled else it
10686  * returns IRQ_NONE.
10687  **/
10688 irqreturn_t
10689 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
10690 {
10691         struct lpfc_hba  *phba;
10692         uint32_t ha_copy;
10693         unsigned long status;
10694         unsigned long iflag;
10695
10696         /* Get the driver's phba structure from the dev_id and
10697          * assume the HBA is not interrupting.
10698          */
10699         phba = (struct lpfc_hba *) dev_id;
10700
10701         if (unlikely(!phba))
10702                 return IRQ_NONE;
10703
10704         /*
10705          * Extra care is needed when this function is invoked as an
10706          * individual interrupt handler in MSI-X multi-message interrupt mode
10707          */
10708         if (phba->intr_type == MSIX) {
10709                 /* Check device state for handling interrupt */
10710                 if (lpfc_intr_state_check(phba))
10711                         return IRQ_NONE;
10712                 /* Need to read HA REG for FCP ring and other ring events */
10713                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10714                         return IRQ_HANDLED;
10715                 /* Clear up only attention source related to fast-path */
10716                 spin_lock_irqsave(&phba->hbalock, iflag);
10717                 /*
10718                  * If there is deferred error attention, do not check for
10719                  * any interrupt.
10720                  */
10721                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
10722                         spin_unlock_irqrestore(&phba->hbalock, iflag);
10723                         return IRQ_NONE;
10724                 }
10725                 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
10726                         phba->HAregaddr);
10727                 readl(phba->HAregaddr); /* flush */
10728                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10729         } else
10730                 ha_copy = phba->ha_copy;
10731
10732         /*
10733          * Process all events on FCP ring. Take the optimized path for FCP IO.
10734          */
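        /* Mask off slow-path attention bits; the worker thread owns those */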
10735         ha_copy &= ~(phba->work_ha_mask);
10736
10737         status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
10738         status >>= (4*LPFC_FCP_RING);
10739         if (status & HA_RXMASK)
10740                 lpfc_sli_handle_fast_ring_event(phba,
10741                                                 &phba->sli.ring[LPFC_FCP_RING],
10742                                                 status);
10743
10744         if (phba->cfg_multi_ring_support == 2) {
10745                 /*
10746                  * Process all events on extra ring. Take the optimized path
10747                  * for extra ring IO.
10748                  */
10749                 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
10750                 status >>= (4*LPFC_EXTRA_RING);
10751                 if (status & HA_RXMASK) {
10752                         lpfc_sli_handle_fast_ring_event(phba,
10753                                         &phba->sli.ring[LPFC_EXTRA_RING],
10754                                         status);
10755                 }
10756         }
10757         return IRQ_HANDLED;
10758 }  /* lpfc_sli_fp_intr_handler */
10759
10760 /**
10761  * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
10762  * @irq: Interrupt number.
10763  * @dev_id: The device context pointer.
10764  *
10765  * This function is the HBA device-level interrupt handler to device with
10766  * SLI-3 interface spec, called from the PCI layer when either MSI or
10767  * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
10768  * requires driver attention. This function invokes the slow-path interrupt
10769  * attention handling function and fast-path interrupt attention handling
10770  * function in turn to process the relevant HBA attention events. This
10771  * function is called without any lock held. It gets the hbalock to access
10772  * and update SLI data structures.
10773  *
10774  * This function returns IRQ_HANDLED when interrupt is handled, else it
10775  * returns IRQ_NONE.
10776  **/
10777 irqreturn_t
10778 lpfc_sli_intr_handler(int irq, void *dev_id)
10779 {
10780         struct lpfc_hba  *phba;
10781         irqreturn_t sp_irq_rc, fp_irq_rc;
10782         unsigned long status1, status2;
10783         uint32_t hc_copy;
10784
10785         /*
10786          * Get the driver's phba structure from the dev_id and
10787          * assume the HBA is not interrupting.
10788          */
10789         phba = (struct lpfc_hba *) dev_id;
10790
10791         if (unlikely(!phba))
10792                 return IRQ_NONE;
10793
10794         /* Check device state for handling interrupt */
10795         if (lpfc_intr_state_check(phba))
10796                 return IRQ_NONE;
10797
10798         spin_lock(&phba->hbalock);
10799         if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
10800                 spin_unlock(&phba->hbalock);
10801                 return IRQ_HANDLED;
10802         }
10803
10804         if (unlikely(!phba->ha_copy)) {
10805                 spin_unlock(&phba->hbalock);
10806                 return IRQ_NONE;
10807         } else if (phba->ha_copy & HA_ERATT) {
10808                 if (phba->hba_flag & HBA_ERATT_HANDLED)
10809                         /* ERATT polling has handled ERATT */
10810                         phba->ha_copy &= ~HA_ERATT;
10811                 else
10812                         /* Indicate interrupt handler handles ERATT */
10813                         phba->hba_flag |= HBA_ERATT_HANDLED;
10814         }
10815
10816         /*
10817          * If there is deferred error attention, do not check for any interrupt.
10818          */
10819         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
10820                 spin_unlock(&phba->hbalock);
10821                 return IRQ_NONE;
10822         }
10823
10824         /* Clear attention sources except link and error attentions */
10825         if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
10826                 spin_unlock(&phba->hbalock);
10827                 return IRQ_HANDLED;
10828         }
10829         writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
10830                 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
10831                 phba->HCregaddr);
10832         writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
10833         writel(hc_copy, phba->HCregaddr);
10834         readl(phba->HAregaddr); /* flush */
10835         spin_unlock(&phba->hbalock);
10836
10837         /*
10838          * Invokes slow-path host attention interrupt handling as appropriate.
10839          */
10840
10841         /* status of events with mailbox and link attention */
10842         status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
10843
10844         /* status of events with ELS ring */
10845         status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
10846         status2 >>= (4*LPFC_ELS_RING);
10847
10848         if (status1 || (status2 & HA_RXMASK))
10849                 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
10850         else
10851                 sp_irq_rc = IRQ_NONE;
10852
10853         /*
10854          * Invoke fast-path host attention interrupt handling as appropriate.
10855          */
10856
10857         /* status of events with FCP ring */
10858         status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
10859         status1 >>= (4*LPFC_FCP_RING);
10860
10861         /* status of events with extra ring */
10862         if (phba->cfg_multi_ring_support == 2) {
10863                 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
10864                 status2 >>= (4*LPFC_EXTRA_RING);
10865         } else
10866                 status2 = 0;
10867
10868         if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
10869                 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
10870         else
10871                 fp_irq_rc = IRQ_NONE;
10872
10873         /* Return device-level interrupt handling status */
10874         return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
10875 }  /* lpfc_sli_intr_handler */
10876
10877 /**
10878  * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
10879  * @phba: pointer to lpfc hba data structure.
10880  *
10881  * This routine is invoked by the worker thread to process all the pending
10882  * SLI4 FCP XRI abort events.
10883  **/
10884 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
10885 {
10886         struct lpfc_cq_event *cq_event;
10887
10888         /* First, declare the fcp xri abort event has been handled */
10889         spin_lock_irq(&phba->hbalock);
10890         phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
10891         spin_unlock_irq(&phba->hbalock);
10892         /* Now, handle all the fcp xri abort events */
10893         while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
10894                 /* Get the first event from the head of the event queue */
10895                 spin_lock_irq(&phba->hbalock);
10896                 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
10897                                  cq_event, struct lpfc_cq_event, list);
10898                 spin_unlock_irq(&phba->hbalock);
10899                 /* Notify aborted XRI for FCP work queue */
10900                 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
10901                 /* Free the event processed back to the free pool */
10902                 lpfc_sli4_cq_event_release(phba, cq_event);
10903         }
10904 }
10905
10906 /**
10907  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
10908  * @phba: pointer to lpfc hba data structure.
10909  *
10910  * This routine is invoked by the worker thread to process all the pending
10911  * SLI4 ELS XRI abort events.
10912  **/
10913 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
10914 {
10915         struct lpfc_cq_event *cq_event;
10916
10917         /* First, declare the els xri abort event has been handled */
10918         spin_lock_irq(&phba->hbalock);
10919         phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
10920         spin_unlock_irq(&phba->hbalock);
10921         /* Now, handle all the els xri abort events */
10922         while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
10923                 /* Get the first event from the head of the event queue */
10924                 spin_lock_irq(&phba->hbalock);
10925                 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10926                                  cq_event, struct lpfc_cq_event, list);
10927                 spin_unlock_irq(&phba->hbalock);
10928                 /* Notify aborted XRI for ELS work queue */
10929                 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
10930                 /* Free the event processed back to the free pool */
10931                 lpfc_sli4_cq_event_release(phba, cq_event);
10932         }
10933 }
10934
10935 /**
10936  * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
10937  * @phba: pointer to lpfc hba data structure
10938  * @pIocbIn: pointer to the rspiocbq
10939  * @pIocbOut: pointer to the cmdiocbq
10940  * @wcqe: pointer to the completion WCQE
10941  *
10942  * This routine transfers the fields of a command iocbq to a response iocbq
10943  * by copying all the IOCB fields from command iocbq and transferring the
10944  * completion status information from the completion WCQE.
10945  **/
10946 static void
10947 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
10948                               struct lpfc_iocbq *pIocbIn,
10949                               struct lpfc_iocbq *pIocbOut,
10950                               struct lpfc_wcqe_complete *wcqe)
10951 {
10952         unsigned long iflags;
10953         uint32_t status;
10954         size_t offset = offsetof(struct lpfc_iocbq, iocb);
10955
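        /*
         * Copy from the embedded IOCB onward so the response iocbq keeps
         * its own list linkage, iotag/XRI fields and cq_event, all of
         * which precede the 'iocb' member in struct lpfc_iocbq.
         */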
10956         memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
10957                sizeof(struct lpfc_iocbq) - offset);
10958         /* Map WCQE parameters into irspiocb parameters */
10959         status = bf_get(lpfc_wcqe_c_status, wcqe);
10960         pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
10961         if (pIocbOut->iocb_flag & LPFC_IO_FCP) {
10962                 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
10963                         pIocbIn->iocb.un.fcpi.fcpi_parm =
10964                                         pIocbOut->iocb.un.fcpi.fcpi_parm -
10965                                         wcqe->total_data_placed;
10966                 else
10967                         pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
10968         } else {
10969                 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
10970                 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
10971         }
10972
10973         /* Convert BG errors for completion status */
10974         if (status == CQE_STATUS_DI_ERROR) {
10975                 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
10976
10977                 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
10978                         pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
10979                 else
10980                         pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
10981
10982                 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
10983                 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
10984                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
10985                                 BGS_GUARD_ERR_MASK;
10986                 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
10987                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
10988                                 BGS_APPTAG_ERR_MASK;
10989                 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
10990                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
10991                                 BGS_REFTAG_ERR_MASK;
10992
10993                 /* Check to see if there was any good data before the error */
10994                 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
10995                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
10996                                 BGS_HI_WATER_MARK_PRESENT_MASK;
10997                         pIocbIn->iocb.unsli3.sli3_bg.bghm =
10998                                 wcqe->total_data_placed;
10999                 }
11000
11001                 /*
11002          * Set ALL the error bits to indicate we don't know what
11003          * type of error it is.
11004          */
11005                 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
11006                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11007                                 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
11008                                 BGS_GUARD_ERR_MASK);
11009         }
11010
11011         /* Pick up HBA exchange busy condition */
11012         if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
11013                 spin_lock_irqsave(&phba->hbalock, iflags);
11014                 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
11015                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11016         }
11017 }
11018
11019 /**
11020  * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
11021  * @phba: Pointer to HBA context object.
11022  * @irspiocbq: Pointer to the response IOCBQ carrying the WCQE.
11023  *
11024  * This routine handles an ELS work-queue completion event and constructs
11025  * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
11026  * discovery engine to handle.
11027  *
11028  * Return: Pointer to the receive IOCBQ, NULL otherwise.
11029  **/
11030 static struct lpfc_iocbq *
11031 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
11032                                struct lpfc_iocbq *irspiocbq)
11033 {
11034         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
11035         struct lpfc_iocbq *cmdiocbq;
11036         struct lpfc_wcqe_complete *wcqe;
11037         unsigned long iflags;
11038
11039         wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
11040         spin_lock_irqsave(&pring->ring_lock, iflags);
11041         pring->stats.iocb_event++;
11042         /* Look up the ELS command IOCB and create pseudo response IOCB */
11043         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11044                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11045         spin_unlock_irqrestore(&pring->ring_lock, iflags);
11046
11047         if (unlikely(!cmdiocbq)) {
11048                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11049                                 "0386 ELS complete with no corresponding "
11050                                 "cmdiocb: iotag (%d)\n",
11051                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11052                 lpfc_sli_release_iocbq(phba, irspiocbq);
11053                 return NULL;
11054         }
11055
11056         /* Fake the irspiocbq and copy necessary response information */
11057         lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
11058
11059         return irspiocbq;
11060 }
11061
11062 /**
11063  * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
11064  * @phba: Pointer to HBA context object.
11065  * @mcqe: Pointer to mailbox completion queue entry.
11066  *
11067  * This routine processes a mailbox completion queue entry with an
11068  * asynchronous event.
11069  *
11070  * Return: true if work posted to worker thread, otherwise false.
11071  **/
11072 static bool
11073 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
11074 {
11075         struct lpfc_cq_event *cq_event;
11076         unsigned long iflags;
11077
11078         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11079                         "0392 Async Event: word0:x%x, word1:x%x, "
11080                         "word2:x%x, word3:x%x\n", mcqe->word0,
11081                         mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
11082
11083         /* Allocate a new internal CQ_EVENT entry */
11084         cq_event = lpfc_sli4_cq_event_alloc(phba);
11085         if (!cq_event) {
11086                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11087                                 "0394 Failed to allocate CQ_EVENT entry\n");
11088                 return false;
11089         }
11090
11091         /* Move the CQE into an asynchronous event entry */
11092         memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
11093         spin_lock_irqsave(&phba->hbalock, iflags);
11094         list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
11095         /* Set the async event flag */
11096         phba->hba_flag |= ASYNC_EVENT;
11097         spin_unlock_irqrestore(&phba->hbalock, iflags);
11098
11099         return true;
11100 }
11101
11102 /**
11103  * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
11104  * @phba: Pointer to HBA context object.
11105  * @cqe: Pointer to mailbox completion queue entry.
11106  *
11107  * This routine process a mailbox completion queue entry with mailbox
11108  * completion event.
11109  *
11110  * Return: true if work posted to worker thread, otherwise false.
11111  **/
11112 static bool
11113 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
11114 {
11115         uint32_t mcqe_status;
11116         MAILBOX_t *mbox, *pmbox;
11117         struct lpfc_mqe *mqe;
11118         struct lpfc_vport *vport;
11119         struct lpfc_nodelist *ndlp;
11120         struct lpfc_dmabuf *mp;
11121         unsigned long iflags;
11122         LPFC_MBOXQ_t *pmb;
11123         bool workposted = false;
11124         int rc;
11125
11126         /* If this is not a mailbox-completion MCQE, bail out via the consumed check */
11127         if (!bf_get(lpfc_trailer_completed, mcqe))
11128                 goto out_no_mqe_complete;
11129
11130         /* Get the reference to the active mbox command */
11131         spin_lock_irqsave(&phba->hbalock, iflags);
11132         pmb = phba->sli.mbox_active;
11133         if (unlikely(!pmb)) {
11134                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11135                                 "1832 No pending MBOX command to handle\n");
11136                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11137                 goto out_no_mqe_complete;
11138         }
11139         spin_unlock_irqrestore(&phba->hbalock, iflags);
11140         mqe = &pmb->u.mqe;
11141         pmbox = (MAILBOX_t *)&pmb->u.mqe;
11142         mbox = phba->mbox;
11143         vport = pmb->vport;
11144
11145         /* Reset heartbeat timer */
11146         phba->last_completion_time = jiffies;
11147         del_timer(&phba->sli.mbox_tmo);
11148
11149         /* Move mbox data to caller's mailbox region, do endian swapping */
11150         if (pmb->mbox_cmpl && mbox)
11151                 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
11152
11153         /*
11154          * For mcqe errors, conditionally move a modified error code to
11155          * the mbox so that the error will not be missed.
11156          */
11157         mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
11158         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
11159                 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
11160                         bf_set(lpfc_mqe_status, mqe,
11161                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
11162         }
11163         if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
11164                 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
11165                 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
11166                                       "MBOX dflt rpi: status:x%x rpi:x%x",
11167                                       mcqe_status,
11168                                       pmbox->un.varWords[0], 0);
11169                 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
11170                         mp = (struct lpfc_dmabuf *)(pmb->context1);
11171                         ndlp = (struct lpfc_nodelist *)pmb->context2;
11172                         /* Reg_LOGIN of dflt RPI was successful. Now let's get
11173                          * rid of the RPI using the same mbox buffer.
11174                          */
11175                         lpfc_unreg_login(phba, vport->vpi,
11176                                          pmbox->un.varWords[0], pmb);
11177                         pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
11178                         pmb->context1 = mp;
11179                         pmb->context2 = ndlp;
11180                         pmb->vport = vport;
11181                         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
11182                         if (rc != MBX_BUSY)
11183                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11184                                                 LOG_SLI, "0385 rc should "
11185                                                 "have been MBX_BUSY\n");
11186                         if (rc != MBX_NOT_FINISHED)
11187                                 goto send_current_mbox;
11188                 }
11189         }
11190         spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
11191         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11192         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
11193
11194         /* There is mailbox completion work to do */
11195         spin_lock_irqsave(&phba->hbalock, iflags);
11196         __lpfc_mbox_cmpl_put(phba, pmb);
11197         phba->work_ha |= HA_MBATT;
11198         spin_unlock_irqrestore(&phba->hbalock, iflags);
11199         workposted = true;
11200
11201 send_current_mbox:
11202         spin_lock_irqsave(&phba->hbalock, iflags);
11203         /* Release the mailbox command posting token */
11204         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11205         /* Clearing the active mailbox pointer must be in sync with flag clear */
11206         phba->sli.mbox_active = NULL;
11207         spin_unlock_irqrestore(&phba->hbalock, iflags);
11208         /* Wake up worker thread to post the next pending mailbox command */
11209         lpfc_worker_wake_up(phba);
11210 out_no_mqe_complete:
11211         if (bf_get(lpfc_trailer_consumed, mcqe))
11212                 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
11213         return workposted;
11214 }
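
/*
 * Note on the status folding above: when the MCQE reports an error while
 * the mailbox itself still reads MBX_SUCCESS, the handler ORs the MCQE
 * status with LPFC_MBX_ERROR_RANGE and writes the result into the MQE
 * status field, so completion handlers that only inspect the mailbox
 * status still observe the failure.
 */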
11215
11216 /**
11217  * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
11218  * @phba: Pointer to HBA context object.
11219  * @cqe: Pointer to mailbox completion queue entry.
11220  *
11221  * This routine processes a mailbox completion queue entry; it invokes the
11222  * proper mailbox completion or asynchronous event handling routine
11223  * according to the MCQE's async bit.
11224  *
11225  * Return: true if work posted to worker thread, otherwise false.
11226  **/
11227 static bool
11228 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
11229 {
11230         struct lpfc_mcqe mcqe;
11231         bool workposted;
11232
11233         /* Copy the mailbox MCQE and convert endian order as needed */
11234         lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
11235
11236         /* Invoke the proper event handling routine */
11237         if (!bf_get(lpfc_trailer_async, &mcqe))
11238                 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
11239         else
11240                 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
11241         return workposted;
11242 }
11243
11244 /**
11245  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
11246  * @phba: Pointer to HBA context object.
11247  * @cq: Pointer to associated CQ
11248  * @wcqe: Pointer to work-queue completion queue entry.
11249  *
11250  * This routine handles an ELS work-queue completion event.
11251  *
11252  * Return: true if work posted to worker thread, otherwise false.
11253  **/
11254 static bool
11255 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11256                              struct lpfc_wcqe_complete *wcqe)
11257 {
11258         struct lpfc_iocbq *irspiocbq;
11259         unsigned long iflags;
11260         struct lpfc_sli_ring *pring = cq->pring;
11261
11262         /* Get an irspiocbq for later ELS response processing use */
11263         irspiocbq = lpfc_sli_get_iocbq(phba);
11264         if (!irspiocbq) {
11265                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11266                         "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
11267                         "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
11268                         pring->txq_cnt, phba->iocb_cnt,
11269                         phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
11270                         phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
11271                 return false;
11272         }
11273
11274         /* Save off the slow-path queue event for work thread to process */
11275         memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
11276         spin_lock_irqsave(&phba->hbalock, iflags);
11277         list_add_tail(&irspiocbq->cq_event.list,
11278                       &phba->sli4_hba.sp_queue_event);
11279         phba->hba_flag |= HBA_SP_QUEUE_EVT;
11280         spin_unlock_irqrestore(&phba->hbalock, iflags);
11281
11282         return true;
11283 }
11284
11285 /**
11286  * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
11287  * @phba: Pointer to HBA context object.
11288  * @wcqe: Pointer to work-queue completion queue entry.
11289  *
11290  * This routine handles a slow-path WQ entry consumed event by invoking the
11291  * proper WQ release routine on the slow-path WQ.
11292  **/
11293 static void
11294 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
11295                              struct lpfc_wcqe_release *wcqe)
11296 {
11297         /* sanity check on queue memory */
11298         if (unlikely(!phba->sli4_hba.els_wq))
11299                 return;
11300         /* Check for the slow-path ELS work queue */
11301         if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
11302                 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
11303                                      bf_get(lpfc_wcqe_r_wqe_index, wcqe));
11304         else
11305                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11306                                 "2579 Slow-path wqe consume event carries "
11307                                 "mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
11308                                 bf_get(lpfc_wcqe_r_wq_id, wcqe),
11309                                 phba->sli4_hba.els_wq->queue_id);
11310 }
11311
11312 /**
11313  * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event
11314  * @phba: Pointer to HBA context object.
11315  * @cq: Pointer to a WQ completion queue.
11316  * @wcqe: Pointer to work-queue completion queue entry.
11317  *
11318  * This routine handles an XRI abort event.
11319  *
11320  * Return: true if work posted to worker thread, otherwise false.
11321  **/
11322 static bool
11323 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
11324                                    struct lpfc_queue *cq,
11325                                    struct sli4_wcqe_xri_aborted *wcqe)
11326 {
11327         bool workposted = false;
11328         struct lpfc_cq_event *cq_event;
11329         unsigned long iflags;
11330
11331         /* Allocate a new internal CQ_EVENT entry */
11332         cq_event = lpfc_sli4_cq_event_alloc(phba);
11333         if (!cq_event) {
11334                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11335                                 "0602 Failed to allocate CQ_EVENT entry\n");
11336                 return false;
11337         }
11338
11339         /* Move the CQE into the proper xri abort event list */
11340         memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
11341         switch (cq->subtype) {
11342         case LPFC_FCP:
11343                 spin_lock_irqsave(&phba->hbalock, iflags);
11344                 list_add_tail(&cq_event->list,
11345                               &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
11346                 /* Set the fcp xri abort event flag */
11347                 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
11348                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11349                 workposted = true;
11350                 break;
11351         case LPFC_ELS:
11352                 spin_lock_irqsave(&phba->hbalock, iflags);
11353                 list_add_tail(&cq_event->list,
11354                               &phba->sli4_hba.sp_els_xri_aborted_work_queue);
11355                 /* Set the els xri abort event flag */
11356                 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
11357                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11358                 workposted = true;
11359                 break;
11360         default:
11361                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11362                                 "0603 Invalid work queue CQE subtype (x%x)\n",
11363                                 cq->subtype);
11364                 workposted = false;
11365                 break;
11366         }
11367         return workposted;
11368 }
11369
11370 /**
11371  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
11372  * @phba: Pointer to HBA context object.
11373  * @rcqe: Pointer to receive-queue completion queue entry.
11374  *
11375  * This routine processes a receive-queue completion queue entry.
11376  *
11377  * Return: true if work posted to worker thread, otherwise false.
11378  **/
11379 static bool
11380 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
11381 {
11382         bool workposted = false;
11383         struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
11384         struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
11385         struct hbq_dmabuf *dma_buf;
11386         uint32_t status, rq_id;
11387         unsigned long iflags;
11388
11389         /* sanity check on queue memory */
11390         if (unlikely(!hrq) || unlikely(!drq))
11391                 return workposted;
11392
11393         if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
11394                 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
11395         else
11396                 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
11397         if (rq_id != hrq->queue_id)
11398                 goto out;
11399
11400         status = bf_get(lpfc_rcqe_status, rcqe);
11401         switch (status) {
11402         case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
11403                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11404                                 "2537 Receive Frame Truncated!!\n");
11405                 hrq->RQ_buf_trunc++;
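                /* Fall through - process the truncated frame as received */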
11406         case FC_STATUS_RQ_SUCCESS:
11407                 lpfc_sli4_rq_release(hrq, drq);
11408                 spin_lock_irqsave(&phba->hbalock, iflags);
11409                 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
11410                 if (!dma_buf) {
11411                         hrq->RQ_no_buf_found++;
11412                         spin_unlock_irqrestore(&phba->hbalock, iflags);
11413                         goto out;
11414                 }
11415                 hrq->RQ_rcv_buf++;
11416                 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
11417                 /* save off the frame for the worker thread to process */
11418                 list_add_tail(&dma_buf->cq_event.list,
11419                               &phba->sli4_hba.sp_queue_event);
11420                 /* Frame received */
11421                 phba->hba_flag |= HBA_SP_QUEUE_EVT;
11422                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11423                 workposted = true;
11424                 break;
11425         case FC_STATUS_INSUFF_BUF_NEED_BUF:
11426         case FC_STATUS_INSUFF_BUF_FRM_DISC:
11427                 hrq->RQ_no_posted_buf++;
11428                 /* Post more buffers if possible */
11429                 spin_lock_irqsave(&phba->hbalock, iflags);
11430                 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
11431                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11432                 workposted = true;
11433                 break;
11434         }
11435 out:
11436         return workposted;
11437 }
11438
11439 /**
11440  * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
11441  * @phba: Pointer to HBA context object.
11442  * @cq: Pointer to the completion queue.
11443  * @cqe: Pointer to a completion queue entry.
11444  *
11445  * This routine processes a slow-path work-queue or receive-queue
11446  * completion queue entry.
11447  *
11448  * Return: true if work posted to worker thread, otherwise false.
11449  **/
11450 static bool
11451 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11452                          struct lpfc_cqe *cqe)
11453 {
11454         struct lpfc_cqe cqevt;
11455         bool workposted = false;
11456
11457         /* Copy the work queue CQE and convert endian order if needed */
11458         lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
11459
11460         /* Check and process for different type of WCQE and dispatch */
11461         switch (bf_get(lpfc_cqe_code, &cqevt)) {
11462         case CQE_CODE_COMPL_WQE:
11463                 /* Process the WQ/RQ complete event */
11464                 phba->last_completion_time = jiffies;
11465                 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
11466                                 (struct lpfc_wcqe_complete *)&cqevt);
11467                 break;
11468         case CQE_CODE_RELEASE_WQE:
11469                 /* Process the WQ release event */
11470                 lpfc_sli4_sp_handle_rel_wcqe(phba,
11471                                 (struct lpfc_wcqe_release *)&cqevt);
11472                 break;
11473         case CQE_CODE_XRI_ABORTED:
11474                 /* Process the WQ XRI abort event */
11475                 phba->last_completion_time = jiffies;
11476                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
11477                                 (struct sli4_wcqe_xri_aborted *)&cqevt);
11478                 break;
11479         case CQE_CODE_RECEIVE:
11480         case CQE_CODE_RECEIVE_V1:
11481                 /* Process the RQ event */
11482                 phba->last_completion_time = jiffies;
11483                 workposted = lpfc_sli4_sp_handle_rcqe(phba,
11484                                 (struct lpfc_rcqe *)&cqevt);
11485                 break;
11486         default:
11487                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11488                                 "0388 Not a valid WCQE code: x%x\n",
11489                                 bf_get(lpfc_cqe_code, &cqevt));
11490                 break;
11491         }
11492         return workposted;
11493 }
11494
11495 /**
11496  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
11497  * @phba: Pointer to HBA context object.
11498  * @eqe: Pointer to slow-path event queue entry.
 * @speq: Pointer to the slow-path event queue.
11499  *
11500  * This routine processes an event queue entry from the slow-path event
11501  * queue. It checks the MajorCode and MinorCode to determine whether this is
11502  * a completion event on a completion queue; if not, an error is logged and
11503  * the routine returns. Otherwise, it finds the corresponding completion
11504  * queue, processes all the entries on that completion queue, re-arms the
11505  * completion queue, and returns.
11506  *
11507  **/
11508 static void
11509 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11510         struct lpfc_queue *speq)
11511 {
11512         struct lpfc_queue *cq = NULL, *childq;
11513         struct lpfc_cqe *cqe;
11514         bool workposted = false;
11515         int ecount = 0;
11516         uint16_t cqid;
11517
11518         /* Get the reference to the corresponding CQ */
11519         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11520
11521         list_for_each_entry(childq, &speq->child_list, list) {
11522                 if (childq->queue_id == cqid) {
11523                         cq = childq;
11524                         break;
11525                 }
11526         }
11527         if (unlikely(!cq)) {
11528                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11529                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11530                                         "0365 Slow-path CQ identifier "
11531                                         "(%d) does not exist\n", cqid);
11532                 return;
11533         }
11534
11535         /* Process all the entries to the CQ */
11536         switch (cq->type) {
11537         case LPFC_MCQ:
11538                 while ((cqe = lpfc_sli4_cq_get(cq))) {
11539                         workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
11540                         if (!(++ecount % cq->entry_repost))
11541                                 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11542                         cq->CQ_mbox++;
11543                 }
11544                 break;
11545         case LPFC_WCQ:
11546                 while ((cqe = lpfc_sli4_cq_get(cq))) {
11547                         if (cq->subtype == LPFC_FCP)
11548                                 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
11549                                                                        cqe);
11550                         else
11551                                 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
11552                                                                       cqe);
11553                         if (!(++ecount % cq->entry_repost))
11554                                 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11555                 }
11556
11557                 /* Track the max number of CQEs processed in 1 EQ */
11558                 if (ecount > cq->CQ_max_cqe)
11559                         cq->CQ_max_cqe = ecount;
11560                 break;
11561         default:
11562                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11563                                 "0370 Invalid completion queue type (%d)\n",
11564                                 cq->type);
11565                 return;
11566         }
11567
11568         /* Catch the no cq entry condition, log an error */
11569         if (unlikely(ecount == 0))
11570                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11571                                 "0371 No entry from the CQ: identifier "
11572                                 "(x%x), type (%d)\n", cq->queue_id, cq->type);
11573
11574         /* In any case, flush and re-arm the CQ */
11575         lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
11576
11577         /* wake up worker thread if there is work to be done */
11578         if (workposted)
11579                 lpfc_worker_wake_up(phba);
11580 }
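
/*
 * Illustrative sketch (not part of the driver): the batched-release shape
 * shared by the CQ-processing loops above. Processed entries are handed
 * back to the hardware every entry_repost entries without re-arming, and
 * one final release re-arms the completion queue. demo_drain_cq is a
 * hypothetical stand-in; the queue accessors are the driver's own.
 */
#if 0	/* illustration only, never compiled */
static void demo_drain_cq(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	struct lpfc_cqe *cqe;
	int ecount = 0;

	while ((cqe = lpfc_sli4_cq_get(cq))) {
		/* ... dispatch the CQE to its handler here ... */
		if (!(++ecount % cq->entry_repost))
			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
	}
	/* final release flushes the remainder and re-arms the CQ */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
}
#endif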
11581
11582 /**
11583  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
11584  * @phba: Pointer to HBA context object.
11585  * @cq: Pointer to associated CQ
11586  * @wcqe: Pointer to work-queue completion queue entry.
11587  *
11588  * This routine processes a fast-path work queue completion entry from the
11589  * fast-path event queue for an FCP command response completion.
11590  **/
11591 static void
11592 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11593                              struct lpfc_wcqe_complete *wcqe)
11594 {
11595         struct lpfc_sli_ring *pring = cq->pring;
11596         struct lpfc_iocbq *cmdiocbq;
11597         struct lpfc_iocbq irspiocbq;
11598         unsigned long iflags;
11599
11600         /* Check for response status */
11601         if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
11602                 /* If resource errors reported from HBA, reduce queue
11603                  * depth of the SCSI device.
11604                  */
11605                 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
11606                      IOSTAT_LOCAL_REJECT)) &&
11607                     ((wcqe->parameter & IOERR_PARAM_MASK) ==
11608                      IOERR_NO_RESOURCES))
11609                         phba->lpfc_rampdown_queue_depth(phba);
11610
11611                 /* Log the error status */
11612                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11613                                 "0373 FCP complete error: status=x%x, "
11614                                 "hw_status=x%x, total_data_specified=%d, "
11615                                 "parameter=x%x, word3=x%x\n",
11616                                 bf_get(lpfc_wcqe_c_status, wcqe),
11617                                 bf_get(lpfc_wcqe_c_hw_status, wcqe),
11618                                 wcqe->total_data_placed, wcqe->parameter,
11619                                 wcqe->word3);
11620         }
11621
11622         /* Look up the FCP command IOCB and create pseudo response IOCB */
11623         spin_lock_irqsave(&pring->ring_lock, iflags);
11624         pring->stats.iocb_event++;
11625         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11626                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11627         spin_unlock_irqrestore(&pring->ring_lock, iflags);
11628         if (unlikely(!cmdiocbq)) {
11629                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11630                                 "0374 FCP complete with no corresponding "
11631                                 "cmdiocb: iotag (%d)\n",
11632                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11633                 return;
11634         }
11635         if (unlikely(!cmdiocbq->iocb_cmpl)) {
11636                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11637                                 "0375 FCP cmdiocb has no callback function, "
11638                                 "iotag: (%d)\n",
11639                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11640                 return;
11641         }
11642
11643         /* Fake the irspiocb and copy necessary response information */
11644         lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
11645
11646         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
11647                 spin_lock_irqsave(&phba->hbalock, iflags);
11648                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
11649                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11650         }
11651
11652         /* Pass the cmd_iocb and the rsp state to the upper layer */
11653         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
11654 }
11655
11656 /**
11657  * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
11658  * @phba: Pointer to HBA context object.
11659  * @cq: Pointer to completion queue.
11660  * @wcqe: Pointer to work-queue completion queue entry.
11661  *
11662  * This routine handles a fast-path WQ entry consumed event by invoking the
11663  * proper WQ release routine on the fast-path WQ.
11664  **/
11665 static void
11666 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11667                              struct lpfc_wcqe_release *wcqe)
11668 {
11669         struct lpfc_queue *childwq;
11670         bool wqid_matched = false;
11671         uint16_t fcp_wqid;
11672
11673         /* Check for fast-path FCP work queue release */
11674         fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
11675         list_for_each_entry(childwq, &cq->child_list, list) {
11676                 if (childwq->queue_id == fcp_wqid) {
11677                         lpfc_sli4_wq_release(childwq,
11678                                         bf_get(lpfc_wcqe_r_wqe_index, wcqe));
11679                         wqid_matched = true;
11680                         break;
11681                 }
11682         }
11683         /* Report warning log message if no match found */
11684         if (!wqid_matched)
11685                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11686                                 "2580 Fast-path wqe consume event carries "
11687                                 "mismatched qid: wcqe-qid=x%x\n", fcp_wqid);
11688 }
11689
11690 /**
11691  * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
11692  * @cq: Pointer to the completion queue.
11693  * @cqe: Pointer to fast-path completion queue entry.
11694  *
11695  * This routine processes a fast-path work queue completion entry from the
11696  * fast-path event queue for an FCP command response completion.
 *
 * Return: true if work posted to worker thread, otherwise false.
11697  **/
11698 static int
11699 lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11700                          struct lpfc_cqe *cqe)
11701 {
11702         struct lpfc_wcqe_release wcqe;
11703         bool workposted = false;
11704
11705         /* Copy the work queue CQE and convert endian order if needed */
11706         lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
11707
11708         /* Check and process for different type of WCQE and dispatch */
11709         switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
11710         case CQE_CODE_COMPL_WQE:
11711                 cq->CQ_wq++;
11712                 /* Process the WQ complete event */
11713                 phba->last_completion_time = jiffies;
11714                 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
11715                                 (struct lpfc_wcqe_complete *)&wcqe);
11716                 break;
11717         case CQE_CODE_RELEASE_WQE:
11718                 cq->CQ_release_wqe++;
11719                 /* Process the WQ release event */
11720                 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
11721                                 (struct lpfc_wcqe_release *)&wcqe);
11722                 break;
11723         case CQE_CODE_XRI_ABORTED:
11724                 cq->CQ_xri_aborted++;
11725                 /* Process the WQ XRI abort event */
11726                 phba->last_completion_time = jiffies;
11727                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
11728                                 (struct sli4_wcqe_xri_aborted *)&wcqe);
11729                 break;
11730         default:
11731                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11732                                 "0144 Not a valid WCQE code: x%x\n",
11733                                 bf_get(lpfc_wcqe_c_code, &wcqe));
11734                 break;
11735         }
11736         return workposted;
11737 }
11738
11739 /**
11740  * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
11741  * @phba: Pointer to HBA context object.
11742  * @eqe: Pointer to fast-path event queue entry.
 * @qidx: Index of the fast-path EQ/CQ pair to service.
11743  *
11744  * This routine processes an event queue entry from the fast-path event
11745  * queue. It checks the MajorCode and MinorCode to determine whether this is
11746  * a completion event on a completion queue; if not, an error is logged and
11747  * the routine returns. Otherwise, it finds the corresponding completion
11748  * queue, processes all the entries on the completion queue, re-arms the
11749  * completion queue, and returns.
11750  **/
11751 static void
11752 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11753                         uint32_t qidx)
11754 {
11755         struct lpfc_queue *cq;
11756         struct lpfc_cqe *cqe;
11757         bool workposted = false;
11758         uint16_t cqid;
11759         int ecount = 0;
11760
11761         if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
11762                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11763                                 "0366 Not a valid completion "
11764                                 "event: majorcode=x%x, minorcode=x%x\n",
11765                                 bf_get_le32(lpfc_eqe_major_code, eqe),
11766                                 bf_get_le32(lpfc_eqe_minor_code, eqe));
11767                 return;
11768         }
11769
11770         /* Get the reference to the corresponding CQ */
11771         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11772
11773         /* Check if this is a Slow path event */
11774         if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
11775                 lpfc_sli4_sp_handle_eqe(phba, eqe,
11776                         phba->sli4_hba.hba_eq[qidx]);
11777                 return;
11778         }
11779
11780         if (unlikely(!phba->sli4_hba.fcp_cq)) {
11781                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11782                                 "3146 Fast-path completion queues "
11783                                 "do not exist\n");
11784                 return;
11785         }
11786         cq = phba->sli4_hba.fcp_cq[qidx];
11787         if (unlikely(!cq)) {
11788                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11789                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11790                                         "0367 Fast-path completion queue "
11791                                         "(%d) does not exist\n", qidx);
11792                 return;
11793         }
11794
11795         if (unlikely(cqid != cq->queue_id)) {
11796                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11797                                 "0368 Mismatched fast-path completion "
11798                                 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
11799                                 cqid, cq->queue_id);
11800                 return;
11801         }
11802
11803         /* Process all the entries to the CQ */
11804         while ((cqe = lpfc_sli4_cq_get(cq))) {
11805                 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
11806                 if (!(++ecount % cq->entry_repost))
11807                         lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11808         }
11809
11810         /* Track the max number of CQEs processed in 1 EQ */
11811         if (ecount > cq->CQ_max_cqe)
11812                 cq->CQ_max_cqe = ecount;
11813
11814         /* Catch the no cq entry condition */
11815         if (unlikely(ecount == 0))
11816                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11817                                 "0369 No entry from fast-path completion "
11818                                 "queue fcpcqid=%d\n", cq->queue_id);
11819
11820         /* In any case, flush and re-arm the CQ */
11821         lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
11822
11823         /* wake up worker thread if there is work to be done */
11824         if (workposted)
11825                 lpfc_worker_wake_up(phba);
11826 }
11827
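/**
 * lpfc_sli4_eq_flush - Drop all pending entries from an event queue
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the event queue to flush.
 *
 * This routine walks the given event queue, discards every pending entry,
 * and then clears and re-arms the queue.
 **/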
11828 static void
11829 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
11830 {
11831         struct lpfc_eqe *eqe;
11832
11833         /* walk all the EQ entries and drop on the floor */
11834         while ((eqe = lpfc_sli4_eq_get(eq)))
11835                 ;
11836
11837         /* Clear and re-arm the EQ */
11838         lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
11839 }
11840
11841 /**
11842  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
11843  * @irq: Interrupt number.
11844  * @dev_id: The device context pointer.
11845  *
11846  * This function is directly called from the PCI layer as an interrupt
11847  * service routine when device with SLI-4 interface spec is enabled with
11848  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
11849  * ring event in the HBA. However, when the device is enabled with either
11850  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
11851  * device-level interrupt handler. When the PCI slot is in error recovery
11852  * or the HBA is undergoing initialization, the interrupt handler will not
11853  * process the interrupt. The SCSI FCP fast-path ring events are handled in
11854  * the interrupt context. This function is called without any lock held.
11855  * It gets the hbalock to access and update SLI data structures. Note that,
11856  * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
11857  * equal to that of FCP CQ index.
11858  *
11859  * The link attention and ELS ring attention events are handled
11860  * by the worker thread. The interrupt handler signals the worker thread
11861  * and returns for these events. This function is called without any lock
11862  * held. It gets the hbalock to access and update SLI data structures.
11863  *
11864  * This function returns IRQ_HANDLED when interrupt is handled else it
11865  * returns IRQ_NONE.
11866  **/
11867 irqreturn_t
11868 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
11869 {
11870         struct lpfc_hba *phba;
11871         struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
11872         struct lpfc_queue *fpeq;
11873         struct lpfc_eqe *eqe;
11874         unsigned long iflag;
11875         int ecount = 0;
11876         uint32_t fcp_eqidx;
11877
11878         /* Get the driver's phba structure from the dev_id */
11879         fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
11880         phba = fcp_eq_hdl->phba;
11881         fcp_eqidx = fcp_eq_hdl->idx;
11882
11883         if (unlikely(!phba))
11884                 return IRQ_NONE;
11885         if (unlikely(!phba->sli4_hba.hba_eq))
11886                 return IRQ_NONE;
11887
11888         /* Get to the EQ struct associated with this vector */
11889         fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
11890         if (unlikely(!fpeq))
11891                 return IRQ_NONE;
11892
11893         if (lpfc_fcp_look_ahead) {
11894                 if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
11895                         lpfc_sli4_eq_clr_intr(fpeq);
11896                 else {
11897                         atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11898                         return IRQ_NONE;
11899                 }
11900         }
11901
11902         /* Check device state for handling interrupt */
11903         if (unlikely(lpfc_intr_state_check(phba))) {
11904                 fpeq->EQ_badstate++;
11905                 /* Check again for link_state with lock held */
11906                 spin_lock_irqsave(&phba->hbalock, iflag);
11907                 if (phba->link_state < LPFC_LINK_DOWN)
11908                         /* Flush, clear interrupt, and rearm the EQ */
11909                         lpfc_sli4_eq_flush(phba, fpeq);
11910                 spin_unlock_irqrestore(&phba->hbalock, iflag);
11911                 if (lpfc_fcp_look_ahead)
11912                         atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11913                 return IRQ_NONE;
11914         }
11915
11916         /*
11917          * Process all the events on the FCP fast-path EQ
11918          */
11919         while ((eqe = lpfc_sli4_eq_get(fpeq))) {
11920                 lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
11921                 if (!(++ecount % fpeq->entry_repost))
11922                         lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
11923                 fpeq->EQ_processed++;
11924         }
11925
11926         /* Track the max number of EQEs processed in 1 intr */
11927         if (ecount > fpeq->EQ_max_eqe)
11928                 fpeq->EQ_max_eqe = ecount;
11929
11930         /* Always clear and re-arm the fast-path EQ */
11931         lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
11932
11933         if (unlikely(ecount == 0)) {
11934                 fpeq->EQ_no_entry++;
11935
11936                 if (lpfc_fcp_look_ahead) {
11937                         atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11938                         return IRQ_NONE;
11939                 }
11940
11941                 if (phba->intr_type == MSIX)
11942                         /* MSI-X interrupt seen with no EQE: EQ not shared */
11943                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11944                                         "0358 MSI-X interrupt with no EQE\n");
11945                 else
11946                         /* Non MSI-X: treat as a shared (EQ share) interrupt */
11947                         return IRQ_NONE;
11948         }
11949
11950         if (lpfc_fcp_look_ahead)
11951                 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11952         return IRQ_HANDLED;
11953 } /* lpfc_sli4_hba_intr_handler */
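
/*
 * Illustrative sketch (not part of the driver): the lpfc_fcp_look_ahead
 * gate used in the handler above. The fcp_eq_in_use counter idles at 1;
 * the context that decrements it to zero owns the EQ, while a racing
 * context restores the count and backs off. demo_try_claim_eq is a
 * hypothetical name.
 */
#if 0	/* illustration only, never compiled */
static bool demo_try_claim_eq(atomic_t *fcp_eq_in_use)
{
	if (atomic_dec_and_test(fcp_eq_in_use))
		return true;		/* sole owner - safe to service the EQ */
	atomic_inc(fcp_eq_in_use);	/* undo: someone else owns it */
	return false;
}
#endif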
11954
11955 /**
11956  * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
11957  * @irq: Interrupt number.
11958  * @dev_id: The device context pointer.
11959  *
11960  * This function is the device-level interrupt handler to device with SLI-4
11961  * interface spec, called from the PCI layer when either MSI or Pin-IRQ
11962  * interrupt mode is enabled and there is an event in the HBA which requires
11963  * driver attention. This function invokes the slow-path interrupt attention
11964  * handling function and fast-path interrupt attention handling function in
11965  * turn to process the relevant HBA attention events. This function is called
11966  * without any lock held. It gets the hbalock to access and update SLI data
11967  * structures.
11968  *
11969  * This function returns IRQ_HANDLED when interrupt is handled, else it
11970  * returns IRQ_NONE.
11971  **/
11972 irqreturn_t
11973 lpfc_sli4_intr_handler(int irq, void *dev_id)
11974 {
11975         struct lpfc_hba  *phba;
11976         irqreturn_t hba_irq_rc;
11977         bool hba_handled = false;
11978         uint32_t fcp_eqidx;
11979
11980         /* Get the driver's phba structure from the dev_id */
11981         phba = (struct lpfc_hba *)dev_id;
11982
11983         if (unlikely(!phba))
11984                 return IRQ_NONE;
11985
11986         /*
11987          * Invoke fast-path host attention interrupt handling as appropriate.
11988          */
11989         for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
11990                 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
11991                                         &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
11992                 if (hba_irq_rc == IRQ_HANDLED)
11993                         hba_handled = true;
11994         }
11995
11996         return hba_handled ? IRQ_HANDLED : IRQ_NONE;
11997 } /* lpfc_sli4_intr_handler */
11998
11999 /**
12000  * lpfc_sli4_queue_free - free a queue structure and associated memory
12001  * @queue: The queue structure to free.
12002  *
12003  * This function frees a queue structure and the DMAable memory used for
12004  * the host resident queue. This function must be called after destroying the
12005  * queue on the HBA.
12006  **/
12007 void
12008 lpfc_sli4_queue_free(struct lpfc_queue *queue)
12009 {
12010         struct lpfc_dmabuf *dmabuf;
12011
12012         if (!queue)
12013                 return;
12014
12015         while (!list_empty(&queue->page_list)) {
12016                 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
12017                                  list);
12018                 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
12019                                   dmabuf->virt, dmabuf->phys);
12020                 kfree(dmabuf);
12021         }
12022         kfree(queue);
12023         return;
12024 }
12025
12026 /**
12027  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
12028  * @phba: The HBA that this queue is being created on.
12029  * @entry_size: The size of each queue entry for this queue.
12030  * @entry_count: The number of entries that this queue will handle.
12031  *
12032  * This function allocates a queue structure and the DMAable memory used for
12033  * the host resident queue. This function must be called before creating the
12034  * queue on the HBA.
12035  **/
12036 struct lpfc_queue *
12037 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
12038                       uint32_t entry_count)
12039 {
12040         struct lpfc_queue *queue;
12041         struct lpfc_dmabuf *dmabuf;
12042         int x, total_qe_count;
12043         void *dma_pointer;
12044         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12045
12046         if (!phba->sli4_hba.pc_sli4_params.supported)
12047                 hw_page_size = SLI4_PAGE_SIZE;
12048
12049         queue = kzalloc(sizeof(struct lpfc_queue) +
12050                         (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
12051         if (!queue)
12052                 return NULL;
12053         queue->page_count = (ALIGN(entry_size * entry_count,
12054                         hw_page_size))/hw_page_size;
12055         INIT_LIST_HEAD(&queue->list);
12056         INIT_LIST_HEAD(&queue->page_list);
12057         INIT_LIST_HEAD(&queue->child_list);
12058         for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
12059                 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
12060                 if (!dmabuf)
12061                         goto out_fail;
12062                 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
12063                                                   hw_page_size, &dmabuf->phys,
12064                                                   GFP_KERNEL);
12065                 if (!dmabuf->virt) {
12066                         kfree(dmabuf);
12067                         goto out_fail;
12068                 }
12069                 memset(dmabuf->virt, 0, hw_page_size);
12070                 dmabuf->buffer_tag = x;
12071                 list_add_tail(&dmabuf->list, &queue->page_list);
12072                 /* initialize queue's entry array */
12073                 dma_pointer = dmabuf->virt;
12074                 for (; total_qe_count < entry_count &&
12075                      dma_pointer < (hw_page_size + dmabuf->virt);
12076                      total_qe_count++, dma_pointer += entry_size) {
12077                         queue->qe[total_qe_count].address = dma_pointer;
12078                 }
12079         }
12080         queue->entry_size = entry_size;
12081         queue->entry_count = entry_count;
12082
12083         /*
12084          * entry_repost is calculated based on the number of entries in the
12085          * queue. This works out except for RQs. If buffers are NOT initially
12086          * posted for every RQE, entry_repost should be adjusted accordingly.
12087          */
12088         queue->entry_repost = (entry_count >> 3);
12089         if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
12090                 queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
12091         queue->phba = phba;
12092
12093         return queue;
12094 out_fail:
12095         lpfc_sli4_queue_free(queue);
12096         return NULL;
12097 }
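
/*
 * Worked example for the sizing above (illustrative figures): with a
 * 4096-byte SLI4 page, a queue of 1024 entries of 16 bytes each needs
 * ALIGN(16 * 1024, 4096) / 4096 = 4 pages, and entry_repost becomes
 * 1024 >> 3 = 128, i.e. the queue is handed back to the hardware every
 * 128 processed entries (subject to the LPFC_QUEUE_MIN_REPOST floor).
 */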
12098
12099 /**
12100  * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
12101  * @phba: HBA structure that indicates port to create a queue on.
12102  * @startq: The starting FCP EQ to modify
12103  *
12104  * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
12105  *
12106  * The @phba struct is used to send mailbox command to HBA. The @startq
12107  * is used to get the starting FCP EQ to change.
12108  * The mailbox command is issued in polled mode, so this function waits
12109  * for the command to finish before continuing.
12110  *
12111  * On success this function will return zero. If unable to allocate enough
12112  * memory this function will return -ENOMEM. If the mailbox command fails
12113  * this function will return -ENXIO.
12114  **/
12115 int
12116 lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12117 {
12118         struct lpfc_mbx_modify_eq_delay *eq_delay;
12119         LPFC_MBOXQ_t *mbox;
12120         struct lpfc_queue *eq;
12121         int cnt, rc, length, status = 0;
12122         uint32_t shdr_status, shdr_add_status;
12123         uint32_t result;
12124         int fcp_eqidx;
12125         union lpfc_sli4_cfg_shdr *shdr;
12126         uint16_t dmult;
12127
12128         if (startq >= phba->cfg_fcp_io_channel)
12129                 return 0;
12130
12131         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12132         if (!mbox)
12133                 return -ENOMEM;
12134         length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
12135                   sizeof(struct lpfc_sli4_cfg_mhdr));
12136         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12137                          LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
12138                          length, LPFC_SLI4_MBX_EMBED);
12139         eq_delay = &mbox->u.mqe.un.eq_delay;
12140
12141         /* Calculate delay multiplier from maximum interrupts per second */
12142         result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
12143         if (!result || result > LPFC_DMULT_CONST)
12144                 dmult = 0;
12145         else
12146                 dmult = LPFC_DMULT_CONST/result - 1;
12147
12148         cnt = 0;
12149         for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
12150             fcp_eqidx++) {
12151                 eq = phba->sli4_hba.hba_eq[fcp_eqidx];
12152                 if (!eq)
12153                         continue;
12154                 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
12155                 eq_delay->u.request.eq[cnt].phase = 0;
12156                 eq_delay->u.request.eq[cnt].delay_multi = dmult;
12157                 cnt++;
12158                 if (cnt >= LPFC_MAX_EQ_DELAY)
12159                         break;
12160         }
12161         eq_delay->u.request.num_eq = cnt;
12162
12163         mbox->vport = phba->pport;
12164         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12165         mbox->context1 = NULL;
12166         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12167         shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
12168         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12169         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12170         if (shdr_status || shdr_add_status || rc) {
12171                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12172                                 "2512 MODIFY_EQ_DELAY mailbox failed with "
12173                                 "status x%x add_status x%x, mbx status x%x\n",
12174                                 shdr_status, shdr_add_status, rc);
12175                 status = -ENXIO;
12176         }
12177         mempool_free(mbox, phba->mbox_mem_pool);
12178         return status;
12179 }
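
/*
 * Worked example for the multiplier above (illustrative figures): with
 * cfg_fcp_imax = 40000 interrupts/sec spread across cfg_fcp_io_channel = 4
 * channels, result = 10000 per EQ; as long as that does not exceed
 * LPFC_DMULT_CONST, the programmed delay multiplier is
 * LPFC_DMULT_CONST / 10000 - 1. A result above LPFC_DMULT_CONST disables
 * coalescing (dmult = 0).
 */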
12180
12181 /**
12182  * lpfc_eq_create - Create an Event Queue on the HBA
12183  * @phba: HBA structure that indicates port to create a queue on.
12184  * @eq: The queue structure to use to create the event queue.
12185  * @imax: The maximum interrupt per second limit.
12186  *
12187  * This function creates an event queue, as detailed in @eq, on a port,
12188  * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
12189  *
12190  * The @phba struct is used to send mailbox command to HBA. The @eq struct
12191  * is used to get the entry count and entry size that are necessary to
12192  * determine the number of pages to allocate and use for this queue. This
12193  * function will send the EQ_CREATE mailbox command to the HBA to setup the
12194  * event queue. The mailbox command is issued in polled mode, so this
12195  * function waits for the command to finish before continuing.
12196  *
12197  * On success this function will return zero. If unable to allocate enough
12198  * memory this function will return -ENOMEM. If the queue create mailbox command
12199  * fails this function will return -ENXIO.
12200  **/
12201 int
12202 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
12203 {
12204         struct lpfc_mbx_eq_create *eq_create;
12205         LPFC_MBOXQ_t *mbox;
12206         int rc, length, status = 0;
12207         struct lpfc_dmabuf *dmabuf;
12208         uint32_t shdr_status, shdr_add_status;
12209         union lpfc_sli4_cfg_shdr *shdr;
12210         uint16_t dmult;
12211         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12212
12213         /* sanity check on queue memory */
12214         if (!eq)
12215                 return -ENODEV;
12216         if (!phba->sli4_hba.pc_sli4_params.supported)
12217                 hw_page_size = SLI4_PAGE_SIZE;
12218
12219         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12220         if (!mbox)
12221                 return -ENOMEM;
12222         length = (sizeof(struct lpfc_mbx_eq_create) -
12223                   sizeof(struct lpfc_sli4_cfg_mhdr));
12224         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12225                          LPFC_MBOX_OPCODE_EQ_CREATE,
12226                          length, LPFC_SLI4_MBX_EMBED);
12227         eq_create = &mbox->u.mqe.un.eq_create;
12228         bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
12229                eq->page_count);
12230         bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
12231                LPFC_EQE_SIZE);
12232         bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
12233         /* Calculate delay multiplier from maximum interrupts per second */
12234         if (!imax || imax > LPFC_DMULT_CONST)
12235                 dmult = 0;
12236         else
12237                 dmult = LPFC_DMULT_CONST/imax - 1;
12238         bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
12239                dmult);
12240         switch (eq->entry_count) {
12241         default:
12242                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12243                                 "0360 Unsupported EQ count. (%d)\n",
12244                                 eq->entry_count);
12245                 if (eq->entry_count < 256) {
12246                         mempool_free(mbox, phba->mbox_mem_pool);
                        return -EINVAL;
                }
12247                 /* otherwise default to smallest count (drop through) */
12248         case 256:
12249                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12250                        LPFC_EQ_CNT_256);
12251                 break;
12252         case 512:
12253                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12254                        LPFC_EQ_CNT_512);
12255                 break;
12256         case 1024:
12257                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12258                        LPFC_EQ_CNT_1024);
12259                 break;
12260         case 2048:
12261                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12262                        LPFC_EQ_CNT_2048);
12263                 break;
12264         case 4096:
12265                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12266                        LPFC_EQ_CNT_4096);
12267                 break;
12268         }
12269         list_for_each_entry(dmabuf, &eq->page_list, list) {
12270                 memset(dmabuf->virt, 0, hw_page_size);
12271                 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12272                                         putPaddrLow(dmabuf->phys);
12273                 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12274                                         putPaddrHigh(dmabuf->phys);
12275         }
12276         mbox->vport = phba->pport;
12277         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12278         mbox->context1 = NULL;
12279         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12280         shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
12281         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12282         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12283         if (shdr_status || shdr_add_status || rc) {
12284                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12285                                 "2500 EQ_CREATE mailbox failed with "
12286                                 "status x%x add_status x%x, mbx status x%x\n",
12287                                 shdr_status, shdr_add_status, rc);
12288                 status = -ENXIO;
12289         }
12290         eq->type = LPFC_EQ;
12291         eq->subtype = LPFC_NONE;
12292         eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
12293         if (eq->queue_id == 0xFFFF)
12294                 status = -ENXIO;
12295         eq->host_index = 0;
12296         eq->hba_index = 0;
12297
12298         mempool_free(mbox, phba->mbox_mem_pool);
12299         return status;
12300 }
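
/*
 * Illustrative sketch (not part of the driver): the embedded SLI4 config
 * mailbox pattern shared by MODIFY_EQ_DELAY and the queue-create routines
 * in this file: allocate from the mailbox pool, build the embedded request,
 * issue in polled mode, then check both the mailbox return code and the
 * subheader status words. demo_check_cfg_status is a hypothetical helper.
 */
#if 0	/* illustration only, never compiled */
static int demo_check_cfg_status(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
				 union lpfc_sli4_cfg_shdr *shdr, int rc)
{
	uint32_t shdr_status, shdr_add_status;
	int status = 0;

	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc)
		status = -ENXIO;	/* firmware- or transport-level failure */
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
#endif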
12301
12302 /**
12303  * lpfc_cq_create - Create a Completion Queue on the HBA
12304  * @phba: HBA structure that indicates port to create a queue on.
12305  * @cq: The queue structure to use to create the completion queue.
12306  * @eq: The event queue to bind this completion queue to.
12307  *
12308  * This function creates a completion queue, as detailed in @cq, on a port,
12309  * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
12310  *
12311  * The @phba struct is used to send mailbox command to HBA. The @cq struct
12312  * is used to get the entry count and entry size that are necessary to
12313  * determine the number of pages to allocate and use for this queue. The @eq
12314  * is used to indicate which event queue to bind this completion queue to. This
12315  * function will send the CQ_CREATE mailbox command to the HBA to setup the
12316  * completion queue. This function is asynchronous and will wait for the mailbox
12317  * completion queue. The mailbox command is issued in polled mode, so this
12318  * function waits for the command to finish before continuing.
12319  * On success this function will return zero. If unable to allocate enough
12320  * memory this function will return -ENOMEM. If the queue create mailbox command
12321  * fails this function will return -ENXIO.
12322  **/
12323 int
12324 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
12325                struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
12326 {
12327         struct lpfc_mbx_cq_create *cq_create;
12328         struct lpfc_dmabuf *dmabuf;
12329         LPFC_MBOXQ_t *mbox;
12330         int rc, length, status = 0;
12331         uint32_t shdr_status, shdr_add_status;
12332         union lpfc_sli4_cfg_shdr *shdr;
12333         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12334
12335         /* sanity check on queue memory */
12336         if (!cq || !eq)
12337                 return -ENODEV;
12338         if (!phba->sli4_hba.pc_sli4_params.supported)
12339                 hw_page_size = SLI4_PAGE_SIZE;
12340
12341         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12342         if (!mbox)
12343                 return -ENOMEM;
12344         length = (sizeof(struct lpfc_mbx_cq_create) -
12345                   sizeof(struct lpfc_sli4_cfg_mhdr));
12346         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12347                          LPFC_MBOX_OPCODE_CQ_CREATE,
12348                          length, LPFC_SLI4_MBX_EMBED);
12349         cq_create = &mbox->u.mqe.un.cq_create;
12350         shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
12351         bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
12352                     cq->page_count);
12353         bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
12354         bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
12355         bf_set(lpfc_mbox_hdr_version, &shdr->request,
12356                phba->sli4_hba.pc_sli4_params.cqv);
12357         if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
12358                 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
12359                 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
12360                 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
12361                        eq->queue_id);
12362         } else {
12363                 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
12364                        eq->queue_id);
12365         }
12366         switch (cq->entry_count) {
12367         default:
12368                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12369                                 "0361 Unsupported CQ count. (%d)\n",
12370                                 cq->entry_count);
12371                 if (cq->entry_count < 256) {
12372                         status = -EINVAL;
12373                         goto out;
12374                 }
                /* otherwise default to smallest count (fall through) */
12376         case 256:
12377                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12378                        LPFC_CQ_CNT_256);
12379                 break;
12380         case 512:
12381                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12382                        LPFC_CQ_CNT_512);
12383                 break;
12384         case 1024:
12385                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12386                        LPFC_CQ_CNT_1024);
12387                 break;
12388         }
12389         list_for_each_entry(dmabuf, &cq->page_list, list) {
12390                 memset(dmabuf->virt, 0, hw_page_size);
12391                 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12392                                         putPaddrLow(dmabuf->phys);
12393                 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12394                                         putPaddrHigh(dmabuf->phys);
12395         }
12396         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12397
12398         /* The IOCTL status is embedded in the mailbox subheader. */
12399         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12400         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12401         if (shdr_status || shdr_add_status || rc) {
12402                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12403                                 "2501 CQ_CREATE mailbox failed with "
12404                                 "status x%x add_status x%x, mbx status x%x\n",
12405                                 shdr_status, shdr_add_status, rc);
12406                 status = -ENXIO;
12407                 goto out;
12408         }
12409         cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
12410         if (cq->queue_id == 0xFFFF) {
12411                 status = -ENXIO;
12412                 goto out;
12413         }
12414         /* link the cq onto the parent eq child list */
12415         list_add_tail(&cq->list, &eq->child_list);
        /* Set up completion queue's type and subtype */
        cq->type = type;
        cq->subtype = subtype;
12420         cq->assoc_qid = eq->queue_id;
12421         cq->host_index = 0;
12422         cq->hba_index = 0;
12423
12424 out:
12425         mempool_free(mbox, phba->mbox_mem_pool);
12426         return status;
12427 }
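
/*
 * Illustrative sketch only (not part of the driver): callers such as the
 * SLI4 queue setup path pair a previously created EQ with each new CQ.
 * The "els_cq"/"hba_eq" names below are placeholders for the example, not
 * fields asserted by this file.
 *
 *      rc = lpfc_cq_create(phba, els_cq, hba_eq, LPFC_WCQ, LPFC_ELS);
 *      if (rc)
 *              goto out_destroy_eq;
 */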
12428
/**
 * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mq.
 *
 * This function provides the fallback (fb) path used when mq_create_ext
 * fails on older FW generations. Its purpose is otherwise identical to
 * mq_create_ext.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
12443 static void
12444 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
12445                        LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
12446 {
12447         struct lpfc_mbx_mq_create *mq_create;
12448         struct lpfc_dmabuf *dmabuf;
12449         int length;
12450
12451         length = (sizeof(struct lpfc_mbx_mq_create) -
12452                   sizeof(struct lpfc_sli4_cfg_mhdr));
12453         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12454                          LPFC_MBOX_OPCODE_MQ_CREATE,
12455                          length, LPFC_SLI4_MBX_EMBED);
12456         mq_create = &mbox->u.mqe.un.mq_create;
12457         bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
12458                mq->page_count);
12459         bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
12460                cq->queue_id);
12461         bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
12462         switch (mq->entry_count) {
12463         case 16:
12464                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12465                        LPFC_MQ_RING_SIZE_16);
12466                 break;
12467         case 32:
12468                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12469                        LPFC_MQ_RING_SIZE_32);
12470                 break;
12471         case 64:
12472                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12473                        LPFC_MQ_RING_SIZE_64);
12474                 break;
12475         case 128:
12476                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12477                        LPFC_MQ_RING_SIZE_128);
12478                 break;
12479         }
12480         list_for_each_entry(dmabuf, &mq->page_list, list) {
12481                 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12482                         putPaddrLow(dmabuf->phys);
12483                 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12484                         putPaddrHigh(dmabuf->phys);
12485         }
12486 }
12487
/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mq.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @mq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the MQ_CREATE mailbox command to the HBA to set up the
 * mailbox queue. This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
12509 int32_t
12510 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
12511                struct lpfc_queue *cq, uint32_t subtype)
12512 {
12513         struct lpfc_mbx_mq_create *mq_create;
12514         struct lpfc_mbx_mq_create_ext *mq_create_ext;
12515         struct lpfc_dmabuf *dmabuf;
12516         LPFC_MBOXQ_t *mbox;
12517         int rc, length, status = 0;
12518         uint32_t shdr_status, shdr_add_status;
12519         union lpfc_sli4_cfg_shdr *shdr;
12520         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12521
12522         /* sanity check on queue memory */
12523         if (!mq || !cq)
12524                 return -ENODEV;
12525         if (!phba->sli4_hba.pc_sli4_params.supported)
12526                 hw_page_size = SLI4_PAGE_SIZE;
12527
12528         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12529         if (!mbox)
12530                 return -ENOMEM;
12531         length = (sizeof(struct lpfc_mbx_mq_create_ext) -
12532                   sizeof(struct lpfc_sli4_cfg_mhdr));
12533         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12534                          LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
12535                          length, LPFC_SLI4_MBX_EMBED);
12536
12537         mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
12538         shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
12539         bf_set(lpfc_mbx_mq_create_ext_num_pages,
12540                &mq_create_ext->u.request, mq->page_count);
12541         bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
12542                &mq_create_ext->u.request, 1);
12543         bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
12544                &mq_create_ext->u.request, 1);
12545         bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
12546                &mq_create_ext->u.request, 1);
12547         bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
12548                &mq_create_ext->u.request, 1);
12549         bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
12550                &mq_create_ext->u.request, 1);
12551         bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
12552         bf_set(lpfc_mbox_hdr_version, &shdr->request,
12553                phba->sli4_hba.pc_sli4_params.mqv);
12554         if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
12555                 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
12556                        cq->queue_id);
12557         else
12558                 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
12559                        cq->queue_id);
12560         switch (mq->entry_count) {
12561         default:
12562                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12563                                 "0362 Unsupported MQ count. (%d)\n",
12564                                 mq->entry_count);
12565                 if (mq->entry_count < 16) {
12566                         status = -EINVAL;
12567                         goto out;
12568                 }
                /* otherwise default to smallest count (fall through) */
12570         case 16:
12571                 bf_set(lpfc_mq_context_ring_size,
12572                        &mq_create_ext->u.request.context,
12573                        LPFC_MQ_RING_SIZE_16);
12574                 break;
12575         case 32:
12576                 bf_set(lpfc_mq_context_ring_size,
12577                        &mq_create_ext->u.request.context,
12578                        LPFC_MQ_RING_SIZE_32);
12579                 break;
12580         case 64:
12581                 bf_set(lpfc_mq_context_ring_size,
12582                        &mq_create_ext->u.request.context,
12583                        LPFC_MQ_RING_SIZE_64);
12584                 break;
12585         case 128:
12586                 bf_set(lpfc_mq_context_ring_size,
12587                        &mq_create_ext->u.request.context,
12588                        LPFC_MQ_RING_SIZE_128);
12589                 break;
12590         }
12591         list_for_each_entry(dmabuf, &mq->page_list, list) {
12592                 memset(dmabuf->virt, 0, hw_page_size);
12593                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
12594                                         putPaddrLow(dmabuf->phys);
12595                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
12596                                         putPaddrHigh(dmabuf->phys);
12597         }
12598         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12599         mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
12600                               &mq_create_ext->u.response);
12601         if (rc != MBX_SUCCESS) {
12602                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12603                                 "2795 MQ_CREATE_EXT failed with "
                                "status x%x. Fall back to MQ_CREATE.\n",
12605                                 rc);
12606                 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
12607                 mq_create = &mbox->u.mqe.un.mq_create;
12608                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12609                 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
12610                 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
12611                                       &mq_create->u.response);
12612         }
12613
12614         /* The IOCTL status is embedded in the mailbox subheader. */
12615         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12616         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12617         if (shdr_status || shdr_add_status || rc) {
12618                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12619                                 "2502 MQ_CREATE mailbox failed with "
12620                                 "status x%x add_status x%x, mbx status x%x\n",
12621                                 shdr_status, shdr_add_status, rc);
12622                 status = -ENXIO;
12623                 goto out;
12624         }
12625         if (mq->queue_id == 0xFFFF) {
12626                 status = -ENXIO;
12627                 goto out;
12628         }
12629         mq->type = LPFC_MQ;
12630         mq->assoc_qid = cq->queue_id;
12631         mq->subtype = subtype;
12632         mq->host_index = 0;
12633         mq->hba_index = 0;
12634
12635         /* link the mq onto the parent cq child list */
12636         list_add_tail(&mq->list, &cq->child_list);
12637 out:
12638         mempool_free(mbox, phba->mbox_mem_pool);
12639         return status;
12640 }
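
/*
 * Illustrative sketch only (not part of the driver): the mailbox queue is
 * bound to the mailbox completion queue with the LPFC_MBOX subtype; the
 * "mbx_mq"/"mbx_cq" names below are placeholders.
 *
 *      rc = lpfc_mq_create(phba, mbx_mq, mbx_cq, LPFC_MBOX);
 *      if (rc)
 *              goto out_destroy_cq;
 */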
12641
/**
 * lpfc_wq_create - Create a Work Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @wq: The queue structure to use to create the work queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a work queue, as detailed in @wq, on a port, described
 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @wq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @cq
 * is used to indicate which completion queue to bind this work queue to. This
 * function will send the WQ_CREATE mailbox command to the HBA to set up the
 * work queue. This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
12664 uint32_t
12665 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12666                struct lpfc_queue *cq, uint32_t subtype)
12667 {
12668         struct lpfc_mbx_wq_create *wq_create;
12669         struct lpfc_dmabuf *dmabuf;
12670         LPFC_MBOXQ_t *mbox;
12671         int rc, length, status = 0;
12672         uint32_t shdr_status, shdr_add_status;
12673         union lpfc_sli4_cfg_shdr *shdr;
12674         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12675         struct dma_address *page;
12676
12677         /* sanity check on queue memory */
12678         if (!wq || !cq)
12679                 return -ENODEV;
12680         if (!phba->sli4_hba.pc_sli4_params.supported)
12681                 hw_page_size = SLI4_PAGE_SIZE;
12682
12683         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12684         if (!mbox)
12685                 return -ENOMEM;
12686         length = (sizeof(struct lpfc_mbx_wq_create) -
12687                   sizeof(struct lpfc_sli4_cfg_mhdr));
12688         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12689                          LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
12690                          length, LPFC_SLI4_MBX_EMBED);
12691         wq_create = &mbox->u.mqe.un.wq_create;
12692         shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
12693         bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
12694                     wq->page_count);
12695         bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
12696                     cq->queue_id);
12697         bf_set(lpfc_mbox_hdr_version, &shdr->request,
12698                phba->sli4_hba.pc_sli4_params.wqv);
12699         if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
12700                 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
12701                        wq->entry_count);
12702                 switch (wq->entry_size) {
12703                 default:
12704                 case 64:
12705                         bf_set(lpfc_mbx_wq_create_wqe_size,
12706                                &wq_create->u.request_1,
12707                                LPFC_WQ_WQE_SIZE_64);
12708                         break;
12709                 case 128:
12710                         bf_set(lpfc_mbx_wq_create_wqe_size,
12711                                &wq_create->u.request_1,
12712                                LPFC_WQ_WQE_SIZE_128);
12713                         break;
12714                 }
12715                 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
12716                        (PAGE_SIZE/SLI4_PAGE_SIZE));
12717                 page = wq_create->u.request_1.page;
12718         } else {
12719                 page = wq_create->u.request.page;
12720         }
12721         list_for_each_entry(dmabuf, &wq->page_list, list) {
12722                 memset(dmabuf->virt, 0, hw_page_size);
12723                 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
12724                 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
12725         }
12726         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12727         /* The IOCTL status is embedded in the mailbox subheader. */
12728         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12729         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12730         if (shdr_status || shdr_add_status || rc) {
12731                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12732                                 "2503 WQ_CREATE mailbox failed with "
12733                                 "status x%x add_status x%x, mbx status x%x\n",
12734                                 shdr_status, shdr_add_status, rc);
12735                 status = -ENXIO;
12736                 goto out;
12737         }
12738         wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
12739         if (wq->queue_id == 0xFFFF) {
12740                 status = -ENXIO;
12741                 goto out;
12742         }
12743         wq->type = LPFC_WQ;
12744         wq->assoc_qid = cq->queue_id;
12745         wq->subtype = subtype;
12746         wq->host_index = 0;
12747         wq->hba_index = 0;
12748         wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
12749
12750         /* link the wq onto the parent cq child list */
12751         list_add_tail(&wq->list, &cq->child_list);
12752 out:
12753         mempool_free(mbox, phba->mbox_mem_pool);
12754         return status;
12755 }
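
/*
 * Illustrative sketch only (not part of the driver): each ELS or FCP work
 * queue is bound to its own completion queue; the names below are
 * placeholders.
 *
 *      rc = lpfc_wq_create(phba, els_wq, els_cq, LPFC_ELS);
 *      if (rc)
 *              goto out_destroy_cq;
 */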
12756
/**
 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @rq:   The queue structure to use for the receive queue.
 * @qno:  The associated HBQ number
 *
 * For SLI4 we need to adjust the RQ repost value based on
 * the number of buffers that are initially posted to the RQ.
 */
12767 void
12768 lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
12769 {
12770         uint32_t cnt;
12771
12772         /* sanity check on queue memory */
12773         if (!rq)
12774                 return;
12775         cnt = lpfc_hbq_defs[qno]->entry_count;
12776
12777         /* Recalc repost for RQs based on buffers initially posted */
12778         cnt = (cnt >> 3);
12779         if (cnt < LPFC_QUEUE_MIN_REPOST)
12780                 cnt = LPFC_QUEUE_MIN_REPOST;
12781
12782         rq->entry_repost = cnt;
12783 }
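
/*
 * Worked example, assuming an HBQ entry_count of 512: the repost interval
 * becomes 512 >> 3 = 64, so buffers are replenished after every 64 RQEs
 * are consumed. A much smaller ring, say 32 entries, would compute
 * 32 >> 3 = 4 and be clamped up to LPFC_QUEUE_MIN_REPOST instead.
 */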
12784
/**
 * lpfc_rq_create - Create a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind these receive queues to.
 * @subtype: The subtype of the receive queues.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq and
 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
 * to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @hrq and
 * @drq structs are used to get the entry count that is necessary to determine
 * the number of pages to use for this queue. The @cq is used to indicate which
 * completion queue to bind received buffers that are posted to these queues to.
 * This function will send the RQ_CREATE mailbox command to the HBA to set up
 * the receive queue pair. This function is synchronous and will wait for the
 * mailbox command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
12808 uint32_t
12809 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12810                struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
12811 {
12812         struct lpfc_mbx_rq_create *rq_create;
12813         struct lpfc_dmabuf *dmabuf;
12814         LPFC_MBOXQ_t *mbox;
12815         int rc, length, status = 0;
12816         uint32_t shdr_status, shdr_add_status;
12817         union lpfc_sli4_cfg_shdr *shdr;
12818         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12819
12820         /* sanity check on queue memory */
12821         if (!hrq || !drq || !cq)
12822                 return -ENODEV;
12823         if (!phba->sli4_hba.pc_sli4_params.supported)
12824                 hw_page_size = SLI4_PAGE_SIZE;
12825
12826         if (hrq->entry_count != drq->entry_count)
12827                 return -EINVAL;
12828         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12829         if (!mbox)
12830                 return -ENOMEM;
12831         length = (sizeof(struct lpfc_mbx_rq_create) -
12832                   sizeof(struct lpfc_sli4_cfg_mhdr));
12833         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12834                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
12835                          length, LPFC_SLI4_MBX_EMBED);
12836         rq_create = &mbox->u.mqe.un.rq_create;
12837         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
12838         bf_set(lpfc_mbox_hdr_version, &shdr->request,
12839                phba->sli4_hba.pc_sli4_params.rqv);
12840         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
12841                 bf_set(lpfc_rq_context_rqe_count_1,
12842                        &rq_create->u.request.context,
12843                        hrq->entry_count);
12844                 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
12845                 bf_set(lpfc_rq_context_rqe_size,
12846                        &rq_create->u.request.context,
12847                        LPFC_RQE_SIZE_8);
12848                 bf_set(lpfc_rq_context_page_size,
12849                        &rq_create->u.request.context,
12850                        (PAGE_SIZE/SLI4_PAGE_SIZE));
12851         } else {
12852                 switch (hrq->entry_count) {
12853                 default:
12854                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12855                                         "2535 Unsupported RQ count. (%d)\n",
12856                                         hrq->entry_count);
12857                         if (hrq->entry_count < 512) {
12858                                 status = -EINVAL;
12859                                 goto out;
12860                         }
                        /* otherwise default to smallest count (fall through) */
12862                 case 512:
12863                         bf_set(lpfc_rq_context_rqe_count,
12864                                &rq_create->u.request.context,
12865                                LPFC_RQ_RING_SIZE_512);
12866                         break;
12867                 case 1024:
12868                         bf_set(lpfc_rq_context_rqe_count,
12869                                &rq_create->u.request.context,
12870                                LPFC_RQ_RING_SIZE_1024);
12871                         break;
12872                 case 2048:
12873                         bf_set(lpfc_rq_context_rqe_count,
12874                                &rq_create->u.request.context,
12875                                LPFC_RQ_RING_SIZE_2048);
12876                         break;
12877                 case 4096:
12878                         bf_set(lpfc_rq_context_rqe_count,
12879                                &rq_create->u.request.context,
12880                                LPFC_RQ_RING_SIZE_4096);
12881                         break;
12882                 }
12883                 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
12884                        LPFC_HDR_BUF_SIZE);
12885         }
12886         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
12887                cq->queue_id);
12888         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
12889                hrq->page_count);
12890         list_for_each_entry(dmabuf, &hrq->page_list, list) {
12891                 memset(dmabuf->virt, 0, hw_page_size);
12892                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12893                                         putPaddrLow(dmabuf->phys);
12894                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12895                                         putPaddrHigh(dmabuf->phys);
12896         }
12897         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12898         /* The IOCTL status is embedded in the mailbox subheader. */
12899         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12900         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12901         if (shdr_status || shdr_add_status || rc) {
12902                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12903                                 "2504 RQ_CREATE mailbox failed with "
12904                                 "status x%x add_status x%x, mbx status x%x\n",
12905                                 shdr_status, shdr_add_status, rc);
12906                 status = -ENXIO;
12907                 goto out;
12908         }
12909         hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
12910         if (hrq->queue_id == 0xFFFF) {
12911                 status = -ENXIO;
12912                 goto out;
12913         }
12914         hrq->type = LPFC_HRQ;
12915         hrq->assoc_qid = cq->queue_id;
12916         hrq->subtype = subtype;
12917         hrq->host_index = 0;
12918         hrq->hba_index = 0;
12919
12920         /* now create the data queue */
12921         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12922                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
12923                          length, LPFC_SLI4_MBX_EMBED);
12924         bf_set(lpfc_mbox_hdr_version, &shdr->request,
12925                phba->sli4_hba.pc_sli4_params.rqv);
12926         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
12927                 bf_set(lpfc_rq_context_rqe_count_1,
12928                        &rq_create->u.request.context, hrq->entry_count);
12929                 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
12930                 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
12931                        LPFC_RQE_SIZE_8);
12932                 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
12933                        (PAGE_SIZE/SLI4_PAGE_SIZE));
12934         } else {
12935                 switch (drq->entry_count) {
12936                 default:
12937                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12938                                         "2536 Unsupported RQ count. (%d)\n",
12939                                         drq->entry_count);
12940                         if (drq->entry_count < 512) {
12941                                 status = -EINVAL;
12942                                 goto out;
12943                         }
                        /* otherwise default to smallest count (fall through) */
12945                 case 512:
12946                         bf_set(lpfc_rq_context_rqe_count,
12947                                &rq_create->u.request.context,
12948                                LPFC_RQ_RING_SIZE_512);
12949                         break;
12950                 case 1024:
12951                         bf_set(lpfc_rq_context_rqe_count,
12952                                &rq_create->u.request.context,
12953                                LPFC_RQ_RING_SIZE_1024);
12954                         break;
12955                 case 2048:
12956                         bf_set(lpfc_rq_context_rqe_count,
12957                                &rq_create->u.request.context,
12958                                LPFC_RQ_RING_SIZE_2048);
12959                         break;
12960                 case 4096:
12961                         bf_set(lpfc_rq_context_rqe_count,
12962                                &rq_create->u.request.context,
12963                                LPFC_RQ_RING_SIZE_4096);
12964                         break;
12965                 }
12966                 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
12967                        LPFC_DATA_BUF_SIZE);
12968         }
12969         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
12970                cq->queue_id);
12971         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
12972                drq->page_count);
12973         list_for_each_entry(dmabuf, &drq->page_list, list) {
12974                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12975                                         putPaddrLow(dmabuf->phys);
12976                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12977                                         putPaddrHigh(dmabuf->phys);
12978         }
12979         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12980         /* The IOCTL status is embedded in the mailbox subheader. */
12981         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
12982         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12983         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12984         if (shdr_status || shdr_add_status || rc) {
12985                 status = -ENXIO;
12986                 goto out;
12987         }
12988         drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
12989         if (drq->queue_id == 0xFFFF) {
12990                 status = -ENXIO;
12991                 goto out;
12992         }
12993         drq->type = LPFC_DRQ;
12994         drq->assoc_qid = cq->queue_id;
12995         drq->subtype = subtype;
12996         drq->host_index = 0;
12997         drq->hba_index = 0;
12998
12999         /* link the header and data RQs onto the parent cq child list */
13000         list_add_tail(&hrq->list, &cq->child_list);
13001         list_add_tail(&drq->list, &cq->child_list);
13002
13003 out:
13004         mempool_free(mbox, phba->mbox_mem_pool);
13005         return status;
13006 }
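
/*
 * Illustrative sketch only (not part of the driver): header and data RQs
 * are created as a pair against one CQ, typically for unsolicited receive
 * frames; the queue names below are placeholders.
 *
 *      rc = lpfc_rq_create(phba, hdr_rq, dat_rq, els_cq, LPFC_USOL);
 *      if (rc)
 *              goto out_destroy_wq;
 */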
13007
/**
 * lpfc_eq_destroy - Destroy an Event Queue on the HBA
 * @phba: HBA structure that indicates port to destroy the queue on.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @eq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
13020 uint32_t
13021 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
13022 {
13023         LPFC_MBOXQ_t *mbox;
13024         int rc, length, status = 0;
13025         uint32_t shdr_status, shdr_add_status;
13026         union lpfc_sli4_cfg_shdr *shdr;
13027
13028         /* sanity check on queue memory */
13029         if (!eq)
13030                 return -ENODEV;
13031         mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
13032         if (!mbox)
13033                 return -ENOMEM;
13034         length = (sizeof(struct lpfc_mbx_eq_destroy) -
13035                   sizeof(struct lpfc_sli4_cfg_mhdr));
13036         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13037                          LPFC_MBOX_OPCODE_EQ_DESTROY,
13038                          length, LPFC_SLI4_MBX_EMBED);
13039         bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
13040                eq->queue_id);
13041         mbox->vport = eq->phba->pport;
13042         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13043
13044         rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
13045         /* The IOCTL status is embedded in the mailbox subheader. */
13046         shdr = (union lpfc_sli4_cfg_shdr *)
13047                 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
13048         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13049         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13050         if (shdr_status || shdr_add_status || rc) {
13051                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13052                                 "2505 EQ_DESTROY mailbox failed with "
13053                                 "status x%x add_status x%x, mbx status x%x\n",
13054                                 shdr_status, shdr_add_status, rc);
13055                 status = -ENXIO;
13056         }
13057
13058         /* Remove eq from any list */
13059         list_del_init(&eq->list);
13060         mempool_free(mbox, eq->phba->mbox_mem_pool);
13061         return status;
13062 }
13063
/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to destroy the queue on.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @cq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
13076 uint32_t
13077 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
13078 {
13079         LPFC_MBOXQ_t *mbox;
13080         int rc, length, status = 0;
13081         uint32_t shdr_status, shdr_add_status;
13082         union lpfc_sli4_cfg_shdr *shdr;
13083
13084         /* sanity check on queue memory */
13085         if (!cq)
13086                 return -ENODEV;
13087         mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
13088         if (!mbox)
13089                 return -ENOMEM;
13090         length = (sizeof(struct lpfc_mbx_cq_destroy) -
13091                   sizeof(struct lpfc_sli4_cfg_mhdr));
13092         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13093                          LPFC_MBOX_OPCODE_CQ_DESTROY,
13094                          length, LPFC_SLI4_MBX_EMBED);
13095         bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
13096                cq->queue_id);
13097         mbox->vport = cq->phba->pport;
13098         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13099         rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
13100         /* The IOCTL status is embedded in the mailbox subheader. */
13101         shdr = (union lpfc_sli4_cfg_shdr *)
13102                 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
13103         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13104         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13105         if (shdr_status || shdr_add_status || rc) {
13106                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13107                                 "2506 CQ_DESTROY mailbox failed with "
13108                                 "status x%x add_status x%x, mbx status x%x\n",
13109                                 shdr_status, shdr_add_status, rc);
13110                 status = -ENXIO;
13111         }
13112         /* Remove cq from any list */
13113         list_del_init(&cq->list);
13114         mempool_free(mbox, cq->phba->mbox_mem_pool);
13115         return status;
13116 }
13117
/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to destroy the queue on.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
13130 uint32_t
13131 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
13132 {
13133         LPFC_MBOXQ_t *mbox;
13134         int rc, length, status = 0;
13135         uint32_t shdr_status, shdr_add_status;
13136         union lpfc_sli4_cfg_shdr *shdr;
13137
13138         /* sanity check on queue memory */
13139         if (!mq)
13140                 return -ENODEV;
13141         mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
13142         if (!mbox)
13143                 return -ENOMEM;
13144         length = (sizeof(struct lpfc_mbx_mq_destroy) -
13145                   sizeof(struct lpfc_sli4_cfg_mhdr));
13146         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13147                          LPFC_MBOX_OPCODE_MQ_DESTROY,
13148                          length, LPFC_SLI4_MBX_EMBED);
13149         bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
13150                mq->queue_id);
13151         mbox->vport = mq->phba->pport;
13152         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13153         rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
13154         /* The IOCTL status is embedded in the mailbox subheader. */
13155         shdr = (union lpfc_sli4_cfg_shdr *)
13156                 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
13157         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13158         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13159         if (shdr_status || shdr_add_status || rc) {
13160                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13161                                 "2507 MQ_DESTROY mailbox failed with "
13162                                 "status x%x add_status x%x, mbx status x%x\n",
13163                                 shdr_status, shdr_add_status, rc);
13164                 status = -ENXIO;
13165         }
13166         /* Remove mq from any list */
13167         list_del_init(&mq->list);
13168         mempool_free(mbox, mq->phba->mbox_mem_pool);
13169         return status;
13170 }
13171
/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: HBA structure that indicates port to destroy the queue on.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
13184 uint32_t
13185 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
13186 {
13187         LPFC_MBOXQ_t *mbox;
13188         int rc, length, status = 0;
13189         uint32_t shdr_status, shdr_add_status;
13190         union lpfc_sli4_cfg_shdr *shdr;
13191
13192         /* sanity check on queue memory */
13193         if (!wq)
13194                 return -ENODEV;
13195         mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
13196         if (!mbox)
13197                 return -ENOMEM;
13198         length = (sizeof(struct lpfc_mbx_wq_destroy) -
13199                   sizeof(struct lpfc_sli4_cfg_mhdr));
13200         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13201                          LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
13202                          length, LPFC_SLI4_MBX_EMBED);
13203         bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
13204                wq->queue_id);
13205         mbox->vport = wq->phba->pport;
13206         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13207         rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
13208         shdr = (union lpfc_sli4_cfg_shdr *)
13209                 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
13210         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13211         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13212         if (shdr_status || shdr_add_status || rc) {
13213                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13214                                 "2508 WQ_DESTROY mailbox failed with "
13215                                 "status x%x add_status x%x, mbx status x%x\n",
13216                                 shdr_status, shdr_add_status, rc);
13217                 status = -ENXIO;
13218         }
13219         /* Remove wq from any list */
13220         list_del_init(&wq->list);
13221         mempool_free(mbox, wq->phba->mbox_mem_pool);
13222         return status;
13223 }
13224
/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to destroy the queues on.
 * @hrq: The queue structure associated with the header receive queue.
 * @drq: The queue structure associated with the data receive queue.
 *
 * This function destroys a receive queue pair, as detailed in @hrq and @drq,
 * by sending a mailbox command, specific to the type of queue, to the HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues to
 * destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
13237 uint32_t
13238 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
13239                 struct lpfc_queue *drq)
13240 {
13241         LPFC_MBOXQ_t *mbox;
13242         int rc, length, status = 0;
13243         uint32_t shdr_status, shdr_add_status;
13244         union lpfc_sli4_cfg_shdr *shdr;
13245
13246         /* sanity check on queue memory */
13247         if (!hrq || !drq)
13248                 return -ENODEV;
13249         mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
13250         if (!mbox)
13251                 return -ENOMEM;
13252         length = (sizeof(struct lpfc_mbx_rq_destroy) -
13253                   sizeof(struct lpfc_sli4_cfg_mhdr));
13254         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13255                          LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
13256                          length, LPFC_SLI4_MBX_EMBED);
13257         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
13258                hrq->queue_id);
13259         mbox->vport = hrq->phba->pport;
13260         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13261         rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
13262         /* The IOCTL status is embedded in the mailbox subheader. */
13263         shdr = (union lpfc_sli4_cfg_shdr *)
13264                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
13265         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13266         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13267         if (shdr_status || shdr_add_status || rc) {
13268                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13269                                 "2509 RQ_DESTROY mailbox failed with "
13270                                 "status x%x add_status x%x, mbx status x%x\n",
13271                                 shdr_status, shdr_add_status, rc);
13272                 if (rc != MBX_TIMEOUT)
13273                         mempool_free(mbox, hrq->phba->mbox_mem_pool);
13274                 return -ENXIO;
13275         }
13276         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
13277                drq->queue_id);
13278         rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
13279         shdr = (union lpfc_sli4_cfg_shdr *)
13280                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
13281         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13282         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13283         if (shdr_status || shdr_add_status || rc) {
13284                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13285                                 "2510 RQ_DESTROY mailbox failed with "
13286                                 "status x%x add_status x%x, mbx status x%x\n",
13287                                 shdr_status, shdr_add_status, rc);
13288                 status = -ENXIO;
13289         }
13290         list_del_init(&hrq->list);
13291         list_del_init(&drq->list);
13292         mempool_free(mbox, hrq->phba->mbox_mem_pool);
13293         return status;
13294 }
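
/*
 * Illustrative teardown ordering (not part of the driver): queues are
 * destroyed children-first, so the RQ pair and WQs go before the CQ they
 * are linked to, and the CQ before its parent EQ; names are placeholders.
 *
 *      lpfc_rq_destroy(phba, hdr_rq, dat_rq);
 *      lpfc_wq_destroy(phba, els_wq);
 *      lpfc_cq_destroy(phba, els_cq);
 *      lpfc_eq_destroy(phba, hba_eq);
 */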
13295
/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: pointer to lpfc hba data structure.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64-byte aligned.
 * If two SGL pages are mapped, the first must have 256 entries and the
 * second can have between 1 and 256 entries.
 *
 * Return codes:
 *      0 - Success
 *      -EINVAL, -ENXIO, -ENOMEM - Failure
 **/
13318 int
13319 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
13320                 dma_addr_t pdma_phys_addr0,
13321                 dma_addr_t pdma_phys_addr1,
13322                 uint16_t xritag)
13323 {
13324         struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
13325         LPFC_MBOXQ_t *mbox;
13326         int rc;
13327         uint32_t shdr_status, shdr_add_status;
13328         uint32_t mbox_tmo;
13329         union lpfc_sli4_cfg_shdr *shdr;
13330
13331         if (xritag == NO_XRI) {
13332                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13333                                 "0364 Invalid param:\n");
13334                 return -EINVAL;
13335         }
13336
13337         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13338         if (!mbox)
13339                 return -ENOMEM;
13340
13341         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13342                         LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
13343                         sizeof(struct lpfc_mbx_post_sgl_pages) -
13344                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
13345
13346         post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
13347                                 &mbox->u.mqe.un.post_sgl_pages;
13348         bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
13349         bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
13350
13351         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
13352                                 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
13353         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
13354                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
13355
13356         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
13357                                 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
13358         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
13359                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
13360         if (!phba->sli4_hba.intr_enable)
13361                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13362         else {
13363                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13364                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13365         }
13366         /* The IOCTL status is embedded in the mailbox subheader. */
13367         shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
13368         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13369         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13370         if (rc != MBX_TIMEOUT)
13371                 mempool_free(mbox, phba->mbox_mem_pool);
13372         if (shdr_status || shdr_add_status || rc) {
13373                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13374                                 "2511 POST_SGL mailbox failed with "
13375                                 "status x%x add_status x%x, mbx status x%x\n",
13376                                 shdr_status, shdr_add_status, rc);
13377                 rc = -ENXIO;
13378         }
        return rc;
13380 }
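
/*
 * Illustrative sketch only (not part of the driver): posting a single SGL
 * page for an XRI; a caller mapping fewer than 256 segments passes 0 as
 * the second page address. "sglq" below is a placeholder.
 *
 *      rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 *      if (rc)
 *              return rc;
 */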
13381
/**
 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available xri from the
 * driver's xri bitmask, consistent with the SLI-4 interface spec. The
 * allocation is performed while holding the hba lock.
 *
 * Returns
 *      An available logical xri if successful.
 *      NO_XRI if no xris are available.
 **/
13395 uint16_t
13396 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
13397 {
13398         unsigned long xri;
13399
13400         /*
13401          * Fetch the next logical xri.  Because this index is logical,
13402          * the driver starts at 0 each time.
13403          */
13404         spin_lock_irq(&phba->hbalock);
13405         xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
13406                                  phba->sli4_hba.max_cfg_param.max_xri, 0);
13407         if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
13408                 spin_unlock_irq(&phba->hbalock);
13409                 return NO_XRI;
13410         } else {
13411                 set_bit(xri, phba->sli4_hba.xri_bmask);
13412                 phba->sli4_hba.max_cfg_param.xri_used++;
13413         }
13414         spin_unlock_irq(&phba->hbalock);
13415         return xri;
13416 }
13417
/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: the xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver. The caller is expected
 * to hold the hba lock.
 **/
13425 void
13426 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
13427 {
13428         if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
13429                 phba->sli4_hba.max_cfg_param.xri_used--;
13430         }
13431 }
13432
/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: the xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver, taking the hba lock.
 **/
13440 void
13441 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
13442 {
13443         spin_lock_irq(&phba->hbalock);
13444         __lpfc_sli4_free_xri(phba, xri);
13445         spin_unlock_irq(&phba->hbalock);
13446 }
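
/*
 * Illustrative pairing (not part of the driver): an xri taken with
 * lpfc_sli4_alloc_xri() is handed back through lpfc_sli4_free_xri() once
 * the IO that used it has completed.
 *
 *      xri = lpfc_sli4_alloc_xri(phba);
 *      if (xri == NO_XRI)
 *              return -ENOMEM;
 *      ...
 *      lpfc_sli4_free_xri(phba, xri);
 */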
13447
/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If there is no unused xritag
 * it will return NO_XRI (0xffff).
 * The function returns the allocated xritag if successful, else returns
 * NO_XRI. The caller is not required to hold any lock.
 **/
13458 uint16_t
13459 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
13460 {
13461         uint16_t xri_index;
13462
13463         xri_index = lpfc_sli4_alloc_xri(phba);
13464         if (xri_index == NO_XRI)
13465                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                "2004 Failed to allocate XRI, last XRITAG is %d"
13467                                 " Max XRI is %d, Used XRI is %d\n",
13468                                 xri_index,
13469                                 phba->sli4_hba.max_cfg_param.max_xri,
13470                                 phba->sli4_hba.max_cfg_param.xri_used);
13471         return xri_index;
13472 }
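
/*
 * Illustrative sketch only (not part of the driver): callers check the
 * returned tag against NO_XRI before using it; "sglq" is a placeholder.
 *
 *      sglq->sli4_xritag = lpfc_sli4_next_xritag(phba);
 *      if (sglq->sli4_xritag == NO_XRI)
 *              return NULL;
 */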
13473
/**
 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of the driver's sgl pages to the
 * HBA using a non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 **/
13485 static int
13486 lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
13487                             struct list_head *post_sgl_list,
13488                             int post_cnt)
13489 {
13490         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
13491         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13492         struct sgl_page_pairs *sgl_pg_pairs;
13493         void *viraddr;
13494         LPFC_MBOXQ_t *mbox;
13495         uint32_t reqlen, alloclen, pg_pairs;
13496         uint32_t mbox_tmo;
13497         uint16_t xritag_start = 0;
13498         int rc = 0;
13499         uint32_t shdr_status, shdr_add_status;
13500         union lpfc_sli4_cfg_shdr *shdr;
13501
13502         reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
13503                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13504         if (reqlen > SLI4_PAGE_SIZE) {
13505                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13506                                 "2559 Block sgl registration required DMA "
                                "size (%d) greater than a page\n", reqlen);
13508                 return -ENOMEM;
13509         }
13510         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13511         if (!mbox)
13512                 return -ENOMEM;
13513
13514         /* Allocate DMA memory and set up the non-embedded mailbox command */
13515         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13516                          LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
13517                          LPFC_SLI4_MBX_NEMBED);
13518
13519         if (alloclen < reqlen) {
13520                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13521                                 "0285 Allocated DMA memory size (%d) is "
13522                                 "less than the requested DMA memory "
13523                                 "size (%d)\n", alloclen, reqlen);
13524                 lpfc_sli4_mbox_cmd_free(phba, mbox);
13525                 return -ENOMEM;
13526         }
13527         /* Set up the SGL pages in the non-embedded DMA pages */
13528         viraddr = mbox->sge_array->addr[0];
13529         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13530         sgl_pg_pairs = &sgl->sgl_pg_pairs;
13531
13532         pg_pairs = 0;
13533         list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
13534                 /* Set up the sge entry */
13535                 sgl_pg_pairs->sgl_pg0_addr_lo =
13536                                 cpu_to_le32(putPaddrLow(sglq_entry->phys));
13537                 sgl_pg_pairs->sgl_pg0_addr_hi =
13538                                 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
13539                 sgl_pg_pairs->sgl_pg1_addr_lo =
13540                                 cpu_to_le32(putPaddrLow(0));
13541                 sgl_pg_pairs->sgl_pg1_addr_hi =
13542                                 cpu_to_le32(putPaddrHigh(0));
13543
13544                 /* Keep the first xritag on the list */
13545                 if (pg_pairs == 0)
13546                         xritag_start = sglq_entry->sli4_xritag;
13547                 sgl_pg_pairs++;
13548                 pg_pairs++;
13549         }
13550
13551         /* Complete initialization and perform endian conversion. */
13552         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
13553         bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
13554         sgl->word0 = cpu_to_le32(sgl->word0);
13555         if (!phba->sli4_hba.intr_enable)
13556                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13557         else {
13558                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13559                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13560         }
13561         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13562         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13563         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13564         if (rc != MBX_TIMEOUT)
13565                 lpfc_sli4_mbox_cmd_free(phba, mbox);
13566         if (shdr_status || shdr_add_status || rc) {
13567                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13568                                 "2513 POST_SGL_BLOCK mailbox command failed "
13569                                 "status x%x add_status x%x mbx status x%x\n",
13570                                 shdr_status, shdr_add_status, rc);
13571                 rc = -ENXIO;
13572         }
13573         return rc;
13574 }
13575
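/*
 * Sizing sketch (derived from the reqlen computation above): the
 * non-embedded payload must fit in one SLI4_PAGE_SIZE page, so the number
 * of sgl page pairs one POST_SGL_PAGES command can carry is bounded by:
 *
 *	max_pairs = (SLI4_PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr) -
 *		     sizeof(uint32_t)) / sizeof(struct sgl_page_pairs);
 *
 * Longer lists must be split by the caller; this routine simply fails the
 * registration with -ENOMEM.
 */
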
13576 /**
13577  * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
13578  * @phba: pointer to lpfc hba data structure.
13579  * @sblist: pointer to scsi buffer list.
13580  * @count: number of scsi buffers on the list.
13581  *
13582  * This routine is invoked to post a block of @count scsi sgl pages from a
13583  * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
13584  * No lock is held.
13585  *
13586  **/
13587 int
13588 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
13589                               struct list_head *sblist,
13590                               int count)
13591 {
13592         struct lpfc_scsi_buf *psb;
13593         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13594         struct sgl_page_pairs *sgl_pg_pairs;
13595         void *viraddr;
13596         LPFC_MBOXQ_t *mbox;
13597         uint32_t reqlen, alloclen, pg_pairs;
13598         uint32_t mbox_tmo;
13599         uint16_t xritag_start = 0;
13600         int rc = 0;
13601         uint32_t shdr_status, shdr_add_status;
13602         dma_addr_t pdma_phys_bpl1;
13603         union lpfc_sli4_cfg_shdr *shdr;
13604
13605         /* Calculate the requested length of the dma memory */
13606         reqlen = count * sizeof(struct sgl_page_pairs) +
13607                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13608         if (reqlen > SLI4_PAGE_SIZE) {
13609                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13610                                 "0217 Block sgl registration required DMA "
13611                                 "size (%d) greater than a page\n", reqlen);
13612                 return -ENOMEM;
13613         }
13614         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13615         if (!mbox) {
13616                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13617                                 "0283 Failed to allocate mbox cmd memory\n");
13618                 return -ENOMEM;
13619         }
13620
13621         /* Allocate DMA memory and set up the non-embedded mailbox command */
13622         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13623                                 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
13624                                 LPFC_SLI4_MBX_NEMBED);
13625
13626         if (alloclen < reqlen) {
13627                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13628                                 "2561 Allocated DMA memory size (%d) is "
13629                                 "less than the requested DMA memory "
13630                                 "size (%d)\n", alloclen, reqlen);
13631                 lpfc_sli4_mbox_cmd_free(phba, mbox);
13632                 return -ENOMEM;
13633         }
13634
13635         /* Get the first SGE entry from the non-embedded DMA memory */
13636         viraddr = mbox->sge_array->addr[0];
13637
13638         /* Set up the SGL pages in the non-embedded DMA pages */
13639         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13640         sgl_pg_pairs = &sgl->sgl_pg_pairs;
13641
13642         pg_pairs = 0;
13643         list_for_each_entry(psb, sblist, list) {
13644                 /* Set up the sge entry */
13645                 sgl_pg_pairs->sgl_pg0_addr_lo =
13646                         cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
13647                 sgl_pg_pairs->sgl_pg0_addr_hi =
13648                         cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
13649                 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
13650                         pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
13651                 else
13652                         pdma_phys_bpl1 = 0;
13653                 sgl_pg_pairs->sgl_pg1_addr_lo =
13654                         cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
13655                 sgl_pg_pairs->sgl_pg1_addr_hi =
13656                         cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
13657                 /* Keep the first xritag on the list */
13658                 if (pg_pairs == 0)
13659                         xritag_start = psb->cur_iocbq.sli4_xritag;
13660                 sgl_pg_pairs++;
13661                 pg_pairs++;
13662         }
13663         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
13664         bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
13665         /* Perform endian conversion if necessary */
13666         sgl->word0 = cpu_to_le32(sgl->word0);
13667
13668         if (!phba->sli4_hba.intr_enable)
13669                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13670         else {
13671                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13672                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13673         }
13674         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13675         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13676         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13677         if (rc != MBX_TIMEOUT)
13678                 lpfc_sli4_mbox_cmd_free(phba, mbox);
13679         if (shdr_status || shdr_add_status || rc) {
13680                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13681                                 "2564 POST_SGL_BLOCK mailbox command failed "
13682                                 "status x%x add_status x%x mbx status x%x\n",
13683                                 shdr_status, shdr_add_status, rc);
13684                 rc = -ENXIO;
13685         }
13686         return rc;
13687 }
13688
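/*
 * Address-split sketch (mirrors the loop above): each sgl_page_pairs entry
 * describes two pages.  When a SCSI buffer's BPL spans more than one SGL
 * page, the second page address is derived from the first; otherwise zero
 * is posted for the second page:
 *
 *	dma_addr_t pg0 = psb->dma_phys_bpl;
 *	dma_addr_t pg1 = (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) ?
 *			 pg0 + SGL_PAGE_SIZE : 0;
 */
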
13689 /**
13690  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
13691  * @phba: pointer to lpfc_hba struct that the frame was received on
13692  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13693  *
13694  * This function checks the fields in the @fc_hdr to see if the FC frame is a
13695  * valid type of frame that the LPFC driver will handle. This function
13696  * returns zero if the frame is valid or a non-zero value when the frame
13697  * does not pass the check.
13698  **/
13699 static int
13700 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
13701 {
13702         /* make the name tables static to save stack space */
13703         static char *rctl_names[] = FC_RCTL_NAMES_INIT;
13704         static char *type_names[] = FC_TYPE_NAMES_INIT;
13705         struct fc_vft_header *fc_vft_hdr;
13706         uint32_t *header = (uint32_t *) fc_hdr;
13707
13708         switch (fc_hdr->fh_r_ctl) {
13709         case FC_RCTL_DD_UNCAT:          /* uncategorized information */
13710         case FC_RCTL_DD_SOL_DATA:       /* solicited data */
13711         case FC_RCTL_DD_UNSOL_CTL:      /* unsolicited control */
13712         case FC_RCTL_DD_SOL_CTL:        /* solicited control or reply */
13713         case FC_RCTL_DD_UNSOL_DATA:     /* unsolicited data */
13714         case FC_RCTL_DD_DATA_DESC:      /* data descriptor */
13715         case FC_RCTL_DD_UNSOL_CMD:      /* unsolicited command */
13716         case FC_RCTL_DD_CMD_STATUS:     /* command status */
13717         case FC_RCTL_ELS_REQ:   /* extended link services request */
13718         case FC_RCTL_ELS_REP:   /* extended link services reply */
13719         case FC_RCTL_ELS4_REQ:  /* FC-4 ELS request */
13720         case FC_RCTL_ELS4_REP:  /* FC-4 ELS reply */
13721         case FC_RCTL_BA_NOP:    /* basic link service NOP */
13722         case FC_RCTL_BA_ABTS:   /* basic link service abort */
13723         case FC_RCTL_BA_RMC:    /* remove connection */
13724         case FC_RCTL_BA_ACC:    /* basic accept */
13725         case FC_RCTL_BA_RJT:    /* basic reject */
13726         case FC_RCTL_BA_PRMT:
13727         case FC_RCTL_ACK_1:     /* acknowledge_1 */
13728         case FC_RCTL_ACK_0:     /* acknowledge_0 */
13729         case FC_RCTL_P_RJT:     /* port reject */
13730         case FC_RCTL_F_RJT:     /* fabric reject */
13731         case FC_RCTL_P_BSY:     /* port busy */
13732         case FC_RCTL_F_BSY:     /* fabric busy to data frame */
13733         case FC_RCTL_F_BSYL:    /* fabric busy to link control frame */
13734         case FC_RCTL_LCR:       /* link credit reset */
13735         case FC_RCTL_END:       /* end */
13736                 break;
13737         case FC_RCTL_VFTH:      /* Virtual Fabric tagging Header */
13738                 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
13739                 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
13740                 return lpfc_fc_frame_check(phba, fc_hdr);
13741         default:
13742                 goto drop;
13743         }
13744         switch (fc_hdr->fh_type) {
13745         case FC_TYPE_BLS:
13746         case FC_TYPE_ELS:
13747         case FC_TYPE_FCP:
13748         case FC_TYPE_CT:
13749                 break;
13750         case FC_TYPE_IP:
13751         case FC_TYPE_ILS:
13752         default:
13753                 goto drop;
13754         }
13755
13756         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
13757                         "2538 Received frame rctl:%s type:%s "
13758                         "Frame Data:%08x %08x %08x %08x %08x %08x\n",
13759                         rctl_names[fc_hdr->fh_r_ctl],
13760                         type_names[fc_hdr->fh_type],
13761                         be32_to_cpu(header[0]), be32_to_cpu(header[1]),
13762                         be32_to_cpu(header[2]), be32_to_cpu(header[3]),
13763                         be32_to_cpu(header[4]), be32_to_cpu(header[5]));
13764         return 0;
13765 drop:
13766         lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
13767                         "2539 Dropped frame rctl:%s type:%s\n",
13768                         rctl_names[fc_hdr->fh_r_ctl],
13769                         type_names[fc_hdr->fh_type]);
13770         return 1;
13771 }
13772
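/*
 * Usage sketch (this is how the receive path later in this file applies
 * the check): every unsolicited frame passes through this gate first, and
 * a failing frame is freed rather than processed:
 *
 *	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 *	if (lpfc_fc_frame_check(phba, fc_hdr)) {
 *		lpfc_in_buf_free(phba, &dmabuf->dbuf);
 *		return;
 *	}
 */
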
13773 /**
13774  * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
13775  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13776  *
13777  * This function processes the FC header to retrieve the VFI from the VFT
13778  * header, if one exists. This function returns the VFI if a VFT header is
13779  * present or 0 if none exists.
13780  **/
13781 static uint32_t
13782 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
13783 {
13784         struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
13785
13786         if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
13787                 return 0;
13788         return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
13789 }
13790
13791 /**
13792  * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
13793  * @phba: Pointer to the HBA structure to search for the vport on
13794  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13795  * @fcfi: The FCF index (FCFI) that the frame was received on
13796  *
13797  * This function searches the @phba for a vport that matches the content of the
13798  * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
13799  * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
13800  * returns the matching vport pointer or NULL if unable to match frame to a
13801  * vport.
13802  **/
13803 static struct lpfc_vport *
13804 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
13805                        uint16_t fcfi)
13806 {
13807         struct lpfc_vport **vports;
13808         struct lpfc_vport *vport = NULL;
13809         int i;
13810         uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
13811                         fc_hdr->fh_d_id[1] << 8 |
13812                         fc_hdr->fh_d_id[2]);
13813
13814         if (did == Fabric_DID)
13815                 return phba->pport;
13816         if ((phba->pport->fc_flag & FC_PT2PT) &&
13817             (phba->link_state != LPFC_HBA_READY))
13818                 return phba->pport;
13819
13820         vports = lpfc_create_vport_work_array(phba);
13821         if (vports != NULL)
13822                 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
13823                         if (phba->fcf.fcfi == fcfi &&
13824                             vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
13825                             vports[i]->fc_myDID == did) {
13826                                 vport = vports[i];
13827                                 break;
13828                         }
13829                 }
13830         lpfc_destroy_vport_work_array(phba, vports);
13831         return vport;
13832 }
13833
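/*
 * Matching sketch (the predicate used in the loop above): a frame binds to
 * a vport only when all three keys agree, conceptually:
 *
 *	match = (phba->fcf.fcfi == fcfi) &&
 *		(vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr)) &&
 *		(vports[i]->fc_myDID == did);
 */
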
13834 /**
13835  * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
13836  * @vport: The vport to work on.
13837  *
13838  * This function updates the receive sequence time stamp for this vport. The
13839  * receive sequence time stamp indicates the time that the last frame of the
13840  * sequence that has been idle for the longest amount of time was received.
13841  * The driver uses this time stamp to determine if any received sequences
13842  * have timed out.
13843  **/
13844 void
13845 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
13846 {
13847         struct lpfc_dmabuf *h_buf;
13848         struct hbq_dmabuf *dmabuf = NULL;
13849
13850         /* get the oldest sequence on the rcv list */
13851         h_buf = list_get_first(&vport->rcv_buffer_list,
13852                                struct lpfc_dmabuf, list);
13853         if (!h_buf)
13854                 return;
13855         dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13856         vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
13857 }
13858
13859 /**
13860  * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
13861  * @vport: The vport that the received sequences were sent to.
13862  *
13863  * This function cleans up all outstanding received sequences. This is called
13864  * by the driver when a link event or user action invalidates all the received
13865  * sequences.
13866  **/
13867 void
13868 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
13869 {
13870         struct lpfc_dmabuf *h_buf, *hnext;
13871         struct lpfc_dmabuf *d_buf, *dnext;
13872         struct hbq_dmabuf *dmabuf = NULL;
13873
13874         /* start with the oldest sequence on the rcv list */
13875         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
13876                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13877                 list_del_init(&dmabuf->hbuf.list);
13878                 list_for_each_entry_safe(d_buf, dnext,
13879                                          &dmabuf->dbuf.list, list) {
13880                         list_del_init(&d_buf->list);
13881                         lpfc_in_buf_free(vport->phba, d_buf);
13882                 }
13883                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
13884         }
13885 }
13886
13887 /**
13888  * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
13889  * @vport: The vport that the received sequences were sent to.
13890  *
13891  * This function determines whether any received sequences have timed out by
13892  * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
13893  * indicates that there is at least one timed out sequence this routine will
13894  * go through the received sequences one at a time from most inactive to most
13895  * active to determine which ones need to be cleaned up. Once it has determined
13896  * that a sequence needs to be cleaned up it will simply free up the resources
13897  * without sending an abort.
13898  **/
13899 void
13900 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
13901 {
13902         struct lpfc_dmabuf *h_buf, *hnext;
13903         struct lpfc_dmabuf *d_buf, *dnext;
13904         struct hbq_dmabuf *dmabuf = NULL;
13905         unsigned long timeout;
13906         int abort_count = 0;
13907
13908         timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
13909                    vport->rcv_buffer_time_stamp);
13910         if (list_empty(&vport->rcv_buffer_list) ||
13911             time_before(jiffies, timeout))
13912                 return;
13913         /* start with the oldest sequence on the rcv list */
13914         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
13915                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13916                 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
13917                            dmabuf->time_stamp);
13918                 if (time_before(jiffies, timeout))
13919                         break;
13920                 abort_count++;
13921                 list_del_init(&dmabuf->hbuf.list);
13922                 list_for_each_entry_safe(d_buf, dnext,
13923                                          &dmabuf->dbuf.list, list) {
13924                         list_del_init(&d_buf->list);
13925                         lpfc_in_buf_free(vport->phba, d_buf);
13926                 }
13927                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
13928         }
13929         if (abort_count)
13930                 lpfc_update_rcv_time_stamp(vport);
13931 }
13932
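/*
 * Timeout arithmetic sketch (the jiffies-safe form used above): a sequence
 * is considered timed out once E_D_TOV milliseconds have elapsed since its
 * last frame arrived:
 *
 *	timeout = msecs_to_jiffies(vport->phba->fc_edtov) +
 *		  dmabuf->time_stamp;
 *	if (time_before(jiffies, timeout))
 *		break;
 *
 * The break is safe because the rcv list is kept oldest-first, so every
 * later entry is younger still.
 */
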
13933 /**
13934  * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport the frame was received on
13935  * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
13936  *
13937  * This function searches through the existing incomplete sequences that have
13938  * been sent to this @vport. If the frame matches one of the incomplete
13939  * sequences then the dbuf in the @dmabuf is added to the list of frames that
13940  * make up that sequence. If no sequence is found that matches this frame then
13941  * the function will add the hbuf in the @dmabuf to the @vport's
13942  * rcv_buffer_list. This function returns a pointer to the first dmabuf in
13943  * the sequence list that the frame was linked to.
13944  **/
13945 static struct hbq_dmabuf *
13946 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
13947 {
13948         struct fc_frame_header *new_hdr;
13949         struct fc_frame_header *temp_hdr;
13950         struct lpfc_dmabuf *d_buf;
13951         struct lpfc_dmabuf *h_buf;
13952         struct hbq_dmabuf *seq_dmabuf = NULL;
13953         struct hbq_dmabuf *temp_dmabuf = NULL;
13954
13955         INIT_LIST_HEAD(&dmabuf->dbuf.list);
13956         dmabuf->time_stamp = jiffies;
13957         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
13958         /* Use the hdr_buf to find the sequence that this frame belongs to */
13959         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
13960                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
13961                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
13962                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
13963                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
13964                         continue;
13965                 /* found a pending sequence that matches this frame */
13966                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13967                 break;
13968         }
13969         if (!seq_dmabuf) {
13970                 /*
13971                  * This indicates first frame received for this sequence.
13972                  * Queue the buffer on the vport's rcv_buffer_list.
13973                  */
13974                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
13975                 lpfc_update_rcv_time_stamp(vport);
13976                 return dmabuf;
13977         }
13978         temp_hdr = seq_dmabuf->hbuf.virt;
13979         if (be16_to_cpu(new_hdr->fh_seq_cnt) <
13980                 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
13981                 list_del_init(&seq_dmabuf->hbuf.list);
13982                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
13983                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
13984                 lpfc_update_rcv_time_stamp(vport);
13985                 return dmabuf;
13986         }
13987         /* move this sequence to the tail to indicate a young sequence */
13988         list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
13989         seq_dmabuf->time_stamp = jiffies;
13990         lpfc_update_rcv_time_stamp(vport);
13991         if (list_empty(&seq_dmabuf->dbuf.list)) {
13992                 temp_hdr = dmabuf->hbuf.virt;
13993                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
13994                 return seq_dmabuf;
13995         }
13996         /* find the correct place in the sequence to insert this frame */
13997         list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
13998                 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
13999                 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
14000                 /*
14001                  * If the frame's sequence count is greater than the frame on
14002                  * the list then insert the frame right after this frame
14003                  */
14004                 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
14005                         be16_to_cpu(temp_hdr->fh_seq_cnt)) {
14006                         list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
14007                         return seq_dmabuf;
14008                 }
14009         }
14010         return NULL;
14011 }
14012
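/*
 * Sequence-key sketch (exactly the test used in the lookup above): frames
 * belong to the same partially assembled sequence when SEQ_ID, OX_ID, and
 * the 24-bit S_ID all match:
 *
 *	same_seq = (temp_hdr->fh_seq_id == new_hdr->fh_seq_id) &&
 *		   (temp_hdr->fh_ox_id == new_hdr->fh_ox_id) &&
 *		   !memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3);
 */
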
14013 /**
14014  * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
14015  * @vport: pointer to a virtual port
14016  * @dmabuf: pointer to a dmabuf that describes the FC sequence
14017  *
14018  * This function tries to abort the partially assembled sequence described
14019  * by the information in the basic abort @dmabuf. It checks whether such a
14020  * partially assembled sequence is held by the driver. If so, it frees up
14021  * all the frames from the partially assembled sequence.
14022  *
14023  * Return
14024  * true  -- if there is matching partially assembled sequence present and all
14025  *          the frames freed with the sequence;
14026  * false -- if there is no matching partially assembled sequence present so
14027  *          nothing got aborted in the lower layer driver
14028  **/
14029 static bool
14030 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
14031                             struct hbq_dmabuf *dmabuf)
14032 {
14033         struct fc_frame_header *new_hdr;
14034         struct fc_frame_header *temp_hdr;
14035         struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
14036         struct hbq_dmabuf *seq_dmabuf = NULL;
14037
14038         /* Use the hdr_buf to find the sequence that matches this frame */
14039         INIT_LIST_HEAD(&dmabuf->dbuf.list);
14040         INIT_LIST_HEAD(&dmabuf->hbuf.list);
14041         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14042         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
14043                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
14044                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
14045                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
14046                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
14047                         continue;
14048                 /* found a pending sequence that matches this frame */
14049                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14050                 break;
14051         }
14052
14053         /* Free up all the frames from the partially assembled sequence */
14054         if (seq_dmabuf) {
14055                 list_for_each_entry_safe(d_buf, n_buf,
14056                                          &seq_dmabuf->dbuf.list, list) {
14057                         list_del_init(&d_buf->list);
14058                         lpfc_in_buf_free(vport->phba, d_buf);
14059                 }
14060                 return true;
14061         }
14062         return false;
14063 }
14064
14065 /**
14066  * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
14067  * @phba: Pointer to HBA context object.
14068  * @cmd_iocbq: pointer to the command iocbq structure.
14069  * @rsp_iocbq: pointer to the response iocbq structure.
14070  *
14071  * This function handles the sequence abort response iocb command complete
14072  * event. It properly releases the memory allocated to the sequence abort
14073  * response iocb.
14074  **/
14075 static void
14076 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
14077                              struct lpfc_iocbq *cmd_iocbq,
14078                              struct lpfc_iocbq *rsp_iocbq)
14079 {
14080         if (cmd_iocbq)
14081                 lpfc_sli_release_iocbq(phba, cmd_iocbq);
14082
14083         /* Failure means BLS ABORT RSP did not get delivered to remote node*/
14084         if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
14085                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14086                         "3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
14087                         rsp_iocbq->iocb.ulpStatus,
14088                         rsp_iocbq->iocb.un.ulpWord[4]);
14089 }
14090
14091 /**
14092  * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
14093  * @phba: Pointer to HBA context object.
14094  * @xri: xri id in transaction.
14095  *
14096  * This function validates that the xri maps to the known range of XRIs
14097  * allocated and used by the driver, returning the index or NO_XRI.
14098  **/
14099 uint16_t
14100 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
14101                       uint16_t xri)
14102 {
14103         int i;
14104
14105         for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
14106                 if (xri == phba->sli4_hba.xri_ids[i])
14107                         return i;
14108         }
14109         return NO_XRI;
14110 }
14111
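/*
 * Usage sketch (as in the abort-response path below): translate the XRI
 * from the wire into the driver's logical index before acting on it, and
 * treat NO_XRI as "not owned by this driver":
 *
 *	lxri = lpfc_sli4_xri_inrange(phba, xri);
 *	if (lxri != NO_XRI)
 *		lpfc_set_rrq_active(phba, ndlp, lxri,
 *				    (xri == oxid) ? rxid : oxid, 0);
 */
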
14112 /**
14113  * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
14114  * @phba: Pointer to HBA context object.
14115  * @fc_hdr: pointer to a FC frame header.
14116  *
14117  * This function sends a basic link service response (BA_ACC or BA_RJT)
14118  * for a previously received unsolicited sequence abort event.
14119  **/
14120 static void
14121 lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
14122                         struct fc_frame_header *fc_hdr)
14123 {
14124         struct lpfc_iocbq *ctiocb = NULL;
14125         struct lpfc_nodelist *ndlp;
14126         uint16_t oxid, rxid, xri, lxri;
14127         uint32_t sid, fctl;
14128         IOCB_t *icmd;
14129         int rc;
14130
14131         if (!lpfc_is_link_up(phba))
14132                 return;
14133
14134         sid = sli4_sid_from_fc_hdr(fc_hdr);
14135         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
14136         rxid = be16_to_cpu(fc_hdr->fh_rx_id);
14137
14138         ndlp = lpfc_findnode_did(phba->pport, sid);
14139         if (!ndlp) {
14140                 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
14141                                 "1268 Find ndlp returned NULL for oxid:x%x "
14142                                 "SID:x%x\n", oxid, sid);
14143                 return;
14144         }
14145
14146         /* Allocate buffer for rsp iocb */
14147         ctiocb = lpfc_sli_get_iocbq(phba);
14148         if (!ctiocb)
14149                 return;
14150
14151         /* Extract the F_CTL field from FC_HDR */
14152         fctl = sli4_fctl_from_fc_hdr(fc_hdr);
14153
14154         icmd = &ctiocb->iocb;
14155         icmd->un.xseq64.bdl.bdeSize = 0;
14156         icmd->un.xseq64.bdl.ulpIoTag32 = 0;
14157         icmd->un.xseq64.w5.hcsw.Dfctl = 0;
14158         icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
14159         icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
14160
14161         /* Fill in the rest of iocb fields */
14162         icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
14163         icmd->ulpBdeCount = 0;
14164         icmd->ulpLe = 1;
14165         icmd->ulpClass = CLASS3;
14166         icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
14167         ctiocb->context1 = ndlp;
14168
14170         ctiocb->vport = phba->pport;
14171         ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
14172         ctiocb->sli4_lxritag = NO_XRI;
14173         ctiocb->sli4_xritag = NO_XRI;
14174
14175         if (fctl & FC_FC_EX_CTX)
14176                 /* Exchange responder sent the abort so we
14177                  * own the oxid.
14178                  */
14179                 xri = oxid;
14180         else
14181                 xri = rxid;
14182         lxri = lpfc_sli4_xri_inrange(phba, xri);
14183         if (lxri != NO_XRI)
14184                 lpfc_set_rrq_active(phba, ndlp, lxri,
14185                         (xri == oxid) ? rxid : oxid, 0);
14186         /* If the oxid maps to the FCP XRI range or if it is out of range,
14187          * send a BLS_RJT.  The driver no longer has that exchange.
14188          * Override the IOCB for a BA_RJT.
14189          */
14190         if (xri > (phba->sli4_hba.max_cfg_param.max_xri +
14191                     phba->sli4_hba.max_cfg_param.xri_base) ||
14192             xri > (lpfc_sli4_get_els_iocb_cnt(phba) +
14193                     phba->sli4_hba.max_cfg_param.xri_base)) {
14194                 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
14195                 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
14196                 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
14197                 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
14198         }
14199
14200         if (fctl & FC_FC_EX_CTX) {
14201                 /* ABTS sent by responder to CT exchange, construction
14202                  * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
14203                  * field and RX_ID from ABTS for RX_ID field.
14204                  */
14205                 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
14206         } else {
14207                 /* ABTS sent by initiator to CT exchange, construction
14208                  * of BA_ACC will need to allocate a new XRI as for the
14209                  * XRI_TAG field.
14210                  */
14211                 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
14212         }
14213         bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
14214         bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
14215
14216         /* Xmit CT abts response on exchange <xid> */
14217         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
14218                         "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
14219                         icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
14220
14221         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
14222         if (rc == IOCB_ERROR) {
14223                 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
14224                                 "2925 Failed to issue CT ABTS RSP x%x on "
14225                                 "xri x%x, Data x%x\n",
14226                                 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
14227                                 phba->link_state);
14228                 lpfc_sli_release_iocbq(phba, ctiocb);
14229         }
14230 }
14231
14232 /**
14233  * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
14234  * @vport: Pointer to the vport on which this sequence was received
14235  * @dmabuf: pointer to a dmabuf that describes the FC sequence
14236  *
14237  * This function handles an SLI-4 unsolicited abort event. If the unsolicited
14238  * receive sequence is only partially assembled by the driver, it shall abort
14239  * the partially assembled frames for the sequence. Otherwise, if the
14240  * unsolicited receive sequence has been completely assembled and passed to
14241  * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
14242  * indicate that the unsolicited sequence has been aborted. After that, it
14243  * will issue a basic accept (BA_ACC) for the abort.
14244  **/
14245 void
14246 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
14247                              struct hbq_dmabuf *dmabuf)
14248 {
14249         struct lpfc_hba *phba = vport->phba;
14250         struct fc_frame_header fc_hdr;
14251         uint32_t fctl;
14252         bool abts_par;
14253
14254         /* Make a copy of fc_hdr before the dmabuf being released */
14255         memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
14256         fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
14257
14258         if (fctl & FC_FC_EX_CTX) {
14259                 /*
14260                  * ABTS sent by responder to exchange, just free the buffer
14261                  */
14262                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14263         } else {
14264                 /*
14265                  * ABTS sent by initiator to exchange, need to do cleanup
14266                  */
14267                 /* Try to abort partially assembled seq */
14268                 abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
14269
14270                 /* Send abort to ULP if partially seq abort failed */
14271                 if (!abts_par)
14272                         lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
14273                 else
14274                         lpfc_in_buf_free(phba, &dmabuf->dbuf);
14275         }
14276         /* Send basic accept (BA_ACC) to the abort requester */
14277         lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
14278 }
14279
14280 /**
14281  * lpfc_seq_complete - Indicates if a sequence is complete
14282  * @dmabuf: pointer to a dmabuf that describes the FC sequence
14283  *
14284  * This function checks the sequence, starting with the frame described by
14285  * @dmabuf, to see if all the frames associated with this sequence are present.
14286  * The frames associated with this sequence are linked to the @dmabuf using the
14287  * dbuf list. This function looks for three major things: 1) the first frame
14288  * has a sequence count of zero; 2) there is a frame with the last frame of
14289  * sequence bit set; 3) there are no holes in the sequence count. The function
14290  * returns 1 when the sequence is complete, otherwise it returns 0.
14291  **/
14292 static int
14293 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
14294 {
14295         struct fc_frame_header *hdr;
14296         struct lpfc_dmabuf *d_buf;
14297         struct hbq_dmabuf *seq_dmabuf;
14298         uint32_t fctl;
14299         int seq_count = 0;
14300
14301         hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14302         /* make sure first frame of sequence has a sequence count of zero */
14303         if (hdr->fh_seq_cnt != seq_count)
14304                 return 0;
14305         fctl = (hdr->fh_f_ctl[0] << 16 |
14306                 hdr->fh_f_ctl[1] << 8 |
14307                 hdr->fh_f_ctl[2]);
14308         /* If last frame of sequence we can return success. */
14309         if (fctl & FC_FC_END_SEQ)
14310                 return 1;
14311         list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
14312                 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14313                 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14314                 /* If there is a hole in the sequence count then fail. */
14315                 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
14316                         return 0;
14317                 fctl = (hdr->fh_f_ctl[0] << 16 |
14318                         hdr->fh_f_ctl[1] << 8 |
14319                         hdr->fh_f_ctl[2]);
14320                 /* If last frame of sequence we can return success. */
14321                 if (fctl & FC_FC_END_SEQ)
14322                         return 1;
14323         }
14324         return 0;
14325 }
14326
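/*
 * F_CTL sketch (the same assembly used above): the end-of-sequence test
 * reads the three F_CTL bytes into a word and checks the FC_FC_END_SEQ bit:
 *
 *	fctl = (hdr->fh_f_ctl[0] << 16 |
 *		hdr->fh_f_ctl[1] << 8 |
 *		hdr->fh_f_ctl[2]);
 *	if (fctl & FC_FC_END_SEQ)
 *		return 1;
 */
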
14327 /**
14328  * lpfc_prep_seq - Prep sequence for ULP processing
14329  * @vport: Pointer to the vport on which this sequence was received
14330  * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
14331  *
14332  * This function takes a sequence, described by a list of frames, and creates
14333  * a list of iocbq structures to describe the sequence. This iocbq list will be
14334  * used to issue to the generic unsolicited sequence handler. This routine
14335  * returns a pointer to the first iocbq in the list. If the function is unable
14336  * to allocate an iocbq then it throws out the received frames that could
14337  * not be described and returns a pointer to the first iocbq. If unable to
14338  * allocate any iocbqs (including the first) this function will return NULL.
14339  **/
14340 static struct lpfc_iocbq *
14341 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
14342 {
14343         struct hbq_dmabuf *hbq_buf;
14344         struct lpfc_dmabuf *d_buf, *n_buf;
14345         struct lpfc_iocbq *first_iocbq, *iocbq;
14346         struct fc_frame_header *fc_hdr;
14347         uint32_t sid;
14348         uint32_t len, tot_len;
14349         struct ulp_bde64 *pbde;
14350
14351         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14352         /* remove from receive buffer list */
14353         list_del_init(&seq_dmabuf->hbuf.list);
14354         lpfc_update_rcv_time_stamp(vport);
14355         /* get the Remote Port's SID */
14356         sid = sli4_sid_from_fc_hdr(fc_hdr);
14357         tot_len = 0;
14358         /* Get an iocbq struct to fill in. */
14359         first_iocbq = lpfc_sli_get_iocbq(vport->phba);
14360         if (first_iocbq) {
14361                 /* Initialize the first IOCB. */
14362                 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
14363                 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
14364
14365                 /* Check FC Header to see what TYPE of frame we are rcv'ing */
14366                 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
14367                         first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
14368                         first_iocbq->iocb.un.rcvels.parmRo =
14369                                 sli4_did_from_fc_hdr(fc_hdr);
14370                         first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
14371                 } else
14372                         first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
14373                 first_iocbq->iocb.ulpContext = NO_XRI;
14374                 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
14375                         be16_to_cpu(fc_hdr->fh_ox_id);
14376                 /* iocbq is prepped for internal consumption.  Physical vpi. */
14377                 first_iocbq->iocb.unsli3.rcvsli3.vpi =
14378                         vport->phba->vpi_ids[vport->vpi];
14379                 /* put the first buffer into the first IOCBq */
14380                 first_iocbq->context2 = &seq_dmabuf->dbuf;
14381                 first_iocbq->context3 = NULL;
14382                 first_iocbq->iocb.ulpBdeCount = 1;
14383                 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14384                                                         LPFC_DATA_BUF_SIZE;
14385                 first_iocbq->iocb.un.rcvels.remoteID = sid;
14386                 tot_len = bf_get(lpfc_rcqe_length,
14387                                        &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
14388                 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
14389         }
14390         iocbq = first_iocbq;
14391         /*
14392          * Each IOCBq can have two Buffers assigned, so go through the list
14393          * of buffers for this sequence and save two buffers in each IOCBq
14394          */
14395         list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
14396                 if (!iocbq) {
14397                         lpfc_in_buf_free(vport->phba, d_buf);
14398                         continue;
14399                 }
14400                 if (!iocbq->context3) {
14401                         iocbq->context3 = d_buf;
14402                         iocbq->iocb.ulpBdeCount++;
14403                         pbde = (struct ulp_bde64 *)
14404                                         &iocbq->iocb.unsli3.sli3Words[4];
14405                         pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
14406
14407                         /* We need to get the size out of the right CQE */
14408                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14409                         len = bf_get(lpfc_rcqe_length,
14410                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
14411                         iocbq->iocb.unsli3.rcvsli3.acc_len += len;
14412                         tot_len += len;
14413                 } else {
14414                         iocbq = lpfc_sli_get_iocbq(vport->phba);
14415                         if (!iocbq) {
14416                                 if (first_iocbq) {
14417                                         first_iocbq->iocb.ulpStatus =
14418                                                         IOSTAT_FCP_RSP_ERROR;
14419                                         first_iocbq->iocb.un.ulpWord[4] =
14420                                                         IOERR_NO_RESOURCES;
14421                                 }
14422                                 lpfc_in_buf_free(vport->phba, d_buf);
14423                                 continue;
14424                         }
14425                         iocbq->context2 = d_buf;
14426                         iocbq->context3 = NULL;
14427                         iocbq->iocb.ulpBdeCount = 1;
14428                         iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14429                                                         LPFC_DATA_BUF_SIZE;
14430
14431                         /* We need to get the size out of the right CQE */
14432                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14433                         len = bf_get(lpfc_rcqe_length,
14434                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
14435                         tot_len += len;
14436                         iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
14437
14438                         iocbq->iocb.un.rcvels.remoteID = sid;
14439                         list_add_tail(&iocbq->list, &first_iocbq->list);
14440                 }
14441         }
14442         return first_iocbq;
14443 }
14444
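/**
 * lpfc_sli4_send_seq_to_ulp - Deliver a completed sequence to the ULP
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function prepares an iocbq list for the sequence and hands it to
 * the unsolicited iocb handler for the ELS ring, then releases the iocbqs
 * created by lpfc_prep_seq().
 **/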
14445 static void
14446 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
14447                           struct hbq_dmabuf *seq_dmabuf)
14448 {
14449         struct fc_frame_header *fc_hdr;
14450         struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
14451         struct lpfc_hba *phba = vport->phba;
14452
14453         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14454         iocbq = lpfc_prep_seq(vport, seq_dmabuf);
14455         if (!iocbq) {
14456                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14457                                 "2707 Ring %d handler: Failed to allocate "
14458                                 "iocb Rctl x%x Type x%x received\n",
14459                                 LPFC_ELS_RING,
14460                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
14461                 return;
14462         }
14463         if (!lpfc_complete_unsol_iocb(phba,
14464                                       &phba->sli.ring[LPFC_ELS_RING],
14465                                       iocbq, fc_hdr->fh_r_ctl,
14466                                       fc_hdr->fh_type))
14467                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14468                                 "2540 Ring %d handler: unexpected Rctl "
14469                                 "x%x Type x%x received\n",
14470                                 LPFC_ELS_RING,
14471                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
14472
14473         /* Free iocb created in lpfc_prep_seq */
14474         list_for_each_entry_safe(curr_iocb, next_iocb,
14475                 &iocbq->list, list) {
14476                 list_del_init(&curr_iocb->list);
14477                 lpfc_sli_release_iocbq(phba, curr_iocb);
14478         }
14479         lpfc_sli_release_iocbq(phba, iocbq);
14480 }
14481
14482 /**
14483  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
14484  * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
14485  *
14486  * This function is called with no lock held. This function processes all
14487  * the received buffers and gives them to the upper layer when a received
14488  * buffer indicates that it is the final frame in the sequence. The interrupt
14489  * service routine processes received buffers at interrupt contexts and adds
14490  * received dma buffers to the rb_pend_list queue and signals the worker thread.
14491  * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
14492  * appropriate receive function when the final frame in a sequence is received.
14493  **/
14494 void
14495 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
14496                                  struct hbq_dmabuf *dmabuf)
14497 {
14498         struct hbq_dmabuf *seq_dmabuf;
14499         struct fc_frame_header *fc_hdr;
14500         struct lpfc_vport *vport;
14501         uint32_t fcfi;
14502         uint32_t did;
14503
14504         /* Process each received buffer */
14505         fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14506         /* check to see if this a valid type of frame */
14507         if (lpfc_fc_frame_check(phba, fc_hdr)) {
14508                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14509                 return;
14510         }
14511         if (bf_get(lpfc_cqe_code,
14512                    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)
14513                 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
14514                               &dmabuf->cq_event.cqe.rcqe_cmpl);
14515         else
14516                 fcfi = bf_get(lpfc_rcqe_fcf_id,
14517                               &dmabuf->cq_event.cqe.rcqe_cmpl);
14518
14519         vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
14520         if (!vport) {
14521                 /* throw out the frame */
14522                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14523                 return;
14524         }
14525
14526         /* d_id this frame is directed to */
14527         did = sli4_did_from_fc_hdr(fc_hdr);
14528
14529         /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
14530         if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
14531                 (did != Fabric_DID)) {
14532                 /*
14533                  * Throw out the frame if we are not pt2pt.
14534                  * The pt2pt protocol allows for discovery frames
14535                  * to be received without a registered VPI.
14536                  */
14537                 if (!(vport->fc_flag & FC_PT2PT) ||
14538                         (phba->link_state == LPFC_HBA_READY)) {
14539                         lpfc_in_buf_free(phba, &dmabuf->dbuf);
14540                         return;
14541                 }
14542         }
14543
14544         /* Handle the basic abort sequence (BA_ABTS) event */
14545         if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
14546                 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
14547                 return;
14548         }
14549
14550         /* Link this frame */
14551         seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
14552         if (!seq_dmabuf) {
14553                 /* unable to add frame to vport - throw it out */
14554                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14555                 return;
14556         }
14557         /* If not last frame in sequence continue processing frames. */
14558         if (!lpfc_seq_complete(seq_dmabuf))
14559                 return;
14560
14561         /* Send the complete sequence to the upper layer protocol */
14562         lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
14563 }
14564
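/*
 * Receive-path sketch (the order of the steps implemented above):
 *
 *	lpfc_fc_frame_check()        - drop malformed frames
 *	lpfc_fc_frame_to_vport()     - bind the frame to a vport
 *	lpfc_fc_frame_add()          - link it into a pending sequence
 *	lpfc_seq_complete()          - wait for the final frame
 *	lpfc_sli4_send_seq_to_ulp()  - deliver the whole sequence
 *
 * BA_ABTS frames short-circuit this pipeline and are routed to
 * lpfc_sli4_handle_unsol_abort() instead.
 */
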
14565 /**
14566  * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
14567  * @phba: pointer to lpfc hba data structure.
14568  *
14569  * This routine is invoked to post rpi header templates to the
14570  * HBA consistent with the SLI-4 interface spec.  This routine
14571  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
14572  * SLI4_PAGE_SIZE / 64 rpi context headers.
14573  *
14574  * This routine does not require any locks.  Its usage is expected
14575  * to be driver load or reset recovery, when driver execution is
14576  * sequential.
14577  *
14578  * Return codes
14579  *      0 - successful
14580  *      -EIO - The mailbox failed to complete successfully.
14581  *      When this error occurs, the driver is not guaranteed
14582  *      to have any rpi regions posted to the device and
14583  *      must either attempt to repost the regions or take a
14584  *      fatal error.
14585  **/
14586 int
14587 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
14588 {
14589         struct lpfc_rpi_hdr *rpi_page;
14590         uint32_t rc = 0;
14591         uint16_t lrpi = 0;
14592
14593         /* SLI4 ports that support extents do not require RPI headers. */
14594         if (!phba->sli4_hba.rpi_hdrs_in_use)
14595                 goto exit;
14596         if (phba->sli4_hba.extents_in_use)
14597                 return -EIO;
14598
14599         list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
14600                 /*
14601                  * Assign the rpi headers a physical rpi only if the driver
14602                  * has not initialized those resources.  A port reset only
14603                  * needs the headers posted.
14604                  */
14605                 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
14606                     LPFC_RPI_RSRC_RDY)
14607                         rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
14608
14609                 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
14610                 if (rc != MBX_SUCCESS) {
14611                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14612                                         "2008 Error %d posting all rpi "
14613                                         "headers\n", rc);
14614                         rc = -EIO;
14615                         break;
14616                 }
14617         }
14618
14619  exit:
14620         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
14621                LPFC_RPI_RSRC_RDY);
14622         return rc;
14623 }
14624
14625 /**
14626  * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
14627  * @phba: pointer to lpfc hba data structure.
14628  * @rpi_page:  pointer to the rpi memory region.
14629  *
14630  * This routine is invoked to post a single rpi header to the
14631  * HBA consistent with the SLI-4 interface spec.  This memory region
14632  * maps up to 64 rpi context regions.
14633  *
14634  * Return codes
14635  *      0 - successful
14636  *      -ENOMEM - No available memory
14637  *      -EIO - The mailbox failed to complete successfully.
14638  **/
14639 int
14640 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
14641 {
14642         LPFC_MBOXQ_t *mboxq;
14643         struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
14644         uint32_t rc = 0;
14645         uint32_t shdr_status, shdr_add_status;
14646         union lpfc_sli4_cfg_shdr *shdr;
14647
14648         /* SLI4 ports that support extents do not require RPI headers. */
14649         if (!phba->sli4_hba.rpi_hdrs_in_use)
14650                 return rc;
14651         if (phba->sli4_hba.extents_in_use)
14652                 return -EIO;
14653
14654         /* The port is notified of the header region via a mailbox command. */
14655         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14656         if (!mboxq) {
14657                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14658                                 "2001 Unable to allocate memory for issuing "
14659                                 "SLI_CONFIG_SPECIAL mailbox command\n");
14660                 return -ENOMEM;
14661         }
14662
14663         /* Post this rpi memory region to the port. */
14664         hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
14665         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
14666                          LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
14667                          sizeof(struct lpfc_mbx_post_hdr_tmpl) -
14668                          sizeof(struct lpfc_sli4_cfg_mhdr),
14669                          LPFC_SLI4_MBX_EMBED);
14670
14672         /* Post the physical rpi to the port for this rpi header. */
14673         bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
14674                rpi_page->start_rpi);
14675         bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
14676                hdr_tmpl, rpi_page->page_count);
14677
14678         hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
14679         hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
14680         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
14681         shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
14682         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14683         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14684         if (rc != MBX_TIMEOUT)
14685                 mempool_free(mboxq, phba->mbox_mem_pool);
14686         if (shdr_status || shdr_add_status || rc) {
14687                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14688                                 "2514 POST_RPI_HDR mailbox failed with "
14689                                 "status x%x add_status x%x, mbx status x%x\n",
14690                                 shdr_status, shdr_add_status, rc);
14691                 rc = -ENXIO;
14692         }
14693         return rc;
14694 }
14695
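/*
 * Illustrative sketch, not part of the driver: the polled-mailbox status
 * pattern used by lpfc_sli4_post_rpi_hdr() above.  The SLI_CONFIG status
 * is decoded from the cfg_shdr response words, and any of the three
 * failure indications (status, add_status, or a bad mailbox return code)
 * maps to -ENXIO, just as the routine above does.
 */
static int __maybe_unused
lpfc_sli4_cfg_shdr_check(union lpfc_sli4_cfg_shdr *shdr, int mbx_rc)
{
	uint32_t shdr_status, shdr_add_status;

	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || mbx_rc)
		return -ENXIO;
	return 0;
}
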
14696 /**
14697  * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
14698  * @phba: pointer to lpfc hba data structure.
14699  *
14700  * This routine is invoked to allocate the next available rpi from the
14701  * driver's managed rpi range.  When rpi resources run low, it also
14702  * grows the range by creating and posting another rpi header page
14703  * to the port, up to the device limit.
14704  *
14705  * Returns
14706  *      The allocated rpi, where rpi_base <= rpi < max_rpi, if successful
14707  *      LPFC_RPI_ALLOC_ERROR if no rpis are available.
14708  **/
14709 int
14710 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
14711 {
14712         unsigned long rpi;
14713         uint16_t max_rpi, rpi_limit;
14714         uint16_t rpi_remaining, lrpi = 0;
14715         struct lpfc_rpi_hdr *rpi_hdr;
14716
14717         max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
14718         rpi_limit = phba->sli4_hba.next_rpi;
14719
14720         /*
14721          * Fetch the next logical rpi.  Because this index is logical,
14722          * the  driver starts at 0 each time.
14723          */
14724         spin_lock_irq(&phba->hbalock);
14725         rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
14726         if (rpi >= rpi_limit)
14727                 rpi = LPFC_RPI_ALLOC_ERROR;
14728         else {
14729                 set_bit(rpi, phba->sli4_hba.rpi_bmask);
14730                 phba->sli4_hba.max_cfg_param.rpi_used++;
14731                 phba->sli4_hba.rpi_count++;
14732         }
14733
14734         /*
14735          * Don't try to allocate more rpi header regions if the device limit
14736          * has been exhausted.
14737          */
14738         if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
14739             (phba->sli4_hba.rpi_count >= max_rpi)) {
14740                 spin_unlock_irq(&phba->hbalock);
14741                 return rpi;
14742         }
14743
14744         /*
14745          * RPI header postings are not required for SLI4 ports capable of
14746          * extents.
14747          */
14748         if (!phba->sli4_hba.rpi_hdrs_in_use) {
14749                 spin_unlock_irq(&phba->hbalock);
14750                 return rpi;
14751         }
14752
14753         /*
14754          * If the driver is running low on rpi resources, allocate another
14755          * page now.  Note that the next_rpi value is used because
14756          * it represents how many rpis have been provisioned, whereas
14757          * max_rpi notes the maximum supported by the device.
14758          */
14759         rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
14760         spin_unlock_irq(&phba->hbalock);
14761         if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
14762                 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
14763                 if (!rpi_hdr) {
14764                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14765                                         "2002 Error Could not grow rpi "
14766                                         "count\n");
14767                 } else {
14768                         lrpi = rpi_hdr->start_rpi;
14769                         rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
14770                         lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
14771                 }
14772         }
14773
14774         return rpi;
14775 }
14776
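/*
 * Illustrative sketch, not part of the driver: the expected pairing of
 * lpfc_sli4_alloc_rpi() with lpfc_sli4_free_rpi().  Callers must treat
 * LPFC_RPI_ALLOC_ERROR as an exhausted rpi range.
 */
static int __maybe_unused
lpfc_sli4_rpi_usage_sketch(struct lpfc_hba *phba)
{
	int rpi = lpfc_sli4_alloc_rpi(phba);

	if (rpi == LPFC_RPI_ALLOC_ERROR)
		return -ENOSPC;	/* no rpi left in the device's range */

	/* ... use the rpi to register a remote port login ... */

	lpfc_sli4_free_rpi(phba, rpi);
	return 0;
}
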
14777 /**
14778  * __lpfc_sli4_free_rpi - Release an rpi for reuse.
14779  * @phba: pointer to lpfc hba data structure.
14780  * @rpi: rpi to free.
14781  *
14782  * Releases an rpi to the driver's pool; the caller holds the hbalock.
14783  **/
14784 void
14785 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
14786 {
14787         if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
14788                 phba->sli4_hba.rpi_count--;
14789                 phba->sli4_hba.max_cfg_param.rpi_used--;
14790         }
14791 }
14792
14793 /**
14794  * lpfc_sli4_free_rpi - Release an rpi for reuse.
14795  * @phba: pointer to lpfc hba data structure.
14796  * @rpi: rpi to free.
14797  *
14798  * Takes the hbalock and releases an rpi to the driver's available pool.
14799  **/
14800 void
14801 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
14802 {
14803         spin_lock_irq(&phba->hbalock);
14804         __lpfc_sli4_free_rpi(phba, rpi);
14805         spin_unlock_irq(&phba->hbalock);
14806 }
14807
14808 /**
14809  * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
14810  * @phba: pointer to lpfc hba data structure.
14811  *
14812  * This routine is invoked to remove the memory region that
14813  * provided rpi via a bitmask.
14814  **/
14815 void
14816 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
14817 {
14818         kfree(phba->sli4_hba.rpi_bmask);
14819         kfree(phba->sli4_hba.rpi_ids);
14820         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
14821 }
14822
14823 /**
14824  * lpfc_sli4_resume_rpi - Resume the rpi associated with a remote node
14825  * @ndlp: pointer to the lpfc nodelist whose rpi is to be resumed.
14826  * @cmpl: optional mailbox completion handler; @arg is passed through to it.
14827  *
14828  * Issues a RESUME_RPI mailbox command to the port for @ndlp's rpi.
14829  **/
14830 int
14831 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
14832         void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
14833 {
14834         LPFC_MBOXQ_t *mboxq;
14835         struct lpfc_hba *phba = ndlp->phba;
14836         int rc;
14837
14838         /* The port is notified of the header region via a mailbox command. */
14839         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14840         if (!mboxq)
14841                 return -ENOMEM;
14842
14843         /* Post all rpi memory regions to the port. */
14844         lpfc_resume_rpi(mboxq, ndlp);
14845         if (cmpl) {
14846                 mboxq->mbox_cmpl = cmpl;
14847                 mboxq->context1 = arg;
14848                 mboxq->context2 = ndlp;
14849         } else
14850                 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14851         mboxq->vport = ndlp->vport;
14852         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
14853         if (rc == MBX_NOT_FINISHED) {
14854                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14855                                 "2010 Resume RPI Mailbox failed "
14856                                 "status %d, mbxStatus x%x\n", rc,
14857                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
14858                 mempool_free(mboxq, phba->mbox_mem_pool);
14859                 return -EIO;
14860         }
14861         return 0;
14862 }
14863
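/*
 * Illustrative sketch, not part of the driver: resuming an rpi with a
 * private completion handler.  When @cmpl is supplied, that handler owns
 * the mailbox and must free it; passing NULL instead falls back to
 * lpfc_sli_def_mbox_cmpl as in the routine above.
 */
static void __maybe_unused
lpfc_resume_rpi_done_sketch(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	/* context1 carries the caller's @arg, context2 the ndlp */
	mempool_free(mboxq, phba->mbox_mem_pool);
}

static int __maybe_unused
lpfc_resume_rpi_sketch(struct lpfc_nodelist *ndlp)
{
	return lpfc_sli4_resume_rpi(ndlp, lpfc_resume_rpi_done_sketch, NULL);
}
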
14864 /**
14865  * lpfc_sli4_init_vpi - Initialize a vpi with the port
14866  * @vport: Pointer to the vport for which the vpi is being initialized
14867  *
14868  * This routine is invoked to activate a vpi with the port.
14869  *
14870  * Returns:
14871  *    0 success
14872  *    negative errno (-ENOMEM or -EIO) otherwise
14873  **/
14874 int
14875 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
14876 {
14877         LPFC_MBOXQ_t *mboxq;
14878         int rc = 0;
14879         int retval = MBX_SUCCESS;
14880         uint32_t mbox_tmo;
14881         struct lpfc_hba *phba = vport->phba;
14882         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14883         if (!mboxq)
14884                 return -ENOMEM;
14885         lpfc_init_vpi(phba, mboxq, vport->vpi);
14886         mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
14887         rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
14888         if (rc != MBX_SUCCESS) {
14889                 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
14890                                 "2022 INIT VPI Mailbox failed "
14891                                 "status %d, mbxStatus x%x\n", rc,
14892                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
14893                 retval = -EIO;
14894         }
14895         if (rc != MBX_TIMEOUT)
14896                 mempool_free(mboxq, vport->phba->mbox_mem_pool);
14897
14898         return retval;
14899 }
14900
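/*
 * Illustrative sketch, not part of the driver: on SLI4 ports a vport's
 * vpi must be initialized with the port before discovery (FDISC) can
 * proceed, so a bring-up path calls lpfc_sli4_init_vpi() first and
 * aborts on failure.
 */
static int __maybe_unused
lpfc_vport_bringup_sketch(struct lpfc_vport *vport)
{
	int rc = lpfc_sli4_init_vpi(vport);

	if (rc)
		return rc;	/* -ENOMEM or -EIO from the mailbox path */
	/* ... continue with FDISC/discovery for this vport ... */
	return 0;
}
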
14901 /**
14902  * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
14903  * @phba: pointer to lpfc hba data structure.
14904  * @mboxq: Pointer to mailbox object.
14905  *
14906  * This routine is the completion handler for the ADD_FCF_RECORD mailbox
14907  * command.  It decodes the status from the nonembedded mailbox subheader,
14908  * logs any failure other than STATUS_FCF_IN_USE, and frees the mailbox.
14909  **/
14910 static void
14911 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
14912 {
14913         void *virt_addr;
14914         union lpfc_sli4_cfg_shdr *shdr;
14915         uint32_t shdr_status, shdr_add_status;
14916
14917         virt_addr = mboxq->sge_array->addr[0];
14918         /* The IOCTL status is embedded in the mailbox subheader. */
14919         shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
14920         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14921         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14922
14923         if ((shdr_status || shdr_add_status) &&
14924                 (shdr_status != STATUS_FCF_IN_USE))
14925                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14926                         "2558 ADD_FCF_RECORD mailbox failed with "
14927                         "status x%x add_status x%x\n",
14928                         shdr_status, shdr_add_status);
14929
14930         lpfc_sli4_mbox_cmd_free(phba, mboxq);
14931 }
14932
14933 /**
14934  * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
14935  * @phba: pointer to lpfc hba data structure.
14936  * @fcf_record:  pointer to the initialized fcf record to add.
14937  *
14938  * This routine is invoked to manually add a single FCF record. The caller
14939  * must pass a completely initialized FCF_Record.  This routine takes
14940  * care of the nonembedded mailbox operations.
14941  **/
14942 int
14943 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
14944 {
14945         int rc = 0;
14946         LPFC_MBOXQ_t *mboxq;
14947         uint8_t *bytep;
14948         void *virt_addr;
14949         dma_addr_t phys_addr;
14950         struct lpfc_mbx_sge sge;
14951         uint32_t alloc_len, req_len;
14952         uint32_t fcfindex;
14953
14954         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14955         if (!mboxq) {
14956                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14957                         "2009 Failed to allocate mbox for ADD_FCF cmd\n");
14958                 return -ENOMEM;
14959         }
14960
14961         req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
14962                   sizeof(uint32_t);
14963
14964         /* Allocate DMA memory and set up the non-embedded mailbox command */
14965         alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
14966                                      LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
14967                                      req_len, LPFC_SLI4_MBX_NEMBED);
14968         if (alloc_len < req_len) {
14969                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14970                         "2523 Allocated DMA memory size (x%x) is "
14971                         "less than the requested DMA memory "
14972                         "size (x%x)\n", alloc_len, req_len);
14973                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
14974                 return -ENOMEM;
14975         }
14976
14977         /*
14978          * Get the first SGE entry from the non-embedded DMA memory.  This
14979          * routine only uses a single SGE.
14980          */
14981         lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
14982         phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
14983         virt_addr = mboxq->sge_array->addr[0];
14984         /*
14985          * Configure the FCF record for FCFI 0.  This is the driver's
14986          * hardcoded default and gets used in nonFIP mode.
14987          */
14988         fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
14989         bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
14990         lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
14991
14992         /*
14993          * Copy the fcf_index and the FCF Record Data. The data starts after
14994          * the FCoE header plus word10. The data copy needs to be endian
14995          * correct.
14996          */
14997         bytep += sizeof(uint32_t);
14998         lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
14999         mboxq->vport = phba->pport;
15000         mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
15001         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15002         if (rc == MBX_NOT_FINISHED) {
15003                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15004                         "2515 ADD_FCF_RECORD mailbox failed with "
15005                         "status 0x%x\n", rc);
15006                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15007                 rc = -EIO;
15008         } else
15009                 rc = 0;
15010
15011         return rc;
15012 }
15013
15014 /**
15015  * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
15016  * @phba: pointer to lpfc hba data structure.
15017  * @fcf_record:  pointer to the fcf record to write the default data.
15018  * @fcf_index: FCF table entry index.
15019  *
15020  * This routine is invoked to build the driver's default FCF record.  The
15021  * values used are hardcoded.  This routine handles memory initialization.
15022  *
15023  **/
15024 void
15025 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
15026                                 struct fcf_record *fcf_record,
15027                                 uint16_t fcf_index)
15028 {
15029         memset(fcf_record, 0, sizeof(struct fcf_record));
15030         fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
15031         fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
15032         fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
15033         bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
15034         bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
15035         bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
15036         bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
15037         bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
15038         bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
15039         bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
15040         bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
15041         bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
15042         bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
15043         bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
15044         bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
15045         bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
15046                 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
15047         /* Set the VLAN bit map */
15048         if (phba->valid_vlan) {
15049                 fcf_record->vlan_bitmap[phba->vlan_id / 8]
15050                         = 1 << (phba->vlan_id % 8);
15051         }
15052 }
15053
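/*
 * Illustrative sketch, not part of the driver: building the hardcoded
 * default FCF record and handing it to lpfc_sli4_add_fcf_record(), the
 * pairing used when operating in nonFIP mode with FCFI 0.
 */
static int __maybe_unused
lpfc_add_dflt_fcf_sketch(struct lpfc_hba *phba)
{
	struct fcf_record *fcf_record;
	int rc;

	fcf_record = kzalloc(sizeof(struct fcf_record), GFP_KERNEL);
	if (!fcf_record)
		return -ENOMEM;

	lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, 0);
	rc = lpfc_sli4_add_fcf_record(phba, fcf_record);

	/* The record is copied into the mailbox's DMA buffer; free ours. */
	kfree(fcf_record);
	return rc;
}
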
15054 /**
15055  * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
15056  * @phba: pointer to lpfc hba data structure.
15057  * @fcf_index: FCF table entry offset.
15058  *
15059  * This routine is invoked to scan the entire FCF table by reading FCF
15060  * record and processing it one at a time starting from the @fcf_index
15061  * for initial FCF discovery or fast FCF failover rediscovery.
15062  *
15063  * Return 0 if the mailbox command is submitted successfully, non-zero
15064  * otherwise.
15065  **/
15066 int
15067 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15068 {
15069         int rc = 0, error;
15070         LPFC_MBOXQ_t *mboxq;
15071
15072         phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
15073         phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
15074         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15075         if (!mboxq) {
15076                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15077                                 "2000 Failed to allocate mbox for "
15078                                 "READ_FCF cmd\n");
15079                 error = -ENOMEM;
15080                 goto fail_fcf_scan;
15081         }
15082         /* Construct the read FCF record mailbox command */
15083         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
15084         if (rc) {
15085                 error = -EINVAL;
15086                 goto fail_fcf_scan;
15087         }
15088         /* Issue the mailbox command asynchronously */
15089         mboxq->vport = phba->pport;
15090         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
15091
15092         spin_lock_irq(&phba->hbalock);
15093         phba->hba_flag |= FCF_TS_INPROG;
15094         spin_unlock_irq(&phba->hbalock);
15095
15096         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15097         if (rc == MBX_NOT_FINISHED)
15098                 error = -EIO;
15099         else {
15100                 /* Reset eligible FCF count for new scan */
15101                 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
15102                         phba->fcf.eligible_fcf_cnt = 0;
15103                 error = 0;
15104         }
15105 fail_fcf_scan:
15106         if (error) {
15107                 if (mboxq)
15108                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
15109                 /* FCF scan failed, clear FCF_TS_INPROG flag */
15110                 spin_lock_irq(&phba->hbalock);
15111                 phba->hba_flag &= ~FCF_TS_INPROG;
15112                 spin_unlock_irq(&phba->hbalock);
15113         }
15114         return error;
15115 }
15116
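/*
 * Illustrative sketch, not part of the driver: starting a full FCF table
 * scan.  LPFC_FCOE_FCF_GET_FIRST asks the port for the first valid
 * record; the completion handler then walks the table one record at a
 * time by reissuing the read with each next returned index.
 */
static int __maybe_unused
lpfc_start_fcf_scan_sketch(struct lpfc_hba *phba)
{
	return lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
}
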
15117 /**
15118  * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
15119  * @phba: pointer to lpfc hba data structure.
15120  * @fcf_index: FCF table entry offset.
15121  *
15122  * This routine is invoked to read an FCF record indicated by @fcf_index
15123  * and to use it for FLOGI roundrobin FCF failover.
15124  *
15125  * Return 0 if the mailbox command is submitted successfully, non-zero
15126  * otherwise.
15127  **/
15128 int
15129 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15130 {
15131         int rc = 0, error;
15132         LPFC_MBOXQ_t *mboxq;
15133
15134         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15135         if (!mboxq) {
15136                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
15137                                 "2763 Failed to allocate mbox for "
15138                                 "READ_FCF cmd\n");
15139                 error = -ENOMEM;
15140                 goto fail_fcf_read;
15141         }
15142         /* Construct the read FCF record mailbox command */
15143         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
15144         if (rc) {
15145                 error = -EINVAL;
15146                 goto fail_fcf_read;
15147         }
15148         /* Issue the mailbox command asynchronously */
15149         mboxq->vport = phba->pport;
15150         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
15151         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15152         if (rc == MBX_NOT_FINISHED)
15153                 error = -EIO;
15154         else
15155                 error = 0;
15156
15157 fail_fcf_read:
15158         if (error && mboxq)
15159                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15160         return error;
15161 }
15162
15163 /**
15164  * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
15165  * @phba: pointer to lpfc hba data structure.
15166  * @fcf_index: FCF table entry offset.
15167  *
15168  * This routine is invoked to read an FCF record indicated by @fcf_index to
15169  * determine whether it's eligible for FLOGI roundrobin failover list.
15170  *
15171  * Return 0 if the mailbox command is submitted successfully, non-zero
15172  * otherwise.
15173  **/
15174 int
15175 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15176 {
15177         int rc = 0, error;
15178         LPFC_MBOXQ_t *mboxq;
15179
15180         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15181         if (!mboxq) {
15182                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
15183                                 "2758 Failed to allocate mbox for "
15184                                 "READ_FCF cmd\n");
15185                 error = -ENOMEM;
15186                 goto fail_fcf_read;
15187         }
15188         /* Construct the read FCF record mailbox command */
15189         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
15190         if (rc) {
15191                 error = -EINVAL;
15192                 goto fail_fcf_read;
15193         }
15194         /* Issue the mailbox command asynchronously */
15195         mboxq->vport = phba->pport;
15196         mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
15197         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15198         if (rc == MBX_NOT_FINISHED)
15199                 error = -EIO;
15200         else
15201                 error = 0;
15202
15203 fail_fcf_read:
15204         if (error && mboxq)
15205                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15206         return error;
15207 }
15208
15209 /**
15210  * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask at next priority
15211  * @phba: pointer to the lpfc_hba struct for this port.
15212  *
15213  * This routine is called from lpfc_sli4_fcf_rr_next_index_get when the
15214  * rr_bmask is empty.  FCF indices are put into the rr_bmask based on
15215  * their priority level, starting from the highest priority down to the
15216  * lowest, so the most likely FCF candidate is in the highest priority
15217  * group.  This routine searches the fcf_pri list for the next lowest
15218  * priority group and repopulates the rr_bmask with only those
15219  * fcf_indexes.
15220  * Returns: 1=success 0=failure
15221  **/
15222 int
15223 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
15224 {
15225         uint16_t next_fcf_pri;
15226         uint16_t last_index;
15227         struct lpfc_fcf_pri *fcf_pri;
15228         int rc;
15229         int ret = 0;
15230
15231         last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
15232                         LPFC_SLI4_FCF_TBL_INDX_MAX);
15233         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15234                         "3060 Last IDX %d\n", last_index);
15235         if (list_empty(&phba->fcf.fcf_pri_list)) {
15236                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15237                         "3061 Last IDX %d\n", last_index);
15238                 return 0; /* Empty rr list */
15239         }
15240         next_fcf_pri = 0;
15241         /*
15242          * Clear the rr_bmask and set all of the bits that are at this
15243          * priority.
15244          */
15245         memset(phba->fcf.fcf_rr_bmask, 0,
15246                         sizeof(*phba->fcf.fcf_rr_bmask));
15247         spin_lock_irq(&phba->hbalock);
15248         list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15249                 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
15250                         continue;
15251                 /*
15252                  * the 1st priority that has not FLOGI failed
15253                  * will be the highest.
15254                  */
15255                 if (!next_fcf_pri)
15256                         next_fcf_pri = fcf_pri->fcf_rec.priority;
15257                 spin_unlock_irq(&phba->hbalock);
15258                 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
15259                         rc = lpfc_sli4_fcf_rr_index_set(phba,
15260                                                 fcf_pri->fcf_rec.fcf_index);
15261                         if (rc)
15262                                 return 0;
15263                 }
15264                 spin_lock_irq(&phba->hbalock);
15265         }
15266         /*
15267          * if next_fcf_pri was not set above and the list is not empty then
15268          * we have failed flogis on all of them. So reset flogi failed
15269          * and start at the beginning.
15270          */
15271         if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
15272                 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15273                         fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
15274                         /*
15275                          * the 1st priority that has not FLOGI failed
15276                          * will be the highest.
15277                          */
15278                         if (!next_fcf_pri)
15279                                 next_fcf_pri = fcf_pri->fcf_rec.priority;
15280                         spin_unlock_irq(&phba->hbalock);
15281                         if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
15282                                 rc = lpfc_sli4_fcf_rr_index_set(phba,
15283                                                 fcf_pri->fcf_rec.fcf_index);
15284                                 if (rc)
15285                                         return 0;
15286                         }
15287                         spin_lock_irq(&phba->hbalock);
15288                 }
15289         } else
15290                 ret = 1;
15291         spin_unlock_irq(&phba->hbalock);
15292
15293         return ret;
15294 }
15295 /**
15296  * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
15297  * @phba: pointer to lpfc hba data structure.
15298  *
15299  * This routine is to get the next eligible FCF record index in a round
15300  * robin fashion. If the next eligible FCF record index equals to the
15301  * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
15302  * shall be returned, otherwise, the next eligible FCF record's index
15303  * shall be returned.
15304  **/
15305 uint16_t
15306 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
15307 {
15308         uint16_t next_fcf_index;
15309
15310         /* Search start from next bit of currently registered FCF index */
15311 next_priority:
15312         next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
15313                                         LPFC_SLI4_FCF_TBL_INDX_MAX;
15314         next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
15315                                        LPFC_SLI4_FCF_TBL_INDX_MAX,
15316                                        next_fcf_index);
15317
15318         /* Wrap around condition on phba->fcf.fcf_rr_bmask */
15319         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15320                 /*
15321                  * If we have wrapped then we need to clear the bits that
15322                  * have been tested so that we can detect when we should
15323                  * change the priority level.
15324                  */
15325                 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
15326                                                LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
15327         }
15328
15329
15331         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
15332                 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
15333                 /*
15334                  * If the next fcf index is not found, check if there are lower
15335                  * priority level fcf's in the fcf_priority list.
15336                  * Set up the rr_bmask with all of the available fcf bits
15337                  * at that level and continue the selection process.
15338                  */
15339                 if (lpfc_check_next_fcf_pri_level(phba))
15340                         goto next_priority;
15341                 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
15342                                 "2844 No roundrobin failover FCF available\n");
15343                 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
15344                         return LPFC_FCOE_FCF_NEXT_NONE;
15345                 else {
15346                         lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
15347                                 "3063 Only FCF available idx %d, flag %x\n",
15348                                 next_fcf_index,
15349                                 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
15350                         return next_fcf_index;
15351                 }
15352         }
15353
15354         if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
15355                 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
15356                 LPFC_FCF_FLOGI_FAILED)
15357                 goto next_priority;
15358
15359         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15360                         "2845 Get next roundrobin failover FCF (x%x)\n",
15361                         next_fcf_index);
15362
15363         return next_fcf_index;
15364 }
15365
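/*
 * Illustrative sketch, not part of the driver: consuming the roundrobin
 * failover list.  A FLOGI retry path fetches the next eligible index,
 * stops on LPFC_FCOE_FCF_NEXT_NONE, and clears an index from the bmask
 * once FLOGI through that FCF has failed.
 */
static uint16_t __maybe_unused
lpfc_fcf_rr_sketch(struct lpfc_hba *phba)
{
	uint16_t fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);

	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
		return fcf_index;	/* no eligible FCF left */

	/* ... attempt FLOGI via fcf_index; on failure: ... */
	lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
	return fcf_index;
}
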
15366 /**
15367  * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
15368  * @phba: pointer to lpfc hba data structure.
15369  * @fcf_index: FCF table entry index to set in the bmask.
15370  * This routine sets the FCF record index into the eligible bmask for
15371  * roundrobin failover search. It checks to make sure that the index
15372  * does not go beyond the range of the driver allocated bmask dimension
15373  * before setting the bit.
15374  *
15375  * Returns 0 if the index bit successfully set, otherwise, it returns
15376  * -EINVAL.
15377  **/
15378 int
15379 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
15380 {
15381         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15382                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15383                                 "2610 FCF (x%x) reached driver's book "
15384                                 "keeping dimension:x%x\n",
15385                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
15386                 return -EINVAL;
15387         }
15388         /* Set the eligible FCF record index bmask */
15389         set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
15390
15391         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15392                         "2790 Set FCF (x%x) to roundrobin FCF failover "
15393                         "bmask\n", fcf_index);
15394
15395         return 0;
15396 }
15397
15398 /**
15399  * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
15400  * @phba: pointer to lpfc hba data structure.
15401  * @fcf_index: FCF table entry index to clear from the bmask.
15402  * This routine clears the FCF record index from the eligible bmask for
15403  * roundrobin failover search. It checks to make sure that the index
15404  * does not go beyond the range of the driver allocated bmask dimension
15405  * before clearing the bit.
15406  **/
15407 void
15408 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
15409 {
15410         struct lpfc_fcf_pri *fcf_pri;
15411         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15412                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15413                                 "2762 FCF (x%x) reached driver's book "
15414                                 "keeping dimension:x%x\n",
15415                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
15416                 return;
15417         }
15418         /* Clear the eligible FCF record index bmask */
15419         spin_lock_irq(&phba->hbalock);
15420         list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15421                 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
15422                         list_del_init(&fcf_pri->list);
15423                         break;
15424                 }
15425         }
15426         spin_unlock_irq(&phba->hbalock);
15427         clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
15428
15429         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15430                         "2791 Clear FCF (x%x) from roundrobin failover "
15431                         "bmask\n", fcf_index);
15432 }
15433
15434 /**
15435  * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
15436  * @phba: pointer to lpfc hba data structure.
15437  *
15438  * This routine is the completion routine for the rediscover FCF table mailbox
15439  * command. If the mailbox command returned failure, it will try to stop the
15440  * FCF rediscover wait timer.
15441  **/
15442 void
15443 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
15444 {
15445         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
15446         uint32_t shdr_status, shdr_add_status;
15447
15448         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
15449
15450         shdr_status = bf_get(lpfc_mbox_hdr_status,
15451                              &redisc_fcf->header.cfg_shdr.response);
15452         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
15453                              &redisc_fcf->header.cfg_shdr.response);
15454         if (shdr_status || shdr_add_status) {
15455                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15456                                 "2746 Requesting for FCF rediscovery failed "
15457                                 "status x%x add_status x%x\n",
15458                                 shdr_status, shdr_add_status);
15459                 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
15460                         spin_lock_irq(&phba->hbalock);
15461                         phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
15462                         spin_unlock_irq(&phba->hbalock);
15463                         /*
15464                          * CVL event triggered FCF rediscover request failed,
15465                          * last resort to re-try current registered FCF entry.
15466                          */
15467                         lpfc_retry_pport_discovery(phba);
15468                 } else {
15469                         spin_lock_irq(&phba->hbalock);
15470                         phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
15471                         spin_unlock_irq(&phba->hbalock);
15472                         /*
15473                          * DEAD FCF event triggered FCF rediscover request
15474                          * failed, last resort to fail over as a link down
15475                          * to FCF registration.
15476                          */
15477                         lpfc_sli4_fcf_dead_failthrough(phba);
15478                 }
15479         } else {
15480                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15481                                 "2775 Start FCF rediscover quiescent timer\n");
15482                 /*
15483                  * Start FCF rediscovery wait timer for pending FCF
15484                  * before rescan FCF record table.
15485                  */
15486                 lpfc_fcf_redisc_wait_start_timer(phba);
15487         }
15488
15489         mempool_free(mbox, phba->mbox_mem_pool);
15490 }
15491
15492 /**
15493  * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
15494  * @phba: pointer to lpfc hba data structure.
15495  *
15496  * This routine is invoked to request for rediscovery of the entire FCF table
15497  * by the port.
15498  **/
15499 int
15500 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
15501 {
15502         LPFC_MBOXQ_t *mbox;
15503         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
15504         int rc, length;
15505
15506         /* Cancel retry delay timers to all vports before FCF rediscover */
15507         lpfc_cancel_all_vport_retry_delay_timer(phba);
15508
15509         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15510         if (!mbox) {
15511                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15512                                 "2745 Failed to allocate mbox for "
15513                                 "requesting FCF rediscover.\n");
15514                 return -ENOMEM;
15515         }
15516
15517         length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
15518                   sizeof(struct lpfc_sli4_cfg_mhdr));
15519         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15520                          LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
15521                          length, LPFC_SLI4_MBX_EMBED);
15522
15523         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
15524         /* Set count to 0 for invalidating the entire FCF database */
15525         bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
15526
15527         /* Issue the mailbox command asynchronously */
15528         mbox->vport = phba->pport;
15529         mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
15530         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
15531
15532         if (rc == MBX_NOT_FINISHED) {
15533                 mempool_free(mbox, phba->mbox_mem_pool);
15534                 return -EIO;
15535         }
15536         return 0;
15537 }
15538
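/*
 * Illustrative sketch, not part of the driver: requesting a full FCF
 * rediscovery from an event path.  If the request cannot even be issued,
 * a caller would fall back the same way the completion handler above
 * does for a DEAD FCF event.
 */
static void __maybe_unused
lpfc_request_fcf_redisc_sketch(struct lpfc_hba *phba)
{
	if (lpfc_sli4_redisc_fcf_table(phba))
		lpfc_sli4_fcf_dead_failthrough(phba);
}
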
15539 /**
15540  * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
15541  * @phba: pointer to lpfc hba data structure.
15542  *
15543  * This function is the failover routine as a last resort to the FCF DEAD
15544  * event when driver failed to perform fast FCF failover.
15545  **/
15546 void
15547 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
15548 {
15549         uint32_t link_state;
15550
15551         /*
15552          * Last resort as FCF DEAD event failover will treat this as
15553          * a link down, but save the link state because we don't want
15554          * it to be changed to Link Down unless it is already down.
15555          */
15556         link_state = phba->link_state;
15557         lpfc_linkdown(phba);
15558         phba->link_state = link_state;
15559
15560         /* Unregister FCF if no devices connected to it */
15561         lpfc_unregister_unused_fcf(phba);
15562 }
15563
15564 /**
15565  * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
15566  * @phba: pointer to lpfc hba data structure.
15567  * @rgn23_data: pointer to configure region 23 data.
15568  *
15569  * This function gets SLI3 port configure region 23 data through the memory
15570  * dump mailbox command. When it successfully retrieves data, the size of the data
15571  * will be returned, otherwise, 0 will be returned.
15572  **/
15573 static uint32_t
15574 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
15575 {
15576         LPFC_MBOXQ_t *pmb = NULL;
15577         MAILBOX_t *mb;
15578         uint32_t offset = 0;
15579         int rc;
15580
15581         if (!rgn23_data)
15582                 return 0;
15583
15584         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15585         if (!pmb) {
15586                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15587                                 "2600 failed to allocate mailbox memory\n");
15588                 return 0;
15589         }
15590         mb = &pmb->u.mb;
15591
15592         do {
15593                 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
15594                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
15595
15596                 if (rc != MBX_SUCCESS) {
15597                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15598                                         "2601 failed to read config "
15599                                         "region 23, rc 0x%x Status 0x%x\n",
15600                                         rc, mb->mbxStatus);
15601                         mb->un.varDmp.word_cnt = 0;
15602                 }
15603                 /*
15604                  * dump mem may return a zero when finished or we got a
15605                  * mailbox error, either way we are done.
15606                  */
15607                 if (mb->un.varDmp.word_cnt == 0)
15608                         break;
15609                 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
15610                         mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
15611
15612                 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
15613                                        rgn23_data + offset,
15614                                        mb->un.varDmp.word_cnt);
15615                 offset += mb->un.varDmp.word_cnt;
15616         } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
15617
15618         mempool_free(pmb, phba->mbox_mem_pool);
15619         return offset;
15620 }
15621
15622 /**
15623  * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
15624  * @phba: pointer to lpfc hba data structure.
15625  * @rgn23_data: pointer to configure region 23 data.
15626  *
15627  * This function gets SLI4 port configure region 23 data through the memory
15628  * dump mailbox command. When it successfully retrieves data, the size of the data
15629  * will be returned, otherwise, 0 will be returned.
15630  **/
15631 static uint32_t
15632 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
15633 {
15634         LPFC_MBOXQ_t *mboxq = NULL;
15635         struct lpfc_dmabuf *mp = NULL;
15636         struct lpfc_mqe *mqe;
15637         uint32_t data_length = 0;
15638         int rc;
15639
15640         if (!rgn23_data)
15641                 return 0;
15642
15643         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15644         if (!mboxq) {
15645                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15646                                 "3105 failed to allocate mailbox memory\n");
15647                 return 0;
15648         }
15649
15650         if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
15651                 goto out;
15652         mqe = &mboxq->u.mqe;
15653         mp = (struct lpfc_dmabuf *) mboxq->context1;
15654         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
15655         if (rc)
15656                 goto out;
15657         data_length = mqe->un.mb_words[5];
15658         if (data_length == 0)
15659                 goto out;
15660         if (data_length > DMP_RGN23_SIZE) {
15661                 data_length = 0;
15662                 goto out;
15663         }
15664         lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
15665 out:
15666         mempool_free(mboxq, phba->mbox_mem_pool);
15667         if (mp) {
15668                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
15669                 kfree(mp);
15670         }
15671         return data_length;
15672 }
15673
15674 /**
15675  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
15676  * @phba: pointer to lpfc hba data structure.
15677  *
15678  * This function reads region 23 and parses the TLVs for port status to
15679  * decide if the user disabled the port. If the TLV indicates the
15680  * port is disabled, the hba_flag is set accordingly.
15681  **/
15682 void
15683 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
15684 {
15685         uint8_t *rgn23_data = NULL;
15686         uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
15687         uint32_t offset = 0;
15688
15689         /* Get adapter Region 23 data */
15690         rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
15691         if (!rgn23_data)
15692                 goto out;
15693
15694         if (phba->sli_rev < LPFC_SLI_REV4)
15695                 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
15696         else {
15697                 if_type = bf_get(lpfc_sli_intf_if_type,
15698                                  &phba->sli4_hba.sli_intf);
15699                 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
15700                         goto out;
15701                 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
15702         }
15703
15704         if (!data_size)
15705                 goto out;
15706
15707         /* Check the region signature first */
15708         if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
15709                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15710                         "2619 Config region 23 has bad signature\n");
15711                 goto out;
15712         }
15713         offset += 4;
15714
15715         /* Check the data structure version */
15716         if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
15717                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15718                         "2620 Config region 23 has bad version\n");
15719                 goto out;
15720         }
15721         offset += 4;
15722
15723         /* Parse TLV entries in the region */
15724         while (offset < data_size) {
15725                 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
15726                         break;
15727                 /*
15728                  * If the TLV is not driver specific TLV or driver id is
15729                  * not linux driver id, skip the record.
15730                  */
15731                 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
15732                     (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
15733                     (rgn23_data[offset + 3] != 0)) {
15734                         offset += rgn23_data[offset + 1] * 4 + 4;
15735                         continue;
15736                 }
15737
15738                 /* Driver found a driver specific TLV in the config region */
15739                 sub_tlv_len = rgn23_data[offset + 1] * 4;
15740                 offset += 4;
15741                 tlv_offset = 0;
15742
15743                 /*
15744                  * Search for configured port state sub-TLV.
15745                  */
15746                 while ((offset < data_size) &&
15747                         (tlv_offset < sub_tlv_len)) {
15748                         if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
15749                                 offset += 4;
15750                                 tlv_offset += 4;
15751                                 break;
15752                         }
15753                         if (rgn23_data[offset] != PORT_STE_TYPE) {
15754                                 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
15755                                 offset += rgn23_data[offset + 1] * 4 + 4;
15756                                 continue;
15757                         }
15758
15759                         /* This HBA contains PORT_STE configured */
15760                         if (!rgn23_data[offset + 2])
15761                                 phba->hba_flag |= LINK_DISABLED;
15762
15763                         goto out;
15764                 }
15765         }
15766
15767 out:
15768         kfree(rgn23_data);
15769         return;
15770 }
15771
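/*
 * Region 23 layout assumed by lpfc_sli_read_link_ste() above, as a
 * sketch reconstructed from the parsing code (offsets in bytes):
 *
 *   bytes 0-3 : signature (LPFC_REGION23_SIGNATURE)
 *   bytes 4-7 : version (LPFC_REGION23_VERSION in byte 4)
 *   then TLV records, each with a 4-byte header (type in byte 0, length
 *   in words in byte 1) followed by length*4 data bytes, terminated by
 *   LPFC_REGION23_LAST_REC.  A driver-specific record
 *   (DRIVER_SPECIFIC_TYPE / LINUX_DRIVER_ID) may carry a PORT_STE_TYPE
 *   sub-TLV whose byte 2 being zero marks the port as user-disabled.
 */
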
15772 /**
15773  * lpfc_wr_object - write an object to the firmware
15774  * @phba: HBA structure that indicates port to create a queue on.
15775  * @dmabuf_list: list of dmabufs to write to the port.
15776  * @size: the total byte value of the objects to write to the port.
15777  * @offset: the current offset to be used to start the transfer.
15778  *
15779  * This routine will create a wr_object mailbox command to send to the port.
15780  * The mailbox command will be constructed using the dma buffers described
15781  * in @dmabuf_list to create a list of BDEs. This routine will fill in as
15782  * many BDEs as the embedded mailbox can support. The @offset variable will be
15783  * used to indicate the starting offset of the transfer and will also return
15784  * the offset after the write object mailbox has completed. @size is used to
15785  * determine the end of the object and whether the eof bit should be set.
15786  *
15787  * Return 0 if successful; @offset will then contain the new offset to use
15788  * for the next write.
15789  * Return negative value for error cases.
15790  **/
15791 int
15792 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
15793                uint32_t size, uint32_t *offset)
15794 {
15795         struct lpfc_mbx_wr_object *wr_object;
15796         LPFC_MBOXQ_t *mbox;
15797         int rc = 0, i = 0;
15798         uint32_t shdr_status, shdr_add_status;
15799         uint32_t mbox_tmo;
15800         union lpfc_sli4_cfg_shdr *shdr;
15801         struct lpfc_dmabuf *dmabuf;
15802         uint32_t written = 0;
15803
15804         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15805         if (!mbox)
15806                 return -ENOMEM;
15807
15808         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15809                         LPFC_MBOX_OPCODE_WRITE_OBJECT,
15810                         sizeof(struct lpfc_mbx_wr_object) -
15811                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
15812
15813         wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
15814         wr_object->u.request.write_offset = *offset;
15815         sprintf((char *)wr_object->u.request.object_name, "/");
15816         wr_object->u.request.object_name[0] =
15817                 cpu_to_le32(wr_object->u.request.object_name[0]);
15818         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
15819         list_for_each_entry(dmabuf, dmabuf_list, list) {
15820                 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
15821                         break;
15822                 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
15823                 wr_object->u.request.bde[i].addrHigh =
15824                         putPaddrHigh(dmabuf->phys);
15825                 if (written + SLI4_PAGE_SIZE >= size) {
15826                         wr_object->u.request.bde[i].tus.f.bdeSize =
15827                                 (size - written);
15828                         written += (size - written);
15829                         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
15830                 } else {
15831                         wr_object->u.request.bde[i].tus.f.bdeSize =
15832                                 SLI4_PAGE_SIZE;
15833                         written += SLI4_PAGE_SIZE;
15834                 }
15835                 i++;
15836         }
15837         wr_object->u.request.bde_count = i;
15838         bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
15839         if (!phba->sli4_hba.intr_enable)
15840                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15841         else {
15842                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
15843                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
15844         }
15845         /* The IOCTL status is embedded in the mailbox subheader. */
15846         shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
15847         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15848         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15849         if (rc != MBX_TIMEOUT)
15850                 mempool_free(mbox, phba->mbox_mem_pool);
15851         if (shdr_status || shdr_add_status || rc) {
15852                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15853                                 "3025 Write Object mailbox failed with "
15854                                 "status x%x add_status x%x, mbx status x%x\n",
15855                                 shdr_status, shdr_add_status, rc);
15856                 rc = -ENXIO;
15857         } else
15858                 *offset += wr_object->u.response.actual_write_length;
15859         return rc;
15860 }
15861
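/*
 * Illustrative sketch, not part of the driver: one step of a chunked
 * firmware object download.  Each lpfc_wr_object() call consumes up to
 * LPFC_MBX_WR_CONFIG_MAX_BDE buffers and advances @offset by what the
 * port accepted; a full download would refill @dmabuf_list with the
 * next chunk and call again until @offset reaches @size.
 */
static int __maybe_unused
lpfc_wr_object_step_sketch(struct lpfc_hba *phba,
			   struct list_head *dmabuf_list, uint32_t size)
{
	uint32_t offset = 0;

	return lpfc_wr_object(phba, dmabuf_list, size, &offset);
}
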
15862 /**
15863  * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
15864  * @vport: pointer to vport data structure.
15865  *
15866  * This function iterates through the mailboxq and cleans up all REG_LOGIN
15867  * and REG_VPI mailbox commands associated with the vport. This function
15868  * is called when the driver wants to restart discovery of the vport due to
15869  * a Clear Virtual Link event.
15870  **/
15871 void
15872 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
15873 {
15874         struct lpfc_hba *phba = vport->phba;
15875         LPFC_MBOXQ_t *mb, *nextmb;
15876         struct lpfc_dmabuf *mp;
15877         struct lpfc_nodelist *ndlp;
15878         struct lpfc_nodelist *act_mbx_ndlp = NULL;
15879         struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
15880         LIST_HEAD(mbox_cmd_list);
15881         uint8_t restart_loop;
15882
15883         /* Clean up internally queued mailbox commands with the vport */
15884         spin_lock_irq(&phba->hbalock);
15885         list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
15886                 if (mb->vport != vport)
15887                         continue;
15888
15889                 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
15890                         (mb->u.mb.mbxCommand != MBX_REG_VPI))
15891                         continue;
15892
15893                 list_del(&mb->list);
15894                 list_add_tail(&mb->list, &mbox_cmd_list);
15895         }
15896         /* Clean up active mailbox command with the vport */
15897         mb = phba->sli.mbox_active;
15898         if (mb && (mb->vport == vport)) {
15899                 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
15900                         (mb->u.mb.mbxCommand == MBX_REG_VPI))
15901                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15902                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
15903                         act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
15904                         /* Put reference count for delayed processing */
15905                         act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
15906                         /* Unregister the RPI when mailbox complete */
15907                         mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
15908                 }
15909         }
15910         /* Cleanup any mailbox completions which are not yet processed */
15911         do {
15912                 restart_loop = 0;
15913                 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
15914                         /*
15915                          * If this mailbox is already processed or it is
15916                          * for another vport ignore it.
15917                          */
15918                         if ((mb->vport != vport) ||
15919                                 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
15920                                 continue;
15921
15922                         if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
15923                                 (mb->u.mb.mbxCommand != MBX_REG_VPI))
15924                                 continue;
15925
15926                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15927                         if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
15928                                 ndlp = (struct lpfc_nodelist *)mb->context2;
15929                                 /* Unregister the RPI when mailbox complete */
15930                                 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
15931                                 restart_loop = 1;
15932                                 spin_unlock_irq(&phba->hbalock);
15933                                 spin_lock(shost->host_lock);
15934                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
15935                                 spin_unlock(shost->host_lock);
15936                                 spin_lock_irq(&phba->hbalock);
15937                                 break;
15938                         }
15939                 }
15940         } while (restart_loop);
15941
15942         spin_unlock_irq(&phba->hbalock);
15943
15944         /* Release the cleaned-up mailbox commands */
15945         while (!list_empty(&mbox_cmd_list)) {
15946                 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
15947                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
15948                         mp = (struct lpfc_dmabuf *) (mb->context1);
15949                         if (mp) {
15950                                 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
15951                                 kfree(mp);
15952                         }
15953                         ndlp = (struct lpfc_nodelist *) mb->context2;
15954                         mb->context2 = NULL;
15955                         if (ndlp) {
15956                                 spin_lock(shost->host_lock);
15957                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
15958                                 spin_unlock(shost->host_lock);
15959                                 lpfc_nlp_put(ndlp);
15960                         }
15961                 }
15962                 mempool_free(mb, phba->mbox_mem_pool);
15963         }

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}

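/*
 * Illustrative sketch (example only, not part of the driver build): the
 * cleanup above uses a common pattern - move entries onto a private list
 * while holding hbalock, then free them after dropping the lock so that
 * no allocator or re-entrant call runs under the spinlock. A minimal,
 * hypothetical rendering of that pattern (example_reap_locked_list is
 * not a driver symbol):
 */
#if 0	/* example only */
static void example_reap_locked_list(spinlock_t *lock,
				     struct list_head *live)
{
	LIST_HEAD(reaped);
	struct lpfc_dmabuf *buf, *next;

	spin_lock_irq(lock);
	list_splice_init(live, &reaped);	/* detach while locked */
	spin_unlock_irq(lock);

	/* Entries are now private to this thread; safe to free them */
	list_for_each_entry_safe(buf, next, &reaped, list)
		kfree(buf);
}
#endif
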
/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit the IOCBs on the txq
 * to the adapter.  For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because no SGLs
 * were available.  This congestion can occur with large
 * vport counts during node discovery.
 *
 * Returns the number of IOCBs still left on the txq.
 **/
uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe wqe;

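	/* Record the txq high-water mark for diagnostics */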
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (pring->txq_cnt > pring->txq_max)
		pring->txq_max = pring->txq_cnt;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

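	/* Issue deferred IOCBs until the txq drains or resources run out */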
	while (pring->txq_cnt) {
		spin_lock_irqsave(&phba->hbalock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2823 txq empty and txq_cnt is %d\n",
				pring->txq_cnt);
			break;
		}
		sglq = __lpfc_sli_get_sglq(phba, piocbq);
		if (!sglq) {
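			/* Out of SGLs: requeue and retry on a later pass */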
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			break;
		}

		/* The XRI and IOCB resources are secured;
		 * attempt to issue the request.
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (lpfc_sli4_bpl2sgl(phba, piocbq, sglq) == NO_XRI)
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
			/* Reset so a later success in this loop is not
			 * mistaken for a failure.
			 */
			fail_msg = NULL;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return pring->txq_cnt;
}
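
/*
 * Illustrative sketch (example only, not part of the driver build):
 * lpfc_drain_txq() returns the number of IOCBs still queued, so a
 * deferred-work caller could retry while each pass makes progress.
 * example_kick_txq and its retry policy are assumptions for
 * illustration, not driver API.
 */
#if 0	/* example only */
static void example_kick_txq(struct lpfc_hba *phba)
{
	uint32_t before, after;

	do {
		before = phba->sli.ring[LPFC_ELS_RING].txq_cnt;
		after = lpfc_drain_txq(phba);
		/* Stop once the txq is empty or no progress was made */
	} while (after && after < before);
}
#endif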