7509de2f4566065a77f06c0654f510304fba8c14
[linux-2.6.git] / drivers / scsi / lpfc / lpfc_sli.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25 #include <linux/delay.h>
26 #include <linux/slab.h>
27
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_cmnd.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_transport_fc.h>
33 #include <scsi/fc/fc_fs.h>
34 #include <linux/aer.h>
35
36 #include "lpfc_hw4.h"
37 #include "lpfc_hw.h"
38 #include "lpfc_sli.h"
39 #include "lpfc_sli4.h"
40 #include "lpfc_nl.h"
41 #include "lpfc_disc.h"
42 #include "lpfc_scsi.h"
43 #include "lpfc.h"
44 #include "lpfc_crtn.h"
45 #include "lpfc_logmsg.h"
46 #include "lpfc_compat.h"
47 #include "lpfc_debugfs.h"
48 #include "lpfc_vport.h"
49
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,	/* iocb could not be classified */
	LPFC_UNSOL_IOCB,	/* unsolicited iocb */
	LPFC_SOL_IOCB,		/* solicited iocb */
	LPFC_ABORT_IOCB		/* abort iocb */
} lpfc_iocb_type;
57
58
59 /* Provide function prototypes local to this module. */
60 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
61                                   uint32_t);
62 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
63                               uint8_t *, uint32_t *);
64 static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
65                                                          struct lpfc_iocbq *);
66 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
67                                       struct hbq_dmabuf *);
68 static IOCB_t *
69 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
70 {
71         return &iocbq->iocb;
72 }
73
74 /**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
76  * @q: The Work Queue to operate on.
77  * @wqe: The work Queue Entry to put on the Work queue.
78  *
79  * This routine will copy the contents of @wqe to the next available entry on
80  * the @q. This function will then ring the Work Queue Doorbell to signal the
81  * HBA to start processing the Work Queue Entry. This function returns 0 if
82  * successful. If no entries are available on @q then this function will return
83  * -ENOMEM.
84  * The caller is expected to hold the hbalock when calling this routine.
85  **/
86 static uint32_t
87 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
88 {
89         union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
90         struct lpfc_register doorbell;
91         uint32_t host_index;
92
93         /* If the host has not yet processed the next entry then we are done */
94         if (((q->host_index + 1) % q->entry_count) == q->hba_index)
95                 return -ENOMEM;
96         /* set consumption flag every once in a while */
97         if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
98                 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
99
100         lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
101
102         /* Update the host index before invoking device */
103         host_index = q->host_index;
104         q->host_index = ((q->host_index + 1) % q->entry_count);
105
106         /* Ring Doorbell */
107         doorbell.word0 = 0;
108         bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
109         bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
110         bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
111         writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
112         readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
113
114         return 0;
115 }
116
117 /**
118  * lpfc_sli4_wq_release - Updates internal hba index for WQ
119  * @q: The Work Queue to operate on.
120  * @index: The index to advance the hba index to.
121  *
122  * This routine will update the HBA index of a queue to reflect consumption of
123  * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
124  * an entry the host calls this function to update the queue's internal
125  * pointers. This routine returns the number of entries that were consumed by
126  * the HBA.
127  **/
128 static uint32_t
129 lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
130 {
131         uint32_t released = 0;
132
133         if (q->hba_index == index)
134                 return 0;
135         do {
136                 q->hba_index = ((q->hba_index + 1) % q->entry_count);
137                 released++;
138         } while (q->hba_index != index);
139         return released;
140 }
141
142 /**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the entry. This function returns 0 if
150  * successful. If no entries are available on @q then this function will return
151  * -ENOMEM.
152  * The caller is expected to hold the hbalock when calling this routine.
153  **/
154 static uint32_t
155 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
156 {
157         struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
158         struct lpfc_register doorbell;
159         uint32_t host_index;
160
161         /* If the host has not yet processed the next entry then we are done */
162         if (((q->host_index + 1) % q->entry_count) == q->hba_index)
163                 return -ENOMEM;
164         lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
165         /* Save off the mailbox pointer for completion */
166         q->phba->mbox = (MAILBOX_t *)temp_mqe;
167
168         /* Update the host index before invoking device */
169         host_index = q->host_index;
170         q->host_index = ((q->host_index + 1) % q->entry_count);
171
172         /* Ring Doorbell */
173         doorbell.word0 = 0;
174         bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
175         bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
176         writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
177         readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
178         return 0;
179 }
180
181 /**
182  * lpfc_sli4_mq_release - Updates internal hba index for MQ
183  * @q: The Mailbox Queue to operate on.
184  *
185  * This routine will update the HBA index of a queue to reflect consumption of
186  * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
187  * an entry the host calls this function to update the queue's internal
188  * pointers. This routine returns the number of entries that were consumed by
189  * the HBA.
190  **/
191 static uint32_t
192 lpfc_sli4_mq_release(struct lpfc_queue *q)
193 {
194         /* Clear the mailbox pointer for completion */
195         q->phba->mbox = NULL;
196         q->hba_index = ((q->hba_index + 1) % q->entry_count);
197         return 1;
198 }
199
200 /**
201  * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
202  * @q: The Event Queue to get the first valid EQE from
203  *
204  * This routine will get the first valid Event Queue Entry from @q, update
205  * the queue's internal hba index, and return the EQE. If no valid EQEs are in
206  * the Queue (no more work to do), or the Queue is full of EQEs that have been
207  * processed, but not popped back to the HBA then this routine will return NULL.
208  **/
209 static struct lpfc_eqe *
210 lpfc_sli4_eq_get(struct lpfc_queue *q)
211 {
212         struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
213
214         /* If the next EQE is not valid then we are done */
215         if (!bf_get_le32(lpfc_eqe_valid, eqe))
216                 return NULL;
217         /* If the host has not yet processed the next entry then we are done */
218         if (((q->hba_index + 1) % q->entry_count) == q->host_index)
219                 return NULL;
220
221         q->hba_index = ((q->hba_index + 1) % q->entry_count);
222         return eqe;
223 }
224
225 /**
226  * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
227  * @q: The Event Queue that the host has completed processing for.
228  * @arm: Indicates whether the host wants to arms this CQ.
229  *
230  * This routine will mark all Event Queue Entries on @q, from the last
231  * known completed entry to the last entry that was processed, as completed
232  * by clearing the valid bit for each completion queue entry. Then it will
233  * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
234  * The internal host index in the @q will be updated by this routine to indicate
235  * that the host has finished processing the entries. The @arm parameter
236  * indicates that the queue should be rearmed when ringing the doorbell.
237  *
238  * This function will return the number of EQEs that were popped.
239  **/
240 uint32_t
241 lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
242 {
243         uint32_t released = 0;
244         struct lpfc_eqe *temp_eqe;
245         struct lpfc_register doorbell;
246
247         /* while there are valid entries */
248         while (q->hba_index != q->host_index) {
249                 temp_eqe = q->qe[q->host_index].eqe;
250                 bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
251                 released++;
252                 q->host_index = ((q->host_index + 1) % q->entry_count);
253         }
254         if (unlikely(released == 0 && !arm))
255                 return 0;
256
257         /* ring doorbell for number popped */
258         doorbell.word0 = 0;
259         if (arm) {
260                 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
261                 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
262         }
263         bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
264         bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
265         bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
266         writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
267         /* PCI read to flush PCI pipeline on re-arming for INTx mode */
268         if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
269                 readl(q->phba->sli4_hba.EQCQDBregaddr);
270         return released;
271 }
272
273 /**
274  * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
275  * @q: The Completion Queue to get the first valid CQE from
276  *
277  * This routine will get the first valid Completion Queue Entry from @q, update
278  * the queue's internal hba index, and return the CQE. If no valid CQEs are in
279  * the Queue (no more work to do), or the Queue is full of CQEs that have been
280  * processed, but not popped back to the HBA then this routine will return NULL.
281  **/
282 static struct lpfc_cqe *
283 lpfc_sli4_cq_get(struct lpfc_queue *q)
284 {
285         struct lpfc_cqe *cqe;
286
287         /* If the next CQE is not valid then we are done */
288         if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
289                 return NULL;
290         /* If the host has not yet processed the next entry then we are done */
291         if (((q->hba_index + 1) % q->entry_count) == q->host_index)
292                 return NULL;
293
294         cqe = q->qe[q->hba_index].cqe;
295         q->hba_index = ((q->hba_index + 1) % q->entry_count);
296         return cqe;
297 }
298
299 /**
300  * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
301  * @q: The Completion Queue that the host has completed processing for.
302  * @arm: Indicates whether the host wants to arms this CQ.
303  *
304  * This routine will mark all Completion queue entries on @q, from the last
305  * known completed entry to the last entry that was processed, as completed
306  * by clearing the valid bit for each completion queue entry. Then it will
307  * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
308  * The internal host index in the @q will be updated by this routine to indicate
309  * that the host has finished processing the entries. The @arm parameter
310  * indicates that the queue should be rearmed when ringing the doorbell.
311  *
312  * This function will return the number of CQEs that were released.
313  **/
314 uint32_t
315 lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
316 {
317         uint32_t released = 0;
318         struct lpfc_cqe *temp_qe;
319         struct lpfc_register doorbell;
320
321         /* while there are valid entries */
322         while (q->hba_index != q->host_index) {
323                 temp_qe = q->qe[q->host_index].cqe;
324                 bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
325                 released++;
326                 q->host_index = ((q->host_index + 1) % q->entry_count);
327         }
328         if (unlikely(released == 0 && !arm))
329                 return 0;
330
331         /* ring doorbell for number popped */
332         doorbell.word0 = 0;
333         if (arm)
334                 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
335         bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
336         bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
337         bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
338         writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
339         return released;
340 }
341
342 /**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The header Receive Queue Entry to put on the header queue.
 * @drqe: The data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq respectively, then ring the Receive Queue
 * Doorbell to signal the HBA to start processing them. This function returns
 * the index that the rqe was copied to if successful. If no entries are
 * available on @hq then this function will return -EBUSY.
352  * The caller is expected to hold the hbalock when calling this routine.
353  **/
354 static int
355 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
356                  struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
357 {
358         struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
359         struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
360         struct lpfc_register doorbell;
361         int put_index = hq->host_index;
362
363         if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
364                 return -EINVAL;
365         if (hq->host_index != dq->host_index)
366                 return -EINVAL;
367         /* If the host has not yet processed the next entry then we are done */
368         if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
369                 return -EBUSY;
370         lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
371         lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
372
373         /* Update the host index to point to the next slot */
374         hq->host_index = ((hq->host_index + 1) % hq->entry_count);
375         dq->host_index = ((dq->host_index + 1) % dq->entry_count);
376
377         /* Ring The Header Receive Queue Doorbell */
378         if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
379                 doorbell.word0 = 0;
380                 bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
381                        LPFC_RQ_POST_BATCH);
382                 bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
383                 writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
384         }
385         return put_index;
386 }
387
388 /**
389  * lpfc_sli4_rq_release - Updates internal hba index for RQ
390  * @q: The Header Receive Queue to operate on.
391  *
392  * This routine will update the HBA index of a queue to reflect consumption of
393  * one Receive Queue Entry by the HBA. When the HBA indicates that it has
394  * consumed an entry the host calls this function to update the queue's
395  * internal pointers. This routine returns the number of entries that were
396  * consumed by the HBA.
397  **/
398 static uint32_t
399 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
400 {
401         if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
402                 return 0;
403         hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
404         dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
405         return 1;
406 }
407
408 /**
409  * lpfc_cmd_iocb - Get next command iocb entry in the ring
410  * @phba: Pointer to HBA context object.
411  * @pring: Pointer to driver SLI ring object.
412  *
413  * This function returns pointer to next command iocb entry
414  * in the command ring. The caller must hold hbalock to prevent
415  * other threads consume the next command iocb.
416  * SLI-2/SLI-3 provide different sized iocbs.
417  **/
418 static inline IOCB_t *
419 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
420 {
421         return (IOCB_t *) (((char *) pring->cmdringaddr) +
422                            pring->cmdidx * phba->iocb_cmd_size);
423 }
424
425 /**
426  * lpfc_resp_iocb - Get next response iocb entry in the ring
427  * @phba: Pointer to HBA context object.
428  * @pring: Pointer to driver SLI ring object.
429  *
430  * This function returns pointer to next response iocb entry
431  * in the response ring. The caller must hold hbalock to make sure
432  * that no other thread consume the next response iocb.
433  * SLI-2/SLI-3 provide different sized iocbs.
434  **/
435 static inline IOCB_t *
436 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
437 {
438         return (IOCB_t *) (((char *) pring->rspringaddr) +
439                            pring->rspidx * phba->iocb_rsp_size);
440 }
441
442 /**
443  * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
444  * @phba: Pointer to HBA context object.
445  *
446  * This function is called with hbalock held. This function
447  * allocates a new driver iocb object from the iocb pool. If the
448  * allocation is successful, it returns pointer to the newly
449  * allocated iocb object else it returns NULL.
450  **/
451 static struct lpfc_iocbq *
452 __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
453 {
454         struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
455         struct lpfc_iocbq * iocbq = NULL;
456
457         list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
458
459         if (iocbq)
460                 phba->iocb_cnt++;
461         if (phba->iocb_cnt > phba->iocb_max)
462                 phba->iocb_max = phba->iocb_cnt;
463         return iocbq;
464 }
465
466 /**
467  * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
468  * @phba: Pointer to HBA context object.
469  * @xritag: XRI value.
470  *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
477  **/
478 static struct lpfc_sglq *
479 __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
480 {
481         uint16_t adj_xri;
482         struct lpfc_sglq *sglq;
483         adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
484         if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
485                 return NULL;
486         sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
487         phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
488         return sglq;
489 }
490
491 /**
492  * __lpfc_get_active_sglq - Get the active sglq for this XRI.
493  * @phba: Pointer to HBA context object.
494  * @xritag: XRI value.
495  *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
502  **/
503 struct lpfc_sglq *
504 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
505 {
506         uint16_t adj_xri;
507         struct lpfc_sglq *sglq;
508         adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
509         if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
510                 return NULL;
511         sglq =  phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
512         return sglq;
513 }
514
515 /**
516  * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap.
517  * @phba: Pointer to HBA context object.
518  * @ndlp: nodelist pointer for this target.
519  * @xritag: xri used in this exchange.
520  * @rxid: Remote Exchange ID.
521  * @send_rrq: Flag used to determine if we should send rrq els cmd.
522  *
523  * This function is called with hbalock held.
524  * The active bit is set in the ndlp's active rrq xri_bitmap. Allocates an
525  * rrq struct and adds it to the active_rrq_list.
526  *
527  * returns  0 for rrq slot for this xri
528  *         < 0  Were not able to get rrq mem or invalid parameter.
529  **/
530 static int
531 __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
532                 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
533 {
534         uint16_t adj_xri;
535         struct lpfc_node_rrq *rrq;
536         int empty;
537
538         /*
539          * set the active bit even if there is no mem available.
540          */
541         adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
542         if (!ndlp)
543                 return -EINVAL;
544         if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
545                 return -EINVAL;
546         rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
547         if (rrq) {
548                 rrq->send_rrq = send_rrq;
549                 rrq->xritag = xritag;
550                 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
551                 rrq->ndlp = ndlp;
552                 rrq->nlp_DID = ndlp->nlp_DID;
553                 rrq->vport = ndlp->vport;
554                 rrq->rxid = rxid;
555                 empty = list_empty(&phba->active_rrq_list);
556                 if (phba->cfg_enable_rrq && send_rrq)
557                         /*
558                          * We need the xri before we can add this to the
559                          * phba active rrq list.
560                          */
561                         rrq->send_rrq = send_rrq;
562                 else
563                         rrq->send_rrq = 0;
564                 list_add_tail(&rrq->list, &phba->active_rrq_list);
565                 if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
566                         phba->hba_flag |= HBA_RRQ_ACTIVE;
567                         if (empty)
568                                 lpfc_worker_wake_up(phba);
569                 }
570                 return 0;
571         }
572         return -ENOMEM;
573 }
574
575 /**
576  * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
577  * @phba: Pointer to HBA context object.
578  * @xritag: xri used in this exchange.
579  * @rrq: The RRQ to be cleared.
580  *
 * This function is called with hbalock held. It clears the RRQ state for
 * the exchange and frees the rrq back to the mempool.
582  **/
583 static void
584 __lpfc_clr_rrq_active(struct lpfc_hba *phba,
585                         uint16_t xritag,
586                         struct lpfc_node_rrq *rrq)
587 {
588         uint16_t adj_xri;
589         struct lpfc_nodelist *ndlp;
590
591         ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
592
593         /* The target DID could have been swapped (cable swap)
594          * we should use the ndlp from the findnode if it is
595          * available.
596          */
597         if (!ndlp)
598                 ndlp = rrq->ndlp;
599
600         adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
601         if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
602                 rrq->send_rrq = 0;
603                 rrq->xritag = 0;
604                 rrq->rrq_stop_time = 0;
605         }
606         mempool_free(rrq, phba->rrq_pool);
607 }
608
609 /**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
611  * @phba: Pointer to HBA context object.
612  *
613  * This function is called with hbalock held. This function
614  * Checks if stop_time (ratov from setting rrq active) has
615  * been reached, if it has and the send_rrq flag is set then
616  * it will call lpfc_send_rrq. If the send_rrq flag is not set
617  * then it will just call the routine to clear the rrq and
618  * free the rrq resource.
619  * The timer is set to the next rrq that is going to expire before
620  * leaving the routine.
621  *
622  **/
623 void
624 lpfc_handle_rrq_active(struct lpfc_hba *phba)
625 {
626         struct lpfc_node_rrq *rrq;
627         struct lpfc_node_rrq *nextrrq;
628         unsigned long next_time;
629         unsigned long iflags;
630
631         spin_lock_irqsave(&phba->hbalock, iflags);
632         phba->hba_flag &= ~HBA_RRQ_ACTIVE;
633         next_time = jiffies + HZ * (phba->fc_ratov + 1);
634         list_for_each_entry_safe(rrq, nextrrq,
635                         &phba->active_rrq_list, list) {
636                 if (time_after(jiffies, rrq->rrq_stop_time)) {
637                         list_del(&rrq->list);
638                         if (!rrq->send_rrq)
639                                 /* this call will free the rrq */
640                                 __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
641                         else {
642                         /* if we send the rrq then the completion handler
643                          *  will clear the bit in the xribitmap.
644                          */
645                                 spin_unlock_irqrestore(&phba->hbalock, iflags);
646                                 if (lpfc_send_rrq(phba, rrq)) {
647                                         lpfc_clr_rrq_active(phba, rrq->xritag,
648                                                                  rrq);
649                                 }
650                                 spin_lock_irqsave(&phba->hbalock, iflags);
651                         }
652                 } else if  (time_before(rrq->rrq_stop_time, next_time))
653                         next_time = rrq->rrq_stop_time;
654         }
655         spin_unlock_irqrestore(&phba->hbalock, iflags);
656         if (!list_empty(&phba->active_rrq_list))
657                 mod_timer(&phba->rrq_tmr, next_time);
658 }
659
660 /**
661  * lpfc_get_active_rrq - Get the active RRQ for this exchange.
662  * @vport: Pointer to vport context object.
663  * @xri: The xri used in the exchange.
664  * @did: The targets DID for this exchange.
665  *
666  * returns NULL = rrq not found in the phba->active_rrq_list.
667  *         rrq = rrq for this xri and target.
668  **/
669 struct lpfc_node_rrq *
670 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
671 {
672         struct lpfc_hba *phba = vport->phba;
673         struct lpfc_node_rrq *rrq;
674         struct lpfc_node_rrq *nextrrq;
675         unsigned long iflags;
676
677         if (phba->sli_rev != LPFC_SLI_REV4)
678                 return NULL;
679         spin_lock_irqsave(&phba->hbalock, iflags);
680         list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
681                 if (rrq->vport == vport && rrq->xritag == xri &&
682                                 rrq->nlp_DID == did){
683                         list_del(&rrq->list);
684                         spin_unlock_irqrestore(&phba->hbalock, iflags);
685                         return rrq;
686                 }
687         }
688         spin_unlock_irqrestore(&phba->hbalock, iflags);
689         return NULL;
690 }
691
692 /**
693  * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
694  * @vport: Pointer to vport context object.
695  *
696  * Remove all active RRQs for this vport from the phba->active_rrq_list and
697  * clear the rrq.
698  **/
699 void
700 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport)
701
702 {
703         struct lpfc_hba *phba = vport->phba;
704         struct lpfc_node_rrq *rrq;
705         struct lpfc_node_rrq *nextrrq;
706         unsigned long iflags;
707
708         if (phba->sli_rev != LPFC_SLI_REV4)
709                 return;
710         spin_lock_irqsave(&phba->hbalock, iflags);
711         list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
712                 if (rrq->vport == vport) {
713                         list_del(&rrq->list);
714                         __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
715                 }
716         }
717         spin_unlock_irqrestore(&phba->hbalock, iflags);
718 }
719
720 /**
721  * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list.
722  * @phba: Pointer to HBA context object.
723  *
724  * Remove all rrqs from the phba->active_rrq_list and free them by
725  * calling __lpfc_clr_active_rrq
726  *
727  **/
728 void
729 lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
730 {
731         struct lpfc_node_rrq *rrq;
732         struct lpfc_node_rrq *nextrrq;
733         unsigned long next_time;
734         unsigned long iflags;
735
736         if (phba->sli_rev != LPFC_SLI_REV4)
737                 return;
738         spin_lock_irqsave(&phba->hbalock, iflags);
739         phba->hba_flag &= ~HBA_RRQ_ACTIVE;
740         next_time = jiffies + HZ * (phba->fc_ratov * 2);
741         list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
742                 list_del(&rrq->list);
743                 __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
744         }
745         spin_unlock_irqrestore(&phba->hbalock, iflags);
746         if (!list_empty(&phba->active_rrq_list))
747                 mod_timer(&phba->rrq_tmr, next_time);
748 }
749
750
751 /**
752  * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
753  * @phba: Pointer to HBA context object.
754  * @ndlp: Targets nodelist pointer for this exchange.
755  * @xritag the xri in the bitmap to test.
756  *
757  * This function is called with hbalock held. This function
758  * returns 0 = rrq not active for this xri
759  *         1 = rrq is valid for this xri.
760  **/
761 static int
762 __lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
763                         uint16_t  xritag)
764 {
765         uint16_t adj_xri;
766
767         adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
768         if (!ndlp)
769                 return 0;
770         if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
771                         return 1;
772         else
773                 return 0;
774 }
775
776 /**
777  * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
778  * @phba: Pointer to HBA context object.
779  * @ndlp: nodelist pointer for this target.
780  * @xritag: xri used in this exchange.
781  * @rxid: Remote Exchange ID.
782  * @send_rrq: Flag used to determine if we should send rrq els cmd.
783  *
784  * This function takes the hbalock.
785  * The active bit is always set in the active rrq xri_bitmap even
786  * if there is no slot avaiable for the other rrq information.
787  *
788  * returns 0 rrq actived for this xri
789  *         < 0 No memory or invalid ndlp.
790  **/
791 int
792 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
793                         uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
794 {
795         int ret;
796         unsigned long iflags;
797
798         spin_lock_irqsave(&phba->hbalock, iflags);
799         ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
800         spin_unlock_irqrestore(&phba->hbalock, iflags);
801         return ret;
802 }
803
804 /**
805  * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
806  * @phba: Pointer to HBA context object.
807  * @xritag: xri used in this exchange.
808  * @rrq: The RRQ to be cleared.
809  *
810  * This function is takes the hbalock.
811  **/
812 void
813 lpfc_clr_rrq_active(struct lpfc_hba *phba,
814                         uint16_t xritag,
815                         struct lpfc_node_rrq *rrq)
816 {
817         unsigned long iflags;
818
819         spin_lock_irqsave(&phba->hbalock, iflags);
820         __lpfc_clr_rrq_active(phba, xritag, rrq);
821         spin_unlock_irqrestore(&phba->hbalock, iflags);
822         return;
823 }
824
825
826
827 /**
828  * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
829  * @phba: Pointer to HBA context object.
830  * @ndlp: Targets nodelist pointer for this exchange.
831  * @xritag the xri in the bitmap to test.
832  *
833  * This function takes the hbalock.
834  * returns 0 = rrq not active for this xri
835  *         1 = rrq is valid for this xri.
836  **/
837 int
838 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
839                         uint16_t  xritag)
840 {
841         int ret;
842         unsigned long iflags;
843
844         spin_lock_irqsave(&phba->hbalock, iflags);
845         ret = __lpfc_test_rrq_active(phba, ndlp, xritag);
846         spin_unlock_irqrestore(&phba->hbalock, iflags);
847         return ret;
848 }
849
/**
 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq the sglq will be used for.
 *
 * This function is called with hbalock held. It removes a driver sglq
 * object from the free sglq list, rotating rejected entries to the
 * tail, until it finds one whose xri does not have an outstanding RRQ
 * against the destination node (such an xri must not be reused for
 * that node until the RRQ completes). Returns the selected sglq, or
 * NULL if the free list is empty or every free sglq is RRQ-blocked.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	uint16_t adj_xri;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	/*
	 * Resolve the destination node; the RRQ test below is per-node.
	 * NOTE(review): the FCP path dereferences lpfc_cmd->rdata->pnode
	 * without a NULL check -- callers presumably guarantee rdata is
	 * valid here; confirm.
	 */
	if (piocbq->iocb_flag &  LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->iocb_flag & LPFC_IO_LIBDFC))
		ndlp = piocbq->context_un.ndlp;
	else
		ndlp = piocbq->context1;

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			return NULL;
		adj_xri = sglq->sli4_xritag -
				phba->sli4_hba.max_cfg_param.xri_base;
		if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_sgl_list, sglq,
						struct lpfc_sglq, list);
			/* Wrapped back to the first candidate: every
			 * free sglq is RRQ-blocked, give up.
			 */
			if (sglq == start_sglq) {
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		/* Publish the sglq in the active array, indexed by xri. */
		phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	return sglq;
}
908
909 /**
910  * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
911  * @phba: Pointer to HBA context object.
912  *
913  * This function is called with no lock held. This function
914  * allocates a new driver iocb object from the iocb pool. If the
915  * allocation is successful, it returns pointer to the newly
916  * allocated iocb object else it returns NULL.
917  **/
918 struct lpfc_iocbq *
919 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
920 {
921         struct lpfc_iocbq * iocbq = NULL;
922         unsigned long iflags;
923
924         spin_lock_irqsave(&phba->hbalock, iflags);
925         iocbq = __lpfc_sli_get_iocbq(phba);
926         spin_unlock_irqrestore(&phba->hbalock, iflags);
927         return iocbq;
928 }
929
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
	if (sglq)  {
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
			(sglq->state != SGL_XRI_ABORTED)) {
			/* XRI still owned by the port: park the sglq on
			 * the aborted list until the XRI_ABORTED event
			 * releases it.
			 */
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					iflag);
			list_add(&sglq->list,
				&phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else {
			/* Normal completion: recycle the sglq. */
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);

			/* Check if TXQ queue needs to be serviced */
			if (pring->txq_cnt)
				lpfc_worker_wake_up(phba);
		}
	}


	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
988
989
990 /**
991  * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
992  * @phba: Pointer to HBA context object.
993  * @iocbq: Pointer to driver iocb object.
994  *
995  * This function is called with hbalock held to release driver
996  * iocb object to the iocb pool. The iotag in the iocb object
997  * does not change for each use of the iocb object. This function
998  * clears all other fields of the iocb object when it is freed.
999  **/
1000 static void
1001 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1002 {
1003         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1004
1005         /*
1006          * Clean all volatile data fields, preserve iotag and node struct.
1007          */
1008         memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1009         iocbq->sli4_xritag = NO_XRI;
1010         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1011 }
1012
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * Dispatches to the SLI-rev specific release routine installed in
 * the phba, then drops the outstanding-iocb count.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	/* Indirect call: the s3 or s4 variant chosen at setup time. */
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
1029
1030 /**
1031  * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1032  * @phba: Pointer to HBA context object.
1033  * @iocbq: Pointer to driver iocb object.
1034  *
1035  * This function is called with no lock held to release the iocb to
1036  * iocb pool.
1037  **/
1038 void
1039 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1040 {
1041         unsigned long iflags;
1042
1043         /*
1044          * Clean all volatile data fields, preserve iotag and node struct.
1045          */
1046         spin_lock_irqsave(&phba->hbalock, iflags);
1047         __lpfc_sli_release_iocbq(phba, iocbq);
1048         spin_unlock_irqrestore(&phba->hbalock, iflags);
1049 }
1050
1051 /**
1052  * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1053  * @phba: Pointer to HBA context object.
1054  * @iocblist: List of IOCBs.
1055  * @ulpstatus: ULP status in IOCB command field.
1056  * @ulpWord4: ULP word-4 in IOCB command field.
1057  *
1058  * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1059  * on the list by invoking the complete callback function associated with the
1060  * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond
1061  * fields.
1062  **/
1063 void
1064 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1065                       uint32_t ulpstatus, uint32_t ulpWord4)
1066 {
1067         struct lpfc_iocbq *piocb;
1068
1069         while (!list_empty(iocblist)) {
1070                 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1071
1072                 if (!piocb->iocb_cmpl)
1073                         lpfc_sli_release_iocbq(phba, piocb);
1074                 else {
1075                         piocb->iocb.ulpStatus = ulpstatus;
1076                         piocb->iocb.un.ulpWord[4] = ulpWord4;
1077                         (piocb->iocb_cmpl) (phba, piocb, piocb);
1078                 }
1079         }
1080         return;
1081 }
1082
1083 /**
1084  * lpfc_sli_iocb_cmd_type - Get the iocb type
1085  * @iocb_cmnd: iocb command code.
1086  *
1087  * This function is called by ring event handler function to get the iocb type.
1088  * This function translates the iocb command to an iocb command type used to
1089  * decide the final disposition of each completed IOCB.
1090  * The function returns
1091  * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1092  * LPFC_SOL_IOCB     if it is a solicited iocb completion
1093  * LPFC_ABORT_IOCB   if it is an abort iocb
1094  * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1095  *
1096  * The caller is not required to hold any lock.
1097  **/
1098 static lpfc_iocb_type
1099 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1100 {
1101         lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1102
1103         if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1104                 return 0;
1105
1106         switch (iocb_cmnd) {
1107         case CMD_XMIT_SEQUENCE_CR:
1108         case CMD_XMIT_SEQUENCE_CX:
1109         case CMD_XMIT_BCAST_CN:
1110         case CMD_XMIT_BCAST_CX:
1111         case CMD_ELS_REQUEST_CR:
1112         case CMD_ELS_REQUEST_CX:
1113         case CMD_CREATE_XRI_CR:
1114         case CMD_CREATE_XRI_CX:
1115         case CMD_GET_RPI_CN:
1116         case CMD_XMIT_ELS_RSP_CX:
1117         case CMD_GET_RPI_CR:
1118         case CMD_FCP_IWRITE_CR:
1119         case CMD_FCP_IWRITE_CX:
1120         case CMD_FCP_IREAD_CR:
1121         case CMD_FCP_IREAD_CX:
1122         case CMD_FCP_ICMND_CR:
1123         case CMD_FCP_ICMND_CX:
1124         case CMD_FCP_TSEND_CX:
1125         case CMD_FCP_TRSP_CX:
1126         case CMD_FCP_TRECEIVE_CX:
1127         case CMD_FCP_AUTO_TRSP_CX:
1128         case CMD_ADAPTER_MSG:
1129         case CMD_ADAPTER_DUMP:
1130         case CMD_XMIT_SEQUENCE64_CR:
1131         case CMD_XMIT_SEQUENCE64_CX:
1132         case CMD_XMIT_BCAST64_CN:
1133         case CMD_XMIT_BCAST64_CX:
1134         case CMD_ELS_REQUEST64_CR:
1135         case CMD_ELS_REQUEST64_CX:
1136         case CMD_FCP_IWRITE64_CR:
1137         case CMD_FCP_IWRITE64_CX:
1138         case CMD_FCP_IREAD64_CR:
1139         case CMD_FCP_IREAD64_CX:
1140         case CMD_FCP_ICMND64_CR:
1141         case CMD_FCP_ICMND64_CX:
1142         case CMD_FCP_TSEND64_CX:
1143         case CMD_FCP_TRSP64_CX:
1144         case CMD_FCP_TRECEIVE64_CX:
1145         case CMD_GEN_REQUEST64_CR:
1146         case CMD_GEN_REQUEST64_CX:
1147         case CMD_XMIT_ELS_RSP64_CX:
1148         case DSSCMD_IWRITE64_CR:
1149         case DSSCMD_IWRITE64_CX:
1150         case DSSCMD_IREAD64_CR:
1151         case DSSCMD_IREAD64_CX:
1152                 type = LPFC_SOL_IOCB;
1153                 break;
1154         case CMD_ABORT_XRI_CN:
1155         case CMD_ABORT_XRI_CX:
1156         case CMD_CLOSE_XRI_CN:
1157         case CMD_CLOSE_XRI_CX:
1158         case CMD_XRI_ABORTED_CX:
1159         case CMD_ABORT_MXRI64_CN:
1160         case CMD_XMIT_BLS_RSP64_CX:
1161                 type = LPFC_ABORT_IOCB;
1162                 break;
1163         case CMD_RCV_SEQUENCE_CX:
1164         case CMD_RCV_ELS_REQ_CX:
1165         case CMD_RCV_SEQUENCE64_CX:
1166         case CMD_RCV_ELS_REQ64_CX:
1167         case CMD_ASYNC_STATUS:
1168         case CMD_IOCB_RCV_SEQ64_CX:
1169         case CMD_IOCB_RCV_ELS64_CX:
1170         case CMD_IOCB_RCV_CONT64_CX:
1171         case CMD_IOCB_RET_XRI64_CX:
1172                 type = LPFC_UNSOL_IOCB;
1173                 break;
1174         case CMD_IOCB_XMIT_MSEQ64_CR:
1175         case CMD_IOCB_XMIT_MSEQ64_CX:
1176         case CMD_IOCB_RCV_SEQ_LIST64_CX:
1177         case CMD_IOCB_RCV_ELS_LIST64_CX:
1178         case CMD_IOCB_CLOSE_EXTENDED_CN:
1179         case CMD_IOCB_ABORT_EXTENDED_CN:
1180         case CMD_IOCB_RET_HBQE64_CN:
1181         case CMD_IOCB_FCP_IBIDIR64_CR:
1182         case CMD_IOCB_FCP_IBIDIR64_CX:
1183         case CMD_IOCB_FCP_ITASKMGT64_CX:
1184         case CMD_IOCB_LOGENTRY_CN:
1185         case CMD_IOCB_LOGENTRY_ASYNC_CN:
1186                 printk("%s - Unhandled SLI-3 Command x%x\n",
1187                                 __func__, iocb_cmnd);
1188                 type = LPFC_UNKNOWN_IOCB;
1189                 break;
1190         default:
1191                 type = LPFC_UNKNOWN_IOCB;
1192                 break;
1193         }
1194
1195         return type;
1196 }
1197
1198 /**
1199  * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1200  * @phba: Pointer to HBA context object.
1201  *
1202  * This function is called from SLI initialization code
1203  * to configure every ring of the HBA's SLI interface. The
1204  * caller is not required to hold any lock. This function issues
1205  * a config_ring mailbox command for each ring.
1206  * This function returns zero if successful else returns a negative
1207  * error code.
1208  **/
1209 static int
1210 lpfc_sli_ring_map(struct lpfc_hba *phba)
1211 {
1212         struct lpfc_sli *psli = &phba->sli;
1213         LPFC_MBOXQ_t *pmb;
1214         MAILBOX_t *pmbox;
1215         int i, rc, ret = 0;
1216
1217         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1218         if (!pmb)
1219                 return -ENOMEM;
1220         pmbox = &pmb->u.mb;
1221         phba->link_state = LPFC_INIT_MBX_CMDS;
1222         for (i = 0; i < psli->num_rings; i++) {
1223                 lpfc_config_ring(phba, i, pmb);
1224                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1225                 if (rc != MBX_SUCCESS) {
1226                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1227                                         "0446 Adapter failed to init (%d), "
1228                                         "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1229                                         "ring %d\n",
1230                                         rc, pmbox->mbxCommand,
1231                                         pmbox->mbxStatus, i);
1232                         phba->link_state = LPFC_HBA_ERROR;
1233                         ret = -ENXIO;
1234                         break;
1235                 }
1236         }
1237         mempool_free(pmb, phba->mbox_mem_pool);
1238         return ret;
1239 }
1240
1241 /**
1242  * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1243  * @phba: Pointer to HBA context object.
1244  * @pring: Pointer to driver SLI ring object.
1245  * @piocb: Pointer to the driver iocb object.
1246  *
1247  * This function is called with hbalock held. The function adds the
1248  * new iocb to txcmplq of the given ring. This function always returns
1249  * 0. If this function is called for ELS ring, this function checks if
1250  * there is a vport associated with the ELS command. This function also
1251  * starts els_tmofunc timer if this is an ELS command.
1252  **/
1253 static int
1254 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1255                         struct lpfc_iocbq *piocb)
1256 {
1257         list_add_tail(&piocb->list, &pring->txcmplq);
1258         piocb->iocb_flag |= LPFC_IO_ON_Q;
1259         pring->txcmplq_cnt++;
1260         if (pring->txcmplq_cnt > pring->txcmplq_max)
1261                 pring->txcmplq_max = pring->txcmplq_cnt;
1262
1263         if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1264            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1265            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1266                 if (!piocb->vport)
1267                         BUG();
1268                 else
1269                         mod_timer(&piocb->vport->els_tmofunc,
1270                                   jiffies + HZ * (phba->fc_ratov << 1));
1271         }
1272
1273
1274         return 0;
1275 }
1276
1277 /**
1278  * lpfc_sli_ringtx_get - Get first element of the txq
1279  * @phba: Pointer to HBA context object.
1280  * @pring: Pointer to driver SLI ring object.
1281  *
1282  * This function is called with hbalock held to get next
1283  * iocb in txq of the given ring. If there is any iocb in
1284  * the txq, the function returns first iocb in the list after
1285  * removing the iocb from the list, else it returns NULL.
1286  **/
1287 struct lpfc_iocbq *
1288 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1289 {
1290         struct lpfc_iocbq *cmd_iocb;
1291
1292         list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1293         if (cmd_iocb != NULL)
1294                 pring->txq_cnt--;
1295         return cmd_iocb;
1296 }
1297
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t  max_cmd_idx = pring->numCiocb;
	/* Advance the put index, wrapping at the end of the ring. */
	if ((pring->next_cmdidx == pring->cmdidx) &&
	   (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

		/* Ring looks full: refresh the cached get index from
		 * the port's shared-memory pgp before giving up.
		 */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

		/* A get index beyond the ring size means the port is
		 * reporting garbage -- take the HBA offline.
		 */
		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->local_getidx, max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		/* Still full after the refresh: no slot available. */
		if (pring->local_getidx == pring->next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
1351
1352 /**
1353  * lpfc_sli_next_iotag - Get an iotag for the iocb
1354  * @phba: Pointer to HBA context object.
1355  * @iocbq: Pointer to driver iocb object.
1356  *
1357  * This function gets an iotag for the iocb. If there is no unused iotag and
1358  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
1359  * array and assigns a new iotag.
1360  * The function returns the allocated iotag if successful, else returns zero.
1361  * Zero is not a valid iotag.
1362  * The caller is not required to hold any lock.
1363  **/
1364 uint16_t
1365 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1366 {
1367         struct lpfc_iocbq **new_arr;
1368         struct lpfc_iocbq **old_arr;
1369         size_t new_len;
1370         struct lpfc_sli *psli = &phba->sli;
1371         uint16_t iotag;
1372
1373         spin_lock_irq(&phba->hbalock);
1374         iotag = psli->last_iotag;
1375         if(++iotag < psli->iocbq_lookup_len) {
1376                 psli->last_iotag = iotag;
1377                 psli->iocbq_lookup[iotag] = iocbq;
1378                 spin_unlock_irq(&phba->hbalock);
1379                 iocbq->iotag = iotag;
1380                 return iotag;
1381         } else if (psli->iocbq_lookup_len < (0xffff
1382                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1383                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1384                 spin_unlock_irq(&phba->hbalock);
1385                 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
1386                                   GFP_KERNEL);
1387                 if (new_arr) {
1388                         spin_lock_irq(&phba->hbalock);
1389                         old_arr = psli->iocbq_lookup;
1390                         if (new_len <= psli->iocbq_lookup_len) {
1391                                 /* highly unprobable case */
1392                                 kfree(new_arr);
1393                                 iotag = psli->last_iotag;
1394                                 if(++iotag < psli->iocbq_lookup_len) {
1395                                         psli->last_iotag = iotag;
1396                                         psli->iocbq_lookup[iotag] = iocbq;
1397                                         spin_unlock_irq(&phba->hbalock);
1398                                         iocbq->iotag = iotag;
1399                                         return iotag;
1400                                 }
1401                                 spin_unlock_irq(&phba->hbalock);
1402                                 return 0;
1403                         }
1404                         if (psli->iocbq_lookup)
1405                                 memcpy(new_arr, old_arr,
1406                                        ((psli->last_iotag  + 1) *
1407                                         sizeof (struct lpfc_iocbq *)));
1408                         psli->iocbq_lookup = new_arr;
1409                         psli->iocbq_lookup_len = new_len;
1410                         psli->last_iotag = iotag;
1411                         psli->iocbq_lookup[iotag] = iocbq;
1412                         spin_unlock_irq(&phba->hbalock);
1413                         iocbq->iotag = iotag;
1414                         kfree(old_arr);
1415                         return iotag;
1416                 }
1417         } else
1418                 spin_unlock_irq(&phba->hbalock);
1419
1420         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1421                         "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1422                         psli->last_iotag);
1423
1424         return 0;
1425 }
1426
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which need to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	/* Only commands that expect a response carry a real iotag. */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	/* Make the IOCB copy visible before the put-index update below. */
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->cmdidx = pring->next_cmdidx;
	writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
1483
/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform firmware
 * that there are pending work to be done for this ring and requests an
 * interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	/* Order the flag update before the doorbell write below. */
	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
1514
/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 * NOTE(review): the attention write is skipped when
 * LPFC_SLI3_CRP_ENABLED is set -- presumably the port discovers new
 * work through the command ring polling feature in that mode; confirm.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		/* Order prior ring updates before the doorbell write. */
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}
1538
/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */
	if (pring->txq_cnt &&
	    lpfc_is_link_up(phba) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		/* Drain the txq while ring slots remain available. */
		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		/* iocb != NULL means the txq emptied first; otherwise the
		 * ring filled up and we must ask for a ring-available
		 * interrupt.
		 */
		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
1578
/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is free slot
 * available for the HBQ it will return pointer to the next available
 * HBQ entry else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	/*
	 * Advance the shadow put index (with wrap to 0) only when it has
	 * not already been advanced past the committed hbqPutIdx.
	 */
	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	/* Queue looks full - refresh the cached get index from the port. */
	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			/* Out-of-range index from hardware: fatal HBA error. */
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		/* Still full after the refresh - no slot available. */
		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}
1623
/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;
	uint32_t hbqno;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}
	/* Return all HBQ buffer that are in-fly */
	list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
				 list) {
		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
		list_del(&hbq_buf->dbuf.list);
		if (hbq_buf->tag == -1) {
			/* Untagged buffer: release via the ELS HBQ handler. */
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_buf);
		} else {
			/* High 16 bits of the tag encode the owning HBQ. */
			hbqno = hbq_buf->tag >> 16;
			if (hbqno >= LPFC_MAX_HBQS)
				(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
					(phba, hbq_buf);
			else
				(phba->hbqs[hbqno].hbq_free_buffer)(phba,
					hbq_buf);
		}
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
1677
/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer
 * to the firmware through the SLI-rev specific handler installed in
 * phba->lpfc_sli_hbq_to_firmware (the _s3 or _s4 variant below).
 * Returns zero if the buffer was successfully posted, otherwise a
 * negative error code. (The old comment claiming a pointer/NULL
 * return was stale; both handlers return int.)
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
1696
/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero if
 * it successfully post the buffer else it will return -ENOMEM.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		/* Fill the BDE with the buffer's DMA address, size and tag. */
		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
				/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
				/* flush */
		readl(phba->hbq_put + hbqno);
		/* Track the posted buffer until the firmware hands it back. */
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}
1736
/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;

	/* Header buffer goes to the header RQ, data buffer to the data RQ. */
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			      &hrqe, &drqe);
	if (rc < 0)
		return rc;
	/* On success rc is the RQ put index; remember it as the tag. */
	hbq_buf->tag = rc;
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}
1767
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	/* Post 40 buffers at init, replenish in batches of 40. */
	.init_count = 40,
	.add_count = 40,
};
1779
/* HBQ for the extra ring if needed */
static struct lpfc_hbq_init lpfc_extra_hbq = {
	.rn = 1,
	.entry_count = 200,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_EXTRA_RING),
	.buffer_count = 0,
	/* No buffers at init; add 5 at a time on demand. */
	.init_count = 0,
	.add_count = 5,
};
1791
/* Array of HBQ definitions, indexed by HBQ number (see lpfc_sli_hbq_count). */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
	&lpfc_extra_hbq,
};
1797
/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);
	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	/* Clamp count so we never exceed the HBQ's entry_count. */
	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries onto a private list before taking the lock. */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		/* Tag: buffer index in low 16 bits, HBQ number in high 16. */
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	/* HBQ was torn down while we allocated; release all staged buffers. */
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}
1857
1858 /**
1859  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
1860  * @phba: Pointer to HBA context object.
1861  * @qno: HBQ number.
1862  *
1863  * This function posts more buffers to the HBQ. This function
1864  * is called with no lock held. The function returns the number of HBQ entries
1865  * successfully allocated.
1866  **/
1867 int
1868 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1869 {
1870         if (phba->sli_rev == LPFC_SLI_REV4)
1871                 return 0;
1872         else
1873                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1874                                          lpfc_hbq_defs[qno]->add_count);
1875 }
1876
1877 /**
1878  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
1879  * @phba: Pointer to HBA context object.
1880  * @qno:  HBQ queue number.
1881  *
1882  * This function is called from SLI initialization code path with
1883  * no lock held to post initial HBQ buffers to firmware. The
1884  * function returns the number of HBQ entries successfully allocated.
1885  **/
1886 static int
1887 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1888 {
1889         if (phba->sli_rev == LPFC_SLI_REV4)
1890                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1891                                          lpfc_hbq_defs[qno]->entry_count);
1892         else
1893                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1894                                          lpfc_hbq_defs[qno]->init_count);
1895 }
1896
/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq buffer off of an hbq list
 * @rb_list: Pointer to the receive buffer list to take from.
 *
 * This function removes the first hbq buffer on the given hbq buffer list
 * and returns a pointer to that buffer. If it finds no buffers on the list
 * it returns NULL. (Old kernel-doc documented @phba/@hbqno parameters this
 * function does not take.)
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	/* The dmabuf is embedded in a hbq_dmabuf; return its container. */
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}
1915
/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function searches for the hbq buffer associated with the given tag
 * in the owning HBQ's buffer list. It acquires and releases the hbalock
 * itself, so the caller must NOT hold it (the old comment claiming
 * "called with hbalock held" contradicted the spin_lock_irq below).
 * If it finds the hbq buffer, it returns the hbq_buffer; otherwise it
 * logs an error and returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	/* High 16 bits of the tag identify the HBQ; validate the range. */
	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}
1951
/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with hbalock held. This function gives back
 * the hbq buffer to firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer. A NULL @hbq_buffer is a no-op.
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		/* High 16 bits of the tag identify the owning HBQ. */
		hbqno = hbq_buffer->tag >> 16;
		/* Repost to firmware; on failure release the buffer instead. */
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}
1972
1973 /**
1974  * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
1975  * @mbxCommand: mailbox command code.
1976  *
1977  * This function is called by the mailbox event handler function to verify
1978  * that the completed mailbox command is a legitimate mailbox command. If the
1979  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
1980  * and the mailbox event handler will take the HBA offline.
1981  **/
1982 static int
1983 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1984 {
1985         uint8_t ret;
1986
1987         switch (mbxCommand) {
1988         case MBX_LOAD_SM:
1989         case MBX_READ_NV:
1990         case MBX_WRITE_NV:
1991         case MBX_WRITE_VPARMS:
1992         case MBX_RUN_BIU_DIAG:
1993         case MBX_INIT_LINK:
1994         case MBX_DOWN_LINK:
1995         case MBX_CONFIG_LINK:
1996         case MBX_CONFIG_RING:
1997         case MBX_RESET_RING:
1998         case MBX_READ_CONFIG:
1999         case MBX_READ_RCONFIG:
2000         case MBX_READ_SPARM:
2001         case MBX_READ_STATUS:
2002         case MBX_READ_RPI:
2003         case MBX_READ_XRI:
2004         case MBX_READ_REV:
2005         case MBX_READ_LNK_STAT:
2006         case MBX_REG_LOGIN:
2007         case MBX_UNREG_LOGIN:
2008         case MBX_CLEAR_LA:
2009         case MBX_DUMP_MEMORY:
2010         case MBX_DUMP_CONTEXT:
2011         case MBX_RUN_DIAGS:
2012         case MBX_RESTART:
2013         case MBX_UPDATE_CFG:
2014         case MBX_DOWN_LOAD:
2015         case MBX_DEL_LD_ENTRY:
2016         case MBX_RUN_PROGRAM:
2017         case MBX_SET_MASK:
2018         case MBX_SET_VARIABLE:
2019         case MBX_UNREG_D_ID:
2020         case MBX_KILL_BOARD:
2021         case MBX_CONFIG_FARP:
2022         case MBX_BEACON:
2023         case MBX_LOAD_AREA:
2024         case MBX_RUN_BIU_DIAG64:
2025         case MBX_CONFIG_PORT:
2026         case MBX_READ_SPARM64:
2027         case MBX_READ_RPI64:
2028         case MBX_REG_LOGIN64:
2029         case MBX_READ_TOPOLOGY:
2030         case MBX_WRITE_WWN:
2031         case MBX_SET_DEBUG:
2032         case MBX_LOAD_EXP_ROM:
2033         case MBX_ASYNCEVT_ENABLE:
2034         case MBX_REG_VPI:
2035         case MBX_UNREG_VPI:
2036         case MBX_HEARTBEAT:
2037         case MBX_PORT_CAPABILITIES:
2038         case MBX_PORT_IOV_CONTROL:
2039         case MBX_SLI4_CONFIG:
2040         case MBX_SLI4_REQ_FTRS:
2041         case MBX_REG_FCFI:
2042         case MBX_UNREG_FCFI:
2043         case MBX_REG_VFI:
2044         case MBX_UNREG_VFI:
2045         case MBX_INIT_VPI:
2046         case MBX_INIT_VFI:
2047         case MBX_RESUME_RPI:
2048         case MBX_READ_EVENT_LOG_STATUS:
2049         case MBX_READ_EVENT_LOG:
2050         case MBX_SECURITY_MGMT:
2051         case MBX_AUTH_PORT:
2052                 ret = mbxCommand;
2053                 break;
2054         default:
2055                 ret = MBX_SHUTDOWN;
2056                 break;
2057         }
2058         return ret;
2059 }
2060
/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is completion handler function for mailbox commands issued from
 * lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up thread waiting on the wait queue pointed by context1
 * of the mailbox.
 **/
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	wait_queue_head_t *pdone_q;
	unsigned long drvr_flag;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	/* context1 is read under hbalock; NULL means no waiter remains. */
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}
2090
2091
/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue a UREG_LOGIN to re-claim the RPI.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Release the DMA buffer attached to the mailbox, if any. */
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/* SLI4: a completed UNREG_LOGIN with rsvd1 == 0 releases the RPI. */
	if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
	    (phba->sli_rev == LPFC_SLI_REV4) &&
	    (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0))
		lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);

	/*
	 * If a REG_LOGIN succeeded  after node is destroyed or node
	 * is in re-discovery driver need to cleanup the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		/* Reuse this mailbox for UNREG_LOGIN; it completes back here. */
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	/* Successful REG_VPI: mark this vport's VPI registered. */
	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
		!(phba->pport->load_flag & FC_UNLOADING) &&
		!pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	/* Release the ndlp reference stored in context2 for REG_LOGIN64. */
	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->context2;
		lpfc_nlp_put(ndlp);
		pmb->context2 = NULL;
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not done yet\n");

	/* SLI4_CONFIG mailboxes need their extended resources freed too. */
	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}
2168
2169 /**
2170  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2171  * @phba: Pointer to HBA context object.
2172  *
2173  * This function is called with no lock held. This function processes all
2174  * the completed mailbox commands and gives it to upper layers. The interrupt
2175  * service routine processes mailbox completion interrupt and adds completed
2176  * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2177  * Worker thread call lpfc_sli_handle_mb_event, which will return the
2178  * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2179  * function returns the mailbox commands to the upper layer by calling the
2180  * completion handler function of each mailbox.
2181  **/
2182 int
2183 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2184 {
2185         MAILBOX_t *pmbox;
2186         LPFC_MBOXQ_t *pmb;
2187         int rc;
2188         LIST_HEAD(cmplq);
2189
2190         phba->sli.slistat.mbox_event++;
2191
2192         /* Get all completed mailboxe buffers into the cmplq */
2193         spin_lock_irq(&phba->hbalock);
2194         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2195         spin_unlock_irq(&phba->hbalock);
2196
2197         /* Get a Mailbox buffer to setup mailbox commands for callback */
2198         do {
2199                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2200                 if (pmb == NULL)
2201                         break;
2202
2203                 pmbox = &pmb->u.mb;
2204
2205                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2206                         if (pmb->vport) {
2207                                 lpfc_debugfs_disc_trc(pmb->vport,
2208                                         LPFC_DISC_TRC_MBOX_VPORT,
2209                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2210                                         (uint32_t)pmbox->mbxCommand,
2211                                         pmbox->un.varWords[0],
2212                                         pmbox->un.varWords[1]);
2213                         }
2214                         else {
2215                                 lpfc_debugfs_disc_trc(phba->pport,
2216                                         LPFC_DISC_TRC_MBOX,
2217                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
2218                                         (uint32_t)pmbox->mbxCommand,
2219                                         pmbox->un.varWords[0],
2220                                         pmbox->un.varWords[1]);
2221                         }
2222                 }
2223
2224                 /*
2225                  * It is a fatal error if unknown mbox command completion.
2226                  */
2227                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2228                     MBX_SHUTDOWN) {
2229                         /* Unknown mailbox command compl */
2230                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2231                                         "(%d):0323 Unknown Mailbox command "
2232                                         "x%x (x%x) Cmpl\n",
2233                                         pmb->vport ? pmb->vport->vpi : 0,
2234                                         pmbox->mbxCommand,
2235                                         lpfc_sli4_mbox_opcode_get(phba, pmb));
2236                         phba->link_state = LPFC_HBA_ERROR;
2237                         phba->work_hs = HS_FFER3;
2238                         lpfc_handle_eratt(phba);
2239                         continue;
2240                 }
2241
2242                 if (pmbox->mbxStatus) {
2243                         phba->sli.slistat.mbox_stat_err++;
2244                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2245                                 /* Mbox cmd cmpl error - RETRYing */
2246                                 lpfc_printf_log(phba, KERN_INFO,
2247                                                 LOG_MBOX | LOG_SLI,
2248                                                 "(%d):0305 Mbox cmd cmpl "
2249                                                 "error - RETRYing Data: x%x "
2250                                                 "(x%x) x%x x%x x%x\n",
2251                                                 pmb->vport ? pmb->vport->vpi :0,
2252                                                 pmbox->mbxCommand,
2253                                                 lpfc_sli4_mbox_opcode_get(phba,
2254                                                                           pmb),
2255                                                 pmbox->mbxStatus,
2256                                                 pmbox->un.varWords[0],
2257                                                 pmb->vport->port_state);
2258                                 pmbox->mbxStatus = 0;
2259                                 pmbox->mbxOwner = OWN_HOST;
2260                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2261                                 if (rc != MBX_NOT_FINISHED)
2262                                         continue;
2263                         }
2264                 }
2265
2266                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2267                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2268                                 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
2269                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
2270                                 pmb->vport ? pmb->vport->vpi : 0,
2271                                 pmbox->mbxCommand,
2272                                 lpfc_sli4_mbox_opcode_get(phba, pmb),
2273                                 pmb->mbox_cmpl,
2274                                 *((uint32_t *) pmbox),
2275                                 pmbox->un.varWords[0],
2276                                 pmbox->un.varWords[1],
2277                                 pmbox->un.varWords[2],
2278                                 pmbox->un.varWords[3],
2279                                 pmbox->un.varWords[4],
2280                                 pmbox->un.varWords[5],
2281                                 pmbox->un.varWords[6],
2282                                 pmbox->un.varWords[7]);
2283
2284                 if (pmb->mbox_cmpl)
2285                         pmb->mbox_cmpl(phba,pmb);
2286         } while (1);
2287         return 0;
2288 }
2289
2290 /**
2291  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2292  * @phba: Pointer to HBA context object.
2293  * @pring: Pointer to driver SLI ring object.
2294  * @tag: buffer tag.
2295  *
2296  * This function is called with no lock held. When QUE_BUFTAG_BIT bit
2297  * is set in the tag the buffer is posted for a particular exchange,
2298  * the function will return the buffer without replacing the buffer.
2299  * If the buffer is for unsolicited ELS or CT traffic, this function
2300  * returns the buffer and also posts another buffer to the firmware.
2301  **/
2302 static struct lpfc_dmabuf *
2303 lpfc_sli_get_buff(struct lpfc_hba *phba,
2304                   struct lpfc_sli_ring *pring,
2305                   uint32_t tag)
2306 {
2307         struct hbq_dmabuf *hbq_entry;
2308
2309         if (tag & QUE_BUFTAG_BIT)
2310                 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2311         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2312         if (!hbq_entry)
2313                 return NULL;
2314         return &hbq_entry->dbuf;
2315 }
2316
2317 /**
2318  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2319  * @phba: Pointer to HBA context object.
2320  * @pring: Pointer to driver SLI ring object.
2321  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2322  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2323  * @fch_type: the type for the first frame of the sequence.
2324  *
2325  * This function is called with no lock held. This function uses the r_ctl and
2326  * type of the received sequence to find the correct callback function to call
2327  * to process the sequence.
2328  **/
2329 static int
2330 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2331                          struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2332                          uint32_t fch_type)
2333 {
2334         int i;
2335
2336         /* unSolicited Responses */
2337         if (pring->prt[0].profile) {
2338                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2339                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2340                                                                         saveq);
2341                 return 1;
2342         }
2343         /* We must search, based on rctl / type
2344            for the right routine */
2345         for (i = 0; i < pring->num_mask; i++) {
2346                 if ((pring->prt[i].rctl == fch_r_ctl) &&
2347                     (pring->prt[i].type == fch_type)) {
2348                         if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2349                                 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2350                                                 (phba, pring, saveq);
2351                         return 1;
2352                 }
2353         }
2354         return 0;
2355 }
2356
/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This function is called with no lock held by the ring event handler
 * when there is an unsolicited iocb posted to the response ring by the
 * firmware. This function gets the buffer associated with the iocbs
 * and calls the event handler for the ring. This function handles both
 * qring buffers and hbq buffers.
 * When the function returns 1 the caller can free the iocb object otherwise
 * upper layer functions will free the iocb objects.
 **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t           * irsp;
	WORD5            * w5p;
	uint32_t           Rctl, Type;
	uint32_t           match;		/* set but never read below */
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	match = 0;
	irsp = &(saveq->iocb);

	/* Async status events are dispatched to the ring's async handler,
	 * if one is registered; otherwise the event is logged and dropped.
	 */
	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	/* An XRI-return iocb in HBQ mode only carries buffers to be freed:
	 * release up to three buffers named by the BDE tags and return.
	 * NOTE(review): lpfc_sli_get_buff() can return NULL here; this
	 * relies on lpfc_in_buf_free() tolerating a NULL buffer — confirm.
	 */
	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
		(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
				irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	/* In HBQ mode, translate the buffer tags carried in this iocb (and
	 * in every chained iocb on saveq->list) into driver dmabufs, stashed
	 * in context2/context3 for the upper-layer handler.
	 */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	/* Multi-frame sequence assembly: continuation frames (or frames with
	 * intermediate status) are chained onto an earlier frame with the
	 * same XRI on the ring's iocb_continue_saveq. Only when the final
	 * frame (status != IOSTAT_INTERMED_RSP) arrives is the assembled
	 * sequence processed; otherwise return 0 so the iocb is kept.
	 */
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			/* NOTE(review): when !found, iocbq is the stale loop
			 * cursor (list head container), so this list_del_init
			 * and the saveq reassignment assume a matching first
			 * frame was already queued — verify that a final
			 * CONT64 frame can never arrive without one.
			 */
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	/* Derive the routing rctl/type: ELS receive commands are implicit;
	 * everything else carries them in word 5 of the first frame.
	 */
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}
2529
2530 /**
2531  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2532  * @phba: Pointer to HBA context object.
2533  * @pring: Pointer to driver SLI ring object.
2534  * @prspiocb: Pointer to response iocb object.
2535  *
2536  * This function looks up the iocb_lookup table to get the command iocb
2537  * corresponding to the given response iocb using the iotag of the
2538  * response iocb. This function is called with the hbalock held.
2539  * This function returns the command iocb object if it finds the command
2540  * iocb else returns NULL.
2541  **/
2542 static struct lpfc_iocbq *
2543 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2544                       struct lpfc_sli_ring *pring,
2545                       struct lpfc_iocbq *prspiocb)
2546 {
2547         struct lpfc_iocbq *cmd_iocb = NULL;
2548         uint16_t iotag;
2549
2550         iotag = prspiocb->iocb.ulpIoTag;
2551
2552         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2553                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2554                 list_del_init(&cmd_iocb->list);
2555                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
2556                         pring->txcmplq_cnt--;
2557                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
2558                 }
2559                 return cmd_iocb;
2560         }
2561
2562         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2563                         "0317 iotag x%x is out off "
2564                         "range: max iotag x%x wd0 x%x\n",
2565                         iotag, phba->sli.last_iotag,
2566                         *(((uint32_t *) &prspiocb->iocb) + 7));
2567         return NULL;
2568 }
2569
2570 /**
2571  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2572  * @phba: Pointer to HBA context object.
2573  * @pring: Pointer to driver SLI ring object.
2574  * @iotag: IOCB tag.
2575  *
2576  * This function looks up the iocb_lookup table to get the command iocb
2577  * corresponding to the given iotag. This function is called with the
2578  * hbalock held.
2579  * This function returns the command iocb object if it finds the command
2580  * iocb else returns NULL.
2581  **/
2582 static struct lpfc_iocbq *
2583 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2584                              struct lpfc_sli_ring *pring, uint16_t iotag)
2585 {
2586         struct lpfc_iocbq *cmd_iocb;
2587
2588         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2589                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2590                 list_del_init(&cmd_iocb->list);
2591                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
2592                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
2593                         pring->txcmplq_cnt--;
2594                 }
2595                 return cmd_iocb;
2596         }
2597
2598         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2599                         "0372 iotag x%x is out off range: max iotag (x%x)\n",
2600                         iotag, phba->sli.last_iotag);
2601         return NULL;
2602 }
2603
/**
 * lpfc_sli_process_sol_iocb - process solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * This function is called by the ring event handler for non-fcp
 * rings when there is a new response iocb in the response ring.
 * The caller is not required to hold any locks. This function
 * gets the command iocb associated with the response iocb and
 * calls the completion handler for the command iocb. If there
 * is no completion handler, the function will free the resources
 * associated with command iocb. If the response iocb is for
 * an already aborted command iocb, the status of the completion
 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 * This function always returns 1.
 **/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;		/* unconditional return value */
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(&phba->hbalock, iflag);
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * If an ELS command failed send an event to mgmt
			 * application.
			 */
			if (saveq->iocb.ulpStatus &&
			     (pring->ringno == LPFC_ELS_RING) &&
			     (cmdiocbp->iocb.ulpCommand ==
				CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
					cmdiocbp, saveq);

			/*
			 * Post all ELS completions to the worker thread.
			 * All other are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				/* SLI3: a driver-initiated abort is reported
				 * to the completion handler as LOCAL_REJECT/
				 * SLI_ABORTED, and data-buffer freeing is
				 * deferred (firmware may still be DMAing).
				 */
				if ((phba->sli_rev < LPFC_SLI_REV4) &&
				    (cmdiocbp->iocb_flag &
							LPFC_DRIVER_ABORTED)) {
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/* Firmware could still be in progress
					 * of DMAing payload, so don't free data
					 * buffer till after a hbeat.
					 */
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
				}
				if (phba->sli_rev == LPFC_SLI_REV4) {
					if (saveq->iocb_flag &
					    LPFC_EXCHANGE_BUSY) {
						/* Set cmdiocb flag for the
						 * exchange busy so sgl (xri)
						 * will not be released until
						 * the abort xri is received
						 * from hba.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag |=
							LPFC_EXCHANGE_BUSY;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
					if (cmdiocbp->iocb_flag &
					    LPFC_DRIVER_ABORTED) {
						/*
						 * Clear LPFC_DRIVER_ABORTED
						 * bit in case it was driver
						 * initiated abort.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag &=
							~LPFC_DRIVER_ABORTED;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
						cmdiocbp->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						cmdiocbp->iocb.un.ulpWord[4] =
							IOERR_ABORT_REQUESTED;
						/*
						 * For SLI4, irsiocb contains
						 * NO_XRI in sli_xritag, it
						 * shall not affect releasing
						 * sgl (xri) process.
						 */
						saveq->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						saveq->iocb.un.ulpWord[4] =
							IOERR_SLI_ABORTED;
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						saveq->iocb_flag |=
							LPFC_DELAY_MEM_FREE;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			/* No completion handler: just release the iocb. */
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					 "0322 Ring %d handler: "
					 "unexpected completion IoTag x%x "
					 "Data: x%x x%x x%x x%x\n",
					 pring->ringno,
					 saveq->iocb.ulpIoTag,
					 saveq->iocb.ulpStatus,
					 saveq->iocb.un.ulpWord[4],
					 saveq->iocb.ulpCommand,
					 saveq->iocb.ulpContext);
		}
	}

	return rc;
}
2757
2758 /**
2759  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
2760  * @phba: Pointer to HBA context object.
2761  * @pring: Pointer to driver SLI ring object.
2762  *
2763  * This function is called from the iocb ring event handlers when
2764  * put pointer is ahead of the get pointer for a ring. This function signal
2765  * an error attention condition to the worker thread and the worker
2766  * thread will transition the HBA to offline state.
2767  **/
2768 static void
2769 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2770 {
2771         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2772         /*
2773          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2774          * rsp ring <portRspMax>
2775          */
2776         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2777                         "0312 Ring %d handler: portRspPut %d "
2778                         "is bigger than rsp ring %d\n",
2779                         pring->ringno, le32_to_cpu(pgp->rspPutInx),
2780                         pring->numRiocb);
2781
2782         phba->link_state = LPFC_HBA_ERROR;
2783
2784         /*
2785          * All error attention handlers are posted to
2786          * worker thread
2787          */
2788         phba->work_ha |= HA_ERATT;
2789         phba->work_hs = HS_FFER3;
2790
2791         lpfc_worker_wake_up(phba);
2792
2793         return;
2794 }
2795
2796 /**
2797  * lpfc_poll_eratt - Error attention polling timer timeout handler
2798  * @ptr: Pointer to address of HBA context object.
2799  *
2800  * This function is invoked by the Error Attention polling timer when the
2801  * timer times out. It will check the SLI Error Attention register for
2802  * possible attention events. If so, it will post an Error Attention event
2803  * and wake up worker thread to process it. Otherwise, it will set up the
2804  * Error Attention polling timer for the next poll.
2805  **/
2806 void lpfc_poll_eratt(unsigned long ptr)
2807 {
2808         struct lpfc_hba *phba;
2809         uint32_t eratt = 0;
2810
2811         phba = (struct lpfc_hba *)ptr;
2812
2813         /* Check chip HA register for error event */
2814         eratt = lpfc_sli_check_eratt(phba);
2815
2816         if (eratt)
2817                 /* Tell the worker thread there is work to do */
2818                 lpfc_worker_wake_up(phba);
2819         else
2820                 /* Restart the timer for next eratt poll */
2821                 mod_timer(&phba->eratt_poll, jiffies +
2822                                         HZ * LPFC_ERATT_POLL_INTERVAL);
2823         return;
2824 }
2825
2826
2827 /**
2828  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
2829  * @phba: Pointer to HBA context object.
2830  * @pring: Pointer to driver SLI ring object.
2831  * @mask: Host attention register mask for this ring.
2832  *
2833  * This function is called from the interrupt context when there is a ring
2834  * event for the fcp ring. The caller does not hold any lock.
2835  * The function processes each response iocb in the response ring until it
2836  * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
2837  * LE bit set. The function will call the completion handler of the command iocb
2838  * if the response iocb indicates a completion for a command iocb or it is
2839  * an abort completion. The function will call lpfc_sli_process_unsol_iocb
2840  * function if this is an unsolicited iocb.
2841  * This routine presumes LPFC_FCP_RING handling and doesn't bother
2842  * to check it explicitly.
2843  */
2844 int
2845 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2846                                 struct lpfc_sli_ring *pring, uint32_t mask)
2847 {
2848         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2849         IOCB_t *irsp = NULL;
2850         IOCB_t *entry = NULL;
2851         struct lpfc_iocbq *cmdiocbq = NULL;
2852         struct lpfc_iocbq rspiocbq;
2853         uint32_t status;
2854         uint32_t portRspPut, portRspMax;
2855         int rc = 1;
2856         lpfc_iocb_type type;
2857         unsigned long iflag;
2858         uint32_t rsp_cmpl = 0;
2859
2860         spin_lock_irqsave(&phba->hbalock, iflag);
2861         pring->stats.iocb_event++;
2862
2863         /*
2864          * The next available response entry should never exceed the maximum
2865          * entries.  If it does, treat it as an adapter hardware error.
2866          */
2867         portRspMax = pring->numRiocb;
2868         portRspPut = le32_to_cpu(pgp->rspPutInx);
2869         if (unlikely(portRspPut >= portRspMax)) {
2870                 lpfc_sli_rsp_pointers_error(phba, pring);
2871                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2872                 return 1;
2873         }
2874         if (phba->fcp_ring_in_use) {
2875                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2876                 return 1;
2877         } else
2878                 phba->fcp_ring_in_use = 1;
2879
2880         rmb();
2881         while (pring->rspidx != portRspPut) {
2882                 /*
2883                  * Fetch an entry off the ring and copy it into a local data
2884                  * structure.  The copy involves a byte-swap since the
2885                  * network byte order and pci byte orders are different.
2886                  */
2887                 entry = lpfc_resp_iocb(phba, pring);
2888                 phba->last_completion_time = jiffies;
2889
2890                 if (++pring->rspidx >= portRspMax)
2891                         pring->rspidx = 0;
2892
2893                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2894                                       (uint32_t *) &rspiocbq.iocb,
2895                                       phba->iocb_rsp_size);
2896                 INIT_LIST_HEAD(&(rspiocbq.list));
2897                 irsp = &rspiocbq.iocb;
2898
2899                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2900                 pring->stats.iocb_rsp++;
2901                 rsp_cmpl++;
2902
2903                 if (unlikely(irsp->ulpStatus)) {
2904                         /*
2905                          * If resource errors reported from HBA, reduce
2906                          * queuedepths of the SCSI device.
2907                          */
2908                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2909                                 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2910                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2911                                 phba->lpfc_rampdown_queue_depth(phba);
2912                                 spin_lock_irqsave(&phba->hbalock, iflag);
2913                         }
2914
2915                         /* Rsp ring <ringno> error: IOCB */
2916                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2917                                         "0336 Rsp Ring %d error: IOCB Data: "
2918                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
2919                                         pring->ringno,
2920                                         irsp->un.ulpWord[0],
2921                                         irsp->un.ulpWord[1],
2922                                         irsp->un.ulpWord[2],
2923                                         irsp->un.ulpWord[3],
2924                                         irsp->un.ulpWord[4],
2925                                         irsp->un.ulpWord[5],
2926                                         *(uint32_t *)&irsp->un1,
2927                                         *((uint32_t *)&irsp->un1 + 1));
2928                 }
2929
2930                 switch (type) {
2931                 case LPFC_ABORT_IOCB:
2932                 case LPFC_SOL_IOCB:
2933                         /*
2934                          * Idle exchange closed via ABTS from port.  No iocb
2935                          * resources need to be recovered.
2936                          */
2937                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
2938                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2939                                                 "0333 IOCB cmd 0x%x"
2940                                                 " processed. Skipping"
2941                                                 " completion\n",
2942                                                 irsp->ulpCommand);
2943                                 break;
2944                         }
2945
2946                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2947                                                          &rspiocbq);
2948                         if (unlikely(!cmdiocbq))
2949                                 break;
2950                         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
2951                                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
2952                         if (cmdiocbq->iocb_cmpl) {
2953                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2954                                 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2955                                                       &rspiocbq);
2956                                 spin_lock_irqsave(&phba->hbalock, iflag);
2957                         }
2958                         break;
2959                 case LPFC_UNSOL_IOCB:
2960                         spin_unlock_irqrestore(&phba->hbalock, iflag);
2961                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2962                         spin_lock_irqsave(&phba->hbalock, iflag);
2963                         break;
2964                 default:
2965                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2966                                 char adaptermsg[LPFC_MAX_ADPTMSG];
2967                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2968                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2969                                        MAX_MSG_DATA);
2970                                 dev_warn(&((phba->pcidev)->dev),
2971                                          "lpfc%d: %s\n",
2972                                          phba->brd_no, adaptermsg);
2973                         } else {
2974                                 /* Unknown IOCB command */
2975                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2976                                                 "0334 Unknown IOCB command "
2977                                                 "Data: x%x, x%x x%x x%x x%x\n",
2978                                                 type, irsp->ulpCommand,
2979                                                 irsp->ulpStatus,
2980                                                 irsp->ulpIoTag,
2981                                                 irsp->ulpContext);
2982                         }
2983                         break;
2984                 }
2985
2986                 /*
2987                  * The response IOCB has been processed.  Update the ring
2988                  * pointer in SLIM.  If the port response put pointer has not
2989                  * been updated, sync the pgp->rspPutInx and fetch the new port
2990                  * response put pointer.
2991                  */
2992                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2993
2994                 if (pring->rspidx == portRspPut)
2995                         portRspPut = le32_to_cpu(pgp->rspPutInx);
2996         }
2997
2998         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
2999                 pring->stats.iocb_rsp_full++;
3000                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3001                 writel(status, phba->CAregaddr);
3002                 readl(phba->CAregaddr);
3003         }
3004         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3005                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3006                 pring->stats.iocb_cmd_empty++;
3007
3008                 /* Force update of the local copy of cmdGetInx */
3009                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
3010                 lpfc_sli_resume_iocb(phba, pring);
3011
3012                 if ((pring->lpfc_sli_cmd_available))
3013                         (pring->lpfc_sli_cmd_available) (phba, pring);
3014
3015         }
3016
3017         phba->fcp_ring_in_use = 0;
3018         spin_unlock_irqrestore(&phba->hbalock, iflag);
3019         return rc;
3020 }
3021
3022 /**
3023  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3024  * @phba: Pointer to HBA context object.
3025  * @pring: Pointer to driver SLI ring object.
3026  * @rspiocbp: Pointer to driver response IOCB object.
3027  *
3028  * This function is called from the worker thread when there is a slow-path
3029  * response IOCB to process. This function chains all the response iocbs until
3030  * seeing the iocb with the LE bit set. The function will call
3031  * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3032  * completion of a command iocb. The function will call the
3033  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3034  * The function frees the resources or calls the completion handler if this
3035  * iocb is an abort completion. The function returns NULL when the response
3036  * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3037  * this function shall chain the iocb on to the iocb_continueq and return the
3038  * response iocb passed in.
3039  **/
static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *rspiocbp)
{
	struct lpfc_iocbq *saveq;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *next_iocb;
	IOCB_t *irsp = NULL;
	uint32_t free_saveq;	/* nonzero when this routine owns the iocbs */
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	unsigned long iflag;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflag);
	/* First add the response iocb to the continueq list */
	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
	pring->iocb_continueq_cnt++;

	/* Now, determine whether the list is completed for processing */
	irsp = &rspiocbp->iocb;
	if (irsp->ulpLe) {
		/*
		 * By default, the driver expects to free all resources
		 * associated with this iocb completion.
		 */
		free_saveq = 1;
		saveq = list_get_first(&pring->iocb_continueq,
				       struct lpfc_iocbq, list);
		irsp = &(saveq->iocb);
		list_del_init(&pring->iocb_continueq);
		pring->iocb_continueq_cnt = 0;

		pring->stats.iocb_rsp++;

		/*
		 * If resource errors reported from HBA, reduce
		 * queuedepths of the SCSI device.
		 */
		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
			/* Drop hbalock across the ramp-down callback; it may
			 * take other locks.
			 */
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			phba->lpfc_rampdown_queue_depth(phba);
			spin_lock_irqsave(&phba->hbalock, iflag);
		}

		if (irsp->ulpStatus) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0328 Rsp Ring %d error: "
					"IOCB Data: "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7),
					*(((uint32_t *) irsp) + 8),
					*(((uint32_t *) irsp) + 9),
					*(((uint32_t *) irsp) + 10),
					*(((uint32_t *) irsp) + 11),
					*(((uint32_t *) irsp) + 12),
					*(((uint32_t *) irsp) + 13),
					*(((uint32_t *) irsp) + 14),
					*(((uint32_t *) irsp) + 15));
		}

		/*
		 * Fetch the IOCB command type and call the correct completion
		 * routine. Solicited and Unsolicited IOCBs on the ELS ring
		 * get freed back to the lpfc_iocb_list by the discovery
		 * kernel thread.
		 */
		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
		switch (type) {
		case LPFC_SOL_IOCB:
			/* hbalock is dropped around each completion handler
			 * below since the handlers may sleep or re-lock.
			 */
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;

		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			/* A zero return means the handler kept the iocbs;
			 * do not free them here.
			 */
			if (!rc)
				free_saveq = 0;
			break;

		case LPFC_ABORT_IOCB:
			cmdiocbp = NULL;
			/* XRI_ABORTED_CX has no originating command iocb to
			 * look up; skip the lookup in that case.
			 */
			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
								 saveq);
			if (cmdiocbp) {
				/* Call the specified completion routine */
				if (cmdiocbp->iocb_cmpl) {
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
							      saveq);
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
				} else
					__lpfc_sli_release_iocbq(phba,
								 cmdiocbp);
			}
			break;

		case LPFC_UNKNOWN_IOCB:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				/* The IOCB payload carries an ASCII message
				 * from the adapter; log it verbatim.
				 */
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *)irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0335 Unknown IOCB "
						"command Data: x%x "
						"x%x x%x x%x\n",
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		if (free_saveq) {
			/* Release every chained response iocb, then the
			 * head of the chain itself.
			 */
			list_for_each_entry_safe(rspiocbp, next_iocb,
						 &saveq->list, list) {
				list_del(&rspiocbp->list);
				__lpfc_sli_release_iocbq(phba, rspiocbp);
			}
			__lpfc_sli_release_iocbq(phba, saveq);
		}
		/* NULL return tells the caller the chain was consumed */
		rspiocbp = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rspiocbp;
}
3193
3194 /**
3195  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3196  * @phba: Pointer to HBA context object.
3197  * @pring: Pointer to driver SLI ring object.
3198  * @mask: Host attention register mask for this ring.
3199  *
3200  * This routine wraps the actual slow_ring event process routine from the
3201  * API jump table function pointer from the lpfc_hba struct.
3202  **/
3203 void
3204 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3205                                 struct lpfc_sli_ring *pring, uint32_t mask)
3206 {
3207         phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3208 }
3209
3210 /**
3211  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3212  * @phba: Pointer to HBA context object.
3213  * @pring: Pointer to driver SLI ring object.
3214  * @mask: Host attention register mask for this ring.
3215  *
3216  * This function is called from the worker thread when there is a ring event
3217  * for non-fcp rings. The caller does not hold any lock. The function will
3218  * remove each response iocb in the response ring and calls the handle
3219  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3220  **/
static void
lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp;
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	uint32_t portRspPut, portRspMax;
	unsigned long iflag;
	uint32_t status;

	pgp = &phba->port_gp[pring->ringno];
	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0303 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				pring->ringno, portRspPut, portRspMax);

		phba->link_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		/* Flag a firmware error and run the error attention handler */
		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return;
	}

	/* Order the rspPutInx read ahead of the ring entry reads below */
	rmb();
	while (pring->rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim.  This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation.  Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write.  When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = lpfc_resp_iocb(phba, pring);

		phba->last_completion_time = jiffies;
		rspiocbp = __lpfc_sli_get_iocbq(phba);
		if (rspiocbp == NULL) {
			/* Out of iocb buffers; leave the remaining ring
			 * entries for a later invocation.
			 */
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __func__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbp->iocb;

		/* Advance (and wrap) the local response get index */
		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		if (pring->ringno == LPFC_ELS_RING) {
			lpfc_debugfs_slow_ring_trc(phba,
			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
				*(((uint32_t *) irsp) + 4),
				*(((uint32_t *) irsp) + 6),
				*(((uint32_t *) irsp) + 7));
		}

		/* Publish the new get index to the adapter via SLIM */
		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* Handle the response IOCB */
		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
		spin_lock_irqsave(&phba->hbalock, iflag);

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
		if (pring->rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->rspidx != portRspPut) */

	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}
3343
3344 /**
3345  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3346  * @phba: Pointer to HBA context object.
3347  * @pring: Pointer to driver SLI ring object.
3348  * @mask: Host attention register mask for this ring.
3349  *
3350  * This function is called from the worker thread when there is a pending
3351  * ELS response iocb on the driver internal slow-path response iocb worker
3352  * queue. The caller does not hold any lock. The function will remove each
3353  * response iocb from the response worker queue and calls the handle
3354  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3355  **/
static void
lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_iocbq *irspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;
	unsigned long iflag;

	/* Clear the pending-event flag before draining the work queue */
	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irqsave(&phba->hbalock, iflag);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		/* Dispatch on the completion queue entry code */
		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			/* Translate ELS WCQE to response IOCBQ */
			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
								   irspiocbq);
			if (irspiocbq)
				lpfc_sli_sp_handle_rspiocb(phba, pring,
							   irspiocbq);
			break;
		case CQE_CODE_RECEIVE:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_sli4_handle_received_buffer(phba, dmabuf);
			break;
		default:
			/* Unrecognized CQE code: event is dropped */
			break;
		}
	}
}
3396
3397 /**
3398  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3399  * @phba: Pointer to HBA context object.
3400  * @pring: Pointer to driver SLI ring object.
3401  *
3402  * This function aborts all iocbs in the given ring and frees all the iocb
3403  * objects in txq. This function issues an abort iocb for all the iocb commands
3404  * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
3405  * the return of this function. The caller is not required to hold any locks.
3406  **/
3407 void
3408 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3409 {
3410         LIST_HEAD(completions);
3411         struct lpfc_iocbq *iocb, *next_iocb;
3412
3413         if (pring->ringno == LPFC_ELS_RING) {
3414                 lpfc_fabric_abort_hba(phba);
3415         }
3416
3417         /* Error everything on txq and txcmplq
3418          * First do the txq.
3419          */
3420         spin_lock_irq(&phba->hbalock);
3421         list_splice_init(&pring->txq, &completions);
3422         pring->txq_cnt = 0;
3423
3424         /* Next issue ABTS for everything on the txcmplq */
3425         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3426                 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3427
3428         spin_unlock_irq(&phba->hbalock);
3429
3430         /* Cancel all the IOCBs from the completions list */
3431         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3432                               IOERR_SLI_ABORTED);
3433 }
3434
3435 /**
3436  * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3437  * @phba: Pointer to HBA context object.
3438  *
3439  * This function flushes all iocbs in the fcp ring and frees all the iocb
3440  * objects in txq and txcmplq. This function will not issue abort iocbs
3441  * for all the iocb commands in txcmplq, they will just be returned with
3442  * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
3443  * slot has been permanently disabled.
3444  **/
3445 void
3446 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3447 {
3448         LIST_HEAD(txq);
3449         LIST_HEAD(txcmplq);
3450         struct lpfc_sli *psli = &phba->sli;
3451         struct lpfc_sli_ring  *pring;
3452
3453         /* Currently, only one fcp ring */
3454         pring = &psli->ring[psli->fcp_ring];
3455
3456         spin_lock_irq(&phba->hbalock);
3457         /* Retrieve everything on txq */
3458         list_splice_init(&pring->txq, &txq);
3459         pring->txq_cnt = 0;
3460
3461         /* Retrieve everything on the txcmplq */
3462         list_splice_init(&pring->txcmplq, &txcmplq);
3463         pring->txcmplq_cnt = 0;
3464         spin_unlock_irq(&phba->hbalock);
3465
3466         /* Flush the txq */
3467         lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3468                               IOERR_SLI_DOWN);
3469
3470         /* Flush the txcmpq */
3471         lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3472                               IOERR_SLI_DOWN);
3473 }
3474
3475 /**
3476  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
3477  * @phba: Pointer to HBA context object.
3478  * @mask: Bit mask to be checked.
3479  *
3480  * This function reads the host status register and compares
3481  * with the provided bit mask to check if HBA completed
3482  * the restart. This function will wait in a loop for the
3483  * HBA to complete restart. If the HBA does not restart within
3484  * 15 iterations, the function will reset the HBA again. The
3485  * function returns 1 when HBA fail to restart otherwise returns
3486  * zero.
3487  **/
static int
lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int i = 0;
	int retval = 0;

	/* Read the HBA Host Status Register */
	status = readl(phba->HSregaddr);

	/*
	 * Check status register every 10ms for 5 retries, then every
	 * 500ms for 5, then every 2.5 sec for 5, then reset board and
	 * every 2.5 sec for 4.
	 * Break out of the loop if errors occurred during init.
	 */
	while (((status & mask) != mask) &&
	       !(status & HS_FFERM) &&
	       i++ < 20) {

		if (i <= 5)
			msleep(10);
		else if (i <= 10)
			msleep(500);
		else
			msleep(2500);

		if (i == 15) {
				/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		status = readl(phba->HSregaddr);
	}

	/* Check to see if any errors occurred during init */
	if ((status & HS_FFERM) || (i >= 20)) {
		/* Log the failure with firmware trace words from SLIM */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2751 Adapter failed to restart, "
				"status reg x%x, FW Data: A8 x%x AC x%x\n",
				status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	}

	return retval;
}
3538
3539 /**
3540  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3541  * @phba: Pointer to HBA context object.
3542  * @mask: Bit mask to be checked.
3543  *
3544  * This function checks the host status register to check if HBA is
3545  * ready. This function will wait in a loop for the HBA to be ready
 * If the HBA is not ready, the function will reset the HBA PCI
 * function again. The function returns 1 when the HBA fails to be ready
3548  * otherwise returns zero.
3549  **/
3550 static int
3551 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3552 {
3553         uint32_t status;
3554         int retval = 0;
3555
3556         /* Read the HBA Host Status Register */
3557         status = lpfc_sli4_post_status_check(phba);
3558
3559         if (status) {
3560                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3561                 lpfc_sli_brdrestart(phba);
3562                 status = lpfc_sli4_post_status_check(phba);
3563         }
3564
3565         /* Check to see if any errors occurred during init */
3566         if (status) {
3567                 phba->link_state = LPFC_HBA_ERROR;
3568                 retval = 1;
3569         } else
3570                 phba->sli4_hba.intr_enable = 0;
3571
3572         return retval;
3573 }
3574
3575 /**
3576  * lpfc_sli_brdready - Wrapper func for checking the hba readyness
3577  * @phba: Pointer to HBA context object.
3578  * @mask: Bit mask to be checked.
3579  *
3580  * This routine wraps the actual SLI3 or SLI4 hba readyness check routine
3581  * from the API jump table function pointer from the lpfc_hba struct.
3582  **/
3583 int
3584 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3585 {
3586         return phba->lpfc_sli_brdready(phba, mask);
3587 }
3588
3589 #define BARRIER_TEST_PATTERN (0xdeadbeef)
3590
3591 /**
3592  * lpfc_reset_barrier - Make HBA ready for HBA reset
3593  * @phba: Pointer to HBA context object.
3594  *
3595  * This function is called before resetting an HBA. This
3596  * function requests HBA to quiesce DMAs before a reset.
3597  **/
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
	uint32_t __iomem *resp_buf;
	uint32_t __iomem *mbox_buf;
	volatile uint32_t mbox;
	uint32_t hc_copy;	/* saved HC register, restored on exit */
	int  i;
	uint8_t hdrtype;

	/* Only multi-function (PCI header type 0x80) Helios/Thor parts
	 * need this barrier; all other adapters return immediately.
	 */
	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
	if (hdrtype != 0x80 ||
	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
		return;

	/*
	 * Tell the other part of the chip to suspend temporarily all
	 * its DMA activity.
	 */
	resp_buf = phba->MBslimaddr;

	/* Disable the error attention */
	hc_copy = readl(phba->HCregaddr);
	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;

	if (readl(phba->HAregaddr) & HA_ERATT) {
		/* Clear Chip error bit */
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

	/* Build a KILL_BOARD mailbox word, initially owned by the chip */
	mbox = 0;
	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;

	/* Seed the response word with a known pattern so we can detect
	 * when the chip overwrites it with the pattern's complement.
	 */
	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
	mbox_buf = phba->MBslimaddr;
	writel(mbox, mbox_buf);

	/* Poll up to 50 x 1ms for the chip's acknowledgement */
	for (i = 0;
	     readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
		mdelay(1);

	if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
		    phba->pport->stopped)
			goto restore_hc;
		else
			goto clear_errat;
	}

	/* Hand the mailbox to the host and wait for it to be consumed */
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
	for (i = 0; readl(resp_buf) != mbox &&  i < 500; i++)
		mdelay(1);

clear_errat:

	/* Wait for the error attention to assert; note i continues from
	 * whatever value the preceding loop left it at.
	 */
	while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
		mdelay(1);

	if (readl(phba->HAregaddr) & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

restore_hc:
	/* Stop ignoring error attention and restore the saved HC value */
	phba->link_flag &= ~LS_IGNORE_ERATT;
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
}
3670
3671 /**
3672  * lpfc_sli_brdkill - Issue a kill_board mailbox command
3673  * @phba: Pointer to HBA context object.
3674  *
3675  * This function issues a kill_board mailbox command and waits for
3676  * the error attention interrupt. This function is called for stopping
3677  * the firmware processing. The caller is not required to hold any
3678  * locks. This function calls lpfc_hba_down_post function to free
3679  * any pending commands after the kill. The function will return 1 when it
3680  * fails to kill the board else will return 0.
3681  **/
3682 int
3683 lpfc_sli_brdkill(struct lpfc_hba *phba)
3684 {
3685         struct lpfc_sli *psli;
3686         LPFC_MBOXQ_t *pmb;
3687         uint32_t status;
3688         uint32_t ha_copy;
3689         int retval;
3690         int i = 0;
3691
3692         psli = &phba->sli;
3693
3694         /* Kill HBA */
3695         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3696                         "0329 Kill HBA Data: x%x x%x\n",
3697                         phba->pport->port_state, psli->sli_flag);
3698
3699         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3700         if (!pmb)
3701                 return 1;
3702
3703         /* Disable the error attention */
3704         spin_lock_irq(&phba->hbalock);
3705         status = readl(phba->HCregaddr);
3706         status &= ~HC_ERINT_ENA;
3707         writel(status, phba->HCregaddr);
3708         readl(phba->HCregaddr); /* flush */
3709         phba->link_flag |= LS_IGNORE_ERATT;
3710         spin_unlock_irq(&phba->hbalock);
3711
3712         lpfc_kill_board(phba, pmb);
3713         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3714         retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3715
3716         if (retval != MBX_SUCCESS) {
3717                 if (retval != MBX_BUSY)
3718                         mempool_free(pmb, phba->mbox_mem_pool);
3719                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3720                                 "2752 KILL_BOARD command failed retval %d\n",
3721                                 retval);
3722                 spin_lock_irq(&phba->hbalock);
3723                 phba->link_flag &= ~LS_IGNORE_ERATT;
3724                 spin_unlock_irq(&phba->hbalock);
3725                 return 1;
3726         }
3727
3728         spin_lock_irq(&phba->hbalock);
3729         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3730         spin_unlock_irq(&phba->hbalock);
3731
3732         mempool_free(pmb, phba->mbox_mem_pool);
3733
3734         /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
3735          * attention every 100ms for 3 seconds. If we don't get ERATT after
3736          * 3 seconds we still set HBA_ERROR state because the status of the
3737          * board is now undefined.
3738          */
3739         ha_copy = readl(phba->HAregaddr);
3740
3741         while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
3742                 mdelay(100);
3743                 ha_copy = readl(phba->HAregaddr);
3744         }
3745
3746         del_timer_sync(&psli->mbox_tmo);
3747         if (ha_copy & HA_ERATT) {
3748                 writel(HA_ERATT, phba->HAregaddr);
3749                 phba->pport->stopped = 1;
3750         }
3751         spin_lock_irq(&phba->hbalock);
3752         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3753         psli->mbox_active = NULL;
3754         phba->link_flag &= ~LS_IGNORE_ERATT;
3755         spin_unlock_irq(&phba->hbalock);
3756
3757         lpfc_hba_down_post(phba);
3758         phba->link_state = LPFC_HBA_ERROR;
3759
3760         return ha_copy & HA_ERATT ? 0 : 1;
3761 }
3762
3763 /**
3764  * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
3765  * @phba: Pointer to HBA context object.
3766  *
3767  * This function resets the HBA by writing HC_INITFF to the control
3768  * register. After the HBA resets, this function resets all the iocb ring
3769  * indices. This function disables PCI layer parity checking during
3770  * the reset.
3771  * This function returns 0 always.
3772  * The caller is not required to hold any locks.
3773  **/
3774 int
3775 lpfc_sli_brdreset(struct lpfc_hba *phba)
3776 {
3777         struct lpfc_sli *psli;
3778         struct lpfc_sli_ring *pring;
3779         uint16_t cfg_value;
3780         int i;
3781
3782         psli = &phba->sli;
3783
3784         /* Reset HBA */
3785         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3786                         "0325 Reset HBA Data: x%x x%x\n",
3787                         phba->pport->port_state, psli->sli_flag);
3788
3789         /* perform board reset */
3790         phba->fc_eventTag = 0;
3791         phba->link_events = 0;
3792         phba->pport->fc_myDID = 0;
3793         phba->pport->fc_prevDID = 0;
3794
3795         /* Turn off parity checking and serr during the physical reset */
3796         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3797         pci_write_config_word(phba->pcidev, PCI_COMMAND,
3798                               (cfg_value &
3799                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3800
3801         psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3802
3803         /* Now toggle INITFF bit in the Host Control Register */
3804         writel(HC_INITFF, phba->HCregaddr);
3805         mdelay(1);
3806         readl(phba->HCregaddr); /* flush */
3807         writel(0, phba->HCregaddr);
3808         readl(phba->HCregaddr); /* flush */
3809
3810         /* Restore PCI cmd register */
3811         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3812
3813         /* Initialize relevant SLI info */
3814         for (i = 0; i < psli->num_rings; i++) {
3815                 pring = &psli->ring[i];
3816                 pring->flag = 0;
3817                 pring->rspidx = 0;
3818                 pring->next_cmdidx  = 0;
3819                 pring->local_getidx = 0;
3820                 pring->cmdidx = 0;
3821                 pring->missbufcnt = 0;
3822         }
3823
3824         phba->link_state = LPFC_WARM_START;
3825         return 0;
3826 }
3827
3828 /**
3829  * lpfc_sli4_brdreset - Reset a sli-4 HBA
3830  * @phba: Pointer to HBA context object.
3831  *
3832  * This function resets a SLI4 HBA. This function disables PCI layer parity
3833  * checking during resets the device. The caller is not required to hold
3834  * any locks.
3835  *
3836  * This function returns 0 always.
3837  **/
3838 int
3839 lpfc_sli4_brdreset(struct lpfc_hba *phba)
3840 {
3841         struct lpfc_sli *psli = &phba->sli;
3842         uint16_t cfg_value;
3843         uint8_t qindx;
3844
3845         /* Reset HBA */
3846         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3847                         "0295 Reset HBA Data: x%x x%x\n",
3848                         phba->pport->port_state, psli->sli_flag);
3849
3850         /* perform board reset */
3851         phba->fc_eventTag = 0;
3852         phba->link_events = 0;
3853         phba->pport->fc_myDID = 0;
3854         phba->pport->fc_prevDID = 0;
3855
3856         spin_lock_irq(&phba->hbalock);
3857         psli->sli_flag &= ~(LPFC_PROCESS_LA);
3858         phba->fcf.fcf_flag = 0;
3859         /* Clean up the child queue list for the CQs */
3860         list_del_init(&phba->sli4_hba.mbx_wq->list);
3861         list_del_init(&phba->sli4_hba.els_wq->list);
3862         list_del_init(&phba->sli4_hba.hdr_rq->list);
3863         list_del_init(&phba->sli4_hba.dat_rq->list);
3864         list_del_init(&phba->sli4_hba.mbx_cq->list);
3865         list_del_init(&phba->sli4_hba.els_cq->list);
3866         for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3867                 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3868         for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
3869                 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3870         spin_unlock_irq(&phba->hbalock);
3871
3872         /* Now physically reset the device */
3873         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3874                         "0389 Performing PCI function reset!\n");
3875
3876         /* Turn off parity checking and serr during the physical reset */
3877         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3878         pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
3879                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3880
3881         /* Perform FCoE PCI function reset */
3882         lpfc_pci_function_reset(phba);
3883
3884         /* Restore PCI cmd register */
3885         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3886
3887         return 0;
3888 }
3889
3890 /**
3891  * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
3892  * @phba: Pointer to HBA context object.
3893  *
3894  * This function is called in the SLI initialization code path to
3895  * restart the HBA. The caller is not required to hold any lock.
3896  * This function writes MBX_RESTART mailbox command to the SLIM and
3897  * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
3898  * function to free any pending commands. The function enables
3899  * POST only during the first initialization. The function returns zero.
3900  * The function does not guarantee completion of MBX_RESTART mailbox
3901  * command before the return of this function.
3902  **/
3903 static int
3904 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
3905 {
3906         MAILBOX_t *mb;
3907         struct lpfc_sli *psli;
3908         volatile uint32_t word0;
3909         void __iomem *to_slim;
3910         uint32_t hba_aer_enabled;
3911
3912         spin_lock_irq(&phba->hbalock);
3913
3914         /* Take PCIe device Advanced Error Reporting (AER) state */
3915         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
3916
3917         psli = &phba->sli;
3918
3919         /* Restart HBA */
3920         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3921                         "0337 Restart HBA Data: x%x x%x\n",
3922                         phba->pport->port_state, psli->sli_flag);
3923
3924         word0 = 0;
3925         mb = (MAILBOX_t *) &word0;
3926         mb->mbxCommand = MBX_RESTART;
3927         mb->mbxHc = 1;
3928
3929         lpfc_reset_barrier(phba);
3930
3931         to_slim = phba->MBslimaddr;
3932         writel(*(uint32_t *) mb, to_slim);
3933         readl(to_slim); /* flush */
3934
3935         /* Only skip post after fc_ffinit is completed */
3936         if (phba->pport->port_state)
3937                 word0 = 1;      /* This is really setting up word1 */
3938         else
3939                 word0 = 0;      /* This is really setting up word1 */
3940         to_slim = phba->MBslimaddr + sizeof (uint32_t);
3941         writel(*(uint32_t *) mb, to_slim);
3942         readl(to_slim); /* flush */
3943
3944         lpfc_sli_brdreset(phba);
3945         phba->pport->stopped = 0;
3946         phba->link_state = LPFC_INIT_START;
3947         phba->hba_flag = 0;
3948         spin_unlock_irq(&phba->hbalock);
3949
3950         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3951         psli->stats_start = get_seconds();
3952
3953         /* Give the INITFF and Post time to settle. */
3954         mdelay(100);
3955
3956         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
3957         if (hba_aer_enabled)
3958                 pci_disable_pcie_error_reporting(phba->pcidev);
3959
3960         lpfc_hba_down_post(phba);
3961
3962         return 0;
3963 }
3964
3965 /**
3966  * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
3967  * @phba: Pointer to HBA context object.
3968  *
3969  * This function is called in the SLI initialization code path to restart
3970  * a SLI4 HBA. The caller is not required to hold any lock.
3971  * At the end of the function, it calls lpfc_hba_down_post function to
3972  * free any pending commands.
3973  **/
3974 static int
3975 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3976 {
3977         struct lpfc_sli *psli = &phba->sli;
3978         uint32_t hba_aer_enabled;
3979
3980         /* Restart HBA */
3981         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3982                         "0296 Restart HBA Data: x%x x%x\n",
3983                         phba->pport->port_state, psli->sli_flag);
3984
3985         /* Take PCIe device Advanced Error Reporting (AER) state */
3986         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
3987
3988         lpfc_sli4_brdreset(phba);
3989
3990         spin_lock_irq(&phba->hbalock);
3991         phba->pport->stopped = 0;
3992         phba->link_state = LPFC_INIT_START;
3993         phba->hba_flag = 0;
3994         spin_unlock_irq(&phba->hbalock);
3995
3996         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3997         psli->stats_start = get_seconds();
3998
3999         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4000         if (hba_aer_enabled)
4001                 pci_disable_pcie_error_reporting(phba->pcidev);
4002
4003         lpfc_hba_down_post(phba);
4004
4005         return 0;
4006 }
4007
4008 /**
4009  * lpfc_sli_brdrestart - Wrapper func for restarting hba
4010  * @phba: Pointer to HBA context object.
4011  *
4012  * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4013  * API jump table function pointer from the lpfc_hba struct.
4014 **/
4015 int
4016 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4017 {
4018         return phba->lpfc_sli_brdrestart(phba);
4019 }
4020
4021 /**
4022  * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4023  * @phba: Pointer to HBA context object.
4024  *
4025  * This function is called after a HBA restart to wait for successful
4026  * restart of the HBA. Successful restart of the HBA is indicated by
4027  * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4028  * iteration, the function will restart the HBA again. The function returns
4029  * zero if HBA successfully restarted else returns negative error code.
4030  **/
4031 static int
4032 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4033 {
4034         uint32_t status, i = 0;
4035
4036         /* Read the HBA Host Status Register */
4037         status = readl(phba->HSregaddr);
4038
4039         /* Check status register to see what current state is */
4040         i = 0;
4041         while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4042
4043                 /* Check every 10ms for 10 retries, then every 100ms for 90
4044                  * retries, then every 1 sec for 50 retires for a total of
4045                  * ~60 seconds before reset the board again and check every
4046                  * 1 sec for 50 retries. The up to 60 seconds before the
4047                  * board ready is required by the Falcon FIPS zeroization
4048                  * complete, and any reset the board in between shall cause
4049                  * restart of zeroization, further delay the board ready.
4050                  */
4051                 if (i++ >= 200) {
4052                         /* Adapter failed to init, timeout, status reg
4053                            <status> */
4054                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4055                                         "0436 Adapter failed to init, "
4056                                         "timeout, status reg x%x, "
4057                                         "FW Data: A8 x%x AC x%x\n", status,
4058                                         readl(phba->MBslimaddr + 0xa8),
4059                                         readl(phba->MBslimaddr + 0xac));
4060                         phba->link_state = LPFC_HBA_ERROR;
4061                         return -ETIMEDOUT;
4062                 }
4063
4064                 /* Check to see if any errors occurred during init */
4065                 if (status & HS_FFERM) {
4066                         /* ERROR: During chipset initialization */
4067                         /* Adapter failed to init, chipset, status reg
4068                            <status> */
4069                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4070                                         "0437 Adapter failed to init, "
4071                                         "chipset, status reg x%x, "
4072                                         "FW Data: A8 x%x AC x%x\n", status,
4073                                         readl(phba->MBslimaddr + 0xa8),
4074                                         readl(phba->MBslimaddr + 0xac));
4075                         phba->link_state = LPFC_HBA_ERROR;
4076                         return -EIO;
4077                 }
4078
4079                 if (i <= 10)
4080                         msleep(10);
4081                 else if (i <= 100)
4082                         msleep(100);
4083                 else
4084                         msleep(1000);
4085
4086                 if (i == 150) {
4087                         /* Do post */
4088                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4089                         lpfc_sli_brdrestart(phba);
4090                 }
4091                 /* Read the HBA Host Status Register */
4092                 status = readl(phba->HSregaddr);
4093         }
4094
4095         /* Check to see if any errors occurred during init */
4096         if (status & HS_FFERM) {
4097                 /* ERROR: During chipset initialization */
4098                 /* Adapter failed to init, chipset, status reg <status> */
4099                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4100                                 "0438 Adapter failed to init, chipset, "
4101                                 "status reg x%x, "
4102                                 "FW Data: A8 x%x AC x%x\n", status,
4103                                 readl(phba->MBslimaddr + 0xa8),
4104                                 readl(phba->MBslimaddr + 0xac));
4105                 phba->link_state = LPFC_HBA_ERROR;
4106                 return -EIO;
4107         }
4108
4109         /* Clear all interrupt enable conditions */
4110         writel(0, phba->HCregaddr);
4111         readl(phba->HCregaddr); /* flush */
4112
4113         /* setup host attn register */
4114         writel(0xffffffff, phba->HAregaddr);
4115         readl(phba->HAregaddr); /* flush */
4116         return 0;
4117 }
4118
4119 /**
4120  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4121  *
4122  * This function calculates and returns the number of HBQs required to be
4123  * configured.
4124  **/
4125 int
4126 lpfc_sli_hbq_count(void)
4127 {
4128         return ARRAY_SIZE(lpfc_hbq_defs);
4129 }
4130
4131 /**
4132  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4133  *
4134  * This function adds the number of hbq entries in every HBQ to get
4135  * the total number of hbq entries required for the HBA and returns
4136  * the total count.
4137  **/
4138 static int
4139 lpfc_sli_hbq_entry_count(void)
4140 {
4141         int  hbq_count = lpfc_sli_hbq_count();
4142         int  count = 0;
4143         int  i;
4144
4145         for (i = 0; i < hbq_count; ++i)
4146                 count += lpfc_hbq_defs[i]->entry_count;
4147         return count;
4148 }
4149
4150 /**
4151  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4152  *
4153  * This function calculates amount of memory required for all hbq entries
4154  * to be configured and returns the total memory required.
4155  **/
4156 int
4157 lpfc_sli_hbq_size(void)
4158 {
4159         return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4160 }
4161
4162 /**
4163  * lpfc_sli_hbq_setup - configure and initialize HBQs
4164  * @phba: Pointer to HBA context object.
4165  *
4166  * This function is called during the SLI initialization to configure
4167  * all the HBQs and post buffers to the HBQ. The caller is not
4168  * required to hold any locks. This function will return zero if successful
4169  * else it will return negative error code.
4170  **/
4171 static int
4172 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4173 {
4174         int  hbq_count = lpfc_sli_hbq_count();
4175         LPFC_MBOXQ_t *pmb;
4176         MAILBOX_t *pmbox;
4177         uint32_t hbqno;
4178         uint32_t hbq_entry_index;
4179
4180                                 /* Get a Mailbox buffer to setup mailbox
4181                                  * commands for HBA initialization
4182                                  */
4183         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4184
4185         if (!pmb)
4186                 return -ENOMEM;
4187
4188         pmbox = &pmb->u.mb;
4189
4190         /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4191         phba->link_state = LPFC_INIT_MBX_CMDS;
4192         phba->hbq_in_use = 1;
4193
4194         hbq_entry_index = 0;
4195         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4196                 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4197                 phba->hbqs[hbqno].hbqPutIdx      = 0;
4198                 phba->hbqs[hbqno].local_hbqGetIdx   = 0;
4199                 phba->hbqs[hbqno].entry_count =
4200                         lpfc_hbq_defs[hbqno]->entry_count;
4201                 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4202                         hbq_entry_index, pmb);
4203                 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4204
4205                 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4206                         /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4207                            mbxStatus <status>, ring <num> */
4208
4209                         lpfc_printf_log(phba, KERN_ERR,
4210                                         LOG_SLI | LOG_VPORT,
4211                                         "1805 Adapter failed to init. "
4212                                         "Data: x%x x%x x%x\n",
4213                                         pmbox->mbxCommand,
4214                                         pmbox->mbxStatus, hbqno);
4215
4216                         phba->link_state = LPFC_HBA_ERROR;
4217                         mempool_free(pmb, phba->mbox_mem_pool);
4218                         return -ENXIO;
4219                 }
4220         }
4221         phba->hbq_count = hbq_count;
4222
4223         mempool_free(pmb, phba->mbox_mem_pool);
4224
4225         /* Initially populate or replenish the HBQs */
4226         for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4227                 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4228         return 0;
4229 }
4230
4231 /**
4232  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4233  * @phba: Pointer to HBA context object.
4234  *
4235  * This function is called during the SLI initialization to configure
4236  * all the HBQs and post buffers to the HBQ. The caller is not
4237  * required to hold any locks. This function will return zero if successful
4238  * else it will return negative error code.
4239  **/
4240 static int
4241 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4242 {
4243         phba->hbq_in_use = 1;
4244         phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
4245         phba->hbq_count = 1;
4246         /* Initially populate or replenish the HBQs */
4247         lpfc_sli_hbqbuf_init_hbqs(phba, 0);
4248         return 0;
4249 }
4250
4251 /**
4252  * lpfc_sli_config_port - Issue config port mailbox command
4253  * @phba: Pointer to HBA context object.
4254  * @sli_mode: sli mode - 2/3
4255  *
4256  * This function is called by the sli intialization code path
4257  * to issue config_port mailbox command. This function restarts the
4258  * HBA firmware and issues a config_port mailbox command to configure
4259  * the SLI interface in the sli mode specified by sli_mode
4260  * variable. The caller is not required to hold any locks.
4261  * The function returns 0 if successful, else returns negative error
4262  * code.
4263  **/
4264 int
4265 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4266 {
4267         LPFC_MBOXQ_t *pmb;
4268         uint32_t resetcount = 0, rc = 0, done = 0;
4269
4270         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4271         if (!pmb) {
4272                 phba->link_state = LPFC_HBA_ERROR;
4273                 return -ENOMEM;
4274         }
4275
4276         phba->sli_rev = sli_mode;
4277         while (resetcount < 2 && !done) {
4278                 spin_lock_irq(&phba->hbalock);
4279                 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4280                 spin_unlock_irq(&phba->hbalock);
4281                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4282                 lpfc_sli_brdrestart(phba);
4283                 rc = lpfc_sli_chipset_init(phba);
4284                 if (rc)
4285                         break;
4286
4287                 spin_lock_irq(&phba->hbalock);
4288                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4289                 spin_unlock_irq(&phba->hbalock);
4290                 resetcount++;
4291
4292                 /* Call pre CONFIG_PORT mailbox command initialization.  A
4293                  * value of 0 means the call was successful.  Any other
4294                  * nonzero value is a failure, but if ERESTART is returned,
4295                  * the driver may reset the HBA and try again.
4296                  */
4297                 rc = lpfc_config_port_prep(phba);
4298                 if (rc == -ERESTART) {
4299                         phba->link_state = LPFC_LINK_UNKNOWN;
4300                         continue;
4301                 } else if (rc)
4302                         break;
4303                 phba->link_state = LPFC_INIT_MBX_CMDS;
4304                 lpfc_config_port(phba, pmb);
4305                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4306                 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4307                                         LPFC_SLI3_HBQ_ENABLED |
4308                                         LPFC_SLI3_CRP_ENABLED |
4309                                         LPFC_SLI3_BG_ENABLED |
4310                                         LPFC_SLI3_DSS_ENABLED);
4311                 if (rc != MBX_SUCCESS) {
4312                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4313                                 "0442 Adapter failed to init, mbxCmd x%x "
4314                                 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4315                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4316                         spin_lock_irq(&phba->hbalock);
4317                         phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4318                         spin_unlock_irq(&phba->hbalock);
4319                         rc = -ENXIO;
4320                 } else {
4321                         /* Allow asynchronous mailbox command to go through */
4322                         spin_lock_irq(&phba->hbalock);
4323                         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4324                         spin_unlock_irq(&phba->hbalock);
4325                         done = 1;
4326                 }
4327         }
4328         if (!done) {
4329                 rc = -EINVAL;
4330                 goto do_prep_failed;
4331         }
4332         if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4333                 if (!pmb->u.mb.un.varCfgPort.cMA) {
4334                         rc = -ENXIO;
4335                         goto do_prep_failed;
4336                 }
4337                 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
4338                         phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
4339                         phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4340                         phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4341                                 phba->max_vpi : phba->max_vports;
4342
4343                 } else
4344                         phba->max_vpi = 0;
4345                 phba->fips_level = 0;
4346                 phba->fips_spec_rev = 0;
4347                 if (pmb->u.mb.un.varCfgPort.gdss) {
4348                         phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
4349                         phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4350                         phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4351                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4352                                         "2850 Security Crypto Active. FIPS x%d "
4353                                         "(Spec Rev: x%d)",
4354                                         phba->fips_level, phba->fips_spec_rev);
4355                 }
4356                 if (pmb->u.mb.un.varCfgPort.sec_err) {
4357                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4358                                         "2856 Config Port Security Crypto "
4359                                         "Error: x%x ",
4360                                         pmb->u.mb.un.varCfgPort.sec_err);
4361                 }
4362                 if (pmb->u.mb.un.varCfgPort.gerbm)
4363                         phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
4364                 if (pmb->u.mb.un.varCfgPort.gcrp)
4365                         phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
4366
4367                 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4368                 phba->port_gp = phba->mbox->us.s3_pgp.port;
4369
4370                 if (phba->cfg_enable_bg) {
4371                         if (pmb->u.mb.un.varCfgPort.gbg)
4372                                 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4373                         else
4374                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4375                                                 "0443 Adapter did not grant "
4376                                                 "BlockGuard\n");
4377                 }
4378         } else {
4379                 phba->hbq_get = NULL;
4380                 phba->port_gp = phba->mbox->us.s2.port;
4381                 phba->max_vpi = 0;
4382         }
4383 do_prep_failed:
4384         mempool_free(pmb, phba->mbox_mem_pool);
4385         return rc;
4386 }
4387
4388
4389 /**
4390  * lpfc_sli_hba_setup - SLI intialization function
4391  * @phba: Pointer to HBA context object.
4392  *
4393  * This function is the main SLI intialization function. This function
4394  * is called by the HBA intialization code, HBA reset code and HBA
4395  * error attention handler code. Caller is not required to hold any
4396  * locks. This function issues config_port mailbox command to configure
4397  * the SLI, setup iocb rings and HBQ rings. In the end the function
4398  * calls the config_port_post function to issue init_link mailbox
4399  * command and to start the discovery. The function will return zero
4400  * if successful, else it will return negative error code.
4401  **/
4402 int
4403 lpfc_sli_hba_setup(struct lpfc_hba *phba)
4404 {
4405         uint32_t rc;
4406         int  mode = 3;
4407
4408         switch (lpfc_sli_mode) {
4409         case 2:
4410                 if (phba->cfg_enable_npiv) {
4411                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4412                                 "1824 NPIV enabled: Override lpfc_sli_mode "
4413                                 "parameter (%d) to auto (0).\n",
4414                                 lpfc_sli_mode);
4415                         break;
4416                 }
4417                 mode = 2;
4418                 break;
4419         case 0:
4420         case 3:
4421                 break;
4422         default:
4423                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4424                                 "1819 Unrecognized lpfc_sli_mode "
4425                                 "parameter: %d.\n", lpfc_sli_mode);
4426
4427                 break;
4428         }
4429
4430         rc = lpfc_sli_config_port(phba, mode);
4431
4432         if (rc && lpfc_sli_mode == 3)
4433                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4434                                 "1820 Unable to select SLI-3.  "
4435                                 "Not supported by adapter.\n");
4436         if (rc && mode != 2)
4437                 rc = lpfc_sli_config_port(phba, 2);
4438         if (rc)
4439                 goto lpfc_sli_hba_setup_error;
4440
4441         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4442         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4443                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4444                 if (!rc) {
4445                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4446                                         "2709 This device supports "
4447                                         "Advanced Error Reporting (AER)\n");
4448                         spin_lock_irq(&phba->hbalock);
4449                         phba->hba_flag |= HBA_AER_ENABLED;
4450                         spin_unlock_irq(&phba->hbalock);
4451                 } else {
4452                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4453                                         "2708 This device does not support "
4454                                         "Advanced Error Reporting (AER)\n");
4455                         phba->cfg_aer_support = 0;
4456                 }
4457         }
4458
4459         if (phba->sli_rev == 3) {
4460                 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4461                 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
4462         } else {
4463                 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4464                 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
4465                 phba->sli3_options = 0;
4466         }
4467
4468         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4469                         "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4470                         phba->sli_rev, phba->max_vpi);
4471         rc = lpfc_sli_ring_map(phba);
4472
4473         if (rc)
4474                 goto lpfc_sli_hba_setup_error;
4475
4476         /* Init HBQs */
4477         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4478                 rc = lpfc_sli_hbq_setup(phba);
4479                 if (rc)
4480                         goto lpfc_sli_hba_setup_error;
4481         }
4482         spin_lock_irq(&phba->hbalock);
4483         phba->sli.sli_flag |= LPFC_PROCESS_LA;
4484         spin_unlock_irq(&phba->hbalock);
4485
4486         rc = lpfc_config_port_post(phba);
4487         if (rc)
4488                 goto lpfc_sli_hba_setup_error;
4489
4490         return rc;
4491
4492 lpfc_sli_hba_setup_error:
4493         phba->link_state = LPFC_HBA_ERROR;
4494         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4495                         "0445 Firmware initialization failed\n");
4496         return rc;
4497 }
4498
4499 /**
4500  * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4501  * @phba: Pointer to HBA context object.
4502  * @mboxq: mailbox pointer.
4503  * This function issue a dump mailbox command to read config region
4504  * 23 and parse the records in the region and populate driver
4505  * data structure.
4506  **/
4507 static int
4508 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4509                 LPFC_MBOXQ_t *mboxq)
4510 {
4511         struct lpfc_dmabuf *mp;
4512         struct lpfc_mqe *mqe;
4513         uint32_t data_length;
4514         int rc;
4515
4516         /* Program the default value of vlan_id and fc_map */
4517         phba->valid_vlan = 0;
4518         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4519         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4520         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4521
4522         mqe = &mboxq->u.mqe;
4523         if (lpfc_dump_fcoe_param(phba, mboxq))
4524                 return -ENOMEM;
4525
4526         mp = (struct lpfc_dmabuf *) mboxq->context1;
4527         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4528
4529         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4530                         "(%d):2571 Mailbox cmd x%x Status x%x "
4531                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4532                         "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4533                         "CQ: x%x x%x x%x x%x\n",
4534                         mboxq->vport ? mboxq->vport->vpi : 0,
4535                         bf_get(lpfc_mqe_command, mqe),
4536                         bf_get(lpfc_mqe_status, mqe),
4537                         mqe->un.mb_words[0], mqe->un.mb_words[1],
4538                         mqe->un.mb_words[2], mqe->un.mb_words[3],
4539                         mqe->un.mb_words[4], mqe->un.mb_words[5],
4540                         mqe->un.mb_words[6], mqe->un.mb_words[7],
4541                         mqe->un.mb_words[8], mqe->un.mb_words[9],
4542                         mqe->un.mb_words[10], mqe->un.mb_words[11],
4543                         mqe->un.mb_words[12], mqe->un.mb_words[13],
4544                         mqe->un.mb_words[14], mqe->un.mb_words[15],
4545                         mqe->un.mb_words[16], mqe->un.mb_words[50],
4546                         mboxq->mcqe.word0,
4547                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
4548                         mboxq->mcqe.trailer);
4549
4550         if (rc) {
4551                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4552                 kfree(mp);
4553                 return -EIO;
4554         }
4555         data_length = mqe->un.mb_words[5];
4556         if (data_length > DMP_RGN23_SIZE) {
4557                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4558                 kfree(mp);
4559                 return -EIO;
4560         }
4561
4562         lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4563         lpfc_mbuf_free(phba, mp->virt, mp->phys);
4564         kfree(mp);
4565         return 0;
4566 }
4567
4568 /**
4569  * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4570  * @phba: pointer to lpfc hba data structure.
4571  * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4572  * @vpd: pointer to the memory to hold resulting port vpd data.
4573  * @vpd_size: On input, the number of bytes allocated to @vpd.
4574  *            On output, the number of data bytes in @vpd.
4575  *
4576  * This routine executes a READ_REV SLI4 mailbox command.  In
4577  * addition, this routine gets the port vpd data.
4578  *
4579  * Return codes
4580  *      0 - successful
4581  *      -ENOMEM - could not allocated memory.
4582  **/
4583 static int
4584 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4585                     uint8_t *vpd, uint32_t *vpd_size)
4586 {
4587         int rc = 0;
4588         uint32_t dma_size;
4589         struct lpfc_dmabuf *dmabuf;
4590         struct lpfc_mqe *mqe;
4591
4592         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4593         if (!dmabuf)
4594                 return -ENOMEM;
4595
4596         /*
4597          * Get a DMA buffer for the vpd data resulting from the READ_REV
4598          * mailbox command.
4599          */
4600         dma_size = *vpd_size;
4601         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4602                                           dma_size,
4603                                           &dmabuf->phys,
4604                                           GFP_KERNEL);
4605         if (!dmabuf->virt) {
4606                 kfree(dmabuf);
4607                 return -ENOMEM;
4608         }
4609         memset(dmabuf->virt, 0, dma_size);
4610
4611         /*
4612          * The SLI4 implementation of READ_REV conflicts at word1,
4613          * bits 31:16 and SLI4 adds vpd functionality not present
4614          * in SLI3.  This code corrects the conflicts.
4615          */
4616         lpfc_read_rev(phba, mboxq);
4617         mqe = &mboxq->u.mqe;
4618         mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4619         mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4620         mqe->un.read_rev.word1 &= 0x0000FFFF;
4621         bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4622         bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4623
4624         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4625         if (rc) {
4626                 dma_free_coherent(&phba->pcidev->dev, dma_size,
4627                                   dmabuf->virt, dmabuf->phys);
4628                 kfree(dmabuf);
4629                 return -EIO;
4630         }
4631
4632         /*
4633          * The available vpd length cannot be bigger than the
4634          * DMA buffer passed to the port.  Catch the less than
4635          * case and update the caller's size.
4636          */
4637         if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4638                 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4639
4640         memcpy(vpd, dmabuf->virt, *vpd_size);
4641
4642         dma_free_coherent(&phba->pcidev->dev, dma_size,
4643                           dmabuf->virt, dmabuf->phys);
4644         kfree(dmabuf);
4645         return 0;
4646 }
4647
4648 /**
4649  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4650  * @phba: pointer to lpfc hba data structure.
4651  *
4652  * This routine is called to explicitly arm the SLI4 device's completion and
4653  * event queues
4654  **/
4655 static void
4656 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4657 {
4658         uint8_t fcp_eqidx;
4659
4660         lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4661         lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4662         for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4663                 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4664                                      LPFC_QUEUE_REARM);
4665         lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4666         for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4667                 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4668                                      LPFC_QUEUE_REARM);
4669 }
4670
/**
 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI4 device initialization PCI function. This
 * function is called by the HBA initialization code, HBA reset code and
 * HBA error attention handler code. Caller is not required to hold any
 * locks.
 *
 * Sequence: PCI function reset, port-ready check, FCoE parameter read,
 * READ_REV/vpd parse, feature negotiation, service-parameter read, SGL and
 * rpi posting, queue setup, queue/interrupt arming, timer start, optional
 * AER enable, FCFI registration (FC mode), then link init.
 *
 * Returns 0 on success or a negative errno on failure.
 **/
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
	int rc;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	uint8_t *vpd;
	uint32_t vpd_size;
	uint32_t ftr_rsp = 0;	/* counts host-requested features the port lacks */
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_dmabuf *mp;

	/* Perform a PCI function reset to start from clean */
	rc = lpfc_pci_function_reset(phba);
	if (unlikely(rc))
		return -ENODEV;

	/* Check the HBA Host Status Register for readiness */
	rc = lpfc_sli4_post_status_check(phba);
	if (unlikely(rc))
		return -ENODEV;
	else {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
	}

	/*
	 * Allocate a single mailbox container for initializing the
	 * port.  It is reused for every mailbox command issued below.
	 */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/*
	 * Continue initialization with default values even if driver failed
	 * to read FCoE param config regions
	 */
	if (lpfc_sli4_read_fcoe_params(phba, mboxq))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"2570 Failed to read FCoE parameters\n");

	/* Issue READ_REV to collect vpd and FW information. */
	vpd_size = SLI4_PAGE_SIZE;
	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd) {
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	if (unlikely(rc)) {
		kfree(vpd);
		goto out_free_mbox;
	}
	/* Derive SLI level and FCoE/FIP capability from the READ_REV reply */
	mqe = &mboxq->u.mqe;
	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
		phba->hba_flag |= HBA_FCOE_MODE;
	else
		phba->hba_flag &= ~HBA_FCOE_MODE;

	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
		LPFC_DCBX_CEE_MODE)
		phba->hba_flag |= HBA_FIP_SUPPORT;
	else
		phba->hba_flag &= ~HBA_FIP_SUPPORT;

	/* This driver path only supports SLI-4 ports in FCoE mode */
	if (phba->sli_rev != LPFC_SLI_REV4 ||
	    !(phba->hba_flag & HBA_FCOE_MODE)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0376 READ_REV Error. SLI Level %d "
			"FCoE enabled %d\n",
			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
		rc = -EIO;
		kfree(vpd);
		goto out_free_mbox;
	}
	/*
	 * Evaluate the read rev and vpd data. Populate the driver
	 * state with the results. If this routine fails, the failure
	 * is not fatal as the driver will use generic values.
	 */
	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
	if (unlikely(!rc)) {
		/* NOTE(review): lpfc_parse_vpd appears to return nonzero on
		 * success and 0 on failure (rc==0 is treated as the error
		 * case here) -- confirm against its definition.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0377 Error %d parsing vpd. "
				"Using defaults.\n", rc);
		rc = 0;
	}
	kfree(vpd);

	/* Save information as VPD data */
	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
					 &mqe->un.read_rev);
	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
				       &mqe->un.read_rev);
	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
					    &mqe->un.read_rev);
	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
					   &mqe->un.read_rev);
	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0380 READ_REV Status x%x "
			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_status, mqe),
			phba->vpd.rev.opFwName,
			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);

	/*
	 * Discover the port's supported feature set and match it against the
	 * hosts requests.
	 */
	lpfc_request_features(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/*
	 * The port must support FCP initiator mode as this is the
	 * only mode running in the host.
	 */
	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0378 No support for fcpi mode.\n");
		ftr_rsp++;
	}

	/*
	 * If the port cannot support the host's requested features
	 * then turn off the global config parameters to disable the
	 * feature in the driver.  This is not a fatal error.
	 */
	if ((phba->cfg_enable_bg) &&
	    !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
		ftr_rsp++;

	if (phba->max_vpi && phba->cfg_enable_npiv &&
	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
		ftr_rsp++;

	if (ftr_rsp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0379 Feature Mismatch Data: x%08x %08x "
				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
				phba->cfg_enable_npiv, phba->max_vpi);
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
			phba->cfg_enable_bg = 0;
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
			phba->cfg_enable_npiv = 0;
	}

	/* These SLI3 features are assumed in SLI4 */
	spin_lock_irq(&phba->hbalock);
	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
	spin_unlock_irq(&phba->hbalock);

	/* Read the port's service parameters. */
	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
	if (rc) {
		phba->link_state = LPFC_HBA_ERROR;
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	mboxq->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	if (rc == MBX_SUCCESS) {
		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
		rc = 0;
	}

	/*
	 * This memory was allocated by the lpfc_read_sparam routine. Release
	 * it to the mbuf pool.
	 */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mboxq->context1 = NULL;
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0382 READ_SPARAM command failed "
				"status %d, mbxStatus x%x\n",
				rc, bf_get(lpfc_mqe_status, mqe));
		phba->link_state = LPFC_HBA_ERROR;
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Apply user-configured soft WWNs before publishing names */
	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof(struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);

	/* Register SGL pool to the device using non-embedded mailbox command */
	rc = lpfc_sli4_post_sgl_list(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0582 Error %d during sgl post operation\n",
					rc);
		rc = -ENODEV;
		goto out_free_mbox;
	}

	/* Register SCSI SGL pool to the device */
	rc = lpfc_sli4_repost_scsi_sgl_list(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0383 Error %d during scsi sgl post "
				"operation\n", rc);
		/* Some Scsi buffers were moved to the abort scsi list */
		/* A pci function reset will repost them */
		rc = -ENODEV;
		goto out_free_mbox;
	}

	/* Post the rpi header region to the device. */
	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0393 Error %d during rpi post operation\n",
				rc);
		rc = -ENODEV;
		goto out_free_mbox;
	}

	/* Set up all the queues to the device */
	rc = lpfc_sli4_queue_setup(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0381 Error %d during queue setup.\n ", rc);
		/* Timers below have not started yet; stopping them is a
		 * harmless no-op on this path.
		 */
		goto out_stop_timers;
	}

	/* Arm the CQs and then EQs on device */
	lpfc_sli4_arm_cqeq_intr(phba);

	/* Indicate device interrupt mode */
	phba->sli4_hba.intr_enable = 1;

	/* Allow asynchronous mailbox command to go through */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* Post receive buffers to the device */
	lpfc_sli4_rb_setup(phba);

	/* Reset HBA FCF states after HBA reset */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/* Start the ELS watchdog timer */
	mod_timer(&vport->els_tmofunc,
		  jiffies + HZ * (phba->fc_ratov * 2));

	/* Start heart beat timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;

	/* Start error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2829 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* AER failure is non-fatal; disable the option */
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2830 This device does not support "
					"Advanced Error Reporting (AER)\n");
			phba->cfg_aer_support = 0;
		}
	}

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		/*
		 * The FC Port needs to register FCFI (index 0)
		 */
		lpfc_reg_fcfi(phba, mboxq);
		mboxq->vport = phba->pport;
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc == MBX_SUCCESS)
			rc = 0;
		else
			goto out_unset_queue;
	}
	/*
	 * The port is ready, set the host's link state to LINK_DOWN
	 * in preparation for link interrupts.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_DOWN;
	spin_unlock_irq(&phba->hbalock);
	rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
out_unset_queue:
	/* Unset all the queues set up in this routine when error out */
	if (rc)
		lpfc_sli4_queue_unset(phba);
out_stop_timers:
	if (rc)
		lpfc_stop_hba_timers(phba);
out_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
5020
5021 /**
5022  * lpfc_mbox_timeout - Timeout call back function for mbox timer
5023  * @ptr: context object - pointer to hba structure.
5024  *
5025  * This is the callback function for mailbox timer. The mailbox
5026  * timer is armed when a new mailbox command is issued and the timer
5027  * is deleted when the mailbox complete. The function is called by
5028  * the kernel timer code when a mailbox does not complete within
5029  * expected time. This function wakes up the worker thread to
5030  * process the mailbox timeout and returns. All the processing is
5031  * done by the worker thread function lpfc_mbox_timeout_handler.
5032  **/
5033 void
5034 lpfc_mbox_timeout(unsigned long ptr)
5035 {
5036         struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
5037         unsigned long iflag;
5038         uint32_t tmo_posted;
5039
5040         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
5041         tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
5042         if (!tmo_posted)
5043                 phba->pport->work_port_events |= WORKER_MBOX_TMO;
5044         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
5045
5046         if (!tmo_posted)
5047                 lpfc_worker_wake_up(phba);
5048         return;
5049 }
5050
5051
5052 /**
5053  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
5054  * @phba: Pointer to HBA context object.
5055  *
5056  * This function is called from worker thread when a mailbox command times out.
5057  * The caller is not required to hold any locks. This function will reset the
5058  * HBA and recover all the pending commands.
5059  **/
5060 void
5061 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
5062 {
5063         LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
5064         MAILBOX_t *mb = &pmbox->u.mb;
5065         struct lpfc_sli *psli = &phba->sli;
5066         struct lpfc_sli_ring *pring;
5067
5068         /* Check the pmbox pointer first.  There is a race condition
5069          * between the mbox timeout handler getting executed in the
5070          * worklist and the mailbox actually completing. When this
5071          * race condition occurs, the mbox_active will be NULL.
5072          */
5073         spin_lock_irq(&phba->hbalock);
5074         if (pmbox == NULL) {
5075                 lpfc_printf_log(phba, KERN_WARNING,
5076                                 LOG_MBOX | LOG_SLI,
5077                                 "0353 Active Mailbox cleared - mailbox timeout "
5078                                 "exiting\n");
5079                 spin_unlock_irq(&phba->hbalock);
5080                 return;
5081         }
5082
5083         /* Mbox cmd <mbxCommand> timeout */
5084         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5085                         "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
5086                         mb->mbxCommand,
5087                         phba->pport->port_state,
5088                         phba->sli.sli_flag,
5089                         phba->sli.mbox_active);
5090         spin_unlock_irq(&phba->hbalock);
5091
5092         /* Setting state unknown so lpfc_sli_abort_iocb_ring
5093          * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
5094          * it to fail all oustanding SCSI IO.
5095          */
5096         spin_lock_irq(&phba->pport->work_port_lock);
5097         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
5098         spin_unlock_irq(&phba->pport->work_port_lock);
5099         spin_lock_irq(&phba->hbalock);
5100         phba->link_state = LPFC_LINK_UNKNOWN;
5101         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
5102         spin_unlock_irq(&phba->hbalock);
5103
5104         pring = &psli->ring[psli->fcp_ring];
5105         lpfc_sli_abort_iocb_ring(phba, pring);
5106
5107         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5108                         "0345 Resetting board due to mailbox timeout\n");
5109
5110         /* Reset the HBA device */
5111         lpfc_reset_hba(phba);
5112 }
5113
5114 /**
5115  * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
5116  * @phba: Pointer to HBA context object.
5117  * @pmbox: Pointer to mailbox object.
5118  * @flag: Flag indicating how the mailbox need to be processed.
5119  *
5120  * This function is called by discovery code and HBA management code
5121  * to submit a mailbox command to firmware with SLI-3 interface spec. This
5122  * function gets the hbalock to protect the data structures.
5123  * The mailbox command can be submitted in polling mode, in which case
5124  * this function will wait in a polling loop for the completion of the
5125  * mailbox.
5126  * If the mailbox is submitted in no_wait mode (not polling) the
5127  * function will submit the command and returns immediately without waiting
5128  * for the mailbox completion. The no_wait is supported only when HBA
5129  * is in SLI2/SLI3 mode - interrupts are enabled.
5130  * The SLI interface allows only one mailbox pending at a time. If the
5131  * mailbox is issued in polling mode and there is already a mailbox
5132  * pending, then the function will return an error. If the mailbox is issued
5133  * in NO_WAIT mode and there is a mailbox pending already, the function
5134  * will return MBX_BUSY after queuing the mailbox into mailbox queue.
5135  * The sli layer owns the mailbox object until the completion of mailbox
5136  * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
5137  * return codes the caller owns the mailbox command after the return of
5138  * the function.
5139  **/
5140 static int
5141 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
5142                        uint32_t flag)
5143 {
5144         MAILBOX_t *mb;
5145         struct lpfc_sli *psli = &phba->sli;
5146         uint32_t status, evtctr;
5147         uint32_t ha_copy;
5148         int i;
5149         unsigned long timeout;
5150         unsigned long drvr_flag = 0;
5151         uint32_t word0, ldata;
5152         void __iomem *to_slim;
5153         int processing_queue = 0;
5154
5155         spin_lock_irqsave(&phba->hbalock, drvr_flag);
5156         if (!pmbox) {
5157                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5158                 /* processing mbox queue from intr_handler */
5159                 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5160                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
5161                         return MBX_SUCCESS;
5162                 }
5163                 processing_queue = 1;
5164                 pmbox = lpfc_mbox_get(phba);
5165                 if (!pmbox) {
5166                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
5167                         return MBX_SUCCESS;
5168                 }
5169         }
5170
5171         if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
5172                 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
5173                 if(!pmbox->vport) {
5174                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
5175                         lpfc_printf_log(phba, KERN_ERR,
5176                                         LOG_MBOX | LOG_VPORT,
5177                                         "1806 Mbox x%x failed. No vport\n",
5178                                         pmbox->u.mb.mbxCommand);
5179                         dump_stack();
5180                         goto out_not_finished;
5181                 }
5182         }
5183
5184         /* If the PCI channel is in offline state, do not post mbox. */
5185         if (unlikely(pci_channel_offline(phba->pcidev))) {
5186                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
5187                 goto out_not_finished;
5188         }
5189
5190         /* If HBA has a deferred error attention, fail the iocb. */
5191         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5192                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
5193                 goto out_not_finished;
5194         }
5195
5196         psli = &phba->sli;
5197
5198         mb = &pmbox->u.mb;
5199         status = MBX_SUCCESS;
5200
5201         if (phba->link_state == LPFC_HBA_ERROR) {
5202                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
5203
5204                 /* Mbox command <mbxCommand> cannot issue */
5205                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5206                                 "(%d):0311 Mailbox command x%x cannot "
5207                                 "issue Data: x%x x%x\n",
5208                                 pmbox->vport ? pmbox->vport->vpi : 0,
5209                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
5210                 goto out_not_finished;
5211         }
5212
5213         if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
5214             !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
5215                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
5216                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5217                                 "(%d):2528 Mailbox command x%x cannot "
5218                                 "issue Data: x%x x%x\n",
5219                                 pmbox->vport ? pmbox->vport->vpi : 0,
5220                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
5221                 goto out_not_finished;
5222         }
5223
5224         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5225                 /* Polling for a mbox command when another one is already active
5226                  * is not allowed in SLI. Also, the driver must have established
5227                  * SLI2 mode to queue and process multiple mbox commands.
5228                  */
5229
5230                 if (flag & MBX_POLL) {
5231                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
5232
5233                         /* Mbox command <mbxCommand> cannot issue */
5234                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5235                                         "(%d):2529 Mailbox command x%x "
5236                                         "cannot issue Data: x%x x%x\n",
5237                                         pmbox->vport ? pmbox->vport->vpi : 0,
5238                                         pmbox->u.mb.mbxCommand,
5239                                         psli->sli_flag, flag);
5240                         goto out_not_finished;
5241                 }
5242
5243                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
5244                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
5245                         /* Mbox command <mbxCommand> cannot issue */
5246                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5247                                         "(%d):2530 Mailbox command x%x "
5248                                         "cannot issue Data: x%x x%x\n",
5249                                         pmbox->vport ? pmbox->vport->vpi : 0,
5250                                         pmbox->u.mb.mbxCommand,
5251                                         psli->sli_flag, flag);
5252                         goto out_not_finished;
5253                 }
5254
5255                 /* Another mailbox command is still being processed, queue this
5256                  * command to be processed later.
5257                  */
5258                 lpfc_mbox_put(phba, pmbox);
5259
5260                 /* Mbox cmd issue - BUSY */
5261                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5262                                 "(%d):0308 Mbox cmd issue - BUSY Data: "
5263                                 "x%x x%x x%x x%x\n",
5264                                 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
5265                                 mb->mbxCommand, phba->pport->port_state,
5266                                 psli->sli_flag, flag);
5267
5268                 psli->slistat.mbox_busy++;
5269                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
5270
5271                 if (pmbox->vport) {
5272                         lpfc_debugfs_disc_trc(pmbox->vport,
5273                                 LPFC_DISC_TRC_MBOX_VPORT,
5274                                 "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
5275                                 (uint32_t)mb->mbxCommand,
5276                                 mb->un.varWords[0], mb->un.varWords[1]);
5277                 }
5278                 else {
5279                         lpfc_debugfs_disc_trc(phba->pport,
5280                                 LPFC_DISC_TRC_MBOX,
5281                                 "MBOX Bsy:        cmd:x%x mb:x%x x%x",
5282                                 (uint32_t)mb->mbxCommand,
5283                                 mb->un.varWords[0], mb->un.varWords[1]);
5284                 }
5285
5286                 return MBX_BUSY;
5287         }
5288
5289         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5290
5291         /* If we are not polling, we MUST be in SLI2 mode */
5292         if (flag != MBX_POLL) {
5293                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
5294                     (mb->mbxCommand != MBX_KILL_BOARD)) {
5295                         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5296                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
5297                         /* Mbox command <mbxCommand> cannot issue */
5298                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5299                                         "(%d):2531 Mailbox command x%x "
5300                                         "cannot issue Data: x%x x%x\n",
5301                                         pmbox->vport ? pmbox->vport->vpi : 0,
5302                                         pmbox->u.mb.mbxCommand,
5303                                         psli->sli_flag, flag);
5304                         goto out_not_finished;
5305                 }
5306                 /* timeout active mbox command */
5307                 mod_timer(&psli->mbox_tmo, (jiffies +
5308                                (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
5309         }
5310
5311         /* Mailbox cmd <cmd> issue */
5312         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5313                         "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
5314                         "x%x\n",
5315                         pmbox->vport ? pmbox->vport->vpi : 0,
5316                         mb->mbxCommand, phba->pport->port_state,
5317                         psli->sli_flag, flag);
5318
5319         if (mb->mbxCommand != MBX_HEARTBEAT) {
5320                 if (pmbox->vport) {
5321                         lpfc_debugfs_disc_trc(pmbox->vport,
5322                                 LPFC_DISC_TRC_MBOX_VPORT,
5323                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
5324                                 (uint32_t)mb->mbxCommand,
5325                                 mb->un.varWords[0], mb->un.varWords[1]);
5326                 }
5327                 else {
5328                         lpfc_debugfs_disc_trc(phba->pport,
5329                                 LPFC_DISC_TRC_MBOX,
5330                                 "MBOX Send:       cmd:x%x mb:x%x x%x",
5331                                 (uint32_t)mb->mbxCommand,
5332                                 mb->un.varWords[0], mb->un.varWords[1]);
5333                 }
5334         }
5335
5336         psli->slistat.mbox_cmd++;
5337         evtctr = psli->slistat.mbox_event;
5338
5339         /* next set own bit for the adapter and copy over command word */
5340         mb->mbxOwner = OWN_CHIP;
5341
5342         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
5343                 /* Populate mbox extension offset word. */
5344                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
5345                         *(((uint32_t *)mb) + pmbox->mbox_offset_word)
5346                                 = (uint8_t *)phba->mbox_ext
5347                                   - (uint8_t *)phba->mbox;
5348                 }
5349
5350                 /* Copy the mailbox extension data */
5351                 if (pmbox->in_ext_byte_len && pmbox->context2) {
5352                         lpfc_sli_pcimem_bcopy(pmbox->context2,
5353                                 (uint8_t *)phba->mbox_ext,
5354                                 pmbox->in_ext_byte_len);
5355                 }
5356                 /* Copy command data to host SLIM area */
5357                 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
5358         } else {
5359                 /* Populate mbox extension offset word. */
5360                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
5361                         *(((uint32_t *)mb) + pmbox->mbox_offset_word)
5362                                 = MAILBOX_HBA_EXT_OFFSET;
5363
5364                 /* Copy the mailbox extension data */
5365                 if (pmbox->in_ext_byte_len && pmbox->context2) {
5366                         lpfc_memcpy_to_slim(phba->MBslimaddr +
5367                                 MAILBOX_HBA_EXT_OFFSET,
5368                                 pmbox->context2, pmbox->in_ext_byte_len);
5369
5370                 }
5371                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
5372                         /* copy command data into host mbox for cmpl */
5373                         lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
5374                 }
5375
5376                 /* First copy mbox command data to HBA SLIM, skip past first
5377                    word */
5378                 to_slim = phba->MBslimaddr + sizeof (uint32_t);
5379                 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
5380                             MAILBOX_CMD_SIZE - sizeof (uint32_t));
5381
5382                 /* Next copy over first word, with mbxOwner set */
5383                 ldata = *((uint32_t *)mb);
5384                 to_slim = phba->MBslimaddr;
5385                 writel(ldata, to_slim);
5386                 readl(to_slim); /* flush */
5387
5388                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
5389                         /* switch over to host mailbox */
5390                         psli->sli_flag |= LPFC_SLI_ACTIVE;
5391                 }
5392         }
5393
5394         wmb();
5395
5396         switch (flag) {
5397         case MBX_NOWAIT:
5398                 /* Set up reference to mailbox command */
5399                 psli->mbox_active = pmbox;
5400                 /* Interrupt board to do it */
5401                 writel(CA_MBATT, phba->CAregaddr);
5402                 readl(phba->CAregaddr); /* flush */
5403                 /* Don't wait for it to finish, just return */
5404                 break;
5405
5406         case MBX_POLL:
5407                 /* Set up null reference to mailbox command */
5408                 psli->mbox_active = NULL;
5409                 /* Interrupt board to do it */
5410                 writel(CA_MBATT, phba->CAregaddr);
5411                 readl(phba->CAregaddr); /* flush */
5412
5413                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
5414                         /* First read mbox status word */
5415                         word0 = *((uint32_t *)phba->mbox);
5416                         word0 = le32_to_cpu(word0);
5417                 } else {
5418                         /* First read mbox status word */
5419                         word0 = readl(phba->MBslimaddr);
5420                 }
5421
5422                 /* Read the HBA Host Attention Register */
5423                 ha_copy = readl(phba->HAregaddr);
5424                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
5425                                                              mb->mbxCommand) *
5426                                            1000) + jiffies;
5427                 i = 0;
5428                 /* Wait for command to complete */
5429                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
5430                        (!(ha_copy & HA_MBATT) &&
5431                         (phba->link_state > LPFC_WARM_START))) {
5432                         if (time_after(jiffies, timeout)) {
5433                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5434                                 spin_unlock_irqrestore(&phba->hbalock,
5435                                                        drvr_flag);
5436                                 goto out_not_finished;
5437                         }
5438
5439                         /* Check if we took a mbox interrupt while we were
5440                            polling */
5441                         if (((word0 & OWN_CHIP) != OWN_CHIP)
5442                             && (evtctr != psli->slistat.mbox_event))
5443                                 break;
5444
5445                         if (i++ > 10) {
5446                                 spin_unlock_irqrestore(&phba->hbalock,
5447                                                        drvr_flag);
5448                                 msleep(1);
5449                                 spin_lock_irqsave(&phba->hbalock, drvr_flag);
5450                         }
5451
5452                         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
5453                                 /* First copy command data */
5454                                 word0 = *((uint32_t *)phba->mbox);
5455                                 word0 = le32_to_cpu(word0);
5456                                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
5457                                         MAILBOX_t *slimmb;
5458                                         uint32_t slimword0;
5459                                         /* Check real SLIM for any errors */
5460                                         slimword0 = readl(phba->MBslimaddr);
5461                                         slimmb = (MAILBOX_t *) & slimword0;
5462                                         if (((slimword0 & OWN_CHIP) != OWN_CHIP)
5463                                             && slimmb->mbxStatus) {
5464                                                 psli->sli_flag &=
5465                                                     ~LPFC_SLI_ACTIVE;
5466                                                 word0 = slimword0;
5467                                         }
5468                                 }
5469                         } else {
5470                                 /* First copy command data */
5471                                 word0 = readl(phba->MBslimaddr);
5472                         }
5473                         /* Read the HBA Host Attention Register */
5474                         ha_copy = readl(phba->HAregaddr);
5475                 }
5476
5477                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
5478                         /* copy results back to user */
5479                         lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
5480                         /* Copy the mailbox extension data */
5481                         if (pmbox->out_ext_byte_len && pmbox->context2) {
5482                                 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
5483                                                       pmbox->context2,
5484                                                       pmbox->out_ext_byte_len);
5485                         }
5486                 } else {
5487                         /* First copy command data */
5488                         lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
5489                                                         MAILBOX_CMD_SIZE);
5490                         /* Copy the mailbox extension data */
5491                         if (pmbox->out_ext_byte_len && pmbox->context2) {
5492                                 lpfc_memcpy_from_slim(pmbox->context2,
5493                                         phba->MBslimaddr +
5494                                         MAILBOX_HBA_EXT_OFFSET,
5495                                         pmbox->out_ext_byte_len);
5496                         }
5497                 }
5498
5499                 writel(HA_MBATT, phba->HAregaddr);
5500                 readl(phba->HAregaddr); /* flush */
5501
5502                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5503                 status = mb->mbxStatus;
5504         }
5505
5506         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
5507         return status;
5508
5509 out_not_finished:
5510         if (processing_queue) {
5511                 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
5512                 lpfc_mbox_cmpl_put(phba, pmbox);
5513         }
5514         return MBX_NOT_FINISHED;
5515 }
5516
5517 /**
5518  * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
5519  * @phba: Pointer to HBA context object.
5520  *
5521  * The function blocks the posting of SLI4 asynchronous mailbox commands from
5522  * the driver internal pending mailbox queue. It will then try to wait out the
5523  * possible outstanding mailbox command before return.
5524  *
5525  * Returns:
5526  *      0 - the outstanding mailbox command completed; otherwise, the wait for
5527  *      the outstanding mailbox command timed out.
5528  **/
5529 static int
5530 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
5531 {
5532         struct lpfc_sli *psli = &phba->sli;
5533         uint8_t actcmd = MBX_HEARTBEAT;
5534         int rc = 0;
5535         unsigned long timeout;
5536
5537         /* Mark the asynchronous mailbox command posting as blocked */
5538         spin_lock_irq(&phba->hbalock);
5539         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
5540         if (phba->sli.mbox_active)
5541                 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
5542         spin_unlock_irq(&phba->hbalock);
5543         /* Determine how long we might wait for the active mailbox
5544          * command to be gracefully completed by firmware.
5545          */
5546         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
5547                                    jiffies;
5548         /* Wait for the outstnading mailbox command to complete */
5549         while (phba->sli.mbox_active) {
5550                 /* Check active mailbox complete status every 2ms */
5551                 msleep(2);
5552                 if (time_after(jiffies, timeout)) {
5553                         /* Timeout, marked the outstanding cmd not complete */
5554                         rc = 1;
5555                         break;
5556                 }
5557         }
5558
5559         /* Can not cleanly block async mailbox command, fails it */
5560         if (rc) {
5561                 spin_lock_irq(&phba->hbalock);
5562                 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5563                 spin_unlock_irq(&phba->hbalock);
5564         }
5565         return rc;
5566 }
5567
5568 /**
5569  * lpfc_sli4_async_mbox_unblock - Block posting SLI4 async mailbox command
5570  * @phba: Pointer to HBA context object.
5571  *
5572  * The function unblocks and resume posting of SLI4 asynchronous mailbox
5573  * commands from the driver internal pending mailbox queue. It makes sure
5574  * that there is no outstanding mailbox command before resuming posting
5575  * asynchronous mailbox commands. If, for any reason, there is outstanding
5576  * mailbox command, it will try to wait it out before resuming asynchronous
5577  * mailbox command posting.
5578  **/
5579 static void
5580 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
5581 {
5582         struct lpfc_sli *psli = &phba->sli;
5583
5584         spin_lock_irq(&phba->hbalock);
5585         if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5586                 /* Asynchronous mailbox posting is not blocked, do nothing */
5587                 spin_unlock_irq(&phba->hbalock);
5588                 return;
5589         }
5590
5591         /* Outstanding synchronous mailbox command is guaranteed to be done,
5592          * successful or timeout, after timing-out the outstanding mailbox
5593          * command shall always be removed, so just unblock posting async
5594          * mailbox command and resume
5595          */
5596         psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5597         spin_unlock_irq(&phba->hbalock);
5598
5599         /* wake up worker thread to post asynchronlous mailbox command */
5600         lpfc_worker_wake_up(phba);
5601 }
5602
5603 /**
5604  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
5605  * @phba: Pointer to HBA context object.
5606  * @mboxq: Pointer to mailbox object.
5607  *
5608  * The function posts a mailbox to the port.  The mailbox is expected
5609  * to be comletely filled in and ready for the port to operate on it.
5610  * This routine executes a synchronous completion operation on the
5611  * mailbox by polling for its completion.
5612  *
5613  * The caller must not be holding any locks when calling this routine.
5614  *
5615  * Returns:
5616  *      MBX_SUCCESS - mailbox posted successfully
5617  *      Any of the MBX error values.
5618  **/
5619 static int
5620 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5621 {
5622         int rc = MBX_SUCCESS;
5623 &n