1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25 #include <linux/delay.h>
26
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_cmnd.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_host.h>
31 #include <scsi/scsi_transport_fc.h>
32 #include <scsi/fc/fc_fs.h>
33
34 #include "lpfc_hw4.h"
35 #include "lpfc_hw.h"
36 #include "lpfc_sli.h"
37 #include "lpfc_sli4.h"
38 #include "lpfc_nl.h"
39 #include "lpfc_disc.h"
40 #include "lpfc_scsi.h"
41 #include "lpfc.h"
42 #include "lpfc_crtn.h"
43 #include "lpfc_logmsg.h"
44 #include "lpfc_compat.h"
45 #include "lpfc_debugfs.h"
46 #include "lpfc_vport.h"
47
48 /* There are only four IOCB completion types. */
49 typedef enum _lpfc_iocb_type {
50         LPFC_UNKNOWN_IOCB,
51         LPFC_UNSOL_IOCB,
52         LPFC_SOL_IOCB,
53         LPFC_ABORT_IOCB
54 } lpfc_iocb_type;
55
56
57 /* Provide function prototypes local to this module. */
58 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
59                                   uint32_t);
60 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
61                             uint8_t *, uint32_t *);
62 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
63                                       struct hbq_dmabuf *);
64 static IOCB_t *
65 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
66 {
67         return &iocbq->iocb;
68 }
69
70 /**
71  * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
72  * @q: The Work Queue to operate on.
73  * @wqe: The Work Queue Entry to put on the Work Queue.
74  *
75  * This routine will copy the contents of @wqe to the next available entry on
76  * the @q. This function will then ring the Work Queue Doorbell to signal the
77  * HBA to start processing the Work Queue Entry. This function returns 0 if
78  * successful. If no entries are available on @q then this function will return
79  * -ENOMEM.
80  * The caller is expected to hold the hbalock when calling this routine.
81  **/
82 static uint32_t
83 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
84 {
85         union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
86         struct lpfc_register doorbell;
87         uint32_t host_index;
88
89         /* If the host has not yet processed the next entry then we are done */
90         if (((q->host_index + 1) % q->entry_count) == q->hba_index)
91                 return -ENOMEM;
92         /* set consumption flag every once in a while */
93         if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
94                 bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
95
96         lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
97
98         /* Update the host index before invoking device */
99         host_index = q->host_index;
100         q->host_index = ((q->host_index + 1) % q->entry_count);
101
102         /* Ring Doorbell */
103         doorbell.word0 = 0;
104         bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
105         bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
106         bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
107         writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
108         readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
109
110         return 0;
111 }
112
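/*
 * Editor's usage sketch (not part of the upstream driver): the calling
 * pattern lpfc_sli4_wq_put() expects. The wrapper name is illustrative;
 * only the hbalock requirement and the full-queue return come from the
 * routine above.
 */
static inline int
lpfc_sli4_wq_put_example(struct lpfc_hba *phba, struct lpfc_queue *wq,
                         union lpfc_wqe *wqe)
{
        unsigned long iflags;
        int rc = 0;

        /* lpfc_sli4_wq_put() expects the caller to hold hbalock */
        spin_lock_irqsave(&phba->hbalock, iflags);
        if (lpfc_sli4_wq_put(wq, wqe))
                rc = -ENOMEM;   /* WQ is full; a caller would retry later */
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return rc;
}
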
113 /**
114  * lpfc_sli4_wq_release - Updates internal hba index for WQ
115  * @q: The Work Queue to operate on.
116  * @index: The index to advance the hba index to.
117  *
118  * This routine will update the HBA index of a queue to reflect consumption of
119  * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
120  * an entry the host calls this function to update the queue's internal
121  * pointers. This routine returns the number of entries that were consumed by
122  * the HBA.
123  **/
124 static uint32_t
125 lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
126 {
127         uint32_t released = 0;
128
129         if (q->hba_index == index)
130                 return 0;
131         do {
132                 q->hba_index = ((q->hba_index + 1) % q->entry_count);
133                 released++;
134         } while (q->hba_index != index);
135         return released;
136 }
137
138 /**
139  * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
140  * @q: The Mailbox Queue to operate on.
141  * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
142  *
143  * This routine will copy the contents of @mqe to the next available entry on
144  * the @q. This function will then ring the Mailbox Queue Doorbell to signal the
145  * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
146  * successful. If no entries are available on @q then this function will return
147  * -ENOMEM.
148  * The caller is expected to hold the hbalock when calling this routine.
149  **/
150 static uint32_t
151 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
152 {
153         struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
154         struct lpfc_register doorbell;
155         uint32_t host_index;
156
157         /* If the host has not yet processed the next entry then we are done */
158         if (((q->host_index + 1) % q->entry_count) == q->hba_index)
159                 return -ENOMEM;
160         lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
161         /* Save off the mailbox pointer for completion */
162         q->phba->mbox = (MAILBOX_t *)temp_mqe;
163
164         /* Update the host index before invoking device */
165         host_index = q->host_index;
166         q->host_index = ((q->host_index + 1) % q->entry_count);
167
168         /* Ring Doorbell */
169         doorbell.word0 = 0;
170         bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
171         bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
172         writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
173         readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
174         return 0;
175 }
176
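/*
 * Editor's usage sketch (not part of the upstream driver): posting a mailbox
 * entry with lpfc_sli4_mq_put() under hbalock. The routine above also stashes
 * the queued entry in phba->mbox, which lpfc_sli4_mq_release() clears when
 * the mailbox completes. The wrapper name is illustrative only.
 */
static inline int
lpfc_sli4_mq_put_example(struct lpfc_hba *phba, struct lpfc_queue *mq,
                         struct lpfc_mqe *mqe)
{
        unsigned long iflags;
        int rc = 0;

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (lpfc_sli4_mq_put(mq, mqe))
                rc = -ENOMEM;   /* MQ is full */
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return rc;
}
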
177 /**
178  * lpfc_sli4_mq_release - Updates internal hba index for MQ
179  * @q: The Mailbox Queue to operate on.
180  *
181  * This routine will update the HBA index of a queue to reflect consumption of
182  * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
183  * an entry the host calls this function to update the queue's internal
184  * pointers. This routine returns the number of entries that were consumed by
185  * the HBA.
186  **/
187 static uint32_t
188 lpfc_sli4_mq_release(struct lpfc_queue *q)
189 {
190         /* Clear the mailbox pointer for completion */
191         q->phba->mbox = NULL;
192         q->hba_index = ((q->hba_index + 1) % q->entry_count);
193         return 1;
194 }
195
196 /**
197  * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
198  * @q: The Event Queue to get the first valid EQE from
199  *
200  * This routine will get the first valid Event Queue Entry from @q, update
201  * the queue's internal hba index, and return the EQE. If no valid EQEs are in
202  * the Queue (no more work to do), or the Queue is full of EQEs that have been
203  * processed, but not popped back to the HBA then this routine will return NULL.
204  **/
205 static struct lpfc_eqe *
206 lpfc_sli4_eq_get(struct lpfc_queue *q)
207 {
208         struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
209
210         /* If the next EQE is not valid then we are done */
211         if (!bf_get(lpfc_eqe_valid, eqe))
212                 return NULL;
213         /* If the host has not yet processed the next entry then we are done */
214         if (((q->hba_index + 1) % q->entry_count) == q->host_index)
215                 return NULL;
216
217         q->hba_index = ((q->hba_index + 1) % q->entry_count);
218         return eqe;
219 }
220
221 /**
222  * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
223  * @q: The Event Queue that the host has completed processing for.
224  * @arm: Indicates whether the host wants to arm this EQ.
225  *
226  * This routine will mark all Event Queue Entries on @q, from the last
227  * known completed entry to the last entry that was processed, as completed
228  * by clearing the valid bit for each completion queue entry. Then it will
229  * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
230  * The internal host index in the @q will be updated by this routine to indicate
231  * that the host has finished processing the entries. The @arm parameter
232  * indicates that the queue should be rearmed when ringing the doorbell.
233  *
234  * This function will return the number of EQEs that were popped.
235  **/
236 uint32_t
237 lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
238 {
239         uint32_t released = 0;
240         struct lpfc_eqe *temp_eqe;
241         struct lpfc_register doorbell;
242
243         /* while there are valid entries */
244         while (q->hba_index != q->host_index) {
245                 temp_eqe = q->qe[q->host_index].eqe;
246                 bf_set(lpfc_eqe_valid, temp_eqe, 0);
247                 released++;
248                 q->host_index = ((q->host_index + 1) % q->entry_count);
249         }
250         if (unlikely(released == 0 && !arm))
251                 return 0;
252
253         /* ring doorbell for number popped */
254         doorbell.word0 = 0;
255         if (arm) {
256                 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
257                 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
258         }
259         bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
260         bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
261         bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
262         writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
263         return released;
264 }
265
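/*
 * Editor's usage sketch (not part of the upstream driver): the polling loop
 * the interrupt handlers build around lpfc_sli4_eq_get() and
 * lpfc_sli4_eq_release(). EQE decoding is elided; passing arm = true re-arms
 * the EQ once the consumed entries have been popped back to the HBA.
 */
static inline uint32_t
lpfc_sli4_eq_poll_example(struct lpfc_queue *eq)
{
        struct lpfc_eqe *eqe;
        uint32_t processed = 0;

        while ((eqe = lpfc_sli4_eq_get(eq))) {
                /* ... decode the EQE and service the associated CQ ... */
                processed++;
        }

        /* pop the processed entries back to the HBA and re-arm the EQ */
        lpfc_sli4_eq_release(eq, true);
        return processed;
}
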
266 /**
267  * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
268  * @q: The Completion Queue to get the first valid CQE from
269  *
270  * This routine will get the first valid Completion Queue Entry from @q, update
271  * the queue's internal hba index, and return the CQE. If no valid CQEs are in
272  * the Queue (no more work to do), or the Queue is full of CQEs that have been
273  * processed, but not popped back to the HBA then this routine will return NULL.
274  **/
275 static struct lpfc_cqe *
276 lpfc_sli4_cq_get(struct lpfc_queue *q)
277 {
278         struct lpfc_cqe *cqe;
279
280         /* If the next CQE is not valid then we are done */
281         if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
282                 return NULL;
283         /* If the host has not yet processed the next entry then we are done */
284         if (((q->hba_index + 1) % q->entry_count) == q->host_index)
285                 return NULL;
286
287         cqe = q->qe[q->hba_index].cqe;
288         q->hba_index = ((q->hba_index + 1) % q->entry_count);
289         return cqe;
290 }
291
292 /**
293  * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
294  * @q: The Completion Queue that the host has completed processing for.
295  * @arm: Indicates whether the host wants to arm this CQ.
296  *
297  * This routine will mark all Completion queue entries on @q, from the last
298  * known completed entry to the last entry that was processed, as completed
299  * by clearing the valid bit for each completion queue entry. Then it will
300  * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
301  * The internal host index in the @q will be updated by this routine to indicate
302  * that the host has finished processing the entries. The @arm parameter
303  * indicates that the queue should be rearmed when ringing the doorbell.
304  *
305  * This function will return the number of CQEs that were released.
306  **/
307 uint32_t
308 lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
309 {
310         uint32_t released = 0;
311         struct lpfc_cqe *temp_qe;
312         struct lpfc_register doorbell;
313
314         /* while there are valid entries */
315         while (q->hba_index != q->host_index) {
316                 temp_qe = q->qe[q->host_index].cqe;
317                 bf_set(lpfc_cqe_valid, temp_qe, 0);
318                 released++;
319                 q->host_index = ((q->host_index + 1) % q->entry_count);
320         }
321         if (unlikely(released == 0 && !arm))
322                 return 0;
323
324         /* ring doorbell for number popped */
325         doorbell.word0 = 0;
326         if (arm)
327                 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
328         bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
329         bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
330         bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
331         writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
332         return released;
333 }
334
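/*
 * Editor's usage sketch (not part of the upstream driver): draining a
 * completion queue with lpfc_sli4_cq_get() and releasing entries in batches.
 * The batch threshold of 64 is an arbitrary illustration; only the final
 * release with arm = true is needed to re-arm the CQ.
 */
static inline void
lpfc_sli4_cq_poll_example(struct lpfc_queue *cq)
{
        struct lpfc_cqe *cqe;
        uint32_t count = 0;

        while ((cqe = lpfc_sli4_cq_get(cq))) {
                /* ... hand the CQE to the appropriate handler ... */
                if (!(++count % 64))
                        lpfc_sli4_cq_release(cq, false);
        }

        /* release any remaining entries and re-arm the CQ */
        lpfc_sli4_cq_release(cq, true);
}
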
335 /**
336  * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
337  * @hq: The Header Receive Queue to operate on.
338  * @dq: The Data Receive Queue to operate on.
339  *
340  * This routine will copy the contents of @hrqe and @drqe to the next available
341  * entries on @hq and @dq. It will then ring the Receive Queue Doorbell to signal
342  * the HBA to start processing the Receive Queue Entries. This function returns
343  * the index that the hrqe was copied to if successful, -EINVAL if the queues do
344  * not match, or -EBUSY if no entries are available.
345  * The caller is expected to hold the hbalock when calling this routine.
346  **/
347 static int
348 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
349                  struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
350 {
351         struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
352         struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
353         struct lpfc_register doorbell;
354         int put_index = hq->host_index;
355
356         if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
357                 return -EINVAL;
358         if (hq->host_index != dq->host_index)
359                 return -EINVAL;
360         /* If the host has not yet processed the next entry then we are done */
361         if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
362                 return -EBUSY;
363         lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
364         lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
365
366         /* Update the host index to point to the next slot */
367         hq->host_index = ((hq->host_index + 1) % hq->entry_count);
368         dq->host_index = ((dq->host_index + 1) % dq->entry_count);
369
370         /* Ring The Header Receive Queue Doorbell */
371         if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
372                 doorbell.word0 = 0;
373                 bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
374                        LPFC_RQ_POST_BATCH);
375                 bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
376                 writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
377         }
378         return put_index;
379 }
380
381 /**
382  * lpfc_sli4_rq_release - Updates internal hba index for RQ
383  * @hq: The Header Receive Queue; @dq: The Data Receive Queue to operate on.
384  *
385  * This routine will update the HBA index of a queue to reflect consumption of
386  * one Receive Queue Entry by the HBA. When the HBA indicates that it has
387  * consumed an entry the host calls this function to update the queue's
388  * internal pointers. This routine returns the number of entries that were
389  * consumed by the HBA.
390  **/
391 static uint32_t
392 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
393 {
394         if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
395                 return 0;
396         hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
397         dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
398         return 1;
399 }
400
401 /**
402  * lpfc_cmd_iocb - Get next command iocb entry in the ring
403  * @phba: Pointer to HBA context object.
404  * @pring: Pointer to driver SLI ring object.
405  *
406  * This function returns a pointer to the next command iocb entry
407  * in the command ring. The caller must hold hbalock to prevent
408  * other threads from consuming the next command iocb.
409  * SLI-2/SLI-3 provide different sized iocbs.
410  **/
411 static inline IOCB_t *
412 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
413 {
414         return (IOCB_t *) (((char *) pring->cmdringaddr) +
415                            pring->cmdidx * phba->iocb_cmd_size);
416 }
417
418 /**
419  * lpfc_resp_iocb - Get next response iocb entry in the ring
420  * @phba: Pointer to HBA context object.
421  * @pring: Pointer to driver SLI ring object.
422  *
423  * This function returns a pointer to the next response iocb entry
424  * in the response ring. The caller must hold hbalock to make sure
425  * that no other thread consumes the next response iocb.
426  * SLI-2/SLI-3 provide different sized iocbs.
427  **/
428 static inline IOCB_t *
429 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
430 {
431         return (IOCB_t *) (((char *) pring->rspringaddr) +
432                            pring->rspidx * phba->iocb_rsp_size);
433 }
434
435 /**
436  * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
437  * @phba: Pointer to HBA context object.
438  *
439  * This function is called with hbalock held. This function
440  * allocates a new driver iocb object from the iocb pool. If the
441  * allocation is successful, it returns pointer to the newly
442  * allocated iocb object else it returns NULL.
443  **/
444 static struct lpfc_iocbq *
445 __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
446 {
447         struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
448         struct lpfc_iocbq * iocbq = NULL;
449
450         list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
451         return iocbq;
452 }
453
454 /**
455  * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
456  * @phba: Pointer to HBA context object.
457  * @xritag: XRI value.
458  *
459  * This function clears the sglq pointer from the array of active
460  * sglq's. The xritag that is passed in is used to index into the
461  * array. Before the xritag can be used it needs to be adjusted
462  * by subtracting the xribase.
463  *
464  * Returns sglq pointer = success, NULL = Failure.
465  **/
466 static struct lpfc_sglq *
467 __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
468 {
469         uint16_t adj_xri;
470         struct lpfc_sglq *sglq;
471         adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
472         if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
473                 return NULL;
474         sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
475         phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
476         return sglq;
477 }
478
479 /**
480  * __lpfc_get_active_sglq - Get the active sglq for this XRI.
481  * @phba: Pointer to HBA context object.
482  * @xritag: XRI value.
483  *
484  * This function returns the sglq pointer from the array of active
485  * sglq's. The xritag that is passed in is used to index into the
486  * array. Before the xritag can be used it needs to be adjusted
487  * by subtracting the xribase.
488  *
489  * Returns sglq pointer = success, NULL = Failure.
490  **/
491 static struct lpfc_sglq *
492 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
493 {
494         uint16_t adj_xri;
495         struct lpfc_sglq *sglq;
496         adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
497         if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
498                 return NULL;
499         sglq =  phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
500         return sglq;
501 }
502
503 /**
504  * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
505  * @phba: Pointer to HBA context object.
506  *
507  * This function is called with hbalock held. This function
508  * gets a new driver sglq object from the sglq list. If the
509  * list is not empty, it returns a pointer to the newly
510  * allocated sglq object; otherwise it returns NULL.
511  **/
512 static struct lpfc_sglq *
513 __lpfc_sli_get_sglq(struct lpfc_hba *phba)
514 {
515         struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
516         struct lpfc_sglq *sglq = NULL;
517         uint16_t adj_xri;
518         list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
519         if (!sglq)
520                 return NULL;
521         adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
522         phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
523         return sglq;
524 }
525
526 /**
527  * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
528  * @phba: Pointer to HBA context object.
529  *
530  * This function is called with no lock held. This function
531  * allocates a new driver iocb object from the iocb pool. If the
532  * allocation is successful, it returns pointer to the newly
533  * allocated iocb object else it returns NULL.
534  **/
535 struct lpfc_iocbq *
536 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
537 {
538         struct lpfc_iocbq * iocbq = NULL;
539         unsigned long iflags;
540
541         spin_lock_irqsave(&phba->hbalock, iflags);
542         iocbq = __lpfc_sli_get_iocbq(phba);
543         spin_unlock_irqrestore(&phba->hbalock, iflags);
544         return iocbq;
545 }
546
547 /**
548  * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
549  * @phba: Pointer to HBA context object.
550  * @iocbq: Pointer to driver iocb object.
551  *
552  * This function is called with hbalock held to release driver
553  * iocb object to the iocb pool. The iotag in the iocb object
554  * does not change for each use of the iocb object. This function
555  * clears all other fields of the iocb object when it is freed.
556  * The sglq structure that holds the xritag and the physical and virtual
557  * mappings for the scatter gather list is retrieved from the
558  * active array of sglq. The get of the sglq pointer also clears
559  * the entry in the array. If the status of the IO indicates that
560  * this IO was aborted then the sglq entry is put on the
561  * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
562  * IO has good status or fails for any other reason then the sglq
563  * entry is added to the free list (lpfc_sgl_list).
564  **/
565 static void
566 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
567 {
568         struct lpfc_sglq *sglq;
569         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
570         unsigned long iflag;
571
572         if (iocbq->sli4_xritag == NO_XRI)
573                 sglq = NULL;
574         else
575                 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
576         if (sglq)  {
577                 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
578                         && ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
579                         && (iocbq->iocb.un.ulpWord[4]
580                                 == IOERR_ABORT_REQUESTED))) {
581                         spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
582                                         iflag);
583                         list_add(&sglq->list,
584                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
585                         spin_unlock_irqrestore(
586                                 &phba->sli4_hba.abts_sgl_list_lock, iflag);
587                 } else
588                         list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
589         }
590
591
592         /*
593          * Clean all volatile data fields, preserve iotag and node struct.
594          */
595         memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
596         iocbq->sli4_xritag = NO_XRI;
597         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
598 }
599
600 /**
601  * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
602  * @phba: Pointer to HBA context object.
603  * @iocbq: Pointer to driver iocb object.
604  *
605  * This function is called with hbalock held to release driver
606  * iocb object to the iocb pool. The iotag in the iocb object
607  * does not change for each use of the iocb object. This function
608  * clears all other fields of the iocb object when it is freed.
609  **/
610 static void
611 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
612 {
613         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
614
615         /*
616          * Clean all volatile data fields, preserve iotag and node struct.
617          */
618         memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
619         iocbq->sli4_xritag = NO_XRI;
620         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
621 }
622
623 /**
624  * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
625  * @phba: Pointer to HBA context object.
626  * @iocbq: Pointer to driver iocb object.
627  *
628  * This function is called with hbalock held to release driver
629  * iocb object to the iocb pool. The iotag in the iocb object
630  * does not change for each use of the iocb object. This function
631  * clears all other fields of the iocb object when it is freed.
632  **/
633 static void
634 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
635 {
636         phba->__lpfc_sli_release_iocbq(phba, iocbq);
637 }
638
639 /**
640  * lpfc_sli_release_iocbq - Release iocb to the iocb pool
641  * @phba: Pointer to HBA context object.
642  * @iocbq: Pointer to driver iocb object.
643  *
644  * This function is called with no lock held to release the iocb to
645  * iocb pool.
646  **/
647 void
648 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
649 {
650         unsigned long iflags;
651
652         /*
653          * Clean all volatile data fields, preserve iotag and node struct.
654          */
655         spin_lock_irqsave(&phba->hbalock, iflags);
656         __lpfc_sli_release_iocbq(phba, iocbq);
657         spin_unlock_irqrestore(&phba->hbalock, iflags);
658 }
659
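/*
 * Editor's usage sketch (not part of the upstream driver): typical pairing of
 * lpfc_sli_get_iocbq() and lpfc_sli_release_iocbq(). The helper name and the
 * elided "build and issue" step are illustrative only.
 */
static inline int
lpfc_sli_iocbq_roundtrip_example(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq;

        /* lpfc_sli_get_iocbq() takes and releases hbalock internally */
        iocbq = lpfc_sli_get_iocbq(phba);
        if (!iocbq)
                return -ENOMEM;

        /* ... build the command in iocbq->iocb and issue it here ... */

        /* return the iocbq to phba->lpfc_iocb_list once it is done */
        lpfc_sli_release_iocbq(phba, iocbq);
        return 0;
}
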
660 /**
661  * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
662  * @phba: Pointer to HBA context object.
663  * @iocblist: List of IOCBs.
664  * @ulpstatus: ULP status in IOCB command field.
665  * @ulpWord4: ULP word-4 in IOCB command field.
666  *
667  * This function is called with a list of IOCBs to cancel. It cancels each IOCB
668  * on the list by invoking the completion callback function associated with the
669  * IOCB, with the provided @ulpstatus and @ulpWord4 set in the IOCB command
670  * fields.
671  **/
672 void
673 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
674                       uint32_t ulpstatus, uint32_t ulpWord4)
675 {
676         struct lpfc_iocbq *piocb;
677
678         while (!list_empty(iocblist)) {
679                 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
680
681                 if (!piocb->iocb_cmpl)
682                         lpfc_sli_release_iocbq(phba, piocb);
683                 else {
684                         piocb->iocb.ulpStatus = ulpstatus;
685                         piocb->iocb.un.ulpWord[4] = ulpWord4;
686                         (piocb->iocb_cmpl) (phba, piocb, piocb);
687                 }
688         }
689         return;
690 }
691
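/*
 * Editor's usage sketch (not part of the upstream driver): cancelling a set
 * of queued IOCBs with lpfc_sli_cancel_iocbs(). Splicing pring->txq onto a
 * local list under hbalock mirrors how the abort paths use the routine; the
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED pairing is the conventional choice.
 */
static inline void
lpfc_sli_cancel_txq_example(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        LIST_HEAD(completions);
        unsigned long iflags;

        /* detach the pending commands while holding hbalock */
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_splice_init(&pring->txq, &completions);
        pring->txq_cnt = 0;
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        /* complete them back to their owners with an aborted status */
        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
                              IOERR_SLI_ABORTED);
}
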
692 /**
693  * lpfc_sli_iocb_cmd_type - Get the iocb type
694  * @iocb_cmnd: iocb command code.
695  *
696  * This function is called by ring event handler function to get the iocb type.
697  * This function translates the iocb command to an iocb command type used to
698  * decide the final disposition of each completed IOCB.
699  * The function returns
700  * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
701  * LPFC_SOL_IOCB     if it is a solicited iocb completion
702  * LPFC_ABORT_IOCB   if it is an abort iocb
703  * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
704  *
705  * The caller is not required to hold any lock.
706  **/
707 static lpfc_iocb_type
708 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
709 {
710         lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
711
712         if (iocb_cmnd > CMD_MAX_IOCB_CMD)
713                 return 0;
714
715         switch (iocb_cmnd) {
716         case CMD_XMIT_SEQUENCE_CR:
717         case CMD_XMIT_SEQUENCE_CX:
718         case CMD_XMIT_BCAST_CN:
719         case CMD_XMIT_BCAST_CX:
720         case CMD_ELS_REQUEST_CR:
721         case CMD_ELS_REQUEST_CX:
722         case CMD_CREATE_XRI_CR:
723         case CMD_CREATE_XRI_CX:
724         case CMD_GET_RPI_CN:
725         case CMD_XMIT_ELS_RSP_CX:
726         case CMD_GET_RPI_CR:
727         case CMD_FCP_IWRITE_CR:
728         case CMD_FCP_IWRITE_CX:
729         case CMD_FCP_IREAD_CR:
730         case CMD_FCP_IREAD_CX:
731         case CMD_FCP_ICMND_CR:
732         case CMD_FCP_ICMND_CX:
733         case CMD_FCP_TSEND_CX:
734         case CMD_FCP_TRSP_CX:
735         case CMD_FCP_TRECEIVE_CX:
736         case CMD_FCP_AUTO_TRSP_CX:
737         case CMD_ADAPTER_MSG:
738         case CMD_ADAPTER_DUMP:
739         case CMD_XMIT_SEQUENCE64_CR:
740         case CMD_XMIT_SEQUENCE64_CX:
741         case CMD_XMIT_BCAST64_CN:
742         case CMD_XMIT_BCAST64_CX:
743         case CMD_ELS_REQUEST64_CR:
744         case CMD_ELS_REQUEST64_CX:
745         case CMD_FCP_IWRITE64_CR:
746         case CMD_FCP_IWRITE64_CX:
747         case CMD_FCP_IREAD64_CR:
748         case CMD_FCP_IREAD64_CX:
749         case CMD_FCP_ICMND64_CR:
750         case CMD_FCP_ICMND64_CX:
751         case CMD_FCP_TSEND64_CX:
752         case CMD_FCP_TRSP64_CX:
753         case CMD_FCP_TRECEIVE64_CX:
754         case CMD_GEN_REQUEST64_CR:
755         case CMD_GEN_REQUEST64_CX:
756         case CMD_XMIT_ELS_RSP64_CX:
757         case DSSCMD_IWRITE64_CR:
758         case DSSCMD_IWRITE64_CX:
759         case DSSCMD_IREAD64_CR:
760         case DSSCMD_IREAD64_CX:
761         case DSSCMD_INVALIDATE_DEK:
762         case DSSCMD_SET_KEK:
763         case DSSCMD_GET_KEK_ID:
764         case DSSCMD_GEN_XFER:
765                 type = LPFC_SOL_IOCB;
766                 break;
767         case CMD_ABORT_XRI_CN:
768         case CMD_ABORT_XRI_CX:
769         case CMD_CLOSE_XRI_CN:
770         case CMD_CLOSE_XRI_CX:
771         case CMD_XRI_ABORTED_CX:
772         case CMD_ABORT_MXRI64_CN:
773         case CMD_XMIT_BLS_RSP64_CX:
774                 type = LPFC_ABORT_IOCB;
775                 break;
776         case CMD_RCV_SEQUENCE_CX:
777         case CMD_RCV_ELS_REQ_CX:
778         case CMD_RCV_SEQUENCE64_CX:
779         case CMD_RCV_ELS_REQ64_CX:
780         case CMD_ASYNC_STATUS:
781         case CMD_IOCB_RCV_SEQ64_CX:
782         case CMD_IOCB_RCV_ELS64_CX:
783         case CMD_IOCB_RCV_CONT64_CX:
784         case CMD_IOCB_RET_XRI64_CX:
785                 type = LPFC_UNSOL_IOCB;
786                 break;
787         case CMD_IOCB_XMIT_MSEQ64_CR:
788         case CMD_IOCB_XMIT_MSEQ64_CX:
789         case CMD_IOCB_RCV_SEQ_LIST64_CX:
790         case CMD_IOCB_RCV_ELS_LIST64_CX:
791         case CMD_IOCB_CLOSE_EXTENDED_CN:
792         case CMD_IOCB_ABORT_EXTENDED_CN:
793         case CMD_IOCB_RET_HBQE64_CN:
794         case CMD_IOCB_FCP_IBIDIR64_CR:
795         case CMD_IOCB_FCP_IBIDIR64_CX:
796         case CMD_IOCB_FCP_ITASKMGT64_CX:
797         case CMD_IOCB_LOGENTRY_CN:
798         case CMD_IOCB_LOGENTRY_ASYNC_CN:
799                 printk("%s - Unhandled SLI-3 Command x%x\n",
800                                 __func__, iocb_cmnd);
801                 type = LPFC_UNKNOWN_IOCB;
802                 break;
803         default:
804                 type = LPFC_UNKNOWN_IOCB;
805                 break;
806         }
807
808         return type;
809 }
810
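/*
 * Editor's usage sketch (not part of the upstream driver): how a ring event
 * handler might dispatch on the type returned by lpfc_sli_iocb_cmd_type().
 * The handler steps in the comments are placeholders, not driver symbols.
 */
static inline void
lpfc_sli_iocb_type_dispatch_example(IOCB_t *iocb)
{
        switch (lpfc_sli_iocb_cmd_type(iocb->ulpCommand)) {
        case LPFC_SOL_IOCB:
                /* completion for a command the driver issued */
                break;
        case LPFC_UNSOL_IOCB:
                /* unsolicited receive; hand the buffer to the ULP */
                break;
        case LPFC_ABORT_IOCB:
                /* completion of an abort or close request */
                break;
        case LPFC_UNKNOWN_IOCB:
        default:
                /* unsupported command; log and drop */
                break;
        }
}
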
811 /**
812  * lpfc_sli_ring_map - Issue config_ring mbox for all rings
813  * @phba: Pointer to HBA context object.
814  *
815  * This function is called from SLI initialization code
816  * to configure every ring of the HBA's SLI interface. The
817  * caller is not required to hold any lock. This function issues
818  * a config_ring mailbox command for each ring.
819  * This function returns zero if successful else returns a negative
820  * error code.
821  **/
822 static int
823 lpfc_sli_ring_map(struct lpfc_hba *phba)
824 {
825         struct lpfc_sli *psli = &phba->sli;
826         LPFC_MBOXQ_t *pmb;
827         MAILBOX_t *pmbox;
828         int i, rc, ret = 0;
829
830         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
831         if (!pmb)
832                 return -ENOMEM;
833         pmbox = &pmb->u.mb;
834         phba->link_state = LPFC_INIT_MBX_CMDS;
835         for (i = 0; i < psli->num_rings; i++) {
836                 lpfc_config_ring(phba, i, pmb);
837                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
838                 if (rc != MBX_SUCCESS) {
839                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
840                                         "0446 Adapter failed to init (%d), "
841                                         "mbxCmd x%x CFG_RING, mbxStatus x%x, "
842                                         "ring %d\n",
843                                         rc, pmbox->mbxCommand,
844                                         pmbox->mbxStatus, i);
845                         phba->link_state = LPFC_HBA_ERROR;
846                         ret = -ENXIO;
847                         break;
848                 }
849         }
850         mempool_free(pmb, phba->mbox_mem_pool);
851         return ret;
852 }
853
854 /**
855  * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
856  * @phba: Pointer to HBA context object.
857  * @pring: Pointer to driver SLI ring object.
858  * @piocb: Pointer to the driver iocb object.
859  *
860  * This function is called with hbalock held. The function adds the
861  * new iocb to txcmplq of the given ring. This function always returns
862  * 0. If this function is called for the ELS ring, this function checks if
863  * there is a vport associated with the ELS command. This function also
864  * starts the els_tmofunc timer if this is an ELS command.
865  **/
866 static int
867 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
868                         struct lpfc_iocbq *piocb)
869 {
870         list_add_tail(&piocb->list, &pring->txcmplq);
871         pring->txcmplq_cnt++;
872         if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
873            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
874            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
875                 if (!piocb->vport)
876                         BUG();
877                 else
878                         mod_timer(&piocb->vport->els_tmofunc,
879                                   jiffies + HZ * (phba->fc_ratov << 1));
880         }
881
882
883         return 0;
884 }
885
886 /**
887  * lpfc_sli_ringtx_get - Get first element of the txq
888  * @phba: Pointer to HBA context object.
889  * @pring: Pointer to driver SLI ring object.
890  *
891  * This function is called with hbalock held to get next
892  * iocb in txq of the given ring. If there is any iocb in
893  * the txq, the function returns first iocb in the list after
894  * removing the iocb from the list, else it returns NULL.
895  **/
896 static struct lpfc_iocbq *
897 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
898 {
899         struct lpfc_iocbq *cmd_iocb;
900
901         list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
902         if (cmd_iocb != NULL)
903                 pring->txq_cnt--;
904         return cmd_iocb;
905 }
906
907 /**
908  * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
909  * @phba: Pointer to HBA context object.
910  * @pring: Pointer to driver SLI ring object.
911  *
912  * This function is called with hbalock held and the caller must post the
913  * iocb without releasing the lock. If the caller releases the lock,
914  * the iocb slot returned by the function is not guaranteed to be available.
915  * The function returns a pointer to the next available iocb slot if there
916  * is an available slot in the ring, else it returns NULL.
917  * If the get index of the ring is ahead of the put index, the function
918  * will post an error attention event to the worker thread to take the
919  * HBA to offline state.
920  **/
921 static IOCB_t *
922 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
923 {
924         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
925         uint32_t  max_cmd_idx = pring->numCiocb;
926         if ((pring->next_cmdidx == pring->cmdidx) &&
927            (++pring->next_cmdidx >= max_cmd_idx))
928                 pring->next_cmdidx = 0;
929
930         if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
931
932                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
933
934                 if (unlikely(pring->local_getidx >= max_cmd_idx)) {
935                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
936                                         "0315 Ring %d issue: portCmdGet %d "
937                                         "is bigger than cmd ring %d\n",
938                                         pring->ringno,
939                                         pring->local_getidx, max_cmd_idx);
940
941                         phba->link_state = LPFC_HBA_ERROR;
942                         /*
943                          * All error attention handlers are posted to
944                          * worker thread
945                          */
946                         phba->work_ha |= HA_ERATT;
947                         phba->work_hs = HS_FFER3;
948
949                         lpfc_worker_wake_up(phba);
950
951                         return NULL;
952                 }
953
954                 if (pring->local_getidx == pring->next_cmdidx)
955                         return NULL;
956         }
957
958         return lpfc_cmd_iocb(phba, pring);
959 }
960
961 /**
962  * lpfc_sli_next_iotag - Get an iotag for the iocb
963  * @phba: Pointer to HBA context object.
964  * @iocbq: Pointer to driver iocb object.
965  *
966  * This function gets an iotag for the iocb. If there is no unused iotag and
967  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
968  * array and assigns a new iotag.
969  * The function returns the allocated iotag if successful, else returns zero.
970  * Zero is not a valid iotag.
971  * The caller is not required to hold any lock.
972  **/
973 uint16_t
974 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
975 {
976         struct lpfc_iocbq **new_arr;
977         struct lpfc_iocbq **old_arr;
978         size_t new_len;
979         struct lpfc_sli *psli = &phba->sli;
980         uint16_t iotag;
981
982         spin_lock_irq(&phba->hbalock);
983         iotag = psli->last_iotag;
984         if(++iotag < psli->iocbq_lookup_len) {
985                 psli->last_iotag = iotag;
986                 psli->iocbq_lookup[iotag] = iocbq;
987                 spin_unlock_irq(&phba->hbalock);
988                 iocbq->iotag = iotag;
989                 return iotag;
990         } else if (psli->iocbq_lookup_len < (0xffff
991                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
992                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
993                 spin_unlock_irq(&phba->hbalock);
994                 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
995                                   GFP_KERNEL);
996                 if (new_arr) {
997                         spin_lock_irq(&phba->hbalock);
998                         old_arr = psli->iocbq_lookup;
999                         if (new_len <= psli->iocbq_lookup_len) {
1000                                 /* highly improbable case */
1001                                 kfree(new_arr);
1002                                 iotag = psli->last_iotag;
1003                                 if(++iotag < psli->iocbq_lookup_len) {
1004                                         psli->last_iotag = iotag;
1005                                         psli->iocbq_lookup[iotag] = iocbq;
1006                                         spin_unlock_irq(&phba->hbalock);
1007                                         iocbq->iotag = iotag;
1008                                         return iotag;
1009                                 }
1010                                 spin_unlock_irq(&phba->hbalock);
1011                                 return 0;
1012                         }
1013                         if (psli->iocbq_lookup)
1014                                 memcpy(new_arr, old_arr,
1015                                        ((psli->last_iotag  + 1) *
1016                                         sizeof (struct lpfc_iocbq *)));
1017                         psli->iocbq_lookup = new_arr;
1018                         psli->iocbq_lookup_len = new_len;
1019                         psli->last_iotag = iotag;
1020                         psli->iocbq_lookup[iotag] = iocbq;
1021                         spin_unlock_irq(&phba->hbalock);
1022                         iocbq->iotag = iotag;
1023                         kfree(old_arr);
1024                         return iotag;
1025                 }
1026         } else
1027                 spin_unlock_irq(&phba->hbalock);
1028
1029         lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
1030                         "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1031                         psli->last_iotag);
1032
1033         return 0;
1034 }
1035
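/*
 * Editor's sketch (not part of the upstream driver): pairing an iocbq
 * allocation with lpfc_sli_next_iotag(), following the pattern used when the
 * driver builds its iocb pool. The helper name is illustrative only.
 */
static inline struct lpfc_iocbq *
lpfc_sli_alloc_tagged_iocbq_example(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq;

        iocbq = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
        if (!iocbq)
                return NULL;

        /* zero is not a valid iotag; give the entry back on failure */
        if (!lpfc_sli_next_iotag(phba, iocbq)) {
                kfree(iocbq);
                return NULL;
        }
        return iocbq;
}
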
1036 /**
1037  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1038  * @phba: Pointer to HBA context object.
1039  * @pring: Pointer to driver SLI ring object.
1040  * @iocb: Pointer to iocb slot in the ring.
1041  * @nextiocb: Pointer to driver iocb object which needs to be
1042  *            posted to firmware.
1043  *
1044  * This function is called with hbalock held to post a new iocb to
1045  * the firmware. This function copies the new iocb to the ring iocb slot and
1046  * updates the ring pointers. It adds the new iocb to the txcmplq if there is
1047  * a completion callback for this iocb, else the function will free the
1048  * iocb object.
1049  **/
1050 static void
1051 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1052                 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1053 {
1054         /*
1055          * Set up an iotag
1056          */
1057         nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1058
1059
1060         if (pring->ringno == LPFC_ELS_RING) {
1061                 lpfc_debugfs_slow_ring_trc(phba,
1062                         "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
1063                         *(((uint32_t *) &nextiocb->iocb) + 4),
1064                         *(((uint32_t *) &nextiocb->iocb) + 6),
1065                         *(((uint32_t *) &nextiocb->iocb) + 7));
1066         }
1067
1068         /*
1069          * Issue iocb command to adapter
1070          */
1071         lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1072         wmb();
1073         pring->stats.iocb_cmd++;
1074
1075         /*
1076          * If there is no completion routine to call, we can release the
1077          * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1078          * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1079          */
1080         if (nextiocb->iocb_cmpl)
1081                 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1082         else
1083                 __lpfc_sli_release_iocbq(phba, nextiocb);
1084
1085         /*
1086          * Let the HBA know what IOCB slot will be the next one the
1087          * driver will put a command into.
1088          */
1089         pring->cmdidx = pring->next_cmdidx;
1090         writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1091 }
1092
1093 /**
1094  * lpfc_sli_update_full_ring - Update the chip attention register
1095  * @phba: Pointer to HBA context object.
1096  * @pring: Pointer to driver SLI ring object.
1097  *
1098  * The caller is not required to hold any lock for calling this function.
1099  * This function updates the chip attention bits for the ring to inform firmware
1100  * that there is pending work to be done for this ring and requests an
1101  * interrupt when there is space available in the ring. This function is
1102  * called when the driver is unable to post more iocbs to the ring due
1103  * to unavailability of space in the ring.
1104  **/
1105 static void
1106 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1107 {
1108         int ringno = pring->ringno;
1109
1110         pring->flag |= LPFC_CALL_RING_AVAILABLE;
1111
1112         wmb();
1113
1114         /*
1115          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1116          * The HBA will tell us when an IOCB entry is available.
1117          */
1118         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1119         readl(phba->CAregaddr); /* flush */
1120
1121         pring->stats.iocb_cmd_full++;
1122 }
1123
1124 /**
1125  * lpfc_sli_update_ring - Update chip attention register
1126  * @phba: Pointer to HBA context object.
1127  * @pring: Pointer to driver SLI ring object.
1128  *
1129  * This function updates the chip attention register bit for the
1130  * given ring to inform HBA that there is more work to be done
1131  * in this ring. The caller is not required to hold any lock.
1132  **/
1133 static void
1134 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1135 {
1136         int ringno = pring->ringno;
1137
1138         /*
1139          * Tell the HBA that there is work to do in this ring.
1140          */
1141         if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1142                 wmb();
1143                 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1144                 readl(phba->CAregaddr); /* flush */
1145         }
1146 }
1147
1148 /**
1149  * lpfc_sli_resume_iocb - Process iocbs in the txq
1150  * @phba: Pointer to HBA context object.
1151  * @pring: Pointer to driver SLI ring object.
1152  *
1153  * This function is called with hbalock held to post pending iocbs
1154  * in the txq to the firmware. This function is called when driver
1155  * detects space available in the ring.
1156  **/
1157 static void
1158 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1159 {
1160         IOCB_t *iocb;
1161         struct lpfc_iocbq *nextiocb;
1162
1163         /*
1164          * Check to see if:
1165          *  (a) there is anything on the txq to send
1166          *  (b) link is up
1167          *  (c) link attention events can be processed (fcp ring only)
1168          *  (d) IOCB processing is not blocked by the outstanding mbox command.
1169          */
1170         if (pring->txq_cnt &&
1171             lpfc_is_link_up(phba) &&
1172             (pring->ringno != phba->sli.fcp_ring ||
1173              phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1174
1175                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1176                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1177                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1178
1179                 if (iocb)
1180                         lpfc_sli_update_ring(phba, pring);
1181                 else
1182                         lpfc_sli_update_full_ring(phba, pring);
1183         }
1184
1185         return;
1186 }
1187
1188 /**
1189  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1190  * @phba: Pointer to HBA context object.
1191  * @hbqno: HBQ number.
1192  *
1193  * This function is called with hbalock held to get the next
1194  * available slot for the given HBQ. If there is a free slot
1195  * available for the HBQ, it will return a pointer to the next available
1196  * HBQ entry, else it will return NULL.
1197  **/
1198 static struct lpfc_hbq_entry *
1199 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1200 {
1201         struct hbq_s *hbqp = &phba->hbqs[hbqno];
1202
1203         if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1204             ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1205                 hbqp->next_hbqPutIdx = 0;
1206
1207         if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1208                 uint32_t raw_index = phba->hbq_get[hbqno];
1209                 uint32_t getidx = le32_to_cpu(raw_index);
1210
1211                 hbqp->local_hbqGetIdx = getidx;
1212
1213                 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1214                         lpfc_printf_log(phba, KERN_ERR,
1215                                         LOG_SLI | LOG_VPORT,
1216                                         "1802 HBQ %d: local_hbqGetIdx "
1217                                         "%u is > than hbqp->entry_count %u\n",
1218                                         hbqno, hbqp->local_hbqGetIdx,
1219                                         hbqp->entry_count);
1220
1221                         phba->link_state = LPFC_HBA_ERROR;
1222                         return NULL;
1223                 }
1224
1225                 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1226                         return NULL;
1227         }
1228
1229         return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1230                         hbqp->hbqPutIdx;
1231 }
1232
1233 /**
1234  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1235  * @phba: Pointer to HBA context object.
1236  *
1237  * This function is called with no lock held to free all the
1238  * hbq buffers while uninitializing the SLI interface. It also
1239  * frees the HBQ buffers returned by the firmware but not yet
1240  * processed by the upper layers.
1241  **/
1242 void
1243 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1244 {
1245         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1246         struct hbq_dmabuf *hbq_buf;
1247         unsigned long flags;
1248         int i, hbq_count;
1249         uint32_t hbqno;
1250
1251         hbq_count = lpfc_sli_hbq_count();
1252         /* Return all memory used by all HBQs */
1253         spin_lock_irqsave(&phba->hbalock, flags);
1254         for (i = 0; i < hbq_count; ++i) {
1255                 list_for_each_entry_safe(dmabuf, next_dmabuf,
1256                                 &phba->hbqs[i].hbq_buffer_list, list) {
1257                         hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1258                         list_del(&hbq_buf->dbuf.list);
1259                         (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1260                 }
1261                 phba->hbqs[i].buffer_count = 0;
1262         }
1263         /* Return all HBQ buffers that are in flight */
1264         list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
1265                                  list) {
1266                 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1267                 list_del(&hbq_buf->dbuf.list);
1268                 if (hbq_buf->tag == -1) {
1269                         (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1270                                 (phba, hbq_buf);
1271                 } else {
1272                         hbqno = hbq_buf->tag >> 16;
1273                         if (hbqno >= LPFC_MAX_HBQS)
1274                                 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1275                                         (phba, hbq_buf);
1276                         else
1277                                 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1278                                         hbq_buf);
1279                 }
1280         }
1281
1282         /* Mark the HBQs not in use */
1283         phba->hbq_in_use = 0;
1284         spin_unlock_irqrestore(&phba->hbalock, flags);
1285 }
1286
1287 /**
1288  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
1289  * @phba: Pointer to HBA context object.
1290  * @hbqno: HBQ number.
1291  * @hbq_buf: Pointer to HBQ buffer.
1292  *
1293  * This function is called with the hbalock held to post a
1294  * hbq buffer to the firmware. If the function finds an empty
1295  * slot in the HBQ, it will post the buffer. The function will return
1296  * zero if it successfully posts the buffer, otherwise it will
1297  * return an error.
1298  **/
1299 static int
1300 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
1301                          struct hbq_dmabuf *hbq_buf)
1302 {
1303         return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1304 }
1305
1306 /**
1307  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1308  * @phba: Pointer to HBA context object.
1309  * @hbqno: HBQ number.
1310  * @hbq_buf: Pointer to HBQ buffer.
1311  *
1312  * This function is called with the hbalock held to post a hbq buffer to the
1313  * firmware. If the function finds an empty slot in the HBQ, it will post the
1314  * buffer and place it on the hbq_buffer_list. The function will return zero if
1315  * it successfully post the buffer else it will return an error.
1316  **/
1317 static int
1318 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1319                             struct hbq_dmabuf *hbq_buf)
1320 {
1321         struct lpfc_hbq_entry *hbqe;
1322         dma_addr_t physaddr = hbq_buf->dbuf.phys;
1323
1324         /* Get next HBQ entry slot to use */
1325         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1326         if (hbqe) {
1327                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1328
1329                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1330                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
1331                 hbqe->bde.tus.f.bdeSize = hbq_buf->size;
1332                 hbqe->bde.tus.f.bdeFlags = 0;
1333                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1334                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1335                                 /* Sync SLIM */
1336                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1337                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
1338                                 /* flush */
1339                 readl(phba->hbq_put + hbqno);
1340                 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
1341                 return 0;
1342         } else
1343                 return -ENOMEM;
1344 }
1345
1346 /**
1347  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1348  * @phba: Pointer to HBA context object.
1349  * @hbqno: HBQ number.
1350  * @hbq_buf: Pointer to HBQ buffer.
1351  *
1352  * This function is called with the hbalock held to post an RQE to the SLI4
1353  * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1354  * the hbq_buffer_list and return zero, otherwise it will return an error.
1355  **/
1356 static int
1357 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1358                             struct hbq_dmabuf *hbq_buf)
1359 {
1360         int rc;
1361         struct lpfc_rqe hrqe;
1362         struct lpfc_rqe drqe;
1363
1364         hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1365         hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1366         drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1367         drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1368         rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1369                               &hrqe, &drqe);
1370         if (rc < 0)
1371                 return rc;
1372         hbq_buf->tag = rc;
1373         list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1374         return 0;
1375 }
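
/*
 * Illustrative sketch (not part of the driver build): the generic
 * lpfc_sli_hbq_to_firmware() above indirects through the
 * phba->lpfc_sli_hbq_to_firmware function pointer. The assignment below is
 * only an assumption of how that pointer could be keyed off phba->sli_rev
 * during initialization; the real assignment lives elsewhere in the driver.
 */
#if 0	/* example only */
static void lpfc_example_select_hbq_post_handler(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
	else
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
}
#endif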
1376
1377 /* HBQ for ELS and CT traffic. */
1378 static struct lpfc_hbq_init lpfc_els_hbq = {
1379         .rn = 1,
1380         .entry_count = 200,
1381         .mask_count = 0,
1382         .profile = 0,
1383         .ring_mask = (1 << LPFC_ELS_RING),
1384         .buffer_count = 0,
1385         .init_count = 40,
1386         .add_count = 40,
1387 };
1388
1389 /* HBQ for the extra ring if needed */
1390 static struct lpfc_hbq_init lpfc_extra_hbq = {
1391         .rn = 1,
1392         .entry_count = 200,
1393         .mask_count = 0,
1394         .profile = 0,
1395         .ring_mask = (1 << LPFC_EXTRA_RING),
1396         .buffer_count = 0,
1397         .init_count = 0,
1398         .add_count = 5,
1399 };
1400
1401 /* Array of HBQs */
1402 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
1403         &lpfc_els_hbq,
1404         &lpfc_extra_hbq,
1405 };
1406
1407 /**
1408  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
1409  * @phba: Pointer to HBA context object.
1410  * @hbqno: HBQ number.
1411  * @count: Number of HBQ buffers to be posted.
1412  *
1413  * This function is called with no lock held to post more hbq buffers to the
1414  * given HBQ. The function returns the number of HBQ buffers successfully
1415  * posted.
1416  **/
1417 static int
1418 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
1419 {
1420         uint32_t i, posted = 0;
1421         unsigned long flags;
1422         struct hbq_dmabuf *hbq_buffer;
1423         LIST_HEAD(hbq_buf_list);
1424         if (!phba->hbqs[hbqno].hbq_alloc_buffer)
1425                 return 0;
1426
1427         if ((phba->hbqs[hbqno].buffer_count + count) >
1428             lpfc_hbq_defs[hbqno]->entry_count)
1429                 count = lpfc_hbq_defs[hbqno]->entry_count -
1430                                         phba->hbqs[hbqno].buffer_count;
1431         if (!count)
1432                 return 0;
1433         /* Allocate HBQ entries */
1434         for (i = 0; i < count; i++) {
1435                 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1436                 if (!hbq_buffer)
1437                         break;
1438                 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1439         }
1440         /* Check whether HBQ is still in use */
1441         spin_lock_irqsave(&phba->hbalock, flags);
1442         if (!phba->hbq_in_use)
1443                 goto err;
1444         while (!list_empty(&hbq_buf_list)) {
1445                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1446                                  dbuf.list);
1447                 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1448                                       (hbqno << 16));
1449                 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
1450                         phba->hbqs[hbqno].buffer_count++;
1451                         posted++;
1452                 } else
1453                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1454         }
1455         spin_unlock_irqrestore(&phba->hbalock, flags);
1456         return posted;
1457 err:
1458         spin_unlock_irqrestore(&phba->hbalock, flags);
1459         while (!list_empty(&hbq_buf_list)) {
1460                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1461                                  dbuf.list);
1462                 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1463         }
1464         return 0;
1465 }
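
/*
 * Illustrative sketch (not part of the driver build): the tag assembled in
 * lpfc_sli_hbqbuf_fill_hbqs() packs the HBQ number into the upper 16 bits
 * and a per-HBQ sequence number into the lower 16 bits, which is why
 * lpfc_sli_hbqbuf_find() and lpfc_sli_free_hbq() recover the queue with a
 * plain "tag >> 16". A minimal worked example:
 */
#if 0	/* example only */
static void lpfc_example_hbq_tag(void)
{
	uint32_t tag = 7 | (LPFC_ELS_HBQ << 16);	/* 8th buffer on the ELS HBQ */
	uint32_t hbqno = tag >> 16;			/* == LPFC_ELS_HBQ */
	uint32_t seqno = tag & 0xFFFF;			/* == 7 */

	(void)hbqno;
	(void)seqno;
}
#endif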
1466
1467 /**
1468  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
1469  * @phba: Pointer to HBA context object.
1470  * @qno: HBQ number.
1471  *
1472  * This function posts more buffers to the given HBQ. It
1473  * is called with no lock held. The function returns the number of HBQ buffers
1474  * successfully posted.
1475  **/
1476 int
1477 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1478 {
1479         return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1480                                          lpfc_hbq_defs[qno]->add_count));
1481 }
1482
1483 /**
1484  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
1485  * @phba: Pointer to HBA context object.
1486  * @qno:  HBQ queue number.
1487  *
1488  * This function is called from the SLI initialization code path with
1489  * no lock held to post the initial HBQ buffers to the firmware. The
1490  * function returns the number of HBQ buffers successfully posted.
1491  **/
1492 static int
1493 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1494 {
1495         return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1496                                          lpfc_hbq_defs[qno]->init_count));
1497 }
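
/*
 * Illustrative sketch (not part of the driver build): a caller in the SLI
 * bring-up path could seed the ELS HBQ and later top it up roughly as
 * follows. The warning text below is an arbitrary example, not a message
 * the driver defines.
 */
#if 0	/* example only */
static void lpfc_example_prime_els_hbq(struct lpfc_hba *phba)
{
	int posted;

	/* Post the initial block of receive buffers (init_count of them). */
	posted = lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
	if (posted < (int)lpfc_hbq_defs[LPFC_ELS_HBQ]->init_count)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"Only %d ELS HBQ buffers posted\n", posted);

	/* Later, when buffers are consumed, top the queue up again. */
	lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
}
#endif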
1498
1499 /**
1500  * lpfc_sli_hbqbuf_get - Remove the first hbq buffer off of an hbq list
1501  * @rb_list: Pointer to the hbq buffer list to remove the
1502  *           first buffer from.
1503  *
1504  * This function removes the first hbq buffer on an hbq list and returns a
1505  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1506  **/
1507 static struct hbq_dmabuf *
1508 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1509 {
1510         struct lpfc_dmabuf *d_buf;
1511
1512         list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1513         if (!d_buf)
1514                 return NULL;
1515         return container_of(d_buf, struct hbq_dmabuf, dbuf);
1516 }
1517
1518 /**
1519  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
1520  * @phba: Pointer to HBA context object.
1521  * @tag: Tag of the hbq buffer.
1522  *
1523  * This function searches for the hbq buffer associated with the given
1524  * tag in the hbq buffer list; it takes the hbalock internally, so the
1525  * caller must not hold it. If it finds the hbq buffer, it returns the
1526  * hbq_buffer, otherwise it returns NULL.
1527  **/
1528 static struct hbq_dmabuf *
1529 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
1530 {
1531         struct lpfc_dmabuf *d_buf;
1532         struct hbq_dmabuf *hbq_buf;
1533         uint32_t hbqno;
1534
1535         hbqno = tag >> 16;
1536         if (hbqno >= LPFC_MAX_HBQS)
1537                 return NULL;
1538
1539         spin_lock_irq(&phba->hbalock);
1540         list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
1541                 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
1542                 if (hbq_buf->tag == tag) {
1543                         spin_unlock_irq(&phba->hbalock);
1544                         return hbq_buf;
1545                 }
1546         }
1547         spin_unlock_irq(&phba->hbalock);
1548         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
1549                         "1803 Bad hbq tag. Data: x%x x%x\n",
1550                         tag, phba->hbqs[tag >> 16].buffer_count);
1551         return NULL;
1552 }
1553
1554 /**
1555  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
1556  * @phba: Pointer to HBA context object.
1557  * @hbq_buffer: Pointer to HBQ buffer.
1558  *
1559  * This function is called with the hbalock held. It gives back
1560  * the hbq buffer to the firmware. If the HBQ does not have space to
1561  * post the buffer, it will free the buffer instead.
1562  **/
1563 void
1564 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
1565 {
1566         uint32_t hbqno;
1567
1568         if (hbq_buffer) {
1569                 hbqno = hbq_buffer->tag >> 16;
1570                 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
1571                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1572         }
1573 }
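
/*
 * Illustrative sketch (not part of the driver build): a typical consumer of
 * an unsolicited receive buffer looks the buffer up by the tag carried in
 * the iocb and hands it back through lpfc_sli_free_hbq() when it is done.
 * Locking is elided here; see the kernel-doc comments above for the hbalock
 * requirements of each helper.
 */
#if 0	/* example only */
static void lpfc_example_consume_hbq_buffer(struct lpfc_hba *phba,
					    uint32_t tag)
{
	struct hbq_dmabuf *hbq_buf;

	hbq_buf = lpfc_sli_hbqbuf_find(phba, tag);
	if (!hbq_buf)
		return;

	/* ... inspect hbq_buf->dbuf.virt / hbq_buf->size here ... */

	/* Give the buffer back; it is reposted, or freed if the HBQ is full. */
	lpfc_sli_free_hbq(phba, hbq_buf);
}
#endif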
1574
1575 /**
1576  * lpfc_sli_chk_mbx_command - Check if the mailbox command is legitimate
1577  * @mbxCommand: mailbox command code.
1578  *
1579  * This function is called by the mailbox event handler function to verify
1580  * that the completed mailbox command is a legitimate mailbox command. If the
1581  * completed mailbox command is not known to the function, it will return MBX_SHUTDOWN
1582  * and the mailbox event handler will take the HBA offline.
1583  **/
1584 static int
1585 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1586 {
1587         uint8_t ret;
1588
1589         switch (mbxCommand) {
1590         case MBX_LOAD_SM:
1591         case MBX_READ_NV:
1592         case MBX_WRITE_NV:
1593         case MBX_WRITE_VPARMS:
1594         case MBX_RUN_BIU_DIAG:
1595         case MBX_INIT_LINK:
1596         case MBX_DOWN_LINK:
1597         case MBX_CONFIG_LINK:
1598         case MBX_CONFIG_RING:
1599         case MBX_RESET_RING:
1600         case MBX_READ_CONFIG:
1601         case MBX_READ_RCONFIG:
1602         case MBX_READ_SPARM:
1603         case MBX_READ_STATUS:
1604         case MBX_READ_RPI:
1605         case MBX_READ_XRI:
1606         case MBX_READ_REV:
1607         case MBX_READ_LNK_STAT:
1608         case MBX_REG_LOGIN:
1609         case MBX_UNREG_LOGIN:
1610         case MBX_READ_LA:
1611         case MBX_CLEAR_LA:
1612         case MBX_DUMP_MEMORY:
1613         case MBX_DUMP_CONTEXT:
1614         case MBX_RUN_DIAGS:
1615         case MBX_RESTART:
1616         case MBX_UPDATE_CFG:
1617         case MBX_DOWN_LOAD:
1618         case MBX_DEL_LD_ENTRY:
1619         case MBX_RUN_PROGRAM:
1620         case MBX_SET_MASK:
1621         case MBX_SET_VARIABLE:
1622         case MBX_UNREG_D_ID:
1623         case MBX_KILL_BOARD:
1624         case MBX_CONFIG_FARP:
1625         case MBX_BEACON:
1626         case MBX_LOAD_AREA:
1627         case MBX_RUN_BIU_DIAG64:
1628         case MBX_CONFIG_PORT:
1629         case MBX_READ_SPARM64:
1630         case MBX_READ_RPI64:
1631         case MBX_REG_LOGIN64:
1632         case MBX_READ_LA64:
1633         case MBX_WRITE_WWN:
1634         case MBX_SET_DEBUG:
1635         case MBX_LOAD_EXP_ROM:
1636         case MBX_ASYNCEVT_ENABLE:
1637         case MBX_REG_VPI:
1638         case MBX_UNREG_VPI:
1639         case MBX_HEARTBEAT:
1640         case MBX_PORT_CAPABILITIES:
1641         case MBX_PORT_IOV_CONTROL:
1642         case MBX_SLI4_CONFIG:
1643         case MBX_SLI4_REQ_FTRS:
1644         case MBX_REG_FCFI:
1645         case MBX_UNREG_FCFI:
1646         case MBX_REG_VFI:
1647         case MBX_UNREG_VFI:
1648         case MBX_INIT_VPI:
1649         case MBX_INIT_VFI:
1650         case MBX_RESUME_RPI:
1651                 ret = mbxCommand;
1652                 break;
1653         default:
1654                 ret = MBX_SHUTDOWN;
1655                 break;
1656         }
1657         return ret;
1658 }
1659
1660 /**
1661  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
1662  * @phba: Pointer to HBA context object.
1663  * @pmboxq: Pointer to mailbox command.
1664  *
1665  * This is the completion handler function for mailbox commands issued from
1666  * the lpfc_sli_issue_mbox_wait function. This function is called by the
1667  * mailbox event handler function with no lock held. This function
1668  * will wake up the thread waiting on the wait queue pointed to by context1
1669  * of the mailbox.
1670  **/
1671 void
1672 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
1673 {
1674         wait_queue_head_t *pdone_q;
1675         unsigned long drvr_flag;
1676
1677         /*
1678          * If pdone_q is empty, the driver thread gave up waiting and
1679          * continued running.
1680          */
1681         pmboxq->mbox_flag |= LPFC_MBX_WAKE;
1682         spin_lock_irqsave(&phba->hbalock, drvr_flag);
1683         pdone_q = (wait_queue_head_t *) pmboxq->context1;
1684         if (pdone_q)
1685                 wake_up_interruptible(pdone_q);
1686         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1687         return;
1688 }
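
/*
 * Illustrative sketch (not part of the driver build): a simplified view of
 * the waiting side that pairs with lpfc_sli_wake_mbox_wait(). The real
 * implementation is lpfc_sli_issue_mbox_wait(); this sketch only shows how
 * context1 and LPFC_MBX_WAKE tie the two ends together, and it glosses over
 * the locking and retry handling the driver actually performs.
 */
#if 0	/* example only */
static int lpfc_example_mbox_sync(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
				  unsigned long timeout_ms)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	int rc;

	/* Route completion through the wake handler and point context1 at
	 * the wait queue it will wake.
	 */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	pmboxq->context1 = &done_q;
	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		return rc;

	/* Sleep until the completion handler sets LPFC_MBX_WAKE or the
	 * wait times out.
	 */
	wait_event_interruptible_timeout(done_q,
					 pmboxq->mbox_flag & LPFC_MBX_WAKE,
					 msecs_to_jiffies(timeout_ms));

	return (pmboxq->mbox_flag & LPFC_MBX_WAKE) ? MBX_SUCCESS : MBX_TIMEOUT;
}
#endif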
1689
1690
1691 /**
1692  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
1693  * @phba: Pointer to HBA context object.
1694  * @pmb: Pointer to mailbox object.
1695  *
1696  * This function is the default mailbox completion handler. It
1697  * frees the memory resources associated with the completed mailbox
1698  * command. If the completed command is a REG_LOGIN mailbox command,
1699  * this function will issue an UNREG_LOGIN to reclaim the RPI.
1700  **/
1701 void
1702 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1703 {
1704         struct lpfc_dmabuf *mp;
1705         uint16_t rpi, vpi;
1706         int rc;
1707
1708         mp = (struct lpfc_dmabuf *) (pmb->context1);
1709
1710         if (mp) {
1711                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1712                 kfree(mp);
1713         }
1714
1715         if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
1716             (phba->sli_rev == LPFC_SLI_REV4))
1717                 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
1718
1719         /*
1720          * If a REG_LOGIN succeeded after the node was destroyed or the node
1721          * is in re-discovery, the driver needs to clean up the RPI.
1722          */
1723         if (!(phba->pport->load_flag & FC_UNLOADING) &&
1724             pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
1725             !pmb->u.mb.mbxStatus) {
1726                 rpi = pmb->u.mb.un.varWords[0];
1727                 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
1728                 lpfc_unreg_login(phba, vpi, rpi, pmb);
1729                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1730                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1731                 if (rc != MBX_NOT_FINISHED)
1732                         return;
1733         }
1734
1735         if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
1736                 lpfc_sli4_mbox_cmd_free(phba, pmb);
1737         else
1738                 mempool_free(pmb, phba->mbox_mem_pool);
1739 }
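
/*
 * Illustrative sketch (not part of the driver build): a minimal "fire and
 * forget" mailbox issue that relies on lpfc_sli_def_mbox_cmpl() to release
 * the mailbox when the command completes, mirroring the UNREG_LOGIN
 * clean-up performed above. The function name is made up for the example.
 */
#if 0	/* example only */
static int lpfc_example_unreg_rpi(struct lpfc_hba *phba, uint16_t vpi,
				  uint16_t rpi)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Build the UNREG_LOGIN and let the default handler free the mbox. */
	lpfc_unreg_login(phba, vpi, rpi, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
#endif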
1740
1741 /**
1742  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
1743  * @phba: Pointer to HBA context object.
1744  *
1745  * This function is called with no lock held. It processes all
1746  * the completed mailbox commands and gives them to the upper layers. The
1747  * interrupt service routine processes the mailbox completion interrupt, adds
1748  * the completed mailbox commands to the mboxq_cmpl queue and signals the
1749  * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
1750  * returns the completed mailbox commands on the mboxq_cmpl queue to the
1751  * upper layers by calling the completion handler function of each
1752  * mailbox command.
1753  **/
1754 int
1755 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1756 {
1757         MAILBOX_t *pmbox;
1758         LPFC_MBOXQ_t *pmb;
1759         int rc;
1760         LIST_HEAD(cmplq);
1761
1762         phba->sli.slistat.mbox_event++;
1763
1764         /* Get all completed mailbox buffers into the cmplq */
1765         spin_lock_irq(&phba->hbalock);
1766         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
1767         spin_unlock_irq(&phba->hbalock);
1768
1769         /* Get a Mailbox buffer to setup mailbox commands for callback */
1770         do {
1771                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
1772                 if (pmb == NULL)
1773                         break;
1774
1775                 pmbox = &pmb->u.mb;
1776
1777                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
1778                         if (pmb->vport) {
1779                                 lpfc_debugfs_disc_trc(pmb->vport,
1780                                         LPFC_DISC_TRC_MBOX_VPORT,
1781                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
1782                                         (uint32_t)pmbox->mbxCommand,
1783                                         pmbox->un.varWords[0],
1784                                         pmbox->un.varWords[1]);
1785                         }
1786                         else {
1787                                 lpfc_debugfs_disc_trc(phba->pport,
1788                                         LPFC_DISC_TRC_MBOX,
1789                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
1790                                         (uint32_t)pmbox->mbxCommand,
1791                                         pmbox->un.varWords[0],
1792                                         pmbox->un.varWords[1]);
1793                         }
1794                 }
1795
1796                 /*
1797                  * It is a fatal error if an unknown mbox command completes.
1798                  */
1799                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
1800                     MBX_SHUTDOWN) {
1801                         /* Unknown mailbox command compl */
1802                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1803                                         "(%d):0323 Unknown Mailbox command "
1804                                         "x%x (x%x) Cmpl\n",
1805                                         pmb->vport ? pmb->vport->vpi : 0,
1806                                         pmbox->mbxCommand,
1807                                         lpfc_sli4_mbox_opcode_get(phba, pmb));
1808                         phba->link_state = LPFC_HBA_ERROR;
1809                         phba->work_hs = HS_FFER3;
1810                         lpfc_handle_eratt(phba);
1811                         continue;
1812                 }
1813
1814                 if (pmbox->mbxStatus) {
1815                         phba->sli.slistat.mbox_stat_err++;
1816                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
1817                                 /* Mbox cmd cmpl error - RETRYing */
1818                                 lpfc_printf_log(phba, KERN_INFO,
1819                                                 LOG_MBOX | LOG_SLI,
1820                                                 "(%d):0305 Mbox cmd cmpl "
1821                                                 "error - RETRYing Data: x%x "
1822                                                 "(x%x) x%x x%x x%x\n",
1823                                                 pmb->vport ? pmb->vport->vpi : 0,
1824                                                 pmbox->mbxCommand,
1825                                                 lpfc_sli4_mbox_opcode_get(phba,
1826                                                                           pmb),
1827                                                 pmbox->mbxStatus,
1828                                                 pmbox->un.varWords[0],
1829                                                 pmb->vport ? pmb->vport->port_state : 0);
1830                                 pmbox->mbxStatus = 0;
1831                                 pmbox->mbxOwner = OWN_HOST;
1832                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1833                                 if (rc != MBX_NOT_FINISHED)
1834                                         continue;
1835                         }
1836                 }
1837
1838                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
1839                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
1840                                 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
1841                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
1842                                 pmb->vport ? pmb->vport->vpi : 0,
1843                                 pmbox->mbxCommand,
1844                                 lpfc_sli4_mbox_opcode_get(phba, pmb),
1845                                 pmb->mbox_cmpl,
1846                                 *((uint32_t *) pmbox),
1847                                 pmbox->un.varWords[0],
1848                                 pmbox->un.varWords[1],
1849                                 pmbox->un.varWords[2],
1850                                 pmbox->un.varWords[3],
1851                                 pmbox->un.varWords[4],
1852                                 pmbox->un.varWords[5],
1853                                 pmbox->un.varWords[6],
1854                                 pmbox->un.varWords[7]);
1855
1856                 if (pmb->mbox_cmpl)
1857                         pmb->mbox_cmpl(phba, pmb);
1858         } while (1);
1859         return 0;
1860 }
1861
1862 /**
1863  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
1864  * @phba: Pointer to HBA context object.
1865  * @pring: Pointer to driver SLI ring object.
1866  * @tag: buffer tag.
1867  *
1868  * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
1869  * is set in the tag, the buffer was posted for a particular exchange and
1870  * the function returns the buffer without replacing it.
1871  * If the buffer is for unsolicited ELS or CT traffic, this function
1872  * returns the buffer and also posts another buffer to the firmware.
1873  **/
1874 static struct lpfc_dmabuf *
1875 lpfc_sli_get_buff(struct lpfc_hba *phba,
1876                   struct lpfc_sli_ring *pring,
1877                   uint32_t tag)
1878 {
1879         struct hbq_dmabuf *hbq_entry;
1880
1881         if (tag & QUE_BUFTAG_BIT)
1882                 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
1883         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
1884         if (!hbq_entry)
1885                 return NULL;
1886         return &hbq_entry->dbuf;
1887 }
1888
1889 /**
1890  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
1891  * @phba: Pointer to HBA context object.
1892  * @pring: Pointer to driver SLI ring object.
1893  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
1894  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
1895  * @fch_type: the type for the first frame of the sequence.
1896  *
1897  * This function is called with no lock held. This function uses the r_ctl and
1898  * type of the received sequence to find the correct callback function to call
1899  * to process the sequence.
1900  **/
1901 static int
1902 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1903                          struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
1904                          uint32_t fch_type)
1905 {
1906         int i;
1907
1908         /* Unsolicited responses */
1909         if (pring->prt[0].profile) {
1910                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1911                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1912                                                                         saveq);
1913                 return 1;
1914         }
1915         /* We must search, based on rctl / type
1916            for the right routine */
1917         for (i = 0; i < pring->num_mask; i++) {
1918                 if ((pring->prt[i].rctl == fch_r_ctl) &&
1919                     (pring->prt[i].type == fch_type)) {
1920                         if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1921                                 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1922                                                 (phba, pring, saveq);
1923                         return 1;
1924                 }
1925         }
1926         return 0;
1927 }
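
/*
 * Illustrative sketch (not part of the driver build): the rctl/type dispatch
 * above works against the ring's prt[] mask table. A hypothetical
 * single-entry setup for ELS traffic could look like this; the handler name
 * is made up purely to show the expected signature.
 */
#if 0	/* example only */
static void lpfc_example_rcv_els_event(struct lpfc_hba *phba,
				       struct lpfc_sli_ring *pring,
				       struct lpfc_iocbq *saveq)
{
	/* consume the unsolicited ELS sequence here */
}

static void lpfc_example_setup_els_mask(struct lpfc_sli_ring *pring)
{
	/* profile == 0 keeps the per-entry rctl/type matching in use. */
	pring->num_mask = 1;
	pring->prt[0].profile = 0;
	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
	pring->prt[0].type = FC_TYPE_ELS;
	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_example_rcv_els_event;
}
#endif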
1928
1929 /**
1930  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
1931  * @phba: Pointer to HBA context object.
1932  * @pring: Pointer to driver SLI ring object.
1933  * @saveq: Pointer to the unsolicited iocb.
1934  *
1935  * This function is called with no lock held by the ring event handler
1936  * when there is an unsolicited iocb posted to the response ring by the
1937  * firmware. This function gets the buffer associated with the iocbs
1938  * and calls the event handler for the ring. This function handles both
1939  * qring buffers and hbq buffers.
1940  * When the function returns 1, the caller can free the iocb object; otherwise
1941  * the upper layer functions will free the iocb objects.
1942  **/
1943 static int
1944 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1945                             struct lpfc_iocbq *saveq)
1946 {
1947         IOCB_t           * irsp;
1948         WORD5            * w5p;
1949         uint32_t           Rctl, Type;
1950         uint32_t           match;
1951         struct lpfc_iocbq *iocbq;
1952         struct lpfc_dmabuf *dmzbuf;
1953
1954         match = 0;
1955         irsp = &(saveq->iocb);
1956
1957         if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
1958                 if (pring->lpfc_sli_rcv_async_status)
1959                         pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
1960                 else
1961                         lpfc_printf_log(phba,
1962                                         KERN_WARNING,
1963                                         LOG_SLI,
1964                                         "0316 Ring %d handler: unexpected "
1965                                         "ASYNC_STATUS iocb received evt_code "
1966                                         "0x%x\n",
1967                                         pring->ringno,
1968                                         irsp->un.asyncstat.evt_code);
1969                 return 1;
1970         }
1971
1972         if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
1973                 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
1974                 if (irsp->ulpBdeCount > 0) {
1975                         dmzbuf = lpfc_sli_get_buff(phba, pring,
1976                                         irsp->un.ulpWord[3]);
1977                         lpfc_in_buf_free(phba, dmzbuf);
1978                 }
1979
1980                 if (irsp->ulpBdeCount > 1) {
1981                         dmzbuf = lpfc_sli_get_buff(phba, pring,
1982                                         irsp->unsli3.sli3Words[3]);
1983                         lpfc_in_buf_free(phba, dmzbuf);
1984                 }
1985
1986                 if (irsp->ulpBdeCount > 2) {
1987                         dmzbuf = lpfc_sli_get_buff(phba, pring,
1988                                 irsp->unsli3.sli3Words[7]);
1989                         lpfc_in_buf_free(phba, dmzbuf);
1990                 }
1991
1992                 return 1;
1993         }
1994
1995         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
1996                 if (irsp->ulpBdeCount != 0) {
1997                         saveq->context2 = lpfc_sli_get_buff(phba, pring,
1998                                                 irsp->un.ulpWord[3]);
1999                         if (!saveq->context2)
2000                                 lpfc_printf_log(phba,
2001                                         KERN_ERR,
2002                                         LOG_SLI,
2003                                         "0341 Ring %d Cannot find buffer for "
2004                                         "an unsolicited iocb. tag 0x%x\n",
2005                                         pring->ringno,
2006                                         irsp->un.ulpWord[3]);
2007                 }
2008                 if (irsp->ulpBdeCount == 2) {
2009                         saveq->context3 = lpfc_sli_get_buff(phba, pring,
2010                                                 irsp->unsli3.sli3Words[7]);
2011                         if (!saveq->context3)
2012                                 lpfc_printf_log(phba,
2013                                         KERN_ERR,
2014                                         LOG_SLI,
2015                                         "0342 Ring %d Cannot find buffer for an"
2016                                         " unsolicited iocb. tag 0x%x\n",
2017                                         pring->ringno,
2018                                         irsp->unsli3.sli3Words[7]);
2019                 }
2020                 list_for_each_entry(iocbq, &saveq->list, list) {
2021                         irsp = &(iocbq->iocb);
2022                         if (irsp->ulpBdeCount != 0) {
2023                                 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2024                                                         irsp->un.ulpWord[3]);
2025                                 if (!iocbq->context2)
2026                                         lpfc_printf_log(phba,
2027                                                 KERN_ERR,
2028                                                 LOG_SLI,
2029                                                 "0343 Ring %d Cannot find "
2030                                                 "buffer for an unsolicited iocb"
2031                                                 ". tag 0x%x\n", pring->ringno,
2032                                                 irsp->un.ulpWord[3]);
2033                         }
2034                         if (irsp->ulpBdeCount == 2) {
2035                                 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2036                                                 irsp->unsli3.sli3Words[7]);
2037                                 if (!iocbq->context3)
2038                                         lpfc_printf_log(phba,
2039                                                 KERN_ERR,
2040                                                 LOG_SLI,
2041                                                 "0344 Ring %d Cannot find "
2042                                                 "buffer for an unsolicited "
2043                                                 "iocb. tag 0x%x\n",
2044                                                 pring->ringno,
2045                                                 irsp->unsli3.sli3Words[7]);
2046                         }
2047                 }
2048         }
2049         if (irsp->ulpBdeCount != 0 &&
2050             (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2051              irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2052                 int found = 0;
2053
2054                 /* search continue save q for same XRI */
2055                 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2056                         if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
2057                                 list_add_tail(&saveq->list, &iocbq->list);
2058                                 found = 1;
2059                                 break;
2060                         }
2061                 }
2062                 if (!found)
2063                         list_add_tail(&saveq->clist,
2064                                       &pring->iocb_continue_saveq);
2065                 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2066                         list_del_init(&iocbq->clist);
2067                         saveq = iocbq;
2068                         irsp = &(saveq->iocb);
2069                 } else
2070                         return 0;
2071         }
2072         if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2073             (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2074             (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2075                 Rctl = FC_RCTL_ELS_REQ;
2076                 Type = FC_TYPE_ELS;
2077         } else {
2078                 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2079                 Rctl = w5p->hcsw.Rctl;
2080                 Type = w5p->hcsw.Type;
2081
2082                 /* Firmware Workaround */
2083                 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2084                         (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2085                          irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2086                         Rctl = FC_RCTL_ELS_REQ;
2087                         Type = FC_TYPE_ELS;
2088                         w5p->hcsw.Rctl = Rctl;
2089                         w5p->hcsw.Type = Type;
2090                 }
2091         }
2092
2093         if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2094                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2095                                 "0313 Ring %d handler: unexpected Rctl x%x "
2096                                 "Type x%x received\n",
2097                                 pring->ringno, Rctl, Type);
2098
2099         return 1;
2100 }
2101
2102 /**
2103  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2104  * @phba: Pointer to HBA context object.
2105  * @pring: Pointer to driver SLI ring object.
2106  * @prspiocb: Pointer to response iocb object.
2107  *
2108  * This function looks up the iocb_lookup table to get the command iocb
2109  * corresponding to the given response iocb using the iotag of the
2110  * response iocb. This function is called with the hbalock held.
2111  * This function returns the command iocb object if it finds the command
2112  * iocb, otherwise it returns NULL.
2113  **/
2114 static struct lpfc_iocbq *
2115 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2116                       struct lpfc_sli_ring *pring,
2117                       struct lpfc_iocbq *prspiocb)
2118 {
2119         struct lpfc_iocbq *cmd_iocb = NULL;
2120         uint16_t iotag;
2121
2122         iotag = prspiocb->iocb.ulpIoTag;
2123
2124         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2125                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2126                 list_del_init(&cmd_iocb->list);
2127                 pring->txcmplq_cnt--;
2128                 return cmd_iocb;
2129         }
2130
2131         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2132                         "0317 iotag x%x is out off "
2133                         "range: max iotag x%x wd0 x%x\n",
2134                         iotag, phba->sli.last_iotag,
2135                         *(((uint32_t *) &prspiocb->iocb) + 7));
2136         return NULL;
2137 }
2138
2139 /**
2140  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2141  * @phba: Pointer to HBA context object.
2142  * @pring: Pointer to driver SLI ring object.
2143  * @iotag: IOCB tag.
2144  *
2145  * This function looks up the iocb_lookup table to get the command iocb
2146  * corresponding to the given iotag. This function is called with the
2147  * hbalock held.
2148  * This function returns the command iocb object if it finds the command
2149  * iocb, otherwise it returns NULL.
2150  **/
2151 static struct lpfc_iocbq *
2152 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2153                              struct lpfc_sli_ring *pring, uint16_t iotag)
2154 {
2155         struct lpfc_iocbq *cmd_iocb;
2156
2157         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2158                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2159                 list_del_init(&cmd_iocb->list);
2160                 pring->txcmplq_cnt--;
2161                 return cmd_iocb;
2162         }
2163
2164         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2165                         "0372 iotag x%x is out off range: max iotag (x%x)\n",
2166                         iotag, phba->sli.last_iotag);
2167         return NULL;
2168 }
2169
2170 /**
2171  * lpfc_sli_process_sol_iocb - process solicited iocb completion
2172  * @phba: Pointer to HBA context object.
2173  * @pring: Pointer to driver SLI ring object.
2174  * @saveq: Pointer to the response iocb to be processed.
2175  *
2176  * This function is called by the ring event handler for non-fcp
2177  * rings when there is a new response iocb in the response ring.
2178  * The caller is not required to hold any locks. This function
2179  * gets the command iocb associated with the response iocb and
2180  * calls the completion handler for the command iocb. If there
2181  * is no completion handler, the function will free the resources
2182  * associated with command iocb. If the response iocb is for
2183  * an already aborted command iocb, the status of the completion
2184  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2185  * This function always returns 1.
2186  **/
2187 static int
2188 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2189                           struct lpfc_iocbq *saveq)
2190 {
2191         struct lpfc_iocbq *cmdiocbp;
2192         int rc = 1;
2193         unsigned long iflag;
2194
2195         /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2196         spin_lock_irqsave(&phba->hbalock, iflag);
2197         cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2198         spin_unlock_irqrestore(&phba->hbalock, iflag);
2199
2200         if (cmdiocbp) {
2201                 if (cmdiocbp->iocb_cmpl) {
2202                         /*
2203                          * If an ELS command failed send an event to mgmt
2204                          * application.
2205                          */
2206                         if (saveq->iocb.ulpStatus &&
2207                              (pring->ringno == LPFC_ELS_RING) &&
2208                              (cmdiocbp->iocb.ulpCommand ==
2209                                 CMD_ELS_REQUEST64_CR))
2210                                 lpfc_send_els_failure_event(phba,
2211                                         cmdiocbp, saveq);
2212
2213                         /*
2214                          * Post all ELS completions to the worker thread.
2215                          * All other are passed to the completion callback.
2216                          */
2217                         if (pring->ringno == LPFC_ELS_RING) {
2218                                 if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
2219                                         cmdiocbp->iocb_flag &=
2220                                                 ~LPFC_DRIVER_ABORTED;
2221                                         saveq->iocb.ulpStatus =
2222                                                 IOSTAT_LOCAL_REJECT;
2223                                         saveq->iocb.un.ulpWord[4] =
2224                                                 IOERR_SLI_ABORTED;
2225
2226                                         /* Firmware could still be in progress
2227                                          * of DMAing payload, so don't free data
2228                                          * buffer till after a hbeat.
2229                                          */
2230                                         saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2231                                 }
2232                         }
2233                         (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
2234                 } else
2235                         lpfc_sli_release_iocbq(phba, cmdiocbp);
2236         } else {
2237                 /*
2238                  * Unknown initiating command based on the response iotag.
2239                  * This could be the case on the ELS ring because of
2240                  * lpfc_els_abort().
2241                  */
2242                 if (pring->ringno != LPFC_ELS_RING) {
2243                         /*
2244                          * Ring <ringno> handler: unexpected completion IoTag
2245                          * <IoTag>
2246                          */
2247                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2248                                          "0322 Ring %d handler: "
2249                                          "unexpected completion IoTag x%x "
2250                                          "Data: x%x x%x x%x x%x\n",
2251                                          pring->ringno,
2252                                          saveq->iocb.ulpIoTag,
2253                                          saveq->iocb.ulpStatus,
2254                                          saveq->iocb.un.ulpWord[4],
2255                                          saveq->iocb.ulpCommand,
2256                                          saveq->iocb.ulpContext);
2257                 }
2258         }
2259
2260         return rc;
2261 }
2262
2263 /**
2264  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
2265  * @phba: Pointer to HBA context object.
2266  * @pring: Pointer to driver SLI ring object.
2267  *
2268  * This function is called from the iocb ring event handlers when the
2269  * put pointer is ahead of the get pointer for a ring. This function signals
2270  * an error attention condition to the worker thread and the worker
2271  * thread will transition the HBA to the offline state.
2272  **/
2273 static void
2274 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2275 {
2276         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2277         /*
2278          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2279          * rsp ring <portRspMax>
2280          */
2281         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2282                         "0312 Ring %d handler: portRspPut %d "
2283                         "is bigger than rsp ring %d\n",
2284                         pring->ringno, le32_to_cpu(pgp->rspPutInx),
2285                         pring->numRiocb);
2286
2287         phba->link_state = LPFC_HBA_ERROR;
2288
2289         /*
2290          * All error attention handlers are posted to
2291          * worker thread
2292          */
2293         phba->work_ha |= HA_ERATT;
2294         phba->work_hs = HS_FFER3;
2295
2296         lpfc_worker_wake_up(phba);
2297
2298         return;
2299 }
2300
2301 /**
2302  * lpfc_poll_eratt - Error attention polling timer timeout handler
2303  * @ptr: Pointer to address of HBA context object.
2304  *
2305  * This function is invoked by the Error Attention polling timer when the
2306  * timer times out. It will check the SLI Error Attention register for
2307  * possible attention events. If so, it will post an Error Attention event
2308  * and wake up the worker thread to process it. Otherwise, it will set up the
2309  * Error Attention polling timer for the next poll.
2310  **/
2311 void lpfc_poll_eratt(unsigned long ptr)
2312 {
2313         struct lpfc_hba *phba;
2314         uint32_t eratt = 0;
2315
2316         phba = (struct lpfc_hba *)ptr;
2317
2318         /* Check chip HA register for error event */
2319         eratt = lpfc_sli_check_eratt(phba);
2320
2321         if (eratt)
2322                 /* Tell the worker thread there is work to do */
2323                 lpfc_worker_wake_up(phba);
2324         else
2325                 /* Restart the timer for next eratt poll */
2326                 mod_timer(&phba->eratt_poll, jiffies +
2327                                         HZ * LPFC_ERATT_POLL_INTERVAL);
2328         return;
2329 }
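
/*
 * Illustrative sketch (not part of the driver build): the driver arms this
 * timer during initialization; a minimal setup keyed to the same
 * LPFC_ERATT_POLL_INTERVAL would look roughly like the following. The real
 * setup lives in the driver's init path, not here.
 */
#if 0	/* example only */
static void lpfc_example_start_eratt_poll(struct lpfc_hba *phba)
{
	/* Bind the timer to lpfc_poll_eratt() and arm the first interval. */
	setup_timer(&phba->eratt_poll, lpfc_poll_eratt, (unsigned long)phba);
	mod_timer(&phba->eratt_poll,
		  jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
}
#endif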
2330
2331 /**
2332  * lpfc_sli_poll_fcp_ring - Handle FCP ring completion in polling mode
2333  * @phba: Pointer to HBA context object.
2334  *
2335  * This function is called from lpfc_queuecommand, lpfc_poll_timeout,
2336  * lpfc_abort_handler and lpfc_slave_configure when FCP_RING_POLLING
2337  * is enabled.
2338  *
2339  * The caller does not hold any lock.
2340  * The function processes each response iocb in the response ring until it
2341  * finds an iocb with the LE bit set and chains all the iocbs up to the
2342  * iocb with the LE bit set. The function calls the completion handler of
2343  * the command iocb if the response iocb indicates a command completion or is
2344  * an abort completion.
2345  **/
2346 void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
2347 {
2348         struct lpfc_sli      *psli  = &phba->sli;
2349         struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
2350         IOCB_t *irsp = NULL;
2351         IOCB_t *entry = NULL;
2352         struct lpfc_iocbq *cmdiocbq = NULL;
2353         struct lpfc_iocbq rspiocbq;
2354         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2355         uint32_t status;
2356         uint32_t portRspPut, portRspMax;
2357         int type;
2358         uint32_t rsp_cmpl = 0;
2359         uint32_t ha_copy;
2360         unsigned long iflags;
2361
2362         pring->stats.iocb_event++;
2363
2364         /*
2365          * The next available response entry should never exceed the maximum
2366          * entries.  If it does, treat it as an adapter hardware error.
2367          */
2368         portRspMax = pring->numRiocb;
2369         portRspPut = le32_to_cpu(pgp->rspPutInx);
2370         if (unlikely(portRspPut >= portRspMax)) {
2371                 lpfc_sli_rsp_pointers_error(phba, pring);
2372                 return;
2373         }
2374
2375         rmb();
2376         while (pring->rspidx != portRspPut) {
2377                 entry = lpfc_resp_iocb(phba, pring);
2378                 if (++pring->rspidx >= portRspMax)
2379                         pring->rspidx = 0;
2380
2381                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2382                                       (uint32_t *) &rspiocbq.iocb,
2383                                       phba->iocb_rsp_size);
2384                 irsp = &rspiocbq.iocb;
2385                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2386                 pring->stats.iocb_rsp++;
2387                 rsp_cmpl++;
2388
2389                 if (unlikely(irsp->ulpStatus)) {
2390                         /* Rsp ring <ringno> error: IOCB */
2391                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2392                                         "0326 Rsp Ring %d error: IOCB Data: "
2393                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
2394                                         pring->ringno,
2395                                         irsp->un.ulpWord[0],
2396                                         irsp->un.ulpWord[1],
2397                                         irsp->un.ulpWord[2],
2398                                         irsp->un.ulpWord[3],
2399                                         irsp->un.ulpWord[4],
2400                                         irsp->un.ulpWord[5],
2401                                         *(uint32_t *)&irsp->un1,
2402                                         *((uint32_t *)&irsp->un1 + 1));
2403                 }
2404
2405                 switch (type) {
2406                 case LPFC_ABORT_IOCB:
2407                 case LPFC_SOL_IOCB:
2408                         /*
2409                          * Idle exchange closed via ABTS from port.  No iocb
2410                          * resources need to be recovered.
2411                          */
2412                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
2413                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2414                                                 "0314 IOCB cmd 0x%x "
2415                                                 "processed. Skipping "
2416                                                 "completion",
2417                                                 irsp->ulpCommand);
2418                                 break;
2419                         }
2420
2421                         spin_lock_irqsave(&phba->hbalock, iflags);
2422                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2423                                                          &rspiocbq);
2424                         spin_unlock_irqrestore(&phba->hbalock, iflags);
2425                         if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
2426                                 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2427                                                       &rspiocbq);
2428                         }
2429                         break;
2430                 default:
2431                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2432                                 char adaptermsg[LPFC_MAX_ADPTMSG];
2433                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2434                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2435                                        MAX_MSG_DATA);
2436                                 dev_warn(&((phba->pcidev)->dev),
2437                                          "lpfc%d: %s\n",
2438                                          phba->brd_no, adaptermsg);
2439                         } else {
2440                                 /* Unknown IOCB command */
2441                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2442                                                 "0321 Unknown IOCB command "
2443                                                 "Data: x%x, x%x x%x x%x x%x\n",
2444                                                 type, irsp->ulpCommand,
2445                                                 irsp->ulpStatus,
2446                                                 irsp->ulpIoTag,
2447                                                 irsp->ulpContext);
2448                         }
2449                         break;
2450                 }
2451
2452                 /*
2453                  * The response IOCB has been processed.  Update the ring
2454                  * pointer in SLIM.  If the port response put pointer has not
2455                  * been updated, sync the pgp->rspPutInx and fetch the new port
2456                  * response put pointer.
2457                  */
2458                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2459
2460                 if (pring->rspidx == portRspPut)
2461                         portRspPut = le32_to_cpu(pgp->rspPutInx);
2462         }
2463
2464         ha_copy = readl(phba->HAregaddr);
2465         ha_copy >>= (LPFC_FCP_RING * 4);
2466
2467         if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
2468                 spin_lock_irqsave(&phba->hbalock, iflags);
2469                 pring->stats.iocb_rsp_full++;
2470                 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
2471                 writel(status, phba->CAregaddr);
2472                 readl(phba->CAregaddr);
2473                 spin_unlock_irqrestore(&phba->hbalock, iflags);
2474         }
2475         if ((ha_copy & HA_R0CE_RSP) &&
2476             (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
2477                 spin_lock_irqsave(&phba->hbalock, iflags);
2478                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
2479                 pring->stats.iocb_cmd_empty++;
2480
2481                 /* Force update of the local copy of cmdGetInx */
2482                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
2483                 lpfc_sli_resume_iocb(phba, pring);
2484
2485                 if ((pring->lpfc_sli_cmd_available))
2486                         (pring->lpfc_sli_cmd_available) (phba, pring);
2487
2488                 spin_unlock_irqrestore(&phba->hbalock, iflags);
2489         }
2490
2491         return;
2492 }
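
/*
 * Illustrative sketch (not part of the driver build): when FCP ring polling
 * is enabled via cfg_poll, callers such as the queuecommand and timeout
 * paths simply invoke the poller directly, for example:
 */
#if 0	/* example only */
static void lpfc_example_poll_if_enabled(struct lpfc_hba *phba)
{
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING)
		lpfc_sli_poll_fcp_ring(phba);
}
#endif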
2493
2494 /**
2495  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
2496  * @phba: Pointer to HBA context object.
2497  * @pring: Pointer to driver SLI ring object.
2498  * @mask: Host attention register mask for this ring.
2499  *
2500  * This function is called from the interrupt context when there is a ring
2501  * event for the fcp ring. The caller does not hold any lock.
2502  * The function processes each response iocb in the response ring until it
2503  * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
2504  * LE bit set. The function will call the completion handler of the command iocb
2505  * if the response iocb indicates a completion for a command iocb or it is
2506  * an abort completion. The function will call lpfc_sli_process_unsol_iocb
2507  * function if this is an unsolicited iocb.
2508  * This routine presumes LPFC_FCP_RING handling and doesn't bother
2509  * to check it explicitly. This function always returns 1.
2510  **/
2511 static int
2512 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2513                                 struct lpfc_sli_ring *pring, uint32_t mask)
2514 {
2515         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2516         IOCB_t *irsp = NULL;
2517         IOCB_t *entry = NULL;
2518         struct lpfc_iocbq *cmdiocbq = NULL;
2519         struct lpfc_iocbq rspiocbq;
2520         uint32_t status;
2521         uint32_t portRspPut, portRspMax;
2522         int rc = 1;
2523         lpfc_iocb_type type;
2524         unsigned long iflag;
2525         uint32_t rsp_cmpl = 0;
2526
2527         spin_lock_irqsave(&phba->hbalock, iflag);
2528         pring->stats.iocb_event++;
2529
2530         /*
2531          * The next available response entry should never exceed the maximum
2532          * entries.  If it does, treat it as an adapter hardware error.
2533          */
2534         portRspMax = pring->numRiocb;
2535         portRspPut = le32_to_cpu(pgp->rspPutInx);
2536         if (unlikely(portRspPut >= portRspMax)) {
2537                 lpfc_sli_rsp_pointers_error(phba, pring);
2538                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2539                 return 1;
2540         }
2541
2542         rmb();
2543         while (pring->rspidx != portRspPut) {
2544                 /*
2545                  * Fetch an entry off the ring and copy it into a local data
2546                  * structure.  The copy involves a byte-swap since the
2547                  * network byte order and pci byte orders are different.
2548                  */
2549                 entry = lpfc_resp_iocb(phba, pring);
2550                 phba->last_completion_time = jiffies;
2551
2552                 if (++pring->rspidx >= portRspMax)
2553                         pring->rspidx = 0;
2554
2555                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2556                                       (uint32_t *) &rspiocbq.iocb,
2557                                       phba->iocb_rsp_size);
2558                 INIT_LIST_HEAD(&(rspiocbq.list));
2559                 irsp = &rspiocbq.iocb;
2560
2561                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2562                 pring->stats.iocb_rsp++;
2563                 rsp_cmpl++;
2564
2565                 if (unlikely(irsp->ulpStatus)) {
2566                         /*
2567                          * If resource errors reported from HBA, reduce
2568                          * queuedepths of the SCSI device.
2569                          */
2570                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2571                                 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2572                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2573                                 phba->lpfc_rampdown_queue_depth(phba);
2574                                 spin_lock_irqsave(&phba->hbalock, iflag);
2575                         }
2576
2577                         /* Rsp ring <ringno> error: IOCB */
2578                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2579                                         "0336 Rsp Ring %d error: IOCB Data: "
2580                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
2581                                         pring->ringno,
2582                                         irsp->un.ulpWord[0],
2583                                         irsp->un.ulpWord[1],
2584                                         irsp->un.ulpWord[2],
2585                                         irsp->un.ulpWord[3],
2586                                         irsp->un.ulpWord[4],
2587                                         irsp->un.ulpWord[5],
2588                                         *(uint32_t *)&irsp->un1,
2589                                         *((uint32_t *)&irsp->un1 + 1));
2590                 }
2591
2592                 switch (type) {
2593                 case LPFC_ABORT_IOCB:
2594                 case LPFC_SOL_IOCB:
2595                         /*
2596                          * Idle exchange closed via ABTS from port.  No iocb
2597                          * resources need to be recovered.
2598                          */
2599                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
2600                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2601                                                 "0333 IOCB cmd 0x%x"
2602                                                 " processed. Skipping"
2603                                                 " completion\n",
2604                                                 irsp->ulpCommand);
2605                                 break;
2606                         }
2607
2608                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2609                                                          &rspiocbq);
2610                         if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
2611                                 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2612                                         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2613                                                               &rspiocbq);
2614                                 } else {
2615                                         spin_unlock_irqrestore(&phba->hbalock,
2616                                                                iflag);
2617                                         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2618                                                               &rspiocbq);
2619                                         spin_lock_irqsave(&phba->hbalock,
2620                                                           iflag);
2621                                 }
2622                         }
2623                         break;
2624                 case LPFC_UNSOL_IOCB:
2625                         spin_unlock_irqrestore(&phba->hbalock, iflag);
2626                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2627                         spin_lock_irqsave(&phba->hbalock, iflag);
2628                         break;
2629                 default:
2630                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2631                                 char adaptermsg[LPFC_MAX_ADPTMSG];
2632                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2633                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2634                                        MAX_MSG_DATA);
2635                                 dev_warn(&((phba->pcidev)->dev),
2636                                          "lpfc%d: %s\n",
2637                                          phba->brd_no, adaptermsg);
2638                         } else {
2639                                 /* Unknown IOCB command */
2640                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2641                                                 "0334 Unknown IOCB command "
2642                                                 "Data: x%x, x%x x%x x%x x%x\n",
2643                                                 type, irsp->ulpCommand,
2644                                                 irsp->ulpStatus,
2645                                                 irsp->ulpIoTag,
2646                                                 irsp->ulpContext);
2647                         }
2648                         break;
2649                 }
2650
2651                 /*
2652                  * The response IOCB has been processed.  Update the ring
2653                  * pointer in SLIM.  If the port response put pointer has not
2654                  * been updated, sync the pgp->rspPutInx and fetch the new port
2655                  * response put pointer.
2656                  */
2657                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2658
2659                 if (pring->rspidx == portRspPut)
2660                         portRspPut = le32_to_cpu(pgp->rspPutInx);
2661         }
2662
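        /*
         * At least one response entry has been processed; set RxATT and
         * RxRE_RSP in the Chip Attention register for this ring (mirrors the
         * slow-path handler below).
         */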
2663         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
2664                 pring->stats.iocb_rsp_full++;
2665                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
2666                 writel(status, phba->CAregaddr);
2667                 readl(phba->CAregaddr);
2668         }
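        /*
         * The HBA has consumed command entries; refresh the local cmdGetInx
         * and resume issuing any command iocbs queued on the txq.
         */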
2669         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
2670                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
2671                 pring->stats.iocb_cmd_empty++;
2672
2673                 /* Force update of the local copy of cmdGetInx */
2674                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
2675                 lpfc_sli_resume_iocb(phba, pring);
2676
2677                 if ((pring->lpfc_sli_cmd_available))
2678                         (pring->lpfc_sli_cmd_available) (phba, pring);
2679
2680         }
2681
2682         spin_unlock_irqrestore(&phba->hbalock, iflag);
2683         return rc;
2684 }
2685
2686 /**
2687  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
2688  * @phba: Pointer to HBA context object.
2689  * @pring: Pointer to driver SLI ring object.
2690  * @rspiocbp: Pointer to driver response IOCB object.
2691  *
2692  * This function is called from the worker thread when there is a slow-path
2693  * response IOCB to process. This function chains all the response iocbs until
2694  * seeing the iocb with the LE bit set. The function will call
2695  * lpfc_sli_process_sol_iocb function if the response iocb indicates a
2696  * completion of a command iocb. The function will call the
2697  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
2698  * The function frees the resources or calls the completion handler if this
2699  * iocb is an abort completion. The function returns NULL when the response
2700  * iocb has the LE bit set and all the chained iocbs are processed, otherwise
2701  * this function shall chain the iocb on to the iocb_continueq and return the
2702  * response iocb passed in.
2703  **/
2704 static struct lpfc_iocbq *
2705 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2706                         struct lpfc_iocbq *rspiocbp)
2707 {
2708         struct lpfc_iocbq *saveq;
2709         struct lpfc_iocbq *cmdiocbp;
2710         struct lpfc_iocbq *next_iocb;
2711         IOCB_t *irsp = NULL;
2712         uint32_t free_saveq;
2713         uint8_t iocb_cmd_type;
2714         lpfc_iocb_type type;
2715         unsigned long iflag;
2716         int rc;
2717
2718         spin_lock_irqsave(&phba->hbalock, iflag);
2719         /* First add the response iocb to the continueq list */
2720         list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
2721         pring->iocb_continueq_cnt++;
2722
2723         /* Now, determine whether the list is complete for processing */
2724         irsp = &rspiocbp->iocb;
2725         if (irsp->ulpLe) {
2726                 /*
2727                  * By default, the driver expects to free all resources
2728                  * associated with this iocb completion.
2729                  */
2730                 free_saveq = 1;
2731                 saveq = list_get_first(&pring->iocb_continueq,
2732                                        struct lpfc_iocbq, list);
2733                 irsp = &(saveq->iocb);
2734                 list_del_init(&pring->iocb_continueq);
2735                 pring->iocb_continueq_cnt = 0;
2736
2737                 pring->stats.iocb_rsp++;
2738
2739                 /*
2740                  * If resource errors reported from HBA, reduce
2741                  * queuedepths of the SCSI device.
2742                  */
2743                 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2744                     (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2745                         spin_unlock_irqrestore(&phba->hbalock, iflag);
2746                         phba->lpfc_rampdown_queue_depth(phba);
2747                         spin_lock_irqsave(&phba->hbalock, iflag);
2748                 }
2749
2750                 if (irsp->ulpStatus) {
2751                         /* Rsp ring <ringno> error: IOCB */
2752                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2753                                         "0328 Rsp Ring %d error: "
2754                                         "IOCB Data: "
2755                                         "x%x x%x x%x x%x "
2756                                         "x%x x%x x%x x%x "
2757                                         "x%x x%x x%x x%x "
2758                                         "x%x x%x x%x x%x\n",
2759                                         pring->ringno,
2760                                         irsp->un.ulpWord[0],
2761                                         irsp->un.ulpWord[1],
2762                                         irsp->un.ulpWord[2],
2763                                         irsp->un.ulpWord[3],
2764                                         irsp->un.ulpWord[4],
2765                                         irsp->un.ulpWord[5],
2766                                         *(((uint32_t *) irsp) + 6),
2767                                         *(((uint32_t *) irsp) + 7),
2768                                         *(((uint32_t *) irsp) + 8),
2769                                         *(((uint32_t *) irsp) + 9),
2770                                         *(((uint32_t *) irsp) + 10),
2771                                         *(((uint32_t *) irsp) + 11),
2772                                         *(((uint32_t *) irsp) + 12),
2773                                         *(((uint32_t *) irsp) + 13),
2774                                         *(((uint32_t *) irsp) + 14),
2775                                         *(((uint32_t *) irsp) + 15));
2776                 }
2777
2778                 /*
2779                  * Fetch the IOCB command type and call the correct completion
2780                  * routine. Solicited and Unsolicited IOCBs on the ELS ring
2781                  * get freed back to the lpfc_iocb_list by the discovery
2782                  * kernel thread.
2783                  */
2784                 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2785                 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2786                 switch (type) {
2787                 case LPFC_SOL_IOCB:
2788                         spin_unlock_irqrestore(&phba->hbalock, iflag);
2789                         rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
2790                         spin_lock_irqsave(&phba->hbalock, iflag);
2791                         break;
2792
2793                 case LPFC_UNSOL_IOCB:
2794                         spin_unlock_irqrestore(&phba->hbalock, iflag);
2795                         rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
2796                         spin_lock_irqsave(&phba->hbalock, iflag);
2797                         if (!rc)
2798                                 free_saveq = 0;
2799                         break;
2800
2801                 case LPFC_ABORT_IOCB:
2802                         cmdiocbp = NULL;
2803                         if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
2804                                 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
2805                                                                  saveq);
2806                         if (cmdiocbp) {
2807                                 /* Call the specified completion routine */
2808                                 if (cmdiocbp->iocb_cmpl) {
2809                                         spin_unlock_irqrestore(&phba->hbalock,
2810                                                                iflag);
2811                                         (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
2812                                                               saveq);
2813                                         spin_lock_irqsave(&phba->hbalock,
2814                                                           iflag);
2815                                 } else
2816                                         __lpfc_sli_release_iocbq(phba,
2817                                                                  cmdiocbp);
2818                         }
2819                         break;
2820
2821                 case LPFC_UNKNOWN_IOCB:
2822                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2823                                 char adaptermsg[LPFC_MAX_ADPTMSG];
2824                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2825                                 memcpy(&adaptermsg[0], (uint8_t *)irsp,
2826                                        MAX_MSG_DATA);
2827                                 dev_warn(&((phba->pcidev)->dev),
2828                                          "lpfc%d: %s\n",
2829                                          phba->brd_no, adaptermsg);
2830                         } else {
2831                                 /* Unknown IOCB command */
2832                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2833                                                 "0335 Unknown IOCB "
2834                                                 "command Data: x%x "
2835                                                 "x%x x%x x%x\n",
2836                                                 irsp->ulpCommand,
2837                                                 irsp->ulpStatus,
2838                                                 irsp->ulpIoTag,
2839                                                 irsp->ulpContext);
2840                         }
2841                         break;
2842                 }
2843
2844                 if (free_saveq) {
2845                         list_for_each_entry_safe(rspiocbp, next_iocb,
2846                                                  &saveq->list, list) {
2847                                 list_del(&rspiocbp->list);
2848                                 __lpfc_sli_release_iocbq(phba, rspiocbp);
2849                         }
2850                         __lpfc_sli_release_iocbq(phba, saveq);
2851                 }
2852                 rspiocbp = NULL;
2853         }
2854         spin_unlock_irqrestore(&phba->hbalock, iflag);
2855         return rspiocbp;
2856 }
2857
2858 /**
2859  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
2860  * @phba: Pointer to HBA context object.
2861  * @pring: Pointer to driver SLI ring object.
2862  * @mask: Host attention register mask for this ring.
2863  *
2864  * This routine wraps the actual slow_ring event process routine from the
2865  * API jump table function pointer from the lpfc_hba struct.
2866  **/
2867 void
2868 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2869                                 struct lpfc_sli_ring *pring, uint32_t mask)
2870 {
2871         phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
2872 }
2873
2874 /**
2875  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
2876  * @phba: Pointer to HBA context object.
2877  * @pring: Pointer to driver SLI ring object.
2878  * @mask: Host attention register mask for this ring.
2879  *
2880  * This function is called from the worker thread when there is a ring event
2881  * for non-fcp rings. The caller does not hold any lock. The function will
2882  * remove each response iocb from the response ring and call the handle
2883  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
2884  **/
2885 static void
2886 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
2887                                    struct lpfc_sli_ring *pring, uint32_t mask)
2888 {
2889         struct lpfc_pgp *pgp;
2890         IOCB_t *entry;
2891         IOCB_t *irsp = NULL;
2892         struct lpfc_iocbq *rspiocbp = NULL;
2893         uint32_t portRspPut, portRspMax;
2894         unsigned long iflag;
2895         uint32_t status;
2896
2897         pgp = &phba->port_gp[pring->ringno];
2898         spin_lock_irqsave(&phba->hbalock, iflag);
2899         pring->stats.iocb_event++;
2900
2901         /*
2902          * The next available response entry should never exceed the maximum
2903          * entries.  If it does, treat it as an adapter hardware error.
2904          */
2905         portRspMax = pring->numRiocb;
2906         portRspPut = le32_to_cpu(pgp->rspPutInx);
2907         if (portRspPut >= portRspMax) {
2908                 /*
2909                  * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2910                  * rsp ring <portRspMax>
2911                  */
2912                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2913                                 "0303 Ring %d handler: portRspPut %d "
2914                                 "is bigger than rsp ring %d\n",
2915                                 pring->ringno, portRspPut, portRspMax);
2916
2917                 phba->link_state = LPFC_HBA_ERROR;
2918                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2919
2920                 phba->work_hs = HS_FFER3;
2921                 lpfc_handle_eratt(phba);
2922
2923                 return;
2924         }
2925
2926         rmb();
2927         while (pring->rspidx != portRspPut) {
2928                 /*
2929                  * Build a completion list and call the appropriate handler.
2930                  * The process is to get the next available response iocb, get
2931                  * a free iocb from the list, copy the response data into the
2932                  * free iocb, insert to the continuation list, and update the
2933                  * next response index to slim.  This process makes response
2934                  * iocb's in the ring available to DMA as fast as possible but
2935                  * pays a penalty for a copy operation.  Since the iocb is
2936                  * only 32 bytes, this penalty is considered small relative to
2937                  * the PCI reads for register values and a slim write.  When
2938                  * the ulpLe field is set, the entire Command has been
2939                  * received.
2940                  */
2941                 entry = lpfc_resp_iocb(phba, pring);
2942
2943                 phba->last_completion_time = jiffies;
2944                 rspiocbp = __lpfc_sli_get_iocbq(phba);
2945                 if (rspiocbp == NULL) {
2946                         printk(KERN_ERR "%s: out of buffers! Failing "
2947                                "completion.\n", __func__);
2948                         break;
2949                 }
2950
2951                 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
2952                                       phba->iocb_rsp_size);
2953                 irsp = &rspiocbp->iocb;
2954
2955                 if (++pring->rspidx >= portRspMax)
2956                         pring->rspidx = 0;
2957
2958                 if (pring->ringno == LPFC_ELS_RING) {
2959                         lpfc_debugfs_slow_ring_trc(phba,
2960                         "IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
2961                                 *(((uint32_t *) irsp) + 4),
2962                                 *(((uint32_t *) irsp) + 6),
2963                                 *(((uint32_t *) irsp) + 7));
2964                 }
2965
2966                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2967
2968                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2969                 /* Handle the response IOCB */
2970                 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
2971                 spin_lock_irqsave(&phba->hbalock, iflag);
2972
2973                 /*
2974                  * If the port response put pointer has not been updated, sync
2975                  * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
2976                  * response put pointer.
2977                  */
2978                 if (pring->rspidx == portRspPut) {
2979                         portRspPut = le32_to_cpu(pgp->rspPutInx);
2980                 }
2981         } /* while (pring->rspidx != portRspPut) */
2982
2983         if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
2984                 /* At least one response entry has been freed */
2985                 pring->stats.iocb_rsp_full++;
2986                 /* SET RxRE_RSP in Chip Att register */
2987                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
2988                 writel(status, phba->CAregaddr);
2989                 readl(phba->CAregaddr); /* flush */
2990         }
2991         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
2992                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
2993                 pring->stats.iocb_cmd_empty++;
2994
2995                 /* Force update of the local copy of cmdGetInx */
2996                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
2997                 lpfc_sli_resume_iocb(phba, pring);
2998
2999                 if ((pring->lpfc_sli_cmd_available))
3000                         (pring->lpfc_sli_cmd_available) (phba, pring);
3001
3002         }
3003
3004         spin_unlock_irqrestore(&phba->hbalock, iflag);
3005         return;
3006 }
3007
3008 /**
3009  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3010  * @phba: Pointer to HBA context object.
3011  * @pring: Pointer to driver SLI ring object.
3012  * @mask: Host attention register mask for this ring.
3013  *
3014  * This function is called from the worker thread when there is a pending
3015  * ELS response iocb on the driver internal slow-path response iocb worker
3016  * queue. The caller does not hold any lock. The function will remove each
3017  * response iocb from the response worker queue and call the handle
3018  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3019  **/
3020 static void
3021 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3022                                    struct lpfc_sli_ring *pring, uint32_t mask)
3023 {
3024         struct lpfc_iocbq *irspiocbq;
3025         struct hbq_dmabuf *dmabuf;
3026         struct lpfc_cq_event *cq_event;
3027         unsigned long iflag;
3028
3029         while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
3030                 /* Get the response iocb from the head of work queue */
3031                 spin_lock_irqsave(&phba->hbalock, iflag);
3032                 list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
3033                                  cq_event, struct lpfc_cq_event, list);
3034                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3035
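                /*
                 * Dispatch on the CQE code: WQE completions go through the
                 * common slow-path rspiocb handler, received frames go to the
                 * unsolicited buffer handler.
                 */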
3036                 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3037                 case CQE_CODE_COMPL_WQE:
3038                         irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3039                                                  cq_event);
3040                         lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
3041                         break;
3042                 case CQE_CODE_RECEIVE:
3043                         dmabuf = container_of(cq_event, struct hbq_dmabuf,
3044                                               cq_event);
3045                         lpfc_sli4_handle_received_buffer(phba, dmabuf);
3046                         break;
3047                 default:
3048                         break;
3049                 }
3050         }
3051 }
3052
3053 /**
3054  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3055  * @phba: Pointer to HBA context object.
3056  * @pring: Pointer to driver SLI ring object.
3057  *
3058  * This function aborts all iocbs in the given ring and frees all the iocb
3059  * objects in txq. This function issues an abort iocb for all the iocb commands
3060  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3061  * the return of this function. The caller is not required to hold any locks.
3062  **/
3063 void
3064 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3065 {
3066         LIST_HEAD(completions);
3067         struct lpfc_iocbq *iocb, *next_iocb;
3068
3069         if (pring->ringno == LPFC_ELS_RING) {
3070                 lpfc_fabric_abort_hba(phba);
3071         }
3072
3073         /* Error everything on txq and txcmplq
3074          * First do the txq.
3075          */
3076         spin_lock_irq(&phba->hbalock);
3077         list_splice_init(&pring->txq, &completions);
3078         pring->txq_cnt = 0;
3079
3080         /* Next issue ABTS for everything on the txcmplq */
3081         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3082                 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3083
3084         spin_unlock_irq(&phba->hbalock);
3085
3086         /* Cancel all the IOCBs from the completions list */
3087         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3088                               IOERR_SLI_ABORTED);
3089 }
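/*
 * Illustrative sketch only (hypothetical caller, not taken from this file):
 * an error-recovery path that needs to abort the FCP ring could pass the
 * ring indexed by fcp_ring, as lpfc_sli_flush_fcp_rings() does below:
 *
 *      lpfc_sli_abort_iocb_ring(phba, &phba->sli.ring[phba->sli.fcp_ring]);
 */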
3090
3091 /**
3092  * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3093  * @phba: Pointer to HBA context object.
3094  *
3095  * This function flushes all iocbs in the fcp ring and frees all the iocb
3096  * objects in txq and txcmplq. This function will not issue abort iocbs
3097  * for all the iocb commands in txcmplq, they will just be returned with
3098  * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3099  * slot has been permanently disabled.
3100  **/
3101 void
3102 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3103 {
3104         LIST_HEAD(txq);
3105         LIST_HEAD(txcmplq);
3106         struct lpfc_sli *psli = &phba->sli;
3107         struct lpfc_sli_ring  *pring;
3108
3109         /* Currently, only one fcp ring */
3110         pring = &psli->ring[psli->fcp_ring];
3111
3112         spin_lock_irq(&phba->hbalock);
3113         /* Retrieve everything on txq */
3114         list_splice_init(&pring->txq, &txq);
3115         pring->txq_cnt = 0;
3116
3117         /* Retrieve everything on the txcmplq */
3118         list_splice_init(&pring->txcmplq, &txcmplq);
3119         pring->txcmplq_cnt = 0;
3120         spin_unlock_irq(&phba->hbalock);
3121
3122         /* Flush the txq */
3123         lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3124                               IOERR_SLI_DOWN);
3125
3126         /* Flush the txcmpq */
3127         lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3128                               IOERR_SLI_DOWN);
3129 }
3130
3131 /**
3132  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
3133  * @phba: Pointer to HBA context object.
3134  * @mask: Bit mask to be checked.
3135  *
3136  * This function reads the host status register and compares
3137  * with the provided bit mask to check if the HBA completed
3138  * the restart. This function will wait in a loop for the
3139  * HBA to complete restart. If the HBA does not restart within
3140  * 15 iterations, the function will reset the HBA again. The
3141  * function returns 1 if the HBA fails to restart, otherwise it returns
3142  * zero.
3143  **/
3144 static int
3145 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3146 {
3147         uint32_t status;
3148         int i = 0;
3149         int retval = 0;
3150
3151         /* Read the HBA Host Status Register */
3152         status = readl(phba->HSregaddr);
3153
3154         /*
3155          * Check status register every 100ms for 5 retries, then every
3156          * 500ms for 5, then every 2.5 sec for 5, then reset board and
3157          * every 2.5 sec for 4.
3158          * Break out of the loop if errors occurred during init.
3159          */
3160         while (((status & mask) != mask) &&
3161                !(status & HS_FFERM) &&
3162                i++ < 20) {
3163
3164                 if (i <= 5)
3165                         msleep(10);
3166                 else if (i <= 10)
3167                         msleep(500);
3168                 else
3169                         msleep(2500);
3170
3171                 if (i == 15) {
3172                         /* Do post */
3173                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3174                         lpfc_sli_brdrestart(phba);
3175                 }
3176                 /* Read the HBA Host Status Register */
3177                 status = readl(phba->HSregaddr);
3178         }
3179
3180         /* Check to see if any errors occurred during init */
3181         if ((status & HS_FFERM) || (i >= 20)) {
3182                 phba->link_state = LPFC_HBA_ERROR;
3183                 retval = 1;
3184         }
3185
3186         return retval;
3187 }
3188
3189 /**
3190  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3191  * @phba: Pointer to HBA context object.
3192  * @mask: Bit mask to be checked.
3193  *
3194  * This function checks the host status register to see if the HBA is
3195  * ready. This function will wait in a loop for the HBA to become ready.
3196  * If the HBA is not ready, the function will reset the HBA PCI
3197  * function again. The function returns 1 if the HBA fails to become
3198  * ready, otherwise it returns zero.
3199  **/
3200 static int
3201 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3202 {
3203         uint32_t status;
3204         int retval = 0;
3205
3206         /* Read the HBA Host Status Register */
3207         status = lpfc_sli4_post_status_check(phba);
3208
3209         if (status) {
3210                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3211                 lpfc_sli_brdrestart(phba);
3212                 status = lpfc_sli4_post_status_check(phba);
3213         }
3214
3215         /* Check to see if any errors occurred during init */
3216         if (status) {
3217                 phba->link_state = LPFC_HBA_ERROR;
3218                 retval = 1;
3219         } else
3220                 phba->sli4_hba.intr_enable = 0;
3221
3222         return retval;
3223 }
3224
3225 /**
3226  * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3227  * @phba: Pointer to HBA context object.
3228  * @mask: Bit mask to be checked.
3229  *
3230  * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
3231  * from the API jump table function pointer from the lpfc_hba struct.
3232  **/
3233 int
3234 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3235 {
3236         return phba->lpfc_sli_brdready(phba, mask);
3237 }
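/*
 * Illustrative sketch only, not a call site in this file: a restart path
 * could poll for readiness with the same status bits that
 * lpfc_sli_chipset_init() waits on below, e.g.
 *
 *      if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY))
 *              treat the HBA as failed (hypothetical error handling)
 */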
3238
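/*
 * Scratch value used by lpfc_reset_barrier() below: it is written into SLIM
 * and the barrier then polls for its bitwise complement before proceeding
 * with the reset.
 */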
3239 #define BARRIER_TEST_PATTERN (0xdeadbeef)
3240
3241 /**
3242  * lpfc_reset_barrier - Make HBA ready for HBA reset
3243  * @phba: Pointer to HBA context object.
3244  *
3245  * This function is called before resetting an HBA. This
3246  * function requests the HBA to quiesce DMAs before a reset.
3247  **/
3248 void lpfc_reset_barrier(struct lpfc_hba *phba)
3249 {
3250         uint32_t __iomem *resp_buf;
3251         uint32_t __iomem *mbox_buf;
3252         volatile uint32_t mbox;
3253         uint32_t hc_copy;
3254         int  i;
3255         uint8_t hdrtype;
3256
3257         pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3258         if (hdrtype != 0x80 ||
3259             (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3260              FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3261                 return;
3262
3263         /*
3264          * Tell the other part of the chip to suspend temporarily all
3265          * its DMA activity.
3266          */
3267         resp_buf = phba->MBslimaddr;
3268
3269         /* Disable the error attention */
3270         hc_copy = readl(phba->HCregaddr);
3271         writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3272         readl(phba->HCregaddr); /* flush */
3273         phba->link_flag |= LS_IGNORE_ERATT;
3274
3275         if (readl(phba->HAregaddr) & HA_ERATT) {
3276                 /* Clear Chip error bit */
3277                 writel(HA_ERATT, phba->HAregaddr);
3278                 phba->pport->stopped = 1;
3279         }
3280
3281         mbox = 0;
3282         ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
3283         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
3284
3285         writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
3286         mbox_buf = phba->MBslimaddr;
3287         writel(mbox, mbox_buf);
3288
3289         for (i = 0;
3290              readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
3291                 mdelay(1);
3292
3293         if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
3294                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
3295                     phba->pport->stopped)
3296                         goto restore_hc;
3297                 else
3298                         goto clear_errat;
3299         }
3300
3301         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
3302         for (i = 0; readl(resp_buf) != mbox &&  i < 500; i++)
3303                 mdelay(1);
3304
3305 clear_errat:
3306
3307         while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
3308                 mdelay(1);
3309
3310         if (readl(phba->HAregaddr) & HA_ERATT) {
3311                 writel(HA_ERATT, phba->HAregaddr);
3312                 phba->pport->stopped = 1;
3313         }
3314
3315 restore_hc:
3316         phba->link_flag &= ~LS_IGNORE_ERATT;
3317         writel(hc_copy, phba->HCregaddr);
3318         readl(phba->HCregaddr); /* flush */
3319 }
3320
3321 /**
3322  * lpfc_sli_brdkill - Issue a kill_board mailbox command
3323  * @phba: Pointer to HBA context object.
3324  *
3325  * This function issues a kill_board mailbox command and waits for
3326  * the error attention interrupt. This function is called for stopping
3327  * the firmware processing. The caller is not required to hold any
3328  * locks. This function calls lpfc_hba_down_post function to free
3329  * any pending commands after the kill. The function will return 1 when it
3330  * fails to kill the board else will return 0.
3331  **/
3332 int
3333 lpfc_sli_brdkill(struct lpfc_hba *phba)
3334 {
3335         struct lpfc_sli *psli;
3336         LPFC_MBOXQ_t *pmb;
3337         uint32_t status;
3338         uint32_t ha_copy;
3339         int retval;
3340         int i = 0;
3341
3342         psli = &phba->sli;
3343
3344         /* Kill HBA */
3345         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3346                         "0329 Kill HBA Data: x%x x%x\n",
3347                         phba->pport->port_state, psli->sli_flag);
3348
3349         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3350         if (!pmb)
3351                 return 1;
3352
3353         /* Disable the error attention */
3354         spin_lock_irq(&phba->hbalock);
3355         status = readl(phba->HCregaddr);
3356         status &= ~HC_ERINT_ENA;
3357         writel(status, phba->HCregaddr);
3358         readl(phba->HCregaddr); /* flush */
3359         phba->link_flag |= LS_IGNORE_ERATT;
3360         spin_unlock_irq(&phba->hbalock);
3361
3362         lpfc_kill_board(phba, pmb);
3363         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3364         retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3365
3366         if (retval != MBX_SUCCESS) {
3367                 if (retval != MBX_BUSY)
3368                         mempool_free(pmb, phba->mbox_mem_pool);
3369                 spin_lock_irq(&phba->hbalock);
3370                 phba->link_flag &= ~LS_IGNORE_ERATT;
3371                 spin_unlock_irq(&phba->hbalock);
3372                 return 1;
3373         }
3374
3375         spin_lock_irq(&phba->hbalock);
3376         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3377         spin_unlock_irq(&phba->hbalock);
3378
3379         mempool_free(pmb, phba->mbox_mem_pool);
3380
3381         /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
3382          * attention every 100ms for 3 seconds. If we don't get ERATT after
3383          * 3 seconds we still set HBA_ERROR state because the status of the
3384          * board is now undefined.
3385          */
3386         ha_copy = readl(phba->HAregaddr);
3387
3388         while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
3389                 mdelay(100);
3390                 ha_copy = readl(phba->HAregaddr);
3391         }
3392
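        /*
         * No mailbox completion will arrive for KILL_BOARD, so stop the
         * mailbox timeout timer explicitly.
         */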
3393         del_timer_sync(&psli->mbox_tmo);
3394         if (ha_copy & HA_ERATT) {
3395                 writel(HA_ERATT, phba->HAregaddr);
3396                 phba->pport->stopped = 1;
3397         }
3398         spin_lock_irq(&phba->hbalock);
3399         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3400         psli->mbox_active = NULL;
3401         phba->link_flag &= ~LS_IGNORE_ERATT;
3402         spin_unlock_irq(&phba->hbalock);
3403
3404         lpfc_hba_down_post(phba);
3405         phba->link_state = LPFC_HBA_ERROR;
3406
3407         return ha_copy & HA_ERATT ? 0 : 1;
3408 }
3409
3410 /**
3411  * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
3412  * @phba: Pointer to HBA context object.
3413  *
3414  * This function resets the HBA by writing HC_INITFF to the control
3415  * register. After the HBA resets, this function resets all the iocb ring
3416  * indices. This function disables PCI layer parity checking during
3417  * the reset.
3418  * This function returns 0 always.
3419  * The caller is not required to hold any locks.
3420  **/
3421 int
3422 lpfc_sli_brdreset(struct lpfc_hba *phba)
3423 {
3424         struct lpfc_sli *psli;
3425         struct lpfc_sli_ring *pring;
3426         uint16_t cfg_value;
3427         int i;
3428
3429         psli = &phba->sli;
3430
3431         /* Reset HBA */
3432         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3433                         "0325 Reset HBA Data: x%x x%x\n",
3434                         phba->pport->port_state, psli->sli_flag);
3435
3436         /* perform board reset */
3437         phba->fc_eventTag = 0;
3438         phba->link_events = 0;
3439         phba->pport->fc_myDID = 0;
3440         phba->pport->fc_prevDID = 0;
3441
3442         /* Turn off parity checking and serr during the physical reset */
3443         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3444         pci_write_config_word(phba->pcidev, PCI_COMMAND,
3445                               (cfg_value &
3446                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3447
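        /*
         * Mark the SLI layer inactive and stop link-attention processing
         * while the board is reset.
         */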
3448         psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3449
3450         /* Now toggle INITFF bit in the Host Control Register */
3451         writel(HC_INITFF, phba->HCregaddr);
3452         mdelay(1);
3453         readl(phba->HCregaddr); /* flush */
3454         writel(0, phba->HCregaddr);
3455         readl(phba->HCregaddr); /* flush */
3456
3457         /* Restore PCI cmd register */
3458         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3459
3460         /* Initialize relevant SLI info */
3461         for (i = 0; i < psli->num_rings; i++) {
3462                 pring = &psli->ring[i];
3463                 pring->flag = 0;
3464                 pring->rspidx = 0;
3465                 pring->next_cmdidx  = 0;
3466                 pring->local_getidx = 0;
3467                 pring->cmdidx = 0;
3468                 pring->missbufcnt = 0;
3469         }
3470
3471         phba->link_state = LPFC_WARM_START;
3472         return 0;
3473 }
3474
3475 /**
3476  * lpfc_sli4_brdreset - Reset a sli-4 HBA
3477  * @phba: Pointer to HBA context object.
3478  *
3479  * This function resets a SLI4 HBA. This function disables PCI layer parity
3480  * checking while it resets the device. The caller is not required to hold
3481  * any locks.
3482  *
3483  * This function returns 0 always.
3484  **/
3485 int
3486 lpfc_sli4_brdreset(struct lpfc_hba *phba)
3487 {
3488         struct lpfc_sli *psli = &phba->sli;
3489         uint16_t cfg_value;
3490         uint8_t qindx;
3491
3492         /* Reset HBA */
3493         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3494                         "0295 Reset HBA Data: x%x x%x\n",
3495                         phba->pport->port_state, psli->sli_flag);
3496
3497         /* perform board reset */
3498         phba->fc_eventTag = 0;
3499         phba->link_events = 0;
3500         phba->pport->fc_myDID = 0;
3501         phba->pport->fc_prevDID = 0;
3502
3503         /* Turn off parity checking and serr during the physical reset */
3504         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3505         pci_write_config_word(phba->pcidev, PCI_COMMAND,
3506                               (cfg_value &
3507                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3508
3509         spin_lock_irq(&phba->hbalock);
3510         psli->sli_flag &= ~(LPFC_PROCESS_LA);
3511         phba->fcf.fcf_flag = 0;
3512         /* Clean up the child queue list for the CQs */
3513         list_del_init(&phba->sli4_hba.mbx_wq->list);
3514         list_del_init(&phba->sli4_hba.els_wq->list);
3515         list_del_init(&phba->sli4_hba.hdr_rq->list);
3516         list_del_init(&phba->sli4_hba.dat_rq->list);
3517         list_del_init(&phba->sli4_hba.mbx_cq->list);
3518         list_del_init(&phba->sli4_hba.els_cq->list);
3519         for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3520                 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3521         for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
3522                 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3523         spin_unlock_irq(&phba->hbalock);
3524
3525         /* Now physically reset the device */
3526         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3527                         "0389 Performing PCI function reset!\n");
3528         /* Perform FCoE PCI function reset */
3529         lpfc_pci_function_reset(phba);
3530
3531         return 0;
3532 }
3533
3534 /**
3535  * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
3536  * @phba: Pointer to HBA context object.
3537  *
3538  * This function is called in the SLI initialization code path to
3539  * restart the HBA. The caller is not required to hold any lock.
3540  * This function writes MBX_RESTART mailbox command to the SLIM and
3541  * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
3542  * function to free any pending commands. The function enables
3543  * POST only during the first initialization. The function returns zero.
3544  * The function does not guarantee completion of MBX_RESTART mailbox
3545  * command before the return of this function.
3546  **/
3547 static int
3548 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
3549 {
3550         MAILBOX_t *mb;
3551         struct lpfc_sli *psli;
3552         volatile uint32_t word0;
3553         void __iomem *to_slim;
3554
3555         spin_lock_irq(&phba->hbalock);
3556
3557         psli = &phba->sli;
3558
3559         /* Restart HBA */
3560         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3561                         "0337 Restart HBA Data: x%x x%x\n",
3562                         phba->pport->port_state, psli->sli_flag);
3563
3564         word0 = 0;
3565         mb = (MAILBOX_t *) &word0;
3566         mb->mbxCommand = MBX_RESTART;
3567         mb->mbxHc = 1;
3568
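        /*
         * Quiesce chip DMA before the restart mailbox word is written to
         * SLIM (see lpfc_reset_barrier() above).
         */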
3569         lpfc_reset_barrier(phba);
3570
3571         to_slim = phba->MBslimaddr;
3572         writel(*(uint32_t *) mb, to_slim);
3573         readl(to_slim); /* flush */
3574
3575         /* Skip POST only after fc_ffinit has completed (port_state is non-zero) */
3576         if (phba->pport->port_state)
3577                 word0 = 1;      /* This is really setting up word1 */
3578         else
3579                 word0 = 0;      /* This is really setting up word1 */
3580         to_slim = phba->MBslimaddr + sizeof (uint32_t);
3581         writel(*(uint32_t *) mb, to_slim);
3582         readl(to_slim); /* flush */
3583
3584         lpfc_sli_brdreset(phba);
3585         phba->pport->stopped = 0;
3586         phba->link_state = LPFC_INIT_START;
3587         phba->hba_flag = 0;
3588         spin_unlock_irq(&phba->hbalock);
3589
3590         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3591         psli->stats_start = get_seconds();
3592
3593         /* Give the INITFF and Post time to settle. */
3594         mdelay(100);
3595
3596         lpfc_hba_down_post(phba);
3597
3598         return 0;
3599 }
3600
3601 /**
3602  * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
3603  * @phba: Pointer to HBA context object.
3604  *
3605  * This function is called in the SLI initialization code path to restart
3606  * a SLI4 HBA. The caller is not required to hold any lock.
3607  * At the end of the function, it calls lpfc_hba_down_post function to
3608  * free any pending commands.
3609  **/
3610 static int
3611 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3612 {
3613         struct lpfc_sli *psli = &phba->sli;
3614
3615
3616         /* Restart HBA */
3617         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3618                         "0296 Restart HBA Data: x%x x%x\n",
3619                         phba->pport->port_state, psli->sli_flag);
3620
3621         lpfc_sli4_brdreset(phba);
3622
3623         spin_lock_irq(&phba->hbalock);
3624         phba->pport->stopped = 0;
3625         phba->link_state = LPFC_INIT_START;
3626         phba->hba_flag = 0;
3627         spin_unlock_irq(&phba->hbalock);
3628
3629         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3630         psli->stats_start = get_seconds();
3631
3632         lpfc_hba_down_post(phba);
3633
3634         return 0;
3635 }
3636
3637 /**
3638  * lpfc_sli_brdrestart - Wrapper func for restarting hba
3639  * @phba: Pointer to HBA context object.
3640  *
3641  * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
3642  * API jump table function pointer from the lpfc_hba struct.
3643 **/
3644 int
3645 lpfc_sli_brdrestart(struct lpfc_hba *phba)
3646 {
3647         return phba->lpfc_sli_brdrestart(phba);
3648 }
3649
3650 /**
3651  * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
3652  * @phba: Pointer to HBA context object.
3653  *
3654  * This function is called after a HBA restart to wait for successful
3655  * restart of the HBA. Successful restart of the HBA is indicated by
3656  * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
3657  * iterations, the function will restart the HBA again. The function returns
3658  * zero if HBA successfully restarted else returns negative error code.
3659  **/
3660 static int
3661 lpfc_sli_chipset_init(struct lpfc_hba *phba)
3662 {
3663         uint32_t status, i = 0;
3664
3665         /* Read the HBA Host Status Register */
3666         status = readl(phba->HSregaddr);
3667
3668         /* Check status register to see what current state is */
3669         i = 0;
3670         while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
3671
3672                 /* Check every 100ms for 5 retries, then every 500ms for 5, then
3673                  * every 2.5 sec for 5, then reset board and every 2.5 sec for
3674                  * 4.
3675                  */
3676                 if (i++ >= 20) {
3677                         /* Adapter failed to init, timeout, status reg
3678                            <status> */
3679                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3680                                         "0436 Adapter failed to init, "
3681                                         "timeout, status reg x%x, "
3682                                         "FW Data: A8 x%x AC x%x\n", status,
3683                                         readl(phba->MBslimaddr + 0xa8),
3684                                         readl(phba->MBslimaddr + 0xac));
3685                         phba->link_state = LPFC_HBA_ERROR;
3686                         return -ETIMEDOUT;
3687                 }
3688
3689                 /* Check to see if any errors occurred during init */
3690                 if (status & HS_FFERM) {
3691                         /* ERROR: During chipset initialization */
3692                         /* Adapter failed to init, chipset, status reg
3693                            <status> */
3694                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3695                                         "0437 Adapter failed to init, "
3696                                         "chipset, status reg x%x, "
3697                                         "FW Data: A8 x%x AC x%x\n", status,
3698                                         readl(phba->MBslimaddr + 0xa8),
3699                                         readl(phba->MBslimaddr + 0xac));
3700                         phba->link_state = LPFC_HBA_ERROR;
3701                         return -EIO;
3702                 }
3703
3704                 if (i <= 5) {
3705                         msleep(10);
3706                 } else if (i <= 10) {
3707                         msleep(500);
3708                 } else {
3709                         msleep(2500);
3710                 }
3711
3712                 if (i == 15) {
3713                         /* Do post */
3714                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3715                         lpfc_sli_brdrestart(phba);
3716                 }
3717                 /* Read the HBA Host Status Register */
3718                 status = readl(phba->HSregaddr);
3719         }
3720
3721         /* Check to see if any errors occurred during init */
3722         if (status & HS_FFERM) {
3723                 /* ERROR: During chipset initialization */
3724                 /* Adapter failed to init, chipset, status reg <status> */
3725                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3726                                 "0438 Adapter failed to init, chipset, "
3727                                 "status reg x%x, "
3728                                 "FW Data: A8 x%x AC x%x\n", status,
3729                                 readl(phba->MBslimaddr + 0xa8),
3730                                 readl(phba->MBslimaddr + 0xac));
3731                 phba->link_state = LPFC_HBA_ERROR;
3732                 return -EIO;
3733         }
3734
3735         /* Clear all interrupt enable conditions */
3736         writel(0, phba->HCregaddr);
3737         readl(phba->HCregaddr); /* flush */
3738
3739         /* setup host attn register */
3740         writel(0xffffffff, phba->HAregaddr);
3741         readl(phba->HAregaddr); /* flush */
3742         return 0;
3743 }
3744
3745 /**
3746  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
3747  *
3748  * This function calculates and returns the number of HBQs required to be
3749  * configured.
3750  **/
3751 int
3752 lpfc_sli_hbq_count(void)
3753 {
3754         return ARRAY_SIZE(lpfc_hbq_defs);
3755 }
3756
3757 /**
3758  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
3759  *
3760  * This function adds the number of hbq entries in every HBQ to get
3761  * the total number of hbq entries required for the HBA and returns
3762  * the total count.
3763  **/
3764 static int
3765 lpfc_sli_hbq_entry_count(void)
3766 {
3767         int  hbq_count = lpfc_sli_hbq_count();
3768         int  count = 0;
3769         int  i;
3770
3771         for (i = 0; i < hbq_count; ++i)
3772                 count += lpfc_hbq_defs[i]->entry_count;
3773         return count;
3774 }
3775
3776 /**
3777  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
3778  *
3779  * This function calculates amount of memory required for all hbq entries
3780  * to be configured and returns the total memory required.
3781  **/
3782 int
3783 lpfc_sli_hbq_size(void)
3784 {
3785         return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
3786 }
3787
3788 /**
3789  * lpfc_sli_hbq_setup - configure and initialize HBQs
3790  * @phba: Pointer to HBA context object.
3791  *
3792  * This function is called during the SLI initialization to configure
3793  * all the HBQs and post buffers to the HBQ. The caller is not
3794  * required to hold any locks. This function will return zero if successful
3795  * else it will return negative error code.
3796  **/
3797 static int
3798 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
3799 {
3800         int  hbq_count = lpfc_sli_hbq_count();
3801         LPFC_MBOXQ_t *pmb;
3802         MAILBOX_t *pmbox;
3803         uint32_t hbqno;
3804         uint32_t hbq_entry_index;
3805
3806         /* Get a Mailbox buffer to setup mailbox
3807          * commands for HBA initialization
3808          */
3809         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3810
3811         if (!pmb)
3812                 return -ENOMEM;
3813
3814         pmbox = &pmb->u.mb;
3815
3816         /* Initialize the struct lpfc_sli_hbq structure for each hbq */
3817         phba->link_state = LPFC_INIT_MBX_CMDS;
3818         phba->hbq_in_use = 1;
3819
3820         hbq_entry_index = 0;
3821         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
3822                 phba->hbqs[hbqno].next_hbqPutIdx = 0;
3823                 phba->hbqs[hbqno].hbqPutIdx      = 0;
3824                 phba->hbqs[hbqno].local_hbqGetIdx   = 0;
3825                 phba->hbqs[hbqno].entry_count =
3826                         lpfc_hbq_defs[hbqno]->entry_count;
3827                 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
3828                         hbq_entry_index, pmb);
3829                 hbq_entry_index += phba->hbqs[hbqno].entry_count;
3830
3831                 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
3832                         /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
3833                            mbxStatus <status>, ring <num> */
3834
3835                         lpfc_printf_log(phba, KERN_ERR,
3836                                         LOG_SLI | LOG_VPORT,
3837                                         "1805 Adapter failed to init. "
3838                                         "Data: x%x x%x x%x\n",
3839                                         pmbox->mbxCommand,
3840                                         pmbox->mbxStatus, hbqno);
3841
3842                         phba->link_state = LPFC_HBA_ERROR;
3843                         mempool_free(pmb, phba->mbox_mem_pool);
3844                         return -ENXIO;
3845                 }
3846         }
3847         phba->hbq_count = hbq_count;
3848
3849         mempool_free(pmb, phba->mbox_mem_pool);
3850
3851         /* Initially populate or replenish the HBQs */
3852         for (hbqno = 0; hbqno < hbq_count; ++hbqno)
3853                 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
3854         return 0;
3855 }
3856
3857 /**
3858  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
3859  * @phba: Pointer to HBA context object.
3860  *
3861  * This function is called during the SLI4 initialization to configure
3862  * the receive buffer HBQ and post buffers to it. The caller is not
3863  * required to hold any locks. This function will return zero if successful
3864  * else it will return negative error code.
3865  **/
3866 static int
3867 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
3868 {
3869         phba->hbq_in_use = 1;
3870         phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
3871         phba->hbq_count = 1;
3872         /* Initially populate or replenish the HBQs */
3873         lpfc_sli_hbqbuf_init_hbqs(phba, 0);
3874         return 0;
3875 }
3876
3877 /**
3878  * lpfc_sli_config_port - Issue config port mailbox command
3879  * @phba: Pointer to HBA context object.
3880  * @sli_mode: sli mode - 2/3
3881  *
3882  * This function is called by the sli initialization code path
3883  * to issue config_port mailbox command. This function restarts the
3884  * HBA firmware and issues a config_port mailbox command to configure
3885  * the SLI interface in the sli mode specified by sli_mode
3886  * variable. The caller is not required to hold any locks.
3887  * The function returns 0 if successful, else returns negative error
3888  * code.
3889  **/
3890 int
3891 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3892 {
3893         LPFC_MBOXQ_t *pmb;
3894         uint32_t resetcount = 0, rc = 0, done = 0;
3895
3896         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3897         if (!pmb) {
3898                 phba->link_state = LPFC_HBA_ERROR;
3899                 return -ENOMEM;
3900         }
3901
3902         phba->sli_rev = sli_mode;
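        /* Attempt the restart + CONFIG_PORT sequence at most twice */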
3903         while (resetcount < 2 && !done) {
3904                 spin_lock_irq(&phba->hbalock);
3905                 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
3906                 spin_unlock_irq(&phba->hbalock);
3907                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3908                 lpfc_sli_brdrestart(phba);
3909                 rc = lpfc_sli_chipset_init(phba);
3910                 if (rc)
3911                         break;
3912
3913                 spin_lock_irq(&phba->hbalock);
3914                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3915                 spin_unlock_irq(&phba->hbalock);
3916                 resetcount++;
3917
3918                 /* Call pre CONFIG_PORT mailbox command initialization.  A
3919                  * return value of 0 means the call was successful.  Any
3920                  * nonzero value is a failure; if -ERESTART is returned,
3921                  * the driver may reset the HBA and try again.
3922                  */
3923                 rc = lpfc_config_port_prep(phba);
3924                 if (rc == -ERESTART) {
3925                         phba->link_state = LPFC_LINK_UNKNOWN;
3926                         continue;
3927                 } else if (rc)
3928                         break;
3929                 phba->link_state = LPFC_INIT_MBX_CMDS;
3930                 lpfc_config_port(phba, pmb);
3931                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
3932                 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
3933                                         LPFC_SLI3_HBQ_ENABLED |
3934                                         LPFC_SLI3_CRP_ENABLED |
3935                                         LPFC_SLI3_INB_ENABLED |
3936                                         LPFC_SLI3_BG_ENABLED);
3937                 if (rc != MBX_SUCCESS) {
3938                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3939                                 "0442 Adapter failed to init, mbxCmd x%x "
3940                                 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
3941                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
3942                         spin_lock_irq(&phba->hbalock);
3943                         phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
3944                         spin_unlock_irq(&phba->hbalock);
3945                         rc = -ENXIO;
3946                 } else {
3947                         /* Allow asynchronous mailbox command to go through */
3948                         spin_lock_irq(&phba->hbalock);
3949                         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
3950                         spin_unlock_irq(&phba->hbalock);
3951                         done = 1;
3952                 }
3953         }
3954         if (!done) {
3955                 rc = -EINVAL;
3956                 goto do_prep_failed;
3957         }
3958         if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
3959                 if (!pmb->u.mb.un.varCfgPort.cMA) {
3960                         rc = -ENXIO;
3961                         goto do_prep_failed;
3962                 }
3963                 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
3964                         phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3965                         phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
3966                         phba->max_vports = (phba->max_vpi > phba->max_vports) ?
3967                                 phba->max_vpi : phba->max_vports;
3968
3969                 } else
3970                         phba->max_vpi = 0;
3971                 if (pmb->u.mb.un.varCfgPort.gdss)
3972                         phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
3973                 if (pmb->u.mb.un.varCfgPort.gerbm)
3974                         phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
3975                 if (pmb->u.mb.un.varCfgPort.gcrp)
3976                         phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
3977                 if (pmb->u.mb.un.varCfgPort.ginb) {
3978                         phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
3979                         phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
3980                         phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
3981                         phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
3982                         phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
3983                         phba->inb_last_counter =
3984                                         phba->mbox->us.s3_inb_pgp.counter;
3985                 } else {
3986                         phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
3987                         phba->port_gp = phba->mbox->us.s3_pgp.port;
3988                         phba->inb_ha_copy = NULL;
3989                         phba->inb_counter = NULL;
3990                 }
3991
3992                 if (phba->cfg_enable_bg) {
3993                         if (pmb->u.mb.un.varCfgPort.gbg)
3994                                 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
3995                         else
3996                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3997                                                 "0443 Adapter did not grant "
3998                                                 "BlockGuard\n");
3999                 }
4000         } else {
4001                 phba->hbq_get = NULL;
4002                 phba->port_gp = phba->mbox->us.s2.port;
4003                 phba->inb_ha_copy = NULL;
4004                 phba->inb_counter = NULL;
4005                 phba->max_vpi = 0;
4006         }
4007 do_prep_failed:
4008         mempool_free(pmb, phba->mbox_mem_pool);
4009         return rc;
4010 }
4011
4012
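/*
 * Illustrative sketch (hypothetical caller, not part of the driver): once
 * lpfc_sli_config_port() returns 0, the features granted by the port can be
 * read from phba->sli3_options, e.g. to decide whether vports may be created.
 * Only symbols already used in this file are referenced here.
 *
 *	if (lpfc_sli_config_port(phba, 3) == 0 &&
 *	    (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
 *		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 *				"NPIV granted, max_vports %d\n",
 *				phba->max_vports);
 */
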
4013 /**
4014  * lpfc_sli_hba_setup - SLI initialization function
4015  * @phba: Pointer to HBA context object.
4016  *
4017  * This function is the main SLI initialization function. This function
4018  * is called by the HBA initialization code, HBA reset code and HBA
4019  * error attention handler code. Caller is not required to hold any
4020  * locks. This function issues the config_port mailbox command to configure
4021  * the SLI, sets up the IOCB rings and HBQ rings. In the end the function
4022  * calls the config_port_post function to issue init_link mailbox
4023  * command and to start the discovery. The function will return zero
4024  * if successful, else it will return negative error code.
4025  **/
4026 int
4027 lpfc_sli_hba_setup(struct lpfc_hba *phba)
4028 {
4029         uint32_t rc;
4030         int  mode = 3;
4031
4032         switch (lpfc_sli_mode) {
4033         case 2:
4034                 if (phba->cfg_enable_npiv) {
4035                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4036                                 "1824 NPIV enabled: Override lpfc_sli_mode "
4037                                 "parameter (%d) to auto (0).\n",
4038                                 lpfc_sli_mode);
4039                         break;
4040                 }
4041                 mode = 2;
4042                 break;
4043         case 0:
4044         case 3:
4045                 break;
4046         default:
4047                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4048                                 "1819 Unrecognized lpfc_sli_mode "
4049                                 "parameter: %d.\n", lpfc_sli_mode);
4050
4051                 break;
4052         }
4053
4054         rc = lpfc_sli_config_port(phba, mode);
4055
4056         if (rc && lpfc_sli_mode == 3)
4057                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4058                                 "1820 Unable to select SLI-3.  "
4059                                 "Not supported by adapter.\n");
4060         if (rc && mode != 2)
4061                 rc = lpfc_sli_config_port(phba, 2);
4062         if (rc)
4063                 goto lpfc_sli_hba_setup_error;
4064
4065         if (phba->sli_rev == 3) {
4066                 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4067                 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
4068         } else {
4069                 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4070                 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
4071                 phba->sli3_options = 0;
4072         }
4073
4074         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4075                         "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4076                         phba->sli_rev, phba->max_vpi);
4077         rc = lpfc_sli_ring_map(phba);
4078
4079         if (rc)
4080                 goto lpfc_sli_hba_setup_error;
4081
4082         /* Init HBQs */
4083         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4084                 rc = lpfc_sli_hbq_setup(phba);
4085                 if (rc)
4086                         goto lpfc_sli_hba_setup_error;
4087         }
4088         spin_lock_irq(&phba->hbalock);
4089         phba->sli.sli_flag |= LPFC_PROCESS_LA;
4090         spin_unlock_irq(&phba->hbalock);
4091
4092         rc = lpfc_config_port_post(phba);
4093         if (rc)
4094                 goto lpfc_sli_hba_setup_error;
4095
4096         return rc;
4097
4098 lpfc_sli_hba_setup_error:
4099         phba->link_state = LPFC_HBA_ERROR;
4100         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4101                         "0445 Firmware initialization failed\n");
4102         return rc;
4103 }
4104
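/*
 * Minimal sketch of the mode selection implemented above (assumptions are
 * limited to what the switch and fallback in lpfc_sli_hba_setup show):
 * lpfc_sli_mode 0 or 3 requests SLI-3, 2 requests SLI-2 unless NPIV is
 * enabled, and a failed SLI-3 CONFIG_PORT falls back to SLI-2.
 *
 *	rc = lpfc_sli_config_port(phba, 3);
 *	if (rc)
 *		rc = lpfc_sli_config_port(phba, 2);
 */
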
4105 /**
4106  * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4107  * @phba: Pointer to HBA context object.
4108  * @mboxq: mailbox pointer.
4109  * This function issues a dump mailbox command to read config region
4110  * 23, parses the records in the region and populates the driver
4111  * data structures.
4112  **/
4113 static int
4114 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4115                 LPFC_MBOXQ_t *mboxq)
4116 {
4117         struct lpfc_dmabuf *mp;
4118         struct lpfc_mqe *mqe;
4119         uint32_t data_length;
4120         int rc;
4121
4122         /* Program the default value of vlan_id and fc_map */
4123         phba->valid_vlan = 0;
4124         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4125         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4126         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4127
4128         mqe = &mboxq->u.mqe;
4129         if (lpfc_dump_fcoe_param(phba, mboxq))
4130                 return -ENOMEM;
4131
4132         mp = (struct lpfc_dmabuf *) mboxq->context1;
4133         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4134
4135         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4136                         "(%d):2571 Mailbox cmd x%x Status x%x "
4137                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4138                         "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4139                         "CQ: x%x x%x x%x x%x\n",
4140                         mboxq->vport ? mboxq->vport->vpi : 0,
4141                         bf_get(lpfc_mqe_command, mqe),
4142                         bf_get(lpfc_mqe_status, mqe),
4143                         mqe->un.mb_words[0], mqe->un.mb_words[1],
4144                         mqe->un.mb_words[2], mqe->un.mb_words[3],
4145                         mqe->un.mb_words[4], mqe->un.mb_words[5],
4146                         mqe->un.mb_words[6], mqe->un.mb_words[7],
4147                         mqe->un.mb_words[8], mqe->un.mb_words[9],
4148                         mqe->un.mb_words[10], mqe->un.mb_words[11],
4149                         mqe->un.mb_words[12], mqe->un.mb_words[13],
4150                         mqe->un.mb_words[14], mqe->un.mb_words[15],
4151                         mqe->un.mb_words[16], mqe->un.mb_words[50],
4152                         mboxq->mcqe.word0,
4153                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
4154                         mboxq->mcqe.trailer);
4155
4156         if (rc) {
4157                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4158                 kfree(mp);
4159                 return -EIO;
4160         }
4161         data_length = mqe->un.mb_words[5];
4162         if (data_length > DMP_RGN23_SIZE) {
4163                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4164                 kfree(mp);
4165                 return -EIO;
4166         }
4167
4168         lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4169         lpfc_mbuf_free(phba, mp->virt, mp->phys);
4170         kfree(mp);
4171         return 0;
4172 }
4173
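/*
 * Illustrative sketch (hypothetical, mirrors the call made later in
 * lpfc_sli4_hba_setup): a failure to read config region 23 is treated as
 * non-fatal and the defaults programmed at the top of the routine remain
 * in effect.
 *
 *	if (lpfc_sli4_read_fcoe_params(phba, mboxq))
 *		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
 *				"Failed to read FCoE parameters, "
 *				"using defaults\n");
 */
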
4174 /**
4175  * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4176  * @phba: pointer to lpfc hba data structure.
4177  * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4178  * @vpd: pointer to the memory to hold resulting port vpd data.
4179  * @vpd_size: On input, the number of bytes allocated to @vpd.
4180  *            On output, the number of data bytes in @vpd.
4181  *
4182  * This routine executes a READ_REV SLI4 mailbox command.  In
4183  * addition, this routine gets the port vpd data.
4184  *
4185  * Return codes
4186  *      0 - successful
4187  *      -ENOMEM - could not allocate memory.
4188  **/
4189 static int
4190 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4191                     uint8_t *vpd, uint32_t *vpd_size)
4192 {
4193         int rc = 0;
4194         uint32_t dma_size;
4195         struct lpfc_dmabuf *dmabuf;
4196         struct lpfc_mqe *mqe;
4197
4198         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4199         if (!dmabuf)
4200                 return -ENOMEM;
4201
4202         /*
4203          * Get a DMA buffer for the vpd data resulting from the READ_REV
4204          * mailbox command.
4205          */
4206         dma_size = *vpd_size;
4207         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4208                                           dma_size,
4209                                           &dmabuf->phys,
4210                                           GFP_KERNEL);
4211         if (!dmabuf->virt) {
4212                 kfree(dmabuf);
4213                 return -ENOMEM;
4214         }
4215         memset(dmabuf->virt, 0, dma_size);
4216
4217         /*
4218          * The SLI4 implementation of READ_REV conflicts at word1,
4219          * bits 31:16 and SLI4 adds vpd functionality not present
4220          * in SLI3.  This code corrects the conflicts.
4221          */
4222         lpfc_read_rev(phba, mboxq);
4223         mqe = &mboxq->u.mqe;
4224         mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4225         mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4226         mqe->un.read_rev.word1 &= 0x0000FFFF;
4227         bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4228         bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4229
4230         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4231         if (rc) {
4232                 dma_free_coherent(&phba->pcidev->dev, dma_size,
4233                                   dmabuf->virt, dmabuf->phys);
                     kfree(dmabuf);
4234                 return -EIO;
4235         }
4236
4237         /*
4238          * The available vpd length cannot be bigger than the
4239          * DMA buffer passed to the port.  Catch the less than
4240          * case and update the caller's size.
4241          */
4242         if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4243                 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4244
4245         lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
4246         dma_free_coherent(&phba->pcidev->dev, dma_size,
4247                           dmabuf->virt, dmabuf->phys);
4248         kfree(dmabuf);
4249         return 0;
4250 }
4251
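/*
 * Illustrative sketch (hypothetical caller, based on the usage later in
 * lpfc_sli4_hba_setup): the caller supplies the buffer size in *vpd_size
 * and must honour the possibly smaller value written back on return.
 *
 *	uint32_t vpd_size = PAGE_SIZE;
 *	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
 *
 *	if (vpd && !lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size))
 *		lpfc_parse_vpd(phba, vpd, vpd_size);
 *	kfree(vpd);
 */
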
4252 /**
4253  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4254  * @phba: pointer to lpfc hba data structure.
4255  *
4256  * This routine is called to explicitly arm the SLI4 device's completion and
4257  * event queues.
4258  **/
4259 static void
4260 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4261 {
4262         uint8_t fcp_eqidx;
4263
4264         lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4265         lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4266         for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4267                 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4268                                      LPFC_QUEUE_REARM);
4269         lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4270         for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4271                 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4272                                      LPFC_QUEUE_REARM);
4273 }
4274
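/*
 * Minimal sketch of the rearm pattern used above, shown for a single
 * completion queue / event queue pair (illustrative only):
 *
 *	lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
 *	lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
 */
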
4275 /**
4276  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
4277  * @phba: Pointer to HBA context object.
4278  *
4279  * This function is the main SLI4 device initialization PCI function. This
4280  * function is called by the HBA initialization code, HBA reset code and
4281  * HBA error attention handler code. Caller is not required to hold any
4282  * locks.
4283  **/
4284 int
4285 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4286 {
4287         int rc;
4288         LPFC_MBOXQ_t *mboxq;
4289         struct lpfc_mqe *mqe;
4290         uint8_t *vpd;
4291         uint32_t vpd_size;
4292         uint32_t ftr_rsp = 0;
4293         struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
4294         struct lpfc_vport *vport = phba->pport;
4295         struct lpfc_dmabuf *mp;
4296
4297         /* Perform a PCI function reset to start from clean */
4298         rc = lpfc_pci_function_reset(phba);
4299         if (unlikely(rc))
4300                 return -ENODEV;
4301
4302         /* Check the HBA Host Status Register for readiness */
4303         rc = lpfc_sli4_post_status_check(phba);
4304         if (unlikely(rc))
4305                 return -ENODEV;
4306         else {
4307                 spin_lock_irq(&phba->hbalock);
4308                 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
4309                 spin_unlock_irq(&phba->hbalock);
4310         }
4311
4312         /*
4313          * Allocate a single mailbox container for initializing the
4314          * port.
4315          */
4316         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4317         if (!mboxq)
4318                 return -ENOMEM;
4319
4320         /*
4321          * Continue initialization with default values even if driver failed
4322          * to read FCoE param config regions
4323          */
4324         if (lpfc_sli4_read_fcoe_params(phba, mboxq))
4325                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4326                         "2570 Failed to read FCoE parameters\n");
4327
4328         /* Issue READ_REV to collect vpd and FW information. */
4329         vpd_size = PAGE_SIZE;
4330         vpd = kzalloc(vpd_size, GFP_KERNEL);
4331         if (!vpd) {
4332                 rc = -ENOMEM;
4333                 goto out_free_mbox;
4334         }
4335
4336         rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
4337         if (unlikely(rc))
4338                 goto out_free_vpd;
4339
4340         mqe = &mboxq->u.mqe;
4341         phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
4342         if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
4343                 phba->hba_flag |= HBA_FCOE_SUPPORT;
4344         if (phba->sli_rev != LPFC_SLI_REV4 ||
4345             !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
4346                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4347                         "0376 READ_REV Error. SLI Level %d "
4348                         "FCoE enabled %d\n",
4349                         phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT);
4350                 rc = -EIO;
4351                 goto out_free_vpd;
4352         }
4353         /*
4354          * Evaluate the read rev and vpd data. Populate the driver
4355          * state with the results. If this routine fails, the failure
4356          * is not fatal as the driver will use generic values.
4357          */
4358         rc = lpfc_parse_vpd(phba, vpd, vpd_size);
4359         if (unlikely(!rc)) {
4360                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4361                                 "0377 Error %d parsing vpd. "
4362                                 "Using defaults.\n", rc);
4363                 rc = 0;
4364         }
4365
4366         /* Save information as VPD data */
4367         phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
4368         phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
4369         phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
4370         phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
4371                                          &mqe->un.read_rev);
4372         phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
4373                                        &mqe->un.read_rev);
4374         phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
4375                                             &mqe->un.read_rev);
4376         phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
4377                                            &mqe->un.read_rev);
4378         phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
4379         memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
4380         phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
4381         memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
4382         phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
4383         memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
4384         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4385                         "(%d):0380 READ_REV Status x%x "
4386                         "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
4387                         mboxq->vport ? mboxq->vport->vpi : 0,
4388                         bf_get(lpfc_mqe_status, mqe),
4389                         phba->vpd.rev.opFwName,
4390                         phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
4391                         phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
4392
4393         /*
4394          * Discover the port's supported feature set and match it against the
4395          * hosts requests.
4396          */
4397         lpfc_request_features(phba, mboxq);
4398         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4399         if (unlikely(rc)) {
4400                 rc = -EIO;
4401                 goto out_free_vpd;
4402         }
4403
4404         /*
4405          * The port must support FCP initiator mode as this is the
4406          * only mode running in the host.
4407          */
4408         if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
4409                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4410                                 "0378 No support for fcpi mode.\n");
4411                 ftr_rsp++;
4412         }
4413
4414         /*
4415          * If the port cannot support the host's requested features
4416          * then turn off the global config parameters to disable the
4417          * feature in the driver.  This is not a fatal error.
4418          */
4419         if ((phba->cfg_enable_bg) &&
4420             !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4421                 ftr_rsp++;
4422
4423         if (phba->max_vpi && phba->cfg_enable_npiv &&
4424             !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4425                 ftr_rsp++;
4426
4427         if (ftr_rsp) {
4428                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4429                                 "0379 Feature Mismatch Data: x%08x %08x "
4430                                 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
4431                                 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
4432                                 phba->cfg_enable_npiv, phba->max_vpi);
4433                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4434                         phba->cfg_enable_bg = 0;
4435                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4436                         phba->cfg_enable_npiv = 0;
4437         }
4438
4439         /* These SLI3 features are assumed in SLI4 */
4440         spin_lock_irq(&phba->hbalock);
4441         phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
4442         spin_unlock_irq(&phba->hbalock);
4443
4444         /* Read the port's service parameters. */
4445         lpfc_read_sparam(phba, mboxq, vport->vpi);
4446         mboxq->vport = vport;
4447         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4448         mp = (struct lpfc_dmabuf *) mboxq->context1;
4449         if (rc == MBX_SUCCESS) {
4450                 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
4451                 rc = 0;
4452         }
4453
4454         /*
4455          * This memory was allocated by the lpfc_read_sparam routine. Release
4456          * it to the mbuf pool.
4457          */
4458         lpfc_mbuf_free(phba, mp->virt, mp->phys);
4459         kfree(mp);
4460         mboxq->context1 = NULL;
4461         if (unlikely(rc)) {
4462                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4463                                 "0382 READ_SPARAM command failed "
4464                                 "status %d, mbxStatus x%x\n",
4465                                 rc, bf_get(lpfc_mqe_status, mqe));
4466                 phba->link_state = LPFC_HBA_ERROR;
4467                 rc = -EIO;
4468                 goto out_free_vpd;
4469         }
4470
4471         if (phba->cfg_soft_wwnn)
4472                 u64_to_wwn(phba->cfg_soft_wwnn,
4473                            vport->fc_sparam.nodeName.u.wwn);
4474         if (phba->cfg_soft_wwpn)
4475                 u64_to_wwn(phba->cfg_soft_wwpn,
4476                            vport->fc_sparam.portName.u.wwn);
4477         memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
4478                sizeof(struct lpfc_name));
4479         memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
4480                sizeof(struct lpfc_name));
4481
4482         /* Update the fc_host data structures with new wwn. */
4483         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4484         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4485
4486         /* Register SGL pool to the device using non-embedded mailbox command */
4487         rc = lpfc_sli4_post_sgl_list(phba);
4488         if (unlikely(rc)) {
4489                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4490                                 "0582 Error %d during sgl post operation\n",
4491                                         rc);
4492                 rc = -ENODEV;
4493                 goto out_free_vpd;
4494         }
4495
4496         /* Register SCSI SGL pool to the device */
4497         rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4498         if (unlikely(rc)) {
4499                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4500                                 "0383 Error %d during scsi sgl post "
4501                                 "operation\n", rc);
4502                 /* Some SCSI buffers were moved to the abort scsi list;
4503                  * a PCI function reset will repost them */
4504                 rc = -ENODEV;
4505                 goto out_free_vpd;
4506         }
4507
4508         /* Post the rpi header region to the device. */
4509         rc = lpfc_sli4_post_all_rpi_hdrs(phba);
4510         if (unlikely(rc)) {
4511                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4512                                 "0393 Error %d during rpi post operation\n",
4513                                 rc);
4514                 rc = -ENODEV;
4515                 goto out_free_vpd;
4516         }
4517         if (phba->cfg_enable_fip)
4518                 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1);
4519         else
4520                 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
4521
4522         /* Set up all the queues to the device */
4523         rc = lpfc_sli4_queue_setup(phba);
4524         if (unlikely(rc)) {
4525                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4526                                 "0381 Error %d during queue setup.\n ", rc);
4527                 goto out_stop_timers;
4528         }
4529
4530         /* Arm the CQs and then EQs on device */
4531         lpfc_sli4_arm_cqeq_intr(phba);
4532
4533         /* Indicate device interrupt mode */
4534         phba->sli4_hba.intr_enable = 1;
4535
4536         /* Allow asynchronous mailbox command to go through */
4537         spin_lock_irq(&phba->hbalock);
4538         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4539         spin_unlock_irq(&phba->hbalock);
4540
4541         /* Post receive buffers to the device */
4542         lpfc_sli4_rb_setup(phba);
4543
4544         /* Start the ELS watchdog timer */
4545         mod_timer(&vport->els_tmofunc,
4546                   jiffies + HZ * (phba->fc_ratov * 2));
4547
4548         /* Start heart beat timer */
4549         mod_timer(&phba->hb_tmofunc,
4550                   jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
4551         phba->hb_outstanding = 0;
4552         phba->last_completion_time = jiffies;
4553
4554         /* Start error attention (ERATT) polling timer */
4555         mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
4556
4557         /*
4558          * The port is ready, set the host's link state to LINK_DOWN
4559          * in preparation for link interrupts.
4560          */
4561         lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
4562         mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4563         lpfc_set_loopback_flag(phba);
4564         /* Change driver state to LPFC_LINK_DOWN right before init link */
4565         spin_lock_irq(&phba->hbalock);
4566         phba->link_state = LPFC_LINK_DOWN;
4567         spin_unlock_irq(&phba->hbalock);
4568         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
4569         if (unlikely(rc != MBX_NOT_FINISHED)) {
4570                 kfree(vpd);
4571                 return 0;
4572         } else
4573                 rc = -EIO;
4574
4575         /* Unset all the queues set up in this routine when error out */
4576         if (rc)
4577                 lpfc_sli4_queue_unset(phba);
4578
4579 out_stop_timers:
4580         if (rc)
4581                 lpfc_stop_hba_timers(phba);
4582 out_free_vpd:
4583         kfree(vpd);
4584 out_free_mbox:
4585         mempool_free(mboxq, phba->mbox_mem_pool);
4586         return rc;
4587 }
4588
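/*
 * Illustrative sketch (hypothetical, condensed from the tail of
 * lpfc_sli4_hba_setup above): once the queues are armed, the port is
 * brought up by posting INIT_LINK without waiting, with the default
 * completion handler releasing the mailbox; the caller only cleans up
 * when the post itself fails.
 *
 *	lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
 *	mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	if (lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT) == MBX_NOT_FINISHED)
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 */
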
4589 /**
4590  * lpfc_mbox_timeout - Timeout call back function for mbox timer
4591  * @ptr: context object - pointer to hba structure.
4592  *
4593  * This is the callback function for mailbox timer. The mailbox
4594  * timer is armed when a new mailbox command is issued and the timer
4595  * is deleted when the mailbox complete. The function is called by
4596  * the kernel timer code when a mailbox does not complete within
4597  * expected time. This function wakes up the worker thread to
4598  * process the mailbox timeout and returns. All the processing is
4599  * done by the worker thread function lpfc_mbox_timeout_handler.
4600  **/
4601 void
4602 lpfc_mbox_timeout(unsigned long ptr)
4603 {
4604         struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
4605         unsigned long iflag;
4606         uint32_t tmo_posted;
4607
4608         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
4609         tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
4610         if (!tmo_posted)
4611                 phba->pport->work_port_events |= WORKER_MBOX_TMO;
4612         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
4613
4614         if (!tmo_posted)
4615                 lpfc_worker_wake_up(phba);
4616         return;
4617 }
4618
4619
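/*
 * Minimal sketch of how this callback is armed (taken from the pattern used
 * in lpfc_sli_issue_mbox_s3 below, with psli and mb as that function's
 * locals): the timeout is command specific, and on expiry the callback only
 * posts WORKER_MBOX_TMO and wakes the worker thread.
 *
 *	mod_timer(&psli->mbox_tmo,
 *		  jiffies + HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand));
 */
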
4620 /**
4621  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
4622  * @phba: Pointer to HBA context object.
4623  *
4624  * This function is called from worker thread when a mailbox command times out.
4625  * The caller is not required to hold any locks. This function will reset the
4626  * HBA and recover all the pending commands.
4627  **/
4628 void
4629 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
4630 {
4631         LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
4632         MAILBOX_t *mb = &pmbox->u.mb;
4633         struct lpfc_sli *psli = &phba->sli;
4634         struct lpfc_sli_ring *pring;
4635
4636         /* Check the pmbox pointer first.  There is a race condition
4637          * between the mbox timeout handler getting executed in the
4638          * worklist and the mailbox actually completing. When this
4639          * race condition occurs, the mbox_active will be NULL.
4640          */
4641         spin_lock_irq(&phba->hbalock);
4642         if (pmbox == NULL) {
4643                 lpfc_printf_log(phba, KERN_WARNING,
4644                                 LOG_MBOX | LOG_SLI,
4645                                 "0353 Active Mailbox cleared - mailbox timeout "
4646                                 "exiting\n");
4647                 spin_unlock_irq(&phba->hbalock);
4648                 return;
4649         }
4650
4651         /* Mbox cmd <mbxCommand> timeout */
4652         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4653                         "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
4654                         mb->mbxCommand,
4655                         phba->pport->port_state,
4656                         phba->sli.sli_flag,
4657                         phba->sli.mbox_active);
4658         spin_unlock_irq(&phba->hbalock);
4659
4660         /* Setting state unknown so lpfc_sli_abort_iocb_ring
4661          * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
4662          * it to fail all outstanding SCSI IO.
4663          */
4664         spin_lock_irq(&phba->pport->work_port_lock);
4665         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
4666         spin_unlock_irq(&phba->pport->work_port_lock);
4667         spin_lock_irq(&phba->hbalock);
4668         phba->link_state = LPFC_LINK_UNKNOWN;
4669         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4670         spin_unlock_irq(&phba->hbalock);
4671
4672         pring = &psli->ring[psli->fcp_ring];
4673         lpfc_sli_abort_iocb_ring(phba, pring);
4674
4675         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4676                         "0345 Resetting board due to mailbox timeout\n");
4677
4678         /* Reset the HBA device */
4679         lpfc_reset_hba(phba);
4680 }
4681
4682 /**
4683  * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
4684  * @phba: Pointer to HBA context object.
4685  * @pmbox: Pointer to mailbox object.
4686  * @flag: Flag indicating how the mailbox needs to be processed.
4687  *
4688  * This function is called by discovery code and HBA management code
4689  * to submit a mailbox command to firmware with SLI-3 interface spec. This
4690  * function gets the hbalock to protect the data structures.
4691  * The mailbox command can be submitted in polling mode, in which case
4692  * this function will wait in a polling loop for the completion of the
4693  * mailbox.
4694  * If the mailbox is submitted in no_wait mode (not polling) the
4695  * function will submit the command and return immediately without waiting
4696  * for the mailbox completion. The no_wait mode is supported only when the
4697  * HBA is in SLI2/SLI3 mode - interrupts are enabled.
4698  * The SLI interface allows only one mailbox pending at a time. If the
4699  * mailbox is issued in polling mode and there is already a mailbox
4700  * pending, then the function will return an error. If the mailbox is issued
4701  * in NO_WAIT mode and there is a mailbox pending already, the function
4702  * will return MBX_BUSY after queuing the mailbox into mailbox queue.
4703  * The sli layer owns the mailbox object until the completion of mailbox
4704  * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
4705  * return codes the caller owns the mailbox command after the return of
4706  * the function.
4707  **/
4708 static int
4709 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
4710                        uint32_t flag)
4711 {
4712         MAILBOX_t *mb;
4713         struct lpfc_sli *psli = &phba->sli;
4714         uint32_t status, evtctr;
4715         uint32_t ha_copy;
4716         int i;
4717         unsigned long timeout;
4718         unsigned long drvr_flag = 0;
4719         uint32_t word0, ldata;
4720         void __iomem *to_slim;
4721         int processing_queue = 0;
4722
4723         spin_lock_irqsave(&phba->hbalock, drvr_flag);
4724         if (!pmbox) {
4725                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4726                 /* processing mbox queue from intr_handler */
4727                 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
4728                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4729                         return MBX_SUCCESS;
4730                 }
4731                 processing_queue = 1;
4732                 pmbox = lpfc_mbox_get(phba);
4733                 if (!pmbox) {
4734                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4735                         return MBX_SUCCESS;
4736                 }
4737         }
4738
4739         if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
4740                 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
4741                 if (!pmbox->vport) {
4742                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4743                         lpfc_printf_log(phba, KERN_ERR,
4744                                         LOG_MBOX | LOG_VPORT,
4745                                         "1806 Mbox x%x failed. No vport\n",
4746                                         pmbox->u.mb.mbxCommand);
4747                         dump_stack();
4748                         goto out_not_finished;
4749                 }
4750         }
4751
4752         /* If the PCI channel is in offline state, do not post mbox. */
4753         if (unlikely(pci_channel_offline(phba->pcidev))) {
4754                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4755                 goto out_not_finished;
4756         }
4757
4758         /* If HBA has a deferred error attention, fail the iocb. */
4759         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
4760                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4761                 goto out_not_finished;
4762         }
4763
4764         psli = &phba->sli;
4765
4766         mb = &pmbox->u.mb;
4767         status = MBX_SUCCESS;
4768
4769         if (phba->link_state == LPFC_HBA_ERROR) {
4770                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4771
4772                 /* Mbox command <mbxCommand> cannot issue */
4773                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4774                                 "(%d):0311 Mailbox command x%x cannot "
4775                                 "issue Data: x%x x%x\n",
4776                                 pmbox->vport ? pmbox->vport->vpi : 0,
4777                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
4778                 goto out_not_finished;
4779         }
4780
4781         if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
4782             !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
4783                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4784                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4785                                 "(%d):2528 Mailbox command x%x cannot "
4786                                 "issue Data: x%x x%x\n",
4787                                 pmbox->vport ? pmbox->vport->vpi : 0,
4788                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
4789                 goto out_not_finished;
4790         }
4791
4792         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
4793                 /* Polling for a mbox command when another one is already active
4794                  * is not allowed in SLI. Also, the driver must have established
4795                  * SLI2 mode to queue and process multiple mbox commands.
4796                  */
4797
4798                 if (flag & MBX_POLL) {
4799                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4800
4801                         /* Mbox command <mbxCommand> cannot issue */
4802                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4803                                         "(%d):2529 Mailbox command x%x "
4804                                         "cannot issue Data: x%x x%x\n",
4805                                         pmbox->vport ? pmbox->vport->vpi : 0,
4806                                         pmbox->u.mb.mbxCommand,
4807                                         psli->sli_flag, flag);
4808                         goto out_not_finished;
4809                 }
4810
4811                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
4812                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4813                         /* Mbox command <mbxCommand> cannot issue */
4814                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4815                                         "(%d):2530 Mailbox command x%x "
4816                                         "cannot issue Data: x%x x%x\n",
4817                                         pmbox->vport ? pmbox->vport->vpi : 0,
4818                                         pmbox->u.mb.mbxCommand,
4819                                         psli->sli_flag, flag);
4820                         goto out_not_finished;
4821                 }
4822
4823                 /* Another mailbox command is still being processed, queue this
4824                  * command to be processed later.
4825                  */
4826                 lpfc_mbox_put(phba, pmbox);
4827
4828                 /* Mbox cmd issue - BUSY */
4829                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4830                                 "(%d):0308 Mbox cmd issue - BUSY Data: "
4831                                 "x%x x%x x%x x%x\n",
4832                                 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
4833                                 mb->mbxCommand, phba->pport->port_state,
4834                                 psli->sli_flag, flag);
4835
4836                 psli->slistat.mbox_busy++;
4837                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4838
4839                 if (pmbox->vport) {
4840                         lpfc_debugfs_disc_trc(pmbox->vport,
4841                                 LPFC_DISC_TRC_MBOX_VPORT,
4842                                 "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
4843                                 (uint32_t)mb->mbxCommand,
4844                                 mb->un.varWords[0], mb->un.varWords[1]);
4845                 }
4846                 else {
4847                         lpfc_debugfs_disc_trc(phba->pport,
4848                                 LPFC_DISC_TRC_MBOX,
4849                                 "MBOX Bsy:        cmd:x%x mb:x%x x%x",
4850                                 (uint32_t)mb->mbxCommand,
4851                                 mb->un.varWords[0], mb->un.varWords[1]);
4852                 }
4853
4854                 return MBX_BUSY;
4855         }
4856
4857         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4858
4859         /* If we are not polling, we MUST be in SLI2 mode */
4860         if (flag != MBX_POLL) {
4861                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
4862                     (mb->mbxCommand != MBX_KILL_BOARD)) {
4863                         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4864                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4865                         /* Mbox command <mbxCommand> cannot issue */
4866                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4867                                         "(%d):2531 Mailbox command x%x "
4868                                         "cannot issue Data: x%x x%x\n",
4869                                         pmbox->vport ? pmbox->vport->vpi : 0,
4870                                         pmbox->u.mb.mbxCommand,
4871                                         psli->sli_flag, flag);
4872                         goto out_not_finished;
4873                 }
4874                 /* timeout active mbox command */
4875                 mod_timer(&psli->mbox_tmo, (jiffies +
4876                                (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
4877         }
4878
4879         /* Mailbox cmd <cmd> issue */
4880         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4881                         "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
4882                         "x%x\n",
4883                         pmbox->vport ? pmbox->vport->vpi : 0,
4884                         mb->mbxCommand, phba->pport->port_state,
4885                         psli->sli_flag, flag);
4886
4887         if (mb->mbxCommand != MBX_HEARTBEAT) {
4888                 if (pmbox->vport) {
4889                         lpfc_debugfs_disc_trc(pmbox->vport,
4890                                 LPFC_DISC_TRC_MBOX_VPORT,
4891                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
4892                                 (uint32_t)mb->mbxCommand,
4893                                 mb->un.varWords[0], mb->un.varWords[1]);
4894                 }
4895                 else {
4896                         lpfc_debugfs_disc_trc(phba->pport,
4897                                 LPFC_DISC_TRC_MBOX,
4898                                 "MBOX Send:       cmd:x%x mb:x%x x%x",
4899                                 (uint32_t)mb->mbxCommand,
4900                                 mb->un.varWords[0], mb->un.varWords[1]);
4901                 }
4902         }
4903
4904         psli->slistat.mbox_cmd++;
4905         evtctr = psli->slistat.mbox_event;
4906
4907         /* next set own bit for the adapter and copy over command word */
4908         mb->mbxOwner = OWN_CHIP;
4909
4910         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
4911                 /* First copy command data to host SLIM area */
4912                 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
4913         } else {
4914                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
4915                         /* copy command data into host mbox for cmpl */
4916                         lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
4917                 }
4918
4919                 /* First copy mbox command data to HBA SLIM, skip past first
4920                    word */
4921                 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4922                 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
4923                             MAILBOX_CMD_SIZE - sizeof (uint32_t));
4924
4925                 /* Next copy over first word, with mbxOwner set */
4926                 ldata = *((uint32_t *)mb);
4927                 to_slim = phba->MBslimaddr;
4928                 writel(ldata, to_slim);
4929                 readl(to_slim); /* flush */
4930
4931                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
4932                         /* switch over to host mailbox */
4933                         psli->sli_flag |= LPFC_SLI_ACTIVE;
4934                 }
4935         }
4936
4937         wmb();
4938
4939         switch (flag) {
4940         case MBX_NOWAIT:
4941                 /* Set up reference to mailbox command */
4942                 psli->mbox_active = pmbox;
4943                 /* Interrupt board to do it */
4944                 writel(CA_MBATT, phba->CAregaddr);
4945                 readl(phba->CAregaddr); /* flush */
4946                 /* Don't wait for it to finish, just return */
4947                 break;
4948
4949         case MBX_POLL:
4950                 /* Set up null reference to mailbox command */
4951                 psli->mbox_active = NULL;
4952                 /* Interrupt board to do it */
4953                 writel(CA_MBATT, phba->CAregaddr);
4954                 readl(phba->CAregaddr); /* flush */
4955
4956                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
4957                         /* First read mbox status word */
4958                         word0 = *((uint32_t *)phba->mbox);
4959                         word0 = le32_to_cpu(word0);
4960                 } else {
4961                         /* First read mbox status word */
4962                         word0 = readl(phba->MBslimaddr);
4963                 }
4964
4965                 /* Read the HBA Host Attention Register */
4966                 ha_copy = readl(phba->HAregaddr);
4967                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
4968                                                              mb->mbxCommand) *
4969                                            1000) + jiffies;
4970                 i = 0;
4971                 /* Wait for command to complete */
4972                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
4973                        (!(ha_copy & HA_MBATT) &&
4974                         (phba->link_state > LPFC_WARM_START))) {
4975                         if (time_after(jiffies, timeout)) {
4976                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4977                                 spin_unlock_irqrestore(&phba->hbalock,
4978                                                        drvr_flag);
4979                                 goto out_not_finished;
4980                         }
4981
4982                         /* Check if we took a mbox interrupt while we were
4983                            polling */
4984                         if (((word0 & OWN_CHIP) != OWN_CHIP)
4985                             && (evtctr != psli->slistat.mbox_event))
4986                                 break;
4987
4988                         if (i++ > 10) {
4989                                 spin_unlock_irqrestore(&phba->hbalock,
4990                                                        drvr_flag);
4991                                 msleep(1);
4992                                 spin_lock_irqsave(&phba->hbalock, drvr_flag);
4993                         }
4994
4995                         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
4996                                 /* First copy command data */
4997                                 word0 = *((uint32_t *)phba->mbox);
4998                                 word0 = le32_to_cpu(word0);
4999                                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
5000                                         MAILBOX_t *slimmb;
5001                                         uint32_t slimword0;
5002                                         /* Check real SLIM for any errors */
5003                                         slimword0 = readl(phba->MBslimaddr);
5004                                         slimmb = (MAILBOX_t *) & slimword0;
5005                                         if (((slimword0 & OWN_CHIP) != OWN_CHIP)
5006                                             && slimmb->mbxStatus) {
5007                                                 psli->sli_flag &=
5008                                                     ~LPFC_SLI_ACTIVE;
5009                                                 word0 = slimword0;
5010                                         }
5011                                 }
5012                         } else {
5013                                 /* First copy command data */
5014                                 word0 = readl(phba->MBslimaddr);
5015                         }
5016                         /* Read the HBA Host Attention Register */
5017                         ha_copy = readl(phba->HAregaddr);
5018                 }
5019
5020                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
5021                         /* copy results back to user */
5022                         lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
5023                 } else {
5024                         /* First copy command data */
5025                         lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
5026                                                         MAILBOX_CMD_SIZE);
5027                         if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
5028                                 pmbox->context2) {
5029                                 lpfc_memcpy_from_slim((void *)pmbox->context2,
5030                                       phba->MBslimaddr + DMP_RSP_OFFSET,
5031                                                       mb->un.varDmp.word_cnt);
5032                         }
5033                 }
5034
5035                 writel(HA_MBATT, phba->HAregaddr);
5036                 readl(phba->HAregaddr); /* flush */
5037
5038                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5039                 status = mb->mbxStatus;
5040         }
5041
5042         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
5043         return status;
5044
5045 out_not_finished:
5046         if (processing_queue) {
5047                 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
5048                 lpfc_mbox_cmpl_put(phba, pmbox);
5049         }
5050         return MBX_NOT_FINISHED;
5051 }
5052
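/*
 * Illustrative sketch (hypothetical callers) of the two submission modes
 * and the resulting mailbox ownership described in the kernel-doc above.
 *
 * Polling - the caller keeps ownership and frees the mailbox afterwards:
 *
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 *	mempool_free(pmb, phba->mbox_mem_pool);
 *
 * No-wait - the SLI layer owns pmb until mbox_cmpl runs, unless the call
 * returns MBX_NOT_FINISHED:
 *
 *	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) == MBX_NOT_FINISHED)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 */
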
5053 /**
5054  * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
5055  * @phba: Pointer to HBA context object.
5056  *
5057  * The function blocks the posting of SLI4 asynchronous mailbox commands from
5058  * the driver internal pending mailbox queue. It will then try to wait out the
5059  * possible outstanding mailbox command before returning.
5060  *
5061  * Returns:
5062  *      0 - there is no outstanding mailbox command, or it completed
5063  *      1 - the wait for the outstanding mailbox command timed out.
5064  **/
5065 static int
5066 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
5067 {
5068         struct lpfc_sli *psli = &phba->sli;
5069         uint8_t actcmd = MBX_HEARTBEAT;
5070         int rc = 0;
5071         unsigned long timeout;
5072
5073         /* Mark the asynchronous mailbox command posting as blocked */
5074         spin_lock_irq(&phba->hbalock);
5075         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
5076         if (phba->sli.mbox_active)
5077                 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
5078         spin_unlock_irq(&phba->hbalock);
5079         /* Determine how long we might wait for the active mailbox
5080          * command to be gracefully completed by firmware.
5081          */
5082         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
5083                                    jiffies;
5084         /* Wait for the outstanding mailbox command to complete */
5085         while (phba->sli.mbox_active) {
5086                 /* Check active mailbox complete status every 2ms */
5087                 msleep(2);
5088                 if (time_after(jiffies, timeout)) {
5089                         /* Timed out; mark the outstanding cmd as not complete */
5090                         rc = 1;
5091                         break;
5092                 }
5093         }
5094
5095         /* Could not cleanly block async mailbox commands; undo the block */
5096         if (rc) {
5097                 spin_lock_irq(&phba->hbalock);
5098                 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5099                 spin_unlock_irq(&phba->hbalock);
5100         }
5101         return rc;
5102 }
5103
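/*
 * Minimal sketch of the wait-out idiom used above (a generic kernel pattern,
 * not an additional driver API): poll the condition every 2ms until a
 * command-specific deadline computed with lpfc_mbox_tmo_val() expires.
 *
 *	timeout = jiffies + msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd)
 *					     * 1000);
 *	while (phba->sli.mbox_active) {
 *		msleep(2);
 *		if (time_after(jiffies, timeout))
 *			break;
 *	}
 */
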
5104 /**
5105  * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
5106  * @phba: Pointer to HBA context object.
5107  *
5108  * The function unblocks and resumes posting of SLI4 asynchronous mailbox
5109  * commands from the driver internal pending mailbox queue. It makes sure
5110  * that there is no outstanding mailbox command before resuming posting
5111  * asynchronous mailbox commands. If, for any reason, there is an outstanding
5112  * mailbox command, it will try to wait it out before resuming asynchronous
5113  * mailbox command posting.
5114  **/
5115 static void
5116 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
5117 {
5118         struct lpfc_sli *psli = &phba->sli;
5119
5120         spin_lock_irq(&phba->hbalock);
5121         if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5122                 /* Asynchronous mailbox posting is not blocked, do nothing */
5123                 spin_unlock_irq(&phba->hbalock);
5124                 return;
5125         }
5126
5127         /* An outstanding synchronous mailbox command is guaranteed to be
5128          * done, either successfully or by timing out, and a timed-out
5129          * command is always removed, so just unblock posting of async
5130          * mailbox commands and resume.
5131          */
5132         psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5133         spin_unlock_irq(&phba->hbalock);
5134
5135         /* wake up worker thread to post asynchronous mailbox command */
5136         lpfc_worker_wake_up(phba);
5137 }
5138
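/*
 * Illustrative sketch (hypothetical internal caller): the two helpers above
 * bracket work that must not race with asynchronous mailbox posting.  On a
 * quiesce timeout lpfc_sli4_async_mbox_block() has already dropped the
 * block, so the caller simply bails out.
 *
 *	if (lpfc_sli4_async_mbox_block(phba))
 *		return;
 *	... work that requires no outstanding async mailbox commands ...
 *	lpfc_sli4_async_mbox_unblock(phba);
 */
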
5139 /**
5140  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
5141  * @phba: Pointer to HBA context object.
5142  * @mboxq: Pointer to mailbox object.
5143  *
5144  * The function posts a mailbox to the port.  The mailbox is expected
5145  * to be completely filled in and ready for the port to operate on it.
5146  * This routine executes a synchronous completion operation on the
5147  * mailbox by polling for its completion.
5148  *
5149  * The caller must not be holding any locks when calling this routine.
5150  *
5151  * Returns:
5152  *      MBX_SUCCESS - mailbox posted successfully
5153  *      Any of the MBX error values.
5154  **/
5155 static int
5156 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5157 {
5158         int rc = MBX_SUCCESS;
5159         unsigned long iflag;
5160         uint32_t db_ready;
5161         uint32_t mcqe_status;
5162         uint32_t mbx_cmnd;
5163         unsigned long timeout;
5164         struct lpfc_sli *psli = &phba->sli;
5165         struct lpfc_mqe *mb = &mboxq->u.mqe;
5166         struct lpfc_bmbx_create *mbox_rgn;
5167         struct dma_address *dma_address;
5168         struct lpfc_register bmbx_reg;
5169
5170         /*
5171          * Only one mailbox can be active to the bootstrap mailbox region
5172          * at a time and there is no queueing provided.
5173          */
5174         spin_lock_irqsave(&phba->hbalock, iflag);
5175         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5176                 spin_unlock_irqrestore(&phba->hbalock, iflag);
5177                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5178                                 "(%d):2532 Mailbox command x%x (x%x) "
5179                                 "cannot issue Data: x%x x%x\n",
5180                                 mboxq->vport ? mboxq->vport->vpi : 0,
5181                                 mboxq->u.mb.mbxCommand,
5182                                 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5183                                 psli->sli_flag, MBX_POLL);
5184                 return MBXERR_ERROR;
5185         }
5186         /* The server grabs the token and owns it until release */
5187         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5188         phba->sli.mbox_active = mboxq;
5189         spin_unlock_irqrestore(&phba->hbalock, iflag);
5190
5191         /*
5192          * Initialize the bootstrap memory region to avoid stale data areas
5193          * in the mailbox post.  Then copy the caller's mailbox contents to
5194          * the bmbx mailbox region.
5195          */
5196         mbx_cmnd = bf_get(lpfc_mqe_command, mb);
5197         memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
5198         lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
5199                               sizeof(struct lpfc_mqe));
5200
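        /*
         * Bootstrap mailbox protocol: the bmbx DMA address is written to the
         * BMBX register in two halves, high then low, and after each write
         * the driver polls the bootstrap mailbox ready bit.  Once the second
         * ready indication is seen, the port has consumed the mailbox and the
         * completion (MQE and MCQE) is read back from the bmbx region below.
         */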
5201         /* Post the high mailbox dma address to the port and wait for ready. */
5202         dma_address = &phba->sli4_hba.bmbx.dma_address;
5203         writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
5204
5205         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5206                                    * 1000) + jiffies;
5207         do {
5208                 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5209                 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5210                 if (!db_ready)
5211                         msleep(2);
5212
5213                 if (time_after(jiffies, timeout)) {
5214                         rc = MBXERR_ERROR;
5215                         goto exit;
5216                 }
5217         } while (!db_ready);
5218
5219         /* Post the low mailbox dma address to the port. */
5220         writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
5221         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5222                                    * 1000) + jiffies;
5223         do {
5224                 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5225                 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5226                 if (!db_ready)
5227                         msleep(2);
5228
5229                 if (time_after(jiffies, timeout)) {
5230                         rc = MBXERR_ERROR;
5231                         goto exit;
5232                 }
5233         } while (!db_ready);
5234
5235         /*
5236          * Read the CQ to ensure the mailbox has completed.
5237          * If so, update the mailbox status so that the upper layers
5238          * can complete the request normally.
5239          */
5240         lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
5241                               sizeof(struct lpfc_mqe));
5242         mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
5243         lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
5244                               sizeof(struct lpfc_mcqe));
5245         mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
5246
5247         /* Prefix the mailbox status with range x4000 to note SLI4 status. */
5248         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
5249                 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
5250                 rc = MBXERR_ERROR;
5251         }
5252
5253         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5254                         "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
5255                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
5256                         " x%x x%x CQ: x%x x%x x%x x%x\n",
5257                         mboxq->vport ? mboxq->vport->vpi : 0,
5258                         mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
5259                         bf_get(lpfc_mqe_status, mb),
5260                         mb->un.mb_words[0], mb->un.mb_words[1],
5261                         mb->un.mb_words[2], mb->un.mb_words[3],
5262                         mb->un.mb_words[4], mb->un.mb_words[5],
5263                         mb->un.mb_words[6], mb->un.mb_words[7],
5264                         mb->un.mb_words[8], mb->un.mb_words[9],
5265                         mb->un.mb_words[10], mb->un.mb_words[11],
5266                         mb->un.mb_words[12], mboxq->mcqe.word0,
5267                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
5268                         mboxq->mcqe.trailer);
5269 exit:
5270         /* We are holding the token; release it and clear the active mailbox */
5271         spin_lock_irqsave(&phba->hbalock, iflag);
5272         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5273         phba->sli.mbox_active = NULL;
5274         spin_unlock_irqrestore(&phba->hbalock, iflag);
5275         return rc;
5276 }
5277
5278 /**
5279  * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
5280  * @phba: Pointer to HBA context object.
5281  * @mboxq: Pointer to mailbox object.
5282  * @flag: Flag indicating how the mailbox needs to be processed.
5283  *
5284  * This function is called by discovery code and HBA management code to submit
5285  * a mailbox command to firmware with SLI-4 interface spec.
5286  *
5287  * Returns the mailbox command status; in all cases the caller owns the
5288  * mailbox command after the function returns.
5289  **/
5290 static int
5291 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5292                        uint32_t flag)
5293 {
5294         struct lpfc_sli *psli = &phba->sli;
5295         unsigned long iflags;
5296         int rc;
5297
5298         rc = lpfc_mbox_dev_check(phba);
5299         if (unlikely(rc)) {
5300                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5301                                 "(%d):2544 Mailbox command x%x (x%x) "
5302                                 "cannot issue Data: x%x x%x\n",
5303                                 mboxq->vport ? mboxq->vport->vpi : 0,
5304                                 mboxq->u.mb.mbxCommand,
5305                                 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5306                                 psli->sli_flag, flag);
5307                 goto out_not_finished;
5308         }
5309
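        /*
         * From here there are three cases:
         *  1) interrupts disabled: only MBX_POLL is allowed and the command
         *     is run synchronously through the bootstrap mailbox;
         *  2) interrupts enabled and MBX_POLL requested: block asynchronous
         *     posting, run the command synchronously, then unblock;
         *  3) otherwise: enqueue the command on the driver's mailbox queue and
         *     wake the worker thread, which posts it asynchronously via
         *     lpfc_sli4_post_async_mbox().
         */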
5310         /* Detect polling mode and jump to a handler */
5311         if (!phba->sli4_hba.intr_enable) {
5312                 if (flag == MBX_POLL)
5313                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
5314                 else
5315                         rc = -EIO;
5316                 if (rc != MBX_SUCCESS)
5317                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5318                                         "(%d):2541 Mailbox command x%x "
5319                                         "(x%x) cannot issue Data: x%x x%x\n",
5320                                         mboxq->vport ? mboxq->vport->vpi : 0,
5321                                         mboxq->u.mb.mbxCommand,
5322                                         lpfc_sli4_mbox_opcode_get(phba, mboxq),
5323                                         psli->sli_flag, flag);
5324                 return rc;
5325         } else if (flag == MBX_POLL) {
5326                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
5327                                 "(%d):2542 Try to issue mailbox command "
5328                                 "x%x (x%x) synchronously ahead of async "
5329                                 "mailbox command queue: x%x x%x\n",
5330                                 mboxq->vport ? mboxq->vport->vpi : 0,
5331                                 mboxq->u.mb.mbxCommand,
5332                                 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5333                                 psli->sli_flag, flag);
5334                 /* Try to block the asynchronous mailbox posting */
5335                 rc = lpfc_sli4_async_mbox_block(phba);
5336                 if (!rc) {
5337                         /* Successfully blocked, now issue sync mbox cmd */
5338                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
5339                         if (rc != MBX_SUCCESS)
5340                                 lpfc_printf_log(phba, KERN_ERR,
5341                                                 LOG_MBOX | LOG_SLI,
5342                                                 "(%d):2597 Mailbox command "
5343                                                 "x%x (x%x) cannot issue "
5344                                                 "Data: x%x x%x\n",
5345                                                 mboxq->vport ?
5346                                                 mboxq->vport->vpi : 0,
5347                                                 mboxq->u.mb.mbxCommand,
5348                                                 lpfc_sli4_mbox_opcode_get(phba,
5349                                                                 mboxq),
5350                                                 psli->sli_flag, flag);
5351                         /* Unblock the async mailbox posting afterward */
5352                         lpfc_sli4_async_mbox_unblock(phba);
5353                 }
5354                 return rc;
5355         }
5356
5357         /* Now, interrupt mode asynchronous mailbox command */
5358         rc = lpfc_mbox_cmd_check(phba, mboxq);
5359         if (rc) {
5360                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5361                                 "(%d):2543 Mailbox command x%x (x%x) "
5362                                 "cannot issue Data: x%x x%x\n",
5363                                 mboxq->vport ? mboxq->vport->vpi : 0,
5364                                 mboxq->u.mb.mbxCommand,
5365                                 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5366                                 psli->sli_flag, flag);
5367                 goto out_not_finished;
5368         }
5369
5370         /* Put the mailbox command on the driver internal FIFO */
5371         psli->slistat.mbox_busy++;
5372         spin_lock_irqsave(&phba->hbalock, iflags);
5373         lpfc_mbox_put(phba, mboxq);
5374         spin_unlock_irqrestore(&phba->hbalock, iflags);
5375         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5376                         "(%d):0354 Mbox cmd issue - Enqueue Data: "
5377                         "x%x (x%x) x%x x%x x%x\n",
5378                         mboxq->vport ? mboxq->vport->vpi : 0xffffff,
5379                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5380                         lpfc_sli4_mbox_opcode_get(phba, mboxq),
5381                         phba->pport->port_state,
5382                         psli->sli_flag, MBX_NOWAIT);
5383         /* Wake up worker thread to post the mailbox command from the queue head */
5384         lpfc_worker_wake_up(phba);
5385
5386         return MBX_BUSY;
5387
5388 out_not_finished:
5389         return MBX_NOT_FINISHED;
5390 }
5391
5392 /**
5393  * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
5394  * @phba: Pointer to HBA context object.
5395  *
5396  * This function is called by the worker thread to send a mailbox command to
5397  * SLI4 HBA firmware.
5398  *
5399  **/
5400 int
5401 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
5402 {
5403         struct lpfc_sli *psli = &phba->sli;
5404         LPFC_MBOXQ_t *mboxq;
5405         int rc = MBX_SUCCESS;
5406         unsigned long iflags;
5407         struct lpfc_mqe *mqe;
5408         uint32_t mbx_cmnd;
5409
5410         /* Check interrupt mode before posting async mailbox command */
5411         if (unlikely(!phba->sli4_hba.intr_enable))
5412                 return MBX_NOT_FINISHED;
5413
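        /*
         * The worker thread may post a command only when asynchronous posting
         * is not blocked, no other context holds the MBOX_ACTIVE token and no
         * mailbox command is currently outstanding on the port; the checks
         * below enforce this before the token is taken.
         */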
5414         /* Check for mailbox command service token */
5415         spin_lock_irqsave(&phba->hbalock, iflags);
5416         if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5417                 spin_unlock_irqrestore(&phba->hbalock, iflags);
5418                 return MBX_NOT_FINISHED;
5419         }
5420         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5421                 spin_unlock_irqrestore(&phba->hbalock, iflags);
5422                 return MBX_NOT_FINISHED;
5423         }
5424         if (unlikely(phba->sli.mbox_active)) {
5425                 spin_unlock_irqrestore(&phba->hbalock, iflags);
5426                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5427                                 "0384 There is a pending active mailbox cmd\n");
5428                 return MBX_NOT_FINISHED;
5429         }
5430         /* Take the mailbox command service token */
5431         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5432
5433         /* Get the next mailbox command from head of queue */
5434         mboxq = lpfc_mbox_get(phba);
5435
5436         /* If no more mailbox commands are waiting to be posted, we're done */
5437         if (!mboxq) {
5438                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5439                 spin_unlock_irqrestore(&phba->hbalock, iflags);
5440                 return MBX_SUCCESS;
5441         }
5442         phba->sli.mbox_active = mboxq;
5443         spin_unlock_irqrestore(&phba->hbalock, iflags);
5444
5445         /* Check device readiness for posting mailbox command */
5446         rc = lpfc_mbox_dev_check(phba);
5447         if (unlikely(rc))
5448                 /* The driver cleanup routine will clean up the pending mailbox */
5449                 goto out_not_finished;
5450
5451         /* Prepare the mbox command to be posted */
5452         mqe = &mboxq->u.mqe;
5453         mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
5454
5455         /* Start timer for the mbox_tmo and log some mailbox post messages */
5456         mod_timer(&psli->mbox_tmo, (jiffies +
5457                   (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
5458
5459         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5460                         "(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
5461                         "x%x x%x\n",
5462                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
5463                         lpfc_sli4_mbox_opcode_get(phba, mboxq),
5464                         phba->pport->port_state, psli->sli_flag);
5465
5466         if (mbx_cmnd != MBX_HEARTBEAT) {
5467                 if (mboxq->vport) {
5468                         lpfc_debugfs_disc_trc(mboxq->vport,
5469                                 LPFC_DISC_TRC_MBOX_VPORT,
5470                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
5471                                 mbx_cmnd, mqe->un.mb_words[0],
5472                                 mqe->un.mb_words[1]);
5473                 } else {
5474                         lpfc_debugfs_disc_trc(phba->pport,
5475                                 LPFC_DISC_TRC_MBOX,
5476                                 "MBOX Send: cmd:x%x mb:x%x x%x",
5477                                 mbx_cmnd, mqe->un.mb_words[0],
5478                                 mqe->un.mb_words[1]);
5479                 }
5480         }
5481         psli->slistat.mbox_cmd++;
5482
5483         /* Post the mailbox command to the port */
5484         rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
5485         if (rc != MBX_SUCCESS) {
5486                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5487                                 "(%d):2533 Mailbox command x%x (x%x) "
5488                                 "cannot issue Data: x%x x%x\n",
5489                                 mboxq->vport ? mboxq->vport->vpi : 0,
5490                                 mboxq->u.mb.mbxCommand,
5491                                 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5492                                 psli->sli_flag, MBX_NOWAIT);
5493                 goto out_not_finished;
5494         }
5495
5496         return rc;
5497
5498 out_not_finished:
5499         spin_lock_irqsave(&phba->hbalock, iflags);
5500         mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
5501         __lpfc_mbox_cmpl_put(phba, mboxq);
5502         /* Release the token */
5503         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5504         phba->sli.mbox_active = NULL;
5505         spin_unlock_irqrestore(&phba->hbalock, iflags);
5506
5507         return MBX_NOT_FINISHED;
5508 }
5509
5510 /**
5511  * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
5512  * @phba: Pointer to HBA context object.
5513  * @pmbox: Pointer to mailbox object.
5514  * @flag: Flag indicating how the mailbox needs to be processed.
5515  *
5516  * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine,
5517  * using the API jump table function pointer in the lpfc_hba struct.
5518  *
5519  * Returns the mailbox command status; in all cases the caller owns the
5520  * mailbox command after the function returns.
5521  **/
5522 int
5523 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
5524 {
5525         return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
5526 }
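
/*
 * Caller sketch (illustrative only, not driver code): a mailbox command is
 * typically allocated from the driver's mailbox mempool, filled in by one of
 * the setup routines in lpfc_mbox.c, given a completion handler and then
 * issued through this wrapper:
 *
 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (mbox) {
 *		... fill in the mailbox contents ...
 *		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 *		if (rc == MBX_NOT_FINISHED)
 *			mempool_free(mbox, phba->mbox_mem_pool);
 *	}
 *
 * With MBX_POLL the command completes synchronously and the caller always
 * owns (and must free) the mailbox after return.
 */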
5527
5528 /**
5529  * lpfc_mbox_api_table_setup - Set up mbox api function jump table
5530  * @phba: The hba struct for which this call is being executed.
5531  * @dev_grp: The HBA PCI-Device group number.
5532  *
5533  * This routine sets up the mbox interface API function jump table in @phba
5534  * struct.
5535  * Returns: 0 - success, -ENODEV - failure.
5536  **/
5537 int
5538 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5539 {
5540
5541         switch (dev_grp) {
5542         case LPFC_PCI_DEV_LP:
5543                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
5544                 phba->lpfc_sli_handle_slow_ring_event =
5545                                 lpfc_sli_handle_slow_ring_event_s3;
5546                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
5547                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
5548                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
5549                 break;
5550         case LPFC_PCI_DEV_OC:
5551                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
5552                 phba->lpfc_sli_handle_slow_ring_event =
5553                                 lpfc_sli_handle_slow_ring_event_s4;
5554                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
5555                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
5556                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
5557                 break;
5558         default:
5559                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5560                                 "1420 Invalid HBA PCI-device group: 0x%x\n",
5561                                 dev_grp);
5562                 return -ENODEV;
5563                 break;
5564         }
5565         return 0;
5566 }
5567
5568 /**
5569  * __lpfc_sli_ringtx_put - Add an iocb to the txq
5570  * @phba: Pointer to HBA context object.
5571  * @pring: Pointer to driver SLI ring object.
5572  * @piocb: Pointer to address of newly added command iocb.
5573  *
5574  * This function is called with hbalock held to add a command
5575  * iocb to the txq when the SLI layer cannot submit the command iocb
5576  * to the ring.
5577  **/
5578 static void
5579 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5580                     struct lpfc_iocbq *piocb)
5581 {
5582         /* Insert the caller's iocb in the txq tail for later processing. */
5583         list_add_tail(&piocb->list, &pring->txq);
5584         pring->txq_cnt++;
5585 }
5586
5587 /**
5588  * lpfc_sli_next_iocb - Get the next iocb in the txq
5589  * @phba: Pointer to HBA context object.
5590  * @pring: Pointer to driver SLI ring object.
5591  * @piocb: Pointer to address of newly added command iocb.
5592  *
5593  * This function is called with hbalock held before a new
5594  * iocb is submitted to the firmware. This function checks the
5595  * txq so that any iocbs already queued there are flushed to the
5596  * firmware before new iocbs are submitted.
5597  * If there are iocbs in the txq which need to be submitted
5598  * to firmware, lpfc_sli_next_iocb returns the first element
5599  * of the txq after dequeuing it from txq.
5600  * If there is no iocb in the txq then the function will return
5601  * *piocb and set *piocb to NULL. The caller needs to check
5602  * *piocb to find if there are more commands in the txq.
5603  **/
5604 static struct lpfc_iocbq *
5605 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5606                    struct lpfc_iocbq **piocb)
5607 {
5608         struct lpfc_iocbq * nextiocb;
5609
5610         nextiocb = lpfc_sli_ringtx_get(phba, pring);
5611         if (!nextiocb) {
5612                 nextiocb = *piocb;
5613                 *piocb = NULL;
5614         }
5615
5616         return nextiocb;
5617 }
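
/*
 * Usage sketch (illustrative only): __lpfc_sli_issue_iocb_s3() below drains
 * the txq ahead of a newly submitted iocb with a loop of roughly this shape:
 *
 *	while ((nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)) != NULL) {
 *		... submit nextiocb to the ring ...
 *	}
 *
 * When the txq is empty the caller's own iocb is returned and *piocb is set
 * to NULL, so the loop ends after that final submission.
 */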
5618
5619 /**
5620  * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
5621  * @phba: Pointer to HBA context object.
5622  * @ring_number: SLI ring number to issue iocb on.
5623  * @piocb: Pointer to command iocb.
5624  * @flag: Flag indicating if this command can be put into txq.
5625  *
5626  * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
5627  * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is