/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
		bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);

	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
	readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */

	return 0;
}
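
/*
 * Illustrative sketch only (not part of the driver): callers post a WQE
 * while holding the hbalock, as required above. The ELS work queue is used
 * here purely as an example destination.
 *
 *	union lpfc_wqe wqe;
 *	unsigned long iflags;
 *	int rc;
 *
 *	// ... build the WQE in 'wqe' ...
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc)
 *		// -ENOMEM: the queue was full, the request must be retried
 */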
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
	return 0;
}
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return eqe;
}
/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to
 * indicate that the host has finished processing the entries. The @arm
 * parameter indicates that the queue should be rearmed when ringing the
 * doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
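
/*
 * Illustrative sketch only (not part of the driver): the EQ get/release
 * pair above is used as a poll-then-rearm loop by the interrupt handlers,
 * roughly:
 *
 *	struct lpfc_eqe *eqe;
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq)))
 *		;	// dispatch each event to its CQ handler
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
 */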
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* If the next CQE is not valid then we are done */
	if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return cqe;
}
/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to
 * indicate that the host has finished processing the entries. The @arm
 * parameter indicates that the queue should be rearmed when ringing the
 * doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
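
/*
 * Illustrative sketch only (not part of the driver): completion handling
 * follows the same poll-then-rearm pattern as the EQ above:
 *
 *	struct lpfc_cqe *cqe;
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq)))
 *		;	// handle each completion entry
 *	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
 */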
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entry. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
	struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
	struct lpfc_register doorbell;
	int put_index = hq->host_index;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
		doorbell.word0 = 0;
		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
		       LPFC_RQ_POST_BATCH);
		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
	}
	return put_index;
}
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->cmdringaddr) +
			   pring->cmdidx * phba->iocb_cmd_size);
}
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->rspringaddr) +
			   pring->rspidx * phba->iocb_rsp_size);
}
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	return iocbq;
}
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	uint16_t adj_xri;
	struct lpfc_sglq *sglq;
	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
		return NULL;
	sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
	return sglq;
}
/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	uint16_t adj_xri;
	struct lpfc_sglq *sglq;
	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
		return NULL;
	sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
	return sglq;
}
/**
 * __lpfc_sli_get_sglq - Allocates an sglq object from the sgl pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, and it returns a pointer to the
 * newly allocated sglq object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	uint16_t adj_xri;

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
	return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag;

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
	if (sglq) {
		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
			&& ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
			&& (iocbq->iocb.un.ulpWord[4]
				== IOERR_ABORT_REQUESTED))) {
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else
			list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
	}

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
}
/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set in the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);

		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}
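
/*
 * Illustrative sketch only (not part of the driver): a typical caller
 * splices the iocbs it wants to fail onto a local list and cancels them
 * with a local-reject status, e.g.
 *
 *	LIST_HEAD(completions);
 *
 *	// ... move aborted iocbs from txq/txcmplq onto 'completions' ...
 *	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 *			      IOERR_SLI_ABORTED);
 */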
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return type;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
	case DSSCMD_INVALIDATE_DEK:
	case DSSCMD_SET_KEK:
	case DSSCMD_GET_KEK_ID:
	case DSSCMD_GEN_XFER:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
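
/*
 * Illustrative sketch only (not part of the driver): the ring event
 * handlers use the returned type to decide the disposition of a completed
 * entry, roughly:
 *
 *	type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
 *	switch (type) {
 *	case LPFC_SOL_IOCB:	// match against the txcmplq and complete
 *	case LPFC_UNSOL_IOCB:	// hand off to lpfc_sli_process_unsol_iocb()
 *	case LPFC_ABORT_IOCB:	// complete the original aborted command
 *	...
 *	}
 */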
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	list_add_tail(&piocb->list, &pring->txcmplq);
	pring->txcmplq_cnt++;
	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		if (!piocb->vport)
			BUG();
		else
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies + HZ * (phba->fc_ratov << 1));
	}

	return 0;
}
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get the next
 * iocb in the txq of the given ring. If there is any iocb in
 * the txq, the function returns the first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	if (cmd_iocb != NULL)
		pring->txq_cnt--;
	return cmd_iocb;
}
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * the iocb slot returned by the function is not guaranteed to be available.
 * The function returns a pointer to the next available iocb slot if there
 * is an available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->numCiocb;
	if ((pring->next_cmdidx == pring->cmdidx) &&
	    (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->local_getidx, max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->local_getidx == pring->next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kzalloc(new_len * sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0318 Failed to allocate IOTAG. last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 * posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to the ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->cmdidx = pring->next_cmdidx;
	writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}
/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */
	if (pring->txq_cnt &&
	    lpfc_is_link_up(phba) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is a free slot
 * available for the HBQ it will return a pointer to the next available
 * HBQ entry else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is greater than entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}
/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;
	uint32_t hbqno;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}
	/* Return all HBQ buffers that are in flight */
	list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
				 list) {
		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
		list_del(&hbq_buf->dbuf.list);
		if (hbq_buf->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_buf);
		} else {
			hbqno = hbq_buf->tag >> 16;
			if (hbqno >= LPFC_MAX_HBQS)
				(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
					(phba, hbq_buf);
			else
				(phba->hbqs[hbqno].hbq_free_buffer)(phba,
					hbq_buf);
		}
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function will return
 * zero if it successfully posts the buffer, else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero if
 * it successfully posts the buffer, else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}
/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;

	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			      &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = rc;
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 200,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};

/* HBQ for the extra ring if needed */
static struct lpfc_hbq_init lpfc_extra_hbq = {
	.rn = 1,
	.entry_count = 200,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_EXTRA_RING),
	.buffer_count = 0,
	.init_count = 0,
	.add_count = 5,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
	&lpfc_extra_hbq,
};
/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);
	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		/* The tag encodes the HBQ number in its upper 16 bits */
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}
/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ entries
 * successfully allocated.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->add_count);
}
/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from SLI initialization code path with
 * no lock held to post initial HBQ buffers to firmware. The
 * function returns the number of HBQ entries successfully allocated.
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->init_count);
}
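
/*
 * Illustrative sketch only (not part of the driver): SLI initialization
 * seeds each HBQ defined in lpfc_hbq_defs[], e.g.
 *
 *	lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
 *
 * and the receive path later tops a queue back up with
 * lpfc_sli_hbqbuf_add_hbqs() using that HBQ's add_count.
 */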
/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 * @rb_list: Pointer to the hbq buffer list to take a buffer from.
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}
/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function is called with hbalock held. This function searches
 * for the hbq buffer associated with the given tag in the hbq buffer
 * list. If it finds the hbq buffer, it returns the hbq_buffer, otherwise
 * it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	/* The HBQ number lives in the upper 16 bits of the tag */
	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}
/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with hbalock held. This function gives back
 * the hbq buffer to firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer.
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}
/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
 * @mbxCommand: mailbox command code.
 *
 * This function is called by the mailbox event handler function to verify
 * that the completed mailbox command is a legitimate mailbox command. If the
 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
 * and the mailbox event handler will take the HBA offline.
 **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_READ_LA:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_LA64:
	case MBX_WRITE_WWN:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_REG_VPI:
	case MBX_UNREG_VPI:
	case MBX_HEARTBEAT:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_REG_FCFI:
	case MBX_UNREG_FCFI:
	case MBX_REG_VFI:
	case MBX_UNREG_VFI:
	case MBX_INIT_VPI:
	case MBX_INIT_VFI:
	case MBX_RESUME_RPI:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}
/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox commands issued from
 * the lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up the thread waiting on the wait queue pointed to by context1
 * of the mailbox.
 **/
static void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	wait_queue_head_t *pdone_q;
	unsigned long drvr_flag;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}
/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue an UNREG_LOGIN to reclaim the RPI.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_dmabuf *mp;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
	    (phba->sli_rev == LPFC_SLI_REV4))
		lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);

	/*
	 * If a REG_LOGIN succeeded after the node was destroyed or the node
	 * is in re-discovery, the driver needs to clean up the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function processes all
 * the completed mailbox commands and gives them to upper layers. The interrupt
 * service routine processes mailbox completion interrupt and adds completed
 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
 * function returns the mailbox commands to the upper layer by calling the
 * completion handler function of each mailbox.
 **/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the cmplq */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->u.mb;

		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
			else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * It is a fatal error if unknown mbox command completion.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x) Cmpl\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli4_mbox_opcode_get(phba, pmb));
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_SLI,
						"(%d):0305 Mbox cmd cmpl "
						"error - RETRYing Data: x%x "
						"(x%x) x%x x%x x%x\n",
						pmb->vport ? pmb->vport->vpi : 0,
						pmbox->mbxCommand,
						lpfc_sli4_mbox_opcode_get(phba,
									  pmb),
						pmbox->mbxStatus,
						pmbox->un.varWords[0],
						pmb->vport->port_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc != MBX_NOT_FINISHED)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				lpfc_sli4_mbox_opcode_get(phba, pmb),
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7]);

		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	} while (1);
	return 0;
}
/**
 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
 * is set in the tag, the buffer is posted for a particular exchange and
 * the function will return the buffer without replacing the buffer.
 * If the buffer is for unsolicited ELS or CT traffic, this function
 * returns the buffer and also posts another buffer to the firmware.
 **/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
		  struct lpfc_sli_ring *pring,
		  uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry;

	if (tag & QUE_BUFTAG_BIT)
		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (!hbq_entry)
		return NULL;
	return &hbq_entry->dbuf;
}
1896 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
1897 * @phba: Pointer to HBA context object.
1898 * @pring: Pointer to driver SLI ring object.
1899 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
1900 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
1901 * @fch_type: the type for the first frame of the sequence.
1903 * This function is called with no lock held. This function uses the r_ctl and
1904 * type of the received sequence to find the correct callback function to call
1905 * to process the sequence.
static int
lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
			 uint32_t fch_type)
{
	int i;

	/* Unsolicited responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
								  saveq);
		return 1;
	}

	/* We must search, based on rctl / type
	   for the right routine */
	for (i = 0; i < pring->num_mask; i++) {
		if ((pring->prt[i].rctl == fch_r_ctl) &&
		    (pring->prt[i].type == fch_type)) {
			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
					(phba, pring, saveq);
			return 1;
		}
	}
	return 0;
}
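/*
 * Registration sketch (illustrative): how a ring's rctl/type mask table is
 * populated at setup time so the search above can find a handler. This
 * condenses the driver's ring-setup code; lpfc_els_unsol_event() is the
 * existing ELS handler, and the single-entry table here is an assumption.
 */
#if 0
	pring->num_mask = 1;
	pring->prt[0].profile = 0;		/* no catch-all profile */
	pring->prt[0].rctl = FC_RCTL_ELS_REQ;	/* match ELS request frames */
	pring->prt[0].type = FC_TYPE_ELS;
	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;
#endif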
/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This function is called with no lock held by the ring event handler
 * when there is an unsolicited iocb posted to the response ring by the
 * firmware. This function gets the buffer associated with the iocbs
 * and calls the event handler for the ring. This function handles both
 * qring buffers and hbq buffers.
 * When the function returns 1 the caller can free the iocb object otherwise
 * upper layer functions will free the iocb objects.
 **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t *irsp;
	WORD5 *w5p;
	uint32_t Rctl, Type;
	uint32_t match;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	match = 0;
	irsp = &(saveq->iocb);
	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}
	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
							irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
		     irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}
/**
 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @prspiocb: Pointer to response iocb object.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given response iocb using the iotag of the
 * response iocb. This function is called with the hbalock held.
 * This function returns the command iocb object if it finds the command
 * iocb else returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
		      struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;

	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		list_del_init(&cmd_iocb->list);
		pring->txcmplq_cnt--;
		return cmd_iocb;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0317 iotag x%x is out of "
			"range: max iotag x%x wd0 x%x\n",
			iotag, phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}
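/*
 * Submit-side sketch (illustrative; the call flow is condensed): the iotag
 * that makes the lookup above possible is allocated when the command iocb
 * is queued. lpfc_sli_next_iotag() is the existing allocator in this file;
 * piocb is a hypothetical pending command iocb.
 */
#if 0
	piocb->iotag = lpfc_sli_next_iotag(phba, piocb);
	/* lpfc_sli_next_iotag() also records piocb in
	 * phba->sli.iocbq_lookup[]; the tag is carried in the command so
	 * the HBA echoes it back in the response iocb. */
#endif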
/**
 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iotag: IOCB tag.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given iotag. This function is called with the
 * hbalock held.
 * This function returns the command iocb object if it finds the command
 * iocb else returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring, uint16_t iotag)
{
	struct lpfc_iocbq *cmd_iocb;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		list_del_init(&cmd_iocb->list);
		pring->txcmplq_cnt--;
		return cmd_iocb;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0372 iotag x%x is out of range: max iotag (x%x)\n",
			iotag, phba->sli.last_iotag);
	return NULL;
}
/**
 * lpfc_sli_process_sol_iocb - process solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * This function is called by the ring event handler for non-fcp
 * rings when there is a new response iocb in the response ring.
 * The caller is not required to hold any locks. This function
 * gets the command iocb associated with the response iocb and
 * calls the completion handler for the command iocb. If there
 * is no completion handler, the function will free the resources
 * associated with command iocb. If the response iocb is for
 * an already aborted command iocb, the status of the completion
 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 * This function always returns 1.
 **/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(&phba->hbalock, iflag);
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * If an ELS command failed send an event to mgmt
			 * application.
			 */
			if (saveq->iocb.ulpStatus &&
			    (pring->ringno == LPFC_ELS_RING) &&
			    (cmdiocbp->iocb.ulpCommand ==
			     CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
							    cmdiocbp, saveq);

			/*
			 * Post all ELS completions to the worker thread.
			 * All other are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/* Firmware could still be in progress
					 * of DMAing payload, so don't free data
					 * buffer till after a hbeat.
					 */
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0322 Ring %d handler: "
					"unexpected completion IoTag x%x "
					"Data: x%x x%x x%x x%x\n",
					pring->ringno,
					saveq->iocb.ulpIoTag,
					saveq->iocb.ulpStatus,
					saveq->iocb.un.ulpWord[4],
					saveq->iocb.ulpCommand,
					saveq->iocb.ulpContext);
		}
	}

	return rc;
}
/**
 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called from the iocb ring event handlers when the
 * put pointer is ahead of the get pointer for a ring. This function signals
 * an error attention condition to the worker thread and the worker
 * thread will transition the HBA to offline state.
 **/
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	/*
	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
	 * rsp ring <portRspMax>
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0312 Ring %d handler: portRspPut %d "
			"is bigger than rsp ring %d\n",
			pring->ringno, le32_to_cpu(pgp->rspPutInx),
			pring->numRiocb);

	phba->link_state = LPFC_HBA_ERROR;

	/*
	 * All error attention handlers are posted to
	 * worker thread
	 */
	phba->work_ha |= HA_ERATT;
	phba->work_hs = HS_FFER3;

	lpfc_worker_wake_up(phba);

	return;
}
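/*
 * Worker-side sketch (illustrative, condensed): the flags set above are
 * consumed by the driver's worker thread, which reacts roughly as follows
 * before transitioning the HBA offline.
 */
#if 0
	if (phba->work_ha & HA_ERATT)
		lpfc_handle_eratt(phba);	/* processes the error attention */
#endif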
/**
 * lpfc_poll_eratt - Error attention polling timer timeout handler
 * @ptr: Pointer to address of HBA context object.
 *
 * This function is invoked by the Error Attention polling timer when the
 * timer times out. It will check the SLI Error Attention register for
 * possible attention events. If so, it will post an Error Attention event
 * and wake up worker thread to process it. Otherwise, it will set up the
 * Error Attention polling timer for the next poll.
 **/
void lpfc_poll_eratt(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t eratt = 0;

	phba = (struct lpfc_hba *)ptr;

	/* Check chip HA register for error event */
	eratt = lpfc_sli_check_eratt(phba);

	if (eratt)
		/* Tell the worker thread there is work to do */
		lpfc_worker_wake_up(phba);
	else
		/* Restart the timer for next eratt poll */
		mod_timer(&phba->eratt_poll, jiffies +
			  HZ * LPFC_ERATT_POLL_INTERVAL);
	return;
}
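/*
 * Init-time sketch (illustrative): arming the error-attention polling
 * timer so that lpfc_poll_eratt() above fires. This mirrors the usual
 * timer setup of this kernel era; the exact placement in the driver's
 * init path is condensed away.
 */
#if 0
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long)phba;
	mod_timer(&phba->eratt_poll,
		  jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
#endif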
/**
 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the interrupt context when there is a ring
 * event for the fcp ring. The caller does not hold any lock.
 * The function processes each response iocb in the response ring until it
 * finds an iocb with the LE bit set and chains all the iocbs up to the iocb
 * with the LE bit set. The function will call the completion handler of the
 * command iocb if the response iocb indicates a completion for a command
 * iocb or it is an abort completion. The function will call the
 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 **/
static int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;
	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	}
	if (phba->fcp_ring_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	} else
		phba->fcp_ring_in_use = 1;
	rmb();
	while (pring->rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local data
		 * structure.  The copy involves a byte-swap since the
		 * network byte order and pci byte orders are different.
		 */
		entry = lpfc_resp_iocb(phba, pring);
		phba->last_completion_time = jiffies;

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		INIT_LIST_HEAD(&(rspiocbq.list));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;
		if (unlikely(irsp->ulpStatus)) {
			/*
			 * If resource errors reported from HBA, reduce
			 * queuedepths of the SCSI device.
			 */
			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->lpfc_rampdown_queue_depth(phba);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}

			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0336 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(uint32_t *)&irsp->un1,
					*((uint32_t *)&irsp->un1 + 1));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"0333 IOCB cmd 0x%x"
						" processed. Skipping"
						" completion\n",
						irsp->ulpCommand);
				break;
			}

			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       iflag);
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
				spin_lock_irqsave(&phba->hbalock,
						  iflag);
			}
			break;
		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0334 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}
		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}
	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);
	}

	phba->fcp_ring_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
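/*
 * Caller-side sketch (illustrative, condensed from the driver's fast-path
 * interrupt handling): the handler above is invoked with the FCP ring and
 * that ring's host-attention status bits. ha_copy is a hypothetical local
 * holding the HA register snapshot; HA_RXMASK and LPFC_FCP_RING are
 * existing driver constants.
 */
#if 0
	status = (ha_copy & (HA_RXMASK << (4 * LPFC_FCP_RING)));
	status >>= (4 * LPFC_FCP_RING);
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba,
						&phba->sli.ring[LPFC_FCP_RING],
						status);
#endif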
/**
 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to driver response IOCB object.
 *
 * This function is called from the worker thread when there is a slow-path
 * response IOCB to process. This function chains all the response iocbs until
 * seeing the iocb with the LE bit set. The function will call
 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
 * completion of a command iocb. The function will call the
 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
 * The function frees the resources or calls the completion handler if this
 * iocb is an abort completion. The function returns NULL when the response
 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
 * this function shall chain the iocb on to the iocb_continueq and return the
 * response iocb passed in.
 **/
static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *rspiocbp)
{
	struct lpfc_iocbq *saveq;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *next_iocb;
	IOCB_t *irsp = NULL;
	uint32_t free_saveq;
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	unsigned long iflag;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflag);
	/* First add the response iocb to the continueq list */
	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
	pring->iocb_continueq_cnt++;

	/* Now, determine whether the list is completed for processing */
	irsp = &rspiocbp->iocb;
	if (irsp->ulpLe) {
		/*
		 * By default, the driver expects to free all resources
		 * associated with this iocb completion.
		 */
		free_saveq = 1;
		saveq = list_get_first(&pring->iocb_continueq,
				       struct lpfc_iocbq, list);
		irsp = &(saveq->iocb);
		list_del_init(&pring->iocb_continueq);
		pring->iocb_continueq_cnt = 0;

		pring->stats.iocb_rsp++;
		/*
		 * If resource errors reported from HBA, reduce
		 * queuedepths of the SCSI device.
		 */
		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			phba->lpfc_rampdown_queue_depth(phba);
			spin_lock_irqsave(&phba->hbalock, iflag);
		}
		if (irsp->ulpStatus) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0328 Rsp Ring %d error: "
					"IOCB Data: "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7),
					*(((uint32_t *) irsp) + 8),
					*(((uint32_t *) irsp) + 9),
					*(((uint32_t *) irsp) + 10),
					*(((uint32_t *) irsp) + 11),
					*(((uint32_t *) irsp) + 12),
					*(((uint32_t *) irsp) + 13),
					*(((uint32_t *) irsp) + 14),
					*(((uint32_t *) irsp) + 15));
		}
		/*
		 * Fetch the IOCB command type and call the correct completion
		 * routine. Solicited and Unsolicited IOCBs on the ELS ring
		 * get freed back to the lpfc_iocb_list by the discovery
		 * kernel thread.
		 */
		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
		switch (type) {
		case LPFC_SOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;

		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			if (!rc)
				free_saveq = 0;
			break;
		case LPFC_ABORT_IOCB:
			cmdiocbp = NULL;
			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
								 saveq);
			if (cmdiocbp) {
				/* Call the specified completion routine */
				if (cmdiocbp->iocb_cmpl) {
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
							      saveq);
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
				} else
					__lpfc_sli_release_iocbq(phba,
								 cmdiocbp);
			}
			break;
		case LPFC_UNKNOWN_IOCB:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *)irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0335 Unknown IOCB "
						"command Data: x%x "
						"x%x x%x x%x\n",
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		if (free_saveq) {
			list_for_each_entry_safe(rspiocbp, next_iocb,
						 &saveq->list, list) {
				list_del(&rspiocbp->list);
				__lpfc_sli_release_iocbq(phba, rspiocbp);
			}
			__lpfc_sli_release_iocbq(phba, saveq);
		}
		rspiocbp = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rspiocbp;
}
/**
 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This routine wraps the actual slow_ring event process routine from the
 * API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
}
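/*
 * Init-time sketch (illustrative, condensed): how the jump-table slot that
 * the wrapper above dereferences is selected per SLI revision. The switch
 * mirrors the driver's API-table setup; dev_grp is a hypothetical local.
 */
#if 0
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:	/* SLI3 adapters */
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
		break;
	case LPFC_PCI_DEV_OC:	/* SLI4 adapters */
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
		break;
	}
#endif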
/**
 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a ring event
 * for non-fcp rings. The caller does not hold any lock. The function will
 * remove each response iocb in the response ring and calls the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
static void
lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp;
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	uint32_t portRspPut, portRspMax;
	unsigned long iflag;
	uint32_t status;

	pgp = &phba->port_gp[pring->ringno];
	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;
	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0303 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				pring->ringno, portRspPut, portRspMax);

		phba->link_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return;
	}

	rmb();
	while (pring->rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim.  This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation.  Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write.  When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = lpfc_resp_iocb(phba, pring);

		phba->last_completion_time = jiffies;
		rspiocbp = __lpfc_sli_get_iocbq(phba);
		if (rspiocbp == NULL) {
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __func__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbp->iocb;
		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		if (pring->ringno == LPFC_ELS_RING) {
			lpfc_debugfs_slow_ring_trc(phba,
			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
				*(((uint32_t *) irsp) + 4),
				*(((uint32_t *) irsp) + 6),
				*(((uint32_t *) irsp) + 7));
		}
		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* Handle the response IOCB */
		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
		spin_lock_irqsave(&phba->hbalock, iflag);

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
		if (pring->rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->rspidx != portRspPut) */
	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);
	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}
/**
 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a pending
 * ELS response iocb on the driver internal slow-path response iocb worker
 * queue. The caller does not hold any lock. The function will remove each
 * response iocb from the response worker queue and calls the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
static void
lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_iocbq *irspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irqsave(&phba->hbalock, iflag);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			/* Translate ELS WCQE to response IOCBQ */
			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
								   irspiocbq);
			if (irspiocbq)
				lpfc_sli_sp_handle_rspiocb(phba, pring,
							   irspiocbq);
			break;
		case CQE_CODE_RECEIVE:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_sli4_handle_received_buffer(phba, dmabuf);
			break;
		default:
			break;
		}
	}
}
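/*
 * Producer-side sketch (illustrative, condensed from the SLI4 completion
 * handling): how the interrupt path queues a slow-path event for the
 * routine above and wakes the worker thread that will call it.
 */
#if 0
	spin_lock_irqsave(&phba->hbalock, iflag);
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_queue_event);
	phba->hba_flag |= HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	lpfc_worker_wake_up(phba);	/* worker later runs this handler */
#endif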
/**
 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function aborts all iocbs in the given ring and frees all the iocb
 * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
 * the return of this function. The caller is not required to hold any locks.
 **/
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq *iocb, *next_iocb;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_fabric_abort_hba(phba);
	}

	/* Error everything on txq and txcmplq
	 * First do the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&pring->txq, &completions);
	pring->txq_cnt = 0;

	/* Next issue ABTS for everything on the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
		lpfc_sli_issue_abort_iotag(phba, pring, iocb);

	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
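/*
 * Semantics sketch (illustrative): lpfc_sli_cancel_iocbs() completes each
 * iocb on the list with the given status; per iocb the effect is roughly
 * the following (hypothetical loop body, condensed).
 */
#if 0
	iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
	iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
	if (iocb->iocb_cmpl)
		(iocb->iocb_cmpl) (phba, iocb, iocb);
	else
		lpfc_sli_release_iocbq(phba, iocb);
#endif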
/**
 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all iocbs in the fcp ring and frees all the iocb
 * objects in txq and txcmplq. This function will not issue abort iocbs
 * for all the iocb commands in txcmplq, they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
 * slot has been permanently disabled.
 **/
void
lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
{
	LIST_HEAD(txq);
	LIST_HEAD(txcmplq);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	/* Currently, only one fcp ring */
	pring = &psli->ring[psli->fcp_ring];

	spin_lock_irq(&phba->hbalock);
	/* Retrieve everything on txq */
	list_splice_init(&pring->txq, &txq);
	pring->txq_cnt = 0;

	/* Retrieve everything on the txcmplq */
	list_splice_init(&pring->txcmplq, &txcmplq);
	pring->txcmplq_cnt = 0;
	spin_unlock_irq(&phba->hbalock);

	/* Flush the txq */
	lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

	/* Flush the txcmpq */
	lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
}
/**
 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function reads the host status register and compares
 * with the provided bit mask to check if HBA completed
 * the restart. This function will wait in a loop for the
 * HBA to complete restart. If the HBA does not restart within
 * 15 iterations, the function will reset the HBA again. The
 * function returns 1 when the HBA fails to restart, otherwise
 * it returns zero.
 **/
static int
lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int i = 0;
	int retval = 0;

	/* Read the HBA Host Status Register */
	status = readl(phba->HSregaddr);

	/*
	 * Check status register every 100ms for 5 retries, then every
	 * 500ms for 5, then every 2.5 sec for 5, then reset board and
	 * every 2.5 sec for 4.
	 * Break out of the loop if errors occurred during init.
	 */
	while (((status & mask) != mask) &&
	       !(status & HS_FFERM) &&
	       i++ < 20) {

		if (i <= 5)
			msleep(10);
		else if (i <= 10)
			msleep(500);
		else
			msleep(2500);

		if (i == 15) {
			/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		status = readl(phba->HSregaddr);
	}

	/* Check to see if any errors occurred during init */
	if ((status & HS_FFERM) || (i >= 20)) {
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	}

	return retval;
}
/**
 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function checks the host status register to check if HBA is
 * ready. This function will wait in a loop for the HBA to be ready.
 * If the HBA is not ready, the function will reset the HBA PCI
 * function again. The function returns 1 when the HBA fails to be
 * ready; otherwise it returns zero.
 **/
static int
lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int retval = 0;

	/* Read the HBA Host Status Register */
	status = lpfc_sli4_post_status_check(phba);

	if (status) {
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		status = lpfc_sli4_post_status_check(phba);
	}

	/* Check to see if any errors occurred during init */
	if (status) {
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	} else
		phba->sli4_hba.intr_enable = 0;

	return retval;
}
/**
 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
 * from the API jump table function pointer from the lpfc_hba struct.
 **/
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
	return phba->lpfc_sli_brdready(phba, mask);
}
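/*
 * Caller sketch (illustrative): waiting for the firmware-ready bits after
 * a board restart. HS_FFRDY and HS_MBRDY are existing SLI3 host-status
 * bits; the surrounding error handling is condensed.
 */
#if 0
	if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY))
		return -EIO;	/* HBA never became ready; error state latched */
#endif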
#define BARRIER_TEST_PATTERN (0xdeadbeef)
/**
 * lpfc_reset_barrier - Make HBA ready for HBA reset
 * @phba: Pointer to HBA context object.
 *
 * This function is called before resetting an HBA. This
 * function requests HBA to quiesce DMAs before a reset.
 **/
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
	uint32_t __iomem *resp_buf;
	uint32_t __iomem *mbox_buf;
	volatile uint32_t mbox;
	uint32_t hc_copy;
	int i;
	uint8_t hdrtype;

	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
	if (hdrtype != 0x80 ||
	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
		return;

	/*
	 * Tell the other part of the chip to suspend temporarily all
	 * its DMA activity.
	 */
	resp_buf = phba->MBslimaddr;

	/* Disable the error attention */
	hc_copy = readl(phba->HCregaddr);
	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;

	if (readl(phba->HAregaddr) & HA_ERATT) {
		/* Clear Chip error bit */
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

	mbox = 0;
	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;

	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
	mbox_buf = phba->MBslimaddr;