/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2009-2010 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* job waiting for this event to finish */
	struct fc_bsg_job *set_job;
};
struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;

	/* job waiting for this iocb to finish */
	struct fc_bsg_job *set_job;
};

#define TYPE_EVT	1
#define TYPE_IOCB	2
struct bsg_job_data {
	uint32_t type;
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
	} context_un;
};

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};

#define SLI_CT_ELX_LOOPBACK 0x10
enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};
/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	unsigned long iflags;
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	iocb = &dd_data->context_un.iocb;
	job = iocb->set_job;
	job->dd_data = NULL; /* so timeout handler does not reply */

	spin_lock_irqsave(&phba->hbalock, iflags);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	bmp = iocb->bmp;
	rspiocbq = iocb->rspiocbq;
	rsp = &rspiocbq->iocb;
	ndlp = iocb->ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}
/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq *rspiocbq = NULL;
	IOCB_t *cmd;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto no_ndlp;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_ndlp;
	}

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_bmp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	cmd = &cmdiocbq->iocb;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}

	rsp = &rspiocbq->iocb;
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_rspiocbq;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
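
	/* Map the request payload scatterlist and describe each segment
	 * with a 64-bit BDE in the buffer pointer list.
	 */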
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}
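
	/* Do the same for the reply payload; these BDEs are marked as
	 * input buffers so the adapter writes the CT response into them.
	 */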
	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}
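
	/* Point the GEN_REQUEST64_CR iocb at the buffer pointer list and
	 * fill in the FC header fields for an unsolicited CT request.
	 */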
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;
	cmd->ulpTimeout = timeout;

	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rspiocbq = rspiocbq;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = bmp;
	dd_data->context_un.iocb.ndlp = ndlp;
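
	/* If the driver is polling the FCP ring, its interrupt is normally
	 * kept disabled; turn it back on before issuing this iocb.
	 */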
	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* iocb failed so cleanup */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_bmp:
	kfree(bmp);
free_ndlp:
	lpfc_nlp_put(ndlp);
no_ndlp:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pbuflist = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	/* normal completion and timeout crossed paths, already done */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	job = dd_data->context_un.iocb.set_job;
	cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
	rspiocbq = dd_data->context_un.iocb.rspiocbq;
	rsp = &rspiocbq->iocb;
	ndlp = dd_data->context_un.iocb.ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (job->reply->result == -EAGAIN)
		rc = -EAGAIN;
	else if (rsp->ulpStatus == IOSTAT_SUCCESS)
		job->reply->reply_payload_rcv_len =
			rsp->un.elsreq64.bdl.bdeSize;
	else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
		job->reply->reply_payload_rcv_len =
			sizeof(struct fc_bsg_ctels_reply);
		/* LS_RJT data returned in word 4 */
		rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
		els_reply = &job->reply->reply_data.ctels_reply;
		els_reply->status = FC_CTELS_STATUS_REJECT;
		els_reply->rjt_data.action = rjt_data[3];
		els_reply->rjt_data.reason_code = rjt_data[2];
		els_reply->rjt_data.reason_explanation = rjt_data[1];
		els_reply->rjt_data.vendor_unique = rjt_data[0];
	} else
		rc = -EIO;

	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}
/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	uint32_t elscmd;
	uint32_t cmdsize;
	uint32_t rspsize;
	uint32_t rpi = 0;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *rsp;
	struct lpfc_dmabuf *pcmd;
	struct lpfc_dmabuf *prsp;
	struct lpfc_dmabuf *pbuflist = NULL;
	struct ulp_bde64 *bpl;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	elscmd = job->request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;
	rspsize = job->reply_payload.payload_len;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		lpfc_nlp_put(ndlp);
		rc = -ENOMEM;
		goto free_dd_data;
	}

	rsp = &rspiocbq->iocb;
	rpi = ndlp->nlp_rpi;

	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto free_rspiocbq;
	}

	/* prep els iocb set context1 to the ndlp, context2 to the command
	 * dmabuf, context3 holds the data dmabuf
	 */
	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *) pcmd->list.next;
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(prsp);
	cmdiocbq->context2 = NULL;

	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
	bpl = (struct ulp_bde64 *) pbuflist->virt;
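
	/* lpfc_prep_els_iocb allocated the buffer pointer list in context3;
	 * rebuild it here from the bsg request and reply scatterlists.
	 */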
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}
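
	/* Finish the ELS iocb, then hang our tracking structure off the
	 * iocb so the completion and timeout handlers can find the job.
	 */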
	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = NULL;
	cmdiocbq->context2 = NULL;

	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rspiocbq = rspiocbq;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = NULL;
	dd_data->context_un.iocb.ndlp = ndlp;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);

	lpfc_sli_release_iocbq(phba, cmdiocbq);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt);
}

static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}
static int
dfc_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
		(phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);
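
	/* Walk the chained DMA buffers and free each coherent allocation
	 * along with its tracking structure.
	 */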
	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}
/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the SLI ring on which the command was received.
 * @piocbq: Pointer to the command iocb.
 *
 * This function is called when an unsolicited CT command is received. It
 * forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	uint32_t len;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;
	struct fc_bsg_job *job = NULL;
	unsigned long flags;
	int size = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
		goto error_ct_unsol_exit;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}
	if (dmabuf == NULL)
		goto error_ct_unsol_exit;
	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	len = ct_req->CommandResponse.bits.Size;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
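
	/* Walk the registered waiters and hand this CT event to every one
	 * whose request id matches.
	 */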
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
		    evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}
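
		/* Copy the receive buffers for this sequence into the event
		 * data, reposting or freeing each buffer as we go.
		 */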
		list_for_each_entry(iocbq, &head, list) {
			size = 0;
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						    &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
							  flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						dfc_cmd_data_free(phba,
						(struct lpfc_dmabufext *)
							dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
							LPFC_SLI_REV2) ||
						    (phba->sli3_options &
						     LPFC_SLI3_HBQ_ENABLED)) {
							lpfc_in_buf_free(phba,
									dmabuf);
						} else {
							lpfc_post_buffer(phba,
									 pring,
									 1);
						}
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}
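
		/* On SLI4 the exchange and source IDs are saved in a driver
		 * context so a response can be sent on this exchange later.
		 */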
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % 64;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
						piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].SID =
				piocbq->iocb.un.rcvels.remoteID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);
		lpfc_bsg_event_unref(evt);

		job = evt->set_job;
		evt->set_job = NULL;
		if (job) {
			job->reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			job->reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			job->job_done(job);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if (evt_req_id == SLI_CT_ELX_LOOPBACK)
		return 0;
	return 1;
}
/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (dd_data == NULL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2734 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
				FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					 event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data->type = TYPE_EVT;
	dd_data->context_un.evt = evt;
	evt->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback*/
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	if (dd_data != NULL)
		kfree(dd_data);

	job->dd_data = NULL;
	return rc;
}
/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;
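
	/* Find the waiter registered under this id and pull the oldest
	 * queued event, if any, off its events_to_get list.
	 */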
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there isn't anymore
	 */
	if (evt_dat == NULL) {
		job->reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		job->reply->reply_payload_rcv_len = 0;

	if (evt_dat) {
		kfree(evt_dat->data);
		kfree(evt_dat);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	job->reply->result = 0;
	job->job_done(job);
	return 0;

job_error:
	job->dd_data = NULL;
	job->reply->result = rc;
	return rc;
}
/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
{
	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		rc = lpfc_bsg_hba_set_event(job);
		break;

	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		rc = lpfc_bsg_hba_get_event(job);
		break;

	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}
/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: fc_bsg_job to handle
 **/
int
lpfc_bsg_request(struct fc_bsg_job *job)
{
	uint32_t msgcode;
	int rc;

	msgcode = job->request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}
/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function just aborts the job's IOCB. The aborted IOCB will return to
 * the waiting function which will handle passing the error back to userspace
 **/
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_bsg_event *evt;
	struct lpfc_bsg_iocb *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct bsg_job_data *dd_data;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	/* timeout and completion crossed paths if no dd_data */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return 0;
	}
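
	/* The job is still outstanding; abort it or detach it from its
	 * event depending on what kind of request it is tracking.
	 */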
	switch (dd_data->type) {
	case TYPE_IOCB:
		iocb = &dd_data->context_un.iocb;
		cmdiocb = iocb->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	case TYPE_EVT:
		evt = dd_data->context_un.evt;
		/* this event has no job anymore */
		evt->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		/* Return -EAGAIN which is our way of signalling the
		 * app to retry.
		 */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
		break;
	default:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
	 * otherwise an error message will be displayed on the console
	 * so always return success (zero)
	 */
	return 0;
}