[SCSI] lpfc 8.3.8: (BSG3) Modify BSG commands to operate asynchronously
[linux-2.6.git] / drivers / scsi / lpfc / lpfc_bsg.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2009-2010 Emulex.  All rights reserved.                *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  *                                                                 *
8  * This program is free software; you can redistribute it and/or   *
9  * modify it under the terms of version 2 of the GNU General       *
10  * Public License as published by the Free Software Foundation.    *
11  * This program is distributed in the hope that it will be useful. *
12  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
13  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
14  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
15  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
17  * more details, a copy of which can be found in the file COPYING  *
18  * included with this package.                                     *
19  *******************************************************************/
20
21 #include <linux/interrupt.h>
22 #include <linux/mempool.h>
23 #include <linux/pci.h>
24
25 #include <scsi/scsi.h>
26 #include <scsi/scsi_host.h>
27 #include <scsi/scsi_transport_fc.h>
28 #include <scsi/scsi_bsg_fc.h>
29 #include <scsi/fc/fc_fs.h>
30
31 #include "lpfc_hw4.h"
32 #include "lpfc_hw.h"
33 #include "lpfc_sli.h"
34 #include "lpfc_sli4.h"
35 #include "lpfc_nl.h"
36 #include "lpfc_bsg.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h"
39 #include "lpfc.h"
40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h"
42 #include "lpfc_vport.h"
43 #include "lpfc_version.h"
44
/* Tracks one registered waiter for unsolicited CT events. Linked on
 * phba->ct_ev_waiters and reference-counted via kref; freed by
 * lpfc_bsg_event_free when the last reference is dropped.
 */
struct lpfc_bsg_event {
	struct list_head node;		/* entry on phba->ct_ev_waiters */
	struct kref kref;		/* lifetime refcount */
	wait_queue_head_t wq;		/* waiters blocked for an event */

	/* Event type and waiter identifiers */
	uint32_t type_mask;		/* FC_REG_*_EVENT bits this waiter wants */
	uint32_t req_id;		/* matched against incoming CT FsType */
	uint32_t reg_id;		/* caller-supplied registration id */

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;	/* jiffies when the wait began */
	int waiting;			/* non-zero while a thread is waiting */

	/* seen and not seen events (lists of struct event_data) */
	struct list_head events_to_get;	/* received, not yet fetched by user */
	struct list_head events_to_see;	/* expected, not yet received */

	/* job waiting for this event to finish */
	struct fc_bsg_job *set_job;
};
66
/* Per-job context for an asynchronously issued IOCB (CT or ELS pass-through).
 * Holds everything the completion handler needs to unmap, free and complete
 * the bsg job without re-deriving state from the iocb itself.
 */
struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;	/* command iocb issued to the HBA */
	struct lpfc_iocbq *rspiocbq;	/* response iocb to copy status into */
	struct lpfc_dmabuf *bmp;	/* BDE list buffer (NULL for ELS jobs) */
	struct lpfc_nodelist *ndlp;	/* node reference taken at submit time */

	/* job waiting for this iocb to finish */
	struct fc_bsg_job *set_job;
};
76
/* Discriminator values for bsg_job_data.type */
#define TYPE_EVT	1	/* context_un.evt is valid */
#define TYPE_IOCB	2	/* context_un.iocb is valid */

/* Tracking structure hung off fc_bsg_job->dd_data so the timeout handler
 * and the completion handler can tell what kind of request is outstanding.
 */
struct bsg_job_data {
	uint32_t type;			/* TYPE_EVT or TYPE_IOCB */
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
	} context_un;
};
86
/* One captured unsolicited event: a copy of the received payload queued on
 * an lpfc_bsg_event's events_to_see/events_to_get lists until userspace
 * fetches it. data is kzalloc'd and owned by this structure.
 */
struct event_data {
	struct list_head node;	/* entry on events_to_see / events_to_get */
	uint32_t type;		/* event type mask that matched */
	uint32_t immed_dat;	/* immediate data word from the iocb */
	void *data;		/* copied payload, kfree'd with the entry */
	uint32_t len;		/* payload length in bytes */
};
94
/* Emulex vendor-specific CT command type used for loopback testing */
#define SLI_CT_ELX_LOOPBACK 0x10

/* Sub-commands carried inside an SLI_CT_ELX_LOOPBACK CT frame */
enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,	/* set up exchange resources for loopback */
	ELX_LOOPBACK_DATA,	/* loopback data payload */
};
101
/* DMA buffer descriptor extended with the allocation size so it can be
 * handed back to dma_free_coherent (see dfc_cmd_data_free).
 */
struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;	/* base descriptor (virt/phys/list) */
	uint32_t size;		/* bytes allocated for dma.virt */
	uint32_t flag;		/* caller-defined flags */
};
107
/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	unsigned long iflags;
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	int rc = 0;

	/* ct_ev_lock serializes against the bsg timeout handler; if it ran
	 * first it cleared cmdiocbq->context1 and already replied.
	 */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	iocb = &dd_data->context_un.iocb;
	job = iocb->set_job;
	job->dd_data = NULL; /* so timeout handler does not reply */

	/* under hbalock: mark the iocb complete and publish the response
	 * status into the caller-provided response iocb (context2)
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* from here on use the tracked response iocb, not the ring's */
	bmp = iocb->bmp;
	rspiocbq = iocb->rspiocbq;
	rsp = &rspiocbq->iocb;
	ndlp = iocb->ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	/* map firmware completion status to an errno for userspace */
	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	/* release everything taken at submit time (see lpfc_bsg_send_mgmt_cmd) */
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}
200
/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 *
 * Builds a GEN_REQUEST64 iocb whose BDE list points at the job's request
 * and reply scatterlists, then issues it asynchronously.  On success the
 * job is completed later by lpfc_bsg_send_mgmt_cmd_cmp; on failure all
 * resources are unwound here and the errno is placed in job->reply->result.
 */
static int
lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq *rspiocbq = NULL;
	IOCB_t *cmd;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* hold a node reference for the duration of the command */
	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto no_ndlp;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_ndlp;
	}

	/* don't race an in-flight ELS on this node */
	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_bmp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	cmd = &cmdiocbq->iocb;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}

	rsp = &rspiocbq->iocb;
	/* DMA-able buffer for the BDE (buffer descriptor) list */
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_rspiocbq;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	/* map the request payload and describe each segment with a BDE */
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	/* reply BDEs follow the request BDEs in the same list (input type) */
	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	/* build the GEN_REQUEST64 iocb pointing at the BDE list */
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;	/* twice the resource allocation t.o. */
	cmd->ulpTimeout = timeout;

	/* hook up the async completion and its context */
	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rspiocbq = rspiocbq;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = bmp;
	dd_data->context_un.iocb.ndlp = ndlp;

	/* re-enable ring interrupts if the driver is in polling mode */
	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* iocb failed so cleanup */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_bmp:
	kfree(bmp);
free_ndlp:
	lpfc_nlp_put(ndlp);
no_ndlp:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
370
371 /**
372  * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
373  * @phba: Pointer to HBA context object.
374  * @cmdiocbq: Pointer to command iocb.
375  * @rspiocbq: Pointer to response iocb.
376  *
377  * This function is the completion handler for iocbs issued using
378  * lpfc_bsg_rport_els_cmp function. This function is called by the
379  * ring event handler function without any lock held. This function
380  * can be called from both worker thread context and interrupt
381  * context. This function also can be called from other thread which
382  * cleans up the SLI layer objects.
383  * This function copy the contents of the response iocb to the
384  * response iocb memory object provided by the caller of
385  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
386  * sleeps for the iocb completion.
387  **/
388 static void
389 lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
390                         struct lpfc_iocbq *cmdiocbq,
391                         struct lpfc_iocbq *rspiocbq)
392 {
393         struct bsg_job_data *dd_data;
394         struct fc_bsg_job *job;
395         IOCB_t *rsp;
396         struct lpfc_nodelist *ndlp;
397         struct lpfc_dmabuf *pbuflist = NULL;
398         struct fc_bsg_ctels_reply *els_reply;
399         uint8_t *rjt_data;
400         unsigned long flags;
401         int rc = 0;
402
403         spin_lock_irqsave(&phba->ct_ev_lock, flags);
404         dd_data = cmdiocbq->context1;
405         /* normal completion and timeout crossed paths, already done */
406         if (!dd_data) {
407                 spin_unlock_irqrestore(&phba->hbalock, flags);
408                 return;
409         }
410
411         cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
412         if (cmdiocbq->context2 && rspiocbq)
413                 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
414                        &rspiocbq->iocb, sizeof(IOCB_t));
415
416         job = dd_data->context_un.iocb.set_job;
417         cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
418         rspiocbq = dd_data->context_un.iocb.rspiocbq;
419         rsp = &rspiocbq->iocb;
420         ndlp = dd_data->context_un.iocb.ndlp;
421
422         pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
423                      job->request_payload.sg_cnt, DMA_TO_DEVICE);
424         pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
425                      job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
426
427         if (job->reply->result == -EAGAIN)
428                 rc = -EAGAIN;
429         else if (rsp->ulpStatus == IOSTAT_SUCCESS)
430                 job->reply->reply_payload_rcv_len =
431                         rsp->un.elsreq64.bdl.bdeSize;
432         else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
433                 job->reply->reply_payload_rcv_len =
434                         sizeof(struct fc_bsg_ctels_reply);
435                 /* LS_RJT data returned in word 4 */
436                 rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
437                 els_reply = &job->reply->reply_data.ctels_reply;
438                 els_reply->status = FC_CTELS_STATUS_REJECT;
439                 els_reply->rjt_data.action = rjt_data[3];
440                 els_reply->rjt_data.reason_code = rjt_data[2];
441                 els_reply->rjt_data.reason_explanation = rjt_data[1];
442                 els_reply->rjt_data.vendor_unique = rjt_data[0];
443         } else
444                 rc = -EIO;
445
446         pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
447         lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
448         lpfc_sli_release_iocbq(phba, rspiocbq);
449         lpfc_sli_release_iocbq(phba, cmdiocbq);
450         lpfc_nlp_put(ndlp);
451         kfree(dd_data);
452         /* make error code available to userspace */
453         job->reply->result = rc;
454         job->dd_data = NULL;
455         /* complete the job back to userspace */
456         job->job_done(job);
457         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
458         return;
459 }
460
461 /**
462  * lpfc_bsg_rport_els - send an ELS command from a bsg request
463  * @job: fc_bsg_job to handle
464  */
465 static int
466 lpfc_bsg_rport_els(struct fc_bsg_job *job)
467 {
468         struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
469         struct lpfc_hba *phba = vport->phba;
470         struct lpfc_rport_data *rdata = job->rport->dd_data;
471         struct lpfc_nodelist *ndlp = rdata->pnode;
472         uint32_t elscmd;
473         uint32_t cmdsize;
474         uint32_t rspsize;
475         struct lpfc_iocbq *rspiocbq;
476         struct lpfc_iocbq *cmdiocbq;
477         IOCB_t *rsp;
478         uint16_t rpi = 0;
479         struct lpfc_dmabuf *pcmd;
480         struct lpfc_dmabuf *prsp;
481         struct lpfc_dmabuf *pbuflist = NULL;
482         struct ulp_bde64 *bpl;
483         int request_nseg;
484         int reply_nseg;
485         struct scatterlist *sgel = NULL;
486         int numbde;
487         dma_addr_t busaddr;
488         struct bsg_job_data *dd_data;
489         uint32_t creg_val;
490         int rc = 0;
491
492         /* in case no data is transferred */
493         job->reply->reply_payload_rcv_len = 0;
494
495         /* allocate our bsg tracking structure */
496         dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
497         if (!dd_data) {
498                 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
499                                 "2735 Failed allocation of dd_data\n");
500                 rc = -ENOMEM;
501                 goto no_dd_data;
502         }
503
504         if (!lpfc_nlp_get(ndlp)) {
505                 rc = -ENODEV;
506                 goto free_dd_data;
507         }
508
509         elscmd = job->request->rqst_data.r_els.els_code;
510         cmdsize = job->request_payload.payload_len;
511         rspsize = job->reply_payload.payload_len;
512         rspiocbq = lpfc_sli_get_iocbq(phba);
513         if (!rspiocbq) {
514                 lpfc_nlp_put(ndlp);
515                 rc = -ENOMEM;
516                 goto free_dd_data;
517         }
518
519         rsp = &rspiocbq->iocb;
520         rpi = ndlp->nlp_rpi;
521
522         cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
523                                       ndlp->nlp_DID, elscmd);
524         if (!cmdiocbq) {
525                 rc = -EIO;
526                 goto free_rspiocbq;
527         }
528
529         /* prep els iocb set context1 to the ndlp, context2 to the command
530         * dmabuf, context3 holds the data dmabuf
531         */
532         pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
533         prsp = (struct lpfc_dmabuf *) pcmd->list.next;
534         lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
535         kfree(pcmd);
536         lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
537         kfree(prsp);
538         cmdiocbq->context2 = NULL;
539
540         pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
541         bpl = (struct ulp_bde64 *) pbuflist->virt;
542
543         request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
544                                   job->request_payload.sg_cnt, DMA_TO_DEVICE);
545         for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
546                 busaddr = sg_dma_address(sgel);
547                 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
548                 bpl->tus.f.bdeSize = sg_dma_len(sgel);
549                 bpl->tus.w = cpu_to_le32(bpl->tus.w);
550                 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
551                 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
552                 bpl++;
553         }
554
555         reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
556                                 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
557         for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
558                 busaddr = sg_dma_address(sgel);
559                 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
560                 bpl->tus.f.bdeSize = sg_dma_len(sgel);
561                 bpl->tus.w = cpu_to_le32(bpl->tus.w);
562                 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
563                 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
564                 bpl++;
565         }
566         cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
567                 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
568         cmdiocbq->iocb.ulpContext = rpi;
569         cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
570         cmdiocbq->context1 = NULL;
571         cmdiocbq->context2 = NULL;
572
573         cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
574         cmdiocbq->context1 = dd_data;
575         cmdiocbq->context2 = rspiocbq;
576         dd_data->type = TYPE_IOCB;
577         dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
578         dd_data->context_un.iocb.rspiocbq = rspiocbq;
579         dd_data->context_un.iocb.set_job = job;
580         dd_data->context_un.iocb.bmp = NULL;;
581         dd_data->context_un.iocb.ndlp = ndlp;
582
583         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
584                 creg_val = readl(phba->HCregaddr);
585                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
586                 writel(creg_val, phba->HCregaddr);
587                 readl(phba->HCregaddr); /* flush */
588         }
589         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
590         lpfc_nlp_put(ndlp);
591         if (rc == IOCB_SUCCESS)
592                 return 0; /* done for now */
593
594         pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
595                      job->request_payload.sg_cnt, DMA_TO_DEVICE);
596         pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
597                      job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
598
599         lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
600
601         lpfc_sli_release_iocbq(phba, cmdiocbq);
602
603 free_rspiocbq:
604         lpfc_sli_release_iocbq(phba, rspiocbq);
605
606 free_dd_data:
607         kfree(dd_data);
608
609 no_dd_data:
610         /* make error code available to userspace */
611         job->reply->result = rc;
612         job->dd_data = NULL;
613         return rc;
614 }
615
616 static void
617 lpfc_bsg_event_free(struct kref *kref)
618 {
619         struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
620                                                   kref);
621         struct event_data *ed;
622
623         list_del(&evt->node);
624
625         while (!list_empty(&evt->events_to_get)) {
626                 ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
627                 list_del(&ed->node);
628                 kfree(ed->data);
629                 kfree(ed);
630         }
631
632         while (!list_empty(&evt->events_to_see)) {
633                 ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
634                 list_del(&ed->node);
635                 kfree(ed->data);
636                 kfree(ed);
637         }
638
639         kfree(evt);
640 }
641
/* lpfc_bsg_event_ref - take an additional reference on @evt */
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}
647
/* lpfc_bsg_event_unref - drop a reference on @evt; the last drop calls
 * lpfc_bsg_event_free, which unlinks and frees the event.
 */
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}
653
654 static struct lpfc_bsg_event *
655 lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
656 {
657         struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
658
659         if (!evt)
660                 return NULL;
661
662         INIT_LIST_HEAD(&evt->events_to_get);
663         INIT_LIST_HEAD(&evt->events_to_see);
664         evt->type_mask = ev_mask;
665         evt->req_id = ev_req_id;
666         evt->reg_id = ev_reg_id;
667         evt->wait_time_stamp = jiffies;
668         init_waitqueue_head(&evt->wq);
669         kref_init(&evt->kref);
670         return evt;
671 }
672
/* dfc_cmd_data_free - free a chain of lpfc_dmabufext buffers.
 * @mlist heads a list of coherent DMA buffers; each entry's memory is
 * returned via dma_free_coherent and the descriptor kfree'd.
 * Always returns 0.
 *
 * NOTE(review): when the link is down while LS_LOOPBACK_MODE is set, the
 * list is deliberately NOT freed and 0 is returned - presumably the
 * buffers are still owned by outstanding loopback I/O; confirm this does
 * not leak on teardown.
 */
static int
dfc_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
		(phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	/* splice a local head in so the first element is iterated too */
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}
699
700 /**
701  * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
702  * @phba:
703  * @pring:
704  * @piocbq:
705  *
706  * This function is called when an unsolicited CT command is received.  It
707  * forwards the event to any processes registered to receive CT events.
708  */
709 int
710 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
711                         struct lpfc_iocbq *piocbq)
712 {
713         uint32_t evt_req_id = 0;
714         uint32_t cmd;
715         uint32_t len;
716         struct lpfc_dmabuf *dmabuf = NULL;
717         struct lpfc_bsg_event *evt;
718         struct event_data *evt_dat = NULL;
719         struct lpfc_iocbq *iocbq;
720         size_t offset = 0;
721         struct list_head head;
722         struct ulp_bde64 *bde;
723         dma_addr_t dma_addr;
724         int i;
725         struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
726         struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
727         struct lpfc_hbq_entry *hbqe;
728         struct lpfc_sli_ct_request *ct_req;
729         struct fc_bsg_job *job = NULL;
730         unsigned long flags;
731         int size = 0;
732
733         INIT_LIST_HEAD(&head);
734         list_add_tail(&head, &piocbq->list);
735
736         if (piocbq->iocb.ulpBdeCount == 0 ||
737             piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
738                 goto error_ct_unsol_exit;
739
740         if (phba->link_state == LPFC_HBA_ERROR ||
741                 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
742                 goto error_ct_unsol_exit;
743
744         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
745                 dmabuf = bdeBuf1;
746         else {
747                 dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
748                                     piocbq->iocb.un.cont64[0].addrLow);
749                 dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
750         }
751         if (dmabuf == NULL)
752                 goto error_ct_unsol_exit;
753         ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
754         evt_req_id = ct_req->FsType;
755         cmd = ct_req->CommandResponse.bits.CmdRsp;
756         len = ct_req->CommandResponse.bits.Size;
757         if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
758                 lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
759
760         spin_lock_irqsave(&phba->ct_ev_lock, flags);
761         list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
762                 if (!(evt->type_mask & FC_REG_CT_EVENT) ||
763                         evt->req_id != evt_req_id)
764                         continue;
765
766                 lpfc_bsg_event_ref(evt);
767                 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
768                 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
769                 if (evt_dat == NULL) {
770                         spin_lock_irqsave(&phba->ct_ev_lock, flags);
771                         lpfc_bsg_event_unref(evt);
772                         lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
773                                         "2614 Memory allocation failed for "
774                                         "CT event\n");
775                         break;
776                 }
777
778                 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
779                         /* take accumulated byte count from the last iocbq */
780                         iocbq = list_entry(head.prev, typeof(*iocbq), list);
781                         evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
782                 } else {
783                         list_for_each_entry(iocbq, &head, list) {
784                                 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
785                                         evt_dat->len +=
786                                         iocbq->iocb.un.cont64[i].tus.f.bdeSize;
787                         }
788                 }
789
790                 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
791                 if (evt_dat->data == NULL) {
792                         lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
793                                         "2615 Memory allocation failed for "
794                                         "CT event data, size %d\n",
795                                         evt_dat->len);
796                         kfree(evt_dat);
797                         spin_lock_irqsave(&phba->ct_ev_lock, flags);
798                         lpfc_bsg_event_unref(evt);
799                         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
800                         goto error_ct_unsol_exit;
801                 }
802
803                 list_for_each_entry(iocbq, &head, list) {
804                         size = 0;
805                         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
806                                 bdeBuf1 = iocbq->context2;
807                                 bdeBuf2 = iocbq->context3;
808                         }
809                         for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
810                                 if (phba->sli3_options &
811                                     LPFC_SLI3_HBQ_ENABLED) {
812                                         if (i == 0) {
813                                                 hbqe = (struct lpfc_hbq_entry *)
814                                                   &iocbq->iocb.un.ulpWord[0];
815                                                 size = hbqe->bde.tus.f.bdeSize;
816                                                 dmabuf = bdeBuf1;
817                                         } else if (i == 1) {
818                                                 hbqe = (struct lpfc_hbq_entry *)
819                                                         &iocbq->iocb.unsli3.
820                                                         sli3Words[4];
821                                                 size = hbqe->bde.tus.f.bdeSize;
822                                                 dmabuf = bdeBuf2;
823                                         }
824                                         if ((offset + size) > evt_dat->len)
825                                                 size = evt_dat->len - offset;
826                                 } else {
827                                         size = iocbq->iocb.un.cont64[i].
828                                                 tus.f.bdeSize;
829                                         bde = &iocbq->iocb.un.cont64[i];
830                                         dma_addr = getPaddr(bde->addrHigh,
831                                                             bde->addrLow);
832                                         dmabuf = lpfc_sli_ringpostbuf_get(phba,
833                                                         pring, dma_addr);
834                                 }
835                                 if (!dmabuf) {
836                                         lpfc_printf_log(phba, KERN_ERR,
837                                                 LOG_LIBDFC, "2616 No dmabuf "
838                                                 "found for iocbq 0x%p\n",
839                                                 iocbq);
840                                         kfree(evt_dat->data);
841                                         kfree(evt_dat);
842                                         spin_lock_irqsave(&phba->ct_ev_lock,
843                                                 flags);
844                                         lpfc_bsg_event_unref(evt);
845                                         spin_unlock_irqrestore(
846                                                 &phba->ct_ev_lock, flags);
847                                         goto error_ct_unsol_exit;
848                                 }
849                                 memcpy((char *)(evt_dat->data) + offset,
850                                        dmabuf->virt, size);
851                                 offset += size;
852                                 if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
853                                     !(phba->sli3_options &
854                                       LPFC_SLI3_HBQ_ENABLED)) {
855                                         lpfc_sli_ringpostbuf_put(phba, pring,
856                                                                  dmabuf);
857                                 } else {
858                                         switch (cmd) {
859                                         case ELX_LOOPBACK_DATA:
860                                                 dfc_cmd_data_free(phba,
861                                                 (struct lpfc_dmabufext *)
862                                                         dmabuf);
863                                                 break;
864                                         case ELX_LOOPBACK_XRI_SETUP:
865                                                 if ((phba->sli_rev ==
866                                                         LPFC_SLI_REV2) ||
867                                                         (phba->sli3_options &
868                                                         LPFC_SLI3_HBQ_ENABLED
869                                                         )) {
870                                                         lpfc_in_buf_free(phba,
871                                                                         dmabuf);
872                                                 } else {
873                                                         lpfc_post_buffer(phba,
874                                                                          pring,
875                                                                          1);
876                                                 }
877                                                 break;
878                                         default:
879                                                 if (!(phba->sli3_options &
880                                                       LPFC_SLI3_HBQ_ENABLED))
881                                                         lpfc_post_buffer(phba,
882                                                                          pring,
883                                                                          1);
884                                                 break;
885                                         }
886                                 }
887                         }
888                 }
889
890                 spin_lock_irqsave(&phba->ct_ev_lock, flags);
891                 if (phba->sli_rev == LPFC_SLI_REV4) {
892                         evt_dat->immed_dat = phba->ctx_idx;
893                         phba->ctx_idx = (phba->ctx_idx + 1) % 64;
894                         phba->ct_ctx[evt_dat->immed_dat].oxid =
895                                                 piocbq->iocb.ulpContext;
896                         phba->ct_ctx[evt_dat->immed_dat].SID =
897                                 piocbq->iocb.un.rcvels.remoteID;
898                 } else
899                         evt_dat->immed_dat = piocbq->iocb.ulpContext;
900
901                 evt_dat->type = FC_REG_CT_EVENT;
902                 list_add(&evt_dat->node, &evt->events_to_see);
903                 if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
904                         wake_up_interruptible(&evt->wq);
905                         lpfc_bsg_event_unref(evt);
906                         break;
907                 }
908
909                 list_move(evt->events_to_see.prev, &evt->events_to_get);
910                 lpfc_bsg_event_unref(evt);
911
912                 job = evt->set_job;
913                 evt->set_job = NULL;
914                 if (job) {
915                         job->reply->reply_payload_rcv_len = size;
916                         /* make error code available to userspace */
917                         job->reply->result = 0;
918                         job->dd_data = NULL;
919                         /* complete the job back to userspace */
920                         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
921                         job->job_done(job);
922                         spin_lock_irqsave(&phba->ct_ev_lock, flags);
923                 }
924         }
925         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
926
927 error_ct_unsol_exit:
928         if (!list_empty(&head))
929                 list_del(&head);
930         if (evt_req_id == SLI_CT_ELX_LOOPBACK)
931                 return 0;
932         return 1;
933 }
934
935 /**
936  * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
937  * @job: SET_EVENT fc_bsg_job
938  */
939 static int
940 lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
941 {
942         struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
943         struct lpfc_hba *phba = vport->phba;
944         struct set_ct_event *event_req;
945         struct lpfc_bsg_event *evt;
946         int rc = 0;
947         struct bsg_job_data *dd_data = NULL;
948         uint32_t ev_mask;
949         unsigned long flags;
950
951         if (job->request_len <
952             sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
953                 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
954                                 "2612 Received SET_CT_EVENT below minimum "
955                                 "size\n");
956                 rc = -EINVAL;
957                 goto job_error;
958         }
959
960         dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
961         if (dd_data == NULL) {
962                 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
963                                 "2734 Failed allocation of dd_data\n");
964                 rc = -ENOMEM;
965                 goto job_error;
966         }
967
968         event_req = (struct set_ct_event *)
969                 job->request->rqst_data.h_vendor.vendor_cmd;
970         ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
971                                 FC_REG_EVENT_MASK);
972         spin_lock_irqsave(&phba->ct_ev_lock, flags);
973         list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
974                 if (evt->reg_id == event_req->ev_reg_id) {
975                         lpfc_bsg_event_ref(evt);
976                         evt->wait_time_stamp = jiffies;
977                         break;
978                 }
979         }
980         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
981
982         if (&evt->node == &phba->ct_ev_waiters) {
983                 /* no event waiting struct yet - first call */
984                 evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
985                                         event_req->ev_req_id);
986                 if (!evt) {
987                         lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
988                                         "2617 Failed allocation of event "
989                                         "waiter\n");
990                         rc = -ENOMEM;
991                         goto job_error;
992                 }
993
994                 spin_lock_irqsave(&phba->ct_ev_lock, flags);
995                 list_add(&evt->node, &phba->ct_ev_waiters);
996                 lpfc_bsg_event_ref(evt);
997                 evt->wait_time_stamp = jiffies;
998                 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
999         }
1000
1001         spin_lock_irqsave(&phba->ct_ev_lock, flags);
1002         evt->waiting = 1;
1003         dd_data->type = TYPE_EVT;
1004         dd_data->context_un.evt = evt;
1005         evt->set_job = job; /* for unsolicited command */
1006         job->dd_data = dd_data; /* for fc transport timeout callback*/
1007         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1008         return 0; /* call job done later */
1009
1010 job_error:
1011         if (dd_data != NULL)
1012                 kfree(dd_data);
1013
1014         job->dd_data = NULL;
1015         return rc;
1016 }
1017
1018 /**
1019  * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
1020  * @job: GET_EVENT fc_bsg_job
1021  */
1022 static int
1023 lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
1024 {
1025         struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1026         struct lpfc_hba *phba = vport->phba;
1027         struct get_ct_event *event_req;
1028         struct get_ct_event_reply *event_reply;
1029         struct lpfc_bsg_event *evt;
1030         struct event_data *evt_dat = NULL;
1031         unsigned long flags;
1032         uint32_t rc = 0;
1033
1034         if (job->request_len <
1035             sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
1036                 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1037                                 "2613 Received GET_CT_EVENT request below "
1038                                 "minimum size\n");
1039                 rc = -EINVAL;
1040                 goto job_error;
1041         }
1042
1043         event_req = (struct get_ct_event *)
1044                 job->request->rqst_data.h_vendor.vendor_cmd;
1045
1046         event_reply = (struct get_ct_event_reply *)
1047                 job->reply->reply_data.vendor_reply.vendor_rsp;
1048         spin_lock_irqsave(&phba->ct_ev_lock, flags);
1049         list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1050                 if (evt->reg_id == event_req->ev_reg_id) {
1051                         if (list_empty(&evt->events_to_get))
1052                                 break;
1053                         lpfc_bsg_event_ref(evt);
1054                         evt->wait_time_stamp = jiffies;
1055                         evt_dat = list_entry(evt->events_to_get.prev,
1056                                              struct event_data, node);
1057                         list_del(&evt_dat->node);
1058                         break;
1059                 }
1060         }
1061         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1062
1063         /* The app may continue to ask for event data until it gets
1064          * an error indicating that there isn't anymore
1065          */
1066         if (evt_dat == NULL) {
1067                 job->reply->reply_payload_rcv_len = 0;
1068                 rc = -ENOENT;
1069                 goto job_error;
1070         }
1071
1072         if (evt_dat->len > job->request_payload.payload_len) {
1073                 evt_dat->len = job->request_payload.payload_len;
1074                 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1075                                 "2618 Truncated event data at %d "
1076                                 "bytes\n",
1077                                 job->request_payload.payload_len);
1078         }
1079
1080         event_reply->type = evt_dat->type;
1081         event_reply->immed_data = evt_dat->immed_dat;
1082         if (evt_dat->len > 0)
1083                 job->reply->reply_payload_rcv_len =
1084                         sg_copy_from_buffer(job->request_payload.sg_list,
1085                                             job->request_payload.sg_cnt,
1086                                             evt_dat->data, evt_dat->len);
1087         else
1088                 job->reply->reply_payload_rcv_len = 0;
1089
1090         if (evt_dat) {
1091                 kfree(evt_dat->data);
1092                 kfree(evt_dat);
1093         }
1094
1095         spin_lock_irqsave(&phba->ct_ev_lock, flags);
1096         lpfc_bsg_event_unref(evt);
1097         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1098         job->dd_data = NULL;
1099         job->reply->result = 0;
1100         job->job_done(job);
1101         return 0;
1102
1103 job_error:
1104         job->dd_data = NULL;
1105         job->reply->result = rc;
1106         return rc;
1107 }
1108
1109 /**
1110  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
1111  * @job: fc_bsg_job to handle
1112  */
1113 static int
1114 lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
1115 {
1116         int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
1117         int rc;
1118
1119         switch (command) {
1120         case LPFC_BSG_VENDOR_SET_CT_EVENT:
1121                 rc = lpfc_bsg_hba_set_event(job);
1122                 break;
1123
1124         case LPFC_BSG_VENDOR_GET_CT_EVENT:
1125                 rc = lpfc_bsg_hba_get_event(job);
1126                 break;
1127         default:
1128                 rc = -EINVAL;
1129                 job->reply->reply_payload_rcv_len = 0;
1130                 /* make error code available to userspace */
1131                 job->reply->result = rc;
1132                 break;
1133         }
1134
1135         return rc;
1136 }
1137
1138 /**
1139  * lpfc_bsg_request - handle a bsg request from the FC transport
1140  * @job: fc_bsg_job to handle
1141  */
1142 int
1143 lpfc_bsg_request(struct fc_bsg_job *job)
1144 {
1145         uint32_t msgcode;
1146         int rc;
1147
1148         msgcode = job->request->msgcode;
1149         switch (msgcode) {
1150         case FC_BSG_HST_VENDOR:
1151                 rc = lpfc_bsg_hst_vendor(job);
1152                 break;
1153         case FC_BSG_RPT_ELS:
1154                 rc = lpfc_bsg_rport_els(job);
1155                 break;
1156         case FC_BSG_RPT_CT:
1157                 rc = lpfc_bsg_send_mgmt_cmd(job);
1158                 break;
1159         default:
1160                 rc = -EINVAL;
1161                 job->reply->reply_payload_rcv_len = 0;
1162                 /* make error code available to userspace */
1163                 job->reply->result = rc;
1164                 break;
1165         }
1166
1167         return rc;
1168 }
1169
1170 /**
1171  * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
1172  * @job: fc_bsg_job that has timed out
1173  *
1174  * This function just aborts the job's IOCB.  The aborted IOCB will return to
1175  * the waiting function which will handle passing the error back to userspace
1176  */
1177 int
1178 lpfc_bsg_timeout(struct fc_bsg_job *job)
1179 {
1180         struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1181         struct lpfc_hba *phba = vport->phba;
1182         struct lpfc_iocbq *cmdiocb;
1183         struct lpfc_bsg_event *evt;
1184         struct lpfc_bsg_iocb *iocb;
1185         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
1186         struct bsg_job_data *dd_data;
1187         unsigned long flags;
1188
1189         spin_lock_irqsave(&phba->ct_ev_lock, flags);
1190         dd_data = (struct bsg_job_data *)job->dd_data;
1191         /* timeout and completion crossed paths if no dd_data */
1192         if (!dd_data) {
1193                 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1194                 return 0;
1195         }
1196
1197         switch (dd_data->type) {
1198         case TYPE_IOCB:
1199                 iocb = &dd_data->context_un.iocb;
1200                 cmdiocb = iocb->cmdiocbq;
1201                 /* hint to completion handler that the job timed out */
1202                 job->reply->result = -EAGAIN;
1203                 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1204                 /* this will call our completion handler */
1205                 spin_lock_irq(&phba->hbalock);
1206                 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
1207                 spin_unlock_irq(&phba->hbalock);
1208                 break;
1209         case TYPE_EVT:
1210                 evt = dd_data->context_un.evt;
1211                 /* this event has no job anymore */
1212                 evt->set_job = NULL;
1213                 job->dd_data = NULL;
1214                 job->reply->reply_payload_rcv_len = 0;
1215                 /* Return -EAGAIN which is our way of signallying the
1216                  * app to retry.
1217                  */
1218                 job->reply->result = -EAGAIN;
1219                 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1220                 job->job_done(job);
1221                 break;
1222         default:
1223                 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1224                 break;
1225         }
1226
1227         /* scsi transport fc fc_bsg_job_timeout expects a zero return code,
1228          * otherwise an error message will be displayed on the console
1229          * so always return success (zero)
1230          */
1231         return 0;
1232 }