/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_srp.c 3932 2005-11-01 17:19:29Z roland $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>

#include <rdma/ib_cache.h>

#include "ib_srp.h"

#define DRV_NAME        "ib_srp"
#define PFX             DRV_NAME ": "
#define DRV_VERSION     "0.2"
#define DRV_RELDATE     "November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
                   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE;
static int srp_max_iu_len;

module_param(srp_sg_tablesize, int, 0444);
MODULE_PARM_DESC(srp_sg_tablesize,
                 "Max number of gather/scatter entries per I/O (default is 12)");

static int topspin_workarounds = 1;

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
                 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct ib_client srp_client = {
        .name   = "srp",
        .add    = srp_add_one,
        .remove = srp_remove_one
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
        return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
        return host_to_target(host)->target_name;
}

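/*
 * Allocate an information unit (IU): the srp_iu bookkeeping structure,
 * a zeroed data buffer of the requested size, and a DMA mapping of
 * that buffer in the given direction.  Returns NULL on any failure.
 */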
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
                                   gfp_t gfp_mask,
                                   enum dma_data_direction direction)
{
        struct srp_iu *iu;

        iu = kmalloc(sizeof *iu, gfp_mask);
        if (!iu)
                goto out;

        iu->buf = kzalloc(size, gfp_mask);
        if (!iu->buf)
                goto out_free_iu;

        iu->dma = dma_map_single(host->dev->dev->dma_device,
                                 iu->buf, size, direction);
        if (dma_mapping_error(iu->dma))
                goto out_free_buf;

        iu->size      = size;
        iu->direction = direction;

        return iu;

out_free_buf:
        kfree(iu->buf);
out_free_iu:
        kfree(iu);
out:
        return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
        if (!iu)
                return;

        dma_unmap_single(host->dev->dev->dma_device,
                         iu->dma, iu->size, iu->direction);
        kfree(iu->buf);
        kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
        printk(KERN_ERR PFX "QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
                       struct ib_qp *qp)
{
        struct ib_qp_attr *attr;
        int ret;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        ret = ib_find_cached_pkey(target->srp_host->dev->dev,
                                  target->srp_host->port,
                                  be16_to_cpu(target->path.pkey),
                                  &attr->pkey_index);
        if (ret)
                goto out;

        attr->qp_state        = IB_QPS_INIT;
        attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
                                 IB_ACCESS_REMOTE_WRITE);
        attr->port_num        = target->srp_host->port;

        ret = ib_modify_qp(qp, attr,
                           IB_QP_STATE          |
                           IB_QP_PKEY_INDEX     |
                           IB_QP_ACCESS_FLAGS   |
                           IB_QP_PORT);

out:
        kfree(attr);
        return ret;
}

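/*
 * Create the IB resources for one target port: a completion queue
 * shared by sends and receives, and a reliable-connected (RC) queue
 * pair attached to it.  The QP is moved to the INIT state before use.
 */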
static int srp_create_target_ib(struct srp_target_port *target)
{
        struct ib_qp_init_attr *init_attr;
        int ret;

        init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
        if (!init_attr)
                return -ENOMEM;

        target->cq = ib_create_cq(target->srp_host->dev->dev, srp_completion,
                                  NULL, target, SRP_CQ_SIZE);
        if (IS_ERR(target->cq)) {
                ret = PTR_ERR(target->cq);
                goto out;
        }

        ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);

        init_attr->event_handler       = srp_qp_event;
        init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
        init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
        init_attr->cap.max_recv_sge    = 1;
        init_attr->cap.max_send_sge    = 1;
        init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
        init_attr->qp_type             = IB_QPT_RC;
        init_attr->send_cq             = target->cq;
        init_attr->recv_cq             = target->cq;

        target->qp = ib_create_qp(target->srp_host->dev->pd, init_attr);
        if (IS_ERR(target->qp)) {
                ret = PTR_ERR(target->qp);
                ib_destroy_cq(target->cq);
                goto out;
        }

        ret = srp_init_qp(target, target->qp);
        if (ret) {
                ib_destroy_qp(target->qp);
                ib_destroy_cq(target->cq);
                goto out;
        }

out:
        kfree(init_attr);
        return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
        int i;

        ib_destroy_qp(target->qp);
        ib_destroy_cq(target->cq);

        for (i = 0; i < SRP_RQ_SIZE; ++i)
                srp_free_iu(target->srp_host, target->rx_ring[i]);
        for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
                srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
                                    struct ib_sa_path_rec *pathrec,
                                    void *target_ptr)
{
        struct srp_target_port *target = target_ptr;

        target->status = status;
        if (status)
                printk(KERN_ERR PFX "Got failed path rec status %d\n", status);
        else
                target->path = *pathrec;
        complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
        target->path.numb_path = 1;

        init_completion(&target->done);

        target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev->dev,
                                                   target->srp_host->port,
                                                   &target->path,
                                                   IB_SA_PATH_REC_DGID          |
                                                   IB_SA_PATH_REC_SGID          |
                                                   IB_SA_PATH_REC_NUMB_PATH     |
                                                   IB_SA_PATH_REC_PKEY,
                                                   SRP_PATH_REC_TIMEOUT_MS,
                                                   GFP_KERNEL,
                                                   srp_path_rec_completion,
                                                   target, &target->path_query);
        if (target->path_query_id < 0)
                return target->path_query_id;

        wait_for_completion(&target->done);

        if (target->status < 0)
                printk(KERN_WARNING PFX "Path record query failed\n");

        return target->status;
}

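/*
 * Build and send the CM REQ that carries our SRP_LOGIN_REQ as private
 * data.  The starting PSN is randomized, and the initiator/target
 * port IDs are laid out according to the I/O class the target reports
 * (see the comment below about obsolete SRP drafts).
 */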
static int srp_send_req(struct srp_target_port *target)
{
        struct {
                struct ib_cm_req_param param;
                struct srp_login_req   priv;
        } *req = NULL;
        int status;

        req = kzalloc(sizeof *req, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->param.primary_path               = &target->path;
        req->param.alternate_path             = NULL;
        req->param.service_id                 = target->service_id;
        req->param.qp_num                     = target->qp->qp_num;
        req->param.qp_type                    = target->qp->qp_type;
        req->param.private_data               = &req->priv;
        req->param.private_data_len           = sizeof req->priv;
        req->param.flow_control               = 1;

        get_random_bytes(&req->param.starting_psn, 4);
        req->param.starting_psn              &= 0xffffff;

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req->param.responder_resources        = 4;
        req->param.remote_cm_response_timeout = 20;
        req->param.local_cm_response_timeout  = 20;
        req->param.retry_count                = 7;
        req->param.rnr_retry_count            = 7;
        req->param.max_cm_retries             = 15;

        req->priv.opcode        = SRP_LOGIN_REQ;
        req->priv.tag           = 0;
        req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len);
        req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
                                              SRP_BUF_FORMAT_INDIRECT);
        /*
         * In the published SRP specification (draft rev. 16a), the
         * port identifier format is 8 bytes of ID extension followed
         * by 8 bytes of GUID.  Older drafts put the two halves in the
         * opposite order, so that the GUID comes first.
         *
         * Targets conforming to these obsolete drafts can be
         * recognized by the I/O Class they report.
         */
        if (target->io_class == SRP_REV10_IB_IO_CLASS) {
                memcpy(req->priv.initiator_port_id,
                       target->srp_host->initiator_port_id + 8, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       target->srp_host->initiator_port_id, 8);
                memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
                memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
        } else {
                memcpy(req->priv.initiator_port_id,
                       target->srp_host->initiator_port_id, 16);
                memcpy(req->priv.target_port_id,     &target->id_ext, 8);
                memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
        }

        /*
         * Topspin/Cisco SRP targets will reject our login unless we
         * zero out the first 8 bytes of our initiator port ID.  The
         * second 8 bytes must be our local node GUID, but we always
         * use that anyway.
         */
        if (topspin_workarounds && !memcmp(&target->ioc_guid, topspin_oui, 3)) {
                printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
                       "activated for target GUID %016llx\n",
                       (unsigned long long) be64_to_cpu(target->ioc_guid));
                memset(req->priv.initiator_port_id, 0, 8);
        }

        status = ib_send_cm_req(target->cm_id, &req->param);

        kfree(req);

        return status;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
        /* XXX should send SRP_I_LOGOUT request */

        init_completion(&target->done);
        if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
                printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
                return;
        }
        wait_for_completion(&target->done);
}

static void srp_remove_work(void *target_ptr)
{
        struct srp_target_port *target = target_ptr;

        spin_lock_irq(target->scsi_host->host_lock);
        if (target->state != SRP_TARGET_DEAD) {
                spin_unlock_irq(target->scsi_host->host_lock);
                return;
        }
        target->state = SRP_TARGET_REMOVED;
        spin_unlock_irq(target->scsi_host->host_lock);

        spin_lock(&target->srp_host->target_lock);
        list_del(&target->list);
        spin_unlock(&target->srp_host->target_lock);

        scsi_remove_host(target->scsi_host);
        ib_destroy_cm_id(target->cm_id);
        srp_free_target_ib(target);
        scsi_host_put(target->scsi_host);
}

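/*
 * Connect to the target: resolve a path record, then send login
 * requests until the connection either completes or fails for a
 * reason other than a port or DLID redirect.
 */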
static int srp_connect_target(struct srp_target_port *target)
{
        int ret;

        ret = srp_lookup_path(target);
        if (ret)
                return ret;

        while (1) {
                init_completion(&target->done);
                ret = srp_send_req(target);
                if (ret)
                        return ret;
                wait_for_completion(&target->done);

                /*
                 * The CM event handling code will set status to
                 * SRP_PORT_REDIRECT if we get a port redirect REJ
                 * back, or SRP_DLID_REDIRECT if we get a lid/qp
                 * redirect REJ back.
                 */
                switch (target->status) {
                case 0:
                        return 0;

                case SRP_PORT_REDIRECT:
                        ret = srp_lookup_path(target);
                        if (ret)
                                return ret;
                        break;

                case SRP_DLID_REDIRECT:
                        break;

                default:
                        return target->status;
                }
        }
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
                           struct srp_target_port *target,
                           struct srp_request *req)
{
        struct scatterlist *scat;
        int nents;

        if (!scmnd->request_buffer ||
            (scmnd->sc_data_direction != DMA_TO_DEVICE &&
             scmnd->sc_data_direction != DMA_FROM_DEVICE))
                return;

        if (req->fmr) {
                ib_fmr_pool_unmap(req->fmr);
                req->fmr = NULL;
        }

        /*
         * This handling of non-SG commands can be killed when the
         * SCSI midlayer no longer generates non-SG commands.
         */
        if (likely(scmnd->use_sg)) {
                nents = scmnd->use_sg;
                scat  = scmnd->request_buffer;
        } else {
                nents = 1;
                scat  = &req->fake_sg;
        }

        dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents,
                     scmnd->sc_data_direction);
}

static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
{
        srp_unmap_data(req->scmnd, target, req);
        list_move_tail(&req->list, &target->free_reqs);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
        req->scmnd->result = DID_RESET << 16;
        req->scmnd->scsi_done(req->scmnd);
        srp_remove_req(target, req);
}

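/*
 * Tear down and re-establish the connection after an error:
 * disconnect, allocate a fresh CM ID, reset and reinitialize the QP,
 * drain the CQ, fail all outstanding requests with DID_RESET, and
 * reconnect.  If any step fails, the target port is marked dead and
 * its removal is deferred to a work queue.
 */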
static int srp_reconnect_target(struct srp_target_port *target)
{
        struct ib_cm_id *new_cm_id;
        struct ib_qp_attr qp_attr;
        struct srp_request *req, *tmp;
        struct ib_wc wc;
        int ret;

        spin_lock_irq(target->scsi_host->host_lock);
        if (target->state != SRP_TARGET_LIVE) {
                spin_unlock_irq(target->scsi_host->host_lock);
                return -EAGAIN;
        }
        target->state = SRP_TARGET_CONNECTING;
        spin_unlock_irq(target->scsi_host->host_lock);

        srp_disconnect_target(target);
        /*
         * Now get a new local CM ID so that we avoid confusing the
         * target in case things are really fouled up.
         */
        new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
                                    srp_cm_handler, target);
        if (IS_ERR(new_cm_id)) {
                ret = PTR_ERR(new_cm_id);
                goto err;
        }
        ib_destroy_cm_id(target->cm_id);
        target->cm_id = new_cm_id;

        qp_attr.qp_state = IB_QPS_RESET;
        ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
        if (ret)
                goto err;

        ret = srp_init_qp(target, target->qp);
        if (ret)
                goto err;

        while (ib_poll_cq(target->cq, 1, &wc) > 0)
                ; /* nothing */

        list_for_each_entry_safe(req, tmp, &target->req_queue, list)
                srp_reset_req(target, req);

        target->rx_head  = 0;
        target->tx_head  = 0;
        target->tx_tail  = 0;

        ret = srp_connect_target(target);
        if (ret)
                goto err;

        spin_lock_irq(target->scsi_host->host_lock);
        if (target->state == SRP_TARGET_CONNECTING) {
                ret = 0;
                target->state = SRP_TARGET_LIVE;
        } else
                ret = -EAGAIN;
        spin_unlock_irq(target->scsi_host->host_lock);

        return ret;

err:
        printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);

        /*
         * We couldn't reconnect, so kill our target port off.
         * However, we have to defer the real removal because we might
         * be in the context of the SCSI error handler now, which
         * would deadlock if we call scsi_remove_host().
         */
        spin_lock_irq(target->scsi_host->host_lock);
        if (target->state == SRP_TARGET_CONNECTING) {
                target->state = SRP_TARGET_DEAD;
                INIT_WORK(&target->work, srp_remove_work, target);
                schedule_work(&target->work);
        }
        spin_unlock_irq(target->scsi_host->host_lock);

        return ret;
}

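/*
 * Map a scatterlist through the FMR pool so that the whole buffer can
 * be described by a single rkey/address pair.  Every entry except the
 * first must start on an FMR page boundary, and every entry except
 * the last must end on one; otherwise the mapping is refused and the
 * caller falls back to an indirect descriptor.
 */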
static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat,
                       int sg_cnt, struct srp_request *req,
                       struct srp_direct_buf *buf)
{
        u64 io_addr = 0;
        u64 *dma_pages;
        u32 len;
        int page_cnt;
        int i, j;
        int ret;

        if (!dev->fmr_pool)
                return -ENODEV;

        len = page_cnt = 0;
        for (i = 0; i < sg_cnt; ++i) {
                if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) {
                        if (i > 0)
                                return -EINVAL;
                        else
                                ++page_cnt;
                }
                if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) &
                    ~dev->fmr_page_mask) {
                        if (i < sg_cnt - 1)
                                return -EINVAL;
                        else
                                ++page_cnt;
                }

                len += sg_dma_len(&scat[i]);
        }

        page_cnt += len >> dev->fmr_page_shift;
        if (page_cnt > SRP_FMR_SIZE)
                return -ENOMEM;

        dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);
        if (!dma_pages)
                return -ENOMEM;

        page_cnt = 0;
        for (i = 0; i < sg_cnt; ++i)
                for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size)
                        dma_pages[page_cnt++] =
                                (sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j;

        req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
                                        dma_pages, page_cnt, io_addr);
        if (IS_ERR(req->fmr)) {
                ret = PTR_ERR(req->fmr);
                req->fmr = NULL;
                goto out;
        }

        buf->va  = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask);
        buf->key = cpu_to_be32(req->fmr->fmr->rkey);
        buf->len = cpu_to_be32(len);

        ret = 0;

out:
        kfree(dma_pages);

        return ret;
}

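/*
 * Fill in the data descriptor of an SRP command.  A single DMA-mapped
 * entry gets a direct descriptor; otherwise we first try an FMR
 * mapping, and fall back to an indirect descriptor listing every
 * scatterlist entry if that fails.  Returns the total length of the
 * command IU, or a negative error.
 */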
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
                        struct srp_request *req)
{
        struct scatterlist *scat;
        struct srp_cmd *cmd = req->cmd->buf;
        int len, nents, count;
        u8 fmt = SRP_DATA_DESC_DIRECT;

        if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
                return sizeof (struct srp_cmd);

        if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
            scmnd->sc_data_direction != DMA_TO_DEVICE) {
                printk(KERN_WARNING PFX "Unhandled data direction %d\n",
                       scmnd->sc_data_direction);
                return -EINVAL;
        }

        /*
         * This handling of non-SG commands can be killed when the
         * SCSI midlayer no longer generates non-SG commands.
         */
        if (likely(scmnd->use_sg)) {
                nents = scmnd->use_sg;
                scat  = scmnd->request_buffer;
        } else {
                nents = 1;
                scat  = &req->fake_sg;
                sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
        }

        count = dma_map_sg(target->srp_host->dev->dev->dma_device,
                           scat, nents, scmnd->sc_data_direction);

        fmt = SRP_DATA_DESC_DIRECT;
        len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

        if (count == 1) {
                /*
                 * The midlayer only generated a single gather/scatter
                 * entry, or DMA mapping coalesced everything to a
                 * single entry.  So a direct descriptor along with
                 * the DMA MR suffices.
                 */
                struct srp_direct_buf *buf = (void *) cmd->add_data;

                buf->va  = cpu_to_be64(sg_dma_address(scat));
                buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey);
                buf->len = cpu_to_be32(sg_dma_len(scat));
        } else if (srp_map_fmr(target->srp_host->dev, scat, count, req,
                               (void *) cmd->add_data)) {
                /*
                 * FMR mapping failed, and the scatterlist has more
                 * than one entry.  Generate an indirect memory
                 * descriptor.
                 */
                struct srp_indirect_buf *buf = (void *) cmd->add_data;
                u32 datalen = 0;
                int i;

                fmt = SRP_DATA_DESC_INDIRECT;
                len = sizeof (struct srp_cmd) +
                        sizeof (struct srp_indirect_buf) +
                        count * sizeof (struct srp_direct_buf);

                for (i = 0; i < count; ++i) {
                        buf->desc_list[i].va  =
                                cpu_to_be64(sg_dma_address(&scat[i]));
                        buf->desc_list[i].key =
                                cpu_to_be32(target->srp_host->dev->mr->rkey);
                        buf->desc_list[i].len =
                                cpu_to_be32(sg_dma_len(&scat[i]));
                        datalen += sg_dma_len(&scat[i]);
                }

                if (scmnd->sc_data_direction == DMA_TO_DEVICE)
                        cmd->data_out_desc_cnt = count;
                else
                        cmd->data_in_desc_cnt = count;

                buf->table_desc.va  =
                        cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
                buf->table_desc.key =
                        cpu_to_be32(target->srp_host->dev->mr->rkey);
                buf->table_desc.len =
                        cpu_to_be32(count * sizeof (struct srp_direct_buf));

                buf->len = cpu_to_be32(datalen);
        }

        if (scmnd->sc_data_direction == DMA_TO_DEVICE)
                cmd->buf_fmt = fmt << 4;
        else
                cmd->buf_fmt = fmt;

        return len;
}

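/*
 * Handle an SRP_RSP from the target: update the request-limit credit
 * from req_lim_delta, then complete either the task management
 * request or the SCSI command that the response's tag refers to,
 * copying sense data and residual counts as needed.
 */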
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
        struct srp_request *req;
        struct scsi_cmnd *scmnd;
        unsigned long flags;
        s32 delta;

        delta = (s32) be32_to_cpu(rsp->req_lim_delta);

        spin_lock_irqsave(target->scsi_host->host_lock, flags);

        target->req_lim += delta;

        req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

        if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
                if (be32_to_cpu(rsp->resp_data_len) < 4)
                        req->tsk_status = -1;
                else
                        req->tsk_status = rsp->data[3];
                complete(&req->done);
        } else {
                scmnd = req->scmnd;
                if (!scmnd) {
                        printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
                               (unsigned long long) rsp->tag);
                        /* Nothing to complete; bail out rather than
                         * dereferencing a NULL command. */
                        goto out;
                }
                scmnd->result = rsp->status;

                if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
                        memcpy(scmnd->sense_buffer, rsp->data +
                               be32_to_cpu(rsp->resp_data_len),
                               min_t(int, be32_to_cpu(rsp->sense_data_len),
                                     SCSI_SENSE_BUFFERSIZE));
                }

                if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
                        scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
                else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
                        scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);

                if (!req->tsk_mgmt) {
                        scmnd->host_scribble = (void *) -1L;
                        scmnd->scsi_done(scmnd);

                        srp_remove_req(target, req);
                } else
                        req->cmd_done = 1;
        }

out:
        spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}

static void srp_reconnect_work(void *target_ptr)
{
        struct srp_target_port *target = target_ptr;

        srp_reconnect_target(target);
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
        struct srp_iu *iu;
        u8 opcode;

        iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];

        dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
                                target->max_ti_iu_len, DMA_FROM_DEVICE);

        opcode = *(u8 *) iu->buf;

        if (0) {
                int i;

                printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);

                for (i = 0; i < wc->byte_len; ++i) {
                        if (i % 8 == 0)
                                printk(KERN_ERR "  [%02x] ", i);
                        printk(" %02x", ((u8 *) iu->buf)[i]);
                        if ((i + 1) % 8 == 0)
                                printk("\n");
                }

                if (wc->byte_len % 8)
                        printk("\n");
        }

        switch (opcode) {
        case SRP_RSP:
                srp_process_rsp(target, iu->buf);
                break;

        case SRP_T_LOGOUT:
                /* XXX Handle target logout */
                printk(KERN_WARNING PFX "Got target logout request\n");
                break;

        default:
                printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);
                break;
        }

        dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
                                   target->max_ti_iu_len, DMA_FROM_DEVICE);
}

static void srp_completion(struct ib_cq *cq, void *target_ptr)
{
        struct srp_target_port *target = target_ptr;
        struct ib_wc wc;
        unsigned long flags;

        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
        while (ib_poll_cq(cq, 1, &wc) > 0) {
                if (wc.status) {
                        printk(KERN_ERR PFX "failed %s status %d\n",
                               wc.wr_id & SRP_OP_RECV ? "receive" : "send",
                               wc.status);
                        spin_lock_irqsave(target->scsi_host->host_lock, flags);
                        if (target->state == SRP_TARGET_LIVE)
                                schedule_work(&target->work);
                        spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
                        break;
                }

                if (wc.wr_id & SRP_OP_RECV)
                        srp_handle_recv(target, &wc);
                else
                        ++target->tx_tail;
        }
}

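/*
 * Post one receive work request for the next slot in the receive
 * ring.  The caller must hold target->scsi_host->host_lock to
 * protect rx_head; srp_post_recv() below is the locked wrapper.
 */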
static int __srp_post_recv(struct srp_target_port *target)
{
        struct srp_iu *iu;
        struct ib_sge list;
        struct ib_recv_wr wr, *bad_wr;
        unsigned int next;
        int ret;

        next     = target->rx_head & (SRP_RQ_SIZE - 1);
        wr.wr_id = next | SRP_OP_RECV;
        iu       = target->rx_ring[next];

        list.addr   = iu->dma;
        list.length = iu->size;
        list.lkey   = target->srp_host->dev->mr->lkey;

        wr.next     = NULL;
        wr.sg_list  = &list;
        wr.num_sge  = 1;

        ret = ib_post_recv(target->qp, &wr, &bad_wr);
        if (!ret)
                ++target->rx_head;

        return ret;
}

static int srp_post_recv(struct srp_target_port *target)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(target->scsi_host->host_lock, flags);
        ret = __srp_post_recv(target);
        spin_unlock_irqrestore(target->scsi_host->host_lock, flags);

        return ret;
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.  Lock cannot be dropped between call here and
 * call to __srp_post_send().
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
{
        if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
                return NULL;

        if (unlikely(target->req_lim < 1))
                ++target->zero_req_lim;

        return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
 */
static int __srp_post_send(struct srp_target_port *target,
                           struct srp_iu *iu, int len)
{
        struct ib_sge list;
        struct ib_send_wr wr, *bad_wr;
        int ret = 0;

        list.addr   = iu->dma;
        list.length = len;
        list.lkey   = target->srp_host->dev->mr->lkey;

        wr.next       = NULL;
        wr.wr_id      = target->tx_head & SRP_SQ_SIZE;
        wr.sg_list    = &list;
        wr.num_sge    = 1;
        wr.opcode     = IB_WR_SEND;
        wr.send_flags = IB_SEND_SIGNALED;

        ret = ib_post_send(target->qp, &wr, &bad_wr);

        if (!ret) {
                ++target->tx_head;
                --target->req_lim;
        }

        return ret;
}

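/*
 * Queue a SCSI command: grab a TX IU and a free request slot, build
 * the SRP_CMD, map the data buffer, post a receive for the eventual
 * response, and post the send.  Called by the SCSI midlayer with
 * host_lock held.
 */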
static int srp_queuecommand(struct scsi_cmnd *scmnd,
                            void (*done)(struct scsi_cmnd *))
{
        struct srp_target_port *target = host_to_target(scmnd->device->host);
        struct srp_request *req;
        struct srp_iu *iu;
        struct srp_cmd *cmd;
        int len;

        if (target->state == SRP_TARGET_CONNECTING)
                goto err;

        if (target->state == SRP_TARGET_DEAD ||
            target->state == SRP_TARGET_REMOVED) {
                scmnd->result = DID_BAD_TARGET << 16;
                done(scmnd);
                return 0;
        }

        iu = __srp_get_tx_iu(target);
        if (!iu)
                goto err;

        dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
                                srp_max_iu_len, DMA_TO_DEVICE);

        req = list_entry(target->free_reqs.next, struct srp_request, list);

        scmnd->scsi_done     = done;
        scmnd->result        = 0;
        scmnd->host_scribble = (void *) (long) req->index;

        cmd = iu->buf;
        memset(cmd, 0, sizeof *cmd);

        cmd->opcode = SRP_CMD;
        cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
        cmd->tag    = req->index;
        memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

        req->scmnd    = scmnd;
        req->cmd      = iu;
        req->cmd_done = 0;
        req->tsk_mgmt = NULL;

        len = srp_map_data(scmnd, target, req);
        if (len < 0) {
                printk(KERN_ERR PFX "Failed to map data\n");
                goto err;
        }

        if (__srp_post_recv(target)) {
                printk(KERN_ERR PFX "Recv failed\n");
                goto err_unmap;
        }

        dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
                                   srp_max_iu_len, DMA_TO_DEVICE);

        if (__srp_post_send(target, iu, len)) {
                printk(KERN_ERR PFX "Send failed\n");
                goto err_unmap;
        }

        list_move_tail(&req->list, &target->req_queue);

        return 0;

err_unmap:
        srp_unmap_data(scmnd, target, req);

err:
        return SCSI_MLQUEUE_HOST_BUSY;
}

static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
        int i;

        for (i = 0; i < SRP_RQ_SIZE; ++i) {
                target->rx_ring[i] = srp_alloc_iu(target->srp_host,
                                                  target->max_ti_iu_len,
                                                  GFP_KERNEL, DMA_FROM_DEVICE);
                if (!target->rx_ring[i])
                        goto err;
        }

        for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
                target->tx_ring[i] = srp_alloc_iu(target->srp_host,
                                                  srp_max_iu_len,
                                                  GFP_KERNEL, DMA_TO_DEVICE);
                if (!target->tx_ring[i])
                        goto err;
        }

        return 0;

err:
        for (i = 0; i < SRP_RQ_SIZE; ++i) {
                srp_free_iu(target->srp_host, target->rx_ring[i]);
                target->rx_ring[i] = NULL;
        }

        for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
                srp_free_iu(target->srp_host, target->tx_ring[i]);
                target->tx_ring[i] = NULL;
        }

        return -ENOMEM;
}

static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event,
                               struct srp_target_port *target)
{
        struct ib_class_port_info *cpi;
        int opcode;

        switch (event->param.rej_rcvd.reason) {
        case IB_CM_REJ_PORT_CM_REDIRECT:
                cpi = event->param.rej_rcvd.ari;
                target->path.dlid = cpi->redirect_lid;
                target->path.pkey = cpi->redirect_pkey;
                cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
                memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

                target->status = target->path.dlid ?
                        SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
                break;

        case IB_CM_REJ_PORT_REDIRECT:
                if (topspin_workarounds &&
                    !memcmp(&target->ioc_guid, topspin_oui, 3)) {
                        /*
                         * Topspin/Cisco SRP gateways incorrectly send
                         * reject reason code 25 when they mean 24
                         * (port redirect).
                         */
                        memcpy(target->path.dgid.raw,
                               event->param.rej_rcvd.ari, 16);

                        printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
                               (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
                               (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

                        target->status = SRP_PORT_REDIRECT;
                } else {
                        printk(KERN_WARNING "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
                        target->status = -ECONNRESET;
                }
                break;

        case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
                printk(KERN_WARNING "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
                target->status = -ECONNRESET;
                break;

        case IB_CM_REJ_CONSUMER_DEFINED:
                opcode = *(u8 *) event->private_data;
                if (opcode == SRP_LOGIN_REJ) {
                        struct srp_login_rej *rej = event->private_data;
                        u32 reason = be32_to_cpu(rej->reason);

                        if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
                                printk(KERN_WARNING PFX
                                       "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
                        else
                                printk(KERN_WARNING PFX
                                       "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
                } else
                        printk(KERN_WARNING "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
                               " opcode 0x%02x\n", opcode);
                target->status = -ECONNRESET;
                break;

        default:
                printk(KERN_WARNING "  REJ reason 0x%x\n",
                       event->param.rej_rcvd.reason);
                target->status = -ECONNRESET;
        }
}

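/*
 * Connection manager event handler.  On a REP carrying SRP_LOGIN_RSP,
 * allocate the IU rings and walk the QP through RTR and RTS before
 * sending the RTU; REJ, DREQ and timewait-exit events update
 * target->status and wake up whoever is waiting on target->done.
 */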
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        struct srp_target_port *target = cm_id->context;
        struct ib_qp_attr *qp_attr = NULL;
        int attr_mask = 0;
        int comp = 0;
        int opcode = 0;

        switch (event->event) {
        case IB_CM_REQ_ERROR:
                printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
                comp = 1;
                target->status = -ECONNRESET;
                break;

        case IB_CM_REP_RECEIVED:
                comp = 1;
                opcode = *(u8 *) event->private_data;

                if (opcode == SRP_LOGIN_RSP) {
                        struct srp_login_rsp *rsp = event->private_data;

                        target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
                        target->req_lim       = be32_to_cpu(rsp->req_lim_delta);

                        target->scsi_host->can_queue = min(target->req_lim,
                                                           target->scsi_host->can_queue);
                } else {
                        printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
                        target->status = -ECONNRESET;
                        break;
                }

                target->status = srp_alloc_iu_bufs(target);
                if (target->status)
                        break;

                qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
                if (!qp_attr) {
                        target->status = -ENOMEM;
                        break;
                }

                qp_attr->qp_state = IB_QPS_RTR;
                target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
                if (target->status)
                        break;

                target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
                if (target->status)
                        break;

                target->status = srp_post_recv(target);
                if (target->status)
                        break;

                qp_attr->qp_state = IB_QPS_RTS;
                target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
                if (target->status)
                        break;

                target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
                if (target->status)
                        break;

                target->status = ib_send_cm_rtu(cm_id, NULL, 0);
                if (target->status)
                        break;

                break;

        case IB_CM_REJ_RECEIVED:
                printk(KERN_DEBUG PFX "REJ received\n");
                comp = 1;

                srp_cm_rej_handler(cm_id, event, target);
                break;

        case IB_CM_DREQ_RECEIVED:
                printk(KERN_WARNING PFX "DREQ received - connection closed\n");
                if (ib_send_cm_drep(cm_id, NULL, 0))
                        printk(KERN_ERR PFX "Sending CM DREP failed\n");
                break;

        case IB_CM_TIMEWAIT_EXIT:
                printk(KERN_ERR PFX "connection closed\n");

                comp = 1;
                target->status = 0;
                break;

        case IB_CM_MRA_RECEIVED:
        case IB_CM_DREQ_ERROR:
        case IB_CM_DREP_RECEIVED:
                break;

        default:
                printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
                break;
        }

        if (comp)
                complete(&target->done);

        kfree(qp_attr);

        return 0;
}

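/*
 * Send an SRP_TSK_MGMT request (abort task, LUN reset, ...) for the
 * given request and wait up to SRP_ABORT_TIMEOUT_MS for the response.
 * Returns 0 on success, -1 on failure or timeout.
 */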
1246 static int srp_send_tsk_mgmt(struct srp_target_port *target,
1247                              struct srp_request *req, u8 func)
1248 {
1249         struct srp_iu *iu;
1250         struct srp_tsk_mgmt *tsk_mgmt;
1251
1252         spin_lock_irq(target->scsi_host->host_lock);
1253
1254         if (target->state == SRP_TARGET_DEAD ||
1255             target->state == SRP_TARGET_REMOVED) {
1256                 req->scmnd->result = DID_BAD_TARGET << 16;
1257                 goto out;
1258         }
1259
1260         init_completion(&req->done);
1261
1262         iu = __srp_get_tx_iu(target);
1263         if (!iu)
1264                 goto out;
1265
1266         tsk_mgmt = iu->buf;
1267         memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
1268
1269         tsk_mgmt->opcode        = SRP_TSK_MGMT;
1270         tsk_mgmt->lun           = cpu_to_be64((u64) req->scmnd->device->lun << 48);
1271         tsk_mgmt->tag           = req->index | SRP_TAG_TSK_MGMT;
1272         tsk_mgmt->tsk_mgmt_func = func;
1273         tsk_mgmt->task_tag      = req->index;
1274
1275         if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
1276                 goto out;
1277
1278         req->tsk_mgmt = iu;
1279
1280         spin_unlock_irq(target->scsi_host->host_lock);
1281
1282         if (!wait_for_completion_timeout(&req->done,
1283                                          msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
1284                 return -1;
1285
1286         return 0;
1287
1288 out:
1289         spin_unlock_irq(target->scsi_host->host_lock);
1290         return -1;
1291 }
1292
1293 static int srp_find_req(struct srp_target_port *target,
1294                         struct scsi_cmnd *scmnd,
1295                         struct srp_request **req)
1296 {
1297         if (scmnd->host_scribble == (void *) -1L)
1298                 return -1;
1299
1300         *req = &target->req_ring[(long) scmnd->host_scribble];
1301
1302         return 0;
1303 }
1304
1305 static int srp_abort(struct scsi_cmnd *scmnd)
1306 {
1307         struct srp_target_port *target = host_to_target(scmnd->device->host);
1308         struct srp_request *req;
1309         int ret = SUCCESS;
1310
1311         printk(KERN_ERR "SRP abort called\n");
1312
1313         if (srp_find_req(target, scmnd, &req))
1314                 return FAILED;
1315         if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
1316                 return FAILED;
1317
1318         spin_lock_irq(target->scsi_host->host_lock);
1319
1320         if (req->cmd_done) {
1321                 srp_remove_req(target, req);
1322                 scmnd->scsi_done(scmnd);
1323         } else if (!req->tsk_status) {
1324                 srp_remove_req(target, req);
1325                 scmnd->result = DID_ABORT << 16;
1326         } else
1327                 ret = FAILED;
1328
1329         spin_unlock_irq(target->scsi_host->host_lock);
1330
1331         return ret;
1332 }
1333
1334 static int srp_reset_device(struct scsi_cmnd *scmnd)
1335 {
1336         struct srp_target_port *target = host_to_target(scmnd->device->host);
1337         struct srp_request *req, *tmp;
1338
1339         printk(KERN_ERR "SRP reset_device called\n");
1340
1341         if (srp_find_req(target, scmnd, &req))
1342                 return FAILED;
1343         if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
1344                 return FAILED;
1345         if (req->tsk_status)
1346                 return FAILED;
1347
1348         spin_lock_irq(target->scsi_host->host_lock);
1349
1350         list_for_each_entry_safe(req, tmp, &target->req_queue, list)
1351                 if (req->scmnd->device == scmnd->device)
1352                         srp_reset_req(target, req);
1353
1354         spin_unlock_irq(target->scsi_host->host_lock);
1355
1356         return SUCCESS;
1357 }
1358
1359 static int srp_reset_host(struct scsi_cmnd *scmnd)
1360 {
1361         struct srp_target_port *target = host_to_target(scmnd->device->host);
1362         int ret = FAILED;
1363
1364         printk(KERN_ERR PFX "SRP reset_host called\n");
1365
1366         if (!srp_reconnect_target(target))
1367                 ret = SUCCESS;
1368
1369         return ret;
1370 }
1371
1372 static ssize_t show_id_ext(struct class_device *cdev, char *buf)
1373 {
1374         struct srp_target_port *target = host_to_target(class_to_shost(cdev));
1375
1376         if (target->state == SRP_TARGET_DEAD ||
1377             target->state == SRP_TARGET_REMOVED)
1378                 return -ENODEV;
1379
1380         return sprintf(buf, "0x%016llx\n",
1381                        (unsigned long long) be64_to_cpu(target->id_ext));
1382 }
1383
1384 static ssize_t show_ioc_guid(struct class_device *cdev, char *buf)
1385 {
1386         struct srp_target_port *target = host_to_target(class_to_shost(cdev));
1387
1388         if (target->state == SRP_TARGET_DEAD ||
1389             target->state == SRP_TARGET_REMOVED)
1390                 return -ENODEV;
1391
1392         return sprintf(buf, "0x%016llx\n",
1393                        (unsigned long long) be64_to_cpu(target->ioc_guid));
1394 }
1395
1396 static ssize_t show_service_id(struct class_device *cdev, char *buf)
1397 {
1398         struct srp_target_port *target = host_to_target(class_to_shost(cdev));
1399
1400         if (target->state == SRP_TARGET_DEAD ||
1401             target->state == SRP_TARGET_REMOVED)
1402                 return -ENODEV;
1403
1404         return sprintf(buf, "0x%016llx\n",
1405                        (unsigned long long) be64_to_cpu(target->service_id));
1406 }
1407
1408 static ssize_t show_pkey(struct class_device *cdev, char *buf)
1409 {
1410         struct srp_target_port *target = host_to_target(class_to_shost(cdev));
1411
1412         if (target->state == SRP_TARGET_DEAD ||
1413             target->state == SRP_TARGET_REMOVED)
1414                 return -ENODEV;
1415
1416         return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
1417 }
1418
1419 static ssize_t show_dgid(struct class_device *cdev, char *buf)
1420 {
1421         struct srp_target_port *target = host_to_target(class_to_shost(cdev));
1422
1423         if (target->state == SRP_TARGET_DEAD ||
1424             target->state == SRP_TARGET_REMOVED)
1425                 return -ENODEV;
1426
1427         return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[0]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[1]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[2]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[3]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[4]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[5]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[6]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[7]));
}

static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static CLASS_DEVICE_ATTR(id_ext,	S_IRUGO, show_id_ext,		NULL);
static CLASS_DEVICE_ATTR(ioc_guid,	S_IRUGO, show_ioc_guid,		NULL);
static CLASS_DEVICE_ATTR(service_id,	S_IRUGO, show_service_id,	NULL);
static CLASS_DEVICE_ATTR(pkey,		S_IRUGO, show_pkey,		NULL);
static CLASS_DEVICE_ATTR(dgid,		S_IRUGO, show_dgid,		NULL);
static CLASS_DEVICE_ATTR(zero_req_lim,	S_IRUGO, show_zero_req_lim,	NULL);

static struct class_device_attribute *srp_host_attrs[] = {
	&class_device_attr_id_ext,
	&class_device_attr_ioc_guid,
	&class_device_attr_service_id,
	&class_device_attr_pkey,
	&class_device_attr_dgid,
	&class_device_attr_zero_req_lim,
	NULL
};

static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= DRV_NAME,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.can_queue			= SRP_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};

static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->dev->dev->dma_device))
		return -ENODEV;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}

static void srp_release_class_dev(struct class_device *class_dev)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.release = srp_release_class_dev
};

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
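/*
 * For example (the HCA name "mthca0", port 1 and every identifier below
 * are made-up values, shown only to illustrate the format):
 *
 *     echo id_ext=200100a0b8000000,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4 > /sys/class/infiniband_srp/srp-mthca0-1/add_target
 */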
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_ERR,			NULL			}
};

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

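			/* Convert the 32 hex digits into 16 raw GID bytes, two digits at a time. */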
			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE);
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				printk(KERN_WARNING PFX "unknown IO class parameter value"
				       " %x specified (use %x or %x).\n",
				       token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		default:
			printk(KERN_WARNING PFX "unknown parameter or missing value "
			       "'%s' in target creation request\n", p);
			goto out;
		}
	}

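	/* Every option in SRP_OPT_ALL is mandatory; warn about each one that is missing. */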
	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				printk(KERN_WARNING PFX "target creation request is "
				       "missing parameter '%s'\n",
				       srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}

static ssize_t srp_create_target(struct class_device *class_dev,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	int ret;
	int i;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->max_lun = SRP_MAX_LUN;

	target = host_to_target(target_host);
	memset(target, 0, sizeof *target);

	target->io_class   = SRP_REV16A_IB_IO_CLASS;
	target->scsi_host  = target_host;
	target->srp_host   = host;

	INIT_WORK(&target->work, srp_reconnect_work, target);

	INIT_LIST_HEAD(&target->free_reqs);
	INIT_LIST_HEAD(&target->req_queue);
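	/* Start with every slot of the request ring on the free list. */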
	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		target->req_ring[i].index = i;
		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
	}

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

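	/* Use the port's GID (GID table index 0) as the source GID for the path. */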
	ib_get_cached_gid(host->dev->dev, host->port, 0, &target->path.sgid);

	printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
	       "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
	       (unsigned long long) be64_to_cpu(target->id_ext),
	       (unsigned long long) be64_to_cpu(target->ioc_guid),
	       be16_to_cpu(target->path.pkey),
	       (unsigned long long) be64_to_cpu(target->service_id),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[0]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[2]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[4]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[6]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[8]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[10]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[12]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[14]));

	ret = srp_create_target_ib(target);
	if (ret)
		goto err;

	target->cm_id = ib_create_cm_id(host->dev->dev, srp_cm_handler, target);
	if (IS_ERR(target->cm_id)) {
		ret = PTR_ERR(target->cm_id);
		goto err_free;
	}

	ret = srp_connect_target(target);
	if (ret) {
		printk(KERN_ERR PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free:
	srp_free_target_ib(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static CLASS_DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%s\n", host->dev->dev->name);
}

static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%d\n", host->port);
}

static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->dev  = device;
	host->port = port;

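	/*
	 * The 16-byte SRP initiator port ID: an 8-byte identifier extension
	 * (with the port number in its last byte) followed by the 8-byte
	 * node GUID.
	 */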
	host->initiator_port_id[7] = port;
	memcpy(host->initiator_port_id + 8, &device->dev->node_guid, 8);

	host->class_dev.class = &srp_class;
	host->class_dev.dev   = device->dev->dma_device;
	snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",
		 device->dev->name, port);

	if (class_device_register(&host->class_dev))
		goto free_host;
	if (class_device_create_file(&host->class_dev, &class_device_attr_add_target))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_port))
		goto err_class;

	return host;

err_class:
	class_device_unregister(&host->class_dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		printk(KERN_WARNING PFX "Query device failed for %s\n",
		       device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 512 bytes (which is the smallest sector that a
	 * SCSI command will ever carry).
	 */
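	/*
	 * For example, a (hypothetical) device whose smallest supported page
	 * size is 4 KB has bit 12 as the lowest set bit of page_size_cap, so
	 * ffs() - 1 = 12, giving fmr_page_size = 4096 and fmr_page_mask = ~0xfff.
	 */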
	srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size  = 1 << srp_dev->fmr_page_shift;
	srp_dev->fmr_page_mask  = ~((unsigned long) srp_dev->fmr_page_size - 1);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	memset(&fmr_param, 0, sizeof fmr_param);
	fmr_param.pool_size         = SRP_FMR_POOL_SIZE;
	fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
	fmr_param.cache             = 1;
	fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
	fmr_param.page_shift        = srp_dev->fmr_page_shift;
	fmr_param.access            = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

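	/* FMR support is optional: if pool creation fails, run without FMR mapping. */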
	srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

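	/*
	 * A switch exposes a single management port numbered 0, while a CA
	 * numbers its physical ports from 1 to phys_port_cnt.
	 */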
	if (device->node_type == IB_NODE_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target, *tmp_target;

	srp_dev = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		class_device_unregister(&host->class_dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list) {
			spin_lock_irq(target->scsi_host->host_lock);
			target->state = SRP_TARGET_REMOVED;
			spin_unlock_irq(target->scsi_host->host_lock);
		}
		spin_unlock(&host->target_lock);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_scheduled_work();

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			scsi_host_put(target->scsi_host);
		}

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static int __init srp_init_module(void)
{
	int ret;

	srp_template.sg_tablesize = srp_sg_tablesize;
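	/*
	 * The largest IU we ever send: the SRP_CMD itself plus an indirect
	 * descriptor header and one 16-byte direct descriptor per
	 * scatter/gather entry.
	 */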
	srp_max_iu_len = (sizeof (struct srp_cmd) +
			  sizeof (struct srp_indirect_buf) +
			  srp_sg_tablesize * 16);

	ret = class_register(&srp_class);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
		return ret;
	}

	ret = ib_register_client(&srp_client);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register IB client\n");
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	class_unregister(&srp_class);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);