/*
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This Software is licensed under one of the following licenses:
 *
 * 1) under the terms of the "Common Public License 1.0" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/cpl.php.
 *
 * 2) under the terms of the "The BSD License" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/bsd-license.php.
 *
 * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
 *    copy of which is available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/gpl-license.php.
 *
 * Licensee has the right to choose one of the above licenses.
 *
 * Redistributions of source code must retain the above copyright
 * notice and one of the license notices.
 *
 * Redistributions in binary form must reproduce both the above copyright
 * notice, one of the license notices in the documentation
 * and/or other materials provided with the distribution.
 *
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <net/tcp.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static int next_port;

struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	__be64			node_guid;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};

enum cma_state {
	CMA_IDLE,
	CMA_ADDR_QUERY,
	CMA_ADDR_RESOLVED,
	CMA_ROUTE_QUERY,
	CMA_ROUTE_RESOLVED,
	CMA_CONNECT,
	CMA_DISCONNECT,
	CMA_ADDR_BOUND,
	CMA_LISTEN,
	CMA_DEVICE_REMOVAL,
	CMA_DESTROYING
};

struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};

/*
 * Device removal can occur at any time, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in
 * process, and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list;
	struct list_head	listen_list;
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	enum cma_state		state;
	spinlock_t		lock;
	struct completion	comp;
	atomic_t		refcount;
	wait_queue_head_t	wait_remove;
	atomic_t		dev_remove;

	int			backlog;
	int			timeout_ms;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	u8			srq;
};
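
/*
 * Added commentary (not in the original file): based on the handlers
 * below, every callback into this file brackets its work with the
 * dev_remove counter so that device removal is held off while a
 * callback is running, e.g.:
 *
 *	atomic_inc(&id_priv->dev_remove);
 *	...deliver the event to id_priv->id.event_handler()...
 *	cma_release_remove(id_priv);	// wakes wait_remove at zero
 *
 * cma_release_remove() wakes wait_remove when dev_remove drops to 0,
 * which is what lets removal notification be reported only after the
 * in-flight callback completes, as the comment above describes.
 */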

struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	void			*context;
	struct sockaddr		addr;
	u8			pad[sizeof(struct sockaddr_in6) -
				    sizeof(struct sockaddr)];
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum cma_state		old_state;
	enum cma_state		new_state;
	struct rdma_cm_event	event;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__u32 pad[3];
		__u32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__u16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hh {
	u8 bsdh[16];
	u8 sdp_version; /* Major version: 7:4 */
	u8 ip_version;	/* IP version: 7:4 */
	u8 sdp_specific1[10];
	__u16 port;
	__u16 sdp_specific2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hah {
	u8 bsdh[16];
	u8 sdp_version;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2

static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
{
	unsigned long flags;
	enum cma_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}
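
/*
 * Usage sketch (illustrative, not from the original source): callers
 * treat cma_comp_exch() as a locked test-and-set on the id state,
 *
 *	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
 *		return -EINVAL;	// state changed under us
 *
 * and undo a failed transition with the reverse exchange, as the
 * error path of rdma_listen() below does.
 */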

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}

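/*
 * Worked example (added for illustration): the version bytes pack two
 * 4-bit fields, with the named field in bits 7:4.  Starting from
 * ip_version == 0x00, cma_set_ip_ver(hdr, 4) stores (4 << 4) | 0x0 ==
 * 0x40, and cma_get_ip_ver() then returns 0x40 >> 4 == 4.  The low
 * nibble is preserved across the store.
 */
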
static inline int cma_is_ud_ps(enum rdma_port_space ps)
{
	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}

static int cma_set_qkey(struct ib_device *device, u8 port_num,
			enum rdma_port_space ps,
			struct rdma_dev_addr *dev_addr, u32 *qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	switch (ps) {
	case RDMA_PS_UDP:
		*qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec);
		*qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid;
	int ret = -ENODEV;

	switch (rdma_node_get_transport(dev_addr->dev_type)) {
	case RDMA_TRANSPORT_IB:
		ib_addr_get_sgid(dev_addr, &gid);
		break;
	case RDMA_TRANSPORT_IWARP:
		iw_addr_get_sgid(dev_addr, &gid);
		break;
	default:
		return -ENODEV;
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		if (!ret) {
			ret = cma_set_qkey(cma_dev->device,
					   id_priv->id.port_num,
					   id_priv->id.ps, dev_addr,
					   &id_priv->qkey);
			if (!ret)
				cma_attach_to_dev(id_priv, cma_dev);
			break;
		}
	}
	return ret;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static void cma_release_remove(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->dev_remove))
		wake_up(&id_priv->wait_remove);
}

struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	spin_lock_init(&id_priv->lock);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	init_waitqueue_head(&id_priv->wait_remove);
	atomic_set(&id_priv->dev_remove, 0);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
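
/*
 * Example caller (sketch only; the handler name and port space are
 * assumptions, not taken from this file):
 *
 *	static int my_cma_handler(struct rdma_cm_id *id,
 *				  struct rdma_cm_event *event)
 *	{
 *		return 0;	// nonzero tells the CMA to destroy the id
 *	}
 *
 *	struct rdma_cm_id *id;
 *
 *	id = rdma_create_id(my_cma_handler, NULL, RDMA_PS_TCP);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *
 * Note the ERR_PTR convention: failure is reported through IS_ERR(),
 * never a NULL return.
 */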

static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (cma_is_ud_ps(id_priv->id.ps))
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);
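
/*
 * Example caller (sketch; the CQ, PD, and queue sizes are made-up
 * values, not from this file):
 *
 *	struct ib_qp_init_attr attr = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.qp_type = IB_QPT_RC,
 *		.sq_sig_type = IB_SIGNAL_ALL_WR,
 *	};
 *
 *	ret = rdma_create_qp(id, pd, &attr);
 *
 * The PD must come from id->device (i.e. after the id has been bound
 * to a device), or the function returns -EINVAL.
 */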

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	ib_destroy_qp(id->qp);
}
EXPORT_SYMBOL(rdma_destroy_qp);

static int cma_modify_qp_rtr(struct rdma_cm_id *id)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	if (!id->qp)
		return 0;

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
}

static int cma_modify_qp_rts(struct rdma_cm_id *id)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	if (!id->qp)
		return 0;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
}

static int cma_modify_qp_err(struct rdma_cm_id *id)
{
	struct ib_qp_attr qp_attr;

	if (!id->qp)
		return 0;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(id->qp, &qp_attr, IB_QP_STATE);
}
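
/*
 * Added note: together these helpers walk the standard IB QP state
 * machine.  A connected QP moves RESET -> INIT -> RTR -> RTS as the
 * connection is established (see cma_rep_recv() below), and is forced
 * to ERR on reject or connection failure so that outstanding work
 * requests complete with a flush error rather than hanging.
 */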

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  ib_addr_get_pkey(dev_addr),
				  &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (cma_is_ud_ps(id_priv->id.ps)) {
		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
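
/*
 * Example caller (sketch; assumes a user who manages the QP directly
 * instead of calling rdma_create_qp()):
 *
 *	struct ib_qp_attr qp_attr;
 *	int qp_attr_mask;
 *
 *	qp_attr.qp_state = IB_QPS_RTR;
 *	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
 *	if (!ret)
 *		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *
 * This mirrors how cma_modify_qp_rtr() above drives id->qp: ask the
 * CMA to fill in the attributes for the target state, then apply the
 * returned mask with ib_modify_qp().
 */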

static inline int cma_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ZERONET(((struct sockaddr_in *) addr)->sin_addr.s_addr);
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	return LOOPBACK(((struct sockaddr_in *) addr)->sin_addr.s_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static inline __be16 cma_port(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ((struct sockaddr_in *) addr)->sin_port;
	else
		return ((struct sockaddr_in6 *) addr)->sin6_port;
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}

static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __u16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver = sdp_get_ip_ver(hdr);
		*port = ((struct sdp_hh *) hdr)->port;
		*src = &((struct sdp_hh *) hdr)->src_addr;
		*dst = &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver = cma_get_ip_ver(hdr);
		*port = ((struct cma_hdr *) hdr)->port;
		*src = &((struct cma_hdr *) hdr)->src_addr;
		*dst = &((struct cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}

static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __u16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		break;
	default:
		break;
	}
}

static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}

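/*
 * Added note: for non-SDP port spaces the first sizeof(struct cma_hdr)
 * bytes of CM private data carry the header defined above, so user
 * private data begins at that offset on both sides of the connection.
 * The layout (byte offsets follow from the struct definition):
 *
 *	offset 0: cma_version		offset 1: ip_version (bits 7:4)
 *	offset 2: port (__u16)		offset 4: src_addr, then dst_addr
 *
 * SDP supplies its own header (struct sdp_hh), so its user data
 * offset is 0.
 */
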
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}

static inline int cma_internal_listen(struct rdma_id_private *id_priv)
{
	return (id_priv->state == CMA_LISTEN) && id_priv->cma_dev &&
	       cma_any_addr(&id_priv->id.route.addr.src_addr);
}

static void cma_destroy_listen(struct rdma_id_private *id_priv)
{
	cma_exch(id_priv, CMA_DESTROYING);

	if (id_priv->cma_dev) {
		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_detach_from_dev(id_priv);
	}
	list_del(&id_priv->listen_list);

	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	kfree(id_priv);
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		cma_destroy_listen(dev_id_priv);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum cma_state state)
{
	switch (state) {
	case CMA_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case CMA_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case CMA_LISTEN:
		if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
		    !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		ib_sa_free_multicast(mc->multicast.ib);
		kfree(mc);
	}
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum cma_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, CMA_DESTROYING);
	cma_cancel_operation(id_priv, state);

	mutex_lock(&lock);
	if (id_priv->cma_dev) {
		mutex_unlock(&lock);
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
	}
	mutex_unlock(&lock);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(&id_priv->id);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(&id_priv->id);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(&id_priv->id);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
	if (id_priv->id.ps == RDMA_PS_SDP &&
	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)
		return -EINVAL;

	return 0;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}

static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	atomic_inc(&id_priv->dev_remove);
	if (!cma_comp(id_priv, CMA_CONNECT))
		goto out;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		event.status = cma_verify_rep(id_priv, ib_event->private_data);
		if (event.status)
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(&id_priv->id);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		cma_release_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	cma_release_remove(id_priv);
	return ret;
}

static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	union cma_ip_addr *src, *dst;
	__u16 port;
	u8 ip_ver;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto destroy_id;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
	ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
	ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
	rt->addr.dev_addr.dev_type = RDMA_NODE_IB_CA;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;

destroy_id:
	rdma_destroy_id(id);
err:
	return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	union cma_ip_addr *src, *dst;
	__u16 port;
	u8 ip_ver;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		return NULL;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	ret = rdma_translate_ip(&id->route.addr.src_addr,
				&id->route.addr.dev_addr);
	if (ret)
		goto err;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}

static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;

	listen_id = cm_id->context;
	atomic_inc(&listen_id->dev_remove);
	if (!cma_comp(listen_id, CMA_LISTEN)) {
		ret = -ECONNABORTED;
		goto out;
	}

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id->id.ps);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (cma_is_ud_ps(listen_id->id.ps)) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_inc(&conn_id->dev_remove);
	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret)
		goto release_conn_id;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (!ret)
		goto out;

	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;

release_conn_id:
	cma_exch(conn_id, CMA_DESTROYING);
	cma_release_remove(conn_id);
	rdma_destroy_id(&conn_id->id);

out:
	cma_release_remove(listen_id);
	return ret;
}

static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
}

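/*
 * Worked example (illustration only; assumes RDMA_PS_TCP's historical
 * value of 0x0106 from rdma_cm.h and a made-up port): with ps ==
 * RDMA_PS_TCP and a bound port of 4791 (0x12B7), the computation is
 * (0x0106 << 16) + 0x12B7 == 0x010612B7, returned as
 * cpu_to_be64(0x010612B7) -- port space in bits 31:16, IP port in
 * bits 15:0 of the service ID.
 */
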
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__u32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = ~0;
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = ~0;
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}

static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	struct sockaddr_in *sin;
	int ret = 0;

	memset(&event, 0, sizeof event);
	atomic_inc(&id_priv->dev_remove);

	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		cma_release_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	cma_release_remove(id_priv);
	return ret;
}

static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	struct rdma_cm_event event;
	int ret;

	listen_id = cm_id->context;
	atomic_inc(&listen_id->dev_remove);
	if (!cma_comp(listen_id, CMA_LISTEN)) {
		ret = -ECONNABORTED;
		goto out;
	}

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP);
	if (IS_ERR(new_cm_id)) {
		/* rdma_create_id() returns ERR_PTR, never NULL */
		ret = PTR_ERR(new_cm_id);
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	atomic_inc(&conn_id->dev_remove);
	conn_id->state = CMA_CONNECT;

	dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		cma_release_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		cma_release_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		cma_release_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		cma_release_remove(conn_id);
		rdma_destroy_id(&conn_id->id);
	}

out:
	if (dev)
		dev_put(dev);
	cma_release_remove(listen_id);
	return ret;
}

static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	__be64 svc_id;
	int ret;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib))
		return PTR_ERR(id_priv->cm_id.ib);

	addr = &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr))
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct sockaddr_in *sin;

	id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
					    iw_conn_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.iw))
		return PTR_ERR(id_priv->cm_id.iw);

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = CMA_ADDR_BOUND;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size(&id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		goto err;

	return;
err:
	cma_destroy_listen(dev_id_priv);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
{
	struct sockaddr_in addr_in;

	memset(&addr_in, 0, sizeof addr_in);
	addr_in.sin_family = af;
	return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
}

int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_any(id, AF_INET);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
		return -EINVAL;

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
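
/*
 * Example passive-side setup (sketch; the address, port, and backlog
 * are illustrative and error handling is elided):
 *
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(7174),	// made-up port
 *	};
 *
 *	id = rdma_create_id(my_cma_handler, NULL, RDMA_PS_TCP);
 *	ret = rdma_bind_addr(id, (struct sockaddr *) &sin);
 *	ret = rdma_listen(id, 10);
 *
 * With a zero/any address the listen has no single device, so it is
 * mirrored on every RDMA device via cma_listen_on_all() above.
 */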

static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = CMA_ROUTE_QUERY;
		work->new_state = CMA_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}

static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_dev_addr *addr = &id_priv->id.route.addr.dev_addr;
	struct ib_sa_path_rec path_rec;

	memset(&path_rec, 0, sizeof path_rec);
	ib_addr_get_sgid(addr, &path_rec.sgid);
	ib_addr_get_dgid(addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
				id_priv->id.port_num, &path_rec,
				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
				IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
				IB_SA_PATH_REC_REVERSIBLE,
				timeout_ms, GFP_KERNEL,
				cma_query_handler, work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	atomic_inc(&id_priv->dev_remove);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
out:
	cma_release_remove(id_priv);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}

int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
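
/*
 * Typical active-side sequence (sketch; the timeouts are arbitrary
 * and each step completes asynchronously through the event handler):
 *
 *	rdma_resolve_addr(id, NULL, dst_addr, 2000);
 *	// wait for RDMA_CM_EVENT_ADDR_RESOLVED
 *	rdma_resolve_route(id, 2000);
 *	// wait for RDMA_CM_EVENT_ROUTE_RESOLVED, then connect
 *
 * Each call must be made from the state the previous step left the id
 * in; otherwise the cma_comp_exch() guard above fails with -EINVAL.
 */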
1637
1638static int cma_bind_loopback(struct rdma_id_private *id_priv)
1639{
1640 struct cma_device *cma_dev;
1641 struct ib_port_attr port_attr;
Michael S. Tsirkinf0ee3402006-07-14 00:23:52 -07001642 union ib_gid gid;
Sean Heftye51060f2006-06-17 20:37:29 -07001643 u16 pkey;
1644 int ret;
1645 u8 p;
1646
1647 mutex_lock(&lock);
Krishna Kumare82153b2006-10-16 10:09:01 +05301648 if (list_empty(&dev_list)) {
Sean Heftye51060f2006-06-17 20:37:29 -07001649 ret = -ENODEV;
1650 goto out;
1651 }
Krishna Kumare82153b2006-10-16 10:09:01 +05301652 list_for_each_entry(cma_dev, &dev_list, list)
1653 for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
1654 if (!ib_query_port(cma_dev->device, p, &port_attr) &&
1655 port_attr.state == IB_PORT_ACTIVE)
1656 goto port_found;
1657
1658 p = 1;
1659 cma_dev = list_entry(dev_list.next, struct cma_device, list);
Sean Heftye51060f2006-06-17 20:37:29 -07001660
1661port_found:
Michael S. Tsirkinf0ee3402006-07-14 00:23:52 -07001662 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
Sean Heftye51060f2006-06-17 20:37:29 -07001663 if (ret)
1664 goto out;
1665
1666 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
1667 if (ret)
1668 goto out;
1669
Michael S. Tsirkinf0ee3402006-07-14 00:23:52 -07001670 ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
Sean Heftye51060f2006-06-17 20:37:29 -07001671 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
1672 id_priv->id.port_num = p;
1673 cma_attach_to_dev(id_priv, cma_dev);
1674out:
1675 mutex_unlock(&lock);
1676 return ret;
1677}
1678
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	atomic_inc(&id_priv->dev_remove);

	/*
	 * Grab mutex to block rdma_destroy_id() from removing the device while
	 * we're trying to acquire it.
	 */
	mutex_lock(&lock);
	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
		mutex_unlock(&lock);
		goto out;
	}

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);
	mutex_unlock(&lock);

	if (status) {
		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		cma_release_remove(id_priv);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	cma_release_remove(id_priv);
	cma_deref_id(id_priv);
}

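/*
 * "Resolve" a loopback destination: bind to a local device if needed,
 * copy the source GID into the destination GID, and queue a work item
 * that reports RDMA_CM_EVENT_ADDR_RESOLVED from the usual callback
 * context rather than from the caller's stack.
 */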
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	struct sockaddr_in *src_in, *dst_in;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
		dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
		src_in->sin_family = dst_in->sin_family;
		src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
	}

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ADDR_QUERY;
	work->new_state = CMA_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}

static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (src_addr && src_addr->sa_family)
		return rdma_bind_addr(id, src_addr);
	else
		return cma_bind_any(id, dst_addr->sa_family);
}

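/*
 * Map a destination IP address to a device address.  Binds the id first
 * if the caller has not already done so, then either short-circuits
 * through the loopback path or hands the query to the ib_addr module.
 * The CMA_ADDR_BOUND -> CMA_ADDR_QUERY transition is undone on failure.
 */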
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
	if (cma_any_addr(dst_addr))
		ret = cma_resolve_loopback(id_priv);
	else
		ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
				      dst_addr, &id->route.addr.dev_addr,
				      timeout_ms, addr_handler, id_priv);
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);

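/*
 * Port management: each port space (SDP, TCP, UDP, IPoIB) keeps an idr
 * mapping port numbers to rdma_bind_list structures, and each bind list
 * tracks every id bound to that port through its owners hlist.
 */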
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	sin->sin_port = htons(bind_list->port);
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}

static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	/* kzalloc so the owners hlist head starts out empty */
	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	do {
		ret = idr_get_new_above(ps, bind_list, snum, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	if (port != snum) {
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}

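/*
 * Grab any free port in the local port range, starting the search at
 * next_port and wrapping around once before giving up with
 * -EADDRNOTAVAIL.
 */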
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

retry:
	do {
		ret = idr_get_new_above(ps, bind_list, next_port, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	if (port > sysctl_local_port_range[1]) {
		if (next_port != sysctl_local_port_range[0]) {
			idr_remove(ps, port);
			next_port = sysctl_local_port_range[0];
			goto retry;
		}
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	if (port == sysctl_local_port_range[1])
		next_port = sysctl_local_port_range[0];
	else
		next_port = port + 1;

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}

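/*
 * Bind to a caller-specified port.  Wildcard binds and specific-address
 * binds are mutually exclusive on one port, and two ids may share a
 * port only if their addresses differ.
 */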
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr_in *sin, *cur_sin;
	struct rdma_bind_list *bind_list;
	struct hlist_node *node;
	unsigned short snum;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	snum = ntohs(sin->sin_port);
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = idr_find(ps, snum);
	if (!bind_list)
		return cma_alloc_port(ps, id_priv, snum);

	/*
	 * We don't support binding to any address if anyone is bound to
	 * a specific address on the same port.
	 */
	if (cma_any_addr(&id_priv->id.route.addr.src_addr))
		return -EADDRNOTAVAIL;

	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
		if (cma_any_addr(&cur_id->id.route.addr.src_addr))
			return -EADDRNOTAVAIL;

		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
			return -EADDRINUSE;
	}

	cma_bind_port(bind_list, id_priv);
	return 0;
}

static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct idr *ps;
	int ret;

	switch (id_priv->id.ps) {
	case RDMA_PS_SDP:
		ps = &sdp_ps;
		break;
	case RDMA_PS_TCP:
		ps = &tcp_ps;
		break;
	case RDMA_PS_UDP:
		ps = &udp_ps;
		break;
	case RDMA_PS_IPOIB:
		ps = &ipoib_ps;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	mutex_lock(&lock);
	if (cma_any_port(&id_priv->id.route.addr.src_addr))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}

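/*
 * Bind the id to an IPv4 address and reserve a port in its port space.
 * Binding to a specific address also attaches the id to the matching
 * device; a wildcard bind defers device selection.
 */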
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
		return -EINVAL;

	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		mutex_lock(&lock);
		ret = cma_acquire_dev(id_priv);
		mutex_unlock(&lock);
		if (ret)
			goto err1;
	}

	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (!cma_any_addr(addr)) {
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
		mutex_unlock(&lock);
	}
err1:
	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);

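/*
 * Build the private-data header carried in the connection request so
 * the passive side can recover the IP addresses and port.  SDP uses its
 * own hello header; all other port spaces share the cma_hdr layout.
 */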
static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
			  struct rdma_route *route)
{
	struct sockaddr_in *src4, *dst4;
	struct cma_hdr *cma_hdr;
	struct sdp_hh *sdp_hdr;

	src4 = (struct sockaddr_in *) &route->addr.src_addr;
	dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

	switch (ps) {
	case RDMA_PS_SDP:
		sdp_hdr = hdr;
		if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
			return -EINVAL;
		sdp_set_ip_ver(sdp_hdr, 4);
		sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		sdp_hdr->port = src4->sin_port;
		break;
	default:
		cma_hdr = hdr;
		cma_hdr->cma_version = CMA_VERSION;
		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
		break;
	}
	return 0;
}

static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	memset(&event, 0, sizeof event);
	atomic_inc(&id_priv->dev_remove);
	if (!cma_comp(id_priv, CMA_CONNECT))
		goto out;

	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			break;
		}
		if (id_priv->qkey != rep->qkey) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = -EINVAL;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.status = 0;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		cma_release_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	cma_release_remove(id_priv);
	return ret;
}

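/*
 * Connect a UD port space id by sending a SIDR request: the CMA header
 * and any consumer private data ride in the request, and the reply is
 * handled by cma_sidr_rep_handler() above.
 */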
static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct rdma_route *route;
	int ret;

	memset(&req, 0, sizeof req);	/* don't send uninitialized fields */
	req.private_data_len = sizeof(struct cma_hdr) +
			       conn_param->private_data_len;
	req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!req.private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy((void *) req.private_data + sizeof(struct cma_hdr),
		       conn_param->private_data, conn_param->private_data_len);

	route = &id_priv->id.route;
	ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
	if (ret)
		goto out;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
					    cma_sidr_rep_handler, id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	req.path = route->path_rec;
	req.service_id = cma_get_service_id(id_priv->id.ps,
					    &route->addr.dst_addr);
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(req.private_data);
	return ret;
}

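/*
 * Issue an IB CM connection request for an RC connection.  The CMA
 * header is prepended to the consumer's private data, and the timeouts
 * and retry counts come from the module-wide constants.
 */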
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv->id.ps);
	req.private_data_len = offset + conn_param->private_data_len;
	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
	if (ret)
		goto out;
	req.private_data = private_data;

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = cma_get_service_id(id_priv->id.ps,
					    &route->addr.dst_addr);
	req.qp_num = id_priv->qp_num;
	req.qp_type = IB_QPT_RC;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = conn_param->retry_count;
	req.rnr_retry_count = conn_param->rnr_retry_count;
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}

static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	struct sockaddr_in *sin;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto out;
	}

	id_priv->cm_id.iw = cm_id;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	cm_id->local_addr = *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
	cm_id->remote_addr = *sin;

	ret = cma_modify_qp_rtr(&id_priv->id);
	if (ret)
		goto out;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret && !IS_ERR(cm_id)) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}

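/*
 * Initiate an active connection.  For UD port spaces this resolves the
 * remote QP via SIDR; otherwise it sends a transport-specific
 * connection request.  The id must have a resolved route.
 */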
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);

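/*
 * Accept an IB connection request.  If the consumer created the QP on
 * the id, move it through RTR to RTS here before sending the REP;
 * otherwise the consumer manages the QP states itself.
 */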
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	if (id_priv->id.qp) {
		ret = cma_modify_qp_rtr(&id_priv->id);
		if (ret)
			goto out;

		qp_attr.qp_state = IB_QPS_RTS;
		ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr,
					 &qp_attr_mask);
		if (ret)
			goto out;

		qp_attr.max_rd_atomic = conn_param->initiator_depth;
		ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
		if (ret)
			goto out;
	}

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.target_ack_delay = CMA_CM_RESPONSE_TIMEOUT;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = conn_param->rnr_retry_count;
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}

static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(&id_priv->id);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}

static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}

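/*
 * Accept a connection request.  conn_param may be NULL when the
 * consumer wants the QP attributes taken as-is (or, for UD, to accept
 * without private data); any failure is converted into an automatic
 * reject so the remote side is not left waiting.
 */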
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps)) {
			/* conn_param is optional; don't dereference it if absent */
			if (conn_param)
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
						conn_param->private_data,
						conn_param->private_data_len);
			else
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							NULL, 0);
		} else if (conn_param)
			ret = cma_accept_ib(id_priv, conn_param);
		else
			ret = cma_rep_recv(id_priv);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);

int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);

int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);

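/*
 * Tear down a connection.  For IB the QP is moved to the error state
 * first, then a DREQ is sent (or a DREP, if the remote side already
 * initiated the disconnect).
 */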
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT) &&
	    !cma_comp(id_priv, CMA_DISCONNECT))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);

static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	atomic_inc(&id_priv->dev_remove);
	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
		goto out;

	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 multicast->rec.mlid);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, CMA_DESTROYING);
		cma_release_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}
out:
	cma_release_remove(id_priv);
	return 0;
}

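/*
 * Derive the multicast GID for a join: a wildcard address maps to the
 * zero MGID, an IPv6 address already carrying an SA-assigned MGID is
 * used verbatim, and an IPv4 address goes through the standard
 * IP-over-IB multicast mapping (with the RDMA CM signature byte for
 * the UDP port space).
 */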
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) ==
		    0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		mc_map[8] = ib_addr_get_pkey(dev_addr) >> 8;
		mc_map[9] = (unsigned char) ib_addr_get_pkey(dev_addr);
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}

static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, &mc->addr, &rec.mgid);
	if (id_priv->id.ps == RDMA_PS_UDP)
		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
	ib_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	if (IS_ERR(mc->multicast.ib))
		return PTR_ERR(mc->multicast.ib);

	return 0;
}

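/*
 * Join the multicast group derived from addr.  The mc entry is linked
 * onto the id before the join is issued so rdma_leave_multicast() and
 * device removal can always find it; it is unlinked again if the join
 * cannot be started.
 */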
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, ip_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_join_ib_multicast(id_priv, mc);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);

void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						mc->multicast.ib->rec.mlid);
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);

static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;
	cma_dev->node_guid = device->node_guid;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum cma_state state;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	wait_event(id_priv->wait_remove, !atomic_read(&id_priv->dev_remove));

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		return 0;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	return id_priv->id.event_handler(&id_priv->id, &event);
}

static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		if (cma_internal_listen(id_priv)) {
			cma_destroy_listen(id_priv);
			continue;
		}

		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}

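/*
 * Module init: seed next_port with a random value inside the local
 * port range so the first allocated port is not predictable, then
 * register with the SA, address, and device-client frameworks.
 */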
static int cma_init(void)
{
	int ret;

	get_random_bytes(&next_port, sizeof next_port);
	/* cast keeps the modulo non-negative when the random seed is negative */
	next_port = ((unsigned int) next_port %
		     (sysctl_local_port_range[1] - sysctl_local_port_range[0])) +
		    sysctl_local_port_range[0];
	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}

static void cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);