/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: sa_query.c 2811 2005-07-06 18:11:43Z halr $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include "sa.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
MODULE_LICENSE("Dual BSD/GPL");

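/*
 * This module implements SA (subnet administration) queries: it packs
 * record structures into SA MADs using the field tables below, sends
 * them to the SA through each port's GSI QP, and dispatches responses
 * to per-query callbacks.
 */
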
struct ib_sa_sm_ah {
        struct ib_ah        *ah;
        struct kref          ref;
        u8                   src_path_mask;
};

struct ib_sa_port {
        struct ib_mad_agent *agent;
        struct ib_sa_sm_ah  *sm_ah;
        struct work_struct   update_task;
        spinlock_t           ah_lock;
        u8                   port_num;
};

struct ib_sa_device {
        int                     start_port, end_port;
        struct ib_event_handler event_handler;
        struct ib_sa_port port[0];
};

struct ib_sa_query {
        void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
        void (*release)(struct ib_sa_query *);
        struct ib_sa_client    *client;
        struct ib_sa_port      *port;
        struct ib_mad_send_buf *mad_buf;
        struct ib_sa_sm_ah     *sm_ah;
        int                     id;
};

struct ib_sa_service_query {
        void (*callback)(int, struct ib_sa_service_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
        void (*callback)(int, struct ib_sa_path_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
        void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device);

static struct ib_client sa_client = {
        .name   = "sa",
        .add    = ib_sa_add_one,
        .remove = ib_sa_remove_one
};

static spinlock_t idr_lock;
static DEFINE_IDR(query_idr);

static spinlock_t tid_lock;
static u32 tid;

#define PATH_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),          \
        .struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,     \
        .field_name          = "sa_path_rec:" #field

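/*
 * Each table below gives the on-the-wire layout of one SA record type,
 * field by field (offsets and sizes follow the InfiniBand Architecture
 * spec).  ib_pack() and ib_unpack() walk these tables to convert
 * between the host structure and the MAD data payload.
 */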
static const struct ib_field path_rec_table[] = {
        { RESERVED,                     .offset_words = 0,  .offset_bits = 0,  .size_bits = 32 },
        { RESERVED,                     .offset_words = 1,  .offset_bits = 0,  .size_bits = 32 },
        { PATH_REC_FIELD(dgid),         .offset_words = 2,  .offset_bits = 0,  .size_bits = 128 },
        { PATH_REC_FIELD(sgid),         .offset_words = 6,  .offset_bits = 0,  .size_bits = 128 },
        { PATH_REC_FIELD(dlid),         .offset_words = 10, .offset_bits = 0,  .size_bits = 16 },
        { PATH_REC_FIELD(slid),         .offset_words = 10, .offset_bits = 16, .size_bits = 16 },
        { PATH_REC_FIELD(raw_traffic),  .offset_words = 11, .offset_bits = 0,  .size_bits = 1 },
        { RESERVED,                     .offset_words = 11, .offset_bits = 1,  .size_bits = 3 },
        { PATH_REC_FIELD(flow_label),   .offset_words = 11, .offset_bits = 4,  .size_bits = 20 },
        { PATH_REC_FIELD(hop_limit),    .offset_words = 11, .offset_bits = 24, .size_bits = 8 },
        { PATH_REC_FIELD(traffic_class),.offset_words = 12, .offset_bits = 0,  .size_bits = 8 },
        { PATH_REC_FIELD(reversible),   .offset_words = 12, .offset_bits = 8,  .size_bits = 1 },
        { PATH_REC_FIELD(numb_path),    .offset_words = 12, .offset_bits = 9,  .size_bits = 7 },
        { PATH_REC_FIELD(pkey),         .offset_words = 12, .offset_bits = 16, .size_bits = 16 },
        { RESERVED,                     .offset_words = 13, .offset_bits = 0,  .size_bits = 12 },
        { PATH_REC_FIELD(sl),           .offset_words = 13, .offset_bits = 12, .size_bits = 4 },
        { PATH_REC_FIELD(mtu_selector), .offset_words = 13, .offset_bits = 16, .size_bits = 2 },
        { PATH_REC_FIELD(mtu),          .offset_words = 13, .offset_bits = 18, .size_bits = 6 },
        { PATH_REC_FIELD(rate_selector),.offset_words = 13, .offset_bits = 24, .size_bits = 2 },
        { PATH_REC_FIELD(rate),         .offset_words = 13, .offset_bits = 26, .size_bits = 6 },
        { PATH_REC_FIELD(packet_life_time_selector),
                                        .offset_words = 14, .offset_bits = 0,  .size_bits = 2 },
        { PATH_REC_FIELD(packet_life_time),
                                        .offset_words = 14, .offset_bits = 2,  .size_bits = 6 },
        { PATH_REC_FIELD(preference),   .offset_words = 14, .offset_bits = 8,  .size_bits = 8 },
        { RESERVED,                     .offset_words = 14, .offset_bits = 16, .size_bits = 48 },
};

#define MCMEMBER_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),      \
        .struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \
        .field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
        { MCMEMBER_REC_FIELD(mgid),          .offset_words = 0,  .offset_bits = 0,  .size_bits = 128 },
        { MCMEMBER_REC_FIELD(port_gid),      .offset_words = 4,  .offset_bits = 0,  .size_bits = 128 },
        { MCMEMBER_REC_FIELD(qkey),          .offset_words = 8,  .offset_bits = 0,  .size_bits = 32 },
        { MCMEMBER_REC_FIELD(mlid),          .offset_words = 9,  .offset_bits = 0,  .size_bits = 16 },
        { MCMEMBER_REC_FIELD(mtu_selector),  .offset_words = 9,  .offset_bits = 16, .size_bits = 2 },
        { MCMEMBER_REC_FIELD(mtu),           .offset_words = 9,  .offset_bits = 18, .size_bits = 6 },
        { MCMEMBER_REC_FIELD(traffic_class), .offset_words = 9,  .offset_bits = 24, .size_bits = 8 },
        { MCMEMBER_REC_FIELD(pkey),          .offset_words = 10, .offset_bits = 0,  .size_bits = 16 },
        { MCMEMBER_REC_FIELD(rate_selector), .offset_words = 10, .offset_bits = 16, .size_bits = 2 },
        { MCMEMBER_REC_FIELD(rate),          .offset_words = 10, .offset_bits = 18, .size_bits = 6 },
        { MCMEMBER_REC_FIELD(packet_life_time_selector),
                                             .offset_words = 10, .offset_bits = 24, .size_bits = 2 },
        { MCMEMBER_REC_FIELD(packet_life_time),
                                             .offset_words = 10, .offset_bits = 26, .size_bits = 6 },
        { MCMEMBER_REC_FIELD(sl),            .offset_words = 11, .offset_bits = 0,  .size_bits = 4 },
        { MCMEMBER_REC_FIELD(flow_label),    .offset_words = 11, .offset_bits = 4,  .size_bits = 20 },
        { MCMEMBER_REC_FIELD(hop_limit),     .offset_words = 11, .offset_bits = 24, .size_bits = 8 },
        { MCMEMBER_REC_FIELD(scope),         .offset_words = 12, .offset_bits = 0,  .size_bits = 4 },
        { MCMEMBER_REC_FIELD(join_state),    .offset_words = 12, .offset_bits = 4,  .size_bits = 4 },
        { MCMEMBER_REC_FIELD(proxy_join),    .offset_words = 12, .offset_bits = 8,  .size_bits = 1 },
        { RESERVED,                          .offset_words = 12, .offset_bits = 9,  .size_bits = 23 },
};

#define SERVICE_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),       \
        .struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,  \
        .field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
        { SERVICE_REC_FIELD(id),     .offset_words = 0,  .offset_bits = 0, .size_bits = 64 },
        { SERVICE_REC_FIELD(gid),    .offset_words = 2,  .offset_bits = 0, .size_bits = 128 },
        { SERVICE_REC_FIELD(pkey),   .offset_words = 6,  .offset_bits = 0, .size_bits = 16 },
        { SERVICE_REC_FIELD(lease),  .offset_words = 7,  .offset_bits = 0, .size_bits = 32 },
        { SERVICE_REC_FIELD(key),    .offset_words = 8,  .offset_bits = 0, .size_bits = 128 },
        { SERVICE_REC_FIELD(name),   .offset_words = 12, .offset_bits = 0, .size_bits = 64*8 },
        { SERVICE_REC_FIELD(data8),  .offset_words = 28, .offset_bits = 0, .size_bits = 16*8 },
        { SERVICE_REC_FIELD(data16), .offset_words = 32, .offset_bits = 0, .size_bits = 8*16 },
        { SERVICE_REC_FIELD(data32), .offset_words = 36, .offset_bits = 0, .size_bits = 4*32 },
        { SERVICE_REC_FIELD(data64), .offset_words = 40, .offset_bits = 0, .size_bits = 2*64 },
};

static void free_sm_ah(struct kref *kref)
{
        struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

        ib_destroy_ah(sm_ah->ah);
        kfree(sm_ah);
}

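/*
 * Refresh the cached address handle for the port's subnet manager:
 * query the port attributes for the current SM LID/SL, build a new AH,
 * and swap it in under ah_lock.  The old AH, if any, is destroyed once
 * its last reference is dropped.
 */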
static void update_sm_ah(struct work_struct *work)
{
        struct ib_sa_port *port =
                container_of(work, struct ib_sa_port, update_task);
        struct ib_sa_sm_ah *new_ah, *old_ah;
        struct ib_port_attr port_attr;
        struct ib_ah_attr   ah_attr;

        if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
                printk(KERN_WARNING "Couldn't query port\n");
                return;
        }

        new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
        if (!new_ah) {
                printk(KERN_WARNING "Couldn't allocate new SM AH\n");
                return;
        }

        kref_init(&new_ah->ref);
        new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.dlid     = port_attr.sm_lid;
        ah_attr.sl       = port_attr.sm_sl;
        ah_attr.port_num = port->port_num;

        new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
        if (IS_ERR(new_ah->ah)) {
                printk(KERN_WARNING "Couldn't create new SM AH\n");
                kfree(new_ah);
                return;
        }

        spin_lock_irq(&port->ah_lock);
        old_ah = port->sm_ah;
        port->sm_ah = new_ah;
        spin_unlock_irq(&port->ah_lock);

        if (old_ah)
                kref_put(&old_ah->ref, free_sm_ah);
}

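/*
 * Any event that may have moved the SM or invalidated our cached
 * address handle triggers a refresh of the affected port's SM AH.
 */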
static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
{
        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER) {
                struct ib_sa_device *sa_dev;
                sa_dev = container_of(handler, typeof(*sa_dev), event_handler);

                schedule_work(&sa_dev->port[event->element.port_num -
                                            sa_dev->start_port].update_task);
        }
}

void ib_sa_register_client(struct ib_sa_client *client)
{
        atomic_set(&client->users, 1);
        init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
        ib_sa_client_put(client);
        wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
        unsigned long flags;
        struct ib_mad_agent *agent;
        struct ib_mad_send_buf *mad_buf;

        spin_lock_irqsave(&idr_lock, flags);
        if (idr_find(&query_idr, id) != query) {
                spin_unlock_irqrestore(&idr_lock, flags);
                return;
        }
        agent = query->port->agent;
        mad_buf = query->mad_buf;
        spin_unlock_irqrestore(&idr_lock, flags);

        ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

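/*
 * Usage sketch (not from the original source): a non-negative return
 * value from one of the *_rec query functions is the ID to pass here,
 * together with the query pointer the call handed back:
 *
 *      id = ib_sa_path_rec_get(client, device, port_num, &rec, comp_mask,
 *                              timeout_ms, GFP_KERNEL, callback, context,
 *                              &query);
 *      if (id >= 0)
 *              ib_sa_cancel_query(id, query);
 *
 * A canceled query still completes through its callback, with status
 * -EINTR.
 */
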
static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
        struct ib_sa_device *sa_dev;
        struct ib_sa_port   *port;
        unsigned long flags;
        u8 src_path_mask;

        sa_dev = ib_get_client_data(device, &sa_client);
        if (!sa_dev)
                return 0x7f;

        port  = &sa_dev->port[port_num - sa_dev->start_port];
        spin_lock_irqsave(&port->ah_lock, flags);
        src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
        spin_unlock_irqrestore(&port->ah_lock, flags);

        return src_path_mask;
}

int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
                         struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
        int ret;
        u16 gid_index;

        memset(ah_attr, 0, sizeof *ah_attr);
        ah_attr->dlid = be16_to_cpu(rec->dlid);
        ah_attr->sl = rec->sl;
        ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
                                 get_src_path_mask(device, port_num);
        ah_attr->port_num = port_num;
        ah_attr->static_rate = rec->rate;

        if (rec->hop_limit > 1) {
                ah_attr->ah_flags = IB_AH_GRH;
                ah_attr->grh.dgid = rec->dgid;

                ret = ib_find_cached_gid(device, &rec->sgid, &port_num,
                                         &gid_index);
                if (ret)
                        return ret;

                ah_attr->grh.sgid_index    = gid_index;
                ah_attr->grh.flow_label    = be32_to_cpu(rec->flow_label);
                ah_attr->grh.hop_limit     = rec->hop_limit;
                ah_attr->grh.traffic_class = rec->traffic_class;
        }
        return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);

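/*
 * Usage sketch (editor's example; my_pd is a placeholder PD): a caller
 * that has resolved a path record can turn it directly into an address
 * handle:
 *
 *      struct ib_ah_attr ah_attr;
 *      struct ib_ah *ah;
 *
 *      if (!ib_init_ah_from_path(device, port_num, &path_rec, &ah_attr))
 *              ah = ib_create_ah(my_pd, &ah_attr);
 */
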
static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
        unsigned long flags;

        memset(mad, 0, sizeof *mad);

        mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
        mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
        mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

        spin_lock_irqsave(&tid_lock, flags);
        mad->mad_hdr.tid =
                cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
        spin_unlock_irqrestore(&tid_lock, flags);
}

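/*
 * Allocate an ID for the query in query_idr (this is what
 * ib_sa_cancel_query() looks up), pin the port's current SM AH, and
 * post the MAD.  Returns the query ID on success or a negative errno;
 * once the MAD is posted the query may complete and be freed at any
 * time, so it must not be dereferenced afterwards.
 */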
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
        unsigned long flags;
        int ret, id;

retry:
        if (!idr_pre_get(&query_idr, gfp_mask))
                return -ENOMEM;
        spin_lock_irqsave(&idr_lock, flags);
        ret = idr_get_new(&query_idr, query, &id);
        spin_unlock_irqrestore(&idr_lock, flags);
        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                return ret;

        query->mad_buf->timeout_ms = timeout_ms;
        query->mad_buf->context[0] = query;
        query->id = id;

        spin_lock_irqsave(&query->port->ah_lock, flags);
        kref_get(&query->port->sm_ah->ref);
        query->sm_ah = query->port->sm_ah;
        spin_unlock_irqrestore(&query->port->ah_lock, flags);

        query->mad_buf->ah = query->sm_ah->ah;

        ret = ib_post_send_mad(query->mad_buf, NULL);
        if (ret) {
                spin_lock_irqsave(&idr_lock, flags);
                idr_remove(&query_idr, id);
                spin_unlock_irqrestore(&idr_lock, flags);

                kref_put(&query->sm_ah->ref, free_sm_ah);
        }

        /*
         * It's not safe to dereference query any more, because the
         * send may already have completed and freed the query in
         * another context instead.
         */
        return ret ? ret : id;
}

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
                                    int status,
                                    struct ib_sa_mad *mad)
{
        struct ib_sa_path_query *query =
                container_of(sa_query, struct ib_sa_path_query, sa_query);

        if (mad) {
                struct ib_sa_path_rec rec;

                ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
                       struct ib_device *device, u8 port_num,
                       struct ib_sa_path_rec *rec,
                       ib_sa_comp_mask comp_mask,
                       int timeout_ms, gfp_t gfp_mask,
                       void (*callback)(int status,
                                        struct ib_sa_path_rec *resp,
                                        void *context),
                       void *context,
                       struct ib_sa_query **sa_query)
{
        struct ib_sa_path_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port   *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        port  = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        query = kmalloc(sizeof *query, gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
                                                     0, IB_MGMT_SA_HDR,
                                                     IB_MGMT_SA_DATA, gfp_mask);
        if (!query->sa_query.mad_buf) {
                ret = -ENOMEM;
                goto err1;
        }

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback        = callback;
        query->context         = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
        query->sa_query.release  = ib_sa_path_rec_release;
        query->sa_query.port     = port;
        mad->mad_hdr.method      = IB_MGMT_METHOD_GET;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_PATH_REC);
        mad->sa_hdr.comp_mask    = comp_mask;

        ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        ib_free_send_mad(query->sa_query.mad_buf);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);

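/*
 * Usage sketch (editor's example; names prefixed my_ are placeholders).
 * A consumer registers an SA client once, then issues queries; the
 * callback runs from MAD completion context and must not sleep:
 *
 *      static void my_path_handler(int status, struct ib_sa_path_rec *resp,
 *                                  void *context)
 *      {
 *              if (!status)
 *                      printk(KERN_INFO "path dlid 0x%x\n",
 *                             be16_to_cpu(resp->dlid));
 *      }
 *
 *      ib_sa_register_client(&my_sa_client);
 *      id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *                              IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *                              1000, GFP_KERNEL,
 *                              my_path_handler, NULL, &my_query);
 */
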
static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
                                       int status,
                                       struct ib_sa_mad *mad)
{
        struct ib_sa_service_query *query =
                container_of(sa_query, struct ib_sa_service_query, sa_query);

        if (mad) {
                struct ib_sa_service_rec rec;

                ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client:SA client
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code.  Otherwise it is a request ID that can be used to cancel
 * the request.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
                            struct ib_device *device, u8 port_num, u8 method,
                            struct ib_sa_service_rec *rec,
                            ib_sa_comp_mask comp_mask,
                            int timeout_ms, gfp_t gfp_mask,
                            void (*callback)(int status,
                                             struct ib_sa_service_rec *resp,
                                             void *context),
                            void *context,
                            struct ib_sa_query **sa_query)
{
        struct ib_sa_service_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port   *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        port  = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        if (method != IB_MGMT_METHOD_GET &&
            method != IB_MGMT_METHOD_SET &&
            method != IB_SA_METHOD_DELETE)
                return -EINVAL;

        query = kmalloc(sizeof *query, gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
                                                     0, IB_MGMT_SA_HDR,
                                                     IB_MGMT_SA_DATA, gfp_mask);
        if (!query->sa_query.mad_buf) {
                ret = -ENOMEM;
                goto err1;
        }

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback        = callback;
        query->context         = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
        query->sa_query.release  = ib_sa_service_rec_release;
        query->sa_query.port     = port;
        mad->mad_hdr.method      = method;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
        mad->sa_hdr.comp_mask    = comp_mask;

        ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
                rec, mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        ib_free_send_mad(query->sa_query.mad_buf);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
                                        int status,
                                        struct ib_sa_mad *mad)
{
        struct ib_sa_mcmember_query *query =
                container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

        if (mad) {
                struct ib_sa_mcmember_rec rec;

                ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

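/*
 * Start a MCMember Record request.  Semantics follow
 * ib_sa_service_rec_query(): @method selects get/set/delete, the
 * callback fires when the request completes, and a non-negative return
 * value is a request ID usable with ib_sa_cancel_query().
 */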
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
                             struct ib_device *device, u8 port_num,
                             u8 method,
                             struct ib_sa_mcmember_rec *rec,
                             ib_sa_comp_mask comp_mask,
                             int timeout_ms, gfp_t gfp_mask,
                             void (*callback)(int status,
                                              struct ib_sa_mcmember_rec *resp,
                                              void *context),
                             void *context,
                             struct ib_sa_query **sa_query)
{
        struct ib_sa_mcmember_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port   *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        port  = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        query = kmalloc(sizeof *query, gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
                                                     0, IB_MGMT_SA_HDR,
                                                     IB_MGMT_SA_DATA, gfp_mask);
        if (!query->sa_query.mad_buf) {
                ret = -ENOMEM;
                goto err1;
        }

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback        = callback;
        query->context         = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
        query->sa_query.release  = ib_sa_mcmember_rec_release;
        query->sa_query.port     = port;
        mad->mad_hdr.method      = method;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
        mad->sa_hdr.comp_mask    = comp_mask;

        ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
                rec, mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        ib_free_send_mad(query->sa_query.mad_buf);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_mcmember_rec_query);

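/*
 * Send-side MAD completion: a successful send gets its callback from
 * recv_handler() when the response arrives, so only timeouts, flushes
 * and errors are reported here.  In all cases the query's IDR entry,
 * AH reference and client reference are released.
 */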
static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
        unsigned long flags;

        if (query->callback)
                switch (mad_send_wc->status) {
                case IB_WC_SUCCESS:
                        /* No callback -- already got recv */
                        break;
                case IB_WC_RESP_TIMEOUT_ERR:
                        query->callback(query, -ETIMEDOUT, NULL);
                        break;
                case IB_WC_WR_FLUSH_ERR:
                        query->callback(query, -EINTR, NULL);
                        break;
                default:
                        query->callback(query, -EIO, NULL);
                        break;
                }

        spin_lock_irqsave(&idr_lock, flags);
        idr_remove(&query_idr, query->id);
        spin_unlock_irqrestore(&idr_lock, flags);

        ib_free_send_mad(mad_send_wc->send_buf);
        kref_put(&query->sm_ah->ref, free_sm_ah);
        ib_sa_client_put(query->client);
        query->release(query);
}

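/*
 * Receive-side MAD completion: hand the response MAD to the query's
 * callback, mapping a non-zero SA status in the MAD header to -EINVAL.
 */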
static void recv_handler(struct ib_mad_agent *mad_agent,
                         struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_sa_query *query;
        struct ib_mad_send_buf *mad_buf;

        mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
        query = mad_buf->context[0];

        if (query->callback) {
                if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
                        query->callback(query,
                                        mad_recv_wc->recv_buf.mad->mad_hdr.status ?
                                        -EINVAL : 0,
                                        (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
                else
                        query->callback(query, -EIO, NULL);
        }

        ib_free_recv_mad(mad_recv_wc);
}

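/*
 * Per-device setup: allocate one ib_sa_port per port (a switch exposes
 * only port 0), register a GSI MAD agent on each, then register the
 * event handler and prime every port's SM AH.
 */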
static void ib_sa_add_one(struct ib_device *device)
{
        struct ib_sa_device *sa_dev;
        int s, e, i;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        if (device->node_type == RDMA_NODE_IB_SWITCH)
                s = e = 0;
        else {
                s = 1;
                e = device->phys_port_cnt;
        }

        sa_dev = kmalloc(sizeof *sa_dev +
                         (e - s + 1) * sizeof (struct ib_sa_port),
                         GFP_KERNEL);
        if (!sa_dev)
                return;

        sa_dev->start_port = s;
        sa_dev->end_port   = e;

        for (i = 0; i <= e - s; ++i) {
                sa_dev->port[i].sm_ah    = NULL;
                sa_dev->port[i].port_num = i + s;
                spin_lock_init(&sa_dev->port[i].ah_lock);

                sa_dev->port[i].agent =
                        ib_register_mad_agent(device, i + s, IB_QPT_GSI,
                                              NULL, 0, send_handler,
                                              recv_handler, sa_dev);
                if (IS_ERR(sa_dev->port[i].agent))
                        goto err;

                INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
        }

        ib_set_client_data(device, &sa_client, sa_dev);

        /*
         * We register our event handler after everything is set up,
         * and then update our cached info after the event handler is
         * registered to avoid any problems if a port changes state
         * during our initialization.
         */

        INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
        if (ib_register_event_handler(&sa_dev->event_handler))
                goto err;

        for (i = 0; i <= e - s; ++i)
                update_sm_ah(&sa_dev->port[i].update_task);

        return;

err:
        while (--i >= 0)
                ib_unregister_mad_agent(sa_dev->port[i].agent);

        kfree(sa_dev);
}

static void ib_sa_remove_one(struct ib_device *device)
{
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        int i;

        if (!sa_dev)
                return;

        ib_unregister_event_handler(&sa_dev->event_handler);

        flush_scheduled_work();

        for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
                ib_unregister_mad_agent(sa_dev->port[i].agent);
                /* sm_ah is NULL if update_sm_ah() never succeeded */
                if (sa_dev->port[i].sm_ah)
                        kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
        }

        kfree(sa_dev);
}

static int __init ib_sa_init(void)
{
        int ret;

        spin_lock_init(&idr_lock);
        spin_lock_init(&tid_lock);

        get_random_bytes(&tid, sizeof tid);

        ret = ib_register_client(&sa_client);
        if (ret) {
                printk(KERN_ERR "Couldn't register ib_sa client\n");
                goto err1;
        }

        ret = mcast_init();
        if (ret) {
                printk(KERN_ERR "Couldn't initialize multicast handling\n");
                goto err2;
        }

        return 0;

err2:
        ib_unregister_client(&sa_client);
err1:
        return ret;
}

static void __exit ib_sa_cleanup(void)
{
        mcast_cleanup();
        ib_unregister_client(&sa_client);
        idr_destroy(&query_idr);
}

module_init(ib_sa_init);
module_exit(ib_sa_cleanup);