net/rds/ib.h  (linux-2.6.git, commit 2efd9d11e7d46baf195092f923bbe9067249107f)
#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_FMR_SIZE                    256
#define RDS_FMR_POOL_SIZE               4096

#define RDS_IB_MAX_SGE                  8
#define RDS_IB_RECV_SGE                 2

#define RDS_IB_DEFAULT_RECV_WR          1024
#define RDS_IB_DEFAULT_SEND_WR          256

#define RDS_IB_DEFAULT_RETRY_COUNT      2

#define RDS_IB_SUPPORTED_PROTOCOLS      0x00000003      /* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT      32

extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try to minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
struct rds_page_frag {
        struct list_head        f_item;
        struct list_head        f_cache_entry;
        struct scatterlist      f_sg;
};

struct rds_ib_incoming {
        struct list_head        ii_frags;
        struct list_head        ii_cache_entry;
        struct rds_incoming     ii_inc;
};

struct rds_ib_cache_head {
        struct list_head *first;
        unsigned long count;
};

struct rds_ib_refill_cache {
        struct rds_ib_cache_head *percpu;
        struct list_head         *xfer;
        struct list_head         *ready;
};

struct rds_ib_connect_private {
        /* Add new fields at the end, and don't permute existing fields. */
        __be32                  dp_saddr;
        __be32                  dp_daddr;
        u8                      dp_protocol_major;
        u8                      dp_protocol_minor;
        __be16                  dp_protocol_minor_mask; /* bitmask */
        __be32                  dp_reserved1;
        __be64                  dp_ack_seq;
        __be32                  dp_credit;              /* non-zero enables flow ctl */
};
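
/*
 * Illustrative note, not part of the original header: each set bit in
 * dp_protocol_minor_mask advertises one supported minor version, so
 * RDS_IB_SUPPORTED_PROTOCOLS == 0x3 would cover minors 0 and 1.  A peer
 * negotiating a version would typically intersect the two masks and take
 * the highest common bit, e.g. (sketch, variable names are hypothetical):
 *
 *	u16 common = ntohs(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
 *	u8 minor = common ? fls(common) - 1 : 0;	-- highest common minor
 */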

struct rds_ib_send_work {
        void                    *s_op;
        struct ib_send_wr       s_wr;
        struct ib_sge           s_sge[RDS_IB_MAX_SGE];
        unsigned long           s_queued;
};

struct rds_ib_recv_work {
        struct rds_ib_incoming  *r_ibinc;
        struct rds_page_frag    *r_frag;
        struct ib_recv_wr       r_wr;
        struct ib_sge           r_sge[2];
};

struct rds_ib_work_ring {
        u32             w_nr;
        u32             w_alloc_ptr;
        u32             w_alloc_ctr;
        u32             w_free_ptr;
        atomic_t        w_free_ctr;
};

struct rds_ib_device;

struct rds_ib_connection {

        struct list_head        ib_node;
        struct rds_ib_device    *rds_ibdev;
        struct rds_connection   *conn;

        /* alphabet soup, IBTA style */
        struct rdma_cm_id       *i_cm_id;
        struct ib_pd            *i_pd;
        struct ib_mr            *i_mr;
        struct ib_cq            *i_send_cq;
        struct ib_cq            *i_recv_cq;

        /* tx */
        struct rds_ib_work_ring i_send_ring;
        struct rm_data_op       *i_data_op;
        struct rds_header       *i_send_hdrs;
        u64                     i_send_hdrs_dma;
        struct rds_ib_send_work *i_sends;

        /* rx */
        struct tasklet_struct   i_recv_tasklet;
        struct mutex            i_recv_mutex;
        struct rds_ib_work_ring i_recv_ring;
        struct rds_ib_incoming  *i_ibinc;
        u32                     i_recv_data_rem;
        struct rds_header       *i_recv_hdrs;
        u64                     i_recv_hdrs_dma;
        struct rds_ib_recv_work *i_recvs;
        u64                     i_ack_recv;     /* last ACK received */
        struct rds_ib_refill_cache i_cache_incs;
        struct rds_ib_refill_cache i_cache_frags;

        /* sending acks */
        unsigned long           i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
        atomic64_t              i_ack_next;     /* next ACK to send */
#else
        spinlock_t              i_ack_lock;     /* protect i_ack_next */
        u64                     i_ack_next;     /* next ACK to send */
#endif
        struct rds_header       *i_ack;
        struct ib_send_wr       i_ack_wr;
        struct ib_sge           i_ack_sge;
        u64                     i_ack_dma;
        unsigned long           i_ack_queued;

        /* Flow control related information
         *
         * Our algorithm uses a pair of variables that we need to access
         * atomically - one for the send credits, and one for the posted
         * recv credits we need to transfer to the remote.
         * Rather than protect them with a slow spinlock, we put both into
         * a single atomic_t and update it using cmpxchg.
         */
        atomic_t                i_credits;

        /* Protocol version specific information */
        unsigned int            i_flowctl:1;    /* enable/disable flow ctl */

        /* Batched completions */
        unsigned int            i_unsignaled_wrs;
};
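
/*
 * Illustrative sketch, not part of the original header: the two
 * i_ack_next representations above are normally hidden behind small
 * accessors so callers never see the #ifdef.  A hypothetical setter
 * could look roughly like this:
 *
 *	static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq)
 *	{
 *	#ifdef KERNEL_HAS_ATOMIC64
 *		atomic64_set(&ic->i_ack_next, seq);
 *	#else
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&ic->i_ack_lock, flags);
 *		ic->i_ack_next = seq;
 *		spin_unlock_irqrestore(&ic->i_ack_lock, flags);
 *	#endif
 *	}
 */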

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)  ((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)  ((v) >> 16)
#define IB_SET_SEND_CREDITS(v)  ((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)  ((v) << 16)
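
/*
 * Illustrative sketch, not part of the original header: one way the packed
 * credit word could be consumed atomically, in the spirit of the cmpxchg
 * scheme described above.  The helper name is hypothetical; the real logic
 * lives in ib_send.c (rds_ib_send_grab_credits / rds_ib_send_add_credits).
 *
 *	static inline int rds_ib_try_take_send_credit(atomic_t *credits)
 *	{
 *		int oldval, newval;
 *
 *		do {
 *			oldval = atomic_read(credits);
 *			if (IB_GET_SEND_CREDITS(oldval) == 0)
 *				return 0;	-- no send credits left
 *			newval = oldval - 1;	-- send credits are the low 16 bits
 *		} while (atomic_cmpxchg(credits, oldval, newval) != oldval);
 *
 *		return 1;
 *	}
 */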

struct rds_ib_ipaddr {
        struct list_head        list;
        __be32                  ipaddr;
};

struct rds_ib_device {
        struct list_head        list;
        struct list_head        ipaddr_list;
        struct list_head        conn_list;
        struct ib_device        *dev;
        struct ib_pd            *pd;
        struct ib_mr            *mr;
        struct rds_ib_mr_pool   *mr_pool;
        unsigned int            fmr_max_remaps;
        unsigned int            max_fmrs;
        int                     max_sge;
        unsigned int            max_wrs;
        unsigned int            max_initiator_depth;
        unsigned int            max_responder_resources;
        spinlock_t              spinlock;       /* protect the above */
        atomic_t                refcount;
        struct work_struct      free_work;
};

#define pcidev_to_node(pcidev) pcibus_to_node(pcidev->bus)
#define ibdev_to_node(ibdev) pcidev_to_node(to_pci_dev(ibdev->dma_device))
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT        0
#define IB_ACK_REQUESTED        1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID        (~(u64) 0)
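
/*
 * Illustrative sketch, not part of the original header: the send completion
 * handler can use this sentinel to tell ACK completions apart from normal
 * message sends, roughly:
 *
 *	if (wc->wr_id == RDS_IB_ACK_WR_ID) {
 *		rds_ib_ack_send_complete(ic);
 *		continue;	-- no send-ring entry to free for an ACK
 *	}
 */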

struct rds_ib_statistics {
        uint64_t        s_ib_connect_raced;
        uint64_t        s_ib_listen_closed_stale;
        uint64_t        s_ib_tx_cq_call;
        uint64_t        s_ib_tx_cq_event;
        uint64_t        s_ib_tx_ring_full;
        uint64_t        s_ib_tx_throttle;
        uint64_t        s_ib_tx_sg_mapping_failure;
        uint64_t        s_ib_tx_stalled;
        uint64_t        s_ib_tx_credit_updates;
        uint64_t        s_ib_rx_cq_call;
        uint64_t        s_ib_rx_cq_event;
        uint64_t        s_ib_rx_ring_empty;
        uint64_t        s_ib_rx_refill_from_cq;
        uint64_t        s_ib_rx_refill_from_thread;
        uint64_t        s_ib_rx_alloc_limit;
        uint64_t        s_ib_rx_credit_updates;
        uint64_t        s_ib_ack_sent;
        uint64_t        s_ib_ack_send_failure;
        uint64_t        s_ib_ack_send_delayed;
        uint64_t        s_ib_ack_send_piggybacked;
        uint64_t        s_ib_ack_received;
        uint64_t        s_ib_rdma_mr_alloc;
        uint64_t        s_ib_rdma_mr_free;
        uint64_t        s_ib_rdma_mr_used;
        uint64_t        s_ib_rdma_mr_pool_flush;
        uint64_t        s_ib_rdma_mr_pool_wait;
        uint64_t        s_ib_rdma_mr_pool_depleted;
        uint64_t        s_ib_atomic_cswp;
        uint64_t        s_ib_atomic_fadd;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define them.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
                struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
        unsigned int i;

        for (i = 0; i < sg_dma_len; ++i) {
                ib_dma_sync_single_for_cpu(dev,
                                ib_sg_dma_address(dev, &sg[i]),
                                ib_sg_dma_len(dev, &sg[i]),
                                direction);
        }
}
#define ib_dma_sync_sg_for_cpu  rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
                struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
        unsigned int i;

        for (i = 0; i < sg_dma_len; ++i) {
                ib_dma_sync_single_for_device(dev,
                                ib_sg_dma_address(dev, &sg[i]),
                                ib_sg_dma_len(dev, &sg[i]),
                                direction);
        }
}
#define ib_dma_sync_sg_for_device       rds_ib_dma_sync_sg_for_device
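
/*
 * Illustrative usage sketch, not part of the original header: a receive
 * path would typically sync a fragment's scatterlist for the CPU before
 * copying it out, along the lines of:
 *
 *	ib_dma_sync_sg_for_cpu(ic->i_cm_id->device, &frag->f_sg, 1,
 *			       DMA_FROM_DEVICE);
 */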


/* ib.c */
extern struct rds_transport rds_ib_transport;
extern void rds_ib_add_one(struct ib_device *device);
extern void rds_ib_remove_one(struct ib_device *device);
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int fmr_pool_size;
extern unsigned int fmr_message_size;
extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_connect(struct rds_connection *conn);
void rds_ib_conn_shutdown(struct rds_connection *conn);
void rds_ib_state_change(struct sock *sk);
int __init rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
                             struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
                                struct rdma_cm_event *event);


#define rds_ib_conn_error(conn, fmt...) \
        __rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock);
static inline void rds_ib_destroy_nodev_conns(void)
{
        __rds_ib_destroy_conns(&ib_nodev_conns, &ib_nodev_conns_lock);
}
static inline void rds_ib_destroy_conns(struct rds_ib_device *rds_ibdev)
{
        __rds_ib_destroy_conns(&rds_ibdev->conn_list, &rds_ibdev->spinlock);
}
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *);
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                    struct rds_sock *rs, u32 *key_ret);
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);

/* ib_recv.c */
int __init rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv(struct rds_connection *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
int rds_ib_recv_refill(struct rds_connection *conn, int prefill);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
                             size_t size);
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;

/* ib_send.c */
void rds_ib_xmit_complete(struct rds_connection *conn);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
                unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
                             u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
                                    unsigned int avail);

/* ib_sysctl.c */
int __init rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;
extern ctl_table rds_ib_sysctl_table[];

#endif