/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"

/**
 * ipath_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with qp->s_lock held.
 */
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
	struct ipath_cq_wc *wc;
	unsigned long flags;
	u32 head;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Note that the head pointer might be writable by user processes.
	 * Take care to verify it is a sane value.
	 */
	wc = cq->queue;
	head = wc->head;
	if (head >= (unsigned) cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else {
		next = head + 1;
	}
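	/*
	 * The queue has cq->ibcq.cqe + 1 slots and one is always left
	 * empty, so hitting the tail when advancing the head means the
	 * queue is full.
	 */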
	if (unlikely(next == wc->tail)) {
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
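	/*
	 * Copy the completion field by field: the mmapped queue entry
	 * stores the QP number rather than the kernel's qp pointer.
	 */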
	wc->queue[head].wr_id = entry->wr_id;
	wc->queue[head].status = entry->status;
	wc->queue[head].opcode = entry->opcode;
	wc->queue[head].vendor_err = entry->vendor_err;
	wc->queue[head].byte_len = entry->byte_len;
	wc->queue[head].imm_data = (__u32 __force)entry->imm_data;
	wc->queue[head].qp_num = entry->qp->qp_num;
	wc->queue[head].src_qp = entry->src_qp;
	wc->queue[head].wc_flags = entry->wc_flags;
	wc->queue[head].pkey_index = entry->pkey_index;
	wc->queue[head].slid = entry->slid;
	wc->queue[head].sl = entry->sl;
	wc->queue[head].dlid_path_bits = entry->dlid_path_bits;
	wc->queue[head].port_num = entry->port_num;
	wc->head = next;

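	/*
	 * Wake the consumer if it asked to be notified: on every
	 * completion for IB_CQ_NEXT_COMP, or only on solicited ones
	 * for IB_CQ_SOLICITED.
	 */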
	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = IB_CQ_NONE;
		cq->triggered++;
		/*
		 * This will cause send_complete() to be called in
		 * tasklet (softirq) context.
		 */
		tasklet_hi_schedule(&cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	if (entry->status != IB_WC_SUCCESS)
		to_idev(cq->ibcq.device)->n_wqe_errs++;
}

/**
 * ipath_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ipath_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;

	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->queue;
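	/*
	 * The tail pointer, like the head in ipath_cq_enter(), may be
	 * writable by user processes, so clamp it to a sane value.
	 */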
	tail = wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		struct ipath_qp *qp;

		if (tail == wc->head)
			break;

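		/*
		 * The entry stores the QP number; ipath_lookup_qpn()
		 * returns with a reference held, which is dropped once
		 * the completion has been copied out.
		 */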
		qp = ipath_lookup_qpn(&to_idev(cq->ibcq.device)->qp_table,
				      wc->queue[tail].qp_num);
		/* Guard against the QP having already been destroyed. */
		if (qp) {
			entry->qp = &qp->ibqp;
			if (atomic_dec_and_test(&qp->refcount))
				wake_up(&qp->wait);
		} else {
			entry->qp = NULL;
		}

		entry->wr_id = wc->queue[tail].wr_id;
		entry->status = wc->queue[tail].status;
		entry->opcode = wc->queue[tail].opcode;
		entry->vendor_err = wc->queue[tail].vendor_err;
		entry->byte_len = wc->queue[tail].byte_len;
		entry->imm_data = wc->queue[tail].imm_data;
		entry->src_qp = wc->queue[tail].src_qp;
		entry->wc_flags = wc->queue[tail].wc_flags;
		entry->pkey_index = wc->queue[tail].pkey_index;
		entry->slid = wc->queue[tail].slid;
		entry->sl = wc->queue[tail].sl;
		entry->dlid_path_bits = wc->queue[tail].dlid_path_bits;
		entry->port_num = wc->queue[tail].port_num;
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

static void send_complete(unsigned long data)
{
	struct ipath_cq *cq = (struct ipath_cq *)data;

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries.  If a new completion entry
	 * is added while we are in this routine, tasklet_hi_schedule()
	 * won't call us again until we return, so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

		if (cq->triggered == triggered)
			return;
	}
}

/**
 * ipath_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @comp_vector: unused by the InfiniPath driver
 * @context: unused by the InfiniPath driver
 * @udata: used to return the offset for mmapping the CQ from user space
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cq *cq;
	struct ipath_cq_wc *wc;
	struct ib_cq *ret;

	if (entries < 1 || entries > ib_ipath_max_cqes) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	/* Allocate the completion queue structure. */
	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * entries);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;
		u32 s = sizeof(*wc) + sizeof(struct ib_wc) * entries;

		cq->ip = ipath_create_mmap_info(dev, s, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else {
		cq->ip = NULL;
	}

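	/* Enforce the per-device limit on the number of CQs. */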
	spin_lock(&dev->n_cqs_lock);
	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
		spin_unlock(&dev->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_cqs_allocated++;
	spin_unlock(&dev->n_cqs_lock);

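	/*
	 * Queue the mmap info so a later ipath_mmap() call can find
	 * the CQ's queue.
	 */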
	if (cq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The reported number of entries must be >= the number requested,
	 * so report the requested size.
	 */
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
	tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
	wc->head = 0;
	wc->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}

/**
 * ipath_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int ipath_destroy_cq(struct ib_cq *ibcq)
{
	struct ipath_ibdev *dev = to_idev(ibcq->device);
	struct ipath_cq *cq = to_icq(ibcq);

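	/* Wait for any scheduled completion callback to finish. */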
	tasklet_kill(&cq->comptask);
	spin_lock(&dev->n_cqs_lock);
	dev->n_cqs_allocated--;
	spin_unlock(&dev->n_cqs_lock);
	if (cq->ip)
		kref_put(&cq->ip->ref, ipath_release_mmap_info);
	else
		vfree(cq->queue);
	kfree(cq);

	return 0;
}

/**
 * ipath_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * Returns 0 for success.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct ipath_cq *cq = to_icq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);
	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

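	/*
	 * Return 1 if the CQ is non-empty so the caller knows to poll
	 * for the completions it would otherwise miss.
	 */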
	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    cq->queue->head != cq->queue->tail)
		ret = 1;

	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}

/**
 * ipath_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the minimum number of entries the new CQ must hold
 * @udata: used to return the new mmap offset to user space
 *
 * Returns 0 for success.
 */
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ipath_cq_wc *old_wc;
	struct ipath_cq_wc *wc;
	u32 head, tail, n;
	int ret;

	if (cqe < 1 || cqe > ib_ipath_max_cqes) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Need to use vmalloc() if we want to support large numbers
	 * of entries.
	 */
	wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * cqe);
	if (!wc) {
		ret = -ENOMEM;
		goto bail;
	}

	/*
	 * Check that we can write an offset to udata before making any
	 * changes; the real offset is returned below once the new queue
	 * is installed.  See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail;
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	old_wc = cq->queue;
	head = old_wc->head;
	if (head > (u32) cq->ibcq.cqe)
		head = (u32) cq->ibcq.cqe;
	tail = old_wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32) cqe < n)) {
		spin_unlock_irq(&cq->lock);
		vfree(wc);
		ret = -EOVERFLOW;
		goto bail;
	}
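	/* Copy the unpolled entries to the front of the new queue. */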
	for (n = 0; tail != head; n++) {
		wc->queue[n] = old_wc->queue[tail];
		if (tail == (u32) cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	wc->head = n;
	wc->tail = 0;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	if (cq->ip) {
		struct ipath_ibdev *dev = to_idev(ibcq->device);
		struct ipath_mmap_info *ip = cq->ip;
		u32 s = sizeof(*wc) + sizeof(struct ib_wc) * cqe;

		ipath_update_mmap_info(dev, ip, s, wc);

		/*
		 * Return the offset to mmap.
		 * See ipath_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				goto bail;
		}

		spin_lock_irq(&dev->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = 0;

bail:
	return ret;
}