net/rds/send.c
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlockup
 * watchdog will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
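
/*
 * Example usage (editor's sketch, assuming RDS is built as a module): the
 * batch size can be set at load time, e.g. "modprobe rds send_batch_count=128",
 * and read back from /sys/module/rds/parameters/send_batch_count; the 0444
 * permissions above make it read-only at runtime.
 */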

/*
 * Reset the send state. Caller must hold c_send_lock when calling here.
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (conn->c_xmit_rm) {
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(conn->c_xmit_rm);
		rds_message_put(conn->c_xmit_rm);
		conn->c_xmit_rm = NULL;
	}
	conn->c_xmit_sg = 0;
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_rdma_sent = 0;
	conn->c_xmit_atomic_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send queue */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	unsigned int send_quota = send_batch_count;
	struct scatterlist *sg;
	int ret = 0;
	int was_empty = 0;
	LIST_HEAD(to_be_dropped);

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 *
	 * The lock holder will issue a retry if they notice that someone queued
	 * a message after they stopped walking the send queue but before they
	 * dropped the lock.
	 */
	if (!mutex_trylock(&conn->c_send_lock)) {
		rds_stats_inc(s_send_sem_contention);
		ret = -ENOMEM;
		goto out;
	}

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (--send_quota) {
		/*
		 * See if we need to send a congestion map update if we're
		 * between sending messages.  The send lock protects our sole
		 * use of c_map_offset and _bytes.
		 * Note this is used only by transports that define a special
		 * xmit_cong_map function. For all others, we allocate
		 * a cong_map message and treat it just like any other send.
		 */
		if (conn->c_map_bytes) {
			ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
							   conn->c_map_offset);
			if (ret <= 0)
				break;

			conn->c_map_offset += ret;
			conn->c_map_bytes -= ret;
			if (conn->c_map_bytes)
				continue;
		}

		/* If we're done sending the current message, clear the
		 * offset and S/G temporaries.
		 */
		rm = conn->c_xmit_rm;
		if (rm &&
		    conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
		    conn->c_xmit_sg == rm->data.m_nents) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			conn->c_xmit_atomic_sent = 0;

			/* Release the reference to the previous message. */
			rds_message_put(rm);
			rm = NULL;
		}

		/* If we're asked to send a cong map update, do so.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			if (conn->c_trans->xmit_cong_map) {
				conn->c_map_offset = 0;
				conn->c_map_bytes = sizeof(struct rds_header) +
					RDS_CONG_MAP_BYTES;
				continue;
			}

			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}

			conn->c_xmit_rm = rm;
		}

		/*
		 * Grab the next message from the send queue, if there is one.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while holding the
		 * send lock; rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the
				 * retransmit list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);

			if (!rm) {
				was_empty = 1;
				break;
			}

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->rdma.m_rdma_op.r_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				rds_message_put(rm);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}

			conn->c_xmit_rm = rm;
		}

		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret)
				break;
			conn->c_xmit_atomic_sent = 1;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		/*
		 * Try and send an rdma message.  Let's see if we can
		 * keep this simple and require that the transport either
		 * send the whole rdma or none of it.
		 */
		if (rm->rdma.m_rdma_op.r_active && !conn->c_xmit_rdma_sent) {
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma.m_rdma_op);
			if (ret)
				break;
			conn->c_xmit_rdma_sent = 1;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		if (conn->c_xmit_hdr_off < sizeof(struct rds_header) ||
		    conn->c_xmit_sg < rm->data.m_nents) {
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.m_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->data.m_nents);
				}
			}
		}
	}

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped))
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);

	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);

	/*
	 * We might be racing with another sender who queued a message but
	 * backed off on noticing that we held the c_send_lock.  If we check
	 * for queued messages after dropping the lock then either we'll
	 * see the queued message or the queuer will get the lock.  If we
	 * notice the queued message then we trigger an immediate retry.
	 *
	 * We need to be careful only to do this when we stopped processing
	 * the send queue because it was empty.  It's the only way we
	 * stop processing the loop when the transport hasn't taken
	 * responsibility for forward progress.
	 */
	mutex_unlock(&conn->c_send_lock);

	if (conn->c_map_bytes || (send_quota == 0 && !was_empty)) {
		/* We exhausted the send quota, but there's work left to
		 * do. Return and (re-)schedule the send worker.
		 */
		ret = -EAGAIN;
	}

	if (ret == 0 && was_empty) {
		/* A simple bit test would be way faster than taking the
		 * spin lock */
		spin_lock_irqsave(&conn->c_lock, flags);
		if (!list_empty(&conn->c_send_queue)) {
			rds_stats_inc(s_send_sem_queue_raced);
			ret = -EAGAIN;
		}
		spin_unlock_irqrestore(&conn->c_lock, flags);
	}
out:
	return ret;
}
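
/*
 * Editor's note (assumed caller; see rds_send_worker() in threads.c): the
 * -EAGAIN returned above is what turns the send_batch_count quota into
 * cooperative rescheduling, roughly:
 *
 *	ret = rds_send_xmit(conn);
 *	if (ret == -EAGAIN)
 *		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 */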

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}

static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
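
/*
 * Editor's sketch (assumed; modeled on the TCP transport's callback): an
 * is_acked_func only trusts a transport-assigned m_ack_seq once it has
 * actually been stamped, which is what RDS_MSG_HAS_ACK_SEQ records:
 *
 *	static int example_is_acked(struct rds_message *rm, u64 ack)
 *	{
 *		if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
 *			return 0;
 *		return rm->m_ack_seq <= ack;
 *	}
 */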

/*
 * Returns true if there are no messages on the send and retransmit queues
 * which have a sequence number less than the given sequence number (i.e.
 * everything below it has already been acked).  Both lists are ordered by
 * sequence number, so only the head of each needs checking.
 */
int rds_send_acked_before(struct rds_connection *conn, u64 seq)
{
	struct rds_message *rm, *tmp;
	int ret = 1;

	spin_lock(&conn->c_lock);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}

	spin_unlock(&conn->c_lock);

	return ret;
}

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rds_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma.m_rdma_op;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->r_active && ro->r_notify && ro->r_notifier) {
		notifier = ro->r_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->r_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
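
/*
 * Editor's sketch (assumed; the real mapping lives in the IB transport's
 * completion handler): a transport is expected to translate its hardware
 * completion status into an RDS notifier status before calling here,
 * roughly:
 *
 *	int status = RDS_RDMA_SUCCESS;
 *	if (wc.status != IB_WC_SUCCESS)		// IB verbs work completion
 *		status = RDS_RDMA_REMOTE_ERROR;
 *	rds_rdma_send_complete(rm, status);
 */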

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;

	spin_lock(&rm->m_rs_lock);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock(&rm->m_rs_lock);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rds_rdma_op *ro;

	ro = &rm->rdma.m_rdma_op;
	if (ro->r_active && ro->r_notify && ro->r_notifier) {
		ro->r_notifier->n_status = status;
		list_add_tail(&ro->r_notifier->n_list, &rs->rs_notify_queue);
		ro->r_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with a remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rds_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (&rm->rdma.m_rdma_op == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (&rm->rdma.m_rdma_op == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}

out:
	spin_unlock_irqrestore(&conn->c_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			sock_hold(rds_rs_to_sk(rs));
		}
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rds_rdma_op *ro = &rm->rdma.m_rdma_op;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->r_active && ro->r_notifier &&
			    (ro->r_notify || (ro->r_recverr && status))) {
				notifier = ro->r_notifier;
				list_add_tail(&notifier->n_list,
						&rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.m_rdma_op.r_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 *
 * XXX It's not clear to me how this is safely serialized with socket
 * destruction.  Maybe it should bail if it sees SOCK_DEAD.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;

		spin_lock_irqsave(&conn->c_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&conn->c_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&conn->c_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);

		rds_message_wait(rm);
		rds_message_put(rm);
	}
}

/*
 * We only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If we required the new message to fit entirely within sndbuf,
	 * then with only a little space left we would queue nothing and
	 * userspace would get -EAGAIN, yet poll() would still indicate
	 * send room. That can lead to bad behavior (spinning) if snd_bytes
	 * isn't freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * after which poll() knows no more data can be sent.
	 */
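	/*
	 * Worked example (editor's illustration): with a 64KB sndbuf and
	 * rs_snd_bytes at 60KB, a 16KB message is still queued because the
	 * old value (60KB) is below the limit; rs_snd_bytes then reaches
	 * 76KB, so poll() stops reporting send room until acks drain it.
	 */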
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int retval;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
			if (retval < 0)
				return retval;
			size += retval;
			break;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}
	}

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

	return size;
}
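
/*
 * Worked example (editor's illustration): a 9000-byte payload on 4KB pages
 * needs ceil(9000 / 4096) = 3 data scatterlist entries, so a plain send with
 * no cmsgs is sized for 3 * sizeof(struct scatterlist).
 */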

static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}

int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);

	/* Mirror Linux UDP's mirroring of BSD error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	/* racing with another thread binding seems ok here */
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->data.m_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
	/* XXX fix this to not allocate memory */
	ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
	if (ret)
		goto out;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
					rs->rs_transport,
					sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if ((rm->m_rdma_cookie || rm->rdma.m_rdma_op.r_active) &&
	    !conn->c_trans->xmit_rdma) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
			       &rm->rdma.m_rdma_op, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
			       &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* If the connection is down, trigger a connect. We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}

	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);
		/* XXX make sure this is reasonable */
		if (payload_len > rds_sk_sndbuf(rs)) {
			ret = -EMSGSIZE;
			goto out;
		}
		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_worker(&conn->c_send_w.work);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated an MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}
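
/*
 * Editor's sketch of the userspace side this path serves (assumed minimal
 * usage; PF_RDS sockets are SOCK_SEQPACKET and are addressed with
 * sockaddr_in, as parsed above):
 *
 *	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
 *	struct sockaddr_in src = { .sin_family = AF_INET, ... };
 *	bind(fd, (struct sockaddr *)&src, sizeof(src));
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	struct sockaddr_in dst = { .sin_family = AF_INET, ... };
 *	struct msghdr mh = {
 *		.msg_name	= &dst,
 *		.msg_namelen	= sizeof(dst),
 *		.msg_iov	= &iov,
 *		.msg_iovlen	= 1,
 *	};
 *	sendmsg(fd, &mh, 0);	// ends up in rds_sendmsg() above
 */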

/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;

	/* If the connection is down, trigger a connect. We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}