/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * respond to an incoming call with a BUSY packet
 */
static int rxrpc_busy(struct rxrpc_local *local, struct sockaddr_rxrpc *srx,
		      struct rxrpc_header *hdr)
{
	struct msghdr msg;
	struct kvec iov[1];
	size_t len;
	int ret;

	_enter("%d,,", local->debug_id);

	msg.msg_name	= &srx->transport.sin;
	msg.msg_namelen	= sizeof(srx->transport.sin);
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	hdr->seq	= 0;
	hdr->type	= RXRPC_PACKET_TYPE_BUSY;
	hdr->flags	= 0;
	hdr->userStatus	= 0;
	hdr->_rsvd	= 0;

	iov[0].iov_base	= hdr;
	iov[0].iov_len	= sizeof(*hdr);

	len = iov[0].iov_len;

	hdr->serial = htonl(1);
	_proto("Tx BUSY %%%u", ntohl(hdr->serial));

	ret = kernel_sendmsg(local->socket, &msg, iov, 1, len);
	if (ret < 0) {
		_leave(" = -EAGAIN [sendmsg failed: %d]", ret);
		return -EAGAIN;
	}

	_leave(" = 0");
	return 0;
}

/*
 * accept an incoming call that needs peer, transport and/or connection
 * setting up
 */
static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
				      struct rxrpc_sock *rx,
				      struct sk_buff *skb,
				      struct sockaddr_rxrpc *srx)
{
	struct rxrpc_connection *conn;
	struct rxrpc_transport *trans;
	struct rxrpc_skb_priv *sp, *nsp;
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;
	struct sk_buff *notification;
	int ret;

	_enter("");

	sp = rxrpc_skb(skb);

	/* get a notification message to send to the server app */
	notification = alloc_skb(0, GFP_NOFS);
	if (!notification) {
		_debug("no memory");
		ret = -ENOMEM;
		goto error_nofree;
	}
	rxrpc_new_skb(notification);
	notification->mark = RXRPC_SKB_MARK_NEW_CALL;

	peer = rxrpc_get_peer(srx, GFP_NOIO);
	if (IS_ERR(peer)) {
		_debug("no peer");
		ret = -EBUSY;
		goto error;
	}

	trans = rxrpc_get_transport(local, peer, GFP_NOIO);
	rxrpc_put_peer(peer);
	if (IS_ERR(trans)) {
		_debug("no trans");
		ret = -EBUSY;
		goto error;
	}

	conn = rxrpc_incoming_connection(trans, &sp->hdr, GFP_NOIO);
	rxrpc_put_transport(trans);
	if (IS_ERR(conn)) {
		_debug("no conn");
		ret = PTR_ERR(conn);
		goto error;
	}

	call = rxrpc_incoming_call(rx, conn, &sp->hdr, GFP_NOIO);
	rxrpc_put_connection(conn);
	if (IS_ERR(call)) {
		_debug("no call");
		ret = PTR_ERR(call);
		goto error;
	}
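
	/* Each temporary reference taken above was dropped as soon as the
	 * next object had been set up from it: the call is assumed to pin
	 * the connection, which pins the transport, which pins the peer. */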

	/* attach the call to the socket */
	read_lock_bh(&local->services_lock);
	if (rx->sk.sk_state == RXRPC_CLOSE)
		goto invalid_service;

	write_lock(&rx->call_lock);
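	/* Only the first packet seen for this call should queue it for
	 * acceptance; if rxrpc_incoming_call() handed back a call that was
	 * already set up (e.g. for a retransmission of the first DATA
	 * packet), INIT_ACCEPT is already set and we go straight to packet
	 * processing below. */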
	if (!test_and_set_bit(RXRPC_CALL_INIT_ACCEPT, &call->flags)) {
		rxrpc_get_call(call);

		spin_lock(&call->conn->state_lock);
		if (sp->hdr.securityIndex > 0 &&
		    call->conn->state == RXRPC_CONN_SERVER_UNSECURED) {
			_debug("await conn sec");
			list_add_tail(&call->accept_link, &rx->secureq);
			call->conn->state = RXRPC_CONN_SERVER_CHALLENGING;
			atomic_inc(&call->conn->usage);
			set_bit(RXRPC_CONN_CHALLENGE, &call->conn->events);
			rxrpc_queue_conn(call->conn);
		} else {
			_debug("conn ready");
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
			list_add_tail(&call->accept_link, &rx->acceptq);
			rxrpc_get_call(call);
			nsp = rxrpc_skb(notification);
			nsp->call = call;

			ASSERTCMP(atomic_read(&call->usage), >=, 3);

			_debug("notify");
			spin_lock(&call->lock);
			ret = rxrpc_queue_rcv_skb(call, notification, true,
						  false);
			spin_unlock(&call->lock);
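			/* The call's receive queue now owns the notification
			 * skb; clear the local pointer so the common exit
			 * path below doesn't free it. */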
			notification = NULL;
			BUG_ON(ret < 0);
		}
		spin_unlock(&call->conn->state_lock);

		_debug("queued");
	}
	write_unlock(&rx->call_lock);

	_debug("process");
	rxrpc_fast_process_packet(call, skb);

	_debug("done");
	read_unlock_bh(&local->services_lock);
	rxrpc_free_skb(notification);
	rxrpc_put_call(call);
	_leave(" = 0");
	return 0;

invalid_service:
	_debug("invalid");
	read_unlock_bh(&local->services_lock);

	read_lock_bh(&call->state_lock);
	if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) {
		rxrpc_get_call(call);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
	ret = -ECONNREFUSED;
error:
	rxrpc_free_skb(notification);
error_nofree:
	_leave(" = %d", ret);
	return ret;
}

/*
 * accept incoming calls that need peer, transport and/or connection setting up
 * - the packets we get are all incoming client DATA packets that have seq == 1
 */
void rxrpc_accept_incoming_calls(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, acceptor);
	struct rxrpc_skb_priv *sp;
	struct sockaddr_rxrpc srx;
	struct rxrpc_sock *rx;
	struct sk_buff *skb;
	__be16 service_id;
	int ret;

	_enter("%d", local->debug_id);

	read_lock_bh(&rxrpc_local_lock);
	if (atomic_read(&local->usage) > 0)
		rxrpc_get_local(local);
	else
		local = NULL;
	read_unlock_bh(&rxrpc_local_lock);
	if (!local) {
		_leave(" [local dead]");
		return;
	}

process_next_packet:
	skb = skb_dequeue(&local->accept_queue);
	if (!skb) {
		rxrpc_put_local(local);
		_leave("\n");
		return;
	}

	_net("incoming call skb %p", skb);

	sp = rxrpc_skb(skb);

	/* determine the remote address */
	memset(&srx, 0, sizeof(srx));
	srx.srx_family = AF_RXRPC;
	srx.transport.family = local->srx.transport.family;
	srx.transport_type = local->srx.transport_type;
	switch (srx.transport.family) {
	case AF_INET:
		srx.transport_len = sizeof(struct sockaddr_in);
		srx.transport.sin.sin_port = udp_hdr(skb)->source;
		srx.transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
		break;
	default:
		goto busy;
	}
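	/* Only IPv4 transports are handled here; anything else is bounced
	 * with a BUSY packet rather than being given a call. */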

	/* get the socket providing the service */
	service_id = sp->hdr.serviceId;
	read_lock_bh(&local->services_lock);
	list_for_each_entry(rx, &local->services, listen_link) {
		if (rx->service_id == service_id &&
		    rx->sk.sk_state != RXRPC_CLOSE)
			goto found_service;
	}
	read_unlock_bh(&local->services_lock);
	goto invalid_service;

found_service:
	_debug("found service %hd", ntohs(rx->service_id));
	if (sk_acceptq_is_full(&rx->sk))
		goto backlog_full;
	sk_acceptq_added(&rx->sk);
	sock_hold(&rx->sk);
	read_unlock_bh(&local->services_lock);

	ret = rxrpc_accept_incoming_call(local, rx, skb, &srx);
	if (ret < 0)
		sk_acceptq_removed(&rx->sk);
	sock_put(&rx->sk);
	switch (ret) {
	case -ECONNRESET: /* old calls are ignored */
	case -ECONNABORTED: /* aborted calls are reaborted or ignored */
	case 0:
		goto process_next_packet;
	case -ECONNREFUSED:
		goto invalid_service;
	case -EBUSY:
		goto busy;
	case -EKEYREJECTED:
		goto security_mismatch;
	default:
		BUG();
	}

backlog_full:
	read_unlock_bh(&local->services_lock);
busy:
	rxrpc_busy(local, &srx, &sp->hdr);
	rxrpc_free_skb(skb);
	goto process_next_packet;

invalid_service:
	skb->priority = RX_INVALID_OPERATION;
	rxrpc_reject_packet(local, skb);
	goto process_next_packet;

	/* can't change connection security type mid-flow */
security_mismatch:
	skb->priority = RX_PROTOCOL_ERROR;
	rxrpc_reject_packet(local, skb);
	goto process_next_packet;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	ret = -ENODATA;
	if (list_empty(&rx->acceptq))
		goto out;

	/* check the user ID isn't already in use */
	ret = -EBADSLT;
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto out;
	}

	/* dequeue the first call and check it's still valid */
	call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_REMOTELY_ABORTED:
	case RXRPC_CALL_LOCALLY_ABORTED:
		ret = -ECONNABORTED;
		goto out_release;
	case RXRPC_CALL_NETWORK_ERROR:
		ret = call->conn->error;
		goto out_release;
	case RXRPC_CALL_DEAD:
		ret = -ETIME;
		goto out_discard;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->user_call_ID = user_call_ID;
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();
	if (test_and_set_bit(RXRPC_CALL_ACCEPTED, &call->events))
		BUG();
	rxrpc_queue_call(call);

	rxrpc_get_call(call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

	/* if the call is already dying or dead, then we leave the socket's ref
	 * on it to be released by rxrpc_dead_call_expired() as induced by
	 * rxrpc_release_call() */
out_release:
	_debug("release %p", call);
	if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
		rxrpc_queue_call(call);
out_discard:
	write_unlock_bh(&call->state_lock);
	_debug("discard %p", call);
out:
	write_unlock(&rx->call_lock);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
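
/*
 * Acceptance is normally driven from userspace by a sendmsg() on the
 * listening AF_RXRPC socket that carries an RXRPC_ACCEPT control message
 * together with an RXRPC_USER_CALL_ID control message supplying the tag
 * that becomes user_call_ID here (see Documentation/networking/rxrpc.txt
 * for the control message layout).
 */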

/*
 * handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	ret = -ENODATA;
	if (list_empty(&rx->acceptq))
		goto out;

	/* dequeue the first call and check it's still valid */
	call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_BUSY;
		if (test_and_set_bit(RXRPC_CALL_REJECT_BUSY, &call->events))
			rxrpc_queue_call(call);
		ret = 0;
		goto out_release;
	case RXRPC_CALL_REMOTELY_ABORTED:
	case RXRPC_CALL_LOCALLY_ABORTED:
		ret = -ECONNABORTED;
		goto out_release;
	case RXRPC_CALL_NETWORK_ERROR:
		ret = call->conn->error;
		goto out_release;
	case RXRPC_CALL_DEAD:
		ret = -ETIME;
		goto out_discard;
	default:
		BUG();
	}

	/* if the call is already dying or dead, then we leave the socket's ref
	 * on it to be released by rxrpc_dead_call_expired() as induced by
	 * rxrpc_release_call() */
out_release:
	_debug("release %p", call);
	if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
		rxrpc_queue_call(call);
out_discard:
	write_unlock_bh(&call->state_lock);
	_debug("discard %p", call);
out:
	write_unlock(&rx->call_lock);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_accept_call - Allow a kernel service to accept an incoming call
 * @sock: The socket on which the impending call is waiting
 * @user_call_ID: The tag to attach to the call
 *
 * Allow a kernel service to accept an incoming call, assuming the incoming
 * call is still valid.
 */
struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *sock,
					    unsigned long user_call_ID)
{
	struct rxrpc_call *call;

	_enter(",%lx", user_call_ID);
	call = rxrpc_accept_call(rxrpc_sk(sock->sk), user_call_ID);
	_leave(" = %p", call);
	return call;
}

EXPORT_SYMBOL(rxrpc_kernel_accept_call);
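
/*
 * Example (sketch only, not used anywhere in this file): a kernel service
 * holding an AF_RXRPC listen socket might accept the next queued call like
 * this, using the address of its own context structure as the user call ID
 * tag.  The names example_accept_next_call and example_ctx are hypothetical.
 */
static inline struct rxrpc_call *example_accept_next_call(struct socket *sock,
							  void *example_ctx)
{
	/* Returns the accepted call or an ERR_PTR, e.g. -ENODATA when no
	 * call is currently awaiting acceptance. */
	return rxrpc_kernel_accept_call(sock, (unsigned long)example_ctx);
}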

/**
 * rxrpc_kernel_reject_call - Allow a kernel service to reject an incoming call
 * @sock: The socket on which the impending call is waiting
 *
 * Allow a kernel service to reject an incoming call with a BUSY message,
 * assuming the incoming call is still valid.
 */
int rxrpc_kernel_reject_call(struct socket *sock)
{
	int ret;

	_enter("");
	ret = rxrpc_reject_call(rxrpc_sk(sock->sk));
	_leave(" = %d", ret);
	return ret;
}

EXPORT_SYMBOL(rxrpc_kernel_reject_call);
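
/*
 * Example (sketch only, not used anywhere in this file): a kernel service
 * that is too busy to take on more work might bounce the call at the head
 * of its accept queue, causing a BUSY indication to be sent back to the
 * caller.  The name example_bounce_next_call is hypothetical.
 */
static inline int example_bounce_next_call(struct socket *sock)
{
	/* Returns 0 on success or a negative errno, e.g. -ENODATA when no
	 * call is awaiting acceptance. */
	return rxrpc_kernel_reject_call(sock);
}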