[NET]: Conversions from kmalloc+memset to k(z|c)alloc.
net/rxrpc/peer.c (from linux-3.10.git)
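The conversion named in the title replaces a kmalloc() followed by a memset() of the whole object with a single kzalloc() (or kcalloc() for arrays), which allocates and zeroes in one step. In this file the converted site is the peer allocation in __rxrpc_create_peer() below; the pre-conversion form is reconstructed here purely for illustration and is not quoted from the tree:

	/* before: allocate, then zero the whole record by hand */
	peer = kmalloc(sizeof(struct rxrpc_peer), GFP_KERNEL);
	if (!peer) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	memset(peer, 0, sizeof(struct rxrpc_peer));

	/* after: kzalloc() returns already-zeroed memory */
	peer = kzalloc(sizeof(struct rxrpc_peer), GFP_KERNEL);
	if (!peer) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

The file as it stands in this tree follows.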
/* peer.c: Rx RPC peer management
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rxrpc/rxrpc.h>
#include <rxrpc/transport.h>
#include <rxrpc/peer.h>
#include <rxrpc/connection.h>
#include <rxrpc/call.h>
#include <rxrpc/message.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include "internal.h"

__RXACCT_DECL(atomic_t rxrpc_peer_count);
LIST_HEAD(rxrpc_peers);
DECLARE_RWSEM(rxrpc_peers_sem);
unsigned long rxrpc_peer_timeout = 12 * 60 * 60;

static void rxrpc_peer_do_timeout(struct rxrpc_peer *peer);

static void __rxrpc_peer_timeout(rxrpc_timer_t *timer)
{
	struct rxrpc_peer *peer =
		list_entry(timer, struct rxrpc_peer, timeout);

	_debug("Rx PEER TIMEOUT [%p{u=%d}]", peer, atomic_read(&peer->usage));

	rxrpc_peer_do_timeout(peer);
}

static const struct rxrpc_timer_ops rxrpc_peer_timer_ops = {
	.timed_out	= __rxrpc_peer_timeout,
};

/*****************************************************************************/
/*
 * create a peer record
 */
static int __rxrpc_create_peer(struct rxrpc_transport *trans, __be32 addr,
			       struct rxrpc_peer **_peer)
{
	struct rxrpc_peer *peer;

	_enter("%p,%08x", trans, ntohl(addr));

	/* allocate and initialise a peer record */
	peer = kzalloc(sizeof(struct rxrpc_peer), GFP_KERNEL);
	if (!peer) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	atomic_set(&peer->usage, 1);

	INIT_LIST_HEAD(&peer->link);
	INIT_LIST_HEAD(&peer->proc_link);
	INIT_LIST_HEAD(&peer->conn_idlist);
	INIT_LIST_HEAD(&peer->conn_active);
	INIT_LIST_HEAD(&peer->conn_graveyard);
	spin_lock_init(&peer->conn_gylock);
	init_waitqueue_head(&peer->conn_gy_waitq);
	rwlock_init(&peer->conn_idlock);
	rwlock_init(&peer->conn_lock);
	atomic_set(&peer->conn_count, 0);
	spin_lock_init(&peer->lock);
	rxrpc_timer_init(&peer->timeout, &rxrpc_peer_timer_ops);

	peer->addr.s_addr = addr;

	peer->trans = trans;
	peer->ops = trans->peer_ops;

	__RXACCT(atomic_inc(&rxrpc_peer_count));
	*_peer = peer;
	_leave(" = 0 (%p)", peer);

	return 0;
} /* end __rxrpc_create_peer() */

/*****************************************************************************/
/*
 * find a peer record on the specified transport
 * - returns (if successful) with peer record usage incremented
 * - resurrects it from the graveyard if found there
 */
int rxrpc_peer_lookup(struct rxrpc_transport *trans, __be32 addr,
		      struct rxrpc_peer **_peer)
{
	struct rxrpc_peer *peer, *candidate = NULL;
	struct list_head *_p;
	int ret;

	_enter("%p{%hu},%08x", trans, trans->port, ntohl(addr));

	/* [common case] search the transport's active list first */
	read_lock(&trans->peer_lock);
	list_for_each(_p, &trans->peer_active) {
		peer = list_entry(_p, struct rxrpc_peer, link);
		if (peer->addr.s_addr == addr)
			goto found_active;
	}
	read_unlock(&trans->peer_lock);

	/* [uncommon case] not active - create a candidate for a new record */
	ret = __rxrpc_create_peer(trans, addr, &candidate);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	/* search the active list again, just in case it appeared whilst we
	 * were busy */
	write_lock(&trans->peer_lock);
	list_for_each(_p, &trans->peer_active) {
		peer = list_entry(_p, struct rxrpc_peer, link);
		if (peer->addr.s_addr == addr)
			goto found_active_second_chance;
	}

	/* search the transport's graveyard list */
	spin_lock(&trans->peer_gylock);
	list_for_each(_p, &trans->peer_graveyard) {
		peer = list_entry(_p, struct rxrpc_peer, link);
		if (peer->addr.s_addr == addr)
			goto found_in_graveyard;
	}
	spin_unlock(&trans->peer_gylock);

	/* we can now add the new candidate to the list
	 * - tell the application layer that this peer has been added
	 */
	rxrpc_get_transport(trans);
	peer = candidate;
	candidate = NULL;

	if (peer->ops && peer->ops->adding) {
		ret = peer->ops->adding(peer);
		if (ret < 0) {
			write_unlock(&trans->peer_lock);
			__RXACCT(atomic_dec(&rxrpc_peer_count));
			kfree(peer);
			rxrpc_put_transport(trans);
			_leave(" = %d", ret);
			return ret;
		}
	}

	atomic_inc(&trans->peer_count);

 make_active:
	list_add_tail(&peer->link, &trans->peer_active);

 success_uwfree:
	write_unlock(&trans->peer_lock);

	if (candidate) {
		__RXACCT(atomic_dec(&rxrpc_peer_count));
		kfree(candidate);
	}

	if (list_empty(&peer->proc_link)) {
		down_write(&rxrpc_peers_sem);
		list_add_tail(&peer->proc_link, &rxrpc_peers);
		up_write(&rxrpc_peers_sem);
	}

 success:
	*_peer = peer;

	_leave(" = 0 (%p{u=%d cc=%d})",
	       peer,
	       atomic_read(&peer->usage),
	       atomic_read(&peer->conn_count));
	return 0;

	/* handle the peer being found in the active list straight off */
 found_active:
	rxrpc_get_peer(peer);
	read_unlock(&trans->peer_lock);
	goto success;

	/* handle resurrecting a peer from the graveyard */
 found_in_graveyard:
	rxrpc_get_peer(peer);
	rxrpc_get_transport(peer->trans);
	rxrpc_krxtimod_del_timer(&peer->timeout);
	list_del_init(&peer->link);
	spin_unlock(&trans->peer_gylock);
	goto make_active;

	/* handle finding the peer on the second time through the active
	 * list */
 found_active_second_chance:
	rxrpc_get_peer(peer);
	goto success_uwfree;

} /* end rxrpc_peer_lookup() */

/*****************************************************************************/
/*
 * finish with a peer record
 * - it gets sent to the graveyard from where it can be resurrected or timed
 *   out
 */
void rxrpc_put_peer(struct rxrpc_peer *peer)
{
	struct rxrpc_transport *trans = peer->trans;

	_enter("%p{cc=%d a=%08x}",
	       peer,
	       atomic_read(&peer->conn_count),
	       ntohl(peer->addr.s_addr));

	/* sanity check */
	if (atomic_read(&peer->usage) <= 0)
		BUG();

	write_lock(&trans->peer_lock);
	spin_lock(&trans->peer_gylock);
	if (likely(!atomic_dec_and_test(&peer->usage))) {
		spin_unlock(&trans->peer_gylock);
		write_unlock(&trans->peer_lock);
		_leave("");
		return;
	}

	/* move to graveyard queue */
	list_del(&peer->link);
	write_unlock(&trans->peer_lock);

	list_add_tail(&peer->link, &trans->peer_graveyard);

	BUG_ON(!list_empty(&peer->conn_active));

	rxrpc_krxtimod_add_timer(&peer->timeout, rxrpc_peer_timeout * HZ);

	spin_unlock(&trans->peer_gylock);

	rxrpc_put_transport(trans);

	_leave(" [killed]");
} /* end rxrpc_put_peer() */
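
/*****************************************************************************/
/*
 * Illustrative sketch only, not part of the original file: how a caller
 * might balance rxrpc_peer_lookup() against rxrpc_put_peer().  A successful
 * lookup returns with the peer's usage count held, so it must be matched by
 * a put, which parks the record in the graveyard until it is resurrected by
 * a later lookup or timed out.  The function name here is hypothetical.
 */
static int __maybe_unused rxrpc_peer_usage_example(struct rxrpc_transport *trans,
						   __be32 addr)
{
	struct rxrpc_peer *peer;
	int ret;

	ret = rxrpc_peer_lookup(trans, addr, &peer);
	if (ret < 0)
		return ret;

	/* ... use the peer, e.g. consult peer->rtt ... */

	rxrpc_put_peer(peer);
	return 0;
}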

/*****************************************************************************/
/*
 * handle a peer timing out in the graveyard
 * - called from krxtimod
 */
static void rxrpc_peer_do_timeout(struct rxrpc_peer *peer)
{
	struct rxrpc_transport *trans = peer->trans;

	_enter("%p{u=%d cc=%d a=%08x}",
	       peer,
	       atomic_read(&peer->usage),
	       atomic_read(&peer->conn_count),
	       ntohl(peer->addr.s_addr));

	BUG_ON(atomic_read(&peer->usage) < 0);

	/* remove from graveyard if still dead */
	spin_lock(&trans->peer_gylock);
	if (atomic_read(&peer->usage) == 0)
		list_del_init(&peer->link);
	else
		peer = NULL;
	spin_unlock(&trans->peer_gylock);

	if (!peer) {
		_leave("");
		return; /* resurrected */
	}

	/* clear all connections on this peer */
	rxrpc_conn_clearall(peer);

	BUG_ON(!list_empty(&peer->conn_active));
	BUG_ON(!list_empty(&peer->conn_graveyard));

	/* inform the application layer */
	if (peer->ops && peer->ops->discarding)
		peer->ops->discarding(peer);

	if (!list_empty(&peer->proc_link)) {
		down_write(&rxrpc_peers_sem);
		list_del(&peer->proc_link);
		up_write(&rxrpc_peers_sem);
	}

	__RXACCT(atomic_dec(&rxrpc_peer_count));
	kfree(peer);

	/* if the graveyard is now empty, wake up anyone waiting for that */
	if (atomic_dec_and_test(&trans->peer_count))
		wake_up(&trans->peer_gy_waitq);

	_leave(" [destroyed]");
} /* end rxrpc_peer_do_timeout() */

/*****************************************************************************/
/*
 * clear all peer records from a transport endpoint
 */
void rxrpc_peer_clearall(struct rxrpc_transport *trans)
{
	DECLARE_WAITQUEUE(myself, current);

	struct rxrpc_peer *peer;
	int err;

	_enter("%p", trans);

	/* there shouldn't be any active peers remaining */
	BUG_ON(!list_empty(&trans->peer_active));

	/* manually timeout all peers in the graveyard */
	spin_lock(&trans->peer_gylock);
	while (!list_empty(&trans->peer_graveyard)) {
		peer = list_entry(trans->peer_graveyard.next,
				  struct rxrpc_peer, link);
		_debug("Clearing peer %p\n", peer);
		err = rxrpc_krxtimod_del_timer(&peer->timeout);
		spin_unlock(&trans->peer_gylock);

		if (err == 0)
			rxrpc_peer_do_timeout(peer);

		spin_lock(&trans->peer_gylock);
	}
	spin_unlock(&trans->peer_gylock);

	/* wait for the peer graveyard to be completely cleared */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&trans->peer_gy_waitq, &myself);

	while (atomic_read(&trans->peer_count) != 0) {
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}

	remove_wait_queue(&trans->peer_gy_waitq, &myself);
	set_current_state(TASK_RUNNING);

	_leave("");
} /* end rxrpc_peer_clearall() */

/*****************************************************************************/
/*
 * calculate and cache the Round-Trip-Time for a message and its response
 */
void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
			      struct rxrpc_message *msg,
			      struct rxrpc_message *resp)
{
	unsigned long long rtt;
	int loop;

	_enter("%p,%p,%p", peer, msg, resp);

	/* calculate the latest RTT */
	rtt = resp->stamp.tv_sec - msg->stamp.tv_sec;
	rtt *= 1000000UL;
	rtt += resp->stamp.tv_usec - msg->stamp.tv_usec;

	/* add to cache */
	peer->rtt_cache[peer->rtt_point] = rtt;
	peer->rtt_point++;
	peer->rtt_point %= RXRPC_RTT_CACHE_SIZE;

	if (peer->rtt_usage < RXRPC_RTT_CACHE_SIZE)
		peer->rtt_usage++;

	/* recalculate RTT */
	rtt = 0;
	for (loop = peer->rtt_usage - 1; loop >= 0; loop--)
		rtt += peer->rtt_cache[loop];

	do_div(rtt, peer->rtt_usage);
	peer->rtt = rtt;

	_leave(" RTT=%lu.%lums",
	       (unsigned long) (peer->rtt / 1000),
	       (unsigned long) (peer->rtt % 1000));

} /* end rxrpc_peer_calculate_rtt() */
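
For reference, the rolling average maintained by rxrpc_peer_calculate_rtt() is simply a fixed-size ring of RTT samples averaged over however many slots have been filled so far. The following standalone user-space sketch shows the same arithmetic; the names are made up for illustration, with RTT_CACHE_SIZE standing in for RXRPC_RTT_CACHE_SIZE and an arbitrary value of 16:

#include <stdio.h>

#define RTT_CACHE_SIZE 16	/* stand-in for RXRPC_RTT_CACHE_SIZE */

struct rtt_state {
	unsigned long long cache[RTT_CACHE_SIZE];
	unsigned int point;	/* next slot to overwrite */
	unsigned int usage;	/* slots filled so far, at most RTT_CACHE_SIZE */
	unsigned long long rtt;	/* current average in microseconds */
};

/* record one RTT sample (in microseconds) and recompute the average */
static void rtt_add_sample(struct rtt_state *s, unsigned long long sample_us)
{
	unsigned long long sum = 0;
	unsigned int i;

	s->cache[s->point] = sample_us;
	s->point = (s->point + 1) % RTT_CACHE_SIZE;
	if (s->usage < RTT_CACHE_SIZE)
		s->usage++;

	for (i = 0; i < s->usage; i++)
		sum += s->cache[i];
	s->rtt = sum / s->usage;
}

int main(void)
{
	struct rtt_state s = { { 0 }, 0, 0, 0 };

	rtt_add_sample(&s, 1200);	/* 1.2ms sample */
	rtt_add_sample(&s, 1800);	/* 1.8ms sample */
	printf("RTT=%llu.%03llums\n", s.rtt / 1000, s.rtt % 1000);	/* prints RTT=1.500ms */
	return 0;
}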