/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/slab.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>


/**
 *	inet_twsk_unhash - unhash a timewait socket from established hash
 *	@tw: timewait socket
 *
 *	unhash a timewait socket from established hash, if hashed.
 *	ehash lock must be held by caller.
 *	Returns 1 if caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_unhash(struct inet_timewait_sock *tw)
{
	if (hlist_nulls_unhashed(&tw->tw_node))
		return 0;

	hlist_nulls_del_rcu(&tw->tw_node);
	sk_nulls_node_init(&tw->tw_node);
	/*
	 * We cannot call inet_twsk_put() ourselves under lock,
	 * caller must call it for us.
	 */
	return 1;
}

/**
 *	inet_twsk_bind_unhash - unhash a timewait socket from bind hash
 *	@tw: timewait socket
 *	@hashinfo: hashinfo pointer
 *
 *	unhash a timewait socket from bind hash, if hashed.
 *	bind hash lock must be held by caller.
 *	Returns 1 if caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
			  struct inet_hashinfo *hashinfo)
{
	struct inet_bind_bucket *tb = tw->tw_tb;

	if (!tb)
		return 0;

	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	/*
	 * We cannot call inet_twsk_put() ourselves under lock,
	 * caller must call it for us.
	 */
	return 1;
}

/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
			     struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	int refcnt;
	/* Unlink from established hashes. */
	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

	spin_lock(lock);
	refcnt = inet_twsk_unhash(tw);
	spin_unlock(lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
			hashinfo->bhash_size)];

	spin_lock(&bhead->lock);
	refcnt += inet_twsk_bind_unhash(tw, hashinfo);
	spin_unlock(&bhead->lock);

#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
	}
#endif
	while (refcnt) {
		inet_twsk_put(tw);
		refcnt--;
	}
}

static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
{
	struct module *owner = tw->tw_prot->owner;
	twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
	pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
	release_net(twsk_net(tw));
	kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
	module_put(owner);
}

void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (atomic_dec_and_test(&tw->tw_refcnt))
		inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;
	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note, that any socket with inet->inet_num != 0 MUST be bound in
	   binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	WARN_ON(!icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	spin_lock(lock);

	/*
	 * Step 2: Hash TW into TIMEWAIT chain.
	 * Should be done before removing sk from established chain
	 * because readers are lockless and search established first.
	 */
	inet_twsk_add_node_rcu(tw, &ehead->twchain);

	/* Step 3: Remove SK from established hash. */
	if (__sk_nulls_del_node_init_rcu(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	/*
	 * Notes :
	 * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
	 * - We add one reference for the bhash link
	 * - We add one reference for the ehash link
	 * - We want this refcnt update done before allowing other
	 *   threads to find this tw in ehash chain.
	 */
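	/* The third reference is, judging by the usual call chain,
	 * held for the caller: tcp_time_wait() is expected to release
	 * it with inet_twsk_put() once the timewait socket has been
	 * scheduled.  That is an inference from the call sites, not
	 * something enforced here.
	 */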
	atomic_add(1 + 1 + 1, &tw->tw_refcnt);

	spin_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);

struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 GFP_ATOMIC);
	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		kmemcheck_annotate_bitfield(tw, flags);

		/* Give us an identity. */
		tw->tw_daddr        = inet->inet_daddr;
		tw->tw_rcv_saddr    = inet->inet_rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_num          = inet->inet_num;
		tw->tw_state        = TCP_TIME_WAIT;
		tw->tw_substate     = state;
		tw->tw_sport        = inet->inet_sport;
		tw->tw_dport        = inet->inet_dport;
		tw->tw_family       = sk->sk_family;
		tw->tw_reuse        = sk->sk_reuse;
		tw->tw_hash         = sk->sk_hash;
		tw->tw_ipv6only     = 0;
		tw->tw_transparent  = inet->transparent;
		tw->tw_prot         = sk->sk_prot_creator;
		twsk_net_set(tw, hold_net(sock_net(sk)));
		/*
		 * Because we use RCU lookups, we should not set tw_refcnt
		 * to a non-null value before everything is set up for this
		 * timewait socket.
		 */
		atomic_set(&tw->tw_refcnt, 0);
		inet_twsk_dead_node_init(tw);
		__module_get(tw->tw_prot->owner);
	}

	return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);

/* Returns non-zero if quota exceeded.  */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version where the lock
	 * was released after detaching the chain. It was racy,
	 * because tw buckets are scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and this is common with softnet,
	 * because soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
#endif
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
#endif
	return ret;
}

void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
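	/* If the kill quota is exceeded before this slot is fully
	 * purged, mark the slot in thread_slots and defer the rest of
	 * the work to the workqueue (inet_twdr_twkill_work), keeping
	 * the timer armed.
	 */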
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left?  */
		if (twdr->tw_count)
			need_timer = 1;
		twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	}
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_hangman);

void inet_twdr_twkill_work(struct work_struct *work)
{
	struct inet_timewait_death_row *twdr =
		container_of(work, struct inet_timewait_death_row, twkill_work);
	int i;

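	/* thread_slots is a bitmask with one bit per kill slot; the
	 * check below makes sure its type is wide enough to hold a bit
	 * for every slot.
	 */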
	BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
			(sizeof(twdr->thread_slots) * 8));

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}
EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}
EXPORT_SYMBOL(inet_twsk_deschedule);

void inet_twsk_schedule(struct inet_timewait_sock *tw,
		       struct inet_timewait_death_row *twdr,
		       const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost (probability of such event
	 * is p^(N+1), where p is probability to lose single packet and
	 * time to detect the loss is about RTO*(2^N - 1) with exponential
	 * backoff). Normal timewait length is calculated so that we
	 * wait at least for one retransmitted FIN (maximal RTO is 120sec).
	 * [ BTW Linux, following BSD, violates this requirement waiting
	 *   only for 60sec; we should wait at least 240 secs.
	 *   Well, 240 consumes too many resources 8)
	 * ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if peer understands PAWS, we
	 * kill tw bucket after 3.5*RTO (it is important that this number
	 * is greater than TS tick!) and detect old duplicates with help
	 * of PAWS.
	 */
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;
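	/* For illustration (hypothetical numbers): if
	 * INET_TWDR_RECYCLE_TICK were 5, the line above computes
	 * ceil(timeo / 32), so timeo == 100 jiffies would give
	 * slot = (100 + 31) >> 5 = 4.
	 */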

	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = DIV_ROUND_UP(timeo, twdr->period);
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					      (slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twsk_schedule);

void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

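	/* Sweep the recycle wheel: purge every slot whose deadline has
	 * passed, then re-arm the timer for the first non-empty slot
	 * that is still in the future.
	 */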
	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
				NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
#endif
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
#endif
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);

void inet_twsk_purge(struct inet_hashinfo *hashinfo,
		     struct inet_timewait_death_row *twdr, int family)
{
	struct inet_timewait_sock *tw;
	struct sock *sk;
	struct hlist_nulls_node *node;
	unsigned int slot;

	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
		rcu_read_lock();
restart:
		sk_nulls_for_each_rcu(sk, node, &head->twchain) {
			tw = inet_twsk(sk);
			if ((tw->tw_family != family) ||
				atomic_read(&twsk_net(tw)->count))
				continue;

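			/* A refcnt of zero means the socket is being
			 * freed or is not yet fully published; skip it.
			 * The recheck below guards against the object
			 * having been recycled for another connection
			 * in the meantime (the slab presumably being
			 * SLAB_DESTROY_BY_RCU).
			 */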
			if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
				continue;

			if (unlikely((tw->tw_family != family) ||
				     atomic_read(&twsk_net(tw)->count))) {
				inet_twsk_put(tw);
				goto restart;
			}

			rcu_read_unlock();
			inet_twsk_deschedule(tw, twdr);
			inet_twsk_put(tw);
			goto restart_rcu;
		}
		/* If the nulls value we got at the end of this lookup is
		 * not the expected one, we must restart lookup.
		 * We probably met an item that was moved to another chain.
		 */
		if (get_nulls_value(node) != slot)
			goto restart;
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);