Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
[linux-2.6.git] / net / core / neighbour.c
1 /*
2  *      Generic address resolution entity
3  *
4  *      Authors:
5  *      Pedro Roque             <roque@di.fc.ul.pt>
6  *      Alexey Kuznetsov        <kuznet@ms2.inr.ac.ru>
7  *
8  *      This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *      Fixes:
14  *      Vitaly E. Lavrov        releasing NULL neighbor in neigh_add.
15  *      Harald Welte            Add neighbour cache statistics like rtstat
16  */
17
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/netdevice.h>
23 #include <linux/proc_fs.h>
24 #ifdef CONFIG_SYSCTL
25 #include <linux/sysctl.h>
26 #endif
27 #include <linux/times.h>
28 #include <net/net_namespace.h>
29 #include <net/neighbour.h>
30 #include <net/dst.h>
31 #include <net/sock.h>
32 #include <net/netevent.h>
33 #include <net/netlink.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/random.h>
36 #include <linux/string.h>
37 #include <linux/log2.h>
38
/* Debug verbosity: NEIGH_PRINTKn compiles to a real printk only when
 * NEIGH_DEBUG >= n; otherwise it expands to a no-op statement.
 */
#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

/* Proxy-neighbour hash table has PNEIGH_HASHMASK + 1 (16) buckets. */
#define PNEIGH_HASHMASK         0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

/* Head of the global list of registered neighbour tables. */
static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif
67
68 /*
69    Neighbour hash table buckets are protected with rwlock tbl->lock.
70
71    - All the scans/updates to hash buckets MUST be made under this lock.
72    - NOTHING clever should be made under this lock: no callbacks
73      to protocol backends, no attempts to send something to network.
74      It will result in deadlocks, if backend/driver wants to use neighbour
75      cache.
76    - If the entry requires some non-trivial actions, increase
77      its reference count and release table lock.
78
79    Neighbour entries are protected:
80    - with reference count.
81    - with rwlock neigh->lock
82
83    Reference count prevents destruction.
84
85    neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
87     - timer
88     - resolution queue
89
90    Again, nothing clever shall be made under neigh->lock,
91    the most complicated procedure, which we allow is dev->hard_header.
92    It is supposed, that dev->hard_header is simplistic and does
93    not make callbacks to neighbour tables.
94
   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process context.
 */
98
99 static DEFINE_RWLOCK(neigh_tbl_lock);
100
/* Output handler installed on dead or unresolvable entries: drop the
 * packet and report the network as down.
 */
static int neigh_blackhole(struct sk_buff *skb)
{
        kfree_skb(skb);
        return -ENETDOWN;
}
106
/* Tear down an entry that has been unlinked from its table: run the
 * protocol's optional per-entry cleanup hook, notify netlink listeners
 * of the deletion (RTM_DELNEIGH), and drop the table's reference.
 */
static void neigh_cleanup_and_release(struct neighbour *neigh)
{
        if (neigh->parms->neigh_cleanup)
                neigh->parms->neigh_cleanup(neigh);

        __neigh_notify(neigh, RTM_DELNEIGH, 0);
        neigh_release(neigh);
}
115
/*
 * Pick a randomized reachable-time value uniformly distributed in the
 * interval (1/2)*base ... (3/2)*base.  This matches the default IPv6
 * behaviour and is deliberately not overridable.  Returns 0 when base
 * is 0.
 */
unsigned long neigh_rand_reach_time(unsigned long base)
{
        unsigned long half;

        if (!base)
                return 0;

        half = base >> 1;
        return (net_random() % base) + half;
}
126
127
/* Synchronous garbage collection, run when the table is over its
 * gc_thresh limits (see neigh_alloc).  Walks every hash bucket under
 * the table write lock and evicts entries that are both unreferenced
 * (refcnt == 1, i.e. only the table holds them) and not permanent.
 * Returns nonzero if at least one entry was freed.
 */
static int neigh_forced_gc(struct neigh_table *tbl)
{
        int shrunk = 0;
        int i;

        NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

        write_lock_bh(&tbl->lock);
        for (i = 0; i <= tbl->hash_mask; i++) {
                struct neighbour *n, **np;

                np = &tbl->hash_buckets[i];
                while ((n = *np) != NULL) {
                        /* Neighbour record may be discarded if:
                         * - nobody refers to it.
                         * - it is not permanent
                         */
                        write_lock(&n->lock);
                        if (atomic_read(&n->refcnt) == 1 &&
                            !(n->nud_state & NUD_PERMANENT)) {
                                *np     = n->next;
                                n->dead = 1;
                                shrunk  = 1;
                                write_unlock(&n->lock);
                                neigh_cleanup_and_release(n);
                                continue;
                        }
                        write_unlock(&n->lock);
                        np = &n->next;
                }
        }

        /* Remember when we last flushed so neigh_alloc can rate-limit
         * forced GC to at most once per 5 seconds below gc_thresh3.
         */
        tbl->last_flush = jiffies;

        write_unlock_bh(&tbl->lock);

        return shrunk;
}
166
/* Arm the entry's state-machine timer for @when, taking a reference
 * that the timer handler is responsible for dropping.  A timer that is
 * already pending indicates a state-machine bug, so complain loudly.
 *
 * NOTE(review): if mod_timer() does return nonzero (timer was already
 * pending) the reference taken here is never dropped; this looks like a
 * deliberate "cannot happen" trade-off — confirm against callers.
 */
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
        neigh_hold(n);
        if (unlikely(mod_timer(&n->timer, when))) {
                printk("NEIGH: BUG, double timer add, state is %x\n",
                       n->nud_state);
                dump_stack();
        }
}
176
177 static int neigh_del_timer(struct neighbour *n)
178 {
179         if ((n->nud_state & NUD_IN_TIMER) &&
180             del_timer(&n->timer)) {
181                 neigh_release(n);
182                 return 1;
183         }
184         return 0;
185 }
186
187 static void pneigh_queue_purge(struct sk_buff_head *list)
188 {
189         struct sk_buff *skb;
190
191         while ((skb = skb_dequeue(list)) != NULL) {
192                 dev_put(skb->dev);
193                 kfree_skb(skb);
194         }
195 }
196
/* Remove all cache entries belonging to @dev (or every entry when @dev
 * is NULL).  Caller must hold tbl->lock for writing.  Entries still
 * referenced elsewhere cannot be freed immediately; they are unlinked,
 * marked dead, stripped of timers and queued packets, and left to be
 * destroyed when the last reference goes away.
 */
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
        int i;

        for (i = 0; i <= tbl->hash_mask; i++) {
                struct neighbour *n, **np = &tbl->hash_buckets[i];

                while ((n = *np) != NULL) {
                        if (dev && n->dev != dev) {
                                np = &n->next;
                                continue;
                        }
                        /* Unlink first, then quiesce under the entry lock. */
                        *np = n->next;
                        write_lock(&n->lock);
                        neigh_del_timer(n);
                        n->dead = 1;

                        if (atomic_read(&n->refcnt) != 1) {
                                /* The most unpleasant situation.
                                   We must destroy neighbour entry,
                                   but someone still uses it.

                                   The destroy will be delayed until
                                   the last user releases us, but
                                   we must kill timers etc. and move
                                   it to safe state.
                                 */
                                skb_queue_purge(&n->arp_queue);
                                n->output = neigh_blackhole;
                                if (n->nud_state & NUD_VALID)
                                        n->nud_state = NUD_NOARP;
                                else
                                        n->nud_state = NUD_NONE;
                                NEIGH_PRINTK2("neigh %p is stray.\n", n);
                        }
                        write_unlock(&n->lock);
                        neigh_cleanup_and_release(n);
                }
        }
}
237
/* Flush all cache entries for @dev after its hardware address changed.
 * Proxy entries and the proxy queue are intentionally left alone
 * (contrast with neigh_ifdown below).
 */
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev);
        write_unlock_bh(&tbl->lock);
}
244
/* Device is going down: flush its cache entries AND its proxy entries,
 * then stop the proxy timer and drop any packets still queued for
 * proxying.  Always returns 0.
 */
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev);
        pneigh_ifdown(tbl, dev);
        write_unlock_bh(&tbl->lock);

        /* Safe outside tbl->lock: the timer only requeues proxy work. */
        del_timer_sync(&tbl->proxy_timer);
        pneigh_queue_purge(&tbl->proxy_queue);
        return 0;
}
256
/* Allocate a new (not yet hashed) neighbour entry for @tbl.
 *
 * GC policy: above gc_thresh3, or above gc_thresh2 with no flush in the
 * last 5 seconds, a forced GC pass is attempted first; if it frees
 * nothing and we are still at/over gc_thresh3, the allocation is
 * refused.  Returns NULL on failure.
 *
 * The entry starts with refcnt 1 and dead = 1: "dead" here means "not
 * yet inserted into the hash table" — neigh_create clears it on insert.
 */
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
        struct neighbour *n = NULL;
        unsigned long now = jiffies;
        int entries;

        entries = atomic_inc_return(&tbl->entries) - 1;
        if (entries >= tbl->gc_thresh3 ||
            (entries >= tbl->gc_thresh2 &&
             time_after(now, tbl->last_flush + 5 * HZ))) {
                if (!neigh_forced_gc(tbl) &&
                    entries >= tbl->gc_thresh3)
                        goto out_entries;
        }

        n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
        if (!n)
                goto out_entries;

        skb_queue_head_init(&n->arp_queue);
        rwlock_init(&n->lock);
        n->updated        = n->used = now;
        n->nud_state      = NUD_NONE;
        n->output         = neigh_blackhole;
        n->parms          = neigh_parms_clone(&tbl->parms);
        setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

        NEIGH_CACHE_STAT_INC(tbl, allocs);
        n->tbl            = tbl;
        atomic_set(&n->refcnt, 1);
        n->dead           = 1;
out:
        return n;

out_entries:
        /* Undo the optimistic increment taken above. */
        atomic_dec(&tbl->entries);
        goto out;
}
295
296 static struct neighbour **neigh_hash_alloc(unsigned int entries)
297 {
298         unsigned long size = entries * sizeof(struct neighbour *);
299         struct neighbour **ret;
300
301         if (size <= PAGE_SIZE) {
302                 ret = kzalloc(size, GFP_ATOMIC);
303         } else {
304                 ret = (struct neighbour **)
305                       __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
306         }
307         return ret;
308 }
309
310 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
311 {
312         unsigned long size = entries * sizeof(struct neighbour *);
313
314         if (size <= PAGE_SIZE)
315                 kfree(hash);
316         else
317                 free_pages((unsigned long)hash, get_order(size));
318 }
319
/* Grow the hash table to @new_entries buckets (must be a power of two).
 * Caller must hold tbl->lock for writing.  A fresh hash_rnd is drawn so
 * every entry is rehashed into the new array; on allocation failure the
 * old table is simply kept.
 */
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
        struct neighbour **new_hash, **old_hash;
        unsigned int i, new_hash_mask, old_entries;

        NEIGH_CACHE_STAT_INC(tbl, hash_grows);

        BUG_ON(!is_power_of_2(new_entries));
        new_hash = neigh_hash_alloc(new_entries);
        if (!new_hash)
                return;

        old_entries = tbl->hash_mask + 1;
        new_hash_mask = new_entries - 1;
        old_hash = tbl->hash_buckets;

        /* New random salt: hash values below differ from the old table's. */
        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
        for (i = 0; i < old_entries; i++) {
                struct neighbour *n, *next;

                for (n = old_hash[i]; n; n = next) {
                        unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

                        hash_val &= new_hash_mask;
                        next = n->next;

                        n->next = new_hash[hash_val];
                        new_hash[hash_val] = n;
                }
        }
        tbl->hash_buckets = new_hash;
        tbl->hash_mask = new_hash_mask;

        neigh_hash_free(old_hash, old_entries);
}
355
356 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
357                                struct net_device *dev)
358 {
359         struct neighbour *n;
360         int key_len = tbl->key_len;
361         u32 hash_val;
362
363         NEIGH_CACHE_STAT_INC(tbl, lookups);
364
365         read_lock_bh(&tbl->lock);
366         hash_val = tbl->hash(pkey, dev);
367         for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
368                 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
369                         neigh_hold(n);
370                         NEIGH_CACHE_STAT_INC(tbl, hits);
371                         break;
372                 }
373         }
374         read_unlock_bh(&tbl->lock);
375         return n;
376 }
377
/* Like neigh_lookup but matches on (@pkey, @net) alone, ignoring the
 * device (hash is computed with dev == NULL).  On a hit the entry is
 * returned with an extra reference the caller must release; NULL when
 * not found.
 */
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
                                     const void *pkey)
{
        struct neighbour *n;
        int key_len = tbl->key_len;
        u32 hash_val;

        NEIGH_CACHE_STAT_INC(tbl, lookups);

        read_lock_bh(&tbl->lock);
        hash_val = tbl->hash(pkey, NULL);
        for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
                if (!memcmp(n->primary_key, pkey, key_len) &&
                    net_eq(dev_net(n->dev), net)) {
                        neigh_hold(n);
                        NEIGH_CACHE_STAT_INC(tbl, hits);
                        break;
                }
        }
        read_unlock_bh(&tbl->lock);
        return n;
}
400
/* Create (or find) the entry keyed by (@pkey, @dev) in @tbl.  Runs the
 * protocol constructor and the device's neigh_setup hook on the new
 * entry before inserting it.  Returns the entry with a reference held,
 * or an ERR_PTR on failure.  If a concurrent creator won the race, the
 * existing entry is returned instead and the new one is released.
 */
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
                               struct net_device *dev)
{
        u32 hash_val;
        int key_len = tbl->key_len;
        int error;
        struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

        if (!n) {
                rc = ERR_PTR(-ENOBUFS);
                goto out;
        }

        memcpy(n->primary_key, pkey, key_len);
        n->dev = dev;
        dev_hold(dev);

        /* Protocol specific setup. */
        if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
                rc = ERR_PTR(error);
                goto out_neigh_release;
        }

        /* Device specific setup. */
        if (n->parms->neigh_setup &&
            (error = n->parms->neigh_setup(n)) < 0) {
                rc = ERR_PTR(error);
                goto out_neigh_release;
        }

        /* Backdate confirmation so the entry is immediately "stale". */
        n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

        write_lock_bh(&tbl->lock);

        if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
                neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

        /* Hash is computed only after any grow: growing rerolls hash_rnd. */
        hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

        if (n->parms->dead) {
                rc = ERR_PTR(-EINVAL);
                goto out_tbl_unlock;
        }

        /* Re-check for a racing insert now that we hold the write lock. */
        for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
                if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
                        neigh_hold(n1);
                        rc = n1;
                        goto out_tbl_unlock;
                }
        }

        n->next = tbl->hash_buckets[hash_val];
        tbl->hash_buckets[hash_val] = n;
        n->dead = 0;
        neigh_hold(n);
        write_unlock_bh(&tbl->lock);
        NEIGH_PRINTK2("neigh %p is created.\n", n);
        rc = n;
out:
        return rc;
out_tbl_unlock:
        write_unlock_bh(&tbl->lock);
out_neigh_release:
        neigh_release(n);
        goto out;
}
468
/* Lockless proxy-entry lookup: folds the last 4 bytes of the key into
 * a 4-bit bucket index and scans that chain for a (key, net) match.
 * An entry with a NULL dev acts as a wildcard and matches any device.
 *
 * NOTE(review): no locking is taken here, so the caller is expected to
 * hold tbl->lock — confirm against callers.  Also assumes key_len >= 4
 * and that the possibly-unaligned u32 load from the key is acceptable.
 */
struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
                struct net *net, const void *pkey, struct net_device *dev)
{
        struct pneigh_entry *n;
        int key_len = tbl->key_len;
        u32 hash_val = *(u32 *)(pkey + key_len - 4);

        hash_val ^= (hash_val >> 16);
        hash_val ^= hash_val >> 8;
        hash_val ^= hash_val >> 4;
        hash_val &= PNEIGH_HASHMASK;

        for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
                if (!memcmp(n->key, pkey, key_len) &&
                    (pneigh_net(n) == net) &&
                    (n->dev == dev || !n->dev))
                        break;
        }

        return n;
}
490
/* Find the proxy entry for (@pkey, @net, @dev), optionally creating it
 * when @creat is nonzero.  Creation may sleep (GFP_KERNEL) and must run
 * under RTNL, which serializes concurrent creators.  Returns NULL when
 * not found and not creating, or when creation fails.
 */
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
                                    struct net *net, const void *pkey,
                                    struct net_device *dev, int creat)
{
        struct pneigh_entry *n;
        int key_len = tbl->key_len;
        /* Same 4-bit bucket hash as __pneigh_lookup / pneigh_delete. */
        u32 hash_val = *(u32 *)(pkey + key_len - 4);

        hash_val ^= (hash_val >> 16);
        hash_val ^= hash_val >> 8;
        hash_val ^= hash_val >> 4;
        hash_val &= PNEIGH_HASHMASK;

        read_lock_bh(&tbl->lock);

        for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
                if (!memcmp(n->key, pkey, key_len) &&
                    net_eq(pneigh_net(n), net) &&
                    (n->dev == dev || !n->dev)) {
                        read_unlock_bh(&tbl->lock);
                        goto out;
                }
        }
        read_unlock_bh(&tbl->lock);
        n = NULL;
        if (!creat)
                goto out;

        ASSERT_RTNL();

        n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
        if (!n)
                goto out;

#ifdef CONFIG_NET_NS
        n->net = hold_net(net);
#endif
        memcpy(n->key, pkey, key_len);
        n->dev = dev;
        if (dev)
                dev_hold(dev);

        if (tbl->pconstructor && tbl->pconstructor(n)) {
                /* Constructor failed: unwind the dev and net references. */
                if (dev)
                        dev_put(dev);
                release_net(net);
                kfree(n);
                n = NULL;
                goto out;
        }

        write_lock_bh(&tbl->lock);
        n->next = tbl->phash_buckets[hash_val];
        tbl->phash_buckets[hash_val] = n;
        write_unlock_bh(&tbl->lock);
out:
        return n;
}
549
550
/* Remove and free the proxy entry exactly matching (@pkey, @net, @dev).
 * Unlike the lookups above, a NULL-dev entry is NOT treated as a
 * wildcard here — the device must match exactly.  Returns 0 on success
 * or -ENOENT when no such entry exists.
 */
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
                  struct net_device *dev)
{
        struct pneigh_entry *n, **np;
        int key_len = tbl->key_len;
        u32 hash_val = *(u32 *)(pkey + key_len - 4);

        hash_val ^= (hash_val >> 16);
        hash_val ^= hash_val >> 8;
        hash_val ^= hash_val >> 4;
        hash_val &= PNEIGH_HASHMASK;

        write_lock_bh(&tbl->lock);
        for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
             np = &n->next) {
                if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
                    net_eq(pneigh_net(n), net)) {
                        *np = n->next;
                        /* Unlock before calling out: pdestructor may sleep. */
                        write_unlock_bh(&tbl->lock);
                        if (tbl->pdestructor)
                                tbl->pdestructor(n);
                        if (n->dev)
                                dev_put(n->dev);
                        release_net(pneigh_net(n));
                        kfree(n);
                        return 0;
                }
        }
        write_unlock_bh(&tbl->lock);
        return -ENOENT;
}
582
/* Remove every proxy entry bound to @dev (all entries when @dev is
 * NULL).  Caller must hold tbl->lock for writing (see neigh_ifdown).
 * Always returns -ENOENT; the return value is ignored by callers.
 */
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
        struct pneigh_entry *n, **np;
        u32 h;

        for (h = 0; h <= PNEIGH_HASHMASK; h++) {
                np = &tbl->phash_buckets[h];
                while ((n = *np) != NULL) {
                        if (!dev || n->dev == dev) {
                                *np = n->next;
                                if (tbl->pdestructor)
                                        tbl->pdestructor(n);
                                if (n->dev)
                                        dev_put(n->dev);
                                release_net(pneigh_net(n));
                                kfree(n);
                                continue;
                        }
                        np = &n->next;
                }
        }
        return -ENOENT;
}
606
607 static void neigh_parms_destroy(struct neigh_parms *parms);
608
609 static inline void neigh_parms_put(struct neigh_parms *parms)
610 {
611         if (atomic_dec_and_test(&parms->refcnt))
612                 neigh_parms_destroy(parms);
613 }
614
/*
 * Final destruction of a neighbour entry, invoked when the last
 * reference is dropped.  The entry must already be unlinked from its
 * table (dead != 0); a live entry is a bug and is left alone after a
 * warning.  Detaches and neutralizes all cached hardware headers,
 * drains queued packets, and releases the device and parms references.
 */
void neigh_destroy(struct neighbour *neigh)
{
        struct hh_cache *hh;

        NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

        if (!neigh->dead) {
                printk(KERN_WARNING
                       "Destroying alive neighbour %p\n", neigh);
                dump_stack();
                return;
        }

        /* Timer should have been cancelled when the entry was unlinked. */
        if (neigh_del_timer(neigh))
                printk(KERN_WARNING "Impossible event.\n");

        while ((hh = neigh->hh) != NULL) {
                neigh->hh = hh->hh_next;
                hh->hh_next = NULL;

                /* Redirect lingering users of this hh to the blackhole. */
                write_seqlock_bh(&hh->hh_lock);
                hh->hh_output = neigh_blackhole;
                write_sequnlock_bh(&hh->hh_lock);
                if (atomic_dec_and_test(&hh->hh_refcnt))
                        kfree(hh);
        }

        skb_queue_purge(&neigh->arp_queue);

        dev_put(neigh->dev);
        neigh_parms_put(neigh->parms);

        NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

        atomic_dec(&neigh->tbl->entries);
        kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
656
657 /* Neighbour state is suspicious;
658    disable fast path.
659
660    Called with write_locked neigh.
661  */
662 static void neigh_suspect(struct neighbour *neigh)
663 {
664         struct hh_cache *hh;
665
666         NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
667
668         neigh->output = neigh->ops->output;
669
670         for (hh = neigh->hh; hh; hh = hh->hh_next)
671                 hh->hh_output = neigh->ops->output;
672 }
673
674 /* Neighbour state is OK;
675    enable fast path.
676
677    Called with write_locked neigh.
678  */
679 static void neigh_connect(struct neighbour *neigh)
680 {
681         struct hh_cache *hh;
682
683         NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
684
685         neigh->output = neigh->ops->connected_output;
686
687         for (hh = neigh->hh; hh; hh = hh->hh_next)
688                 hh->hh_output = neigh->ops->hh_output;
689 }
690
/* Periodic garbage collector.  Each run processes exactly ONE hash
 * bucket (round-robin via hash_chain_gc), evicting unreferenced entries
 * that are FAILED or idle past gc_staletime, and every ~300s rerolls
 * each parms' randomized reachable_time.  Runs in timer (softirq)
 * context, hence write_lock without _bh.
 */
static void neigh_periodic_timer(unsigned long arg)
{
        struct neigh_table *tbl = (struct neigh_table *)arg;
        struct neighbour *n, **np;
        unsigned long expire, now = jiffies;

        NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

        write_lock(&tbl->lock);

        /*
         *      periodically recompute ReachableTime from random function
         */

        if (time_after(now, tbl->last_rand + 300 * HZ)) {
                struct neigh_parms *p;
                tbl->last_rand = now;
                for (p = &tbl->parms; p; p = p->next)
                        p->reachable_time =
                                neigh_rand_reach_time(p->base_reachable_time);
        }

        np = &tbl->hash_buckets[tbl->hash_chain_gc];
        tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

        while ((n = *np) != NULL) {
                unsigned int state;

                write_lock(&n->lock);

                state = n->nud_state;
                if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
                        write_unlock(&n->lock);
                        goto next_elt;
                }

                if (time_before(n->used, n->confirmed))
                        n->used = n->confirmed;

                if (atomic_read(&n->refcnt) == 1 &&
                    (state == NUD_FAILED ||
                     time_after(now, n->used + n->parms->gc_staletime))) {
                        *np = n->next;
                        n->dead = 1;
                        write_unlock(&n->lock);
                        neigh_cleanup_and_release(n);
                        continue;
                }
                write_unlock(&n->lock);

next_elt:
                np = &n->next;
        }

        /* Cycle through all hash buckets every base_reachable_time/2 ticks.
         * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
         * base_reachable_time.
         */
        expire = tbl->parms.base_reachable_time >> 1;
        expire /= (tbl->hash_mask + 1);
        if (!expire)
                expire = 1;

        if (expire>HZ)
                mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
        else
                mod_timer(&tbl->gc_timer, now + expire);

        write_unlock(&tbl->lock);
}
761
762 static __inline__ int neigh_max_probes(struct neighbour *n)
763 {
764         struct neigh_parms *p = n->parms;
765         return (n->nud_state & NUD_PROBE ?
766                 p->ucast_probes :
767                 p->ucast_probes + p->app_probes + p->mcast_probes);
768 }
769
/* Per-entry timer: drives the NUD state machine transitions
 * REACHABLE -> DELAY/STALE, DELAY -> REACHABLE/PROBE, and gives up
 * (FAILED) once the probe budget is exhausted.  The lock is dropped
 * around error_report() and solicit() callbacks, so state is re-read
 * afterwards.  Releases the reference taken when the timer was armed.
 */
static void neigh_timer_handler(unsigned long arg)
{
        unsigned long now, next;
        struct neighbour *neigh = (struct neighbour *)arg;
        unsigned state;
        int notify = 0;

        write_lock(&neigh->lock);

        state = neigh->nud_state;
        now = jiffies;
        next = now + HZ;

        if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
                printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
                goto out;
        }

        if (state & NUD_REACHABLE) {
                if (time_before_eq(now,
                                   neigh->confirmed + neigh->parms->reachable_time)) {
                        NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
                        next = neigh->confirmed + neigh->parms->reachable_time;
                } else if (time_before_eq(now,
                                          neigh->used + neigh->parms->delay_probe_time)) {
                        NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
                        neigh->nud_state = NUD_DELAY;
                        neigh->updated = jiffies;
                        neigh_suspect(neigh);
                        next = now + neigh->parms->delay_probe_time;
                } else {
                        NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
                        neigh->nud_state = NUD_STALE;
                        neigh->updated = jiffies;
                        neigh_suspect(neigh);
                        notify = 1;
                }
        } else if (state & NUD_DELAY) {
                if (time_before_eq(now,
                                   neigh->confirmed + neigh->parms->delay_probe_time)) {
                        /* Confirmation arrived while delayed: back to fast path. */
                        NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
                        neigh->nud_state = NUD_REACHABLE;
                        neigh->updated = jiffies;
                        neigh_connect(neigh);
                        notify = 1;
                        next = neigh->confirmed + neigh->parms->reachable_time;
                } else {
                        NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
                        neigh->nud_state = NUD_PROBE;
                        neigh->updated = jiffies;
                        atomic_set(&neigh->probes, 0);
                        next = now + neigh->parms->retrans_time;
                }
        } else {
                /* NUD_PROBE|NUD_INCOMPLETE */
                next = now + neigh->parms->retrans_time;
        }

        if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
            atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
                struct sk_buff *skb;

                neigh->nud_state = NUD_FAILED;
                neigh->updated = jiffies;
                notify = 1;
                NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
                NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

                /* It is very thin place. report_unreachable is very complicated
                   routine. Particularly, it can hit the same neighbour entry!

                   So that, we try to be accurate and avoid dead loop. --ANK
                 */
                while (neigh->nud_state == NUD_FAILED &&
                       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
                        write_unlock(&neigh->lock);
                        neigh->ops->error_report(neigh, skb);
                        write_lock(&neigh->lock);
                }
                skb_queue_purge(&neigh->arp_queue);
        }

        if (neigh->nud_state & NUD_IN_TIMER) {
                /* Clamp to at least HZ/2 away to avoid busy rescheduling. */
                if (time_before(next, jiffies + HZ/2))
                        next = jiffies + HZ/2;
                if (!mod_timer(&neigh->timer, next))
                        neigh_hold(neigh);
        }
        if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
                struct sk_buff *skb = skb_peek(&neigh->arp_queue);
                /* keep skb alive even if arp_queue overflows */
                if (skb)
                        skb = skb_copy(skb, GFP_ATOMIC);
                write_unlock(&neigh->lock);
                neigh->ops->solicit(neigh, skb);
                atomic_inc(&neigh->probes);
                if (skb)
                        kfree_skb(skb);
        } else {
out:
                write_unlock(&neigh->lock);
        }

        if (notify)
                neigh_update_notify(neigh);

        neigh_release(neigh);
}
882
/* Kick resolution for @neigh on a transmit attempt.  Returns 0 when the
 * caller may transmit @skb immediately (entry usable), 1 when the skb
 * was queued (or dropped, on immediate failure) pending resolution.
 * Starts the INCOMPLETE probe cycle or the STALE->DELAY transition as
 * needed; when no probe type is configured the entry fails at once.
 */
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
        int rc;
        unsigned long now;

        write_lock_bh(&neigh->lock);

        rc = 0;
        if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
                goto out_unlock_bh;

        now = jiffies;

        if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
                if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
                        atomic_set(&neigh->probes, neigh->parms->ucast_probes);
                        neigh->nud_state     = NUD_INCOMPLETE;
                        neigh->updated = jiffies;
                        neigh_add_timer(neigh, now + 1);
                } else {
                        /* No way to probe: fail immediately and drop the skb. */
                        neigh->nud_state = NUD_FAILED;
                        neigh->updated = jiffies;
                        write_unlock_bh(&neigh->lock);

                        if (skb)
                                kfree_skb(skb);
                        return 1;
                }
        } else if (neigh->nud_state & NUD_STALE) {
                NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
                neigh->nud_state = NUD_DELAY;
                neigh->updated = jiffies;
                neigh_add_timer(neigh,
                                jiffies + neigh->parms->delay_probe_time);
        }

        if (neigh->nud_state == NUD_INCOMPLETE) {
                if (skb) {
                        /* Queue full: drop the oldest packet to make room. */
                        if (skb_queue_len(&neigh->arp_queue) >=
                            neigh->parms->queue_len) {
                                struct sk_buff *buff;
                                buff = neigh->arp_queue.next;
                                __skb_unlink(buff, &neigh->arp_queue);
                                kfree_skb(buff);
                        }
                        __skb_queue_tail(&neigh->arp_queue, skb);
                }
                rc = 1;
        }
out_unlock_bh:
        write_unlock_bh(&neigh->lock);
        return rc;
}
936
937 static void neigh_update_hhs(struct neighbour *neigh)
938 {
939         struct hh_cache *hh;
940         void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
941                 = neigh->dev->header_ops->cache_update;
942
943         if (update) {
944                 for (hh = neigh->hh; hh; hh = hh->hh_next) {
945                         write_seqlock_bh(&hh->hh_lock);
946                         update(hh, neigh->dev, neigh->ha);
947                         write_sequnlock_bh(&hh->hh_lock);
948                 }
949         }
950 }
951
952
953
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
        NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
                                if it is different.
        NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
                                lladdr instead of overriding it
                                if it is different.
                                It also allows retaining the current state
                                if lladdr is unchanged.
        NEIGH_UPDATE_F_ADMIN    means that the change is administrative.

        NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
                                NTF_ROUTER flag.
        NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known
                                to be a router.

   Caller MUST hold a reference count on the entry.
 */
974
/* Generic neighbour-entry update; the state/flag semantics are described
 * in the comment block above.  Returns 0 on success or a negative errno.
 * The caller must hold a reference on @neigh; the entry lock is taken
 * internally and briefly dropped while flushing the arp_queue. */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                 u32 flags)
{
        u8 old;
        int err;
        int notify = 0;
        struct net_device *dev;
        int update_isrouter = 0;

        write_lock_bh(&neigh->lock);

        dev    = neigh->dev;
        old    = neigh->nud_state;
        err    = -EPERM;

        /* Only administrative updates may touch NOARP/PERMANENT entries. */
        if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
            (old & (NUD_NOARP | NUD_PERMANENT)))
                goto out;

        if (!(new & NUD_VALID)) {
                /* Transition to an invalid state: stop timers, demote a
                 * connected entry, and notify only if it used to be valid. */
                neigh_del_timer(neigh);
                if (old & NUD_CONNECTED)
                        neigh_suspect(neigh);
                neigh->nud_state = new;
                err = 0;
                notify = old & NUD_VALID;
                goto out;
        }

        /* Compare new lladdr with cached one */
        if (!dev->addr_len) {
                /* First case: device needs no address. */
                lladdr = neigh->ha;
        } else if (lladdr) {
                /* The second case: if something is already cached
                   and a new address is proposed:
                   - compare new & old
                   - if they are different, check override flag
                 */
                if ((old & NUD_VALID) &&
                    !memcmp(lladdr, neigh->ha, dev->addr_len))
                        lladdr = neigh->ha;
        } else {
                /* No address is supplied; if we know something,
                   use it, otherwise discard the request.
                 */
                err = -EINVAL;
                if (!(old & NUD_VALID))
                        goto out;
                lladdr = neigh->ha;
        }

        if (new & NUD_CONNECTED)
                neigh->confirmed = jiffies;
        neigh->updated = jiffies;

        /* If entry was valid and address is not changed,
           do not change entry state, if new one is STALE.
         */
        err = 0;
        update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
        if (old & NUD_VALID) {
                if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
                        /* Address differs but we may not override: either
                         * mark a connected entry stale (weak override) or
                         * ignore the update entirely. */
                        update_isrouter = 0;
                        if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
                            (old & NUD_CONNECTED)) {
                                lladdr = neigh->ha;
                                new = NUD_STALE;
                        } else
                                goto out;
                } else {
                        if (lladdr == neigh->ha && new == NUD_STALE &&
                            ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
                             (old & NUD_CONNECTED))
                            )
                                new = old;
                }
        }

        if (new != old) {
                /* Re-arm the state timer for states that need one
                 * (REACHABLE gets the full reachable_time budget). */
                neigh_del_timer(neigh);
                if (new & NUD_IN_TIMER)
                        neigh_add_timer(neigh, (jiffies +
                                                ((new & NUD_REACHABLE) ?
                                                 neigh->parms->reachable_time :
                                                 0)));
                neigh->nud_state = new;
        }

        if (lladdr != neigh->ha) {
                /* New link-layer address: propagate it into the cached
                 * hardware headers, and backdate 'confirmed' so that a
                 * non-connected entry gets re-verified promptly. */
                memcpy(&neigh->ha, lladdr, dev->addr_len);
                neigh_update_hhs(neigh);
                if (!(new & NUD_CONNECTED))
                        neigh->confirmed = jiffies -
                                      (neigh->parms->base_reachable_time << 1);
                notify = 1;
        }
        if (new == old)
                goto out;
        if (new & NUD_CONNECTED)
                neigh_connect(neigh);
        else
                neigh_suspect(neigh);
        if (!(old & NUD_VALID)) {
                struct sk_buff *skb;

                /* Again: avoid dead loop if something went wrong */

                /* Entry just became valid: flush packets queued while
                 * resolution was pending.  The lock is dropped around each
                 * transmit, so nud_state is re-checked every iteration. */
                while (neigh->nud_state & NUD_VALID &&
                       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
                        struct neighbour *n1 = neigh;
                        write_unlock_bh(&neigh->lock);
                        /* On shaper/eql skb->dst->neighbour != neigh :( */
                        if (skb->dst && skb->dst->neighbour)
                                n1 = skb->dst->neighbour;
                        n1->output(skb);
                        write_lock_bh(&neigh->lock);
                }
                skb_queue_purge(&neigh->arp_queue);
        }
out:
        if (update_isrouter) {
                neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
                        (neigh->flags | NTF_ROUTER) :
                        (neigh->flags & ~NTF_ROUTER);
        }
        write_unlock_bh(&neigh->lock);

        if (notify)
                neigh_update_notify(neigh);

        return err;
}
1108
1109 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1110                                  u8 *lladdr, void *saddr,
1111                                  struct net_device *dev)
1112 {
1113         struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1114                                                  lladdr || !dev->addr_len);
1115         if (neigh)
1116                 neigh_update(neigh, lladdr, NUD_STALE,
1117                              NEIGH_UPDATE_F_OVERRIDE);
1118         return neigh;
1119 }
1120
/* Attach a cached hardware header of type @protocol to @dst, creating
 * and linking a new hh_cache entry on @n if none exists yet.  Called
 * with the neighbour's lock held (see neigh_resolve_output()). */
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
                          __be16 protocol)
{
        struct hh_cache *hh;
        struct net_device *dev = dst->dev;

        /* Reuse an existing cached header for this protocol if present. */
        for (hh = n->hh; hh; hh = hh->hh_next)
                if (hh->hh_type == protocol)
                        break;

        if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
                seqlock_init(&hh->hh_lock);
                hh->hh_type = protocol;
                atomic_set(&hh->hh_refcnt, 0);
                hh->hh_next = NULL;

                /* Let the device fill in the cached header; a non-zero
                 * return means this device cannot cache it. */
                if (dev->header_ops->cache(n, hh)) {
                        kfree(hh);
                        hh = NULL;
                } else {
                        /* One reference for the neighbour's hh list. */
                        atomic_inc(&hh->hh_refcnt);
                        hh->hh_next = n->hh;
                        n->hh       = hh;
                        if (n->nud_state & NUD_CONNECTED)
                                hh->hh_output = n->ops->hh_output;
                        else
                                hh->hh_output = n->ops->output;
                }
        }
        if (hh) {
                /* And one reference for the dst entry. */
                atomic_inc(&hh->hh_refcnt);
                dst->hh = hh;
        }
}
1155
/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper)
   but resolution has not been made yet.
 */
1160
1161 int neigh_compat_output(struct sk_buff *skb)
1162 {
1163         struct net_device *dev = skb->dev;
1164
1165         __skb_pull(skb, skb_network_offset(skb));
1166
1167         if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1168                             skb->len) < 0 &&
1169             dev->header_ops->rebuild(skb))
1170                 return 0;
1171
1172         return dev_queue_xmit(skb);
1173 }
1174
1175 /* Slow and careful. */
1176
/* Slow, careful output path: trigger/await address resolution, then
 * build the hardware header (initialising the hh cache when the device
 * supports one) and transmit.  Returns 0 when the packet was queued for
 * resolution, the queue_xmit result on transmit, or -EINVAL on error
 * (the skb is freed in that case). */
int neigh_resolve_output(struct sk_buff *skb)
{
        struct dst_entry *dst = skb->dst;
        struct neighbour *neigh;
        int rc = 0;

        if (!dst || !(neigh = dst->neighbour))
                goto discard;

        __skb_pull(skb, skb_network_offset(skb));

        /* neigh_event_send() returns 0 when the entry is usable right
         * now; otherwise it has queued (or dropped) the skb for us. */
        if (!neigh_event_send(neigh, skb)) {
                int err;
                struct net_device *dev = neigh->dev;
                if (dev->header_ops->cache && !dst->hh) {
                        /* dst->hh is re-checked under the write lock so
                         * the cache is initialised only once. */
                        write_lock_bh(&neigh->lock);
                        if (!dst->hh)
                                neigh_hh_init(neigh, dst, dst->ops->protocol);
                        err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                                              neigh->ha, NULL, skb->len);
                        write_unlock_bh(&neigh->lock);
                } else {
                        /* Read lock suffices just to copy neigh->ha. */
                        read_lock_bh(&neigh->lock);
                        err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                                              neigh->ha, NULL, skb->len);
                        read_unlock_bh(&neigh->lock);
                }
                if (err >= 0)
                        rc = neigh->ops->queue_xmit(skb);
                else
                        goto out_kfree_skb;
        }
out:
        return rc;
discard:
        NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
                      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
        rc = -EINVAL;
        kfree_skb(skb);
        goto out;
}
1219
1220 /* As fast as possible without hh cache */
1221
1222 int neigh_connected_output(struct sk_buff *skb)
1223 {
1224         int err;
1225         struct dst_entry *dst = skb->dst;
1226         struct neighbour *neigh = dst->neighbour;
1227         struct net_device *dev = neigh->dev;
1228
1229         __skb_pull(skb, skb_network_offset(skb));
1230
1231         read_lock_bh(&neigh->lock);
1232         err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1233                               neigh->ha, NULL, skb->len);
1234         read_unlock_bh(&neigh->lock);
1235         if (err >= 0)
1236                 err = neigh->ops->queue_xmit(skb);
1237         else {
1238                 err = -EINVAL;
1239                 kfree_skb(skb);
1240         }
1241         return err;
1242 }
1243
/* Timer handler for delayed proxy replies: walk tbl->proxy_queue,
 * process every skb whose scheduled time has arrived, and re-arm the
 * timer for the earliest remaining entry (if any). */
static void neigh_proxy_process(unsigned long arg)
{
        struct neigh_table *tbl = (struct neigh_table *)arg;
        long sched_next = 0;    /* 0 == nothing left to schedule */
        unsigned long now = jiffies;
        struct sk_buff *skb;

        spin_lock(&tbl->proxy_queue.lock);

        skb = tbl->proxy_queue.next;

        /* Open-coded queue walk; the list head terminates the loop. */
        while (skb != (struct sk_buff *)&tbl->proxy_queue) {
                struct sk_buff *back = skb;
                long tdif = NEIGH_CB(back)->sched_next - now;

                skb = skb->next;        /* advance before unlinking 'back' */
                if (tdif <= 0) {
                        struct net_device *dev = back->dev;
                        __skb_unlink(back, &tbl->proxy_queue);
                        /* Re-run the protocol's proxy handler unless the
                         * device went down in the meantime. */
                        if (tbl->proxy_redo && netif_running(dev))
                                tbl->proxy_redo(back);
                        else
                                kfree_skb(back);

                        dev_put(dev);   /* ref taken in pneigh_enqueue() */
                } else if (!sched_next || tdif < sched_next)
                        sched_next = tdif;
        }
        del_timer(&tbl->proxy_timer);
        if (sched_next)
                mod_timer(&tbl->proxy_timer, jiffies + sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
}
1277
/* Queue @skb for a delayed proxy reply after a random delay of up to
 * p->proxy_delay jiffies; the packet is dropped when the proxy queue is
 * over its limit.  NOTE(review): the modulo assumes p->proxy_delay is
 * non-zero -- confirm callers/configuration guarantee this. */
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
                    struct sk_buff *skb)
{
        unsigned long now = jiffies;
        unsigned long sched_next = now + (net_random() % p->proxy_delay);

        if (tbl->proxy_queue.qlen > p->proxy_qlen) {
                kfree_skb(skb);
                return;
        }

        NEIGH_CB(skb)->sched_next = sched_next;
        NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

        spin_lock(&tbl->proxy_queue.lock);
        /* Keep the timer armed for the earliest deadline in the queue. */
        if (del_timer(&tbl->proxy_timer)) {
                if (time_before(tbl->proxy_timer.expires, sched_next))
                        sched_next = tbl->proxy_timer.expires;
        }
        dst_release(skb->dst);
        skb->dst = NULL;
        dev_hold(skb->dev);     /* released in neigh_proxy_process() */
        __skb_queue_tail(&tbl->proxy_queue, skb);
        mod_timer(&tbl->proxy_timer, sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
}
1304
1305 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1306                                                       struct net *net, int ifindex)
1307 {
1308         struct neigh_parms *p;
1309
1310         for (p = &tbl->parms; p; p = p->next) {
1311                 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1312                     (!p->dev && !ifindex))
1313                         return p;
1314         }
1315
1316         return NULL;
1317 }
1318
/* Clone the table's default parameters for @dev and link the copy onto
 * tbl->parms list.  Returns the new neigh_parms (refcnt 1, holding a
 * reference on @dev) or NULL on allocation/neigh_setup failure. */
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
                                      struct neigh_table *tbl)
{
        struct neigh_parms *p, *ref;
        struct net *net;

        net = dev_net(dev);
        /* Use the table-wide defaults (ifindex 0) as the template. */
        ref = lookup_neigh_params(tbl, net, 0);
        if (!ref)
                return NULL;

        p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
        if (p) {
                p->tbl            = tbl;
                atomic_set(&p->refcnt, 1);
                INIT_RCU_HEAD(&p->rcu_head);
                p->reachable_time =
                                neigh_rand_reach_time(p->base_reachable_time);

                /* Give the driver a chance to adjust or veto the parms. */
                if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
                        kfree(p);
                        return NULL;
                }

                dev_hold(dev);
                p->dev = dev;
#ifdef CONFIG_NET_NS
                p->net = hold_net(net);
#endif
                p->sysctl_table = NULL;
                /* Publish onto the table's list under the table lock. */
                write_lock_bh(&tbl->lock);
                p->next         = tbl->parms.next;
                tbl->parms.next = p;
                write_unlock_bh(&tbl->lock);
        }
        return p;
}
1356
1357 static void neigh_rcu_free_parms(struct rcu_head *head)
1358 {
1359         struct neigh_parms *parms =
1360                 container_of(head, struct neigh_parms, rcu_head);
1361
1362         neigh_parms_put(parms);
1363 }
1364
/* Unlink @parms from @tbl's list and schedule its final release after
 * an RCU grace period.  The table's built-in default parms block is
 * never released this way. */
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
        struct neigh_parms **p;

        if (!parms || parms == &tbl->parms)
                return;
        write_lock_bh(&tbl->lock);
        for (p = &tbl->parms.next; *p; p = &(*p)->next) {
                if (*p == parms) {
                        *p = parms->next;
                        parms->dead = 1;
                        write_unlock_bh(&tbl->lock);
                        if (parms->dev)
                                dev_put(parms->dev);
                        /* Drop the list's reference once readers are gone. */
                        call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
                        return;
                }
        }
        write_unlock_bh(&tbl->lock);
        NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
1386
/* Final teardown of a neigh_parms once its refcount reaches zero:
 * release the netns reference and free the structure. */
static void neigh_parms_destroy(struct neigh_parms *parms)
{
        release_net(neigh_parms_net(parms));
        kfree(parms);
}
1392
/* Dedicated lockdep class for the proxy queue's internal lock (used by
 * skb_queue_head_init_class() in neigh_table_init_no_netlink()). */
static struct lock_class_key neigh_table_proxy_queue_class;
1394
/* Set up all internal state of @tbl -- slab cache, per-cpu statistics,
 * procfs entry, hash tables, timers and the proxy queue -- without
 * registering it on the global table list (see neigh_table_init()).
 * Panics on allocation failure, as tables are created at boot/module
 * init time. */
void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
        unsigned long now = jiffies;
        unsigned long phsize;

#ifdef CONFIG_NET_NS
        tbl->parms.net = &init_net;
#endif
        atomic_set(&tbl->parms.refcnt, 1);
        INIT_RCU_HEAD(&tbl->parms.rcu_head);
        tbl->parms.reachable_time =
                          neigh_rand_reach_time(tbl->parms.base_reachable_time);

        if (!tbl->kmem_cachep)
                tbl->kmem_cachep =
                        kmem_cache_create(tbl->id, tbl->entry_size, 0,
                                          SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                          NULL);
        tbl->stats = alloc_percpu(struct neigh_statistics);
        if (!tbl->stats)
                panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
        tbl->pde = proc_create(tbl->id, 0, init_net.proc_net_stat,
                               &neigh_stat_seq_fops);
        if (!tbl->pde)
                panic("cannot create neighbour proc dir entry");
        tbl->pde->data = tbl;
#endif

        /* Start with two hash buckets; the table grows on demand. */
        tbl->hash_mask = 1;
        tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

        phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
        tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

        if (!tbl->hash_buckets || !tbl->phash_buckets)
                panic("cannot allocate neighbour cache hashes");

        /* Randomise the hash to make bucket layout unpredictable. */
        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

        rwlock_init(&tbl->lock);
        setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
        tbl->gc_timer.expires  = now + 1;
        add_timer(&tbl->gc_timer);

        setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
        skb_queue_head_init_class(&tbl->proxy_queue,
                        &neigh_table_proxy_queue_class);

        tbl->last_flush = now;
        tbl->last_rand  = now + tbl->parms.reachable_time * 20;
}
1448
/* Initialise @tbl and register it on the global neigh_tables list.
 * Registering a second table for the same address family is reported
 * (with a stack dump) but not rejected. */
void neigh_table_init(struct neigh_table *tbl)
{
        struct neigh_table *tmp;

        neigh_table_init_no_netlink(tbl);
        write_lock(&neigh_tbl_lock);
        /* Check for an already-registered table of the same family. */
        for (tmp = neigh_tables; tmp; tmp = tmp->next) {
                if (tmp->family == tbl->family)
                        break;
        }
        tbl->next       = neigh_tables;
        neigh_tables    = tbl;
        write_unlock(&neigh_tbl_lock);

        /* Report duplicates outside the lock; printk may sleep-free but
         * there is no reason to hold the list lock while logging. */
        if (unlikely(tmp)) {
                printk(KERN_ERR "NEIGH: Registering multiple tables for "
                       "family %d\n", tbl->family);
                dump_stack();
        }
}
1469
/* Unregister @tbl from the global list and free all of its resources:
 * timers, proxy queue, entries, hashes, procfs entry, statistics and
 * slab cache.  Always returns 0. */
int neigh_table_clear(struct neigh_table *tbl)
{
        struct neigh_table **tp;

        /* It is not clean... Fix it to unload IPv6 module safely */
        del_timer_sync(&tbl->gc_timer);
        del_timer_sync(&tbl->proxy_timer);
        pneigh_queue_purge(&tbl->proxy_queue);
        neigh_ifdown(tbl, NULL);
        if (atomic_read(&tbl->entries))
                printk(KERN_CRIT "neighbour leakage\n");
        /* Unlink from the global table list. */
        write_lock(&neigh_tbl_lock);
        for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
                if (*tp == tbl) {
                        *tp = tbl->next;
                        break;
                }
        }
        write_unlock(&neigh_tbl_lock);

        neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
        tbl->hash_buckets = NULL;

        kfree(tbl->phash_buckets);
        tbl->phash_buckets = NULL;

        remove_proc_entry(tbl->id, init_net.proc_net_stat);

        free_percpu(tbl->stats);
        tbl->stats = NULL;

        kmem_cache_destroy(tbl->kmem_cachep);
        tbl->kmem_cachep = NULL;

        return 0;
}
1506
/* Netlink RTM_DELNEIGH handler: delete a neighbour (or proxy) entry.
 * Deletion is implemented as an administrative update to NUD_FAILED.
 * Returns 0 on success or a negative errno. */
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct net *net = sock_net(skb->sk);
        struct ndmsg *ndm;
        struct nlattr *dst_attr;
        struct neigh_table *tbl;
        struct net_device *dev = NULL;
        int err = -EINVAL;

        if (nlmsg_len(nlh) < sizeof(*ndm))
                goto out;

        dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
        if (dst_attr == NULL)
                goto out;

        ndm = nlmsg_data(nlh);
        if (ndm->ndm_ifindex) {
                dev = dev_get_by_index(net, ndm->ndm_ifindex);
                if (dev == NULL) {
                        err = -ENODEV;
                        goto out;
                }
        }

        read_lock(&neigh_tbl_lock);
        for (tbl = neigh_tables; tbl; tbl = tbl->next) {
                struct neighbour *neigh;

                if (tbl->family != ndm->ndm_family)
                        continue;
                /* Found the family's table; drop the list lock.  Every
                 * path below leaves the loop, so it is never re-taken. */
                read_unlock(&neigh_tbl_lock);

                if (nla_len(dst_attr) < tbl->key_len)
                        goto out_dev_put;

                if (ndm->ndm_flags & NTF_PROXY) {
                        err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
                        goto out_dev_put;
                }

                if (dev == NULL)
                        goto out_dev_put;

                neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
                if (neigh == NULL) {
                        err = -ENOENT;
                        goto out_dev_put;
                }

                /* "Delete" == force the entry into NUD_FAILED. */
                err = neigh_update(neigh, NULL, NUD_FAILED,
                                   NEIGH_UPDATE_F_OVERRIDE |
                                   NEIGH_UPDATE_F_ADMIN);
                neigh_release(neigh);
                goto out_dev_put;
        }
        read_unlock(&neigh_tbl_lock);
        err = -EAFNOSUPPORT;

out_dev_put:
        if (dev)
                dev_put(dev);
out:
        return err;
}
1572
/* Netlink RTM_NEWNEIGH handler: create or update a neighbour (or
 * proxy) entry, honouring NLM_F_CREATE/NLM_F_EXCL/NLM_F_REPLACE
 * semantics.  Returns 0 on success or a negative errno. */
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct net *net = sock_net(skb->sk);
        struct ndmsg *ndm;
        struct nlattr *tb[NDA_MAX+1];
        struct neigh_table *tbl;
        struct net_device *dev = NULL;
        int err;

        err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
        if (err < 0)
                goto out;

        err = -EINVAL;
        if (tb[NDA_DST] == NULL)
                goto out;

        ndm = nlmsg_data(nlh);
        if (ndm->ndm_ifindex) {
                dev = dev_get_by_index(net, ndm->ndm_ifindex);
                if (dev == NULL) {
                        err = -ENODEV;
                        goto out;
                }

                if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
                        goto out_dev_put;
        }

        read_lock(&neigh_tbl_lock);
        for (tbl = neigh_tables; tbl; tbl = tbl->next) {
                int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
                struct neighbour *neigh;
                void *dst, *lladdr;

                if (tbl->family != ndm->ndm_family)
                        continue;
                /* Found the family's table; drop the list lock.  Every
                 * path below leaves the loop, so it is never re-taken. */
                read_unlock(&neigh_tbl_lock);

                if (nla_len(tb[NDA_DST]) < tbl->key_len)
                        goto out_dev_put;
                dst = nla_data(tb[NDA_DST]);
                lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

                if (ndm->ndm_flags & NTF_PROXY) {
                        struct pneigh_entry *pn;

                        /* Proxy entries live in their own hash: create
                         * (or refresh) one and return. */
                        err = -ENOBUFS;
                        pn = pneigh_lookup(tbl, net, dst, dev, 1);
                        if (pn) {
                                pn->flags = ndm->ndm_flags;
                                err = 0;
                        }
                        goto out_dev_put;
                }

                if (dev == NULL)
                        goto out_dev_put;

                neigh = neigh_lookup(tbl, dst, dev);
                if (neigh == NULL) {
                        if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
                                err = -ENOENT;
                                goto out_dev_put;
                        }

                        neigh = __neigh_lookup_errno(tbl, dst, dev);
                        if (IS_ERR(neigh)) {
                                err = PTR_ERR(neigh);
                                goto out_dev_put;
                        }
                } else {
                        if (nlh->nlmsg_flags & NLM_F_EXCL) {
                                err = -EEXIST;
                                neigh_release(neigh);
                                goto out_dev_put;
                        }

                        /* Without REPLACE an existing lladdr is kept. */
                        if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
                                flags &= ~NEIGH_UPDATE_F_OVERRIDE;
                }

                err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
                neigh_release(neigh);
                goto out_dev_put;
        }

        read_unlock(&neigh_tbl_lock);
        err = -EAFNOSUPPORT;

out_dev_put:
        if (dev)
                dev_put(dev);
out:
        return err;
}
1669
/* Dump one neigh_parms block into @skb as a nested NDTA_PARMS
 * attribute.  The NLA_PUT* macros jump to nla_put_failure when the skb
 * runs out of room, in which case the partially-built nest is
 * cancelled and the (negative) cancel result returned. */
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
        struct nlattr *nest;

        nest = nla_nest_start(skb, NDTA_PARMS);
        if (nest == NULL)
                return -ENOBUFS;

        /* IFINDEX only for per-device blocks, not the table default. */
        if (parms->dev)
                NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

        NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
        NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
        NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
        NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
        NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
        NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
        NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
        NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
                      parms->base_reachable_time);
        NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
        NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
        NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
        NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
        NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
        NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

        return nla_nest_end(skb, nest);

nla_put_failure:
        return nla_nest_cancel(skb, nest);
}
1702
/*
 * neightbl_fill_info - compose one RTM_NEWNEIGHTBL message for @tbl.
 *
 * Emits the table name, GC thresholds/interval, a snapshot of the runtime
 * configuration (NDTA_CONFIG), the per-CPU statistics summed into a single
 * NDTA_STATS blob, and the table's default parameters (NDTA_PARMS).
 *
 * Returns the final message length on success, or -EMSGSIZE if @skb ran
 * out of tailroom (the partially built message is cancelled).
 *
 * Note: the NLA_PUT* macros jump to the nla_put_failure label when the
 * skb is full, so that path must also drop the table read lock.
 */
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	/* Hold the table lock so all dumped values are mutually consistent. */
	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		/* deltas are reported as "time since last event" in msecs */
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		/* Sum the per-CPU counters into one aggregate for userspace. */
		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	/* Only the table-wide defaults are dumped here; per-device parms
	 * are reported separately via neightbl_fill_param_info(). */
	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
1783
1784 static int neightbl_fill_param_info(struct sk_buff *skb,
1785                                     struct neigh_table *tbl,
1786                                     struct neigh_parms *parms,
1787                                     u32 pid, u32 seq, int type,
1788                                     unsigned int flags)
1789 {
1790         struct ndtmsg *ndtmsg;
1791         struct nlmsghdr *nlh;
1792
1793         nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1794         if (nlh == NULL)
1795                 return -EMSGSIZE;
1796
1797         ndtmsg = nlmsg_data(nlh);
1798
1799         read_lock_bh(&tbl->lock);
1800         ndtmsg->ndtm_family = tbl->family;
1801         ndtmsg->ndtm_pad1   = 0;
1802         ndtmsg->ndtm_pad2   = 0;
1803
1804         if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1805             neightbl_fill_parms(skb, parms) < 0)
1806                 goto errout;
1807
1808         read_unlock_bh(&tbl->lock);
1809         return nlmsg_end(skb, nlh);
1810 errout:
1811         read_unlock_bh(&tbl->lock);
1812         nlmsg_cancel(skb, nlh);
1813         return -EMSGSIZE;
1814 }
1815
/* Validation policy for the top-level RTM_SETNEIGHTBL attributes. */
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};
1824
/* Validation policy for the nested NDTA_PARMS attributes. */
static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
1840
/*
 * neightbl_set - RTM_SETNEIGHTBL handler: update a neighbour table's
 * tunables and (optionally) one of its neigh_parms instances.
 *
 * The target table is located by the NDTA_NAME attribute (mandatory),
 * optionally filtered by the ndtmsg family.  If NDTA_PARMS is present,
 * NDTPA_IFINDEX selects the per-device parms (0 means the table default).
 *
 * Returns 0 on success, -EINVAL if NDTA_NAME is missing, -ENOENT if no
 * matching table/parms exist, or the nla_parse error code.
 */
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
			  nl_neightbl_policy);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	/* Walk the global table list under neigh_tbl_lock; the lock is
	 * held across the whole update so the table cannot go away. */
	ndtmsg = nlmsg_data(nlh);
	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;

		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout_locked;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
				       nl_ntbl_parm_policy);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_params(tbl, net, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		/* Apply every parms attribute that was supplied. */
		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				p->queue_len = nla_get_u32(tbp[i]);
				break;
			case NDTPA_PROXY_QLEN:
				p->proxy_qlen = nla_get_u32(tbp[i]);
				break;
			case NDTPA_APP_PROBES:
				p->app_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_UCAST_PROBES:
				p->ucast_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_MCAST_PROBES:
				p->mcast_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				p->base_reachable_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_GC_STALETIME:
				p->gc_staletime = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_DELAY_PROBE_TIME:
				p->delay_probe_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_RETRANS_TIME:
				p->retrans_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_ANYCAST_DELAY:
				p->anycast_delay = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_PROXY_DELAY:
				p->proxy_delay = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_LOCKTIME:
				p->locktime = nla_get_msecs(tbp[i]);
				break;
			}
		}
	}

	/* Table-wide GC tunables. */
	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout_locked:
	read_unlock(&neigh_tbl_lock);
errout:
	return err;
}
1965
/*
 * neightbl_dump_info - RTM_GETNEIGHTBL dump handler.
 *
 * Walks every neighbour table (optionally filtered by family) and, for
 * each, first emits the table itself and then one message per per-device
 * parms instance belonging to @net.  Resume state lives in cb->args[0]
 * (table index) and cb->args[1] (parms index within the table).
 */
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
		struct neigh_parms *p;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) <= 0)
			break;

		/* tbl->parms.next skips the table default, which was
		 * already included by neightbl_fill_info() above. */
		for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			if (nidx++ < neigh_skip)
				continue;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).pid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) <= 0)
				goto out;
		}

		/* next table starts its parms dump from the beginning */
		neigh_skip = 0;
	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
2012
/*
 * neigh_fill_info - compose one RTM_NEWNEIGH message for @neigh.
 *
 * Dumps the ndmsg header, the primary key (NDA_DST), the link-layer
 * address (NDA_LLADDR, only while the entry is NUD_VALID), cache timing
 * info (NDA_CACHEINFO) and the probe counter (NDA_PROBES).
 *
 * Returns the message length, or -EMSGSIZE if @skb filled up (the
 * partial message is cancelled).  The NLA_PUT* macros jump to
 * nla_put_failure when out of space.
 */
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = neigh->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = neigh->flags;
	ndm->ndm_type	 = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);

	/* state, lladdr and timestamps must be read under neigh->lock
	 * so they form a consistent snapshot */
	read_lock_bh(&neigh->lock);
	ndm->ndm_state	 = neigh->nud_state;
	if ((neigh->nud_state & NUD_VALID) &&
	    nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
		/* nla_put() is used here (not NLA_PUT) so we can drop
		 * the lock before taking the failure path */
		read_unlock_bh(&neigh->lock);
		goto nla_put_failure;
	}

	ci.ndm_used	 = now - neigh->used;
	ci.ndm_confirmed = now - neigh->confirmed;
	ci.ndm_updated	 = now - neigh->updated;
	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2058
/* Announce a neighbour change to in-kernel netevent listeners and then
 * to userspace via an RTM_NEWNEIGH rtnetlink notification. */
static void neigh_update_notify(struct neighbour *neigh)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
}
2064
2065 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2066                             struct netlink_callback *cb)
2067 {
2068         struct net * net = sock_net(skb->sk);
2069         struct neighbour *n;
2070         int rc, h, s_h = cb->args[1];
2071         int idx, s_idx = idx = cb->args[2];
2072
2073         read_lock_bh(&tbl->lock);
2074         for (h = 0; h <= tbl->hash_mask; h++) {
2075                 if (h < s_h)
2076                         continue;
2077                 if (h > s_h)
2078                         s_idx = 0;
2079                 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
2080                         int lidx;
2081                         if (dev_net(n->dev) != net)
2082                                 continue;
2083                         lidx = idx++;
2084                         if (lidx < s_idx)
2085                                 continue;
2086                         if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2087                                             cb->nlh->nlmsg_seq,
2088                                             RTM_NEWNEIGH,
2089                                             NLM_F_MULTI) <= 0) {
2090                                 read_unlock_bh(&tbl->lock);
2091                                 rc = -1;
2092                                 goto out;
2093                         }
2094                 }
2095         }
2096         read_unlock_bh(&tbl->lock);
2097         rc = skb->len;
2098 out:
2099         cb->args[1] = h;
2100         cb->args[2] = idx;
2101         return rc;
2102 }
2103
/*
 * neigh_dump_info - RTM_GETNEIGH dump handler.
 *
 * Iterates all neighbour tables (optionally filtered by family) and
 * delegates each to neigh_dump_table().  cb->args[0] holds the table
 * index; args[1..] are the per-table resume state and are reset when
 * moving on to a new table.
 */
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		/* starting a new table: clear args[1..] resume state */
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}
2127
2128 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2129 {
2130         int chain;
2131
2132         read_lock_bh(&tbl->lock);
2133         for (chain = 0; chain <= tbl->hash_mask; chain++) {
2134                 struct neighbour *n;
2135
2136                 for (n = tbl->hash_buckets[chain]; n; n = n->next)
2137                         cb(n, cookie);
2138         }
2139         read_unlock_bh(&tbl->lock);
2140 }
2141 EXPORT_SYMBOL(neigh_for_each);
2142
/* The tbl->lock must be held as a writer and BH disabled.
 *
 * Walks every hash chain and asks @cb whether each entry should be
 * released.  Entries for which @cb returns non-zero are unlinked from
 * the chain, marked dead and handed to neigh_cleanup_and_release();
 * the per-entry lock is dropped before the release call. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;

	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				/* unlink: np keeps pointing at the slot
				 * now holding n's successor */
				*np = n->next;
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
2170
2171 #ifdef CONFIG_PROC_FS
2172
/*
 * neigh_get_first - find the first neighbour entry visible to this
 * seq_file iteration.
 *
 * Scans the hash buckets from 0, skipping entries from other netns,
 * entries rejected by the optional neigh_sub_iter hook, and (when
 * NEIGH_SEQ_SKIP_NOARP is set) entries whose state is only NUD_NOARP.
 * Updates state->bucket to where the entry was found.
 */
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	/* we are iterating the main table, not the proxy table */
	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				/* fakep==0: probe acceptance only, no skip */
				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
2211
/*
 * neigh_get_next - advance to the next visible neighbour after @n.
 *
 * Applies the same netns / sub_iter / NUD_NOARP filtering as
 * neigh_get_first(), moving on to later buckets as chains run out.
 * When @pos is non-NULL it is decremented once per entry consumed, so
 * callers can use it to seek to an absolute position.
 */
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	/* give the sub-iterator a chance to yield another sub-entry of
	 * the same neighbour before we move to the next one */
	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}
2259
2260 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2261 {
2262         struct neighbour *n = neigh_get_first(seq);
2263
2264         if (n) {
2265                 while (*pos) {
2266                         n = neigh_get_next(seq, n, pos);
2267                         if (!n)
2268                                 break;
2269                 }
2270         }
2271         return *pos ? NULL : n;
2272 }
2273
2274 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2275 {
2276         struct neigh_seq_state *state = seq->private;
2277         struct net *net = seq_file_net(seq);
2278         struct neigh_table *tbl = state->tbl;
2279         struct pneigh_entry *pn = NULL;
2280         int bucket = state->bucket;
2281
2282         state->flags |= NEIGH_SEQ_IS_PNEIGH;
2283         for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2284                 pn = tbl->phash_buckets[bucket];
2285                 while (pn && !net_eq(pneigh_net(pn), net))
2286                         pn = pn->next;
2287                 if (pn)
2288                         break;
2289         }
2290         state->bucket = bucket;
2291
2292         return pn;
2293 }
2294
/*
 * pneigh_get_next - advance to the next proxy entry after @pn,
 * skipping entries from other network namespaces and moving through
 * later buckets as needed.  When @pos is non-NULL it is decremented
 * once per entry consumed (used by pneigh_get_idx for seeking).
 */
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	pn = pn->next;
	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}
2319
2320 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2321 {
2322         struct pneigh_entry *pn = pneigh_get_first(seq);
2323
2324         if (pn) {
2325                 while (*pos) {
2326                         pn = pneigh_get_next(seq, pn, pos);
2327                         if (!pn)
2328                                 break;
2329                 }
2330         }
2331         return *pos ? NULL : pn;
2332 }
2333
2334 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2335 {
2336         struct neigh_seq_state *state = seq->private;
2337         void *rc;
2338
2339         rc = neigh_get_idx(seq, pos);
2340         if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2341                 rc = pneigh_get_idx(seq, pos);
2342
2343         return rc;
2344 }
2345
/*
 * neigh_seq_start - seq_file ->start for protocol /proc tables.
 *
 * Takes the table read lock (released in neigh_seq_stop) and positions
 * the iterator.  Position 0 is the SEQ_START_TOKEN header line, so real
 * entries are sought at *pos - 1.
 */
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(tbl->lock)
{
	struct neigh_seq_state *state = seq->private;
	loff_t pos_minus_one;

	state->tbl = tbl;
	state->bucket = 0;
	/* IS_PNEIGH is internal state, not a caller flag */
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	read_lock_bh(&tbl->lock);

	/* use a copy so the caller's *pos is not modified by seeking */
	pos_minus_one = *pos - 1;
	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
2362
/*
 * neigh_seq_next - seq_file ->next: step from @v to the following
 * element, crossing from the neighbour table into the proxy table when
 * the former is exhausted (unless NEIGH_SEQ_NEIGH_ONLY).
 */
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	/* after the header token, the first element is at index 0 */
	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_idx(seq, pos);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		/* neighbour entries exhausted: fall over to proxies */
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		/* we can only be in the proxy phase if it was allowed */
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
2389
/* seq_file ->stop: drop the table lock taken in neigh_seq_start(). */
void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(tbl->lock)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
2399
2400 /* statistics via seq_file */
2401
2402 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2403 {
2404         struct proc_dir_entry *pde = seq->private;
2405         struct neigh_table *tbl = pde->data;
2406         int cpu;
2407
2408         if (*pos == 0)
2409                 return SEQ_START_TOKEN;
2410
2411         for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2412                 if (!cpu_possible(cpu))
2413                         continue;
2414                 *pos = cpu+1;
2415                 return per_cpu_ptr(tbl->stats, cpu);
2416         }
2417         return NULL;
2418 }
2419
2420 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2421 {
2422         struct proc_dir_entry *pde = seq->private;
2423         struct neigh_table *tbl = pde->data;
2424         int cpu;
2425
2426         for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2427                 if (!cpu_possible(cpu))
2428                         continue;
2429                 *pos = cpu+1;
2430                 return per_cpu_ptr(tbl->stats, cpu);
2431         }
2432         return NULL;
2433 }
2434
/* seq_file ->stop: nothing to release; start() takes no locks. */
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
2439
2440 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2441 {
2442         struct proc_dir_entry *pde = seq->private;
2443         struct neigh_table *tbl = pde->data;
2444         struct neigh_statistics *st = v;
2445
2446         if (v == SEQ_START_TOKEN) {
2447                 seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs\n");
2448                 return 0;
2449         }
2450
2451         seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2452                         "%08lx %08lx  %08lx %08lx\n",
2453                    atomic_read(&tbl->entries),
2454
2455                    st->allocs,
2456                    st->destroys,
2457                    st->hash_grows,
2458
2459                    st->lookups,
2460                    st->hits,
2461
2462                    st->res_failed,
2463
2464                    st->rcv_probes_mcast,
2465                    st->rcv_probes_ucast,
2466
2467                    st->periodic_gc_runs,
2468                    st->forced_gc_runs
2469                    );
2470
2471         return 0;
2472 }
2473
/* seq_file iterator for the per-table /proc statistics file. */
static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};
2480
2481 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2482 {
2483         int ret = seq_open(file, &neigh_stat_seq_ops);
2484
2485         if (!ret) {
2486                 struct seq_file *sf = file->private_data;
2487                 sf->private = PDE(inode);
2488         }
2489         return ret;
2490 };
2491
/* File operations for the per-table /proc statistics file. */
static const struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
2499
2500 #endif /* CONFIG_PROC_FS */
2501
2502 static inline size_t neigh_nlmsg_size(void)
2503 {
2504         return NLMSG_ALIGN(sizeof(struct ndmsg))
2505                + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2506                + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2507                + nla_total_size(sizeof(struct nda_cacheinfo))
2508                + nla_total_size(4); /* NDA_PROBES */
2509 }
2510
/*
 * __neigh_notify - send an rtnetlink message about @n to the
 * RTNLGRP_NEIGH multicast group of the entry's netns.
 *
 * Allocates atomically (may run from softirq via the state machine);
 * on any failure the error is recorded with rtnl_set_sk_err() so
 * listeners learn they missed an event.
 */
static void __neigh_notify(struct neighbour *n, int type, int flags)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, 0, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
2533
2534 #ifdef CONFIG_ARPD
/* Ask a userspace ARP daemon (arpd) to resolve @n by multicasting an
 * RTM_GETNEIGH request for it. */
void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
}
2539 #endif /* CONFIG_ARPD */
2540
2541 #ifdef CONFIG_SYSCTL
2542
/*
 * Template for the per-device (and per-protocol default) neighbour
 * sysctl tables.  neigh_sysctl_register() kmemdup()s this template and
 * then wires each slot's .data pointer BY NUMERIC INDEX — the slot
 * positions below ([0]..[17]) must stay in sync with that function.
 * Slots [14]..[17] (gc_*) exist only in the "default" table; for real
 * devices the table is truncated at slot [14].
 */
static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[__NET_NEIGH_MAX];
	char *dev_name;		/* kstrdup()'d directory name, freed on unregister */
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		{	/* [0] -> parms->mcast_probes */
			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{	/* [1] -> parms->ucast_probes */
			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{	/* [2] -> parms->app_probes */
			.ctl_name	= NET_NEIGH_APP_SOLICIT,
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{	/* [3] -> parms->retrans_time (no ctl_name: proc-only) */
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{	/* [4] -> parms->base_reachable_time */
			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{	/* [5] -> parms->delay_probe_time */
			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{	/* [6] -> parms->gc_staletime */
			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{	/* [7] -> parms->queue_len */
			.ctl_name	= NET_NEIGH_UNRES_QLEN,
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{	/* [8] -> parms->proxy_qlen */
			.ctl_name	= NET_NEIGH_PROXY_QLEN,
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{	/* [9] -> parms->anycast_delay (no ctl_name: proc-only) */
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{	/* [10] -> parms->proxy_delay (no ctl_name: proc-only) */
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{	/* [11] -> parms->locktime (no ctl_name: proc-only) */
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{	/* [12] -> parms->retrans_time, millisecond view */
			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
			.procname	= "retrans_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
		{	/* [13] -> parms->base_reachable_time, millisecond view */
			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
			.procname	= "base_reachable_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
		{	/* [14] -> tbl->gc_interval (default table only) */
			.ctl_name	= NET_NEIGH_GC_INTERVAL,
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{	/* [15] -> tbl->gc_thresh1 (default table only) */
			.ctl_name	= NET_NEIGH_GC_THRESH1,
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{	/* [16] -> tbl->gc_thresh2 (default table only) */
			.ctl_name	= NET_NEIGH_GC_THRESH2,
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{	/* [17] -> tbl->gc_thresh3 (default table only) */
			.ctl_name	= NET_NEIGH_GC_THRESH3,
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{},	/* sentinel */
	},
};
2680
/*
 * Create the sysctl tree net/<p_name>/neigh/<dev|default>/ for the
 * neigh_parms @p.  @dev is NULL when registering a protocol's "default"
 * parameters, in which case four extra gc_* entries are exposed that
 * live just past the neigh_parms struct (see the (p + 1) pointers
 * below — assumes the caller allocated tbl->gc_* contiguously after
 * the default parms; layout defined by the protocol, not visible here).
 * @handler/@strategy, if given, override the handlers of the four
 * time-valued entries so protocols can hook updates (e.g. to reset
 * reachability state).  Returns 0 or -ENOBUFS on allocation failure.
 *
 * Slot indices into t->neigh_vars[] must match neigh_sysctl_template.
 */
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name,
			  proc_handler *handler, ctl_handler *strategy)
{
	struct neigh_sysctl_table *t;
	const char *dev_name_source = NULL;

#define NEIGH_CTL_PATH_ROOT	0
#define NEIGH_CTL_PATH_PROTO	1
#define NEIGH_CTL_PATH_NEIGH	2
#define NEIGH_CTL_PATH_DEV	3

	struct ctl_path neigh_path[] = {
		{ .procname = "net",	 .ctl_name = CTL_NET, },
		{ .procname = "proto",	 .ctl_name = 0, },
		{ .procname = "neigh",	 .ctl_name = 0, },
		{ .procname = "default", .ctl_name = NET_PROTO_CONF_DEFAULT, },
		{ },
	};

	/* Private writable copy of the template; freed on unregister. */
	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto err;

	/* Point each sysctl entry at the corresponding neigh_parms field
	 * (indices mirror neigh_sysctl_template). */
	t->neigh_vars[0].data  = &p->mcast_probes;
	t->neigh_vars[1].data  = &p->ucast_probes;
	t->neigh_vars[2].data  = &p->app_probes;
	t->neigh_vars[3].data  = &p->retrans_time;
	t->neigh_vars[4].data  = &p->base_reachable_time;
	t->neigh_vars[5].data  = &p->delay_probe_time;
	t->neigh_vars[6].data  = &p->gc_staletime;
	t->neigh_vars[7].data  = &p->queue_len;
	t->neigh_vars[8].data  = &p->proxy_qlen;
	t->neigh_vars[9].data  = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;
	/* [12]/[13] are the millisecond views of the same two fields. */
	t->neigh_vars[12].data  = &p->retrans_time;
	t->neigh_vars[13].data  = &p->base_reachable_time;

	if (dev) {
		dev_name_source = dev->name;
		neigh_path[NEIGH_CTL_PATH_DEV].ctl_name = dev->ifindex;
		/* Terminate the table early: per-device dirs get no gc_* */
		memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
	} else {
		/* "default" dir: gc_* ints are stored right after *p. */
		dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
		t->neigh_vars[14].data = (int *)(p + 1);
		t->neigh_vars[15].data = (int *)(p + 1) + 1;
		t->neigh_vars[16].data = (int *)(p + 1) + 2;
		t->neigh_vars[17].data = (int *)(p + 1) + 3;
	}


	if (handler || strategy) {
		/* RetransTime */
		t->neigh_vars[3].proc_handler = handler;
		t->neigh_vars[3].strategy = strategy;
		t->neigh_vars[3].extra1 = dev;
		if (!strategy)
			t->neigh_vars[3].ctl_name = CTL_UNNUMBERED;
		/* ReachableTime */
		t->neigh_vars[4].proc_handler = handler;
		t->neigh_vars[4].strategy = strategy;
		t->neigh_vars[4].extra1 = dev;
		if (!strategy)
			t->neigh_vars[4].ctl_name = CTL_UNNUMBERED;
		/* RetransTime (in milliseconds)*/
		t->neigh_vars[12].proc_handler = handler;
		t->neigh_vars[12].strategy = strategy;
		t->neigh_vars[12].extra1 = dev;
		if (!strategy)
			t->neigh_vars[12].ctl_name = CTL_UNNUMBERED;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[13].proc_handler = handler;
		t->neigh_vars[13].strategy = strategy;
		t->neigh_vars[13].extra1 = dev;
		if (!strategy)
			t->neigh_vars[13].ctl_name = CTL_UNNUMBERED;
	}

	/* Own copy of the directory name; sysctl keeps referencing it. */
	t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
	if (!t->dev_name)
		goto free;

	neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
	neigh_path[NEIGH_CTL_PATH_NEIGH].ctl_name = pdev_id;
	neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
	neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id;

	t->sysctl_header =
		register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free_procname;

	p->sysctl_table = t;
	return 0;

	/* Error unwinding in reverse order of acquisition. */
free_procname:
	kfree(t->dev_name);
free:
	kfree(t);
err:
	return -ENOBUFS;
}
2785
2786 void neigh_sysctl_unregister(struct neigh_parms *p)
2787 {
2788         if (p->sysctl_table) {
2789                 struct neigh_sysctl_table *t = p->sysctl_table;
2790                 p->sysctl_table = NULL;
2791                 unregister_sysctl_table(t->sysctl_header);
2792                 kfree(t->dev_name);
2793                 kfree(t);
2794         }
2795 }
2796
2797 #endif  /* CONFIG_SYSCTL */
2798
/*
 * Register the rtnetlink message handlers for neighbour entries and
 * neighbour tables.  Dump-only message types pass a NULL doit handler;
 * doit-only types pass a NULL dumpit handler.
 */
static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);

	return 0;
}
2810
/* subsys level: the neighbour core must be up before protocol modules
 * (ARP, NDISC) register their tables during device/protocol init. */
subsys_initcall(neigh_init);

/* Public API for protocol neighbour implementations (ARP, NDISC, ...). */
EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_table_init_no_netlink);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);
EXPORT_SYMBOL_GPL(__pneigh_lookup);

#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
#endif
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif