ipv4: fix redirect handling
[linux-2.6.git] net/ipv4/route.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              ROUTE - implementation of the IP router.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *              Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *              Alan Cox        :       Verify area fixes.
 *              Alan Cox        :       cli() protects routing changes
 *              Rui Oliveira    :       ICMP routing table updates
 *              (rco@di.uminho.pt)      Routing table insertion and update
 *              Linus Torvalds  :       Rewrote bits to be sensible
 *              Alan Cox        :       Added BSD route gw semantics
 *              Alan Cox        :       Super /proc >4K
 *              Alan Cox        :       MTU in route table
 *              Alan Cox        :       MSS actually. Also added the window
 *                                      clamper.
 *              Sam Lantinga    :       Fixed route matching in rt_del()
 *              Alan Cox        :       Routing cache support.
 *              Alan Cox        :       Removed compatibility cruft.
 *              Alan Cox        :       RTF_REJECT support.
 *              Alan Cox        :       TCP irtt support.
 *              Jonathan Naylor :       Added Metric support.
 *      Miquel van Smoorenburg  :       BSD API fixes.
 *      Miquel van Smoorenburg  :       Metrics.
 *              Alan Cox        :       Use __u32 properly
 *              Alan Cox        :       Aligned routing errors more closely with BSD;
 *                                      our system is still very different.
 *              Alan Cox        :       Faster /proc handling
 *      Alexey Kuznetsov        :       Massive rework to support tree based routing,
 *                                      routing caches and better behaviour.
 *
 *              Olaf Erb        :       irtt wasn't being copied right.
 *              Bjorn Ekwall    :       Kerneld route support.
 *              Alan Cox        :       Multicast fixed (I hope)
 *              Pavel Krauz     :       Limited broadcast fixed
 *              Mike McLagan    :       Routing by source
 *      Alexey Kuznetsov        :       End of old history. Split to fib.c and
 *                                      route.c and rewritten from scratch.
 *              Andi Kleen      :       Load-limit warning messages.
 *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
 *      Vitaly E. Lavrov        :       Race condition in ip_route_input_slow.
 *      Tobias Ringstrom        :       Uninitialized res.type in ip_route_output_slow.
 *      Vladimir V. Ivanov      :       IP rule info (flowid) is really useful.
 *              Marc Boucher    :       routing by fwmark
 *      Robert Olsson           :       Added rt_cache statistics
 *      Arnaldo C. Melo         :       Convert proc stuff to seq_file
 *      Eric Dumazet            :       hashed spinlocks and rt_check_expire() fixes.
 *      Ilia Sotnikov           :       Ignore TOS on PMTUD and Redirect
 *      Ilia Sotnikov           :       Removed TOS from hash calculations
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/atmclip.h>
#include <net/secure_seq.h>

#define RT_FL_TOS(oldflp4) \
    ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))

#define IP_MAX_MTU      0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly       = RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly      = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly  = HZ / 2;
static int ip_rt_redirect_number __read_mostly  = 9;
static int ip_rt_redirect_load __read_mostly    = HZ / 50;
static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly       = HZ;
static int ip_rt_error_burst __read_mostly      = 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly    = 8;
static int ip_rt_mtu_expires __read_mostly      = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly         = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly       = 256;
static int rt_chain_length_max __read_mostly    = 20;
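
/*
 * Note: these tunables are exported through sysctl as
 * /proc/sys/net/ipv4/route/* (gc_timeout, redirect_number, min_pmtu, ...)
 * via the ipv4_route_table defined later in this file.
 */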

/*
 *      Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int      ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int      ipv4_default_mtu(const struct dst_entry *dst);
static void              ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void              ipv4_link_failure(struct sk_buff *skb);
static void              ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                            int how)
{
}

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
        struct rtable *rt = (struct rtable *) dst;
        struct inet_peer *peer;
        u32 *p = NULL;

        if (!rt->peer)
                rt_bind_peer(rt, rt->rt_dst, 1);

        peer = rt->peer;
        if (peer) {
                u32 *old_p = __DST_METRICS_PTR(old);
                unsigned long prev, new;

                p = peer->metrics;
                if (inet_metrics_new(peer))
                        memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

                new = (unsigned long) p;
                prev = cmpxchg(&dst->_metrics, old, new);

                if (prev != old) {
                        p = __DST_METRICS_PTR(prev);
                        if (prev & DST_METRICS_READ_ONLY)
                                p = NULL;
                } else {
                        if (rt->fi) {
                                fib_info_put(rt->fi);
                                rt->fi = NULL;
                        }
                }
        }
        return p;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr);

static struct dst_ops ipv4_dst_ops = {
        .family =               AF_INET,
        .protocol =             cpu_to_be16(ETH_P_IP),
        .gc =                   rt_garbage_collect,
        .check =                ipv4_dst_check,
        .default_advmss =       ipv4_default_advmss,
        .default_mtu =          ipv4_default_mtu,
        .cow_metrics =          ipv4_cow_metrics,
        .destroy =              ipv4_dst_destroy,
        .ifdown =               ipv4_dst_ifdown,
        .negative_advice =      ipv4_negative_advice,
        .link_failure =         ipv4_link_failure,
        .update_pmtu =          ip_rt_update_pmtu,
        .local_out =            __ip_local_out,
        .neigh_lookup =         ipv4_neigh_lookup,
};

#define ECN_OR_COST(class)      TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
        TC_PRIO_BESTEFFORT,
        ECN_OR_COST(BESTEFFORT),
        TC_PRIO_BESTEFFORT,
        ECN_OR_COST(BESTEFFORT),
        TC_PRIO_BULK,
        ECN_OR_COST(BULK),
        TC_PRIO_BULK,
        ECN_OR_COST(BULK),
        TC_PRIO_INTERACTIVE,
        ECN_OR_COST(INTERACTIVE),
        TC_PRIO_INTERACTIVE,
        ECN_OR_COST(INTERACTIVE),
        TC_PRIO_INTERACTIVE_BULK,
        ECN_OR_COST(INTERACTIVE_BULK),
        TC_PRIO_INTERACTIVE_BULK,
        ECN_OR_COST(INTERACTIVE_BULK)
};
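
/*
 * Usage sketch: this table is indexed with the four TOS bits, as the
 * rt_tos2priority() helper in <net/route.h> does:
 *
 *      return ip_tos2prio[IPTOS_TOS(tos) >> 1];
 *
 * so e.g. IPTOS_LOWDELAY (0x10) maps to TC_PRIO_INTERACTIVE.
 */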


/*
 * Route cache.
 */

/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with
 *    BH disabled.
 */
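
/*
 * Illustrative reader-side pattern (a sketch of the scheme above;
 * entry_matches() is a hypothetical predicate, not kernel API): lookups
 * walk a bucket under rcu_read_lock_bh() and take their reference before
 * leaving the read-side section:
 *
 *      rcu_read_lock_bh();
 *      for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
 *           rth = rcu_dereference_bh(rth->dst.rt_next)) {
 *              if (entry_matches(rth)) {
 *                      dst_use(&rth->dst, jiffies);
 *                      break;
 *              }
 *      }
 *      rcu_read_unlock_bh();
 */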

struct rt_hash_bucket {
        struct rtable __rcu     *chain;
};

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
        defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks.
 * The size of this table is a power of two and depends on the number of CPUs.
 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ       512
# else
#  define RT_HASH_LOCK_SZ       256
# endif
#endif

static spinlock_t       *rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]

static __init void rt_hash_lock_init(void)
{
        int i;

        rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
                        GFP_KERNEL);
        if (!rt_hash_locks)
                panic("IP: failed to allocate rt_hash_locks\n");

        for (i = 0; i < RT_HASH_LOCK_SZ; i++)
                spin_lock_init(&rt_hash_locks[i]);
}
#else
# define rt_hash_lock_addr(slot) NULL

static inline void rt_hash_lock_init(void)
{
}
#endif

static struct rt_hash_bucket    *rt_hash_table __read_mostly;
static unsigned                 rt_hash_mask __read_mostly;
static unsigned int             rt_hash_log  __read_mostly;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)

static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
                                   int genid)
{
        return jhash_3words((__force u32)daddr, (__force u32)saddr,
                            idx, genid)
                & rt_hash_mask;
}

static inline int rt_genid(struct net *net)
{
        return atomic_read(&net->ipv4.rt_genid);
}

#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
        struct seq_net_private p;
        int bucket;
        int genid;
};

static struct rtable *rt_cache_get_first(struct seq_file *seq)
{
        struct rt_cache_iter_state *st = seq->private;
        struct rtable *r = NULL;

        for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
                if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
                        continue;
                rcu_read_lock_bh();
                r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
                while (r) {
                        if (dev_net(r->dst.dev) == seq_file_net(seq) &&
                            r->rt_genid == st->genid)
                                return r;
                        r = rcu_dereference_bh(r->dst.rt_next);
                }
                rcu_read_unlock_bh();
        }
        return r;
}

static struct rtable *__rt_cache_get_next(struct seq_file *seq,
                                          struct rtable *r)
{
        struct rt_cache_iter_state *st = seq->private;

        r = rcu_dereference_bh(r->dst.rt_next);
        while (!r) {
                rcu_read_unlock_bh();
                do {
                        if (--st->bucket < 0)
                                return NULL;
                } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
                rcu_read_lock_bh();
                r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
        }
        return r;
}

static struct rtable *rt_cache_get_next(struct seq_file *seq,
                                        struct rtable *r)
{
        struct rt_cache_iter_state *st = seq->private;
        while ((r = __rt_cache_get_next(seq, r)) != NULL) {
                if (dev_net(r->dst.dev) != seq_file_net(seq))
                        continue;
                if (r->rt_genid == st->genid)
                        break;
        }
        return r;
}

static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
{
        struct rtable *r = rt_cache_get_first(seq);

        if (r)
                while (pos && (r = rt_cache_get_next(seq, r)))
                        --pos;
        return pos ? NULL : r;
}

static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct rt_cache_iter_state *st = seq->private;
        if (*pos)
                return rt_cache_get_idx(seq, *pos - 1);
        st->genid = rt_genid(seq_file_net(seq));
        return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct rtable *r;

        if (v == SEQ_START_TOKEN)
                r = rt_cache_get_first(seq);
        else
                r = rt_cache_get_next(seq, v);
        ++*pos;
        return r;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
        if (v && v != SEQ_START_TOKEN)
                rcu_read_unlock_bh();
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_printf(seq, "%-127s\n",
                           "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
                           "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
                           "HHUptod\tSpecDst");
        else {
                struct rtable *r = v;
                struct neighbour *n;
                int len, HHUptod;

                rcu_read_lock();
                n = dst_get_neighbour(&r->dst);
                HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
                rcu_read_unlock();

                seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
                              "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
                        r->dst.dev ? r->dst.dev->name : "*",
                        (__force u32)r->rt_dst,
                        (__force u32)r->rt_gateway,
                        r->rt_flags, atomic_read(&r->dst.__refcnt),
                        r->dst.__use, 0, (__force u32)r->rt_src,
                        dst_metric_advmss(&r->dst) + 40,
                        dst_metric(&r->dst, RTAX_WINDOW),
                        (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
                              dst_metric(&r->dst, RTAX_RTTVAR)),
                        r->rt_key_tos,
                        -1,
                        HHUptod,
                        r->rt_spec_dst, &len);

                seq_printf(seq, "%*s\n", 127 - len, "");
        }
        return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
        .start  = rt_cache_seq_start,
        .next   = rt_cache_seq_next,
        .stop   = rt_cache_seq_stop,
        .show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &rt_cache_seq_ops,
                        sizeof(struct rt_cache_iter_state));
}

static const struct file_operations rt_cache_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = rt_cache_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
        int cpu;

        if (*pos == 0)
                return SEQ_START_TOKEN;

        for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return &per_cpu(rt_cache_stat, cpu);
        }
        return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        int cpu;

        for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return &per_cpu(rt_cache_stat, cpu);
        }
        return NULL;

}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
        struct rt_cache_stat *st = v;

        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
                return 0;
        }

        seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
                   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
                   dst_entries_get_slow(&ipv4_dst_ops),
                   st->in_hit,
                   st->in_slow_tot,
                   st->in_slow_mc,
                   st->in_no_route,
                   st->in_brd,
                   st->in_martian_dst,
                   st->in_martian_src,

                   st->out_hit,
                   st->out_slow_tot,
                   st->out_slow_mc,

                   st->gc_total,
                   st->gc_ignored,
                   st->gc_goal_miss,
                   st->gc_dst_overflow,
                   st->in_hlist_search,
                   st->out_hlist_search
                );
        return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
        .start  = rt_cpu_seq_start,
        .next   = rt_cpu_seq_next,
        .stop   = rt_cpu_seq_stop,
        .show   = rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = rt_cpu_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
        struct ip_rt_acct *dst, *src;
        unsigned int i, j;

        dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
        if (!dst)
                return -ENOMEM;

        for_each_possible_cpu(i) {
                src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
                for (j = 0; j < 256; j++) {
                        dst[j].o_bytes   += src[j].o_bytes;
                        dst[j].o_packets += src[j].o_packets;
                        dst[j].i_bytes   += src[j].i_bytes;
                        dst[j].i_packets += src[j].i_packets;
                }
        }

        seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
        kfree(dst);
        return 0;
}

static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, rt_acct_proc_show, NULL);
}

static const struct file_operations rt_acct_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = rt_acct_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
        struct proc_dir_entry *pde;

        pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
                        &rt_cache_seq_fops);
        if (!pde)
                goto err1;

        pde = proc_create("rt_cache", S_IRUGO,
                          net->proc_net_stat, &rt_cpu_seq_fops);
        if (!pde)
                goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
        pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
        if (!pde)
                goto err3;
#endif
        return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
        remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
        remove_proc_entry("rt_cache", net->proc_net);
err1:
        return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
        remove_proc_entry("rt_cache", net->proc_net_stat);
        remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
        remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
        .init = ip_rt_do_proc_init,
        .exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
        return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
        return 0;
}
#endif /* CONFIG_PROC_FS */

static inline void rt_free(struct rtable *rt)
{
        call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline void rt_drop(struct rtable *rt)
{
        ip_rt_put(rt);
        call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline int rt_fast_clean(struct rtable *rth)
{
        /* Kill broadcast/multicast entries very aggressively, if they
           collide in the hash table with more useful entries */
        return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
                rt_is_input_route(rth) && rth->dst.rt_next;
}

static inline int rt_valuable(struct rtable *rth)
{
        return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
                (rth->peer && rth->peer->pmtu_expires);
}

static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
        unsigned long age;
        int ret = 0;

        if (atomic_read(&rth->dst.__refcnt))
                goto out;

        age = jiffies - rth->dst.lastuse;
        if ((age <= tmo1 && !rt_fast_clean(rth)) ||
            (age <= tmo2 && rt_valuable(rth)))
                goto out;
        ret = 1;
out:    return ret;
}

/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
        u32 score = jiffies - rt->dst.lastuse;

        score = ~score & ~(3<<30);

        if (rt_valuable(rt))
                score |= (1<<31);

        if (rt_is_output_route(rt) ||
            !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
                score |= (1<<30);

        return score;
}
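
/*
 * Worked example (a sketch, not code used elsewhere in this file): the
 * low 30 bits hold the bitwise complement of the entry's age, so fresher
 * entries score higher; bit 31 is set when rt_valuable() and bit 30 for
 * output/unicast routes. rt_intern_hash() below evicts the candidate
 * with the lowest score, i.e. the oldest, least valuable entry.
 */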

static inline bool rt_caching(const struct net *net)
{
        return net->ipv4.current_rt_cache_rebuild_count <=
                net->ipv4.sysctl_rt_cache_rebuild_count;
}

static inline bool compare_hash_inputs(const struct rtable *rt1,
                                       const struct rtable *rt2)
{
        return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
                ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
                (rt1->rt_route_iif ^ rt2->rt_route_iif)) == 0);
}

static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
{
        return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
                ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
                (rt1->rt_mark ^ rt2->rt_mark) |
                (rt1->rt_key_tos ^ rt2->rt_key_tos) |
                (rt1->rt_route_iif ^ rt2->rt_route_iif) |
                (rt1->rt_oif ^ rt2->rt_oif)) == 0;
}

static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
        return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
}

static inline int rt_is_expired(struct rtable *rth)
{
        return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}

/*
 * Perform a full scan of the hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to be rescheduled if necessary.
 */
static void rt_do_flush(struct net *net, int process_context)
{
        unsigned int i;
        struct rtable *rth, *next;

        for (i = 0; i <= rt_hash_mask; i++) {
                struct rtable __rcu **pprev;
                struct rtable *list;

                if (process_context && need_resched())
                        cond_resched();
                rth = rcu_dereference_raw(rt_hash_table[i].chain);
                if (!rth)
                        continue;

                spin_lock_bh(rt_hash_lock_addr(i));

                list = NULL;
                pprev = &rt_hash_table[i].chain;
                rth = rcu_dereference_protected(*pprev,
                        lockdep_is_held(rt_hash_lock_addr(i)));

                while (rth) {
                        next = rcu_dereference_protected(rth->dst.rt_next,
                                lockdep_is_held(rt_hash_lock_addr(i)));

                        if (!net ||
                            net_eq(dev_net(rth->dst.dev), net)) {
                                rcu_assign_pointer(*pprev, next);
                                rcu_assign_pointer(rth->dst.rt_next, list);
                                list = rth;
                        } else {
                                pprev = &rth->dst.rt_next;
                        }
                        rth = next;
                }

                spin_unlock_bh(rt_hash_lock_addr(i));

                for (; list; list = next) {
                        next = rcu_dereference_protected(list->dst.rt_next, 1);
                        rt_free(list);
                }
        }
}

/*
 * While freeing expired entries, we compute the average chain length
 * and standard deviation, using fixed-point arithmetic.
 * This is to have an estimation of rt_chain_length_max:
 *  rt_chain_length_max = max(elasticity, AVG + 4*SD)
 * We use 3 bits for the fractional part, and 29 (or 61) for magnitude.
 */

#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)

/*
 * Given a hash chain and an item in this hash chain,
 * find if a previous entry has the same hash_inputs
 * (but differs on tos, mark or oif)
 * Returns 0 if an alias is found.
 * Returns ONE if rth has no alias before itself.
 */
static int has_noalias(const struct rtable *head, const struct rtable *rth)
{
        const struct rtable *aux = head;

        while (aux != rth) {
                if (compare_hash_inputs(aux, rth))
                        return 0;
                aux = rcu_dereference_protected(aux->dst.rt_next, 1);
        }
        return ONE;
}
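
/*
 * Worked example of the fixed point above (FRACT_BITS = 3, ONE = 8):
 * in a chain of 5 entries where 2 alias earlier ones, has_noalias()
 * contributes 8 for each of the 3 distinct entries, so the sum is 24
 * and slow_chain_length() below returns 24 >> 3 = 3 effective entries.
 */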

/*
 * Perturbation of rt_genid by a small quantity [1..256].
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without reusing a recent rt_genid.
 * The Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
        unsigned char shuffle;

        get_random_bytes(&shuffle, sizeof(shuffle));
        atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}
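
/*
 * Illustrative effect (sketch): bumping rt_genid makes every entry that
 * was hashed under the old generation fail rt_is_expired(), i.e.
 *
 *      rth->rt_genid != rt_genid(dev_net(rth->dst.dev))
 *
 * so stale entries are skipped by lookups and reaped lazily on the next
 * walk, without an immediate flush.
 */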

/*
 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
void rt_cache_flush(struct net *net, int delay)
{
        rt_cache_invalidate(net);
        if (delay >= 0)
                rt_do_flush(net, !in_softirq());
}

/* Flush previously invalidated entries from the cache */
void rt_cache_flush_batch(struct net *net)
{
        rt_do_flush(net, !in_softirq());
}

static void rt_emergency_hash_rebuild(struct net *net)
{
        if (net_ratelimit())
                printk(KERN_WARNING "Route hash chain too long!\n");
        rt_cache_invalidate(net);
}

/*
   Short description of GC goals.

   We want to build an algorithm which keeps the routing cache
   at some equilibrium point, where the number of aged-off entries
   is approximately equal to the number of newly generated ones.

   The current expiration strength is the variable "expire".
   We try to adjust it dynamically, so that when the network
   is idle, expire is large enough to keep enough warm entries,
   and when load increases, it shrinks to limit the cache size.
 */

static int rt_garbage_collect(struct dst_ops *ops)
{
        static unsigned long expire = RT_GC_TIMEOUT;
        static unsigned long last_gc;
        static int rover;
        static int equilibrium;
        struct rtable *rth;
        struct rtable __rcu **rthp;
        unsigned long now = jiffies;
        int goal;
        int entries = dst_entries_get_fast(&ipv4_dst_ops);

        /*
         * Garbage collection is pretty expensive,
         * do not run it too frequently.
         */

        RT_CACHE_STAT_INC(gc_total);

        if (now - last_gc < ip_rt_gc_min_interval &&
            entries < ip_rt_max_size) {
                RT_CACHE_STAT_INC(gc_ignored);
                goto out;
        }

        entries = dst_entries_get_slow(&ipv4_dst_ops);
        /* Calculate number of entries, which we want to expire now. */
        goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
        if (goal <= 0) {
                if (equilibrium < ipv4_dst_ops.gc_thresh)
                        equilibrium = ipv4_dst_ops.gc_thresh;
                goal = entries - equilibrium;
                if (goal > 0) {
                        equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
                        goal = entries - equilibrium;
                }
        } else {
                /* We are in a dangerous area. Try to reduce the cache
                 * really aggressively.
                 */
                goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
                equilibrium = entries - goal;
        }

        if (now - last_gc >= ip_rt_gc_min_interval)
                last_gc = now;

        if (goal <= 0) {
                equilibrium += goal;
                goto work_done;
        }

        do {
                int i, k;

                for (i = rt_hash_mask, k = rover; i >= 0; i--) {
                        unsigned long tmo = expire;

                        k = (k + 1) & rt_hash_mask;
                        rthp = &rt_hash_table[k].chain;
                        spin_lock_bh(rt_hash_lock_addr(k));
                        while ((rth = rcu_dereference_protected(*rthp,
                                        lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
                                if (!rt_is_expired(rth) &&
                                        !rt_may_expire(rth, tmo, expire)) {
                                        tmo >>= 1;
                                        rthp = &rth->dst.rt_next;
                                        continue;
                                }
                                *rthp = rth->dst.rt_next;
                                rt_free(rth);
                                goal--;
                        }
                        spin_unlock_bh(rt_hash_lock_addr(k));
                        if (goal <= 0)
                                break;
                }
                rover = k;

                if (goal <= 0)
                        goto work_done;

                /* The goal was not achieved. We stop the process if:

                   - expire was reduced to zero; otherwise expire is halved.
                   - the table is not full.
                   - we are called from interrupt context.
                   - the jiffies check is just a fallback/debug loop breaker;
                     we will not spin here for a long time in any case.
                 */

                RT_CACHE_STAT_INC(gc_goal_miss);

                if (expire == 0)
                        break;

                expire >>= 1;

                if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
                        goto out;
        } while (!in_softirq() && time_before_eq(jiffies, now));

        if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
                goto out;
        if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
                goto out;
        if (net_ratelimit())
                printk(KERN_WARNING "dst cache overflow\n");
        RT_CACHE_STAT_INC(gc_dst_overflow);
        return 1;

work_done:
        expire += ip_rt_gc_min_interval;
        if (expire > ip_rt_gc_timeout ||
            dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
            dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
                expire = ip_rt_gc_timeout;
out:    return 0;
}

/*
 * Returns number of entries in a hash chain that have different hash_inputs
 */
static int slow_chain_length(const struct rtable *head)
{
        int length = 0;
        const struct rtable *rth = head;

        while (rth) {
                length += has_noalias(head, rth);
                rth = rcu_dereference_protected(rth->dst.rt_next, 1);
        }
        return length >> FRACT_BITS;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
        struct neigh_table *tbl = &arp_tbl;
        static const __be32 inaddr_any = 0;
        struct net_device *dev = dst->dev;
        const __be32 *pkey = daddr;
        struct neighbour *n;

#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
        if (dev->type == ARPHRD_ATM)
                tbl = clip_tbl_hook;
#endif
        if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
                pkey = &inaddr_any;

        n = __ipv4_neigh_lookup(tbl, dev, *(__force u32 *)pkey);
        if (n)
                return n;
        return neigh_create(tbl, pkey, dev);
}

static int rt_bind_neighbour(struct rtable *rt)
{
        struct neighbour *n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
        if (IS_ERR(n))
                return PTR_ERR(n);
        dst_set_neighbour(&rt->dst, n);

        return 0;
}

static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
                                     struct sk_buff *skb, int ifindex)
{
        struct rtable   *rth, *cand;
        struct rtable __rcu **rthp, **candp;
        unsigned long   now;
        u32             min_score;
        int             chain_length;
        int attempts = !in_softirq();

restart:
        chain_length = 0;
        min_score = ~(u32)0;
        cand = NULL;
        candp = NULL;
        now = jiffies;

        if (!rt_caching(dev_net(rt->dst.dev))) {
                /*
                 * If we're not caching, just tell the caller we
                 * were successful and don't touch the route.  The
                 * caller holds the sole reference to the cache entry, and
                 * it will be released when the caller is done with it.
                 * If we dropped it here, the caller would have no way to
                 * resolve routes when we're not caching.  Instead, just
                 * return rt, so the caller gets a single use out of the route.
                 * Note that we do rt_free on this new route entry, so that
                 * once its refcount hits zero, we are still able to reap it
                 * (Thanks Alexey)
                 * Note: To avoid expensive rcu stuff for this uncached dst,
                 * we set DST_NOCACHE so that dst_release() can free dst
                 * without waiting for a grace period.
                 */

                rt->dst.flags |= DST_NOCACHE;
                if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
                        int err = rt_bind_neighbour(rt);
                        if (err) {
                                if (net_ratelimit())
                                        printk(KERN_WARNING
                                            "Neighbour table failure & not caching routes.\n");
                                ip_rt_put(rt);
                                return ERR_PTR(err);
                        }
                }

                goto skip_hashing;
        }

        rthp = &rt_hash_table[hash].chain;

        spin_lock_bh(rt_hash_lock_addr(hash));
        while ((rth = rcu_dereference_protected(*rthp,
                        lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
                if (rt_is_expired(rth)) {
                        *rthp = rth->dst.rt_next;
                        rt_free(rth);
                        continue;
                }
                if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
                        /* Put it first */
                        *rthp = rth->dst.rt_next;
                        /*
                         * Since lookup is lockfree, the deletion
                         * must be visible to another weakly ordered CPU before
                         * the insertion at the start of the hash chain.
                         */
                        rcu_assign_pointer(rth->dst.rt_next,
                                           rt_hash_table[hash].chain);
                        /*
                         * Since lookup is lockfree, the update writes
                         * must be ordered for consistency on SMP.
                         */
                        rcu_assign_pointer(rt_hash_table[hash].chain, rth);

                        dst_use(&rth->dst, now);
                        spin_unlock_bh(rt_hash_lock_addr(hash));

                        rt_drop(rt);
                        if (skb)
                                skb_dst_set(skb, &rth->dst);
                        return rth;
                }

                if (!atomic_read(&rth->dst.__refcnt)) {
                        u32 score = rt_score(rth);

                        if (score <= min_score) {
                                cand = rth;
                                candp = rthp;
                                min_score = score;
                        }
                }

                chain_length++;

                rthp = &rth->dst.rt_next;
        }

        if (cand) {
                /* ip_rt_gc_elasticity used to be the average chain length;
                 * when it is exceeded, gc becomes really aggressive.
                 *
                 * The second limit is less certain. At the moment it allows
                 * only 2 entries per bucket. We will see.
                 */
                if (chain_length > ip_rt_gc_elasticity) {
                        *candp = cand->dst.rt_next;
                        rt_free(cand);
                }
        } else {
                if (chain_length > rt_chain_length_max &&
                    slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
                        struct net *net = dev_net(rt->dst.dev);
                        int num = ++net->ipv4.current_rt_cache_rebuild_count;
                        if (!rt_caching(net)) {
                                printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
                                        rt->dst.dev->name, num);
                        }
                        rt_emergency_hash_rebuild(net);
                        spin_unlock_bh(rt_hash_lock_addr(hash));

                        hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
                                        ifindex, rt_genid(net));
                        goto restart;
                }
        }

        /* Try to bind the route to an ARP neighbour only if it is an
           output route or on the unicast forwarding path.
         */
        if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
                int err = rt_bind_neighbour(rt);
                if (err) {
                        spin_unlock_bh(rt_hash_lock_addr(hash));

                        if (err != -ENOBUFS) {
                                rt_drop(rt);
                                return ERR_PTR(err);
                        }

                        /* The neighbour tables are full and nothing
                           can be released. Try to shrink the route cache;
                           most likely it holds some neighbour records.
                         */
                        if (attempts-- > 0) {
                                int saved_elasticity = ip_rt_gc_elasticity;
                                int saved_int = ip_rt_gc_min_interval;
                                ip_rt_gc_elasticity     = 1;
                                ip_rt_gc_min_interval   = 0;
                                rt_garbage_collect(&ipv4_dst_ops);
                                ip_rt_gc_min_interval   = saved_int;
                                ip_rt_gc_elasticity     = saved_elasticity;
                                goto restart;
                        }

                        if (net_ratelimit())
                                printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
                        rt_drop(rt);
                        return ERR_PTR(-ENOBUFS);
                }
        }

        rt->dst.rt_next = rt_hash_table[hash].chain;

        /*
         * Since lookup is lockfree, we must make sure
         * previous writes to rt are committed to memory
         * before making rt visible to other CPUS.
         */
        rcu_assign_pointer(rt_hash_table[hash].chain, rt);

        spin_unlock_bh(rt_hash_lock_addr(hash));

skip_hashing:
        if (skb)
                skb_dst_set(skb, &rt->dst);
        return rt;
}

static atomic_t __rt_peer_genid = ATOMIC_INIT(0);

static u32 rt_peer_genid(void)
{
        return atomic_read(&__rt_peer_genid);
}

void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
{
        struct inet_peer *peer;

        peer = inet_getpeer_v4(daddr, create);

        if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
                inet_putpeer(peer);
        else
                rt->rt_peer_genid = rt_peer_genid();
}

/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we can still generate some output.
 * Random ID selection looks a bit dangerous because we have no chance of
 * selecting an ID that is unique within a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
        static DEFINE_SPINLOCK(ip_fb_id_lock);
        static u32 ip_fallback_id;
        u32 salt;

        spin_lock_bh(&ip_fb_id_lock);
        salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
        iph->id = htons(salt & 0xFFFF);
        ip_fallback_id = salt;
        spin_unlock_bh(&ip_fb_id_lock);
}

void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
        struct rtable *rt = (struct rtable *) dst;

        if (rt) {
                if (rt->peer == NULL)
                        rt_bind_peer(rt, rt->rt_dst, 1);

                /* If the peer is attached to the destination, it is never
                   detached, so we need not grab a lock to dereference it.
                 */
                if (rt->peer) {
                        iph->id = htons(inet_getid(rt->peer, more));
                        return;
                }
        } else
                printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
                       __builtin_return_address(0));

        ip_select_fb_ident(iph);
}
EXPORT_SYMBOL(__ip_select_ident);
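
/*
 * Usage note (a sketch of the call path, not defined here): callers on
 * the output path reach this through the ip_select_ident() wrapper in
 * <net/ip.h>. With a bound peer the IP ID is a simple per-destination
 * counter (inet_getid()); otherwise the hashed fallback above is used.
 */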

static void rt_del(unsigned hash, struct rtable *rt)
{
        struct rtable __rcu **rthp;
        struct rtable *aux;

        rthp = &rt_hash_table[hash].chain;
        spin_lock_bh(rt_hash_lock_addr(hash));
        ip_rt_put(rt);
        while ((aux = rcu_dereference_protected(*rthp,
                        lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
                if (aux == rt || rt_is_expired(aux)) {
                        *rthp = aux->dst.rt_next;
                        rt_free(aux);
                        continue;
                }
                rthp = &aux->dst.rt_next;
        }
        spin_unlock_bh(rt_hash_lock_addr(hash));
}

static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
{
        struct rtable *rt = (struct rtable *) dst;
        __be32 orig_gw = rt->rt_gateway;
        struct neighbour *n, *old_n;

        dst_confirm(&rt->dst);

        rt->rt_gateway = peer->redirect_learned.a4;

        n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
        if (IS_ERR(n))
                return PTR_ERR(n);
        old_n = xchg(&rt->dst._neighbour, n);
        if (old_n)
                neigh_release(old_n);
        if (!n || !(n->nud_state & NUD_VALID)) {
                if (n)
                        neigh_event_send(n, NULL);
                rt->rt_gateway = orig_gw;
                return -EAGAIN;
        } else {
                rt->rt_flags |= RTCF_REDIRECTED;
                call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
        }
        return 0;
}

/* called in rcu_read_lock() section */
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                    __be32 saddr, struct net_device *dev)
{
        int s, i;
        struct in_device *in_dev = __in_dev_get_rcu(dev);
        __be32 skeys[2] = { saddr, 0 };
        int    ikeys[2] = { dev->ifindex, 0 };
        struct inet_peer *peer;
        struct net *net;

        if (!in_dev)
                return;

        net = dev_net(dev);
        if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
            ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
            ipv4_is_zeronet(new_gw))
                goto reject_redirect;

        if (!IN_DEV_SHARED_MEDIA(in_dev)) {
                if (!inet_addr_onlink(in_dev, new_gw, old_gw))
                        goto reject_redirect;
                if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
                        goto reject_redirect;
        } else {
                if (inet_addr_type(net, new_gw) != RTN_UNICAST)
                        goto reject_redirect;
        }

        for (s = 0; s < 2; s++) {
                for (i = 0; i < 2; i++) {
                        unsigned int hash;
                        struct rtable __rcu **rthp;
                        struct rtable *rt;

                        hash = rt_hash(daddr, skeys[s], ikeys[i], rt_genid(net));

                        rthp = &rt_hash_table[hash].chain;

                        while ((rt = rcu_dereference(*rthp)) != NULL) {
                                rthp = &rt->dst.rt_next;

                                if (rt->rt_key_dst != daddr ||
                                    rt->rt_key_src != skeys[s] ||
                                    rt->rt_oif != ikeys[i] ||
                                    rt_is_input_route(rt) ||
                                    rt_is_expired(rt) ||
                                    !net_eq(dev_net(rt->dst.dev), net) ||
                                    rt->dst.error ||
                                    rt->dst.dev != dev ||
                                    rt->rt_gateway != old_gw)
                                        continue;

                                if (!rt->peer)
                                        rt_bind_peer(rt, rt->rt_dst, 1);

                                peer = rt->peer;
                                if (peer) {
                                        if (peer->redirect_learned.a4 != new_gw) {
                                                peer->redirect_learned.a4 = new_gw;
                                                atomic_inc(&__rt_peer_genid);
                                        }
                                        check_peer_redir(&rt->dst, peer);
                                }
                        }
                }
        }
        return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
        if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
                printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
                        "  Advised path = %pI4 -> %pI4\n",
                       &old_gw, dev->name, &new_gw,
                       &saddr, &daddr);
#endif
        ;
}

static bool peer_pmtu_expired(struct inet_peer *peer)
{
        unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);

        return orig &&
               time_after_eq(jiffies, orig) &&
               cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
}

static bool peer_pmtu_cleaned(struct inet_peer *peer)
{
        unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);

        return orig &&
               cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
        struct rtable *rt = (struct rtable *)dst;
        struct dst_entry *ret = dst;

        if (rt) {
                if (dst->obsolete > 0) {
                        ip_rt_put(rt);
                        ret = NULL;
                } else if (rt->rt_flags & RTCF_REDIRECTED) {
                        unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
                                                rt->rt_oif,
                                                rt_genid(dev_net(dst->dev)));
                        rt_del(hash, rt);
                        ret = NULL;
                } else if (rt->peer && peer_pmtu_expired(rt->peer)) {
                        dst_metric_set(dst, RTAX_MTU, rt->peer->pmtu_orig);
                }
        }
        return ret;
}

1460 /*
1461  * Algorithm:
1462  *      1. The first ip_rt_redirect_number redirects are sent
1463  *         with exponential backoff, then we stop sending them at all,
1464  *         assuming that the host ignores our redirects.
1465  *      2. If we did not see packets requiring redirects
1466  *         during ip_rt_redirect_silence, we assume that the host
1467  *         forgot redirected route and start to send redirects again.
1468  *
1469  * This algorithm is much cheaper and more intelligent than dumb load limiting
1470  * in icmp.c.
1471  *
1472  * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
1473  * and "frag. need" (breaks PMTU discovery) in icmp.c.
1474  */
1475
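/*
 * A minimal sketch of the backoff test used by ip_rt_send_redirect()
 * below, pulled out as a hypothetical helper (redirect_send_allowed is
 * not part of this file): with the ip_rt_redirect_load sysctl at its
 * usual HZ/50 default, the k-th redirect is allowed only once a gap of
 * (HZ/50) << k jiffies has passed since the previous one.
 */
static inline bool redirect_send_allowed(unsigned long rate_last,
					 int rate_tokens)
{
	return rate_tokens == 0 ||
	       time_after(jiffies,
			  rate_last + (ip_rt_redirect_load << rate_tokens));
}
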
1476 void ip_rt_send_redirect(struct sk_buff *skb)
1477 {
1478         struct rtable *rt = skb_rtable(skb);
1479         struct in_device *in_dev;
1480         struct inet_peer *peer;
1481         int log_martians;
1482
1483         rcu_read_lock();
1484         in_dev = __in_dev_get_rcu(rt->dst.dev);
1485         if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
1486                 rcu_read_unlock();
1487                 return;
1488         }
1489         log_martians = IN_DEV_LOG_MARTIANS(in_dev);
1490         rcu_read_unlock();
1491
1492         if (!rt->peer)
1493                 rt_bind_peer(rt, rt->rt_dst, 1);
1494         peer = rt->peer;
1495         if (!peer) {
1496                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1497                 return;
1498         }
1499
1500         /* No redirected packets during ip_rt_redirect_silence;
1501          * reset the algorithm.
1502          */
1503         if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
1504                 peer->rate_tokens = 0;
1505
1506         /* Too many ignored redirects; do not send anything.
1507          * Set peer->rate_last to the last seen redirected packet.
1508          */
1509         if (peer->rate_tokens >= ip_rt_redirect_number) {
1510                 peer->rate_last = jiffies;
1511                 return;
1512         }
1513
1514         /* Check for load limit; set rate_last to the latest sent
1515          * redirect.
1516          */
1517         if (peer->rate_tokens == 0 ||
1518             time_after(jiffies,
1519                        (peer->rate_last +
1520                         (ip_rt_redirect_load << peer->rate_tokens)))) {
1521                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1522                 peer->rate_last = jiffies;
1523                 ++peer->rate_tokens;
1524 #ifdef CONFIG_IP_ROUTE_VERBOSE
1525                 if (log_martians &&
1526                     peer->rate_tokens == ip_rt_redirect_number &&
1527                     net_ratelimit())
1528                         printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
1529                                &ip_hdr(skb)->saddr, rt->rt_iif,
1530                                 &rt->rt_dst, &rt->rt_gateway);
1531 #endif
1532         }
1533 }
1534
1535 static int ip_error(struct sk_buff *skb)
1536 {
1537         struct rtable *rt = skb_rtable(skb);
1538         struct inet_peer *peer;
1539         unsigned long now;
1540         bool send;
1541         int code;
1542
1543         switch (rt->dst.error) {
1544         case EINVAL:
1545         default:
1546                 goto out;
1547         case EHOSTUNREACH:
1548                 code = ICMP_HOST_UNREACH;
1549                 break;
1550         case ENETUNREACH:
1551                 code = ICMP_NET_UNREACH;
1552                 IP_INC_STATS_BH(dev_net(rt->dst.dev),
1553                                 IPSTATS_MIB_INNOROUTES);
1554                 break;
1555         case EACCES:
1556                 code = ICMP_PKT_FILTERED;
1557                 break;
1558         }
1559
1560         if (!rt->peer)
1561                 rt_bind_peer(rt, rt->rt_dst, 1);
1562         peer = rt->peer;
1563
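	/* Classic token bucket: tokens accrue one per jiffy elapsed since
	 * the last event, capped at ip_rt_error_burst, and every ICMP
	 * error sent spends ip_rt_error_cost of them. */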
1564         send = true;
1565         if (peer) {
1566                 now = jiffies;
1567                 peer->rate_tokens += now - peer->rate_last;
1568                 if (peer->rate_tokens > ip_rt_error_burst)
1569                         peer->rate_tokens = ip_rt_error_burst;
1570                 peer->rate_last = now;
1571                 if (peer->rate_tokens >= ip_rt_error_cost)
1572                         peer->rate_tokens -= ip_rt_error_cost;
1573                 else
1574                         send = false;
1575         }
1576         if (send)
1577                 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1578
1579 out:    kfree_skb(skb);
1580         return 0;
1581 }
1582
1583 /*
1584  *      The last two values are not from the RFC but
1585  *      are needed for AMPRnet AX.25 paths.
1586  */
1587
1588 static const unsigned short mtu_plateau[] =
1589 {32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
1590
1591 static inline unsigned short guess_mtu(unsigned short old_mtu)
1592 {
1593         int i;
1594
1595         for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
1596                 if (old_mtu > mtu_plateau[i])
1597                         return mtu_plateau[i];
1598         return 68;
1599 }
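
/*
 * Worked example (illustrative only): a report quoting a 1500-byte
 * datagram maps to the 1492 plateau, a 576-byte one drops to 296, and
 * anything at or below the smallest plateau falls back to the 68-byte
 * IPv4 minimum:
 *
 *	guess_mtu(1500) == 1492
 *	guess_mtu(576)  == 296
 *	guess_mtu(68)   == 68
 */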
1600
1601 unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
1602                                  unsigned short new_mtu,
1603                                  struct net_device *dev)
1604 {
1605         unsigned short old_mtu = ntohs(iph->tot_len);
1606         unsigned short est_mtu = 0;
1607         struct inet_peer *peer;
1608
1609         peer = inet_getpeer_v4(iph->daddr, 1);
1610         if (peer) {
1611                 unsigned short mtu = new_mtu;
1612
1613                 if (new_mtu < 68 || new_mtu >= old_mtu) {
1614                         /* BSD 4.2 derived systems incorrectly adjust
1615                          * tot_len by the IP header length, and report
1616                          * a zero MTU in the ICMP message.
1617                          */
1618                         if (mtu == 0 &&
1619                             old_mtu >= 68 + (iph->ihl << 2))
1620                                 old_mtu -= iph->ihl << 2;
1621                         mtu = guess_mtu(old_mtu);
1622                 }
1623
1624                 if (mtu < ip_rt_min_pmtu)
1625                         mtu = ip_rt_min_pmtu;
1626                 if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
1627                         unsigned long pmtu_expires;
1628
1629                         pmtu_expires = jiffies + ip_rt_mtu_expires;
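			/* pmtu_expires == 0 means "nothing learned", so if
			 * the deadline lands exactly on jiffies value 0,
			 * nudge it to 1 to keep it armed. */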
1630                         if (!pmtu_expires)
1631                                 pmtu_expires = 1UL;
1632
1633                         est_mtu = mtu;
1634                         peer->pmtu_learned = mtu;
1635                         peer->pmtu_expires = pmtu_expires;
1636                         atomic_inc(&__rt_peer_genid);
1637                 }
1638
1639                 inet_putpeer(peer);
1640         }
1641         return est_mtu ? : new_mtu;
1642 }
1643
1644 static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
1645 {
1646         unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);
1647
1648         if (!expires)
1649                 return;
1650         if (time_before(jiffies, expires)) {
1651                 u32 orig_dst_mtu = dst_mtu(dst);
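		/* Save the route's original MTU metric once, so that the
		 * expiry path below can restore it when the learned PMTU
		 * times out. */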
1652                 if (peer->pmtu_learned < orig_dst_mtu) {
1653                         if (!peer->pmtu_orig)
1654                                 peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
1655                         dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
1656                 }
1657         } else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
1658                 dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
1659 }
1660
1661 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1662 {
1663         struct rtable *rt = (struct rtable *) dst;
1664         struct inet_peer *peer;
1665
1666         dst_confirm(dst);
1667
1668         if (!rt->peer)
1669                 rt_bind_peer(rt, rt->rt_dst, 1);
1670         peer = rt->peer;
1671         if (peer) {
1672                 unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);
1673
1674                 if (mtu < ip_rt_min_pmtu)
1675                         mtu = ip_rt_min_pmtu;
1676                 if (!pmtu_expires || mtu < peer->pmtu_learned) {
1677
1678                         pmtu_expires = jiffies + ip_rt_mtu_expires;
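			/* As in ip_rt_frag_needed(): 0 means "unarmed", so
			 * never store a literal 0 as the deadline. */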
1679                         if (!pmtu_expires)
1680                                 pmtu_expires = 1UL;
1681
1682                         peer->pmtu_learned = mtu;
1683                         peer->pmtu_expires = pmtu_expires;
1684
1685                         atomic_inc(&__rt_peer_genid);
1686                         rt->rt_peer_genid = rt_peer_genid();
1687                 }
1688                 check_peer_pmtu(dst, peer);
1689         }
1690 }
1691
1692
1693 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1694 {
1695         struct rtable *rt = (struct rtable *) dst;
1696
1697         if (rt_is_expired(rt))
1698                 return NULL;
1699         if (rt->rt_peer_genid != rt_peer_genid()) {
1700                 struct inet_peer *peer;
1701
1702                 if (!rt->peer)
1703                         rt_bind_peer(rt, rt->rt_dst, 0);
1704
1705                 peer = rt->peer;
1706                 if (peer) {
1707                         check_peer_pmtu(dst, peer);
1708
1709                         if (peer->redirect_learned.a4 &&
1710                             peer->redirect_learned.a4 != rt->rt_gateway) {
1711                                 if (check_peer_redir(dst, peer))
1712                                         return NULL;
1713                         }
1714                 }
1715
1716                 rt->rt_peer_genid = rt_peer_genid();
1717         }
1718         return dst;
1719 }
1720
1721 static void ipv4_dst_destroy(struct dst_entry *dst)
1722 {
1723         struct rtable *rt = (struct rtable *) dst;
1724         struct inet_peer *peer = rt->peer;
1725
1726         if (rt->fi) {
1727                 fib_info_put(rt->fi);
1728                 rt->fi = NULL;
1729         }
1730         if (peer) {
1731                 rt->peer = NULL;
1732                 inet_putpeer(peer);
1733         }
1734 }
1735
1736
1737 static void ipv4_link_failure(struct sk_buff *skb)
1738 {
1739         struct rtable *rt;
1740
1741         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1742
1743         rt = skb_rtable(skb);
1744         if (rt && rt->peer && peer_pmtu_cleaned(rt->peer))
1745                 dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
1746 }
1747
1748 static int ip_rt_bug(struct sk_buff *skb)
1749 {
1750         printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
1751                 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1752                 skb->dev ? skb->dev->name : "?");
1753         kfree_skb(skb);
1754         WARN_ON(1);
1755         return 0;
1756 }
1757
1758 /*
1759    We do not cache the source address of the outgoing interface,
1760    because it is used only by the IP RR, TS and SRR options,
1761    so it is out of the fast path.
1762
1763    BTW remember: "addr" is allowed to be unaligned
1764    in IP options!
1765  */
1766
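/*
 * Side note (illustrative): because IP options are byte-packed, "addr"
 * may sit at any offset, and a direct store such as
 * "*(__be32 *)addr = src" could trap on strict-alignment architectures.
 * That is why ip_rt_get_source() below ends with memcpy(addr, &src, 4),
 * which is safe at any alignment.
 */
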
1767 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1768 {
1769         __be32 src;
1770
1771         if (rt_is_output_route(rt))
1772                 src = ip_hdr(skb)->saddr;
1773         else {
1774                 struct fib_result res;
1775                 struct flowi4 fl4;
1776                 struct iphdr *iph;
1777
1778                 iph = ip_hdr(skb);
1779
1780                 memset(&fl4, 0, sizeof(fl4));
1781                 fl4.daddr = iph->daddr;
1782                 fl4.saddr = iph->saddr;
1783                 fl4.flowi4_tos = RT_TOS(iph->tos);
1784                 fl4.flowi4_oif = rt->dst.dev->ifindex;
1785                 fl4.flowi4_iif = skb->dev->ifindex;
1786                 fl4.flowi4_mark = skb->mark;
1787
1788                 rcu_read_lock();
1789                 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
1790                         src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
1791                 else
1792                         src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
1793                                         RT_SCOPE_UNIVERSE);
1794                 rcu_read_unlock();
1795         }
1796         memcpy(addr, &src, 4);
1797 }
1798
1799 #ifdef CONFIG_IP_ROUTE_CLASSID
1800 static void set_class_tag(struct rtable *rt, u32 tag)
1801 {
1802         if (!(rt->dst.tclassid & 0xFFFF))
1803                 rt->dst.tclassid |= tag & 0xFFFF;
1804         if (!(rt->dst.tclassid & 0xFFFF0000))
1805                 rt->dst.tclassid |= tag & 0xFFFF0000;
1806 }
1807 #endif
1808
1809 static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1810 {
1811         unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1812
1813         if (advmss == 0) {
1814                 advmss = max_t(unsigned int, dst->dev->mtu - 40,
1815                                ip_rt_min_advmss);
1816                 if (advmss > 65535 - 40)
1817                         advmss = 65535 - 40;
1818         }
1819         return advmss;
1820 }
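
/*
 * Worked example (illustrative only): on a 1500-byte-MTU device with no
 * explicit RTAX_ADVMSS metric this yields 1500 - 40 = 1460, the classic
 * Ethernet TCP MSS; the 65535 - 40 clamp only matters for jumbo-MTU
 * devices such as loopback.
 */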
1821
1822 static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
1823 {
1824         unsigned int mtu = dst->dev->mtu;
1825
1826         if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1827                 const struct rtable *rt = (const struct rtable *) dst;
1828
1829                 if (rt->rt_gateway != rt->rt_dst && mtu > 576)
1830                         mtu = 576;
1831         }
1832
1833         if (mtu > IP_MAX_MTU)
1834                 mtu = IP_MAX_MTU;
1835
1836         return mtu;
1837 }
1838
1839 static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
1840                             struct fib_info *fi)
1841 {
1842         struct inet_peer *peer;
1843         int create = 0;
1844
1845         /* If a peer entry exists for this destination, we must hook
1846          * it up in order to get at cached metrics.
1847          */
1848         if (fl4 && (fl4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
1849                 create = 1;
1850
1851         rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
1852         if (peer) {
1853                 rt->rt_peer_genid = rt_peer_genid();
1854                 if (inet_metrics_new(peer))
1855                         memcpy(peer->metrics, fi->fib_metrics,
1856                                sizeof(u32) * RTAX_MAX);
1857                 dst_init_metrics(&rt->dst, peer->metrics, false);
1858
1859                 check_peer_pmtu(&rt->dst, peer);
1860                 if (peer->redirect_learned.a4 &&
1861                     peer->redirect_learned.a4 != rt->rt_gateway) {
1862                         rt->rt_gateway = peer->redirect_learned.a4;
1863                         rt->rt_flags |= RTCF_REDIRECTED;
1864                 }
1865         } else {
1866                 if (fi->fib_metrics != (u32 *) dst_default_metrics) {
1867                         rt->fi = fi;
1868                         atomic_inc(&fi->fib_clntref);
1869                 }
1870                 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
1871         }
1872 }
1873
1874 static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
1875                            const struct fib_result *res,
1876                            struct fib_info *fi, u16 type, u32 itag)
1877 {
1878         struct dst_entry *dst = &rt->dst;
1879
1880         if (fi) {
1881                 if (FIB_RES_GW(*res) &&
1882                     FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1883                         rt->rt_gateway = FIB_RES_GW(*res);
1884                 rt_init_metrics(rt, fl4, fi);
1885 #ifdef CONFIG_IP_ROUTE_CLASSID
1886                 dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1887 #endif
1888         }
1889
1890         if (dst_mtu(dst) > IP_MAX_MTU)
1891                 dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
1892         if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
1893                 dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
1894
1895 #ifdef CONFIG_IP_ROUTE_CLASSID
1896 #ifdef CONFIG_IP_MULTIPLE_TABLES
1897         set_class_tag(rt, fib_rules_tclass(res));
1898 #endif
1899         set_class_tag(rt, itag);
1900 #endif
1901 }
1902
1903 static struct rtable *rt_dst_alloc(struct net_device *dev,
1904                                    bool nopolicy, bool noxfrm)
1905 {
1906         return dst_alloc(&ipv4_dst_ops, dev, 1, -1,
1907                          DST_HOST |
1908                          (nopolicy ? DST_NOPOLICY : 0) |
1909                          (noxfrm ? DST_NOXFRM : 0));
1910 }
1911
1912 /* called in rcu_read_lock() section */
1913 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1914                                 u8 tos, struct net_device *dev, int our)
1915 {
1916         unsigned int hash;
1917         struct rtable *rth;
1918         __be32 spec_dst;
1919         struct in_device *in_dev = __in_dev_get_rcu(dev);
1920         u32 itag = 0;
1921         int err;
1922
1923         /* Primary sanity checks. */
1924
1925         if (in_dev == NULL)
1926                 return -EINVAL;
1927
1928         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1929             ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
1930                 goto e_inval;
1931
1932         if (ipv4_is_zeronet(saddr)) {
1933                 if (!ipv4_is_local_multicast(daddr))
1934                         goto e_inval;
1935                 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1936         } else {
1937                 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
1938                                           &itag);
1939                 if (err < 0)
1940                         goto e_err;
1941         }
1942         rth = rt_dst_alloc(init_net.loopback_dev,
1943                            IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1944         if (!rth)
1945                 goto e_nobufs;
1946
1947 #ifdef CONFIG_IP_ROUTE_CLASSID
1948         rth->dst.tclassid = itag;
1949 #endif
1950         rth->dst.output = ip_rt_bug;
1951
1952         rth->rt_key_dst = daddr;
1953         rth->rt_key_src = saddr;
1954         rth->rt_genid   = rt_genid(dev_net(dev));
1955         rth->rt_flags   = RTCF_MULTICAST;
1956         rth->rt_type    = RTN_MULTICAST;
1957         rth->rt_key_tos = tos;
1958         rth->rt_dst     = daddr;
1959         rth->rt_src     = saddr;
1960         rth->rt_route_iif = dev->ifindex;
1961         rth->rt_iif     = dev->ifindex;
1962         rth->rt_oif     = 0;
1963         rth->rt_mark    = skb->mark;
1964         rth->rt_gateway = daddr;
1965         rth->rt_spec_dst= spec_dst;
1966         rth->rt_peer_genid = 0;
1967         rth->peer = NULL;
1968         rth->fi = NULL;
1969         if (our) {
1970                 rth->dst.input= ip_local_deliver;
1971                 rth->rt_flags |= RTCF_LOCAL;
1972         }
1973
1974 #ifdef CONFIG_IP_MROUTE
1975         if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1976                 rth->dst.input = ip_mr_input;
1977 #endif
1978         RT_CACHE_STAT_INC(in_slow_mc);
1979
1980         hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1981         rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
1982         return IS_ERR(rth) ? PTR_ERR(rth) : 0;
1983
1984 e_nobufs:
1985         return -ENOBUFS;
1986 e_inval:
1987         return -EINVAL;
1988 e_err:
1989         return err;
1990 }
1991
1992
1993 static void ip_handle_martian_source(struct net_device *dev,
1994                                      struct in_device *in_dev,
1995                                      struct sk_buff *skb,
1996                                      __be32 daddr,
1997                                      __be32 saddr)
1998 {
1999         RT_CACHE_STAT_INC(in_martian_src);
2000 #ifdef CONFIG_IP_ROUTE_VERBOSE
2001         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
2002                 /*
2003                  *      RFC1812 recommendation: if the source is martian,
2004                  *      the only hint is the MAC header.
2005                  */
2006                 printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
2007                         &daddr, &saddr, dev->name);
2008                 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
2009                         int i;
2010                         const unsigned char *p = skb_mac_header(skb);
2011                         printk(KERN_WARNING "ll header: ");
2012                         for (i = 0; i < dev->hard_header_len; i++, p++) {
2013                                 printk("%02x", *p);
2014                                 if (i < (dev->hard_header_len - 1))
2015                                         printk(":");
2016                         }
2017                         printk("\n");
2018                 }
2019         }
2020 #endif
2021 }
2022
2023 /* called in rcu_read_lock() section */
2024 static int __mkroute_input(struct sk_buff *skb,
2025                            const struct fib_result *res,
2026                            struct in_device *in_dev,
2027                            __be32 daddr, __be32 saddr, u32 tos,
2028                            struct rtable **result)
2029 {
2030         struct rtable *rth;
2031         int err;
2032         struct in_device *out_dev;
2033         unsigned int flags = 0;
2034         __be32 spec_dst;
2035         u32 itag;
2036
2037         /* get a working reference to the output device */
2038         out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
2039         if (out_dev == NULL) {
2040                 if (net_ratelimit())
2041                         printk(KERN_CRIT "Bug in ip_route_input" \
2042                                "_slow(). Please, report\n");
2043                 return -EINVAL;
2044         }
2045
2046
2047         err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
2048                                   in_dev->dev, &spec_dst, &itag);
2049         if (err < 0) {
2050                 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
2051                                          saddr);
2052
2053                 goto cleanup;
2054         }
2055
2056         if (err)
2057                 flags |= RTCF_DIRECTSRC;
2058
2059         if (out_dev == in_dev && err &&
2060             (IN_DEV_SHARED_MEDIA(out_dev) ||
2061              inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
2062                 flags |= RTCF_DOREDIRECT;
2063
2064         if (skb->protocol != htons(ETH_P_IP)) {
2065                 /* Not IP (i.e. ARP). Do not create a route if it is
2066                  * invalid for proxy arp. DNAT routes are always valid.
2067                  *
2068                  * The proxy arp feature has been extended to allow ARP
2069                  * replies back to the same interface, to support
2070                  * Private VLAN switch technologies. See arp.c.
2071                  */
2072                 if (out_dev == in_dev &&
2073                     IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
2074                         err = -EINVAL;
2075                         goto cleanup;
2076                 }
2077         }
2078
2079         rth = rt_dst_alloc(out_dev->dev,
2080                            IN_DEV_CONF_GET(in_dev, NOPOLICY),
2081                            IN_DEV_CONF_GET(out_dev, NOXFRM));
2082         if (!rth) {
2083                 err = -ENOBUFS;
2084                 goto cleanup;
2085         }
2086
2087         rth->rt_key_dst = daddr;
2088         rth->rt_key_src = saddr;
2089         rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
2090         rth->rt_flags = flags;
2091         rth->rt_type = res->type;
2092         rth->rt_key_tos = tos;
2093         rth->rt_dst     = daddr;
2094         rth->rt_src     = saddr;
2095         rth->rt_route_iif = in_dev->dev->ifindex;
2096         rth->rt_iif     = in_dev->dev->ifindex;
2097         rth->rt_oif     = 0;
2098         rth->rt_mark    = skb->mark;
2099         rth->rt_gateway = daddr;
2100         rth->rt_spec_dst= spec_dst;
2101         rth->rt_peer_genid = 0;
2102         rth->peer = NULL;
2103         rth->fi = NULL;
2104
2105         rth->dst.input = ip_forward;
2106         rth->dst.output = ip_output;
2107
2108         rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
2109
2110         *result = rth;
2111         err = 0;
2112  cleanup:
2113         return err;
2114 }
2115
2116 static int ip_mkroute_input(struct sk_buff *skb,
2117                             struct fib_result *res,
2118                             const struct flowi4 *fl4,
2119                             struct in_device *in_dev,
2120                             __be32 daddr, __be32 saddr, u32 tos)
2121 {
2122         struct rtable* rth = NULL;
2123         int err;
2124         unsigned hash;
2125
2126 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2127         if (res->fi && res->fi->fib_nhs > 1)
2128                 fib_select_multipath(res);
2129 #endif
2130
2131         /* create a routing cache entry */
2132         err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
2133         if (err)
2134                 return err;
2135
2136         /* put it into the cache */
2137         hash = rt_hash(daddr, saddr, fl4->flowi4_iif,
2138                        rt_genid(dev_net(rth->dst.dev)));
2139         rth = rt_intern_hash(hash, rth, skb, fl4->flowi4_iif);
2140         if (IS_ERR(rth))
2141                 return PTR_ERR(rth);
2142         return 0;
2143 }
2144
2145 /*
2146  *      NOTE. We drop all packets that have a local source
2147  *      address, because every properly looped-back packet
2148  *      must already have the correct destination attached by the output routine.
2149  *
2150  *      This approach solves two big problems:
2151  *      1. Non-simplex devices are handled properly.
2152  *      2. IP spoofing attempts are filtered with a 100% guarantee.
2153  *      called with rcu_read_lock()
2154  */
2155
2156 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2157                                u8 tos, struct net_device *dev)
2158 {
2159         struct fib_result res;
2160         struct in_device *in_dev = __in_dev_get_rcu(dev);
2161         struct flowi4   fl4;
2162         unsigned        flags = 0;
2163         u32             itag = 0;
2164         struct rtable * rth;
2165         unsigned        hash;
2166         __be32          spec_dst;
2167         int             err = -EINVAL;
2168         struct net    * net = dev_net(dev);
2169
2170         /* IP on this device is disabled. */
2171
2172         if (!in_dev)
2173                 goto out;
2174
2175         /* Check for the weirdest martians, which cannot be detected
2176            by fib_lookup.
2177          */
2178
2179         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
2180             ipv4_is_loopback(saddr))
2181                 goto martian_source;
2182
2183         if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
2184                 goto brd_input;
2185
2186         /* Accept zero addresses only for limited broadcast;
2187          * I do not even know whether to fix this or not. Waiting for complaints :-)
2188          */
2189         if (ipv4_is_zeronet(saddr))
2190                 goto martian_source;
2191
2192         if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
2193                 goto martian_destination;
2194
2195         /*
2196          *      Now we are ready to route packet.
2197          */
2198         fl4.flowi4_oif = 0;
2199         fl4.flowi4_iif = dev->ifindex;
2200         fl4.flowi4_mark = skb->mark;
2201         fl4.flowi4_tos = tos;
2202         fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2203         fl4.daddr = daddr;
2204         fl4.saddr = saddr;
2205         err = fib_lookup(net, &fl4, &res);
2206         if (err != 0) {
2207                 if (!IN_DEV_FORWARD(in_dev))
2208                         goto e_hostunreach;
2209                 goto no_route;
2210         }
2211
2212         RT_CACHE_STAT_INC(in_slow_tot);
2213
2214         if (res.type == RTN_BROADCAST)
2215                 goto brd_input;
2216
2217         if (res.type == RTN_LOCAL) {
2218                 err = fib_validate_source(skb, saddr, daddr, tos,
2219                                           net->loopback_dev->ifindex,
2220                                           dev, &spec_dst, &itag);
2221                 if (err < 0)
2222                         goto martian_source_keep_err;
2223                 if (err)
2224                         flags |= RTCF_DIRECTSRC;
2225                 spec_dst = daddr;
2226                 goto local_input;
2227         }
2228
2229         if (!IN_DEV_FORWARD(in_dev))
2230                 goto e_hostunreach;
2231         if (res.type != RTN_UNICAST)
2232                 goto martian_destination;
2233
2234         err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
2235 out:    return err;
2236
2237 brd_input:
2238         if (skb->protocol != htons(ETH_P_IP))
2239                 goto e_inval;
2240
2241         if (ipv4_is_zeronet(saddr))
2242                 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2243         else {
2244                 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
2245                                           &itag);
2246                 if (err < 0)
2247                         goto martian_source_keep_err;
2248                 if (err)
2249                         flags |= RTCF_DIRECTSRC;
2250         }
2251         flags |= RTCF_BROADCAST;
2252         res.type = RTN_BROADCAST;
2253         RT_CACHE_STAT_INC(in_brd);
2254
2255 local_input:
2256         rth = rt_dst_alloc(net->loopback_dev,
2257                            IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
2258         if (!rth)
2259                 goto e_nobufs;
2260
2261         rth->dst.input= ip_local_deliver;
2262         rth->dst.output= ip_rt_bug;
2263 #ifdef CONFIG_IP_ROUTE_CLASSID
2264         rth->dst.tclassid = itag;
2265 #endif
2266
2267         rth->rt_key_dst = daddr;
2268         rth->rt_key_src = saddr;
2269         rth->rt_genid = rt_genid(net);
2270         rth->rt_flags   = flags|RTCF_LOCAL;
2271         rth->rt_type    = res.type;
2272         rth->rt_key_tos = tos;
2273         rth->rt_dst     = daddr;
2274         rth->rt_src     = saddr;
2275 #ifdef CONFIG_IP_ROUTE_CLASSID
2276         rth->dst.tclassid = itag;
2277 #endif
2278         rth->rt_route_iif = dev->ifindex;
2279         rth->rt_iif     = dev->ifindex;
2280         rth->rt_oif     = 0;
2281         rth->rt_mark    = skb->mark;
2282         rth->rt_gateway = daddr;
2283         rth->rt_spec_dst= spec_dst;
2284         rth->rt_peer_genid = 0;
2285         rth->peer = NULL;
2286         rth->fi = NULL;
2287         if (res.type == RTN_UNREACHABLE) {
2288                 rth->dst.input= ip_error;
2289                 rth->dst.error= -err;
2290                 rth->rt_flags   &= ~RTCF_LOCAL;
2291         }
2292         hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
2293         rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
2294         err = 0;
2295         if (IS_ERR(rth))
2296                 err = PTR_ERR(rth);
2297         goto out;
2298
2299 no_route:
2300         RT_CACHE_STAT_INC(in_no_route);
2301         spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2302         res.type = RTN_UNREACHABLE;
2303         if (err == -ESRCH)
2304                 err = -ENETUNREACH;
2305         goto local_input;
2306
2307         /*
2308          *      Do not cache martian addresses: they should be logged (RFC1812)
2309          */
2310 martian_destination:
2311         RT_CACHE_STAT_INC(in_martian_dst);
2312 #ifdef CONFIG_IP_ROUTE_VERBOSE
2313         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
2314                 printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
2315                         &daddr, &saddr, dev->name);
2316 #endif
2317
2318 e_hostunreach:
2319         err = -EHOSTUNREACH;
2320         goto out;
2321
2322 e_inval:
2323         err = -EINVAL;
2324         goto out;
2325
2326 e_nobufs:
2327         err = -ENOBUFS;
2328         goto out;
2329
2330 martian_source:
2331         err = -EINVAL;
2332 martian_source_keep_err:
2333         ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2334         goto out;
2335 }
2336
2337 int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2338                            u8 tos, struct net_device *dev, bool noref)
2339 {
2340         struct rtable * rth;
2341         unsigned        hash;
2342         int iif = dev->ifindex;
2343         struct net *net;
2344         int res;
2345
2346         net = dev_net(dev);
2347
2348         rcu_read_lock();
2349
2350         if (!rt_caching(net))
2351                 goto skip_cache;
2352
2353         tos &= IPTOS_RT_MASK;
2354         hash = rt_hash(daddr, saddr, iif, rt_genid(net));
2355
2356         for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2357              rth = rcu_dereference(rth->dst.rt_next)) {
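		/* XOR each cached key with the lookup key and OR the results:
		 * the combined value is 0 only when dst, src, iif and tos all
		 * match, avoiding a chain of conditional branches. */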
2358                 if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
2359                      ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
2360                      (rth->rt_route_iif ^ iif) |
2361                      (rth->rt_key_tos ^ tos)) == 0 &&
2362                     rth->rt_mark == skb->mark &&
2363                     net_eq(dev_net(rth->dst.dev), net) &&
2364                     !rt_is_expired(rth)) {
2365                         if (noref) {
2366                                 dst_use_noref(&rth->dst, jiffies);
2367                                 skb_dst_set_noref(skb, &rth->dst);
2368                         } else {
2369                                 dst_use(&rth->dst, jiffies);
2370                                 skb_dst_set(skb, &rth->dst);
2371                         }
2372                         RT_CACHE_STAT_INC(in_hit);
2373                         rcu_read_unlock();
2374                         return 0;
2375                 }
2376                 RT_CACHE_STAT_INC(in_hlist_search);
2377         }
2378
2379 skip_cache:
2380         /* Multicast recognition logic is moved from the route cache to here.
2381            The problem was that too many Ethernet cards have broken/missing
2382            hardware multicast filters :-( As a result, a host on a multicast
2383            network acquires a lot of useless route cache entries, sort of
2384            SDR messages from all over the world. Now we try to get rid of them.
2385            Really, provided the software IP multicast filter is organized
2386            reasonably (at least, hashed), it does not result in a slowdown
2387            compared with route cache reject entries.
2388            Note that multicast routers are not affected, because a
2389            route cache entry is created eventually.
2390          */
2391         if (ipv4_is_multicast(daddr)) {
2392                 struct in_device *in_dev = __in_dev_get_rcu(dev);
2393
2394                 if (in_dev) {
2395                         int our = ip_check_mc_rcu(in_dev, daddr, saddr,
2396                                                   ip_hdr(skb)->protocol);
2397                         if (our
2398 #ifdef CONFIG_IP_MROUTE
2399                                 ||
2400                             (!ipv4_is_local_multicast(daddr) &&
2401                              IN_DEV_MFORWARD(in_dev))
2402 #endif
2403                            ) {
2404                                 int res = ip_route_input_mc(skb, daddr, saddr,
2405                                                             tos, dev, our);
2406                                 rcu_read_unlock();
2407                                 return res;
2408                         }
2409                 }
2410                 rcu_read_unlock();
2411                 return -EINVAL;
2412         }
2413         res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
2414         rcu_read_unlock();
2415         return res;
2416 }
2417 EXPORT_SYMBOL(ip_route_input_common);
2418
2419 /* called with rcu_read_lock() */
2420 static struct rtable *__mkroute_output(const struct fib_result *res,
2421                                        const struct flowi4 *fl4,
2422                                        __be32 orig_daddr, __be32 orig_saddr,
2423                                        int orig_oif, struct net_device *dev_out,
2424                                        unsigned int flags)
2425 {
2426         struct fib_info *fi = res->fi;
2427         u32 tos = RT_FL_TOS(fl4);
2428         struct in_device *in_dev;
2429         u16 type = res->type;
2430         struct rtable *rth;
2431
2432         if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
2433                 return ERR_PTR(-EINVAL);
2434
2435         if (ipv4_is_lbcast(fl4->daddr))
2436                 type = RTN_BROADCAST;
2437         else if (ipv4_is_multicast(fl4->daddr))
2438                 type = RTN_MULTICAST;
2439         else if (ipv4_is_zeronet(fl4->daddr))
2440                 return ERR_PTR(-EINVAL);
2441
2442         if (dev_out->flags & IFF_LOOPBACK)
2443                 flags |= RTCF_LOCAL;
2444
2445         in_dev = __in_dev_get_rcu(dev_out);
2446         if (!in_dev)
2447                 return ERR_PTR(-EINVAL);
2448
2449         if (type == RTN_BROADCAST) {
2450                 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2451                 fi = NULL;
2452         } else if (type == RTN_MULTICAST) {
2453                 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2454                 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2455                                      fl4->flowi4_proto))
2456                         flags &= ~RTCF_LOCAL;
2457                 /* If a multicast route does not exist, use
2458                  * the default one, but do not gateway in this case.
2459                  * Yes, it is a hack.
2460                  */
2461                 if (fi && res->prefixlen < 4)
2462                         fi = NULL;
2463         }
2464
2465         rth = rt_dst_alloc(dev_out,
2466                            IN_DEV_CONF_GET(in_dev, NOPOLICY),
2467                            IN_DEV_CONF_GET(in_dev, NOXFRM));
2468         if (!rth)
2469                 return ERR_PTR(-ENOBUFS);
2470
2471         rth->dst.output = ip_output;
2472
2473         rth->rt_key_dst = orig_daddr;
2474         rth->rt_key_src = orig_saddr;
2475         rth->rt_genid = rt_genid(dev_net(dev_out));
2476         rth->rt_flags   = flags;
2477         rth->rt_type    = type;
2478         rth->rt_key_tos = tos;
2479         rth->rt_dst     = fl4->daddr;
2480         rth->rt_src     = fl4->saddr;
2481         rth->rt_route_iif = 0;
2482         rth->rt_iif     = orig_oif ? : dev_out->ifindex;
2483         rth->rt_oif     = orig_oif;
2484         rth->rt_mark    = fl4->flowi4_mark;
2485         rth->rt_gateway = fl4->daddr;
2486         rth->rt_spec_dst= fl4->saddr;
2487         rth->rt_peer_genid = 0;
2488         rth->peer = NULL;
2489         rth->fi = NULL;
2490
2491         RT_CACHE_STAT_INC(out_slow_tot);
2492
2493         if (flags & RTCF_LOCAL) {
2494                 rth->dst.input = ip_local_deliver;
2495                 rth->rt_spec_dst = fl4->daddr;
2496         }
2497         if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2498                 rth->rt_spec_dst = fl4->saddr;
2499                 if (flags & RTCF_LOCAL &&
2500                     !(dev_out->flags & IFF_LOOPBACK)) {
2501                         rth->dst.output = ip_mc_output;
2502                         RT_CACHE_STAT_INC(out_slow_mc);
2503                 }
2504 #ifdef CONFIG_IP_MROUTE
2505                 if (type == RTN_MULTICAST) {
2506                         if (IN_DEV_MFORWARD(in_dev) &&
2507                             !ipv4_is_local_multicast(fl4->daddr)) {
2508                                 rth->dst.input = ip_mr_input;
2509                                 rth->dst.output = ip_mc_output;
2510                         }
2511                 }
2512 #endif
2513         }
2514
2515         rt_set_nexthop(rth, fl4, res, fi, type, 0);
2516
2517         return rth;
2518 }
2519
2520 /*
2521  * Major route resolver routine.
2522  * called with rcu_read_lock();
2523  */
2524
2525 static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
2526 {
2527         struct net_device *dev_out = NULL;
2528         u32 tos = RT_FL_TOS(fl4);
2529         unsigned int flags = 0;
2530         struct fib_result res;
2531         struct rtable *rth;
2532         __be32 orig_daddr;
2533         __be32 orig_saddr;
2534         int orig_oif;
2535
2536         res.fi          = NULL;
2537 #ifdef CONFIG_IP_MULTIPLE_TABLES
2538         res.r           = NULL;
2539 #endif
2540
2541         orig_daddr = fl4->daddr;
2542         orig_saddr = fl4->saddr;
2543         orig_oif = fl4->flowi4_oif;
2544
2545         fl4->flowi4_iif = net->loopback_dev->ifindex;
2546         fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2547         fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2548                          RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2549
2550         rcu_read_lock();
2551         if (fl4->saddr) {
2552                 rth = ERR_PTR(-EINVAL);
2553                 if (ipv4_is_multicast(fl4->saddr) ||
2554                     ipv4_is_lbcast(fl4->saddr) ||
2555                     ipv4_is_zeronet(fl4->saddr))
2556                         goto out;
2557
2558                 /* I removed the check for oif == dev_out->oif here.
2559                    It was wrong for two reasons:
2560                    1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2561                       is assigned to multiple interfaces.
2562                    2. Moreover, we are allowed to send packets with the saddr
2563                       of another iface. --ANK
2564                  */
2565
2566                 if (fl4->flowi4_oif == 0 &&
2567                     (ipv4_is_multicast(fl4->daddr) ||
2568                      ipv4_is_lbcast(fl4->daddr))) {
2569                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2570                         dev_out = __ip_dev_find(net, fl4->saddr, false);
2571                         if (dev_out == NULL)
2572                                 goto out;
2573
2574                         /* Special hack: the user can direct multicasts
2575                            and limited broadcast via the necessary interface
2576                            without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2577                            This hack is not just for fun, it allows
2578                            vic, vat and friends to work.
2579                            They bind the socket to loopback, set ttl to zero
2580                            and expect that it will work.
2581                            From the viewpoint of the routing cache they are broken,
2582                            because we are not allowed to build a multicast path
2583                            with a loopback source addr (look, the routing cache
2584                            cannot know that ttl is zero, so the packet
2585                            will not leave this host and the route is valid).
2586                            Luckily, this hack is a good workaround.
2587                          */
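
			/*
			 * Userspace illustration of the hack above
			 * (hypothetical snippet, names assumed): binding the
			 * socket to an address owned by eth0 and sending to a
			 * multicast group with no IP_MULTICAST_IF set makes
			 * the __ip_dev_find(saddr) lookup above pick eth0:
			 *
			 *	struct sockaddr_in local = {
			 *		.sin_family = AF_INET,
			 *		.sin_addr.s_addr = eth0_addr,
			 *	};
			 *	bind(fd, (struct sockaddr *)&local, sizeof(local));
			 *	sendto(fd, buf, len, 0,
			 *	       (struct sockaddr *)&group, sizeof(group));
			 */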
2588
2589                         fl4->flowi4_oif = dev_out->ifindex;
2590                         goto make_route;
2591                 }
2592
2593                 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2594                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2595                         if (!__ip_dev_find(net, fl4->saddr, false))
2596                                 goto out;
2597                 }
2598         }
2599
2600
2601         if (fl4->flowi4_oif) {
2602                 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2603                 rth = ERR_PTR(-ENODEV);
2604                 if (dev_out == NULL)
2605                         goto out;
2606
2607                 /* RACE: Check return value of inet_select_addr instead. */
2608                 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2609                         rth = ERR_PTR(-ENETUNREACH);
2610                         goto out;
2611                 }
2612                 if (ipv4_is_local_multicast(fl4->daddr) ||
2613                     ipv4_is_lbcast(fl4->daddr)) {
2614                         if (!fl4->saddr)
2615                                 fl4->saddr = inet_select_addr(dev_out, 0,
2616                                                               RT_SCOPE_LINK);
2617                         goto make_route;
2618                 }
2619                 if (fl4->saddr) {
2620                         if (ipv4_is_multicast(fl4->daddr))
2621                                 fl4->saddr = inet_select_addr(dev_out, 0,
2622                                                               fl4->flowi4_scope);
2623                         else if (!fl4->daddr)
2624                                 fl4->saddr = inet_select_addr(dev_out, 0,
2625                                                               RT_SCOPE_HOST);
2626                 }
2627         }
2628
2629         if (!fl4->daddr) {
2630                 fl4->daddr = fl4->saddr;
2631                 if (!fl4->daddr)
2632                         fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2633                 dev_out = net->loopback_dev;
2634                 fl4->flowi4_oif = net->loopback_dev->ifindex;
2635                 res.type = RTN_LOCAL;
2636                 flags |= RTCF_LOCAL;
2637                 goto make_route;
2638         }
2639
2640         if (fib_lookup(net, fl4, &res)) {
2641                 res.fi = NULL;
2642                 if (fl4->flowi4_oif) {
2643                         /* Apparently, the routing tables are wrong. Assume
2644                            that the destination is on-link.
2645
2646                            WHY? DW.
2647                            Because we are allowed to send to an iface
2648                            even if it has NO routes and NO assigned
2649                            addresses. When oif is specified, the routing
2650                            tables are looked up with only one purpose:
2651                            to catch whether the destination is gatewayed, rather than
2652                            direct. Moreover, if MSG_DONTROUTE is set,
2653                            we send the packet, ignoring both the routing tables
2654                            and ifaddr state. --ANK
2655
2656
2657                            We could do it even when oif is unknown,
2658                            as IPv6 likely does, but we do not.
2659                          */
2660
2661                         if (fl4->saddr == 0)
2662                                 fl4->saddr = inet_select_addr(dev_out, 0,
2663                                                               RT_SCOPE_LINK);
2664                         res.type = RTN_UNICAST;
2665                         goto make_route;
2666                 }
2667                 rth = ERR_PTR(-ENETUNREACH);
2668                 goto out;
2669         }
2670
2671         if (res.type == RTN_LOCAL) {
2672                 if (!fl4->saddr) {
2673                         if (res.fi->fib_prefsrc)
2674                                 fl4->saddr = res.fi->fib_prefsrc;
2675                         else
2676                                 fl4->saddr = fl4->daddr;
2677                 }
2678                 dev_out = net->loopback_dev;
2679                 fl4->flowi4_oif = dev_out->ifindex;
2680                 res.fi = NULL;
2681                 flags |= RTCF_LOCAL;
2682                 goto make_route;
2683         }
2684
2685 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2686         if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
2687                 fib_select_multipath(&res);
2688         else
2689 #endif
2690         if (!res.prefixlen &&
2691             res.table->tb_num_default > 1 &&
2692             res.type == RTN_UNICAST && !fl4->flowi4_oif)
2693                 fib_select_default(&res);
2694
2695         if (!fl4->saddr)
2696                 fl4->saddr = FIB_RES_PREFSRC(net, res);
2697
2698         dev_out = FIB_RES_DEV(res);
2699         fl4->flowi4_oif = dev_out->ifindex;
2700
2701
2702 make_route:
2703         rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
2704                                dev_out, flags);
2705         if (!IS_ERR(rth)) {
2706                 unsigned int hash;
2707
2708                 hash = rt_hash(orig_daddr, orig_saddr, orig_oif,
2709                                rt_genid(dev_net(dev_out)));
2710                 rth = rt_intern_hash(hash, rth, NULL, orig_oif);
2711         }
2712
2713 out:
2714         rcu_read_unlock();
2715         return rth;
2716 }
2717
2718 struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
2719 {
2720         struct rtable *rth;
2721         unsigned int hash;
2722
2723         if (!rt_caching(net))
2724                 goto slow_output;
2725
2726         hash = rt_hash(flp4->daddr, flp4->saddr, flp4->flowi4_oif, rt_genid(net));
2727
2728         rcu_read_lock_bh();
2729         for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
2730                 rth = rcu_dereference_bh(rth->dst.rt_next)) {
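		/* TOS matches under IPTOS_RT_MASK | RTO_ONLINK, so only the
		 * routing-relevant TOS bits plus the RTO_ONLINK pseudo-flag
		 * take part in the comparison; other ToS/ECN bits are
		 * ignored. */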
2731                 if (rth->rt_key_dst == flp4->daddr &&
2732                     rth->rt_key_src == flp4->saddr &&
2733                     rt_is_output_route(rth) &&
2734                     rth->rt_oif == flp4->flowi4_oif &&
2735                     rth->rt_mark == flp4->flowi4_mark &&
2736                     !((rth->rt_key_tos ^ flp4->flowi4_tos) &
2737                             (IPTOS_RT_MASK | RTO_ONLINK)) &&
2738                     net_eq(dev_net(rth->dst.dev), net) &&
2739                     !rt_is_expired(rth)) {
2740                         dst_use(&rth->dst, jiffies);
2741                         RT_CACHE_STAT_INC(out_hit);
2742                         rcu_read_unlock_bh();
2743                         if (!flp4->saddr)
2744                                 flp4->saddr = rth->rt_src;
2745                         if (!flp4->daddr)
2746                                 flp4->daddr = rth->rt_dst;
2747                         return rth;
2748                 }
2749                 RT_CACHE_STAT_INC(out_hlist_search);
2750         }
2751         rcu_read_unlock_bh();
2752
2753 slow_output:
2754         return ip_route_output_slow(net, flp4);
2755 }
2756 EXPORT_SYMBOL_GPL(__ip_route_output_key);
2757
2758 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2759 {
2760         return NULL;
2761 }
2762
2763 static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
2764 {
2765         return 0;
2766 }
2767
2768 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2769 {
2770 }
2771
2772 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2773                                           unsigned long old)
2774 {
2775         return NULL;
2776 }
2777
2778 static struct dst_ops ipv4_dst_blackhole_ops = {
2779         .family                 =       AF_INET,
2780         .protocol               =       cpu_to_be16(ETH_P_IP),
2781         .destroy                =       ipv4_dst_destroy,
2782         .check                  =       ipv4_blackhole_dst_check,
2783         .default_mtu            =       ipv4_blackhole_default_mtu,
2784         .default_advmss         =       ipv4_default_advmss,
2785         .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
2786         .cow_metrics            =       ipv4_rt_blackhole_cow_metrics,
2787         .neigh_lookup           =       ipv4_neigh_lookup,
2788 };
2789
2790 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2791 {
2792         struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, 0, 0);
2793         struct rtable *ort = (struct rtable *) dst_orig;
2794
2795         if (rt) {
2796                 struct dst_entry *new = &rt->dst;
2797
2798                 new->__use = 1;
2799                 new->input = dst_discard;
2800                 new->output = dst_discard;
2801                 dst_copy_metrics(new, &ort->dst);
2802
2803                 new->dev = ort->dst.dev;
2804                 if (new->dev)
2805                         dev_hold(new->dev);
2806
2807                 rt->rt_key_dst = ort->rt_key_dst;
2808                 rt->rt_key_src = ort->rt_key_src;
2809                 rt->rt_key_tos = ort->rt_key_tos;
2810                 rt->rt_route_iif = ort->rt_route_iif;
2811                 rt->rt_iif = ort->rt_iif;
2812                 rt->rt_oif = ort->rt_oif;
2813                 rt->rt_mark = ort->rt_mark;
2814
2815                 rt->rt_genid = rt_genid(net);
2816                 rt->rt_flags = ort->rt_flags;
2817                 rt->rt_type = ort->rt_type;
2818                 rt->rt_dst = ort->rt_dst;
2819                 rt->rt_src = ort->rt_src;
2820                 rt->rt_gateway = ort->rt_gateway;
2821                 rt->rt_spec_dst = ort->rt_spec_dst;
2822                 rt->peer = ort->peer;
2823                 if (rt->peer)
2824                         atomic_inc(&rt->peer->refcnt);
2825                 rt->fi = ort->fi;
2826                 if (rt->fi)
2827                         atomic_inc(&rt->fi->fib_clntref);
2828
2829                 dst_free(new);
2830         }
2831
2832         dst_release(dst_orig);
2833
2834         return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2835 }
2836
2837 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2838                                     struct sock *sk)
2839 {
2840         struct rtable *rt = __ip_route_output_key(net, flp4);
2841
2842         if (IS_ERR(rt))
2843                 return rt;
2844
2845         if (flp4->flowi4_proto)
2846                 rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2847                                                    flowi4_to_flowi(flp4),
2848                                                    sk, 0);
2849
2850         return rt;
2851 }
2852 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2853
static int rt_fill_info(struct net *net,
			struct sk_buff *skb, u32 pid, u32 seq, int event,
			int nowait, unsigned int flags)
{
	struct rtable *rt = skb_rtable(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	long expires = 0;
	const struct inet_peer *peer = rt->peer;
	u32 id = 0, ts = 0, tsage = 0, error;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family	 = AF_INET;
	r->rtm_dst_len	= 32;
	r->rtm_src_len	= 0;
	r->rtm_tos	= rt->rt_key_tos;
	r->rtm_table	= RT_TABLE_MAIN;
	NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
	r->rtm_type	= rt->rt_type;
	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);

	if (rt->rt_key_src) {
		r->rtm_src_len = 32;
		NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src);
	}
	if (rt->dst.dev)
		NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid)
		NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
#endif
	if (rt_is_input_route(rt))
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
	else if (rt->rt_src != rt->rt_key_src)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);

	if (rt->rt_dst != rt->rt_gateway)
		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);

	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
		goto nla_put_failure;

	if (rt->rt_mark)
		NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);

	error = rt->dst.error;
	if (peer) {
		inet_peer_refcheck(rt->peer);
		id = atomic_read(&peer->ip_id_count) & 0xffff;
		if (peer->tcp_ts_stamp) {
			ts = peer->tcp_ts;
			tsage = get_seconds() - peer->tcp_ts_stamp;
		}
		expires = ACCESS_ONCE(peer->pmtu_expires);
		if (expires)
			expires -= jiffies;
	}

	if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
		__be32 dst = rt->rt_dst;

		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
			int err = ipmr_get_route(net, skb,
						 rt->rt_src, rt->rt_dst,
						 r, nowait);
			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
					error = err;
				}
			}
		} else
#endif
			NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif);
	}

	if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
			       expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

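/*
 * RTM_GETROUTE handler.  Builds a dummy skb just big enough to carry
 * an IP header, resolves the route the same way a real packet would,
 * and unicasts the answer back to the requesting socket.
 */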
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	int mark;
	struct sk_buff *skb;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers; this skb can pass
	 * through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input() enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;

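	/*
	 * An RTA_IIF attribute means the request asks how a packet
	 * *received* on that interface would be routed, so run the
	 * input path; otherwise do an ordinary output lookup.
	 */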
	if (iif) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol	= htons(ETH_P_IP);
		skb->dev	= dev;
		skb->mark	= mark;
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		struct flowi4 fl4 = {
			.daddr = dst,
			.saddr = src,
			.flowi4_tos = rtm->rtm_tos,
			.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
			.flowi4_mark = mark,
		};
		rt = ip_route_output_key(net, &fl4);

		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
	}

	if (err)
		goto errout_free;

	skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}

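/*
 * Netlink dump callback: walk the whole route cache hash table under
 * rcu_read_lock_bh() and emit one RTM_NEWROUTE message per entry that
 * belongs to this namespace and has not expired.  cb->args[0] and
 * cb->args[1] record the bucket and chain position so an interrupted
 * dump can resume where it left off.
 */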
int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtable *rt;
	int h, s_h;
	int idx, s_idx;
	struct net *net;

	net = sock_net(skb->sk);

	s_h = cb->args[0];
	if (s_h < 0)
		s_h = 0;
	s_idx = idx = cb->args[1];
	for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
		if (!rt_hash_table[h].chain)
			continue;
		rcu_read_lock_bh();
		for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
		     rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
			if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
				continue;
			if (rt_is_expired(rt))
				continue;
			skb_dst_set_noref(skb, &rt->dst);
			if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					 1, NLM_F_MULTI) <= 0) {
				skb_dst_drop(skb);
				rcu_read_unlock_bh();
				goto done;
			}
			skb_dst_drop(skb);
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}

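/*
 * Multicast configuration on @in_dev changed; cached routes may now
 * be wrong, so flush the cache for its namespace right away.
 */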
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev), 0);
}

#ifdef CONFIG_SYSCTL
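/*
 * Handler for /proc/sys/net/ipv4/route/flush.  The file is write-only
 * (mode 0200): the integer written becomes the flush delay handed to
 * rt_cache_flush() for the namespace stashed in ->extra1, so e.g.
 * "echo 0 > /proc/sys/net/ipv4/route/flush" flushes immediately.
 */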
static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
					void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	if (write) {
		int flush_delay;
		ctl_table ctl;
		struct net *net;

		memcpy(&ctl, __ctl, sizeof(ctl));
		ctl.data = &flush_delay;
		proc_dointvec(&ctl, write, buffer, lenp, ppos);

		net = (struct net *)__ctl->extra1;
		rt_cache_flush(net, flush_delay);
		return 0;
	}

	return -EINVAL;
}

static ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table empty[1];

static struct ctl_table ipv4_skeleton[] = {
	{ .procname = "route", .mode = 0555, .child = ipv4_route_table },
	{ .procname = "neigh", .mode = 0555, .child = empty },
	{ }
};

static __net_initdata struct ctl_path ipv4_path[] = {
	{ .procname = "net", },
	{ .procname = "ipv4", },
	{ },
};

static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },
};

static __net_initdata struct ctl_path ipv4_route_path[] = {
	{ .procname = "net", },
	{ .procname = "ipv4", },
	{ .procname = "route", },
	{ },
};

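/*
 * Register the per-namespace "flush" sysctl.  Every namespace except
 * init_net gets its own copy of the template table so that ->extra1
 * can point at the owning struct net.
 */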
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (tbl == NULL)
			goto err_dup;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr =
		register_net_sysctl_table(net, ipv4_route_path, tbl);
	if (net->ipv4.route_hdr == NULL)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif

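/*
 * Seed the per-namespace route and device-address generation counters
 * with random values; bumping rt_genid later invalidates every cached
 * route in the namespace in one step.
 */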
static __net_init int rt_genid_init(struct net *net)
{
	get_random_bytes(&net->ipv4.rt_genid,
			 sizeof(net->ipv4.rt_genid));
	get_random_bytes(&net->ipv4.dev_addr_genid,
			 sizeof(net->ipv4.dev_addr_genid));
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */

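/*
 * "rhash_entries=N" on the kernel command line overrides the sizing
 * heuristic for the route cache hash table allocated in ip_rt_init().
 */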
static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
{
	if (!str)
		return 0;
	rhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("rhash_entries=", set_rhash_entries);

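/*
 * Boot-time setup: create the dst slabs, allocate and size the route
 * cache hash table, initialise devinet and the FIB, and register the
 * RTM_GETROUTE handler plus the sysctl and genid pernet operations.
 */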
int __init ip_rt_init(void)
{
	int rc = 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	rt_hash_table = (struct rt_hash_bucket *)
		alloc_large_system_hash("IP route cache",
					sizeof(struct rt_hash_bucket),
					rhash_entries,
					(totalram_pages >= 128 * 1024) ?
					15 : 17,
					0,
					&rt_hash_log,
					&rt_hash_mask,
					rhash_entries ? 0 : 512 * 1024);
	memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
	rt_hash_lock_init();

	ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
	ip_rt_max_size = (rt_hash_mask + 1) * 16;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init(ip_rt_max_size);
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	return rc;
}

#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_sysctl_paths(ipv4_path, ipv4_skeleton);
}
#endif