net/ipv4/route.c (linux-3.10.git)
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              ROUTE - implementation of the IP router.
7  *
8  * Authors:     Ross Biro
9  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
11  *              Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12  *              Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13  *
14  * Fixes:
15  *              Alan Cox        :       Verify area fixes.
16  *              Alan Cox        :       cli() protects routing changes
17  *              Rui Oliveira    :       ICMP routing table updates
18  *              (rco@di.uminho.pt)      Routing table insertion and update
19  *              Linus Torvalds  :       Rewrote bits to be sensible
20  *              Alan Cox        :       Added BSD route gw semantics
21  *              Alan Cox        :       Super /proc >4K
22  *              Alan Cox        :       MTU in route table
23  *              Alan Cox        :       MSS actually. Also added the window
24  *                                      clamper.
25  *              Sam Lantinga    :       Fixed route matching in rt_del()
26  *              Alan Cox        :       Routing cache support.
27  *              Alan Cox        :       Removed compatibility cruft.
28  *              Alan Cox        :       RTF_REJECT support.
29  *              Alan Cox        :       TCP irtt support.
30  *              Jonathan Naylor :       Added Metric support.
31  *      Miquel van Smoorenburg  :       BSD API fixes.
32  *      Miquel van Smoorenburg  :       Metrics.
33  *              Alan Cox        :       Use __u32 properly
34  *              Alan Cox        :       Aligned routing errors more closely with BSD;
35  *                                      our system is still very different.
36  *              Alan Cox        :       Faster /proc handling
37  *      Alexey Kuznetsov        :       Massive rework to support tree based routing,
38  *                                      routing caches and better behaviour.
39  *
40  *              Olaf Erb        :       irtt wasn't being copied right.
41  *              Bjorn Ekwall    :       Kerneld route support.
42  *              Alan Cox        :       Multicast fixed (I hope)
43  *              Pavel Krauz     :       Limited broadcast fixed
44  *              Mike McLagan    :       Routing by source
45  *      Alexey Kuznetsov        :       End of old history. Split to fib.c and
46  *                                      route.c and rewritten from scratch.
47  *              Andi Kleen      :       Load-limit warning messages.
48  *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
49  *      Vitaly E. Lavrov        :       Race condition in ip_route_input_slow.
50  *      Tobias Ringstrom        :       Uninitialized res.type in ip_route_output_slow.
51  *      Vladimir V. Ivanov      :       IP rule info (flowid) is really useful.
52  *              Marc Boucher    :       routing by fwmark
53  *      Robert Olsson           :       Added rt_cache statistics
54  *      Arnaldo C. Melo         :       Convert proc stuff to seq_file
55  *      Eric Dumazet            :       hashed spinlocks and rt_check_expire() fixes.
56  *      Ilia Sotnikov           :       Ignore TOS on PMTUD and Redirect
57  *      Ilia Sotnikov           :       Removed TOS from hash calculations
58  *
59  *              This program is free software; you can redistribute it and/or
60  *              modify it under the terms of the GNU General Public License
61  *              as published by the Free Software Foundation; either version
62  *              2 of the License, or (at your option) any later version.
63  */
64
65 #define pr_fmt(fmt) "IPv4: " fmt
66
67 #include <linux/module.h>
68 #include <asm/uaccess.h>
69 #include <linux/bitops.h>
70 #include <linux/types.h>
71 #include <linux/kernel.h>
72 #include <linux/mm.h>
73 #include <linux/string.h>
74 #include <linux/socket.h>
75 #include <linux/sockios.h>
76 #include <linux/errno.h>
77 #include <linux/in.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/proc_fs.h>
81 #include <linux/init.h>
82 #include <linux/skbuff.h>
83 #include <linux/inetdevice.h>
84 #include <linux/igmp.h>
85 #include <linux/pkt_sched.h>
86 #include <linux/mroute.h>
87 #include <linux/netfilter_ipv4.h>
88 #include <linux/random.h>
89 #include <linux/rcupdate.h>
90 #include <linux/times.h>
91 #include <linux/slab.h>
92 #include <net/dst.h>
93 #include <net/net_namespace.h>
94 #include <net/protocol.h>
95 #include <net/ip.h>
96 #include <net/route.h>
97 #include <net/inetpeer.h>
98 #include <net/sock.h>
99 #include <net/ip_fib.h>
100 #include <net/arp.h>
101 #include <net/tcp.h>
102 #include <net/icmp.h>
103 #include <net/xfrm.h>
104 #include <net/netevent.h>
105 #include <net/rtnetlink.h>
106 #ifdef CONFIG_SYSCTL
107 #include <linux/sysctl.h>
108 #include <linux/kmemleak.h>
109 #endif
110 #include <net/secure_seq.h>
111
112 #define RT_FL_TOS(oldflp4) \
113         ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
114
115 #define IP_MAX_MTU      0xFFF0
116
117 #define RT_GC_TIMEOUT (300*HZ)
118
119 static int ip_rt_max_size;
120 static int ip_rt_gc_timeout __read_mostly       = RT_GC_TIMEOUT;
121 static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
122 static int ip_rt_gc_min_interval __read_mostly  = HZ / 2;
123 static int ip_rt_redirect_number __read_mostly  = 9;
124 static int ip_rt_redirect_load __read_mostly    = HZ / 50;
125 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
126 static int ip_rt_error_cost __read_mostly       = HZ;
127 static int ip_rt_error_burst __read_mostly      = 5 * HZ;
128 static int ip_rt_gc_elasticity __read_mostly    = 8;
129 static int ip_rt_mtu_expires __read_mostly      = 10 * 60 * HZ;
130 static int ip_rt_min_pmtu __read_mostly         = 512 + 20 + 20;
131 static int ip_rt_min_advmss __read_mostly       = 256;
132
133 /*
134  *      Interface to generic destination cache.
135  */
136
137 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
138 static unsigned int      ipv4_default_advmss(const struct dst_entry *dst);
139 static unsigned int      ipv4_mtu(const struct dst_entry *dst);
140 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
141 static void              ipv4_link_failure(struct sk_buff *skb);
142 static void              ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
143                                            struct sk_buff *skb, u32 mtu);
144 static void              ip_do_redirect(struct dst_entry *dst, struct sock *sk,
145                                         struct sk_buff *skb);
146 static void             ipv4_dst_destroy(struct dst_entry *dst);
147
148 static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
149                             int how)
150 {
151 }
152
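/* Since the removal of the routing cache, learned state such as PMTU and
 * redirect information lives in rt_pmtu and the nexthop exceptions rather
 * than in the dst metrics, so nothing should ever need to copy-on-write an
 * IPv4 dst's metrics.  Reaching this callback indicates a bug.
 */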
153 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
154 {
155         WARN_ON(1);
156         return NULL;
157 }
158
159 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
160                                            struct sk_buff *skb,
161                                            const void *daddr);
162
163 static struct dst_ops ipv4_dst_ops = {
164         .family =               AF_INET,
165         .protocol =             cpu_to_be16(ETH_P_IP),
166         .check =                ipv4_dst_check,
167         .default_advmss =       ipv4_default_advmss,
168         .mtu =                  ipv4_mtu,
169         .cow_metrics =          ipv4_cow_metrics,
170         .destroy =              ipv4_dst_destroy,
171         .ifdown =               ipv4_dst_ifdown,
172         .negative_advice =      ipv4_negative_advice,
173         .link_failure =         ipv4_link_failure,
174         .update_pmtu =          ip_rt_update_pmtu,
175         .redirect =             ip_do_redirect,
176         .local_out =            __ip_local_out,
177         .neigh_lookup =         ipv4_neigh_lookup,
178 };
179
180 #define ECN_OR_COST(class)      TC_PRIO_##class
181
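/* Map the four RFC 1349 TOS bits (index = tos >> 1, see rt_tos2priority())
 * to a traffic-control priority band.  The low "minimise monetary cost"
 * bit selects the same band as its neighbour, hence ECN_OR_COST().
 */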
182 const __u8 ip_tos2prio[16] = {
183         TC_PRIO_BESTEFFORT,
184         ECN_OR_COST(BESTEFFORT),
185         TC_PRIO_BESTEFFORT,
186         ECN_OR_COST(BESTEFFORT),
187         TC_PRIO_BULK,
188         ECN_OR_COST(BULK),
189         TC_PRIO_BULK,
190         ECN_OR_COST(BULK),
191         TC_PRIO_INTERACTIVE,
192         ECN_OR_COST(INTERACTIVE),
193         TC_PRIO_INTERACTIVE,
194         ECN_OR_COST(INTERACTIVE),
195         TC_PRIO_INTERACTIVE_BULK,
196         ECN_OR_COST(INTERACTIVE_BULK),
197         TC_PRIO_INTERACTIVE_BULK,
198         ECN_OR_COST(INTERACTIVE_BULK)
199 };
200 EXPORT_SYMBOL(ip_tos2prio);
201
202 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
203 #define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
204
205 #ifdef CONFIG_PROC_FS
206 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
207 {
208         if (*pos)
209                 return NULL;
210         return SEQ_START_TOKEN;
211 }
212
213 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
214 {
215         ++*pos;
216         return NULL;
217 }
218
219 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
220 {
221 }
222
223 static int rt_cache_seq_show(struct seq_file *seq, void *v)
224 {
225         if (v == SEQ_START_TOKEN)
226                 seq_printf(seq, "%-127s\n",
227                            "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
228                            "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
229                            "HHUptod\tSpecDst");
230         return 0;
231 }
232
233 static const struct seq_operations rt_cache_seq_ops = {
234         .start  = rt_cache_seq_start,
235         .next   = rt_cache_seq_next,
236         .stop   = rt_cache_seq_stop,
237         .show   = rt_cache_seq_show,
238 };
239
240 static int rt_cache_seq_open(struct inode *inode, struct file *file)
241 {
242         return seq_open(file, &rt_cache_seq_ops);
243 }
244
245 static const struct file_operations rt_cache_seq_fops = {
246         .owner   = THIS_MODULE,
247         .open    = rt_cache_seq_open,
248         .read    = seq_read,
249         .llseek  = seq_lseek,
250         .release = seq_release,
251 };
252
253
254 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
255 {
256         int cpu;
257
258         if (*pos == 0)
259                 return SEQ_START_TOKEN;
260
261         for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
262                 if (!cpu_possible(cpu))
263                         continue;
264                 *pos = cpu+1;
265                 return &per_cpu(rt_cache_stat, cpu);
266         }
267         return NULL;
268 }
269
270 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
271 {
272         int cpu;
273
274         for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
275                 if (!cpu_possible(cpu))
276                         continue;
277                 *pos = cpu+1;
278                 return &per_cpu(rt_cache_stat, cpu);
279         }
280         return NULL;
281
282 }
283
284 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
285 {
286
287 }
288
289 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
290 {
291         struct rt_cache_stat *st = v;
292
293         if (v == SEQ_START_TOKEN) {
294                 seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
295                 return 0;
296         }
297
298         seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
299                    " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
300                    dst_entries_get_slow(&ipv4_dst_ops),
301                    st->in_hit,
302                    st->in_slow_tot,
303                    st->in_slow_mc,
304                    st->in_no_route,
305                    st->in_brd,
306                    st->in_martian_dst,
307                    st->in_martian_src,
308
309                    st->out_hit,
310                    st->out_slow_tot,
311                    st->out_slow_mc,
312
313                    st->gc_total,
314                    st->gc_ignored,
315                    st->gc_goal_miss,
316                    st->gc_dst_overflow,
317                    st->in_hlist_search,
318                    st->out_hlist_search
319                 );
320         return 0;
321 }
322
323 static const struct seq_operations rt_cpu_seq_ops = {
324         .start  = rt_cpu_seq_start,
325         .next   = rt_cpu_seq_next,
326         .stop   = rt_cpu_seq_stop,
327         .show   = rt_cpu_seq_show,
328 };
329
330
331 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
332 {
333         return seq_open(file, &rt_cpu_seq_ops);
334 }
335
336 static const struct file_operations rt_cpu_seq_fops = {
337         .owner   = THIS_MODULE,
338         .open    = rt_cpu_seq_open,
339         .read    = seq_read,
340         .llseek  = seq_lseek,
341         .release = seq_release,
342 };
343
344 #ifdef CONFIG_IP_ROUTE_CLASSID
345 static int rt_acct_proc_show(struct seq_file *m, void *v)
346 {
347         struct ip_rt_acct *dst, *src;
348         unsigned int i, j;
349
350         dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
351         if (!dst)
352                 return -ENOMEM;
353
354         for_each_possible_cpu(i) {
355                 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
356                 for (j = 0; j < 256; j++) {
357                         dst[j].o_bytes   += src[j].o_bytes;
358                         dst[j].o_packets += src[j].o_packets;
359                         dst[j].i_bytes   += src[j].i_bytes;
360                         dst[j].i_packets += src[j].i_packets;
361                 }
362         }
363
364         seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
365         kfree(dst);
366         return 0;
367 }
368
369 static int rt_acct_proc_open(struct inode *inode, struct file *file)
370 {
371         return single_open(file, rt_acct_proc_show, NULL);
372 }
373
374 static const struct file_operations rt_acct_proc_fops = {
375         .owner          = THIS_MODULE,
376         .open           = rt_acct_proc_open,
377         .read           = seq_read,
378         .llseek         = seq_lseek,
379         .release        = single_release,
380 };
381 #endif
382
383 static int __net_init ip_rt_do_proc_init(struct net *net)
384 {
385         struct proc_dir_entry *pde;
386
387         pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
388                         &rt_cache_seq_fops);
389         if (!pde)
390                 goto err1;
391
392         pde = proc_create("rt_cache", S_IRUGO,
393                           net->proc_net_stat, &rt_cpu_seq_fops);
394         if (!pde)
395                 goto err2;
396
397 #ifdef CONFIG_IP_ROUTE_CLASSID
398         pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
399         if (!pde)
400                 goto err3;
401 #endif
402         return 0;
403
404 #ifdef CONFIG_IP_ROUTE_CLASSID
405 err3:
406         remove_proc_entry("rt_cache", net->proc_net_stat);
407 #endif
408 err2:
409         remove_proc_entry("rt_cache", net->proc_net);
410 err1:
411         return -ENOMEM;
412 }
413
414 static void __net_exit ip_rt_do_proc_exit(struct net *net)
415 {
416         remove_proc_entry("rt_cache", net->proc_net_stat);
417         remove_proc_entry("rt_cache", net->proc_net);
418 #ifdef CONFIG_IP_ROUTE_CLASSID
419         remove_proc_entry("rt_acct", net->proc_net);
420 #endif
421 }
422
423 static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
424         .init = ip_rt_do_proc_init,
425         .exit = ip_rt_do_proc_exit,
426 };
427
428 static int __init ip_rt_proc_init(void)
429 {
430         return register_pernet_subsys(&ip_rt_proc_ops);
431 }
432
433 #else
434 static inline int ip_rt_proc_init(void)
435 {
436         return 0;
437 }
438 #endif /* CONFIG_PROC_FS */
439
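/* Each cached route records the generation id of its namespace at creation
 * time.  rt_cache_flush() simply bumps that generation, which lazily
 * invalidates every existing route at its next validation.
 */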
440 static inline bool rt_is_expired(const struct rtable *rth)
441 {
442         return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
443 }
444
445 void rt_cache_flush(struct net *net)
446 {
447         rt_genid_bump(net);
448 }
449
450 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
451                                            struct sk_buff *skb,
452                                            const void *daddr)
453 {
454         struct net_device *dev = dst->dev;
455         const __be32 *pkey = daddr;
456         const struct rtable *rt;
457         struct neighbour *n;
458
459         rt = (const struct rtable *) dst;
460         if (rt->rt_gateway)
461                 pkey = (const __be32 *) &rt->rt_gateway;
462         else if (skb)
463                 pkey = &ip_hdr(skb)->daddr;
464
465         n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
466         if (n)
467                 return n;
468         return neigh_create(&arp_tbl, pkey, dev);
469 }
470
471 /*
472  * Peer allocation may fail only in serious out-of-memory conditions.  However,
473  * we can still generate some output.
474  * Random ID selection looks a bit dangerous because we have no chance of
475  * selecting an ID that is unique within a reasonable period of time.
476  * But a broken packet identifier may be better than no packet at all.
477  */
478 static void ip_select_fb_ident(struct iphdr *iph)
479 {
480         static DEFINE_SPINLOCK(ip_fb_id_lock);
481         static u32 ip_fallback_id;
482         u32 salt;
483
484         spin_lock_bh(&ip_fb_id_lock);
485         salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
486         iph->id = htons(salt & 0xFFFF);
487         ip_fallback_id = salt;
488         spin_unlock_bh(&ip_fb_id_lock);
489 }
490
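/* Select the IP header id: prefer a per-destination counter kept in the
 * inetpeer cache, and fall back to the hashed global counter above only
 * when peer allocation fails under memory pressure.
 */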
491 void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
492 {
493         struct net *net = dev_net(dst->dev);
494         struct inet_peer *peer;
495
496         peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
497         if (peer) {
498                 iph->id = htons(inet_getid(peer, more));
499                 inet_putpeer(peer);
500                 return;
501         }
502
503         ip_select_fb_ident(iph);
504 }
505 EXPORT_SYMBOL(__ip_select_ident);
506
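/* Build a flowi4 key from the packet's addresses.  When a socket is given,
 * its bound device, mark, TOS and protocol override the caller-supplied
 * values, so the key matches what connected output would have used.
 */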
507 static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
508                              const struct iphdr *iph,
509                              int oif, u8 tos,
510                              u8 prot, u32 mark, int flow_flags)
511 {
512         if (sk) {
513                 const struct inet_sock *inet = inet_sk(sk);
514
515                 oif = sk->sk_bound_dev_if;
516                 mark = sk->sk_mark;
517                 tos = RT_CONN_FLAGS(sk);
518                 prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
519         }
520         flowi4_init_output(fl4, oif, mark, tos,
521                            RT_SCOPE_UNIVERSE, prot,
522                            flow_flags,
523                            iph->daddr, iph->saddr, 0, 0);
524 }
525
526 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
527                                const struct sock *sk)
528 {
529         const struct iphdr *iph = ip_hdr(skb);
530         int oif = skb->dev->ifindex;
531         u8 tos = RT_TOS(iph->tos);
532         u8 prot = iph->protocol;
533         u32 mark = skb->mark;
534
535         __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
536 }
537
538 static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
539 {
540         const struct inet_sock *inet = inet_sk(sk);
541         const struct ip_options_rcu *inet_opt;
542         __be32 daddr = inet->inet_daddr;
543
544         rcu_read_lock();
545         inet_opt = rcu_dereference(inet->inet_opt);
546         if (inet_opt && inet_opt->opt.srr)
547                 daddr = inet_opt->opt.faddr;
548         flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
549                            RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
550                            inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
551                            inet_sk_flowi_flags(sk),
552                            daddr, inet->inet_saddr, 0, 0);
553         rcu_read_unlock();
554 }
555
556 static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
557                                  const struct sk_buff *skb)
558 {
559         if (skb)
560                 build_skb_flow_key(fl4, skb, sk);
561         else
562                 build_sk_flow_key(fl4, sk);
563 }
564
565 static inline void rt_free(struct rtable *rt)
566 {
567         call_rcu(&rt->dst.rcu_head, dst_rcu_free);
568 }
569
570 static DEFINE_SPINLOCK(fnhe_lock);
571
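/* Recycle the least recently stamped exception in a bucket, dropping its
 * cached route.  The caller holds fnhe_lock and only gets here once the
 * chain depth exceeds FNHE_RECLAIM_DEPTH, so hash->chain is never NULL.
 */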
572 static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
573 {
574         struct fib_nh_exception *fnhe, *oldest;
575         struct rtable *orig;
576
577         oldest = rcu_dereference(hash->chain);
578         for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
579              fnhe = rcu_dereference(fnhe->fnhe_next)) {
580                 if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
581                         oldest = fnhe;
582         }
583         orig = rcu_dereference(oldest->fnhe_rth);
584         if (orig) {
585                 RCU_INIT_POINTER(oldest->fnhe_rth, NULL);
586                 rt_free(orig);
587         }
588         return oldest;
589 }
590
591 static inline u32 fnhe_hashfun(__be32 daddr)
592 {
593         u32 hval;
594
595         hval = (__force u32) daddr;
596         hval ^= (hval >> 11) ^ (hval >> 22);
597
598         return hval & (FNHE_HASH_SIZE - 1);
599 }
600
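/* Record state learned from ICMP (a new gateway from a redirect, a PMTU
 * from frag-needed) as a per-nexthop exception keyed by destination.  The
 * bucket array is allocated on first use, and chains deeper than
 * FNHE_RECLAIM_DEPTH recycle their oldest entry instead of growing.
 */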
601 static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
602                                   u32 pmtu, unsigned long expires)
603 {
604         struct fnhe_hash_bucket *hash;
605         struct fib_nh_exception *fnhe;
606         int depth;
607         u32 hval = fnhe_hashfun(daddr);
608
609         spin_lock_bh(&fnhe_lock);
610
611         hash = nh->nh_exceptions;
612         if (!hash) {
613                 hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
614                 if (!hash)
615                         goto out_unlock;
616                 nh->nh_exceptions = hash;
617         }
618
619         hash += hval;
620
621         depth = 0;
622         for (fnhe = rcu_dereference(hash->chain); fnhe;
623              fnhe = rcu_dereference(fnhe->fnhe_next)) {
624                 if (fnhe->fnhe_daddr == daddr)
625                         break;
626                 depth++;
627         }
628
629         if (fnhe) {
630                 if (gw)
631                         fnhe->fnhe_gw = gw;
632                 if (pmtu) {
633                         fnhe->fnhe_pmtu = pmtu;
634                         fnhe->fnhe_expires = expires;
635                 }
636         } else {
637                 if (depth > FNHE_RECLAIM_DEPTH)
638                         fnhe = fnhe_oldest(hash);
639                 else {
640                         fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
641                         if (!fnhe)
642                                 goto out_unlock;
643
644                         fnhe->fnhe_next = hash->chain;
645                         rcu_assign_pointer(hash->chain, fnhe);
646                 }
647                 fnhe->fnhe_daddr = daddr;
648                 fnhe->fnhe_gw = gw;
649                 fnhe->fnhe_pmtu = pmtu;
650                 fnhe->fnhe_expires = expires;
651         }
652
653         fnhe->fnhe_stamp = jiffies;
654
655 out_unlock:
656         spin_unlock_bh(&fnhe_lock);
657         return;
658 }
659
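/* Validate and apply an ICMP redirect: it must carry a known redirect
 * code, come from our current gateway and advertise a sane on-link unicast
 * gateway.  Once the new gateway's neighbour entry is valid, the new path
 * is recorded as a nexthop exception and the old route may be killed.
 */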
660 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
661                              bool kill_route)
662 {
663         __be32 new_gw = icmp_hdr(skb)->un.gateway;
664         __be32 old_gw = ip_hdr(skb)->saddr;
665         struct net_device *dev = skb->dev;
666         struct in_device *in_dev;
667         struct fib_result res;
668         struct neighbour *n;
669         struct net *net;
670
671         switch (icmp_hdr(skb)->code & 7) {
672         case ICMP_REDIR_NET:
673         case ICMP_REDIR_NETTOS:
674         case ICMP_REDIR_HOST:
675         case ICMP_REDIR_HOSTTOS:
676                 break;
677
678         default:
679                 return;
680         }
681
682         if (rt->rt_gateway != old_gw)
683                 return;
684
685         in_dev = __in_dev_get_rcu(dev);
686         if (!in_dev)
687                 return;
688
689         net = dev_net(dev);
690         if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
691             ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
692             ipv4_is_zeronet(new_gw))
693                 goto reject_redirect;
694
695         if (!IN_DEV_SHARED_MEDIA(in_dev)) {
696                 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
697                         goto reject_redirect;
698                 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
699                         goto reject_redirect;
700         } else {
701                 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
702                         goto reject_redirect;
703         }
704
705         n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
706         if (n) {
707                 if (!(n->nud_state & NUD_VALID)) {
708                         neigh_event_send(n, NULL);
709                 } else {
710                         if (fib_lookup(net, fl4, &res) == 0) {
711                                 struct fib_nh *nh = &FIB_RES_NH(res);
712
713                                 update_or_create_fnhe(nh, fl4->daddr, new_gw,
714                                                       0, 0);
715                         }
716                         if (kill_route)
717                                 rt->dst.obsolete = DST_OBSOLETE_KILL;
718                         call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
719                 }
720                 neigh_release(n);
721         }
722         return;
723
724 reject_redirect:
725 #ifdef CONFIG_IP_ROUTE_VERBOSE
726         if (IN_DEV_LOG_MARTIANS(in_dev)) {
727                 const struct iphdr *iph = (const struct iphdr *) skb->data;
728                 __be32 daddr = iph->daddr;
729                 __be32 saddr = iph->saddr;
730
731                 net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
732                                      "  Advised path = %pI4 -> %pI4\n",
733                                      &old_gw, dev->name, &new_gw,
734                                      &saddr, &daddr);
735         }
736 #endif
737         ;
738 }
739
740 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
741 {
742         struct rtable *rt;
743         struct flowi4 fl4;
744
745         rt = (struct rtable *) dst;
746
747         ip_rt_build_flow_key(&fl4, sk, skb);
748         __ip_do_redirect(rt, skb, &fl4, true);
749 }
750
751 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
752 {
753         struct rtable *rt = (struct rtable *)dst;
754         struct dst_entry *ret = dst;
755
756         if (rt) {
757                 if (dst->obsolete > 0) {
758                         ip_rt_put(rt);
759                         ret = NULL;
760                 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
761                            rt->dst.expires) {
762                         ip_rt_put(rt);
763                         ret = NULL;
764                 }
765         }
766         return ret;
767 }
768
769 /*
770  * Algorithm:
771  *      1. The first ip_rt_redirect_number redirects are sent
772  *         with exponential backoff; after that we stop sending them,
773  *         assuming that the host ignores our redirects.
774  *      2. If we did not see packets requiring redirects
775  *         during ip_rt_redirect_silence, we assume that the host
776  *         has forgotten the redirected route and start sending redirects again.
777  *
778  * This algorithm is much cheaper and more intelligent than dumb load limiting
779  * in icmp.c.
780  *
781  * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
782  * and "frag. need" (breaks PMTU discovery) in icmp.c.
783  */
784
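/* Worked example, assuming HZ == 1000: ip_rt_redirect_load is then 20ms,
 * so the first redirect goes out immediately and the gaps grow to 40ms,
 * 80ms, ..., doubling per redirect until ip_rt_redirect_number (9) have
 * been sent.  ip_rt_redirect_silence is 20ms << 10, i.e. roughly 20
 * seconds of quiet before the counter resets and redirects resume.
 */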
785 void ip_rt_send_redirect(struct sk_buff *skb)
786 {
787         struct rtable *rt = skb_rtable(skb);
788         struct in_device *in_dev;
789         struct inet_peer *peer;
790         struct net *net;
791         int log_martians;
792
793         rcu_read_lock();
794         in_dev = __in_dev_get_rcu(rt->dst.dev);
795         if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
796                 rcu_read_unlock();
797                 return;
798         }
799         log_martians = IN_DEV_LOG_MARTIANS(in_dev);
800         rcu_read_unlock();
801
802         net = dev_net(rt->dst.dev);
803         peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
804         if (!peer) {
805                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
806                           rt_nexthop(rt, ip_hdr(skb)->daddr));
807                 return;
808         }
809
810         /* No redirected packets during ip_rt_redirect_silence;
811          * reset the algorithm.
812          */
813         if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
814                 peer->rate_tokens = 0;
815
816         /* Too many ignored redirects; do not send anything.
817          * Set dst.rate_last to the last seen redirected packet.
818          */
819         if (peer->rate_tokens >= ip_rt_redirect_number) {
820                 peer->rate_last = jiffies;
821                 goto out_put_peer;
822         }
823
824         /* Check for load limit; set rate_last to the latest sent
825          * redirect.
826          */
827         if (peer->rate_tokens == 0 ||
828             time_after(jiffies,
829                        (peer->rate_last +
830                         (ip_rt_redirect_load << peer->rate_tokens)))) {
831                 __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
832
833                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
834                 peer->rate_last = jiffies;
835                 ++peer->rate_tokens;
836 #ifdef CONFIG_IP_ROUTE_VERBOSE
837                 if (log_martians &&
838                     peer->rate_tokens == ip_rt_redirect_number)
839                         net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
840                                              &ip_hdr(skb)->saddr, inet_iif(skb),
841                                              &ip_hdr(skb)->daddr, &gw);
842 #endif
843         }
844 out_put_peer:
845         inet_putpeer(peer);
846 }
847
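/* dst->input handler for routes carrying an error (e.g. host or net
 * unreachable): bump the SNMP counters and send a rate-limited ICMP
 * destination-unreachable.  The inetpeer's rate_tokens accumulates elapsed
 * jiffies, capped at ip_rt_error_burst, and each ICMP sent spends
 * ip_rt_error_cost of them.
 */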
848 static int ip_error(struct sk_buff *skb)
849 {
850         struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
851         struct rtable *rt = skb_rtable(skb);
852         struct inet_peer *peer;
853         unsigned long now;
854         struct net *net;
855         bool send;
856         int code;
857
858         net = dev_net(rt->dst.dev);
859         if (!IN_DEV_FORWARD(in_dev)) {
860                 switch (rt->dst.error) {
861                 case EHOSTUNREACH:
862                         IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
863                         break;
864
865                 case ENETUNREACH:
866                         IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
867                         break;
868                 }
869                 goto out;
870         }
871
872         switch (rt->dst.error) {
873         case EINVAL:
874         default:
875                 goto out;
876         case EHOSTUNREACH:
877                 code = ICMP_HOST_UNREACH;
878                 break;
879         case ENETUNREACH:
880                 code = ICMP_NET_UNREACH;
881                 IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
882                 break;
883         case EACCES:
884                 code = ICMP_PKT_FILTERED;
885                 break;
886         }
887
888         peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
889
890         send = true;
891         if (peer) {
892                 now = jiffies;
893                 peer->rate_tokens += now - peer->rate_last;
894                 if (peer->rate_tokens > ip_rt_error_burst)
895                         peer->rate_tokens = ip_rt_error_burst;
896                 peer->rate_last = now;
897                 if (peer->rate_tokens >= ip_rt_error_cost)
898                         peer->rate_tokens -= ip_rt_error_cost;
899                 else
900                         send = false;
901                 inet_putpeer(peer);
902         }
903         if (send)
904                 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
905
906 out:    kfree_skb(skb);
907         return 0;
908 }
909
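/* Apply a learned path MTU: ignore values above the device MTU, clamp to
 * ip_rt_min_pmtu, and record the result as a nexthop exception expiring
 * after ip_rt_mtu_expires.  A route without its own rt_pmtu copy is marked
 * DST_OBSOLETE_KILL so that the next lookup rebinds to the exception.
 */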
910 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
911 {
912         struct dst_entry *dst = &rt->dst;
913         struct fib_result res;
914
915         if (dst->dev->mtu < mtu)
916                 return;
917
918         if (mtu < ip_rt_min_pmtu)
919                 mtu = ip_rt_min_pmtu;
920
921         if (!rt->rt_pmtu) {
922                 dst->obsolete = DST_OBSOLETE_KILL;
923         } else {
924                 rt->rt_pmtu = mtu;
925                 dst->expires = max(1UL, jiffies + ip_rt_mtu_expires);
926         }
927
928         rcu_read_lock();
929         if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) {
930                 struct fib_nh *nh = &FIB_RES_NH(res);
931
932                 update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
933                                       jiffies + ip_rt_mtu_expires);
934         }
935         rcu_read_unlock();
936 }
937
938 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
939                               struct sk_buff *skb, u32 mtu)
940 {
941         struct rtable *rt = (struct rtable *) dst;
942         struct flowi4 fl4;
943
944         ip_rt_build_flow_key(&fl4, sk, skb);
945         __ip_rt_update_pmtu(rt, &fl4, mtu);
946 }
947
948 void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
949                       int oif, u32 mark, u8 protocol, int flow_flags)
950 {
951         const struct iphdr *iph = (const struct iphdr *) skb->data;
952         struct flowi4 fl4;
953         struct rtable *rt;
954
955         __build_flow_key(&fl4, NULL, iph, oif,
956                          RT_TOS(iph->tos), protocol, mark, flow_flags);
957         rt = __ip_route_output_key(net, &fl4);
958         if (!IS_ERR(rt)) {
959                 __ip_rt_update_pmtu(rt, &fl4, mtu);
960                 ip_rt_put(rt);
961         }
962 }
963 EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
964
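/* Illustrative caller of ipv4_update_pmtu() above (a sketch only; "t" is a
 * hypothetical tunnel whose encapsulated packet drew an ICMP frag-needed
 * carrying the new MTU in "info"):
 *
 *	ipv4_update_pmtu(skb, dev_net(skb->dev), info,
 *			 t->parms.link, 0, IPPROTO_IPIP, 0);
 */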
965 void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
966 {
967         const struct iphdr *iph = (const struct iphdr *) skb->data;
968         struct flowi4 fl4;
969         struct rtable *rt;
970
971         __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
972         rt = __ip_route_output_key(sock_net(sk), &fl4);
973         if (!IS_ERR(rt)) {
974                 __ip_rt_update_pmtu(rt, &fl4, mtu);
975                 ip_rt_put(rt);
976         }
977 }
978 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
979
980 void ipv4_redirect(struct sk_buff *skb, struct net *net,
981                    int oif, u32 mark, u8 protocol, int flow_flags)
982 {
983         const struct iphdr *iph = (const struct iphdr *) skb->data;
984         struct flowi4 fl4;
985         struct rtable *rt;
986
987         __build_flow_key(&fl4, NULL, iph, oif,
988                          RT_TOS(iph->tos), protocol, mark, flow_flags);
989         rt = __ip_route_output_key(net, &fl4);
990         if (!IS_ERR(rt)) {
991                 __ip_do_redirect(rt, skb, &fl4, false);
992                 ip_rt_put(rt);
993         }
994 }
995 EXPORT_SYMBOL_GPL(ipv4_redirect);
996
997 void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
998 {
999         const struct iphdr *iph = (const struct iphdr *) skb->data;
1000         struct flowi4 fl4;
1001         struct rtable *rt;
1002
1003         __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
1004         rt = __ip_route_output_key(sock_net(sk), &fl4);
1005         if (!IS_ERR(rt)) {
1006                 __ip_do_redirect(rt, skb, &fl4, false);
1007                 ip_rt_put(rt);
1008         }
1009 }
1010 EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1011
1012 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1013 {
1014         struct rtable *rt = (struct rtable *) dst;
1015
1016         /* All IPV4 dsts are created with ->obsolete set to the value
1017          * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1018          * into this function always.
1019          *
1020          * When a PMTU/redirect information update invalidates a
1021          * route, this is indicated by setting obsolete to
1022          * DST_OBSOLETE_KILL.
1023          */
1024         if (dst->obsolete == DST_OBSOLETE_KILL || rt_is_expired(rt))
1025                 return NULL;
1026         return dst;
1027 }
1028
1029 static void ipv4_link_failure(struct sk_buff *skb)
1030 {
1031         struct rtable *rt;
1032
1033         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1034
1035         rt = skb_rtable(skb);
1036         if (rt)
1037                 dst_set_expires(&rt->dst, 0);
1038 }
1039
1040 static int ip_rt_bug(struct sk_buff *skb)
1041 {
1042         pr_debug("%s: %pI4 -> %pI4, %s\n",
1043                  __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1044                  skb->dev ? skb->dev->name : "?");
1045         kfree_skb(skb);
1046         WARN_ON(1);
1047         return 0;
1048 }
1049
1050 /*
1051    We do not cache the source address of the outgoing interface,
1052    because it is used only by the IP RR, TS and SRR options,
1053    so it is out of the fast path.
1054
1055    BTW remember: "addr" is allowed to be unaligned
1056    in IP options!
1057  */
1058
1059 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1060 {
1061         __be32 src;
1062
1063         if (rt_is_output_route(rt))
1064                 src = ip_hdr(skb)->saddr;
1065         else {
1066                 struct fib_result res;
1067                 struct flowi4 fl4;
1068                 struct iphdr *iph;
1069
1070                 iph = ip_hdr(skb);
1071
1072                 memset(&fl4, 0, sizeof(fl4));
1073                 fl4.daddr = iph->daddr;
1074                 fl4.saddr = iph->saddr;
1075                 fl4.flowi4_tos = RT_TOS(iph->tos);
1076                 fl4.flowi4_oif = rt->dst.dev->ifindex;
1077                 fl4.flowi4_iif = skb->dev->ifindex;
1078                 fl4.flowi4_mark = skb->mark;
1079
1080                 rcu_read_lock();
1081                 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
1082                         src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
1083                 else
1084                         src = inet_select_addr(rt->dst.dev,
1085                                                rt_nexthop(rt, iph->daddr),
1086                                                RT_SCOPE_UNIVERSE);
1087                 rcu_read_unlock();
1088         }
1089         memcpy(addr, &src, 4);
1090 }
1091
1092 #ifdef CONFIG_IP_ROUTE_CLASSID
1093 static void set_class_tag(struct rtable *rt, u32 tag)
1094 {
1095         if (!(rt->dst.tclassid & 0xFFFF))
1096                 rt->dst.tclassid |= tag & 0xFFFF;
1097         if (!(rt->dst.tclassid & 0xFFFF0000))
1098                 rt->dst.tclassid |= tag & 0xFFFF0000;
1099 }
1100 #endif
1101
1102 static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1103 {
1104         unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1105
1106         if (advmss == 0) {
1107                 advmss = max_t(unsigned int, dst->dev->mtu - 40,
1108                                ip_rt_min_advmss);
1109                 if (advmss > 65535 - 40)
1110                         advmss = 65535 - 40;
1111         }
1112         return advmss;
1113 }
1114
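/* Effective MTU of the route: a still-valid learned PMTU wins, then the
 * RTAX_MTU metric, then the device MTU.  A locked metric keeps the classic
 * 576-byte clamp for gatewayed routes, and everything is capped at
 * IP_MAX_MTU.
 */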
1115 static unsigned int ipv4_mtu(const struct dst_entry *dst)
1116 {
1117         const struct rtable *rt = (const struct rtable *) dst;
1118         unsigned int mtu = rt->rt_pmtu;
1119
1120         if (!mtu || time_after_eq(jiffies, rt->dst.expires))
1121                 mtu = dst_metric_raw(dst, RTAX_MTU);
1122
1123         if (mtu && rt_is_output_route(rt))
1124                 return mtu;
1125
1126         mtu = dst->dev->mtu;
1127
1128         if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1129                 if (rt->rt_gateway && mtu > 576)
1130                         mtu = 576;
1131         }
1132
1133         if (mtu > IP_MAX_MTU)
1134                 mtu = IP_MAX_MTU;
1135
1136         return mtu;
1137 }
1138
1139 static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
1140 {
1141         struct fnhe_hash_bucket *hash = nh->nh_exceptions;
1142         struct fib_nh_exception *fnhe;
1143         u32 hval;
1144
1145         if (!hash)
1146                 return NULL;
1147
1148         hval = fnhe_hashfun(daddr);
1149
1150         for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1151              fnhe = rcu_dereference(fnhe->fnhe_next)) {
1152                 if (fnhe->fnhe_daddr == daddr)
1153                         return fnhe;
1154         }
1155         return NULL;
1156 }
1157
1158 static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1159                               __be32 daddr)
1160 {
1161         bool ret = false;
1162
1163         spin_lock_bh(&fnhe_lock);
1164
1165         if (daddr == fnhe->fnhe_daddr) {
1166                 struct rtable *orig;
1167
1168                 if (fnhe->fnhe_pmtu) {
1169                         unsigned long expires = fnhe->fnhe_expires;
1170                         unsigned long diff = expires - jiffies;
1171
1172                         if (time_before(jiffies, expires)) {
1173                                 rt->rt_pmtu = fnhe->fnhe_pmtu;
1174                                 dst_set_expires(&rt->dst, diff);
1175                         }
1176                 }
1177                 if (fnhe->fnhe_gw) {
1178                         rt->rt_flags |= RTCF_REDIRECTED;
1179                         rt->rt_gateway = fnhe->fnhe_gw;
1180                 }
1181
1182                 orig = rcu_dereference(fnhe->fnhe_rth);
1183                 rcu_assign_pointer(fnhe->fnhe_rth, rt);
1184                 if (orig)
1185                         rt_free(orig);
1186
1187                 fnhe->fnhe_stamp = jiffies;
1188                 ret = true;
1189         } else {
1190                 /* Routes we intend to cache in the nexthop exception have
1191                  * the DST_NOCACHE bit clear.  However, if we are
1192                  * unsuccessful at storing this route into the cache
1193                  * we really need to set it.
1194                  */
1195                 rt->dst.flags |= DST_NOCACHE;
1196         }
1197         spin_unlock_bh(&fnhe_lock);
1198
1199         return ret;
1200 }
1201
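/* Try to make rt the cached route of its nexthop: the shared input slot
 * for input routes, this CPU's output slot otherwise.  cmpxchg() keeps the
 * swap atomic against concurrent cachers; on losing the race the route
 * stays uncached and is flagged DST_NOCACHE.
 */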
1202 static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
1203 {
1204         struct rtable *orig, *prev, **p;
1205         bool ret = true;
1206
1207         if (rt_is_input_route(rt)) {
1208                 p = (struct rtable **)&nh->nh_rth_input;
1209         } else {
1210                 p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output);
1211         }
1212         orig = *p;
1213
1214         prev = cmpxchg(p, orig, rt);
1215         if (prev == orig) {
1216                 if (orig)
1217                         rt_free(orig);
1218         } else {
1219                 /* Routes we intend to cache in the FIB nexthop have
1220                  * the DST_NOCACHE bit clear.  However, if we are
1221                  * unsuccessful at storing this route into the cache
1222                  * we really need to set it.
1223                  */
1224                 rt->dst.flags |= DST_NOCACHE;
1225                 ret = false;
1226         }
1227
1228         return ret;
1229 }
1230
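/* Routes that could not be cached in the FIB are kept on a global list so
 * that rt_flush_dev() can still find them when their device goes away.
 */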
1231 static DEFINE_SPINLOCK(rt_uncached_lock);
1232 static LIST_HEAD(rt_uncached_list);
1233
1234 static void rt_add_uncached_list(struct rtable *rt)
1235 {
1236         spin_lock_bh(&rt_uncached_lock);
1237         list_add_tail(&rt->rt_uncached, &rt_uncached_list);
1238         spin_unlock_bh(&rt_uncached_lock);
1239 }
1240
1241 static void ipv4_dst_destroy(struct dst_entry *dst)
1242 {
1243         struct rtable *rt = (struct rtable *) dst;
1244
1245         if (!list_empty(&rt->rt_uncached)) {
1246                 spin_lock_bh(&rt_uncached_lock);
1247                 list_del(&rt->rt_uncached);
1248                 spin_unlock_bh(&rt_uncached_lock);
1249         }
1250 }
1251
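/* A device is going away: repoint any uncached routes still using it at
 * the loopback device, so their dsts can safely outlive the device.
 */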
1252 void rt_flush_dev(struct net_device *dev)
1253 {
1254         if (!list_empty(&rt_uncached_list)) {
1255                 struct net *net = dev_net(dev);
1256                 struct rtable *rt;
1257
1258                 spin_lock_bh(&rt_uncached_lock);
1259                 list_for_each_entry(rt, &rt_uncached_list, rt_uncached) {
1260                         if (rt->dst.dev != dev)
1261                                 continue;
1262                         rt->dst.dev = net->loopback_dev;
1263                         dev_hold(rt->dst.dev);
1264                         dev_put(dev);
1265                 }
1266                 spin_unlock_bh(&rt_uncached_lock);
1267         }
1268 }
1269
1270 static bool rt_cache_valid(const struct rtable *rt)
1271 {
1272         return  rt &&
1273                 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1274                 !rt_is_expired(rt);
1275 }
1276
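/* Finish initialising a route from a FIB lookup result: inherit the
 * gateway and metrics from the nexthop, bind to a matching nexthop
 * exception if one was found, otherwise try to cache the route; anything
 * uncacheable goes on the uncached list instead.
 */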
1277 static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1278                            const struct fib_result *res,
1279                            struct fib_nh_exception *fnhe,
1280                            struct fib_info *fi, u16 type, u32 itag)
1281 {
1282         bool cached = false;
1283
1284         if (fi) {
1285                 struct fib_nh *nh = &FIB_RES_NH(*res);
1286
1287                 if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK)
1288                         rt->rt_gateway = nh->nh_gw;
1289                 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
1290 #ifdef CONFIG_IP_ROUTE_CLASSID
1291                 rt->dst.tclassid = nh->nh_tclassid;
1292 #endif
1293                 if (unlikely(fnhe))
1294                         cached = rt_bind_exception(rt, fnhe, daddr);
1295                 else if (!(rt->dst.flags & DST_NOCACHE))
1296                         cached = rt_cache_route(nh, rt);
1297         }
1298         if (unlikely(!cached))
1299                 rt_add_uncached_list(rt);
1300
1301 #ifdef CONFIG_IP_ROUTE_CLASSID
1302 #ifdef CONFIG_IP_MULTIPLE_TABLES
1303         set_class_tag(rt, res->tclassid);
1304 #endif
1305         set_class_tag(rt, itag);
1306 #endif
1307 }
1308
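/* All routes are allocated with DST_OBSOLETE_FORCE_CHK so that
 * ipv4_dst_check() is always consulted; routes that will not be cached are
 * born with DST_NOCACHE set.
 */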
1309 static struct rtable *rt_dst_alloc(struct net_device *dev,
1310                                    bool nopolicy, bool noxfrm, bool will_cache)
1311 {
1312         return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1313                          (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
1314                          (nopolicy ? DST_NOPOLICY : 0) |
1315                          (noxfrm ? DST_NOXFRM : 0));
1316 }
1317
1318 /* called in rcu_read_lock() section */
1319 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1320                                 u8 tos, struct net_device *dev, int our)
1321 {
1322         struct rtable *rth;
1323         struct in_device *in_dev = __in_dev_get_rcu(dev);
1324         u32 itag = 0;
1325         int err;
1326
1327         /* Primary sanity checks. */
1328
1329         if (in_dev == NULL)
1330                 return -EINVAL;
1331
1332         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1333             skb->protocol != htons(ETH_P_IP))
1334                 goto e_inval;
1335
1336         if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
1337                 if (ipv4_is_loopback(saddr))
1338                         goto e_inval;
1339
1340         if (ipv4_is_zeronet(saddr)) {
1341                 if (!ipv4_is_local_multicast(daddr))
1342                         goto e_inval;
1343         } else {
1344                 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1345                                           in_dev, &itag);
1346                 if (err < 0)
1347                         goto e_err;
1348         }
1349         rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
1350                            IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
1351         if (!rth)
1352                 goto e_nobufs;
1353
1354 #ifdef CONFIG_IP_ROUTE_CLASSID
1355         rth->dst.tclassid = itag;
1356 #endif
1357         rth->dst.output = ip_rt_bug;
1358
1359         rth->rt_genid   = rt_genid(dev_net(dev));
1360         rth->rt_flags   = RTCF_MULTICAST;
1361         rth->rt_type    = RTN_MULTICAST;
1362         rth->rt_is_input= 1;
1363         rth->rt_iif     = 0;
1364         rth->rt_pmtu    = 0;
1365         rth->rt_gateway = 0;
1366         INIT_LIST_HEAD(&rth->rt_uncached);
1367         if (our) {
1368                 rth->dst.input= ip_local_deliver;
1369                 rth->rt_flags |= RTCF_LOCAL;
1370         }
1371
1372 #ifdef CONFIG_IP_MROUTE
1373         if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1374                 rth->dst.input = ip_mr_input;
1375 #endif
1376         RT_CACHE_STAT_INC(in_slow_mc);
1377
1378         skb_dst_set(skb, &rth->dst);
1379         return 0;
1380
1381 e_nobufs:
1382         return -ENOBUFS;
1383 e_inval:
1384         return -EINVAL;
1385 e_err:
1386         return err;
1387 }
1388
1389
1390 static void ip_handle_martian_source(struct net_device *dev,
1391                                      struct in_device *in_dev,
1392                                      struct sk_buff *skb,
1393                                      __be32 daddr,
1394                                      __be32 saddr)
1395 {
1396         RT_CACHE_STAT_INC(in_martian_src);
1397 #ifdef CONFIG_IP_ROUTE_VERBOSE
1398         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1399                 /*
1400                  *      RFC 1812 recommendation: if the source is martian,
1401                  *      the only hint is the MAC header.
1402                  */
1403                 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1404                         &daddr, &saddr, dev->name);
1405                 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1406                         print_hex_dump(KERN_WARNING, "ll header: ",
1407                                        DUMP_PREFIX_OFFSET, 16, 1,
1408                                        skb_mac_header(skb),
1409                                        dev->hard_header_len, true);
1410                 }
1411         }
1412 #endif
1413 }
1414
1415 /* called in rcu_read_lock() section */
1416 static int __mkroute_input(struct sk_buff *skb,
1417                            const struct fib_result *res,
1418                            struct in_device *in_dev,
1419                            __be32 daddr, __be32 saddr, u32 tos)
1420 {
1421         struct rtable *rth;
1422         int err;
1423         struct in_device *out_dev;
1424         unsigned int flags = 0;
1425         bool do_cache;
1426         u32 itag;
1427
1428         /* get a working reference to the output device */
1429         out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1430         if (out_dev == NULL) {
1431                 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1432                 return -EINVAL;
1433         }
1434
1435
1436         err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1437                                   in_dev->dev, in_dev, &itag);
1438         if (err < 0) {
1439                 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1440                                          saddr);
1441
1442                 goto cleanup;
1443         }
1444
1445         do_cache = res->fi && !itag;
1446         if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
1447             (IN_DEV_SHARED_MEDIA(out_dev) ||
1448              inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
1449                 flags |= RTCF_DOREDIRECT;
1450                 do_cache = false;
1451         }
1452
1453         if (skb->protocol != htons(ETH_P_IP)) {
1454                 /* Not IP (i.e. ARP). Do not create a route if it is
1455                  * invalid for proxy ARP. DNAT routes are always valid.
1456                  *
1457                  * The proxy ARP feature has been extended to allow ARP
1458                  * replies back on the same interface, to support
1459                  * private VLAN switch technologies. See arp.c.
1460                  */
1461                 if (out_dev == in_dev &&
1462                     IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1463                         err = -EINVAL;
1464                         goto cleanup;
1465                 }
1466         }
1467
1468         if (do_cache) {
1469                 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
1470                 if (rt_cache_valid(rth)) {
1471                         skb_dst_set_noref(skb, &rth->dst);
1472                         goto out;
1473                 }
1474         }
1475
1476         rth = rt_dst_alloc(out_dev->dev,
1477                            IN_DEV_CONF_GET(in_dev, NOPOLICY),
1478                            IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
1479         if (!rth) {
1480                 err = -ENOBUFS;
1481                 goto cleanup;
1482         }
1483
1484         rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
1485         rth->rt_flags = flags;
1486         rth->rt_type = res->type;
1487         rth->rt_is_input = 1;
1488         rth->rt_iif     = 0;
1489         rth->rt_pmtu    = 0;
1490         rth->rt_gateway = 0;
1491         INIT_LIST_HEAD(&rth->rt_uncached);
1492
1493         rth->dst.input = ip_forward;
1494         rth->dst.output = ip_output;
1495
1496         rt_set_nexthop(rth, daddr, res, NULL, res->fi, res->type, itag);
1497         skb_dst_set(skb, &rth->dst);
1498 out:
1499         err = 0;
1500  cleanup:
1501         return err;
1502 }
1503
1504 static int ip_mkroute_input(struct sk_buff *skb,
1505                             struct fib_result *res,
1506                             const struct flowi4 *fl4,
1507                             struct in_device *in_dev,
1508                             __be32 daddr, __be32 saddr, u32 tos)
1509 {
1510 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1511         if (res->fi && res->fi->fib_nhs > 1)
1512                 fib_select_multipath(res);
1513 #endif
1514
1515         /* create a routing cache entry */
1516         return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
1517 }
1518
1519 /*
1520  *      NOTE. We drop all packets that have a local source
1521  *      address, because every properly looped-back packet
1522  *      must already have the correct destination attached by the output routine.
1523  *
1524  *      This approach solves two big problems:
1525  *      1. Non-simplex devices are handled properly.
1526  *      2. IP spoofing attempts are filtered with a 100% guarantee.
1527  *      Called with rcu_read_lock().
1528  */
1529
1530 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1531                                u8 tos, struct net_device *dev)
1532 {
1533         struct fib_result res;
1534         struct in_device *in_dev = __in_dev_get_rcu(dev);
1535         struct flowi4   fl4;
1536         unsigned int    flags = 0;
1537         u32             itag = 0;
1538         struct rtable   *rth;
1539         int             err = -EINVAL;
1540         struct net    *net = dev_net(dev);
1541         bool do_cache;
1542
1543         /* IP on this device is disabled. */
1544
1545         if (!in_dev)
1546                 goto out;
1547
1548         /* Check for the weirdest martians, which cannot be detected
1549            by fib_lookup.
1550          */
1551
1552         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
1553                 goto martian_source;
1554
1555         res.fi = NULL;
1556         if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1557                 goto brd_input;
1558
1559         /* Accept zero addresses only for limited broadcast;
1560          * I do not even know whether to fix it or not. Waiting for complaints :-)
1561          */
1562         if (ipv4_is_zeronet(saddr))
1563                 goto martian_source;
1564
1565         if (ipv4_is_zeronet(daddr))
1566                 goto martian_destination;
1567
1568         /* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET()
1569          * twice, calling it at most once when daddr and/or saddr is loopback.
1570          */
1571         if (ipv4_is_loopback(daddr)) {
1572                 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1573                         goto martian_destination;
1574         } else if (ipv4_is_loopback(saddr)) {
1575                 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1576                         goto martian_source;
1577         }
1578
1579         /*
1580          *      Now we are ready to route the packet.
1581          */
1582         fl4.flowi4_oif = 0;
1583         fl4.flowi4_iif = dev->ifindex;
1584         fl4.flowi4_mark = skb->mark;
1585         fl4.flowi4_tos = tos;
1586         fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
1587         fl4.daddr = daddr;
1588         fl4.saddr = saddr;
1589         err = fib_lookup(net, &fl4, &res);
1590         if (err != 0)
1591                 goto no_route;
1592
1593         RT_CACHE_STAT_INC(in_slow_tot);
1594
1595         if (res.type == RTN_BROADCAST)
1596                 goto brd_input;
1597
1598         if (res.type == RTN_LOCAL) {
1599                 err = fib_validate_source(skb, saddr, daddr, tos,
1600                                           LOOPBACK_IFINDEX,
1601                                           dev, in_dev, &itag);
1602                 if (err < 0)
1603                         goto martian_source_keep_err;
1604                 goto local_input;
1605         }
1606
1607         if (!IN_DEV_FORWARD(in_dev))
1608                 goto no_route;
1609         if (res.type != RTN_UNICAST)
1610                 goto martian_destination;
1611
1612         err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
1613 out:    return err;
1614
1615 brd_input:
1616         if (skb->protocol != htons(ETH_P_IP))
1617                 goto e_inval;
1618
1619         if (!ipv4_is_zeronet(saddr)) {
1620                 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1621                                           in_dev, &itag);
1622                 if (err < 0)
1623                         goto martian_source_keep_err;
1624         }
1625         flags |= RTCF_BROADCAST;
1626         res.type = RTN_BROADCAST;
1627         RT_CACHE_STAT_INC(in_brd);
1628
1629 local_input:
1630         do_cache = false;
1631         if (res.fi) {
1632                 if (!itag) {
1633                         rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
1634                         if (rt_cache_valid(rth)) {
1635                                 skb_dst_set_noref(skb, &rth->dst);
1636                                 err = 0;
1637                                 goto out;
1638                         }
1639                         do_cache = true;
1640                 }
1641         }
1642
1643         rth = rt_dst_alloc(net->loopback_dev,
1644                            IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
1645         if (!rth)
1646                 goto e_nobufs;
1647
1648         rth->dst.input = ip_local_deliver;
1649         rth->dst.output = ip_rt_bug;
1650 #ifdef CONFIG_IP_ROUTE_CLASSID
1651         rth->dst.tclassid = itag;
1652 #endif
1653
1654         rth->rt_genid = rt_genid(net);
1655         rth->rt_flags   = flags|RTCF_LOCAL;
1656         rth->rt_type    = res.type;
1657         rth->rt_is_input = 1;
1658         rth->rt_iif     = 0;
1659         rth->rt_pmtu    = 0;
1660         rth->rt_gateway = 0;
1661         INIT_LIST_HEAD(&rth->rt_uncached);
1662         if (res.type == RTN_UNREACHABLE) {
1663                 rth->dst.input = ip_error;
1664                 rth->dst.error = -err;
1665                 rth->rt_flags   &= ~RTCF_LOCAL;
1666         }
1667         if (do_cache)
1668                 rt_cache_route(&FIB_RES_NH(res), rth);
1669         skb_dst_set(skb, &rth->dst);
1670         err = 0;
1671         goto out;
1672
1673 no_route:
1674         RT_CACHE_STAT_INC(in_no_route);
1675         res.type = RTN_UNREACHABLE;
1676         if (err == -ESRCH)
1677                 err = -ENETUNREACH;
1678         goto local_input;
1679
1680         /*
1681          *      Do not cache martian addresses: they should be logged (RFC1812)
1682          */
1683 martian_destination:
1684         RT_CACHE_STAT_INC(in_martian_dst);
1685 #ifdef CONFIG_IP_ROUTE_VERBOSE
1686         if (IN_DEV_LOG_MARTIANS(in_dev))
1687                 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
1688                                      &daddr, &saddr, dev->name);
1689 #endif
1690
1691 e_inval:
1692         err = -EINVAL;
1693         goto out;
1694
1695 e_nobufs:
1696         err = -ENOBUFS;
1697         goto out;
1698
1699 martian_source:
1700         err = -EINVAL;
1701 martian_source_keep_err:
1702         ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
1703         goto out;
1704 }
1705
1706 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1707                          u8 tos, struct net_device *dev)
1708 {
1709         int res;
1710
1711         rcu_read_lock();
1712
1713         /* Multicast recognition logic was moved from the route cache to here.
1714            The problem was that too many Ethernet cards have broken/missing
1715            hardware multicast filters :-( As a result, a host on a multicast
1716            network acquires a lot of useless route cache entries, e.g. for
1717            SDR messages from all over the world. Now we try to get rid of them.
1718            Really, provided the software IP multicast filter is organized
1719            reasonably (at least, hashed), this does not result in a slowdown
1720            compared with route cache reject entries.
1721            Note that multicast routers are not affected, because a
1722            route cache entry is created eventually.
1723          */
1724         if (ipv4_is_multicast(daddr)) {
1725                 struct in_device *in_dev = __in_dev_get_rcu(dev);
1726
1727                 if (in_dev) {
1728                         int our = ip_check_mc_rcu(in_dev, daddr, saddr,
1729                                                   ip_hdr(skb)->protocol);
1730                         if (our
1731 #ifdef CONFIG_IP_MROUTE
1732                                 ||
1733                             (!ipv4_is_local_multicast(daddr) &&
1734                              IN_DEV_MFORWARD(in_dev))
1735 #endif
1736                            ) {
1737                                 int res = ip_route_input_mc(skb, daddr, saddr,
1738                                                             tos, dev, our);
1739                                 rcu_read_unlock();
1740                                 return res;
1741                         }
1742                 }
1743                 rcu_read_unlock();
1744                 return -EINVAL;
1745         }
1746         res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
1747         rcu_read_unlock();
1748         return res;
1749 }
1750 EXPORT_SYMBOL(ip_route_input_noref);
1751
1752 /* called with rcu_read_lock() */
1753 static struct rtable *__mkroute_output(const struct fib_result *res,
1754                                        const struct flowi4 *fl4, int orig_oif,
1755                                        struct net_device *dev_out,
1756                                        unsigned int flags)
1757 {
1758         struct fib_info *fi = res->fi;
1759         struct fib_nh_exception *fnhe;
1760         struct in_device *in_dev;
1761         u16 type = res->type;
1762         struct rtable *rth;
1763
1764         in_dev = __in_dev_get_rcu(dev_out);
1765         if (!in_dev)
1766                 return ERR_PTR(-EINVAL);
1767
1768         if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
1769                 if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
1770                         return ERR_PTR(-EINVAL);
1771
1772         if (ipv4_is_lbcast(fl4->daddr))
1773                 type = RTN_BROADCAST;
1774         else if (ipv4_is_multicast(fl4->daddr))
1775                 type = RTN_MULTICAST;
1776         else if (ipv4_is_zeronet(fl4->daddr))
1777                 return ERR_PTR(-EINVAL);
1778
1779         if (dev_out->flags & IFF_LOOPBACK)
1780                 flags |= RTCF_LOCAL;
1781
1782         if (type == RTN_BROADCAST) {
1783                 flags |= RTCF_BROADCAST | RTCF_LOCAL;
1784                 fi = NULL;
1785         } else if (type == RTN_MULTICAST) {
1786                 flags |= RTCF_MULTICAST | RTCF_LOCAL;
1787                 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
1788                                      fl4->flowi4_proto))
1789                         flags &= ~RTCF_LOCAL;
1790                 /* If a multicast route does not exist, use the
1791                  * default one, but do not use a gateway in this case.
1792                  * Yes, it is a hack.
1793                  */
1794                 if (fi && res->prefixlen < 4)
1795                         fi = NULL;
1796         }
1797
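        /* Prefer a cached output route: reuse the PMTU/redirect exception
         * stored for this destination if one exists, otherwise the per-CPU
         * output route cached on the nexthop.
         */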
1798         fnhe = NULL;
1799         if (fi) {
1800                 struct rtable __rcu **prth;
1801
1802                 fnhe = find_exception(&FIB_RES_NH(*res), fl4->daddr);
1803                 if (fnhe)
1804                         prth = &fnhe->fnhe_rth;
1805                 else
1806                         prth = __this_cpu_ptr(FIB_RES_NH(*res).nh_pcpu_rth_output);
1807                 rth = rcu_dereference(*prth);
1808                 if (rt_cache_valid(rth)) {
1809                         dst_hold(&rth->dst);
1810                         return rth;
1811                 }
1812         }
1813         rth = rt_dst_alloc(dev_out,
1814                            IN_DEV_CONF_GET(in_dev, NOPOLICY),
1815                            IN_DEV_CONF_GET(in_dev, NOXFRM),
1816                            fi);
1817         if (!rth)
1818                 return ERR_PTR(-ENOBUFS);
1819
1820         rth->dst.output = ip_output;
1821
1822         rth->rt_genid = rt_genid(dev_net(dev_out));
1823         rth->rt_flags   = flags;
1824         rth->rt_type    = type;
1825         rth->rt_is_input = 0;
1826         rth->rt_iif     = orig_oif ? : 0;
1827         rth->rt_pmtu    = 0;
1828         rth->rt_gateway = 0;
1829         INIT_LIST_HEAD(&rth->rt_uncached);
1830
1831         RT_CACHE_STAT_INC(out_slow_tot);
1832
1833         if (flags & RTCF_LOCAL)
1834                 rth->dst.input = ip_local_deliver;
1835         if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
1836                 if (flags & RTCF_LOCAL &&
1837                     !(dev_out->flags & IFF_LOOPBACK)) {
1838                         rth->dst.output = ip_mc_output;
1839                         RT_CACHE_STAT_INC(out_slow_mc);
1840                 }
1841 #ifdef CONFIG_IP_MROUTE
1842                 if (type == RTN_MULTICAST) {
1843                         if (IN_DEV_MFORWARD(in_dev) &&
1844                             !ipv4_is_local_multicast(fl4->daddr)) {
1845                                 rth->dst.input = ip_mr_input;
1846                                 rth->dst.output = ip_mc_output;
1847                         }
1848                 }
1849 #endif
1850         }
1851
1852         rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
1853
1854         return rth;
1855 }
1856
1857 /*
1858  * Major route resolver routine.
1859  */
1860
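/*
 * A minimal usage sketch (error handling beyond IS_ERR elided; the caller
 * is assumed to hold a reference on @net):
 *
 *	struct flowi4 fl4;
 *	struct rtable *rt;
 *
 *	memset(&fl4, 0, sizeof(fl4));
 *	fl4.daddr = daddr;
 *	rt = __ip_route_output_key(net, &fl4);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	...
 *	ip_rt_put(rt);
 *
 * On success the flowi4 is updated in place (saddr, oif) and the returned
 * rtable holds a reference that the caller must drop with ip_rt_put().
 */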
1861 struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
1862 {
1863         struct net_device *dev_out = NULL;
1864         __u8 tos = RT_FL_TOS(fl4);
1865         unsigned int flags = 0;
1866         struct fib_result res;
1867         struct rtable *rth;
1868         int orig_oif;
1869
1870         res.tclassid    = 0;
1871         res.fi          = NULL;
1872         res.table       = NULL;
1873
1874         orig_oif = fl4->flowi4_oif;
1875
1876         fl4->flowi4_iif = LOOPBACK_IFINDEX;
1877         fl4->flowi4_tos = tos & IPTOS_RT_MASK;
1878         fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
1879                          RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
1880
1881         rcu_read_lock();
1882         if (fl4->saddr) {
1883                 rth = ERR_PTR(-EINVAL);
1884                 if (ipv4_is_multicast(fl4->saddr) ||
1885                     ipv4_is_lbcast(fl4->saddr) ||
1886                     ipv4_is_zeronet(fl4->saddr))
1887                         goto out;
1888
1889                 /* I removed the check for oif == dev_out->oif here.
1890                    It was wrong for two reasons:
1891                    1. ip_dev_find(net, saddr) can return the wrong iface if saddr
1892                       is assigned to multiple interfaces.
1893                    2. Moreover, we are allowed to send packets with the saddr
1894                       of another iface. --ANK
1895                  */
1896
1897                 if (fl4->flowi4_oif == 0 &&
1898                     (ipv4_is_multicast(fl4->daddr) ||
1899                      ipv4_is_lbcast(fl4->daddr))) {
1900                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
1901                         dev_out = __ip_dev_find(net, fl4->saddr, false);
1902                         if (dev_out == NULL)
1903                                 goto out;
1904
1905                         /* Special hack: the user can direct multicasts
1906                            and limited broadcast via the necessary interface
1907                            without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
1908                            This hack is not just for fun, it allows
1909                            vic, vat and friends to work.
1910                            They bind a socket to loopback, set the ttl to zero
1911                            and expect that it will work.
1912                            From the viewpoint of the routing cache they are broken,
1913                            because we are not allowed to build a multicast path
1914                            with a loopback source addr (look, the routing cache
1915                            cannot know that the ttl is zero, so that the packet
1916                            will not leave this host and the route is valid).
1917                            Luckily, this hack is a good workaround.
1918                          */
1919
1920                         fl4->flowi4_oif = dev_out->ifindex;
1921                         goto make_route;
1922                 }
1923
1924                 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
1925                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
1926                         if (!__ip_dev_find(net, fl4->saddr, false))
1927                                 goto out;
1928                 }
1929         }
1930
1931
1932         if (fl4->flowi4_oif) {
1933                 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
1934                 rth = ERR_PTR(-ENODEV);
1935                 if (dev_out == NULL)
1936                         goto out;
1937
1938                 /* RACE: Check return value of inet_select_addr instead. */
1939                 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
1940                         rth = ERR_PTR(-ENETUNREACH);
1941                         goto out;
1942                 }
1943                 if (ipv4_is_local_multicast(fl4->daddr) ||
1944                     ipv4_is_lbcast(fl4->daddr)) {
1945                         if (!fl4->saddr)
1946                                 fl4->saddr = inet_select_addr(dev_out, 0,
1947                                                               RT_SCOPE_LINK);
1948                         goto make_route;
1949                 }
1950                 if (fl4->saddr) {
1951                         if (ipv4_is_multicast(fl4->daddr))
1952                                 fl4->saddr = inet_select_addr(dev_out, 0,
1953                                                               fl4->flowi4_scope);
1954                         else if (!fl4->daddr)
1955                                 fl4->saddr = inet_select_addr(dev_out, 0,
1956                                                               RT_SCOPE_HOST);
1957                 }
1958         }
1959
1960         if (!fl4->daddr) {
1961                 fl4->daddr = fl4->saddr;
1962                 if (!fl4->daddr)
1963                         fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
1964                 dev_out = net->loopback_dev;
1965                 fl4->flowi4_oif = LOOPBACK_IFINDEX;
1966                 res.type = RTN_LOCAL;
1967                 flags |= RTCF_LOCAL;
1968                 goto make_route;
1969         }
1970
1971         if (fib_lookup(net, fl4, &res)) {
1972                 res.fi = NULL;
1973                 res.table = NULL;
1974                 if (fl4->flowi4_oif) {
1975                         /* Apparently, the routing tables are wrong. Assume
1976                            that the destination is on-link.
1977
1978                            WHY? DW.
1979                            Because we are allowed to send to an iface
1980                            even if it has NO routes and NO assigned
1981                            addresses. When oif is specified, the routing
1982                            tables are looked up with only one purpose:
1983                            to catch whether the destination is gatewayed, rather
1984                            than direct. Moreover, if MSG_DONTROUTE is set,
1985                            we send the packet, ignoring both the routing tables
1986                            and the ifaddr state. --ANK
1987
1988
1989                            We could do this even when oif is unknown,
1990                            as IPv6 likely does, but we do not.
1991                          */
1992
1993                         if (fl4->saddr == 0)
1994                                 fl4->saddr = inet_select_addr(dev_out, 0,
1995                                                               RT_SCOPE_LINK);
1996                         res.type = RTN_UNICAST;
1997                         goto make_route;
1998                 }
1999                 rth = ERR_PTR(-ENETUNREACH);
2000                 goto out;
2001         }
2002
2003         if (res.type == RTN_LOCAL) {
2004                 if (!fl4->saddr) {
2005                         if (res.fi->fib_prefsrc)
2006                                 fl4->saddr = res.fi->fib_prefsrc;
2007                         else
2008                                 fl4->saddr = fl4->daddr;
2009                 }
2010                 dev_out = net->loopback_dev;
2011                 fl4->flowi4_oif = dev_out->ifindex;
2012                 flags |= RTCF_LOCAL;
2013                 goto make_route;
2014         }
2015
2016 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2017         if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
2018                 fib_select_multipath(&res);
2019         else
2020 #endif
2021         if (!res.prefixlen &&
2022             res.table->tb_num_default > 1 &&
2023             res.type == RTN_UNICAST && !fl4->flowi4_oif)
2024                 fib_select_default(&res);
2025
2026         if (!fl4->saddr)
2027                 fl4->saddr = FIB_RES_PREFSRC(net, res);
2028
2029         dev_out = FIB_RES_DEV(res);
2030         fl4->flowi4_oif = dev_out->ifindex;
2031
2032
2033 make_route:
2034         rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);
2035
2036 out:
2037         rcu_read_unlock();
2038         return rth;
2039 }
2040 EXPORT_SYMBOL_GPL(__ip_route_output_key);
2041
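/* Blackhole dst_ops: every operation below is a no-op.  xfrm swaps these
 * in (via ipv4_blackhole_route()) for routes whose IPsec states are still
 * being resolved, so traffic is silently discarded in the meantime.
 */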
2042 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2043 {
2044         return NULL;
2045 }
2046
2047 static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
2048 {
2049         unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2050
2051         return mtu ? : dst->dev->mtu;
2052 }
2053
2054 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
2055                                           struct sk_buff *skb, u32 mtu)
2056 {
2057 }
2058
2059 static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
2060                                        struct sk_buff *skb)
2061 {
2062 }
2063
2064 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2065                                           unsigned long old)
2066 {
2067         return NULL;
2068 }
2069
2070 static struct dst_ops ipv4_dst_blackhole_ops = {
2071         .family                 =       AF_INET,
2072         .protocol               =       cpu_to_be16(ETH_P_IP),
2073         .check                  =       ipv4_blackhole_dst_check,
2074         .mtu                    =       ipv4_blackhole_mtu,
2075         .default_advmss         =       ipv4_default_advmss,
2076         .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
2077         .redirect               =       ipv4_rt_blackhole_redirect,
2078         .cow_metrics            =       ipv4_rt_blackhole_cow_metrics,
2079         .neigh_lookup           =       ipv4_neigh_lookup,
2080 };
2081
2082 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2083 {
2084         struct rtable *ort = (struct rtable *) dst_orig;
2085         struct rtable *rt;
2086
2087         rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2088         if (rt) {
2089                 struct dst_entry *new = &rt->dst;
2090
2091                 new->__use = 1;
2092                 new->input = dst_discard;
2093                 new->output = dst_discard;
2094
2095                 new->dev = ort->dst.dev;
2096                 if (new->dev)
2097                         dev_hold(new->dev);
2098
2099                 rt->rt_is_input = ort->rt_is_input;
2100                 rt->rt_iif = ort->rt_iif;
2101                 rt->rt_pmtu = ort->rt_pmtu;
2102
2103                 rt->rt_genid = rt_genid(net);
2104                 rt->rt_flags = ort->rt_flags;
2105                 rt->rt_type = ort->rt_type;
2106                 rt->rt_gateway = ort->rt_gateway;
2107
2108                 INIT_LIST_HEAD(&rt->rt_uncached);
2109
2110                 dst_free(new);
2111         }
2112
2113         dst_release(dst_orig);
2114
2115         return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2116 }
2117
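/* Resolve an output route and, when the flow specifies a transport
 * protocol, run the result through xfrm so that any matching IPsec
 * policy is applied.
 */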
2118 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2119                                     struct sock *sk)
2120 {
2121         struct rtable *rt = __ip_route_output_key(net, flp4);
2122
2123         if (IS_ERR(rt))
2124                 return rt;
2125
2126         if (flp4->flowi4_proto)
2127                 rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2128                                                    flowi4_to_flowi(flp4),
2129                                                    sk, 0);
2130
2131         return rt;
2132 }
2133 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2134
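/* Fill an RTM_NEWROUTE netlink message describing the route attached
 * to @skb: destination/source, oif, gateway, metrics and cache info.
 */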
2135 static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
2136                         struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
2137                         u32 seq, int event, int nowait, unsigned int flags)
2138 {
2139         struct rtable *rt = skb_rtable(skb);
2140         struct rtmsg *r;
2141         struct nlmsghdr *nlh;
2142         unsigned long expires = 0;
2143         u32 error;
2144         u32 metrics[RTAX_MAX];
2145
2146         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
2147         if (nlh == NULL)
2148                 return -EMSGSIZE;
2149
2150         r = nlmsg_data(nlh);
2151         r->rtm_family    = AF_INET;
2152         r->rtm_dst_len  = 32;
2153         r->rtm_src_len  = 0;
2154         r->rtm_tos      = fl4->flowi4_tos;
2155         r->rtm_table    = RT_TABLE_MAIN;
2156         if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
2157                 goto nla_put_failure;
2158         r->rtm_type     = rt->rt_type;
2159         r->rtm_scope    = RT_SCOPE_UNIVERSE;
2160         r->rtm_protocol = RTPROT_UNSPEC;
2161         r->rtm_flags    = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2162         if (rt->rt_flags & RTCF_NOTIFY)
2163                 r->rtm_flags |= RTM_F_NOTIFY;
2164
2165         if (nla_put_be32(skb, RTA_DST, dst))
2166                 goto nla_put_failure;
2167         if (src) {
2168                 r->rtm_src_len = 32;
2169                 if (nla_put_be32(skb, RTA_SRC, src))
2170                         goto nla_put_failure;
2171         }
2172         if (rt->dst.dev &&
2173             nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2174                 goto nla_put_failure;
2175 #ifdef CONFIG_IP_ROUTE_CLASSID
2176         if (rt->dst.tclassid &&
2177             nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2178                 goto nla_put_failure;
2179 #endif
2180         if (!rt_is_input_route(rt) &&
2181             fl4->saddr != src) {
2182                 if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
2183                         goto nla_put_failure;
2184         }
2185         if (rt->rt_gateway &&
2186             nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
2187                 goto nla_put_failure;
2188
2189         expires = rt->dst.expires;
2190         if (expires) {
2191                 unsigned long now = jiffies;
2192
2193                 if (time_before(now, expires))
2194                         expires -= now;
2195                 else
2196                         expires = 0;
2197         }
2198
2199         memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2200         if (rt->rt_pmtu && expires)
2201                 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2202         if (rtnetlink_put_metrics(skb, metrics) < 0)
2203                 goto nla_put_failure;
2204
2205         if (fl4->flowi4_mark &&
2206             nla_put_be32(skb, RTA_MARK, fl4->flowi4_mark))
2207                 goto nla_put_failure;
2208
2209         error = rt->dst.error;
2210
2211         if (rt_is_input_route(rt)) {
2212                 if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
2213                         goto nla_put_failure;
2214         }
2215
2216         if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
2217                 goto nla_put_failure;
2218
2219         return nlmsg_end(skb, nlh);
2220
2221 nla_put_failure:
2222         nlmsg_cancel(skb, nlh);
2223         return -EMSGSIZE;
2224 }
2225
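/* Handle RTM_GETROUTE requests (what "ip route get" sends): build a
 * dummy skb, resolve the route as input (if RTA_IIF is given) or as
 * output, and unicast an RTM_NEWROUTE reply to the requester.
 */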
2226 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
2227 {
2228         struct net *net = sock_net(in_skb->sk);
2229         struct rtmsg *rtm;
2230         struct nlattr *tb[RTA_MAX+1];
2231         struct rtable *rt = NULL;
2232         struct flowi4 fl4;
2233         __be32 dst = 0;
2234         __be32 src = 0;
2235         u32 iif;
2236         int err;
2237         int mark;
2238         struct sk_buff *skb;
2239
2240         err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2241         if (err < 0)
2242                 goto errout;
2243
2244         rtm = nlmsg_data(nlh);
2245
2246         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2247         if (skb == NULL) {
2248                 err = -ENOBUFS;
2249                 goto errout;
2250         }
2251
2252         /* Reserve room for dummy headers; this skb can pass
2253            through a good chunk of the routing engine.
2254          */
2255         skb_reset_mac_header(skb);
2256         skb_reset_network_header(skb);
2257
2258         /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2259         ip_hdr(skb)->protocol = IPPROTO_ICMP;
2260         skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2261
2262         src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2263         dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
2264         iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2265         mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
2266
2267         memset(&fl4, 0, sizeof(fl4));
2268         fl4.daddr = dst;
2269         fl4.saddr = src;
2270         fl4.flowi4_tos = rtm->rtm_tos;
2271         fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
2272         fl4.flowi4_mark = mark;
2273
2274         if (iif) {
2275                 struct net_device *dev;
2276
2277                 dev = __dev_get_by_index(net, iif);
2278                 if (dev == NULL) {
2279                         err = -ENODEV;
2280                         goto errout_free;
2281                 }
2282
2283                 skb->protocol   = htons(ETH_P_IP);
2284                 skb->dev        = dev;
2285                 skb->mark       = mark;
2286                 local_bh_disable();
2287                 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2288                 local_bh_enable();
2289
2290                 rt = skb_rtable(skb);
2291                 if (err == 0 && rt->dst.error)
2292                         err = -rt->dst.error;
2293         } else {
2294                 rt = ip_route_output_key(net, &fl4);
2295
2296                 err = 0;
2297                 if (IS_ERR(rt))
2298                         err = PTR_ERR(rt);
2299         }
2300
2301         if (err)
2302                 goto errout_free;
2303
2304         skb_dst_set(skb, &rt->dst);
2305         if (rtm->rtm_flags & RTM_F_NOTIFY)
2306                 rt->rt_flags |= RTCF_NOTIFY;
2307
2308         err = rt_fill_info(net, dst, src, &fl4, skb,
2309                            NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
2310                            RTM_NEWROUTE, 0, 0);
2311         if (err <= 0)
2312                 goto errout_free;
2313
2314         err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2315 errout:
2316         return err;
2317
2318 errout_free:
2319         kfree_skb(skb);
2320         goto errout;
2321 }
2322
2323 int ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb)
2324 {
2325         return skb->len;
2326 }
2327
2328 void ip_rt_multicast_event(struct in_device *in_dev)
2329 {
2330         rt_cache_flush(dev_net(in_dev->dev));
2331 }
2332
2333 #ifdef CONFIG_SYSCTL
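/* Write-only handler behind /proc/sys/net/ipv4/route/flush; any write
 * flushes the per-netns route cache:
 *
 *	echo 1 > /proc/sys/net/ipv4/route/flush
 */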
2334 static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
2335                                         void __user *buffer,
2336                                         size_t *lenp, loff_t *ppos)
2337 {
2338         if (write) {
2339                 rt_cache_flush((struct net *)__ctl->extra1);
2340                 return 0;
2341         }
2342
2343         return -EINVAL;
2344 }
2345
2346 static ctl_table ipv4_route_table[] = {
2347         {
2348                 .procname       = "gc_thresh",
2349                 .data           = &ipv4_dst_ops.gc_thresh,
2350                 .maxlen         = sizeof(int),
2351                 .mode           = 0644,
2352                 .proc_handler   = proc_dointvec,
2353         },
2354         {
2355                 .procname       = "max_size",
2356                 .data           = &ip_rt_max_size,
2357                 .maxlen         = sizeof(int),
2358                 .mode           = 0644,
2359                 .proc_handler   = proc_dointvec,
2360         },
2361         {
2362                 /* Deprecated. Use gc_min_interval_ms. */
2363
2364                 .procname       = "gc_min_interval",
2365                 .data           = &ip_rt_gc_min_interval,
2366                 .maxlen         = sizeof(int),
2367                 .mode           = 0644,
2368                 .proc_handler   = proc_dointvec_jiffies,
2369         },
2370         {
2371                 .procname       = "gc_min_interval_ms",
2372                 .data           = &ip_rt_gc_min_interval,
2373                 .maxlen         = sizeof(int),
2374                 .mode           = 0644,
2375                 .proc_handler   = proc_dointvec_ms_jiffies,
2376         },
2377         {
2378                 .procname       = "gc_timeout",
2379                 .data           = &ip_rt_gc_timeout,
2380                 .maxlen         = sizeof(int),
2381                 .mode           = 0644,
2382                 .proc_handler   = proc_dointvec_jiffies,
2383         },
2384         {
2385                 .procname       = "gc_interval",
2386                 .data           = &ip_rt_gc_interval,
2387                 .maxlen         = sizeof(int),
2388                 .mode           = 0644,
2389                 .proc_handler   = proc_dointvec_jiffies,
2390         },
2391         {
2392                 .procname       = "redirect_load",
2393                 .data           = &ip_rt_redirect_load,
2394                 .maxlen         = sizeof(int),
2395                 .mode           = 0644,
2396                 .proc_handler   = proc_dointvec,
2397         },
2398         {
2399                 .procname       = "redirect_number",
2400                 .data           = &ip_rt_redirect_number,
2401                 .maxlen         = sizeof(int),
2402                 .mode           = 0644,
2403                 .proc_handler   = proc_dointvec,
2404         },
2405         {
2406                 .procname       = "redirect_silence",
2407                 .data           = &ip_rt_redirect_silence,
2408                 .maxlen         = sizeof(int),
2409                 .mode           = 0644,
2410                 .proc_handler   = proc_dointvec,
2411         },
2412         {
2413                 .procname       = "error_cost",
2414                 .data           = &ip_rt_error_cost,
2415                 .maxlen         = sizeof(int),
2416                 .mode           = 0644,
2417                 .proc_handler   = proc_dointvec,
2418         },
2419         {
2420                 .procname       = "error_burst",
2421                 .data           = &ip_rt_error_burst,
2422                 .maxlen         = sizeof(int),
2423                 .mode           = 0644,
2424                 .proc_handler   = proc_dointvec,
2425         },
2426         {
2427                 .procname       = "gc_elasticity",
2428                 .data           = &ip_rt_gc_elasticity,
2429                 .maxlen         = sizeof(int),
2430                 .mode           = 0644,
2431                 .proc_handler   = proc_dointvec,
2432         },
2433         {
2434                 .procname       = "mtu_expires",
2435                 .data           = &ip_rt_mtu_expires,
2436                 .maxlen         = sizeof(int),
2437                 .mode           = 0644,
2438                 .proc_handler   = proc_dointvec_jiffies,
2439         },
2440         {
2441                 .procname       = "min_pmtu",
2442                 .data           = &ip_rt_min_pmtu,
2443                 .maxlen         = sizeof(int),
2444                 .mode           = 0644,
2445                 .proc_handler   = proc_dointvec,
2446         },
2447         {
2448                 .procname       = "min_adv_mss",
2449                 .data           = &ip_rt_min_advmss,
2450                 .maxlen         = sizeof(int),
2451                 .mode           = 0644,
2452                 .proc_handler   = proc_dointvec,
2453         },
2454         { }
2455 };
2456
2457 static struct ctl_table ipv4_route_flush_table[] = {
2458         {
2459                 .procname       = "flush",
2460                 .maxlen         = sizeof(int),
2461                 .mode           = 0200,
2462                 .proc_handler   = ipv4_sysctl_rtcache_flush,
2463         },
2464         { },
2465 };
2466
2467 static __net_init int sysctl_route_net_init(struct net *net)
2468 {
2469         struct ctl_table *tbl;
2470
2471         tbl = ipv4_route_flush_table;
2472         if (!net_eq(net, &init_net)) {
2473                 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
2474                 if (tbl == NULL)
2475                         goto err_dup;
2476         }
2477         tbl[0].extra1 = net;
2478
2479         net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
2480         if (net->ipv4.route_hdr == NULL)
2481                 goto err_reg;
2482         return 0;
2483
2484 err_reg:
2485         if (tbl != ipv4_route_flush_table)
2486                 kfree(tbl);
2487 err_dup:
2488         return -ENOMEM;
2489 }
2490
2491 static __net_exit void sysctl_route_net_exit(struct net *net)
2492 {
2493         struct ctl_table *tbl;
2494
2495         tbl = net->ipv4.route_hdr->ctl_table_arg;
2496         unregister_net_sysctl_table(net->ipv4.route_hdr);
2497         BUG_ON(tbl == ipv4_route_flush_table);
2498         kfree(tbl);
2499 }
2500
2501 static __net_initdata struct pernet_operations sysctl_route_ops = {
2502         .init = sysctl_route_net_init,
2503         .exit = sysctl_route_net_exit,
2504 };
2505 #endif
2506
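/* Cached routes record the rt_genid of their netns at creation time and
 * are treated as expired once it changes, so bumping rt_genid flushes
 * the whole cache; each namespace starts at generation zero.
 */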
2507 static __net_init int rt_genid_init(struct net *net)
2508 {
2509         atomic_set(&net->rt_genid, 0);
2510         get_random_bytes(&net->ipv4.dev_addr_genid,
2511                          sizeof(net->ipv4.dev_addr_genid));
2512         return 0;
2513 }
2514
2515 static __net_initdata struct pernet_operations rt_genid_ops = {
2516         .init = rt_genid_init,
2517 };
2518
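/* Each netns gets its own inetpeer base, holding long-lived
 * per-remote-host state such as ICMP rate-limiting counters.
 */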
2519 static int __net_init ipv4_inetpeer_init(struct net *net)
2520 {
2521         struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
2522
2523         if (!bp)
2524                 return -ENOMEM;
2525         inet_peer_base_init(bp);
2526         net->ipv4.peers = bp;
2527         return 0;
2528 }
2529
2530 static void __net_exit ipv4_inetpeer_exit(struct net *net)
2531 {
2532         struct inet_peer_base *bp = net->ipv4.peers;
2533
2534         net->ipv4.peers = NULL;
2535         inetpeer_invalidate_tree(bp);
2536         kfree(bp);
2537 }
2538
2539 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
2540         .init   =       ipv4_inetpeer_init,
2541         .exit   =       ipv4_inetpeer_exit,
2542 };
2543
2544 #ifdef CONFIG_IP_ROUTE_CLASSID
2545 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
2546 #endif /* CONFIG_IP_ROUTE_CLASSID */
2547
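/* One-time boot initialization of the IPv4 routing subsystem: allocate
 * the dst caches, create the proc and sysctl entries, and register the
 * RTM_GETROUTE handler.
 */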
2548 int __init ip_rt_init(void)
2549 {
2550         int rc = 0;
2551
2552 #ifdef CONFIG_IP_ROUTE_CLASSID
2553         ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
2554         if (!ip_rt_acct)
2555                 panic("IP: failed to allocate ip_rt_acct\n");
2556 #endif
2557
2558         ipv4_dst_ops.kmem_cachep =
2559                 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
2560                                   SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2561
2562         ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
2563
2564         if (dst_entries_init(&ipv4_dst_ops) < 0)
2565                 panic("IP: failed to allocate ipv4_dst_ops counter\n");
2566
2567         if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
2568                 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
2569
2570         ipv4_dst_ops.gc_thresh = ~0;
2571         ip_rt_max_size = INT_MAX;
2572
2573         devinet_init();
2574         ip_fib_init();
2575
2576         if (ip_rt_proc_init())
2577                 pr_err("Unable to create route proc files\n");
2578 #ifdef CONFIG_XFRM
2579         xfrm_init();
2580         xfrm4_init(ip_rt_max_size);
2581 #endif
2582         rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
2583
2584 #ifdef CONFIG_SYSCTL
2585         register_pernet_subsys(&sysctl_route_ops);
2586 #endif
2587         register_pernet_subsys(&rt_genid_ops);
2588         register_pernet_subsys(&ipv4_inetpeer_ops);
2589         return rc;
2590 }
2591
2592 #ifdef CONFIG_SYSCTL
2593 /*
2594  * We really need to sanitize the damn ipv4 init order, then all
2595  * this nonsense will go away.
2596  */
2597 void __init ip_static_sysctl_init(void)
2598 {
2599         register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
2600 }
2601 #endif