/* Scraped from gitweb: linux-2.6.git / net / ipv4 / ip_gre.c */
1 /*
2  *      Linux NET3:     GRE over IP protocol decoder. 
3  *
4  *      Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
5  *
6  *      This program is free software; you can redistribute it and/or
7  *      modify it under the terms of the GNU General Public License
8  *      as published by the Free Software Foundation; either version
9  *      2 of the License, or (at your option) any later version.
10  *
11  */
12
13 #include <linux/capability.h>
14 #include <linux/config.h>
15 #include <linux/module.h>
16 #include <linux/types.h>
17 #include <linux/sched.h>
18 #include <linux/kernel.h>
19 #include <asm/uaccess.h>
20 #include <linux/skbuff.h>
21 #include <linux/netdevice.h>
22 #include <linux/in.h>
23 #include <linux/tcp.h>
24 #include <linux/udp.h>
25 #include <linux/if_arp.h>
26 #include <linux/mroute.h>
27 #include <linux/init.h>
28 #include <linux/in6.h>
29 #include <linux/inetdevice.h>
30 #include <linux/igmp.h>
31 #include <linux/netfilter_ipv4.h>
32 #include <linux/if_ether.h>
33
34 #include <net/sock.h>
35 #include <net/ip.h>
36 #include <net/icmp.h>
37 #include <net/protocol.h>
38 #include <net/ipip.h>
39 #include <net/arp.h>
40 #include <net/checksum.h>
41 #include <net/dsfield.h>
42 #include <net/inet_ecn.h>
43 #include <net/xfrm.h>
44
45 #ifdef CONFIG_IPV6
46 #include <net/ipv6.h>
47 #include <net/ip6_fib.h>
48 #include <net/ip6_route.h>
49 #endif
50
51 /*
52    Problems & solutions
53    --------------------
54
55    1. The most important issue is detecting local dead loops.
56    They would cause complete host lockup in transmit, which
57    would be "resolved" by stack overflow or, if queueing is enabled,
58    with infinite looping in net_bh.
59
60    We cannot track such dead loops during route installation,
61    it is infeasible task. The most general solutions would be
62    to keep skb->encapsulation counter (sort of local ttl),
63    and silently drop packet when it expires. It is the best
64    solution, but it supposes maintaining a new variable in ALL
65    skb, even if no tunneling is used.
66
67    Current solution: t->recursion lock breaks dead loops. It looks 
68    like dev->tbusy flag, but I preferred new variable, because
69    the semantics is different. One day, when hard_start_xmit
70    will be multithreaded we will have to use skb->encapsulation.
71
72
73
74    2. Networking dead loops would not kill routers, but would really
75    kill network. IP hop limit plays role of "t->recursion" in this case,
76    if we copy it from packet being encapsulated to upper header.
77    It is very good solution, but it introduces two problems:
78
79    - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
80      do not work over tunnels.
81    - traceroute does not work. I planned to relay ICMP from tunnel,
82      so that this problem would be solved and traceroute output
83      would even more informative. This idea appeared to be wrong:
84      only Linux complies to rfc1812 now (yes, guys, Linux is the only
85      true router now :-)), all routers (at least, in neighbourhood of mine)
86      return only 8 bytes of payload. It is the end.
87
88    Hence, if we want that OSPF worked or traceroute said something reasonable,
89    we should search for another solution.
90
91    One of them is to parse packet trying to detect inner encapsulation
92    made by our node. It is difficult or even impossible, especially,
93    taking into account fragmentation. To be short, it is not a solution at all.
94
95    Current solution: The solution was UNEXPECTEDLY SIMPLE.
96    We force DF flag on tunnels with preconfigured hop limit,
97    that is ALL. :-) Well, it does not remove the problem completely,
98    but exponential growth of network traffic is changed to linear
99    (branches, that exceed pmtu are pruned) and tunnel mtu
100    quickly degrades to a value <68, where looping stops.
101    Yes, it is not good if there exists a router in the loop,
102    which does not force DF, even when encapsulating packets have DF set.
103    But it is not our problem! Nobody could accuse us, we made
104    all that we could make. Even if it is your gated who injected
105    fatal route to network, even if it were you who configured
106    fatal static route: you are innocent. :-)
107
108
109
110    3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
111    practically identical code. It would be good to glue them
112    together, but it is not very evident, how to make them modular.
113    sit is integral part of IPv6, ipip and gre are naturally modular.
114    We could extract common parts (hash table, ioctl etc)
115    to a separate module (ip_tunnel.c).
116
117    Alexey Kuznetsov.
118  */
119
120 static int ipgre_tunnel_init(struct net_device *dev);
121 static void ipgre_tunnel_setup(struct net_device *dev);
122
123 /* Fallback tunnel: no source, no destination, no key, no options */
124
125 static int ipgre_fb_tunnel_init(struct net_device *dev);
126
127 static struct net_device *ipgre_fb_tunnel_dev;
128
129 /* Tunnel hash table */
130
131 /*
132    4 hash tables:
133
134    3: (remote,local)
135    2: (remote,*)
136    1: (*,local)
137    0: (*,*)
138
139    We require exact key match i.e. if a key is present in packet
140    it will match only tunnel with the same key; if it is not present,
141    it will match only keyless tunnel.
142
143    All keyless packets, if not matching a configured keyless tunnel,
144    will match the fallback tunnel.
145  */
146
147 #define HASH_SIZE  16
148 #define HASH(addr) ((addr^(addr>>4))&0xF)
149
150 static struct ip_tunnel *tunnels[4][HASH_SIZE];
151
152 #define tunnels_r_l     (tunnels[3])
153 #define tunnels_r       (tunnels[2])
154 #define tunnels_l       (tunnels[1])
155 #define tunnels_wc      (tunnels[0])
156
157 static DEFINE_RWLOCK(ipgre_lock);
158
159 /* Given src, dst and key, find appropriate for input tunnel. */
160
161 static struct ip_tunnel * ipgre_tunnel_lookup(u32 remote, u32 local, u32 key)
162 {
163         unsigned h0 = HASH(remote);
164         unsigned h1 = HASH(key);
165         struct ip_tunnel *t;
166
167         for (t = tunnels_r_l[h0^h1]; t; t = t->next) {
168                 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
169                         if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
170                                 return t;
171                 }
172         }
173         for (t = tunnels_r[h0^h1]; t; t = t->next) {
174                 if (remote == t->parms.iph.daddr) {
175                         if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
176                                 return t;
177                 }
178         }
179         for (t = tunnels_l[h1]; t; t = t->next) {
180                 if (local == t->parms.iph.saddr ||
181                      (local == t->parms.iph.daddr && MULTICAST(local))) {
182                         if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
183                                 return t;
184                 }
185         }
186         for (t = tunnels_wc[h1]; t; t = t->next) {
187                 if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
188                         return t;
189         }
190
191         if (ipgre_fb_tunnel_dev->flags&IFF_UP)
192                 return netdev_priv(ipgre_fb_tunnel_dev);
193         return NULL;
194 }
195
196 static struct ip_tunnel **ipgre_bucket(struct ip_tunnel *t)
197 {
198         u32 remote = t->parms.iph.daddr;
199         u32 local = t->parms.iph.saddr;
200         u32 key = t->parms.i_key;
201         unsigned h = HASH(key);
202         int prio = 0;
203
204         if (local)
205                 prio |= 1;
206         if (remote && !MULTICAST(remote)) {
207                 prio |= 2;
208                 h ^= HASH(remote);
209         }
210
211         return &tunnels[prio][h];
212 }
213
214 static void ipgre_tunnel_link(struct ip_tunnel *t)
215 {
216         struct ip_tunnel **tp = ipgre_bucket(t);
217
218         t->next = *tp;
219         write_lock_bh(&ipgre_lock);
220         *tp = t;
221         write_unlock_bh(&ipgre_lock);
222 }
223
224 static void ipgre_tunnel_unlink(struct ip_tunnel *t)
225 {
226         struct ip_tunnel **tp;
227
228         for (tp = ipgre_bucket(t); *tp; tp = &(*tp)->next) {
229                 if (t == *tp) {
230                         write_lock_bh(&ipgre_lock);
231                         *tp = t->next;
232                         write_unlock_bh(&ipgre_lock);
233                         break;
234                 }
235         }
236 }
237
238 static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int create)
239 {
240         u32 remote = parms->iph.daddr;
241         u32 local = parms->iph.saddr;
242         u32 key = parms->i_key;
243         struct ip_tunnel *t, **tp, *nt;
244         struct net_device *dev;
245         unsigned h = HASH(key);
246         int prio = 0;
247         char name[IFNAMSIZ];
248
249         if (local)
250                 prio |= 1;
251         if (remote && !MULTICAST(remote)) {
252                 prio |= 2;
253                 h ^= HASH(remote);
254         }
255         for (tp = &tunnels[prio][h]; (t = *tp) != NULL; tp = &t->next) {
256                 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
257                         if (key == t->parms.i_key)
258                                 return t;
259                 }
260         }
261         if (!create)
262                 return NULL;
263
264         if (parms->name[0])
265                 strlcpy(name, parms->name, IFNAMSIZ);
266         else {
267                 int i;
268                 for (i=1; i<100; i++) {
269                         sprintf(name, "gre%d", i);
270                         if (__dev_get_by_name(name) == NULL)
271                                 break;
272                 }
273                 if (i==100)
274                         goto failed;
275         }
276
277         dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
278         if (!dev)
279           return NULL;
280
281         dev->init = ipgre_tunnel_init;
282         nt = netdev_priv(dev);
283         nt->parms = *parms;
284
285         if (register_netdevice(dev) < 0) {
286                 free_netdev(dev);
287                 goto failed;
288         }
289
290         dev_hold(dev);
291         ipgre_tunnel_link(nt);
292         return nt;
293
294 failed:
295         return NULL;
296 }
297
298 static void ipgre_tunnel_uninit(struct net_device *dev)
299 {
300         ipgre_tunnel_unlink(netdev_priv(dev));
301         dev_put(dev);
302 }
303
304
305 static void ipgre_err(struct sk_buff *skb, u32 info)
306 {
307 #ifndef I_WISH_WORLD_WERE_PERFECT
308
309 /* It is not :-( All the routers (except for Linux) return only
310    8 bytes of packet payload. It means, that precise relaying of
311    ICMP in the real Internet is absolutely infeasible.
312
313    Moreover, Cisco "wise men" put GRE key to the third word
314    in GRE header. It makes impossible maintaining even soft state for keyed
315    GRE tunnels with enabled checksum. Tell them "thank you".
316
317    Well, I wonder, rfc1812 was written by Cisco employee,
318    what the hell these idiots break standrads established
319    by themself???
320  */
321
322         struct iphdr *iph = (struct iphdr*)skb->data;
323         u16          *p = (u16*)(skb->data+(iph->ihl<<2));
324         int grehlen = (iph->ihl<<2) + 4;
325         int type = skb->h.icmph->type;
326         int code = skb->h.icmph->code;
327         struct ip_tunnel *t;
328         u16 flags;
329
330         flags = p[0];
331         if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
332                 if (flags&(GRE_VERSION|GRE_ROUTING))
333                         return;
334                 if (flags&GRE_KEY) {
335                         grehlen += 4;
336                         if (flags&GRE_CSUM)
337                                 grehlen += 4;
338                 }
339         }
340
341         /* If only 8 bytes returned, keyed message will be dropped here */
342         if (skb_headlen(skb) < grehlen)
343                 return;
344
345         switch (type) {
346         default:
347         case ICMP_PARAMETERPROB:
348                 return;
349
350         case ICMP_DEST_UNREACH:
351                 switch (code) {
352                 case ICMP_SR_FAILED:
353                 case ICMP_PORT_UNREACH:
354                         /* Impossible event. */
355                         return;
356                 case ICMP_FRAG_NEEDED:
357                         /* Soft state for pmtu is maintained by IP core. */
358                         return;
359                 default:
360                         /* All others are translated to HOST_UNREACH.
361                            rfc2003 contains "deep thoughts" about NET_UNREACH,
362                            I believe they are just ether pollution. --ANK
363                          */
364                         break;
365                 }
366                 break;
367         case ICMP_TIME_EXCEEDED:
368                 if (code != ICMP_EXC_TTL)
369                         return;
370                 break;
371         }
372
373         read_lock(&ipgre_lock);
374         t = ipgre_tunnel_lookup(iph->daddr, iph->saddr, (flags&GRE_KEY) ? *(((u32*)p) + (grehlen>>2) - 1) : 0);
375         if (t == NULL || t->parms.iph.daddr == 0 || MULTICAST(t->parms.iph.daddr))
376                 goto out;
377
378         if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
379                 goto out;
380
381         if (jiffies - t->err_time < IPTUNNEL_ERR_TIMEO)
382                 t->err_count++;
383         else
384                 t->err_count = 1;
385         t->err_time = jiffies;
386 out:
387         read_unlock(&ipgre_lock);
388         return;
389 #else
390         struct iphdr *iph = (struct iphdr*)dp;
391         struct iphdr *eiph;
392         u16          *p = (u16*)(dp+(iph->ihl<<2));
393         int type = skb->h.icmph->type;
394         int code = skb->h.icmph->code;
395         int rel_type = 0;
396         int rel_code = 0;
397         int rel_info = 0;
398         u16 flags;
399         int grehlen = (iph->ihl<<2) + 4;
400         struct sk_buff *skb2;
401         struct flowi fl;
402         struct rtable *rt;
403
404         if (p[1] != htons(ETH_P_IP))
405                 return;
406
407         flags = p[0];
408         if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
409                 if (flags&(GRE_VERSION|GRE_ROUTING))
410                         return;
411                 if (flags&GRE_CSUM)
412                         grehlen += 4;
413                 if (flags&GRE_KEY)
414                         grehlen += 4;
415                 if (flags&GRE_SEQ)
416                         grehlen += 4;
417         }
418         if (len < grehlen + sizeof(struct iphdr))
419                 return;
420         eiph = (struct iphdr*)(dp + grehlen);
421
422         switch (type) {
423         default:
424                 return;
425         case ICMP_PARAMETERPROB:
426                 if (skb->h.icmph->un.gateway < (iph->ihl<<2))
427                         return;
428
429                 /* So... This guy found something strange INSIDE encapsulated
430                    packet. Well, he is fool, but what can we do ?
431                  */
432                 rel_type = ICMP_PARAMETERPROB;
433                 rel_info = skb->h.icmph->un.gateway - grehlen;
434                 break;
435
436         case ICMP_DEST_UNREACH:
437                 switch (code) {
438                 case ICMP_SR_FAILED:
439                 case ICMP_PORT_UNREACH:
440                         /* Impossible event. */
441                         return;
442                 case ICMP_FRAG_NEEDED:
443                         /* And it is the only really necessary thing :-) */
444                         rel_info = ntohs(skb->h.icmph->un.frag.mtu);
445                         if (rel_info < grehlen+68)
446                                 return;
447                         rel_info -= grehlen;
448                         /* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */
449                         if (rel_info > ntohs(eiph->tot_len))
450                                 return;
451                         break;
452                 default:
453                         /* All others are translated to HOST_UNREACH.
454                            rfc2003 contains "deep thoughts" about NET_UNREACH,
455                            I believe, it is just ether pollution. --ANK
456                          */
457                         rel_type = ICMP_DEST_UNREACH;
458                         rel_code = ICMP_HOST_UNREACH;
459                         break;
460                 }
461                 break;
462         case ICMP_TIME_EXCEEDED:
463                 if (code != ICMP_EXC_TTL)
464                         return;
465                 break;
466         }
467
468         /* Prepare fake skb to feed it to icmp_send */
469         skb2 = skb_clone(skb, GFP_ATOMIC);
470         if (skb2 == NULL)
471                 return;
472         dst_release(skb2->dst);
473         skb2->dst = NULL;
474         skb_pull(skb2, skb->data - (u8*)eiph);
475         skb2->nh.raw = skb2->data;
476
477         /* Try to guess incoming interface */
478         memset(&fl, 0, sizeof(fl));
479         fl.fl4_dst = eiph->saddr;
480         fl.fl4_tos = RT_TOS(eiph->tos);
481         fl.proto = IPPROTO_GRE;
482         if (ip_route_output_key(&rt, &fl)) {
483                 kfree_skb(skb2);
484                 return;
485         }
486         skb2->dev = rt->u.dst.dev;
487
488         /* route "incoming" packet */
489         if (rt->rt_flags&RTCF_LOCAL) {
490                 ip_rt_put(rt);
491                 rt = NULL;
492                 fl.fl4_dst = eiph->daddr;
493                 fl.fl4_src = eiph->saddr;
494                 fl.fl4_tos = eiph->tos;
495                 if (ip_route_output_key(&rt, &fl) ||
496                     rt->u.dst.dev->type != ARPHRD_IPGRE) {
497                         ip_rt_put(rt);
498                         kfree_skb(skb2);
499                         return;
500                 }
501         } else {
502                 ip_rt_put(rt);
503                 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) ||
504                     skb2->dst->dev->type != ARPHRD_IPGRE) {
505                         kfree_skb(skb2);
506                         return;
507                 }
508         }
509
510         /* change mtu on this route */
511         if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
512                 if (rel_info > dst_mtu(skb2->dst)) {
513                         kfree_skb(skb2);
514                         return;
515                 }
516                 skb2->dst->ops->update_pmtu(skb2->dst, rel_info);
517                 rel_info = htonl(rel_info);
518         } else if (type == ICMP_TIME_EXCEEDED) {
519                 struct ip_tunnel *t = netdev_priv(skb2->dev);
520                 if (t->parms.iph.ttl) {
521                         rel_type = ICMP_DEST_UNREACH;
522                         rel_code = ICMP_HOST_UNREACH;
523                 }
524         }
525
526         icmp_send(skb2, rel_type, rel_code, rel_info);
527         kfree_skb(skb2);
528 #endif
529 }
530
531 static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
532 {
533         if (INET_ECN_is_ce(iph->tos)) {
534                 if (skb->protocol == htons(ETH_P_IP)) {
535                         IP_ECN_set_ce(skb->nh.iph);
536                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
537                         IP6_ECN_set_ce(skb->nh.ipv6h);
538                 }
539         }
540 }
541
542 static inline u8
543 ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb)
544 {
545         u8 inner = 0;
546         if (skb->protocol == htons(ETH_P_IP))
547                 inner = old_iph->tos;
548         else if (skb->protocol == htons(ETH_P_IPV6))
549                 inner = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
550         return INET_ECN_encapsulate(tos, inner);
551 }
552
553 static int ipgre_rcv(struct sk_buff *skb)
554 {
555         struct iphdr *iph;
556         u8     *h;
557         u16    flags;
558         u16    csum = 0;
559         u32    key = 0;
560         u32    seqno = 0;
561         struct ip_tunnel *tunnel;
562         int    offset = 4;
563
564         if (!pskb_may_pull(skb, 16))
565                 goto drop_nolock;
566
567         iph = skb->nh.iph;
568         h = skb->data;
569         flags = *(u16*)h;
570
571         if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
572                 /* - Version must be 0.
573                    - We do not support routing headers.
574                  */
575                 if (flags&(GRE_VERSION|GRE_ROUTING))
576                         goto drop_nolock;
577
578                 if (flags&GRE_CSUM) {
579                         switch (skb->ip_summed) {
580                         case CHECKSUM_HW:
581                                 csum = (u16)csum_fold(skb->csum);
582                                 if (!csum)
583                                         break;
584                                 /* fall through */
585                         case CHECKSUM_NONE:
586                                 skb->csum = 0;
587                                 csum = __skb_checksum_complete(skb);
588                                 skb->ip_summed = CHECKSUM_HW;
589                         }
590                         offset += 4;
591                 }
592                 if (flags&GRE_KEY) {
593                         key = *(u32*)(h + offset);
594                         offset += 4;
595                 }
596                 if (flags&GRE_SEQ) {
597                         seqno = ntohl(*(u32*)(h + offset));
598                         offset += 4;
599                 }
600         }
601
602         read_lock(&ipgre_lock);
603         if ((tunnel = ipgre_tunnel_lookup(iph->saddr, iph->daddr, key)) != NULL) {
604                 secpath_reset(skb);
605
606                 skb->protocol = *(u16*)(h + 2);
607                 /* WCCP version 1 and 2 protocol decoding.
608                  * - Change protocol to IP
609                  * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
610                  */
611                 if (flags == 0 &&
612                     skb->protocol == __constant_htons(ETH_P_WCCP)) {
613                         skb->protocol = __constant_htons(ETH_P_IP);
614                         if ((*(h + offset) & 0xF0) != 0x40) 
615                                 offset += 4;
616                 }
617
618                 skb->mac.raw = skb->nh.raw;
619                 skb->nh.raw = __pskb_pull(skb, offset);
620                 skb_postpull_rcsum(skb, skb->h.raw, offset);
621                 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
622                 skb->pkt_type = PACKET_HOST;
623 #ifdef CONFIG_NET_IPGRE_BROADCAST
624                 if (MULTICAST(iph->daddr)) {
625                         /* Looped back packet, drop it! */
626                         if (((struct rtable*)skb->dst)->fl.iif == 0)
627                                 goto drop;
628                         tunnel->stat.multicast++;
629                         skb->pkt_type = PACKET_BROADCAST;
630                 }
631 #endif
632
633                 if (((flags&GRE_CSUM) && csum) ||
634                     (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
635                         tunnel->stat.rx_crc_errors++;
636                         tunnel->stat.rx_errors++;
637                         goto drop;
638                 }
639                 if (tunnel->parms.i_flags&GRE_SEQ) {
640                         if (!(flags&GRE_SEQ) ||
641                             (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
642                                 tunnel->stat.rx_fifo_errors++;
643                                 tunnel->stat.rx_errors++;
644                                 goto drop;
645                         }
646                         tunnel->i_seqno = seqno + 1;
647                 }
648                 tunnel->stat.rx_packets++;
649                 tunnel->stat.rx_bytes += skb->len;
650                 skb->dev = tunnel->dev;
651                 dst_release(skb->dst);
652                 skb->dst = NULL;
653                 nf_reset(skb);
654                 ipgre_ecn_decapsulate(iph, skb);
655                 netif_rx(skb);
656                 read_unlock(&ipgre_lock);
657                 return(0);
658         }
659         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);
660
661 drop:
662         read_unlock(&ipgre_lock);
663 drop_nolock:
664         kfree_skb(skb);
665         return(0);
666 }
667
668 static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
669 {
670         struct ip_tunnel *tunnel = netdev_priv(dev);
671         struct net_device_stats *stats = &tunnel->stat;
672         struct iphdr  *old_iph = skb->nh.iph;
673         struct iphdr  *tiph;
674         u8     tos;
675         u16    df;
676         struct rtable *rt;                      /* Route to the other host */
677         struct net_device *tdev;                        /* Device to other host */
678         struct iphdr  *iph;                     /* Our new IP header */
679         int    max_headroom;                    /* The extra header space needed */
680         int    gre_hlen;
681         u32    dst;
682         int    mtu;
683
684         if (tunnel->recursion++) {
685                 tunnel->stat.collisions++;
686                 goto tx_error;
687         }
688
689         if (dev->hard_header) {
690                 gre_hlen = 0;
691                 tiph = (struct iphdr*)skb->data;
692         } else {
693                 gre_hlen = tunnel->hlen;
694                 tiph = &tunnel->parms.iph;
695         }
696
697         if ((dst = tiph->daddr) == 0) {
698                 /* NBMA tunnel */
699
700                 if (skb->dst == NULL) {
701                         tunnel->stat.tx_fifo_errors++;
702                         goto tx_error;
703                 }
704
705                 if (skb->protocol == htons(ETH_P_IP)) {
706                         rt = (struct rtable*)skb->dst;
707                         if ((dst = rt->rt_gateway) == 0)
708                                 goto tx_error_icmp;
709                 }
710 #ifdef CONFIG_IPV6
711                 else if (skb->protocol == htons(ETH_P_IPV6)) {
712                         struct in6_addr *addr6;
713                         int addr_type;
714                         struct neighbour *neigh = skb->dst->neighbour;
715
716                         if (neigh == NULL)
717                                 goto tx_error;
718
719                         addr6 = (struct in6_addr*)&neigh->primary_key;
720                         addr_type = ipv6_addr_type(addr6);
721
722                         if (addr_type == IPV6_ADDR_ANY) {
723                                 addr6 = &skb->nh.ipv6h->daddr;
724                                 addr_type = ipv6_addr_type(addr6);
725                         }
726
727                         if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
728                                 goto tx_error_icmp;
729
730                         dst = addr6->s6_addr32[3];
731                 }
732 #endif
733                 else
734                         goto tx_error;
735         }
736
737         tos = tiph->tos;
738         if (tos&1) {
739                 if (skb->protocol == htons(ETH_P_IP))
740                         tos = old_iph->tos;
741                 tos &= ~1;
742         }
743
744         {
745                 struct flowi fl = { .oif = tunnel->parms.link,
746                                     .nl_u = { .ip4_u =
747                                               { .daddr = dst,
748                                                 .saddr = tiph->saddr,
749                                                 .tos = RT_TOS(tos) } },
750                                     .proto = IPPROTO_GRE };
751                 if (ip_route_output_key(&rt, &fl)) {
752                         tunnel->stat.tx_carrier_errors++;
753                         goto tx_error;
754                 }
755         }
756         tdev = rt->u.dst.dev;
757
758         if (tdev == dev) {
759                 ip_rt_put(rt);
760                 tunnel->stat.collisions++;
761                 goto tx_error;
762         }
763
764         df = tiph->frag_off;
765         if (df)
766                 mtu = dst_mtu(&rt->u.dst) - tunnel->hlen;
767         else
768                 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
769
770         if (skb->dst)
771                 skb->dst->ops->update_pmtu(skb->dst, mtu);
772
773         if (skb->protocol == htons(ETH_P_IP)) {
774                 df |= (old_iph->frag_off&htons(IP_DF));
775
776                 if ((old_iph->frag_off&htons(IP_DF)) &&
777                     mtu < ntohs(old_iph->tot_len)) {
778                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
779                         ip_rt_put(rt);
780                         goto tx_error;
781                 }
782         }
783 #ifdef CONFIG_IPV6
784         else if (skb->protocol == htons(ETH_P_IPV6)) {
785                 struct rt6_info *rt6 = (struct rt6_info*)skb->dst;
786
787                 if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) {
788                         if ((tunnel->parms.iph.daddr && !MULTICAST(tunnel->parms.iph.daddr)) ||
789                             rt6->rt6i_dst.plen == 128) {
790                                 rt6->rt6i_flags |= RTF_MODIFIED;
791                                 skb->dst->metrics[RTAX_MTU-1] = mtu;
792                         }
793                 }
794
795                 if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
796                         icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
797                         ip_rt_put(rt);
798                         goto tx_error;
799                 }
800         }
801 #endif
802
803         if (tunnel->err_count > 0) {
804                 if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
805                         tunnel->err_count--;
806
807                         dst_link_failure(skb);
808                 } else
809                         tunnel->err_count = 0;
810         }
811
812         max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;
813
814         if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
815                 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
816                 if (!new_skb) {
817                         ip_rt_put(rt);
818                         stats->tx_dropped++;
819                         dev_kfree_skb(skb);
820                         tunnel->recursion--;
821                         return 0;
822                 }
823                 if (skb->sk)
824                         skb_set_owner_w(new_skb, skb->sk);
825                 dev_kfree_skb(skb);
826                 skb = new_skb;
827                 old_iph = skb->nh.iph;
828         }
829
830         skb->h.raw = skb->nh.raw;
831         skb->nh.raw = skb_push(skb, gre_hlen);
832         memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
833         IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
834                               IPSKB_REROUTED);
835         dst_release(skb->dst);
836         skb->dst = &rt->u.dst;
837
838         /*
839          *      Push down and install the IPIP header.
840          */
841
842         iph                     =       skb->nh.iph;
843         iph->version            =       4;
844         iph->ihl                =       sizeof(struct iphdr) >> 2;
845         iph->frag_off           =       df;
846         iph->protocol           =       IPPROTO_GRE;
847         iph->tos                =       ipgre_ecn_encapsulate(tos, old_iph, skb);
848         iph->daddr              =       rt->rt_dst;
849         iph->saddr              =       rt->rt_src;
850
851         if ((iph->ttl = tiph->ttl) == 0) {
852                 if (skb->protocol == htons(ETH_P_IP))
853                         iph->ttl = old_iph->ttl;
854 #ifdef CONFIG_IPV6
855                 else if (skb->protocol == htons(ETH_P_IPV6))
856                         iph->ttl = ((struct ipv6hdr*)old_iph)->hop_limit;
857 #endif
858                 else
859                         iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
860         }
861
862         ((u16*)(iph+1))[0] = tunnel->parms.o_flags;
863         ((u16*)(iph+1))[1] = skb->protocol;
864
865         if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
866                 u32 *ptr = (u32*)(((u8*)iph) + tunnel->hlen - 4);
867
868                 if (tunnel->parms.o_flags&GRE_SEQ) {
869                         ++tunnel->o_seqno;
870                         *ptr = htonl(tunnel->o_seqno);
871                         ptr--;
872                 }
873                 if (tunnel->parms.o_flags&GRE_KEY) {
874                         *ptr = tunnel->parms.o_key;
875                         ptr--;
876                 }
877                 if (tunnel->parms.o_flags&GRE_CSUM) {
878                         *ptr = 0;
879                         *(__u16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
880                 }
881         }
882
883         nf_reset(skb);
884
885         IPTUNNEL_XMIT();
886         tunnel->recursion--;
887         return 0;
888
889 tx_error_icmp:
890         dst_link_failure(skb);
891
892 tx_error:
893         stats->tx_errors++;
894         dev_kfree_skb(skb);
895         tunnel->recursion--;
896         return 0;
897 }
898
/*
 * ioctl handler for GRE tunnel devices.
 *
 * Supports SIOCGETTUNNEL (read parameters), SIOCADDTUNNEL/SIOCCHGTUNNEL
 * (create or modify a tunnel) and SIOCDELTUNNEL (destroy a tunnel).
 * Tunnel parameters are exchanged with userspace as a struct
 * ip_tunnel_parm via ifr->ifr_ifru.ifru_data.
 *
 * Returns 0 on success or a negative errno.
 */
static int
ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		/* On the fallback device the caller may name any tunnel;
		 * on a real tunnel device we report that device itself. */
		if (dev == ipgre_fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipgre_tunnel_locate(&p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		/* Sanity-check the userspace-supplied header template:
		 * plain IPv4/GRE, no IP options, no GRE routing/version bits. */
		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			goto done;
		/* A fixed TTL implies DF so path MTU discovery works. */
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		/* Ignore key values when the corresponding flag is clear. */
		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		t = ipgre_tunnel_locate(&p, cmd == SIOCADDTUNNEL);

		if (dev != ipgre_fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				/* New parameters collide with a different
				 * existing tunnel. */
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned nflags=0;

				t = netdev_priv(dev);

				if (MULTICAST(p.iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p.iph.daddr)
					nflags = IFF_POINTOPOINT;

				/* Cannot switch a device between broadcast and
				 * point-to-point mode after creation. */
				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}
				/* Unlink, update the lookup keys, relink so the
				 * tunnel lands in the correct hash chain. */
				ipgre_tunnel_unlink(t);
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipgre_tunnel_link(t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			/* Non-key fields may be changed without relinking. */
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ipgre_fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipgre_tunnel_locate(&p, 0)) == NULL)
				goto done;
			/* The fallback device itself may never be deleted. */
			err = -EPERM;
			if (t == netdev_priv(ipgre_fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		err = unregister_netdevice(dev);
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
1020
1021 static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev)
1022 {
1023         return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
1024 }
1025
1026 static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1027 {
1028         struct ip_tunnel *tunnel = netdev_priv(dev);
1029         if (new_mtu < 68 || new_mtu > 0xFFF8 - tunnel->hlen)
1030                 return -EINVAL;
1031         dev->mtu = new_mtu;
1032         return 0;
1033 }
1034
1035 #ifdef CONFIG_NET_IPGRE_BROADCAST
1036 /* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
1038    over the Internet, provided multicast routing is tuned.
1039
1040
   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
1043    I have an impression, that Cisco could make something similar,
1044    but this feature is apparently missing in IOS<=11.2(8).
1045    
1046    I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
1047    with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
1048
1049    ping -t 255 224.66.66.66
1050
1051    If nobody answers, mbone does not work.
1052
1053    ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
1054    ip addr add 10.66.66.<somewhat>/24 dev Universe
1055    ifconfig Universe up
1056    ifconfig Universe add fe80::<Your_real_addr>/10
1057    ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
1058    ftp 10.66.66.66
1059    ...
1060    ftp fec0:6666:6666::193.233.7.65
1061    ...
1062
1063  */
1064
1065 static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
1066                         void *daddr, void *saddr, unsigned len)
1067 {
1068         struct ip_tunnel *t = netdev_priv(dev);
1069         struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
1070         u16 *p = (u16*)(iph+1);
1071
1072         memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
1073         p[0]            = t->parms.o_flags;
1074         p[1]            = htons(type);
1075
1076         /*
1077          *      Set the source hardware address. 
1078          */
1079          
1080         if (saddr)
1081                 memcpy(&iph->saddr, saddr, 4);
1082
1083         if (daddr) {
1084                 memcpy(&iph->daddr, daddr, 4);
1085                 return t->hlen;
1086         }
1087         if (iph->daddr && !MULTICAST(iph->daddr))
1088                 return t->hlen;
1089         
1090         return -t->hlen;
1091 }
1092
/*
 * Bring up a GRE tunnel device.
 *
 * For a multicast destination, look up the route to the group to find
 * the underlying physical device, join the group on that device, and
 * remember its ifindex in t->mlink so ipgre_close() can leave the group
 * on the same device later.  Returns 0 on success or -EADDRNOTAVAIL if
 * no route (or no inet device) exists for the multicast destination.
 */
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (MULTICAST(t->parms.iph.daddr)) {
		struct flowi fl = { .oif = t->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = t->parms.iph.daddr,
						.saddr = t->parms.iph.saddr,
						.tos = RT_TOS(t->parms.iph.tos) } },
				    .proto = IPPROTO_GRE };
		struct rtable *rt;
		if (ip_route_output_key(&rt, &fl))
			return -EADDRNOTAVAIL;
		/* The route's output device is where we must join the group;
		 * the route itself is no longer needed. */
		dev = rt->u.dst.dev;
		ip_rt_put(rt);
		if (__in_dev_get_rtnl(dev) == NULL)
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}
1116
1117 static int ipgre_close(struct net_device *dev)
1118 {
1119         struct ip_tunnel *t = netdev_priv(dev);
1120         if (MULTICAST(t->parms.iph.daddr) && t->mlink) {
1121                 struct in_device *in_dev = inetdev_by_index(t->mlink);
1122                 if (in_dev) {
1123                         ip_mc_dec_group(in_dev, t->parms.iph.daddr);
1124                         in_dev_put(in_dev);
1125                 }
1126         }
1127         return 0;
1128 }
1129
1130 #endif
1131
/*
 * alloc_netdev() setup callback: install the net_device operations and
 * default link-layer parameters common to all GRE tunnel devices.
 * The defaults assume the basic 4-byte GRE header on top of an IP
 * header; ipgre_tunnel_init() refines them per-tunnel.
 */
static void ipgre_tunnel_setup(struct net_device *dev)
{
	SET_MODULE_OWNER(dev);
	dev->uninit		= ipgre_tunnel_uninit;
	dev->destructor 	= free_netdev;
	dev->hard_start_xmit	= ipgre_tunnel_xmit;
	dev->get_stats		= ipgre_tunnel_get_stats;
	dev->do_ioctl		= ipgre_tunnel_ioctl;
	dev->change_mtu		= ipgre_tunnel_change_mtu;

	dev->type		= ARPHRD_IPGRE;
	/* Worst-case link header + outer IP header + basic GRE header. */
	dev->hard_header_len 	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	/* Addresses on a GRE device are raw IPv4 addresses. */
	dev->addr_len		= 4;
}
1149
/*
 * Per-device init callback for a GRE tunnel.
 *
 * Copies the tunnel endpoints into the device addresses, probes the
 * route to the remote endpoint to guess a reasonable MTU and hard
 * header length from the underlying device, and precalculates the
 * total encapsulation header length (tunnel->hlen) from the configured
 * output GRE options.  Returns 0 on success, -EINVAL for a multicast
 * tunnel without a local address.
 */
static int ipgre_tunnel_init(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	/* Outer IP header plus the 4-byte basic GRE header. */
	int addend = sizeof(struct iphdr) + 4;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	/* Guess output device to choose reasonable mtu and hard_header_len */

	if (iph->daddr) {
		struct flowi fl = { .oif = tunnel->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.saddr = iph->saddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_GRE };
		struct rtable *rt;
		if (!ip_route_output_key(&rt, &fl)) {
			tdev = rt->u.dst.dev;
			ip_rt_put(rt);
		}

		dev->flags |= IFF_POINTOPOINT;

#ifdef CONFIG_NET_IPGRE_BROADCAST
		/* Multicast destination: switch the device into broadcast
		 * mode with its own header/open/close operations. */
		if (MULTICAST(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->hard_header = ipgre_header;
			dev->open = ipgre_open;
			dev->stop = ipgre_close;
		}
#endif
	}

	/* No route found but a link was configured: use that device. */
	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	/* Precalculate GRE options length */
	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (tunnel->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_KEY)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_SEQ)
			addend += 4;
	}
	dev->hard_header_len = hlen + addend;
	dev->mtu = mtu - addend;
	tunnel->hlen = addend;
	return 0;
}
1220
/*
 * Init callback for the fallback "gre0" device.
 *
 * Sets up a minimal IPv4/GRE header template and registers the tunnel
 * in tunnels_wc[0] (presumably the wildcard receive chain — matches any
 * endpoints; see ipgre_tunnel_locate for the chain layout).  An extra
 * device reference is taken so the entry stays valid.
 */
static int __init ipgre_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version		= 4;
	iph->protocol		= IPPROTO_GRE;
	iph->ihl		= 5;
	/* Outer IP header + basic 4-byte GRE header, no options. */
	tunnel->hlen		= sizeof(struct iphdr) + 4;

	dev_hold(dev);
	tunnels_wc[0]		= tunnel;
	return 0;
}
1238
1239
/* Inet protocol hooks for IPPROTO_GRE: packet receive and ICMP error
 * handling, registered in ipgre_init(). */
static struct net_protocol ipgre_protocol = {
	.handler	=	ipgre_rcv,
	.err_handler	=	ipgre_err,
};
1244
1245
1246 /*
1247  *      And now the modules code and kernel interface.
1248  */
1249
/*
 * Module init: register the GRE protocol handler, then create and
 * register the fallback "gre0" device.  On failure the steps already
 * taken are unwound via the goto labels in reverse order.
 * Returns 0 on success or a negative errno.
 */
static int __init ipgre_init(void)
{
	int err;

	printk(KERN_INFO "GRE over IPv4 tunneling driver\n");

	if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
		printk(KERN_INFO "ipgre init: can't add protocol\n");
		return -EAGAIN;
	}

	ipgre_fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
					   ipgre_tunnel_setup);
	if (!ipgre_fb_tunnel_dev) {
		err = -ENOMEM;
		goto err1;
	}

	ipgre_fb_tunnel_dev->init = ipgre_fb_tunnel_init;

	if ((err = register_netdev(ipgre_fb_tunnel_dev)))
		goto err2;
out:
	return err;
err2:
	free_netdev(ipgre_fb_tunnel_dev);
err1:
	inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
	goto out;
}
1280
1281 static void __exit ipgre_destroy_tunnels(void)
1282 {
1283         int prio;
1284
1285         for (prio = 0; prio < 4; prio++) {
1286                 int h;
1287                 for (h = 0; h < HASH_SIZE; h++) {
1288                         struct ip_tunnel *t;
1289                         while ((t = tunnels[prio][h]) != NULL)
1290                                 unregister_netdevice(t->dev);
1291                 }
1292         }
1293 }
1294
/*
 * Module exit: unhook the GRE protocol handler so no new packets are
 * delivered, then tear down all tunnel devices under the RTNL lock.
 */
static void __exit ipgre_fini(void)
{
	if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
		printk(KERN_INFO "ipgre close: can't remove protocol\n");

	rtnl_lock();
	ipgre_destroy_tunnels();
	rtnl_unlock();
}
1304
/* Module entry/exit points and license declaration. */
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");