net/ipv4/ip_output.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The Internet Protocol (IP) output module.
 *
 * Version:     $Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Donald Becker, <becker@super.org>
 *              Alan Cox, <Alan.Cox@linux.org>
 *              Richard Underwood
 *              Stefan Becker, <stefanb@yello.ping.de>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *      See ip_input.c for original log
 *
 *      Fixes:
 *              Alan Cox        :       Missing nonblock feature in ip_build_xmit.
 *              Mike Kilburn    :       htons() missing in ip_build_xmit.
 *              Bradford Johnson:       Fix faulty handling of some frames when
 *                                      no route is found.
 *              Alexander Demenshin:    Missing sk/skb free in ip_queue_xmit
 *                                      (in case the packet is not accepted by
 *                                      output firewall rules)
 *              Mike McLagan    :       Routing by source
 *              Alexey Kuznetsov:       use new route cache
 *              Andi Kleen:             Fix broken PMTU recovery and remove
 *                                      some redundant tests.
 *              Vitaly E. Lavrov:       Transparent proxy revived after a year in a coma.
 *              Andi Kleen      :       Replace ip_reply with ip_send_reply.
 *              Andi Kleen      :       Split fast and slow ip_build_xmit paths
 *                                      for decreased register pressure on x86
 *                                      and more readability.
 *              Marc Boucher    :       When call_out_firewall returns FW_QUEUE,
 *                                      silently drop skb instead of failing with -EPERM.
 *              Detlev Wengorz  :       Copy protocol for fragments.
 *              Hirokazu Takahashi:     HW checksumming for outgoing UDP
 *                                      datagrams.
 *              Hirokazu Takahashi:     sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/config.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl = IPDEFTTL;

static int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*));

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
        iph->check = 0;
        iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}

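/*
 * Editor's note: a minimal sanity-check sketch, not part of the original
 * file.  Because the IP header checksum is a one's-complement sum, running
 * ip_fast_csum() over a header whose check field has just been filled in
 * folds to zero -- the same property the receive path relies on to verify
 * incoming headers:
 *
 *      ip_send_check(iph);
 *      BUG_ON(ip_fast_csum((unsigned char *)iph, iph->ihl) != 0);
 */
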
/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
        newskb->mac.raw = newskb->data;
        __skb_pull(newskb, newskb->nh.raw - newskb->data);
        newskb->pkt_type = PACKET_LOOPBACK;
        newskb->ip_summed = CHECKSUM_UNNECESSARY;
        BUG_TRAP(newskb->dst);
        netif_rx(newskb);
        return 0;
}

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
        int ttl = inet->uc_ttl;

        if (ttl < 0)
                ttl = dst_metric(dst, RTAX_HOPLIMIT);
        return ttl;
}

/*
 *              Add an IP header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
                          u32 saddr, u32 daddr, struct ip_options *opt)
{
        struct inet_sock *inet = inet_sk(sk);
        struct rtable *rt = (struct rtable *)skb->dst;
        struct iphdr *iph;

        /* Build the IP header. */
        if (opt)
                iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + opt->optlen);
        else
                iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));

        iph->version  = 4;
        iph->ihl      = 5;
        iph->tos      = inet->tos;
        if (ip_dont_fragment(sk, &rt->u.dst))
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
        iph->daddr    = rt->rt_dst;
        iph->saddr    = rt->rt_src;
        iph->protocol = sk->sk_protocol;
        iph->tot_len  = htons(skb->len);
        ip_select_ident(iph, &rt->u.dst, sk);
        skb->nh.iph   = iph;

        if (opt && opt->optlen) {
                iph->ihl += opt->optlen >> 2;
                ip_options_build(skb, opt, daddr, rt, 0);
        }
        ip_send_check(iph);

        skb->priority = sk->sk_priority;

        /* Send it out. */
        return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
                       dst_output);
}

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

static inline int ip_finish_output2(struct sk_buff *skb)
{
        struct dst_entry *dst = skb->dst;
        struct hh_cache *hh = dst->hh;
        struct net_device *dev = dst->dev;
        int hh_len = LL_RESERVED_SPACE(dev);

        /* Be paranoid, rather than too clever. */
        if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
                struct sk_buff *skb2;

                skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
                if (skb2 == NULL) {
                        kfree_skb(skb);
                        return -ENOMEM;
                }
                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);
                kfree_skb(skb);
                skb = skb2;
        }

        if (hh) {
                int hh_alen;

                read_lock_bh(&hh->hh_lock);
                hh_alen = HH_DATA_ALIGN(hh->hh_len);
                memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
                read_unlock_bh(&hh->hh_lock);
                skb_push(skb, hh->hh_len);
                return hh->hh_output(skb);
        } else if (dst->neighbour)
                return dst->neighbour->output(skb);

        if (net_ratelimit())
                printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
        kfree_skb(skb);
        return -EINVAL;
}

static inline int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
        /* Policy lookup after SNAT yielded a new policy */
        if (skb->dst->xfrm != NULL) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(skb);
        }
#endif
        if (skb->len > dst_mtu(skb->dst) &&
            !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
                return ip_fragment(skb, ip_finish_output2);
        else
                return ip_finish_output2(skb);
}

int ip_mc_output(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        struct rtable *rt = (struct rtable *)skb->dst;
        struct net_device *dev = rt->u.dst.dev;

        /*
         *      If the indicated interface is up and running, send the packet.
         */
        IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        /*
         *      Multicasts are looped back for other local users.
         */

        if (rt->rt_flags&RTCF_MULTICAST) {
                if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
                /* Small optimization: do not loop back non-local frames
                   that came back after forwarding; ip_mr_input will drop
                   them in any case.
                   Note that local frames are looped back to be delivered
                   to local recipients.

                   This check is duplicated in ip_mr_input at the moment.
                 */
                    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
                ) {
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                        if (newskb)
                                NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
                                        newskb->dev,
                                        ip_dev_loopback_xmit);
                }

                /* Multicasts with ttl 0 must not go beyond the host. */

                if (skb->nh.iph->ttl == 0) {
                        kfree_skb(skb);
                        return 0;
                }
        }

        if (rt->rt_flags&RTCF_BROADCAST) {
                struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                if (newskb)
                        NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
                                newskb->dev, ip_dev_loopback_xmit);
        }

        return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
                            ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct sk_buff *skb)
{
        struct net_device *dev = skb->dst->dev;

        IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
                            ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
        struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct ip_options *opt = inet->opt;
        struct rtable *rt;
        struct iphdr *iph;

        /* Skip all of this if the packet is already routed,
         * e.g. by something like SCTP.
         */
        rt = (struct rtable *)skb->dst;
        if (rt != NULL)
                goto packet_routed;

        /* Make sure we can route this packet. */
        rt = (struct rtable *)__sk_dst_check(sk, 0);
        if (rt == NULL) {
                u32 daddr;

                /* Use the correct destination address if we have options. */
                daddr = inet->daddr;
                if (opt && opt->srr)
                        daddr = opt->faddr;

                {
                        struct flowi fl = { .oif = sk->sk_bound_dev_if,
                                            .nl_u = { .ip4_u =
                                                      { .daddr = daddr,
                                                        .saddr = inet->saddr,
                                                        .tos = RT_CONN_FLAGS(sk) } },
                                            .proto = sk->sk_protocol,
                                            .uli_u = { .ports =
                                                       { .sport = inet->sport,
                                                         .dport = inet->dport } } };

                        /* If this fails, the transport layer's retransmit
                         * mechanism will keep trying until the route appears
                         * or the connection times out.
                         */
                        if (ip_route_output_flow(&rt, &fl, sk, 0))
                                goto no_route;
                }
                sk_setup_caps(sk, &rt->u.dst);
        }
        skb->dst = dst_clone(&rt->u.dst);

packet_routed:
        if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
                goto no_route;

        /* OK, we know where to send it, allocate and build IP header. */
        iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
        *((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
        iph->tot_len = htons(skb->len);
        if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
        iph->protocol = sk->sk_protocol;
        iph->saddr    = rt->rt_src;
        iph->daddr    = rt->rt_dst;
        skb->nh.iph   = iph;
        /* The transport layer has already set skb->h.foo itself. */

        if (opt && opt->optlen) {
                iph->ihl += opt->optlen >> 2;
                ip_options_build(skb, opt, inet->daddr, rt, 0);
        }

        ip_select_ident_more(iph, &rt->u.dst, sk,
                             (skb_shinfo(skb)->tso_segs ?: 1) - 1);

        /* Add an IP checksum. */
        ip_send_check(iph);

        skb->priority = sk->sk_priority;

        return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
                       dst_output);

no_route:
        IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EHOSTUNREACH;
}


static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
        to->pkt_type = from->pkt_type;
        to->priority = from->priority;
        to->protocol = from->protocol;
        dst_release(to->dst);
        to->dst = dst_clone(from->dst);
        to->dev = from->dev;

        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
        to->nfmark = from->nfmark;
        /* Connection association is the same as for the pre-frag packet */
        nf_conntrack_put(to->nfct);
        to->nfct = from->nfct;
        nf_conntrack_get(to->nfct);
        to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
        to->ipvs_property = from->ipvs_property;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(to->nf_bridge);
        to->nf_bridge = from->nf_bridge;
        nf_bridge_get(to->nf_bridge);
#endif
#endif
}

/*
 *      This IP datagram is too large to be sent in one piece.  Break it up
 *      into smaller pieces (each consisting of an IP header plus a block of
 *      the original datagram's data) so that each piece fits in a single
 *      device frame, and queue such frames for sending.
 */

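/*
 * Editor's note: a worked example of the sizing below, not part of the
 * original file.  Assume an Ethernet MTU of 1500 and a 20-byte IP header:
 * each fragment can carry up to 1480 bytes of payload, and every fragment
 * except the last must carry a multiple of 8 bytes so that its offset fits
 * the 13-bit, 8-byte-granular frag_off field.  A 4000-byte datagram
 * (3980 bytes of payload) therefore becomes fragments of 1480, 1480 and
 * 1020 payload bytes at offsets 0, 185 and 370 (in 8-byte units).
 */
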
static int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
{
        struct iphdr *iph;
        int raw = 0;
        int ptr;
        struct net_device *dev;
        struct sk_buff *skb2;
        unsigned int mtu, hlen, left, len, ll_rs;
        int offset;
        __be16 not_last_frag;
        struct rtable *rt = (struct rtable *)skb->dst;
        int err = 0;

        dev = rt->u.dst.dev;

        /*
         *      Point into the IP datagram header.
         */

        iph = skb->nh.iph;

        if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                          htonl(dst_mtu(&rt->u.dst)));
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        /*
         *      Set up starting values.
         */

        hlen = iph->ihl * 4;
        mtu = dst_mtu(&rt->u.dst) - hlen;       /* Size of data space */
        IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

        /* If a frag_list is given, use it.  First check its validity:
         * some transformers could have created a bad frag_list or broken
         * an existing one (that is not prohibited); in that case fall
         * back to copying.
         *
         * LATER: this step can be merged into the real generation of
         * fragments; we can switch to copying when we see the first bad
         * fragment.
         */
        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *frag;
                int first_len = skb_pagelen(skb);

                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
                    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
                    skb_cloned(skb))
                        goto slow_path;

                for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
                        /* Correct geometry. */
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
                                goto slow_path;

                        /* Partially cloned skb? */
                        if (skb_shared(frag))
                                goto slow_path;

                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                sock_hold(skb->sk);
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
                                skb->truesize -= frag->truesize;
                        }
                }

                /* Everything is OK. Generate! */

                err = 0;
                offset = 0;
                frag = skb_shinfo(skb)->frag_list;
                skb_shinfo(skb)->frag_list = NULL;
                skb->data_len = first_len - skb_headlen(skb);
                skb->len = first_len;
                iph->tot_len = htons(first_len);
                iph->frag_off = htons(IP_MF);
                ip_send_check(iph);

                for (;;) {
                        /* Prepare the header of the next frame
                         * before the previous one goes down. */
                        if (frag) {
                                frag->ip_summed = CHECKSUM_NONE;
                                frag->h.raw = frag->data;
                                frag->nh.raw = __skb_push(frag, hlen);
                                memcpy(frag->nh.raw, iph, hlen);
                                iph = frag->nh.iph;
                                iph->tot_len = htons(frag->len);
                                ip_copy_metadata(frag, skb);
                                if (offset == 0)
                                        ip_options_fragment(frag);
                                offset += skb->len - hlen;
                                iph->frag_off = htons(offset>>3);
                                if (frag->next != NULL)
                                        iph->frag_off |= htons(IP_MF);
                                /* Ready; complete the checksum */
                                ip_send_check(iph);
                        }

                        err = output(skb);

                        if (err || !frag)
                                break;

                        skb = frag;
                        frag = skb->next;
                        skb->next = NULL;
                }

                if (err == 0) {
                        IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
                        return 0;
                }

                while (frag) {
                        skb = frag->next;
                        kfree_skb(frag);
                        frag = skb;
                }
                IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
                return err;
        }

slow_path:
        left = skb->len - hlen;         /* Space per frame */
        ptr = raw + hlen;               /* Where to start from */

#ifdef CONFIG_BRIDGE_NETFILTER
        /* For bridged IP traffic encapsulated inside e.g. a vlan header,
         * we need to make room for the encapsulating header. */
        ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, nf_bridge_pad(skb));
        mtu -= nf_bridge_pad(skb);
#else
        ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev);
#endif
        /*
         *      Fragment the datagram.
         */

        offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
        not_last_frag = iph->frag_off & htons(IP_MF);

        /*
         *      Keep copying data until we run out.
         */

        while (left > 0) {
                len = left;
                /* IF: it doesn't fit, use 'mtu' - the data space left */
                if (len > mtu)
                        len = mtu;
                /* IF: we are not sending up to and including the packet end
                   then align the next start on an eight byte boundary */
                if (len < left) {
                        len &= ~7;
                }
                /*
                 *      Allocate buffer.
                 */

                if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
                        NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
                        err = -ENOMEM;
                        goto fail;
                }

                /*
                 *      Set up data on packet.
                 */

                ip_copy_metadata(skb2, skb);
                skb_reserve(skb2, ll_rs);
                skb_put(skb2, len + hlen);
                skb2->nh.raw = skb2->data;
                skb2->h.raw = skb2->data + hlen;

                /*
                 *      Charge the memory for the fragment to any owner
                 *      it might possess.
                 */

                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);

                /*
                 *      Copy the packet header into the new buffer.
                 */

                memcpy(skb2->nh.raw, skb->data, hlen);

                /*
                 *      Copy a block of the IP datagram.
                 */
                if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
                        BUG();
                left -= len;

                /*
                 *      Fill in the new header fields.
                 */
                iph = skb2->nh.iph;
                iph->frag_off = htons((offset >> 3));

                /* ANK: dirty, but effective trick. Upgrade options only if
                 * the segment to be fragmented was THE FIRST (otherwise,
                 * options are already fixed), and do it ONCE on the initial
                 * skb, so that all the following fragments will inherit the
                 * fixed options.
                 */
                if (offset == 0)
                        ip_options_fragment(skb);

                /*
                 *      Added AC : If we are fragmenting a fragment that's not
                 *                 the last fragment then keep the MF bit set
                 *                 on each fragment.
                 */
                if (left > 0 || not_last_frag)
                        iph->frag_off |= htons(IP_MF);
                ptr += len;
                offset += len;

                /*
                 *      Put this fragment into the sending queue.
                 */

                IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);

                iph->tot_len = htons(len + hlen);

                ip_send_check(iph);

                err = output(skb2);
                if (err)
                        goto fail;
        }
        kfree_skb(skb);
        IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
        return err;

fail:
        kfree_skb(skb);
        IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
        return err;
}

int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
        struct iovec *iov = from;

        if (skb->ip_summed == CHECKSUM_HW) {
                if (memcpy_fromiovecend(to, iov, offset, len) < 0)
                        return -EFAULT;
        } else {
                unsigned int csum = 0;
                if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
                        return -EFAULT;
                skb->csum = csum_block_add(skb->csum, csum, odd);
        }
        return 0;
}

static inline unsigned int
csum_page(struct page *page, int offset, int copy)
{
        char *kaddr;
        unsigned int csum;
        kaddr = kmap(page);
        csum = csum_partial(kaddr + offset, copy, 0);
        kunmap(page);
        return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
                        int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
                        int transhdrlen, int mtu, unsigned int flags)
{
        struct sk_buff *skb;
        int err;

        /* The network device supports UDP fragmentation offload, so
         * create a single skb containing the complete UDP datagram.
         */
        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);

                if (skb == NULL)
                        return err;

                /* Reserve space for the hardware header. */
                skb_reserve(skb, hh_len);

                /* Create space for the UDP/IP header. */
                skb_put(skb, fragheaderlen + transhdrlen);

                /* Initialize the network header pointer. */
                skb->nh.raw = skb->data;

                /* Initialize the protocol header pointer. */
                skb->h.raw = skb->data + fragheaderlen;

                skb->ip_summed = CHECKSUM_HW;
                skb->csum = 0;
                sk->sk_sndmsg_off = 0;
        }

        err = skb_append_datato_frags(sk, skb, getfrag, from,
                               (length - transhdrlen));
        if (!err) {
                /* Specify the length of each IP datagram fragment. */
                skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
                __skb_queue_tail(&sk->sk_write_queue, skb);

                return 0;
        }
        /* There is not enough support to do UFO,
         * so follow the normal path.
         */
        kfree_skb(skb);
        return err;
}

/*
 *      ip_append_data() and ip_append_page() can make one large IP datagram
 *      from many pieces of data.  Each piece will be held on the socket
 *      until ip_push_pending_frames() is called.  Each piece can be a page
 *      or non-page data.
 *
 *      Not only UDP: other transport protocols, e.g. raw sockets, can
 *      potentially use this interface as well.
 *
 *      LATER: length must be adjusted by pad at tail, when it is required.
 */
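/*
 * Editor's note: a minimal usage sketch, not part of the original file.
 * It approximates what udp_sendmsg() does with this interface (corking,
 * option setup and the UDP header fill-in are elided; see net/ipv4/udp.c
 * for the real thing):
 *
 *      err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len,
 *                           sizeof(struct udphdr), &ipc, rt,
 *                           msg->msg_flags);
 *      if (err)
 *              ip_flush_pending_frames(sk);
 *      else if (!(msg->msg_flags & MSG_MORE))
 *              err = ip_push_pending_frames(sk);
 */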
int ip_append_data(struct sock *sk,
                   int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                   void *from, int length, int transhdrlen,
                   struct ipcm_cookie *ipc, struct rtable *rt,
                   unsigned int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;

        struct ip_options *opt = NULL;
        int hh_len;
        int exthdrlen;
        int mtu;
        int copy;
        int err;
        int offset = 0;
        unsigned int maxfraglen, fragheaderlen;
        int csummode = CHECKSUM_NONE;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue)) {
                /*
                 * Set up for corking.
                 */
                opt = ipc->opt;
                if (opt) {
                        if (inet->cork.opt == NULL) {
                                inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
                                if (unlikely(inet->cork.opt == NULL))
                                        return -ENOBUFS;
                        }
                        memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
                        inet->cork.flags |= IPCORK_OPT;
                        inet->cork.addr = ipc->addr;
                }
                dst_hold(&rt->u.dst);
                inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
                inet->cork.rt = rt;
                inet->cork.length = 0;
                sk->sk_sndmsg_page = NULL;
                sk->sk_sndmsg_off = 0;
                if ((exthdrlen = rt->u.dst.header_len) != 0) {
                        length += exthdrlen;
                        transhdrlen += exthdrlen;
                }
        } else {
                rt = inet->cork.rt;
                if (inet->cork.flags & IPCORK_OPT)
                        opt = inet->cork.opt;

                transhdrlen = 0;
                exthdrlen = 0;
                mtu = inet->cork.fragsize;
        }
        hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

        if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
                return -EMSGSIZE;
        }

        /*
         * transhdrlen > 0 means that this is the first fragment and we wish
         * it not to be fragmented later.
         */
        if (transhdrlen &&
            length + fragheaderlen <= mtu &&
            rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
            !exthdrlen)
                csummode = CHECKSUM_HW;

        inet->cork.length += length;
        if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
                        (rt->u.dst.dev->features & NETIF_F_UFO)) {

                err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
                                         fragheaderlen, transhdrlen, mtu,
                                         flags);
                if (err)
                        goto error;
                return 0;
        }

        /* So, what's going on in the loop below?
         *
         * We use the calculated fragment length to generate chained skbs;
         * each segment is an IP fragment ready for sending to the network
         * once the appropriate IP header has been added.
         */

        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
                goto alloc_new_skb;

        while (length > 0) {
                /* Check if the remaining data fits into the current packet. */
                copy = mtu - skb->len;
                if (copy < length)
                        copy = maxfraglen - skb->len;
                if (copy <= 0) {
                        char *data;
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen;
                        struct sk_buff *skb_prev;
alloc_new_skb:
                        skb_prev = skb;
                        if (skb_prev)
                                fraggap = skb_prev->len - maxfraglen;
                        else
                                fraggap = 0;

                        /*
                         * If the remaining data exceeds the mtu,
                         * we know we need more fragment(s).
                         */
                        datalen = length + fraggap;
                        if (datalen > mtu - fragheaderlen)
                                datalen = maxfraglen - fragheaderlen;
                        fraglen = datalen + fragheaderlen;

                        if ((flags & MSG_MORE) &&
                            !(rt->u.dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
                        else
                                alloclen = datalen + fragheaderlen;

                        /* The last fragment gets additional space at the tail.
                         * Note: with MSG_MORE we overallocate on fragments,
                         * because we have no idea which fragment will be
                         * the last.
                         */
                        if (datalen == length)
                                alloclen += rt->u.dst.trailer_len;

                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk,
                                                alloclen + hh_len + 15,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (atomic_read(&sk->sk_wmem_alloc) <=
                                    2 * sk->sk_sndbuf)
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len + 15, 1,
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
                        }
                        if (skb == NULL)
                                goto error;

                        /*
                         *      Fill in the control structures.
                         */
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);

                        /*
                         *      Find where to start putting bytes.
                         */
                        data = skb_put(skb, fraglen);
                        skb->nh.raw = data + exthdrlen;
                        data += fragheaderlen;
                        skb->h.raw = data + exthdrlen;

                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data + transhdrlen, fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                data += fraggap;
                                skb_trim(skb_prev, maxfraglen);
                        }

                        copy = datalen - transhdrlen - fraggap;
                        if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
                                err = -EFAULT;
                                kfree_skb(skb);
                                goto error;
                        }

                        offset += copy;
                        length -= datalen - fraggap;
                        transhdrlen = 0;
                        exthdrlen = 0;
                        csummode = CHECKSUM_NONE;

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                if (copy > length)
                        copy = length;

                if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
                        unsigned int off;

                        off = skb->len;
                        if (getfrag(from, skb_put(skb, copy),
                                        offset, copy, off, skb) < 0) {
                                __skb_trim(skb, off);
                                err = -EFAULT;
                                goto error;
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
                        struct page *page = sk->sk_sndmsg_page;
                        int off = sk->sk_sndmsg_off;
                        unsigned int left;

                        if (page && (left = PAGE_SIZE - off) > 0) {
                                if (copy >= left)
                                        copy = left;
                                if (page != frag->page) {
                                        if (i == MAX_SKB_FRAGS) {
                                                err = -EMSGSIZE;
                                                goto error;
                                        }
                                        get_page(page);
                                        skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
                                        frag = &skb_shinfo(skb)->frags[i];
                                }
                        } else if (i < MAX_SKB_FRAGS) {
                                if (copy > PAGE_SIZE)
                                        copy = PAGE_SIZE;
                                page = alloc_pages(sk->sk_allocation, 0);
                                if (page == NULL) {
                                        err = -ENOMEM;
                                        goto error;
                                }
                                sk->sk_sndmsg_page = page;
                                sk->sk_sndmsg_off = 0;

                                skb_fill_page_desc(skb, i, page, 0, 0);
                                frag = &skb_shinfo(skb)->frags[i];
                                skb->truesize += PAGE_SIZE;
                                atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
                        } else {
                                err = -EMSGSIZE;
                                goto error;
                        }
                        if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
                                err = -EFAULT;
                                goto error;
                        }
                        sk->sk_sndmsg_off += copy;
                        frag->size += copy;
                        skb->len += copy;
                        skb->data_len += copy;
                }
                offset += copy;
                length -= copy;
        }

        return 0;

error:
        inet->cork.length -= length;
        IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
        return err;
}

ssize_t ip_append_page(struct sock *sk, struct page *page,
                       int offset, size_t size, int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;
        struct rtable *rt;
        struct ip_options *opt = NULL;
        int hh_len;
        int mtu;
        int len;
        int err;
        unsigned int maxfraglen, fragheaderlen, fraggap;

        if (inet->hdrincl)
                return -EPERM;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue))
                return -EINVAL;

        rt = inet->cork.rt;
        if (inet->cork.flags & IPCORK_OPT)
                opt = inet->cork.opt;

        if (!(rt->u.dst.dev->features&NETIF_F_SG))
                return -EOPNOTSUPP;

        hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
        mtu = inet->cork.fragsize;

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

        if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
                return -EMSGSIZE;
        }

        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
                return -EINVAL;

        inet->cork.length += size;
        if ((sk->sk_protocol == IPPROTO_UDP) &&
            (rt->u.dst.dev->features & NETIF_F_UFO))
                skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);


        while (size > 0) {
                int i;

                if (skb_shinfo(skb)->ufo_size)
                        len = size;
                else {

                        /* Check if the remaining data fits into the current packet. */
                        len = mtu - skb->len;
                        if (len < size)
                                len = maxfraglen - skb->len;
                }
                if (len <= 0) {
                        struct sk_buff *skb_prev;
                        char *data;
                        struct iphdr *iph;
                        int alloclen;

                        skb_prev = skb;
                        fraggap = skb_prev->len - maxfraglen;

                        alloclen = fragheaderlen + hh_len + fraggap + 15;
                        skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
                        if (unlikely(!skb)) {
                                err = -ENOBUFS;
                                goto error;
                        }

                        /*
                         *      Fill in the control structures.
                         */
                        skb->ip_summed = CHECKSUM_NONE;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);

                        /*
                         *      Find where to start putting bytes.
                         */
                        data = skb_put(skb, fragheaderlen + fraggap);
                        skb->nh.iph = iph = (struct iphdr *)data;
                        data += fragheaderlen;
                        skb->h.raw = data;

                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data, fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                skb_trim(skb_prev, maxfraglen);
                        }

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                i = skb_shinfo(skb)->nr_frags;
                if (len > size)
                        len = size;
                if (skb_can_coalesce(skb, i, page, offset)) {
                        skb_shinfo(skb)->frags[i-1].size += len;
                } else if (i < MAX_SKB_FRAGS) {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, len);
                } else {
                        err = -EMSGSIZE;
                        goto error;
                }

                if (skb->ip_summed == CHECKSUM_NONE) {
                        unsigned int csum;
                        csum = csum_page(page, offset, len);
                        skb->csum = csum_block_add(skb->csum, csum, skb->len);
                }

                skb->len += len;
                skb->data_len += len;
                offset += len;
                size -= len;
        }
        return 0;

error:
        inet->cork.length -= size;
        IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
        return err;
}

/*
 *      Combine all pending IP fragments on the socket into one IP datagram
 *      and push it out.
 */
int ip_push_pending_frames(struct sock *sk)
{
        struct sk_buff *skb, *tmp_skb;
        struct sk_buff **tail_skb;
        struct inet_sock *inet = inet_sk(sk);
        struct ip_options *opt = NULL;
        struct rtable *rt = inet->cork.rt;
        struct iphdr *iph;
        __be16 df = 0;
        __u8 ttl;
        int err = 0;

        if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);

        /* Move skb->data to the IP header, out of any ext header. */
        if (skb->data < skb->nh.raw)
                __skb_pull(skb, skb->nh.raw - skb->data);
        while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
                __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
                *tail_skb = tmp_skb;
                tail_skb = &(tmp_skb->next);
                skb->len += tmp_skb->len;
                skb->data_len += tmp_skb->len;
                skb->truesize += tmp_skb->truesize;
                __sock_put(tmp_skb->sk);
                tmp_skb->destructor = NULL;
                tmp_skb->sk = NULL;
        }

        /* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO),
         * we allow the frame generated here to be fragmented.  No matter
         * how transforms change the size of the packet, it will go out.
         */
        if (inet->pmtudisc != IP_PMTUDISC_DO)
                skb->local_df = 1;

        /* The DF bit is set when we want to see DF on outgoing frames.
         * If local_df is set too, we still allow this frame to be
         * fragmented locally. */
        if (inet->pmtudisc == IP_PMTUDISC_DO ||
            (skb->len <= dst_mtu(&rt->u.dst) &&
             ip_dont_fragment(sk, &rt->u.dst)))
                df = htons(IP_DF);

        if (inet->cork.flags & IPCORK_OPT)
                opt = inet->cork.opt;

        if (rt->rt_type == RTN_MULTICAST)
                ttl = inet->mc_ttl;
        else
                ttl = ip_select_ttl(inet, &rt->u.dst);

        iph = (struct iphdr *)skb->data;
        iph->version = 4;
        iph->ihl = 5;
        if (opt) {
                iph->ihl += opt->optlen>>2;
                ip_options_build(skb, opt, inet->cork.addr, rt, 0);
        }
        iph->tos = inet->tos;
        iph->tot_len = htons(skb->len);
        iph->frag_off = df;
        if (!df) {
                __ip_select_ident(iph, &rt->u.dst, 0);
        } else {
                iph->id = htons(inet->id++);
        }
        iph->ttl = ttl;
        iph->protocol = sk->sk_protocol;
        iph->saddr = rt->rt_src;
        iph->daddr = rt->rt_dst;
        ip_send_check(iph);

        skb->priority = sk->sk_priority;
        skb->dst = dst_clone(&rt->u.dst);

        /* Netfilter gets the whole, not yet fragmented skb. */
        err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
                      skb->dst->dev, dst_output);
        if (err) {
                if (err > 0)
                        err = inet->recverr ? net_xmit_errno(err) : 0;
                if (err)
                        goto error;
        }

out:
        inet->cork.flags &= ~IPCORK_OPT;
        kfree(inet->cork.opt);
        inet->cork.opt = NULL;
        if (inet->cork.rt) {
                ip_rt_put(inet->cork.rt);
                inet->cork.rt = NULL;
        }
        return err;

error:
        IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
        goto out;
}

/*
 *      Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;

        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
                kfree_skb(skb);

        inet->cork.flags &= ~IPCORK_OPT;
        kfree(inet->cork.opt);
        inet->cork.opt = NULL;
        if (inet->cork.rt) {
                ip_rt_put(inet->cork.rt);
                inet->cork.rt = NULL;
        }
}


/*
 *      Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
                              int len, int odd, struct sk_buff *skb)
{
        unsigned int csum;

        csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
        skb->csum = csum_block_add(skb->csum, csum, odd);
        return 0;
}

/*
 *      Generic function to send a packet as a reply to another packet.
 *      Used to send TCP resets so far.  ICMP should use this function too.
 *
 *      Should run single-threaded per socket because it uses the sock
 *      structure to pass arguments.
 *
 *      LATER: switch from ip_build_xmit to ip_append_*
 */
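/*
 * Editor's note: for reference (not part of the original file), the TCP
 * reset path in net/ipv4/tcp_ipv4.c is the one caller at this point: it
 * fills a struct ip_reply_arg whose iov points at a prebuilt TCP header
 * and then, roughly, does:
 *
 *      ip_send_reply(tcp_socket->sk, skb, &arg, sizeof(rth));
 *
 * (names as in 2.6-era sources; see tcp_v4_send_reset() for the details).
 */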
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
                   unsigned int len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct {
                struct ip_options       opt;
                char                    data[40];
        } replyopts;
        struct ipcm_cookie ipc;
        u32 daddr;
        struct rtable *rt = (struct rtable *)skb->dst;

        if (ip_options_echo(&replyopts.opt, skb))
                return;

        daddr = ipc.addr = rt->rt_src;
        ipc.opt = NULL;

        if (replyopts.opt.optlen) {
                ipc.opt = &replyopts.opt;

                if (ipc.opt->srr)
                        daddr = replyopts.opt.faddr;
        }

        {
                struct flowi fl = { .nl_u = { .ip4_u =
                                              { .daddr = daddr,
                                                .saddr = rt->rt_spec_dst,
                                                .tos = RT_TOS(skb->nh.iph->tos) } },
                                    /* Not quite clean, but right. */
                                    .uli_u = { .ports =
                                               { .sport = skb->h.th->dest,
                                                 .dport = skb->h.th->source } },
                                    .proto = sk->sk_protocol };
                if (ip_route_output_key(&rt, &fl))
                        return;
        }

        /* And let IP do all the hard work.

           This chunk is not reentrant, hence the spinlock.  Note that it
           relies on the fact that this function is called with BHs
           disabled locally and that sk cannot already be spinlocked.
         */
        bh_lock_sock(sk);
        inet->tos = skb->nh.iph->tos;
        sk->sk_priority = skb->priority;
        sk->sk_protocol = skb->nh.iph->protocol;
        ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
                       &ipc, rt, MSG_DONTWAIT);
        if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
                if (arg->csumoffset >= 0)
                        *((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
                skb->ip_summed = CHECKSUM_NONE;
                ip_push_pending_frames(sk);
        }

        bh_unlock_sock(sk);

        ip_rt_put(rt);
}

void __init ip_init(void)
{
        ip_rt_init();
        inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
        igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);