[NETFILTER]: reduce netfilter sk_buff enlargement
net/ipv4/ip_output.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              The Internet Protocol (IP) output module.
7  *
8  * Version:     $Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
9  *
10  * Authors:     Ross Biro
11  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *              Donald Becker, <becker@super.org>
13  *              Alan Cox, <Alan.Cox@linux.org>
14  *              Richard Underwood
15  *              Stefan Becker, <stefanb@yello.ping.de>
16  *              Jorge Cwik, <jorge@laser.satlink.net>
17  *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
18  *              Hirokazu Takahashi, <taka@valinux.co.jp>
19  *
20  *      See ip_input.c for original log
21  *
22  *      Fixes:
23  *              Alan Cox        :       Missing nonblock feature in ip_build_xmit.
24  *              Mike Kilburn    :       htons() missing in ip_build_xmit.
25  *              Bradford Johnson:       Fix faulty handling of some frames when 
26  *                                      no route is found.
27  *              Alexander Demenshin:    Missing sk/skb free in ip_queue_xmit
28  *                                      (in case if packet not accepted by
29  *                                      output firewall rules)
30  *              Mike McLagan    :       Routing by source
31  *              Alexey Kuznetsov:       use new route cache
32  *              Andi Kleen:             Fix broken PMTU recovery and remove
33  *                                      some redundant tests.
34  *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
35  *              Andi Kleen      :       Replace ip_reply with ip_send_reply.
36  *              Andi Kleen      :       Split fast and slow ip_build_xmit path 
37  *                                      for decreased register pressure on x86 
38  *                                      and more readability.
39  *              Marc Boucher    :       When call_out_firewall returns FW_QUEUE,
40  *                                      silently drop skb instead of failing with -EPERM.
41  *              Detlev Wengorz  :       Copy protocol for fragments.
42  *              Hirokazu Takahashi:     HW checksumming for outgoing UDP
43  *                                      datagrams.
44  *              Hirokazu Takahashi:     sendfile() on UDP works now.
45  */
46
47 #include <asm/uaccess.h>
48 #include <asm/system.h>
49 #include <linux/module.h>
50 #include <linux/types.h>
51 #include <linux/kernel.h>
52 #include <linux/sched.h>
53 #include <linux/mm.h>
54 #include <linux/string.h>
55 #include <linux/errno.h>
56 #include <linux/config.h>
57
58 #include <linux/socket.h>
59 #include <linux/sockios.h>
60 #include <linux/in.h>
61 #include <linux/inet.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/proc_fs.h>
65 #include <linux/stat.h>
66 #include <linux/init.h>
67
68 #include <net/snmp.h>
69 #include <net/ip.h>
70 #include <net/protocol.h>
71 #include <net/route.h>
72 #include <net/tcp.h>
73 #include <net/udp.h>
74 #include <linux/skbuff.h>
75 #include <net/sock.h>
76 #include <net/arp.h>
77 #include <net/icmp.h>
78 #include <net/raw.h>
79 #include <net/checksum.h>
80 #include <net/inetpeer.h>
82 #include <linux/igmp.h>
83 #include <linux/netfilter_ipv4.h>
84 #include <linux/netfilter_bridge.h>
85 #include <linux/mroute.h>
86 #include <linux/netlink.h>
87
88 /*
89  *      Shall we try to damage output packets if routing dev changes?
90  */
91
92 int sysctl_ip_dynaddr;
93 int sysctl_ip_default_ttl = IPDEFTTL;
94
95 /* Generate a checksum for an outgoing IP datagram. */
96 __inline__ void ip_send_check(struct iphdr *iph)
97 {
98         iph->check = 0;
99         iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
100 }
101
102 /* dev_loopback_xmit for use with netfilter. */
103 static int ip_dev_loopback_xmit(struct sk_buff *newskb)
104 {
105         newskb->mac.raw = newskb->data;
106         __skb_pull(newskb, newskb->nh.raw - newskb->data);
107         newskb->pkt_type = PACKET_LOOPBACK;
108         newskb->ip_summed = CHECKSUM_UNNECESSARY;
109         BUG_TRAP(newskb->dst);
110         netif_rx(newskb);
111         return 0;
112 }
113
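/*
 * Pick the TTL for a unicast packet: use the socket's uc_ttl when it has
 * been set explicitly, otherwise fall back to the route's hop-limit metric
 * (which normally defaults to sysctl_ip_default_ttl).
 */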
114 static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
115 {
116         int ttl = inet->uc_ttl;
117
118         if (ttl < 0)
119                 ttl = dst_metric(dst, RTAX_HOPLIMIT);
120         return ttl;
121 }
122
123 /* 
124  *              Add an IP header to an skbuff and send it out.
125  *
126  */
127 int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
128                           u32 saddr, u32 daddr, struct ip_options *opt)
129 {
130         struct inet_sock *inet = inet_sk(sk);
131         struct rtable *rt = (struct rtable *)skb->dst;
132         struct iphdr *iph;
133
134         /* Build the IP header. */
135         if (opt)
136                 iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr) + opt->optlen);
137         else
138                 iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr));
139
140         iph->version  = 4;
141         iph->ihl      = 5;
142         iph->tos      = inet->tos;
143         if (ip_dont_fragment(sk, &rt->u.dst))
144                 iph->frag_off = htons(IP_DF);
145         else
146                 iph->frag_off = 0;
147         iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
148         iph->daddr    = rt->rt_dst;
149         iph->saddr    = rt->rt_src;
150         iph->protocol = sk->sk_protocol;
151         iph->tot_len  = htons(skb->len);
152         ip_select_ident(iph, &rt->u.dst, sk);
153         skb->nh.iph   = iph;
154
155         if (opt && opt->optlen) {
156                 iph->ihl += opt->optlen>>2;
157                 ip_options_build(skb, opt, daddr, rt, 0);
158         }
159         ip_send_check(iph);
160
161         skb->priority = sk->sk_priority;
162
163         /* Send it out. */
164         return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
165                        dst_output);
166 }
167
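/*
 * Last step of the output path: make sure there is enough headroom for the
 * link-layer header (reallocating if necessary), then emit the packet via
 * the cached hardware header if one exists, or via the neighbour entry's
 * output function otherwise.
 */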
168 static inline int ip_finish_output2(struct sk_buff *skb)
169 {
170         struct dst_entry *dst = skb->dst;
171         struct hh_cache *hh = dst->hh;
172         struct net_device *dev = dst->dev;
173         int hh_len = LL_RESERVED_SPACE(dev);
174
175         /* Be paranoid, rather than too clever. */
176         if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
177                 struct sk_buff *skb2;
178
179                 skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
180                 if (skb2 == NULL) {
181                         kfree_skb(skb);
182                         return -ENOMEM;
183                 }
184                 if (skb->sk)
185                         skb_set_owner_w(skb2, skb->sk);
186                 kfree_skb(skb);
187                 skb = skb2;
188         }
189
190         if (hh) {
191                 int hh_alen;
192
193                 read_lock_bh(&hh->hh_lock);
194                 hh_alen = HH_DATA_ALIGN(hh->hh_len);
195                 memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
196                 read_unlock_bh(&hh->hh_lock);
197                 skb_push(skb, hh->hh_len);
198                 return hh->hh_output(skb);
199         } else if (dst->neighbour)
200                 return dst->neighbour->output(skb);
201
202         if (net_ratelimit())
203                 printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
204         kfree_skb(skb);
205         return -EINVAL;
206 }
207
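/*
 * Run a fully routed packet through the POST_ROUTING netfilter hook and
 * hand it to ip_finish_output2() for link-layer resolution.
 */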
208 int ip_finish_output(struct sk_buff *skb)
209 {
210         struct net_device *dev = skb->dst->dev;
211
212         skb->dev = dev;
213         skb->protocol = htons(ETH_P_IP);
214
215         return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
216                        ip_finish_output2);
217 }
218
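/*
 * Output routine for multicast and broadcast routes: loop a copy of the
 * packet back to local listeners where required, then transmit it on the
 * wire, fragmenting if it exceeds the path MTU.
 */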
219 int ip_mc_output(struct sk_buff *skb)
220 {
221         struct sock *sk = skb->sk;
222         struct rtable *rt = (struct rtable*)skb->dst;
223         struct net_device *dev = rt->u.dst.dev;
224
225         /*
226          *      If the indicated interface is up and running, send the packet.
227          */
228         IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
229
230         skb->dev = dev;
231         skb->protocol = htons(ETH_P_IP);
232
233         /*
234          *      Multicasts are looped back for other local users
235          */
236
237         if (rt->rt_flags&RTCF_MULTICAST) {
238                 if ((!sk || inet_sk(sk)->mc_loop)
239 #ifdef CONFIG_IP_MROUTE
240                 /* Small optimization: do not loop back non-local frames
241                    returned after forwarding; they will be dropped by
242                    ip_mr_input in any case.
243                    Note that local frames are looped back so that they are
244                    delivered to local recipients.
245
246                    This check is duplicated in ip_mr_input at the moment.
247                  */
248                     && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
249 #endif
250                 ) {
251                         struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
252                         if (newskb)
253                                 NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
254                                         newskb->dev, 
255                                         ip_dev_loopback_xmit);
256                 }
257
258                 /* Multicasts with ttl 0 must not go beyond the host */
259
260                 if (skb->nh.iph->ttl == 0) {
261                         kfree_skb(skb);
262                         return 0;
263                 }
264         }
265
266         if (rt->rt_flags&RTCF_BROADCAST) {
267                 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
268                 if (newskb)
269                         NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
270                                 newskb->dev, ip_dev_loopback_xmit);
271         }
272
273         if (skb->len > dst_mtu(&rt->u.dst))
274                 return ip_fragment(skb, ip_finish_output);
275         else
276                 return ip_finish_output(skb);
277 }
278
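/*
 * Output routine for unicast routes: fragment if the packet is larger than
 * the path MTU and TSO is not in use, then finish output.
 */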
279 int ip_output(struct sk_buff *skb)
280 {
281         IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
282
283         if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->tso_size)
284                 return ip_fragment(skb, ip_finish_output);
285         else
286                 return ip_finish_output(skb);
287 }
288
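/*
 * Transmit a packet on behalf of a connected transport socket: route it if
 * it is not already routed, build the IP header in the headroom reserved by
 * the caller and pass the result through the LOCAL_OUT netfilter hook.
 */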
289 int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
290 {
291         struct sock *sk = skb->sk;
292         struct inet_sock *inet = inet_sk(sk);
293         struct ip_options *opt = inet->opt;
294         struct rtable *rt;
295         struct iphdr *iph;
296
297         /* Skip all of this if the packet is already routed,
298          * e.g. by something like SCTP.
299          */
300         rt = (struct rtable *) skb->dst;
301         if (rt != NULL)
302                 goto packet_routed;
303
304         /* Make sure we can route this packet. */
305         rt = (struct rtable *)__sk_dst_check(sk, 0);
306         if (rt == NULL) {
307                 u32 daddr;
308
309                 /* Use correct destination address if we have options. */
310                 daddr = inet->daddr;
311                 if(opt && opt->srr)
312                         daddr = opt->faddr;
313
314                 {
315                         struct flowi fl = { .oif = sk->sk_bound_dev_if,
316                                             .nl_u = { .ip4_u =
317                                                       { .daddr = daddr,
318                                                         .saddr = inet->saddr,
319                                                         .tos = RT_CONN_FLAGS(sk) } },
320                                             .proto = sk->sk_protocol,
321                                             .uli_u = { .ports =
322                                                        { .sport = inet->sport,
323                                                          .dport = inet->dport } } };
324
325                         /* If this fails, the retransmit mechanism of the transport
326                          * layer will keep trying until a route appears or the
327                          * connection times itself out.
328                          */
329                         if (ip_route_output_flow(&rt, &fl, sk, 0))
330                                 goto no_route;
331                 }
332                 __sk_dst_set(sk, &rt->u.dst);
333                 tcp_v4_setup_caps(sk, &rt->u.dst);
334         }
335         skb->dst = dst_clone(&rt->u.dst);
336
337 packet_routed:
338         if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
339                 goto no_route;
340
341         /* OK, we know where to send it, allocate and build IP header. */
342         iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
343         *((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
344         iph->tot_len = htons(skb->len);
345         if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
346                 iph->frag_off = htons(IP_DF);
347         else
348                 iph->frag_off = 0;
349         iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
350         iph->protocol = sk->sk_protocol;
351         iph->saddr    = rt->rt_src;
352         iph->daddr    = rt->rt_dst;
353         skb->nh.iph   = iph;
354         /* The transport layer has set skb->h.foo itself. */
355
356         if (opt && opt->optlen) {
357                 iph->ihl += opt->optlen >> 2;
358                 ip_options_build(skb, opt, inet->daddr, rt, 0);
359         }
360
361         ip_select_ident_more(iph, &rt->u.dst, sk, skb_shinfo(skb)->tso_segs);
362
363         /* Add an IP checksum. */
364         ip_send_check(iph);
365
366         skb->priority = sk->sk_priority;
367
368         return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
369                        dst_output);
370
371 no_route:
372         IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
373         kfree_skb(skb);
374         return -EHOSTUNREACH;
375 }
376
377
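/*
 * Copy per-packet metadata (priority, device, dst reference, netfilter and
 * traffic-control state) from the original skb to a freshly built fragment.
 */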
378 static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
379 {
380         to->pkt_type = from->pkt_type;
381         to->priority = from->priority;
382         to->protocol = from->protocol;
383         dst_release(to->dst);
384         to->dst = dst_clone(from->dst);
385         to->dev = from->dev;
386
387         /* Copy the flags to each fragment. */
388         IPCB(to)->flags = IPCB(from)->flags;
389
390 #ifdef CONFIG_NET_SCHED
391         to->tc_index = from->tc_index;
392 #endif
393 #ifdef CONFIG_NETFILTER
394         to->nfmark = from->nfmark;
395         /* Connection association is same as pre-frag packet */
396         nf_conntrack_put(to->nfct);
397         to->nfct = from->nfct;
398         nf_conntrack_get(to->nfct);
399         to->nfctinfo = from->nfctinfo;
400 #ifdef CONFIG_BRIDGE_NETFILTER
401         nf_bridge_put(to->nf_bridge);
402         to->nf_bridge = from->nf_bridge;
403         nf_bridge_get(to->nf_bridge);
404 #endif
405 #endif
406 }
407
408 /*
409  *      This IP datagram is too large to be sent in one piece.  Break it up into
410  *      smaller pieces (each of size equal to IP header plus
411  *      a block of the data of the original IP data part) that will yet fit in a
412  *      single device frame, and queue such a frame for sending.
413  */
414
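/*
 * Two strategies are used below: when the skb already carries a suitable
 * frag_list (for instance one built by ip_append_data()), each element is
 * turned into a fragment in place; otherwise we fall back to the slow path
 * that allocates new skbs and copies the payload out piece by piece.
 */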
415 int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
416 {
417         struct iphdr *iph;
418         int raw = 0;
419         int ptr;
420         struct net_device *dev;
421         struct sk_buff *skb2;
422         unsigned int mtu, hlen, left, len, ll_rs;
423         int offset;
424         int not_last_frag;
425         struct rtable *rt = (struct rtable*)skb->dst;
426         int err = 0;
427
428         dev = rt->u.dst.dev;
429
430         /*
431          *      Point into the IP datagram header.
432          */
433
434         iph = skb->nh.iph;
435
436         if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
437                 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
438                           htonl(dst_mtu(&rt->u.dst)));
439                 kfree_skb(skb);
440                 return -EMSGSIZE;
441         }
442
443         /*
444          *      Setup starting values.
445          */
446
447         hlen = iph->ihl * 4;
448         mtu = dst_mtu(&rt->u.dst) - hlen;       /* Size of data space */
449
450         /* When a frag_list is given, use it. First, check its validity:
451          * some transformers could create a wrong frag_list or break an
452          * existing one; that is not prohibited. In that case fall back to copying.
453          *
454          * LATER: this step can be merged into the real generation of fragments;
455          * we can switch to copying when we see the first bad fragment.
456          */
457         if (skb_shinfo(skb)->frag_list) {
458                 struct sk_buff *frag;
459                 int first_len = skb_pagelen(skb);
460
461                 if (first_len - hlen > mtu ||
462                     ((first_len - hlen) & 7) ||
463                     (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
464                     skb_cloned(skb))
465                         goto slow_path;
466
467                 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
468                         /* Correct geometry. */
469                         if (frag->len > mtu ||
470                             ((frag->len & 7) && frag->next) ||
471                             skb_headroom(frag) < hlen)
472                             goto slow_path;
473
474                         /* Partially cloned skb? */
475                         if (skb_shared(frag))
476                                 goto slow_path;
477
478                         BUG_ON(frag->sk);
479                         if (skb->sk) {
480                                 sock_hold(skb->sk);
481                                 frag->sk = skb->sk;
482                                 frag->destructor = sock_wfree;
483                                 skb->truesize -= frag->truesize;
484                         }
485                 }
486
487                 /* Everything is OK. Generate! */
488
489                 err = 0;
490                 offset = 0;
491                 frag = skb_shinfo(skb)->frag_list;
492                 skb_shinfo(skb)->frag_list = NULL;
493                 skb->data_len = first_len - skb_headlen(skb);
494                 skb->len = first_len;
495                 iph->tot_len = htons(first_len);
496                 iph->frag_off = htons(IP_MF);
497                 ip_send_check(iph);
498
499                 for (;;) {
500                         /* Prepare the header of the next frame
501                          * before the previous one goes down. */
502                         if (frag) {
503                                 frag->ip_summed = CHECKSUM_NONE;
504                                 frag->h.raw = frag->data;
505                                 frag->nh.raw = __skb_push(frag, hlen);
506                                 memcpy(frag->nh.raw, iph, hlen);
507                                 iph = frag->nh.iph;
508                                 iph->tot_len = htons(frag->len);
509                                 ip_copy_metadata(frag, skb);
510                                 if (offset == 0)
511                                         ip_options_fragment(frag);
512                                 offset += skb->len - hlen;
513                                 iph->frag_off = htons(offset>>3);
514                                 if (frag->next != NULL)
515                                         iph->frag_off |= htons(IP_MF);
516                                 /* Ready, complete checksum */
517                                 ip_send_check(iph);
518                         }
519
520                         err = output(skb);
521
522                         if (err || !frag)
523                                 break;
524
525                         skb = frag;
526                         frag = skb->next;
527                         skb->next = NULL;
528                 }
529
530                 if (err == 0) {
531                         IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
532                         return 0;
533                 }
534
535                 while (frag) {
536                         skb = frag->next;
537                         kfree_skb(frag);
538                         frag = skb;
539                 }
540                 IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
541                 return err;
542         }
543
544 slow_path:
545         left = skb->len - hlen;         /* Space per frame */
546         ptr = raw + hlen;               /* Where to start from */
547
548 #ifdef CONFIG_BRIDGE_NETFILTER
549         /* For bridged IP traffic encapsulated inside e.g. a VLAN header,
550          * we need to make room for the encapsulating header. */
551         ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, nf_bridge_pad(skb));
552         mtu -= nf_bridge_pad(skb);
553 #else
554         ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev);
555 #endif
556         /*
557          *      Fragment the datagram.
558          */
559
560         offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
561         not_last_frag = iph->frag_off & htons(IP_MF);
562
563         /*
564          *      Keep copying data until we run out.
565          */
566
567         while(left > 0) {
568                 len = left;
569                 /* IF: it doesn't fit, use 'mtu' - the data space left */
570                 if (len > mtu)
571                         len = mtu;
572                 /* IF: we are not sending up to and including the packet end
573                    then align the next start on an eight byte boundary */
574                 if (len < left) {
575                         len &= ~7;
576                 }
577                 /*
578                  *      Allocate buffer.
579                  */
580
581                 if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
582                         NETDEBUG(printk(KERN_INFO "IP: frag: no memory for new fragment!\n"));
583                         err = -ENOMEM;
584                         goto fail;
585                 }
586
587                 /*
588                  *      Set up data on packet
589                  */
590
591                 ip_copy_metadata(skb2, skb);
592                 skb_reserve(skb2, ll_rs);
593                 skb_put(skb2, len + hlen);
594                 skb2->nh.raw = skb2->data;
595                 skb2->h.raw = skb2->data + hlen;
596
597                 /*
598                  *      Charge the memory for the fragment to any owner
599                  *      it might possess
600                  */
601
602                 if (skb->sk)
603                         skb_set_owner_w(skb2, skb->sk);
604
605                 /*
606                  *      Copy the packet header into the new buffer.
607                  */
608
609                 memcpy(skb2->nh.raw, skb->data, hlen);
610
611                 /*
612                  *      Copy a block of the IP datagram.
613                  */
614                 if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
615                         BUG();
616                 left -= len;
617
618                 /*
619                  *      Fill in the new header fields.
620                  */
621                 iph = skb2->nh.iph;
622                 iph->frag_off = htons((offset >> 3));
623
624                 /* ANK: dirty, but effective trick. Upgrade options only if
625                  * the segment to be fragmented was THE FIRST (otherwise,
626                  * options are already fixed) and make it ONCE
627                  * on the initial skb, so that all the following fragments
628                  * will inherit fixed options.
629                  */
630                 if (offset == 0)
631                         ip_options_fragment(skb);
632
633                 /*
634                  *      Added AC : If we are fragmenting a fragment that's not the
635                  *                 last fragment then keep the MF bit on each fragment
636                  */
637                 if (left > 0 || not_last_frag)
638                         iph->frag_off |= htons(IP_MF);
639                 ptr += len;
640                 offset += len;
641
642                 /*
643                  *      Put this fragment into the sending queue.
644                  */
645
646                 IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
647
648                 iph->tot_len = htons(len + hlen);
649
650                 ip_send_check(iph);
651
652                 err = output(skb2);
653                 if (err)
654                         goto fail;
655         }
656         kfree_skb(skb);
657         IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
658         return err;
659
660 fail:
661         kfree_skb(skb); 
662         IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
663         return err;
664 }
665
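/*
 * Default getfrag callback for ip_append_data(): copy data from a user
 * iovec into the skb, folding a checksum into skb->csum on the fly unless
 * the device will checksum in hardware (CHECKSUM_HW).
 */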
666 int
667 ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
668 {
669         struct iovec *iov = from;
670
671         if (skb->ip_summed == CHECKSUM_HW) {
672                 if (memcpy_fromiovecend(to, iov, offset, len) < 0)
673                         return -EFAULT;
674         } else {
675                 unsigned int csum = 0;
676                 if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
677                         return -EFAULT;
678                 skb->csum = csum_block_add(skb->csum, csum, odd);
679         }
680         return 0;
681 }
682
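/* Checksum a block of data that lives in a (possibly highmem) page. */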
683 static inline unsigned int
684 csum_page(struct page *page, int offset, int copy)
685 {
686         char *kaddr;
687         unsigned int csum;
688         kaddr = kmap(page);
689         csum = csum_partial(kaddr + offset, copy, 0);
690         kunmap(page);
691         return csum;
692 }
693
694 /*
695  *      ip_append_data() and ip_append_page() can make one large IP datagram
696  *      from many pieces of data. Each piece will be held on the socket
697  *      until ip_push_pending_frames() is called. Each piece can be a page
698  *      or non-page data.
699  *      
700  *      Not only UDP but also other transport protocols - e.g. raw sockets -
701  *      can potentially use this interface.
702  *
703  *      LATER: length must be adjusted by pad at tail, when it is required.
704  */
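/*
 *	As a rough illustration only (the real udp_sendmsg() path has more
 *	locking and error handling than this), a datagram protocol is
 *	expected to use the interface roughly like:
 *
 *		lock_sock(sk);
 *		err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov,
 *				     len, sizeof(struct udphdr), &ipc, rt,
 *				     msg->msg_flags);
 *		if (err)
 *			ip_flush_pending_frames(sk);
 *		else if (!corked)
 *			err = ip_push_pending_frames(sk);
 *		release_sock(sk);
 */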
705 int ip_append_data(struct sock *sk,
706                    int getfrag(void *from, char *to, int offset, int len,
707                                int odd, struct sk_buff *skb),
708                    void *from, int length, int transhdrlen,
709                    struct ipcm_cookie *ipc, struct rtable *rt,
710                    unsigned int flags)
711 {
712         struct inet_sock *inet = inet_sk(sk);
713         struct sk_buff *skb;
714
715         struct ip_options *opt = NULL;
716         int hh_len;
717         int exthdrlen;
718         int mtu;
719         int copy;
720         int err;
721         int offset = 0;
722         unsigned int maxfraglen, fragheaderlen;
723         int csummode = CHECKSUM_NONE;
724
725         if (flags&MSG_PROBE)
726                 return 0;
727
728         if (skb_queue_empty(&sk->sk_write_queue)) {
729                 /*
730                  * setup for corking.
731                  */
732                 opt = ipc->opt;
733                 if (opt) {
734                         if (inet->cork.opt == NULL) {
735                                 inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
736                                 if (unlikely(inet->cork.opt == NULL))
737                                         return -ENOBUFS;
738                         }
739                         memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
740                         inet->cork.flags |= IPCORK_OPT;
741                         inet->cork.addr = ipc->addr;
742                 }
743                 dst_hold(&rt->u.dst);
744                 inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
745                 inet->cork.rt = rt;
746                 inet->cork.length = 0;
747                 sk->sk_sndmsg_page = NULL;
748                 sk->sk_sndmsg_off = 0;
749                 if ((exthdrlen = rt->u.dst.header_len) != 0) {
750                         length += exthdrlen;
751                         transhdrlen += exthdrlen;
752                 }
753         } else {
754                 rt = inet->cork.rt;
755                 if (inet->cork.flags & IPCORK_OPT)
756                         opt = inet->cork.opt;
757
758                 transhdrlen = 0;
759                 exthdrlen = 0;
760                 mtu = inet->cork.fragsize;
761         }
762         hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
763
764         fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
765         maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
766
767         if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
768                 ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
769                 return -EMSGSIZE;
770         }
771
772         /*
773          * transhdrlen > 0 means that this is the first fragment and we wish
774          * it not to be fragmented later.
775          */
776         if (transhdrlen &&
777             length + fragheaderlen <= mtu &&
778             rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
779             !exthdrlen)
780                 csummode = CHECKSUM_HW;
781
782         inet->cork.length += length;
783
784         /* So, what's going on in the loop below?
785          *
786          * We use the calculated fragment length to generate a chain of skbs;
787          * each segment is an IP fragment ready to be sent to the network once
788          * the appropriate IP header has been added.
789          */
790
791         if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
792                 goto alloc_new_skb;
793
794         while (length > 0) {
795                 /* Check if the remaining data fits into current packet. */
796                 copy = mtu - skb->len;
797                 if (copy < length)
798                         copy = maxfraglen - skb->len;
799                 if (copy <= 0) {
800                         char *data;
801                         unsigned int datalen;
802                         unsigned int fraglen;
803                         unsigned int fraggap;
804                         unsigned int alloclen;
805                         struct sk_buff *skb_prev;
806 alloc_new_skb:
807                         skb_prev = skb;
808                         if (skb_prev)
809                                 fraggap = skb_prev->len - maxfraglen;
810                         else
811                                 fraggap = 0;
812
813                         /*
814                          * If remaining data exceeds the mtu,
815                          * we know we need more fragment(s).
816                          */
817                         datalen = length + fraggap;
818                         if (datalen > mtu - fragheaderlen)
819                                 datalen = maxfraglen - fragheaderlen;
820                         fraglen = datalen + fragheaderlen;
821
822                         if ((flags & MSG_MORE) && 
823                             !(rt->u.dst.dev->features&NETIF_F_SG))
824                                 alloclen = mtu;
825                         else
826                                 alloclen = datalen + fragheaderlen;
827
828                         /* The last fragment gets additional space at tail.
829                          * Note that with MSG_MORE we overallocate on fragments,
830                          * because we have no idea which fragment will be
831                          * the last.
832                          */
833                         if (datalen == length)
834                                 alloclen += rt->u.dst.trailer_len;
835
836                         if (transhdrlen) {
837                                 skb = sock_alloc_send_skb(sk, 
838                                                 alloclen + hh_len + 15,
839                                                 (flags & MSG_DONTWAIT), &err);
840                         } else {
841                                 skb = NULL;
842                                 if (atomic_read(&sk->sk_wmem_alloc) <=
843                                     2 * sk->sk_sndbuf)
844                                         skb = sock_wmalloc(sk, 
845                                                            alloclen + hh_len + 15, 1,
846                                                            sk->sk_allocation);
847                                 if (unlikely(skb == NULL))
848                                         err = -ENOBUFS;
849                         }
850                         if (skb == NULL)
851                                 goto error;
852
853                         /*
854                          *      Fill in the control structures
855                          */
856                         skb->ip_summed = csummode;
857                         skb->csum = 0;
858                         skb_reserve(skb, hh_len);
859
860                         /*
861                          *      Find where to start putting bytes.
862                          */
863                         data = skb_put(skb, fraglen);
864                         skb->nh.raw = data + exthdrlen;
865                         data += fragheaderlen;
866                         skb->h.raw = data + exthdrlen;
867
868                         if (fraggap) {
869                                 skb->csum = skb_copy_and_csum_bits(
870                                         skb_prev, maxfraglen,
871                                         data + transhdrlen, fraggap, 0);
872                                 skb_prev->csum = csum_sub(skb_prev->csum,
873                                                           skb->csum);
874                                 data += fraggap;
875                                 skb_trim(skb_prev, maxfraglen);
876                         }
877
878                         copy = datalen - transhdrlen - fraggap;
879                         if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
880                                 err = -EFAULT;
881                                 kfree_skb(skb);
882                                 goto error;
883                         }
884
885                         offset += copy;
886                         length -= datalen - fraggap;
887                         transhdrlen = 0;
888                         exthdrlen = 0;
889                         csummode = CHECKSUM_NONE;
890
891                         /*
892                          * Put the packet on the pending queue.
893                          */
894                         __skb_queue_tail(&sk->sk_write_queue, skb);
895                         continue;
896                 }
897
898                 if (copy > length)
899                         copy = length;
900
901                 if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
902                         unsigned int off;
903
904                         off = skb->len;
905                         if (getfrag(from, skb_put(skb, copy), 
906                                         offset, copy, off, skb) < 0) {
907                                 __skb_trim(skb, off);
908                                 err = -EFAULT;
909                                 goto error;
910                         }
911                 } else {
912                         int i = skb_shinfo(skb)->nr_frags;
913                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
914                         struct page *page = sk->sk_sndmsg_page;
915                         int off = sk->sk_sndmsg_off;
916                         unsigned int left;
917
918                         if (page && (left = PAGE_SIZE - off) > 0) {
919                                 if (copy >= left)
920                                         copy = left;
921                                 if (page != frag->page) {
922                                         if (i == MAX_SKB_FRAGS) {
923                                                 err = -EMSGSIZE;
924                                                 goto error;
925                                         }
926                                         get_page(page);
927                                         skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
928                                         frag = &skb_shinfo(skb)->frags[i];
929                                 }
930                         } else if (i < MAX_SKB_FRAGS) {
931                                 if (copy > PAGE_SIZE)
932                                         copy = PAGE_SIZE;
933                                 page = alloc_pages(sk->sk_allocation, 0);
934                                 if (page == NULL)  {
935                                         err = -ENOMEM;
936                                         goto error;
937                                 }
938                                 sk->sk_sndmsg_page = page;
939                                 sk->sk_sndmsg_off = 0;
940
941                                 skb_fill_page_desc(skb, i, page, 0, 0);
942                                 frag = &skb_shinfo(skb)->frags[i];
943                                 skb->truesize += PAGE_SIZE;
944                                 atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
945                         } else {
946                                 err = -EMSGSIZE;
947                                 goto error;
948                         }
949                         if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
950                                 err = -EFAULT;
951                                 goto error;
952                         }
953                         sk->sk_sndmsg_off += copy;
954                         frag->size += copy;
955                         skb->len += copy;
956                         skb->data_len += copy;
957                 }
958                 offset += copy;
959                 length -= copy;
960         }
961
962         return 0;
963
964 error:
965         inet->cork.length -= length;
966         IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
967         return err; 
968 }
969
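/*
 * Zero-copy variant of ip_append_data(): attach a page of data to the
 * pending queue as a paged fragment instead of copying it.  Only valid on
 * scatter-gather capable devices and only after ip_append_data() has
 * started the corked datagram (used e.g. by the UDP sendpage path).
 */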
970 ssize_t ip_append_page(struct sock *sk, struct page *page,
971                        int offset, size_t size, int flags)
972 {
973         struct inet_sock *inet = inet_sk(sk);
974         struct sk_buff *skb;
975         struct rtable *rt;
976         struct ip_options *opt = NULL;
977         int hh_len;
978         int mtu;
979         int len;
980         int err;
981         unsigned int maxfraglen, fragheaderlen, fraggap;
982
983         if (inet->hdrincl)
984                 return -EPERM;
985
986         if (flags&MSG_PROBE)
987                 return 0;
988
989         if (skb_queue_empty(&sk->sk_write_queue))
990                 return -EINVAL;
991
992         rt = inet->cork.rt;
993         if (inet->cork.flags & IPCORK_OPT)
994                 opt = inet->cork.opt;
995
996         if (!(rt->u.dst.dev->features&NETIF_F_SG))
997                 return -EOPNOTSUPP;
998
999         hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
1000         mtu = inet->cork.fragsize;
1001
1002         fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
1003         maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
1004
1005         if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
1006                 ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
1007                 return -EMSGSIZE;
1008         }
1009
1010         if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
1011                 return -EINVAL;
1012
1013         inet->cork.length += size;
1014
1015         while (size > 0) {
1016                 int i;
1017
1018                 /* Check if the remaining data fits into current packet. */
1019                 len = mtu - skb->len;
1020                 if (len < size)
1021                         len = maxfraglen - skb->len;
1022                 if (len <= 0) {
1023                         struct sk_buff *skb_prev;
1024                         char *data;
1025                         struct iphdr *iph;
1026                         int alloclen;
1027
1028                         skb_prev = skb;
1029                         if (skb_prev)
1030                                 fraggap = skb_prev->len - maxfraglen;
1031                         else
1032                                 fraggap = 0;
1033
1034                         alloclen = fragheaderlen + hh_len + fraggap + 15;
1035                         skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
1036                         if (unlikely(!skb)) {
1037                                 err = -ENOBUFS;
1038                                 goto error;
1039                         }
1040
1041                         /*
1042                          *      Fill in the control structures
1043                          */
1044                         skb->ip_summed = CHECKSUM_NONE;
1045                         skb->csum = 0;
1046                         skb_reserve(skb, hh_len);
1047
1048                         /*
1049                          *      Find where to start putting bytes.
1050                          */
1051                         data = skb_put(skb, fragheaderlen + fraggap);
1052                         skb->nh.iph = iph = (struct iphdr *)data;
1053                         data += fragheaderlen;
1054                         skb->h.raw = data;
1055
1056                         if (fraggap) {
1057                                 skb->csum = skb_copy_and_csum_bits(
1058                                         skb_prev, maxfraglen,
1059                                         data, fraggap, 0);
1060                                 skb_prev->csum = csum_sub(skb_prev->csum,
1061                                                           skb->csum);
1062                                 skb_trim(skb_prev, maxfraglen);
1063                         }
1064
1065                         /*
1066                          * Put the packet on the pending queue.
1067                          */
1068                         __skb_queue_tail(&sk->sk_write_queue, skb);
1069                         continue;
1070                 }
1071
1072                 i = skb_shinfo(skb)->nr_frags;
1073                 if (len > size)
1074                         len = size;
1075                 if (skb_can_coalesce(skb, i, page, offset)) {
1076                         skb_shinfo(skb)->frags[i-1].size += len;
1077                 } else if (i < MAX_SKB_FRAGS) {
1078                         get_page(page);
1079                         skb_fill_page_desc(skb, i, page, offset, len);
1080                 } else {
1081                         err = -EMSGSIZE;
1082                         goto error;
1083                 }
1084
1085                 if (skb->ip_summed == CHECKSUM_NONE) {
1086                         unsigned int csum;
1087                         csum = csum_page(page, offset, len);
1088                         skb->csum = csum_block_add(skb->csum, csum, skb->len);
1089                 }
1090
1091                 skb->len += len;
1092                 skb->data_len += len;
1093                 offset += len;
1094                 size -= len;
1095         }
1096         return 0;
1097
1098 error:
1099         inet->cork.length -= size;
1100         IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
1101         return err;
1102 }
1103
1104 /*
1105  *      Combine all pending IP fragments on the socket into one IP datagram
1106  *      and push it out.
1107  */
1108 int ip_push_pending_frames(struct sock *sk)
1109 {
1110         struct sk_buff *skb, *tmp_skb;
1111         struct sk_buff **tail_skb;
1112         struct inet_sock *inet = inet_sk(sk);
1113         struct ip_options *opt = NULL;
1114         struct rtable *rt = inet->cork.rt;
1115         struct iphdr *iph;
1116         int df = 0;
1117         __u8 ttl;
1118         int err = 0;
1119
1120         if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
1121                 goto out;
1122         tail_skb = &(skb_shinfo(skb)->frag_list);
1123
1124         /* Move skb->data from the ext header to the IP header. */
1125         if (skb->data < skb->nh.raw)
1126                 __skb_pull(skb, skb->nh.raw - skb->data);
1127         while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
1128                 __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
1129                 *tail_skb = tmp_skb;
1130                 tail_skb = &(tmp_skb->next);
1131                 skb->len += tmp_skb->len;
1132                 skb->data_len += tmp_skb->len;
1133                 skb->truesize += tmp_skb->truesize;
1134                 __sock_put(tmp_skb->sk);
1135                 tmp_skb->destructor = NULL;
1136                 tmp_skb->sk = NULL;
1137         }
1138
1139         /* Unless the user demanded real PMTU discovery (IP_PMTUDISC_DO), we allow
1140          * the frame generated here to be fragmented. No matter how transforms
1141          * change the size of the packet, it will come out.
1142          */
1143         if (inet->pmtudisc != IP_PMTUDISC_DO)
1144                 skb->local_df = 1;
1145
1146         /* The DF bit is set when we want to see DF on outgoing frames.
1147          * If local_df is also set, we still allow this frame to be
1148          * fragmented locally. */
1149         if (inet->pmtudisc == IP_PMTUDISC_DO ||
1150             (skb->len <= dst_mtu(&rt->u.dst) &&
1151              ip_dont_fragment(sk, &rt->u.dst)))
1152                 df = htons(IP_DF);
1153
1154         if (inet->cork.flags & IPCORK_OPT)
1155                 opt = inet->cork.opt;
1156
1157         if (rt->rt_type == RTN_MULTICAST)
1158                 ttl = inet->mc_ttl;
1159         else
1160                 ttl = ip_select_ttl(inet, &rt->u.dst);
1161
1162         iph = (struct iphdr *)skb->data;
1163         iph->version = 4;
1164         iph->ihl = 5;
1165         if (opt) {
1166                 iph->ihl += opt->optlen>>2;
1167                 ip_options_build(skb, opt, inet->cork.addr, rt, 0);
1168         }
1169         iph->tos = inet->tos;
1170         iph->tot_len = htons(skb->len);
1171         iph->frag_off = df;
1172         if (!df) {
1173                 __ip_select_ident(iph, &rt->u.dst, 0);
1174         } else {
1175                 iph->id = htons(inet->id++);
1176         }
1177         iph->ttl = ttl;
1178         iph->protocol = sk->sk_protocol;
1179         iph->saddr = rt->rt_src;
1180         iph->daddr = rt->rt_dst;
1181         ip_send_check(iph);
1182
1183         skb->priority = sk->sk_priority;
1184         skb->dst = dst_clone(&rt->u.dst);
1185
1186         /* Netfilter gets the whole, not yet fragmented skb. */
1187         err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, 
1188                       skb->dst->dev, dst_output);
1189         if (err) {
1190                 if (err > 0)
1191                         err = inet->recverr ? net_xmit_errno(err) : 0;
1192                 if (err)
1193                         goto error;
1194         }
1195
1196 out:
1197         inet->cork.flags &= ~IPCORK_OPT;
1198         if (inet->cork.opt) {
1199                 kfree(inet->cork.opt);
1200                 inet->cork.opt = NULL;
1201         }
1202         if (inet->cork.rt) {
1203                 ip_rt_put(inet->cork.rt);
1204                 inet->cork.rt = NULL;
1205         }
1206         return err;
1207
1208 error:
1209         IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
1210         goto out;
1211 }
1212
1213 /*
1214  *      Throw away all pending data on the socket.
1215  */
1216 void ip_flush_pending_frames(struct sock *sk)
1217 {
1218         struct inet_sock *inet = inet_sk(sk);
1219         struct sk_buff *skb;
1220
1221         while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
1222                 kfree_skb(skb);
1223
1224         inet->cork.flags &= ~IPCORK_OPT;
1225         if (inet->cork.opt) {
1226                 kfree(inet->cork.opt);
1227                 inet->cork.opt = NULL;
1228         }
1229         if (inet->cork.rt) {
1230                 ip_rt_put(inet->cork.rt);
1231                 inet->cork.rt = NULL;
1232         }
1233 }
1234
1235
1236 /*
1237  *      Fetch data from kernel space and fill in checksum if needed.
1238  */
1239 static int ip_reply_glue_bits(void *dptr, char *to, int offset, 
1240                               int len, int odd, struct sk_buff *skb)
1241 {
1242         unsigned int csum;
1243
1244         csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
1245         skb->csum = csum_block_add(skb->csum, csum, odd);
1246         return 0;  
1247 }
1248
1249 /* 
1250  *      Generic function to send a packet as reply to another packet.
1251  *      Used to send TCP resets so far. ICMP should use this function too.
1252  *
1253  *      Should run single-threaded per socket because it uses the sock
1254  *      structure to pass arguments.
1255  *
1256  *      LATER: switch from ip_build_xmit to ip_append_*
1257  */
1258 void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
1259                    unsigned int len)
1260 {
1261         struct inet_sock *inet = inet_sk(sk);
1262         struct {
1263                 struct ip_options       opt;
1264                 char                    data[40];
1265         } replyopts;
1266         struct ipcm_cookie ipc;
1267         u32 daddr;
1268         struct rtable *rt = (struct rtable*)skb->dst;
1269
1270         if (ip_options_echo(&replyopts.opt, skb))
1271                 return;
1272
1273         daddr = ipc.addr = rt->rt_src;
1274         ipc.opt = NULL;
1275
1276         if (replyopts.opt.optlen) {
1277                 ipc.opt = &replyopts.opt;
1278
1279                 if (ipc.opt->srr)
1280                         daddr = replyopts.opt.faddr;
1281         }
1282
1283         {
1284                 struct flowi fl = { .nl_u = { .ip4_u =
1285                                               { .daddr = daddr,
1286                                                 .saddr = rt->rt_spec_dst,
1287                                                 .tos = RT_TOS(skb->nh.iph->tos) } },
1288                                     /* Not quite clean, but right. */
1289                                     .uli_u = { .ports =
1290                                                { .sport = skb->h.th->dest,
1291                                                  .dport = skb->h.th->source } },
1292                                     .proto = sk->sk_protocol };
1293                 if (ip_route_output_key(&rt, &fl))
1294                         return;
1295         }
1296
1297         /* And let IP do all the hard work.
1298
1299            This chunk is not reentrant, hence the spinlock.
1300            Note that it relies on the fact that this function is called
1301            with BHs disabled locally and that sk cannot already be spinlocked.
1302          */
1303         bh_lock_sock(sk);
1304         inet->tos = skb->nh.iph->tos;
1305         sk->sk_priority = skb->priority;
1306         sk->sk_protocol = skb->nh.iph->protocol;
1307         ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
1308                        &ipc, rt, MSG_DONTWAIT);
1309         if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
1310                 if (arg->csumoffset >= 0)
1311                         *((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
1312                 skb->ip_summed = CHECKSUM_NONE;
1313                 ip_push_pending_frames(sk);
1314         }
1315
1316         bh_unlock_sock(sk);
1317
1318         ip_rt_put(rt);
1319 }
1320
1321 void __init ip_init(void)
1322 {
1323         ip_rt_init();
1324         inet_initpeers();
1325
1326 #if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
1327         igmp_mc_proc_init();
1328 #endif
1329 }
1330
1331 EXPORT_SYMBOL(ip_finish_output);
1332 EXPORT_SYMBOL(ip_fragment);
1333 EXPORT_SYMBOL(ip_generic_getfrag);
1334 EXPORT_SYMBOL(ip_queue_xmit);
1335 EXPORT_SYMBOL(ip_send_check);
1336
1337 #ifdef CONFIG_SYSCTL
1338 EXPORT_SYMBOL(sysctl_ip_default_ttl);
1339 #endif