[IPV6] IPSEC: Support sending with Mobile IPv6 extension headers.
net/ipv6/ip6_output.c (linux-2.6.git)
/*
 *      IPv6 output functions
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      $Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *      Based on linux/net/ipv4/ip_output.c
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Changes:
 *      A.N.Kuznetsov   :       arithmetic in fragmentation.
 *                              extension headers are implemented.
 *                              route changes now work.
 *                              ip6_forward does not confuse sniffers.
 *                              etc.
 *
 *      H. von Brand    :       Added missing #include <linux/string.h>
 *      Imran Patel     :       frag id should be in NBO
 *      Kazunori MIYAZAWA @USAGI
 *                      :       add ip6_append_data and related functions
 *                              for datagram xmit
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>

static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));

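/*
 * Fragment IDs come from a single global counter, serialized by a
 * spinlock.  Zero is skipped so that a frag_id of 0 can be used to
 * mean "not selected yet" (see the slow path in ip6_fragment).
 */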
static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
        static u32 ipv6_fragmentation_id = 1;
        static DEFINE_SPINLOCK(ip6_id_lock);

        spin_lock_bh(&ip6_id_lock);
        fhdr->identification = htonl(ipv6_fragmentation_id);
        if (++ipv6_fragmentation_id == 0)
                ipv6_fragmentation_id = 1;
        spin_unlock_bh(&ip6_id_lock);
}

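/*
 * Final output step: use the cached hardware header if the dst has one,
 * otherwise fall back to the neighbour output function to resolve the
 * link-layer address.
 */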
static inline int ip6_output_finish(struct sk_buff *skb)
{
        struct dst_entry *dst = skb->dst;
        struct hh_cache *hh = dst->hh;

        if (hh) {
                int hh_alen;

                read_lock_bh(&hh->hh_lock);
                hh_alen = HH_DATA_ALIGN(hh->hh_len);
                memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
                read_unlock_bh(&hh->hh_lock);
                skb_push(skb, hh->hh_len);
                return hh->hh_output(skb);
        } else if (dst->neighbour)
                return dst->neighbour->output(skb);

        IP6_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EINVAL;
}

/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
        newskb->mac.raw = newskb->data;
        __skb_pull(newskb, newskb->nh.raw - newskb->data);
        newskb->pkt_type = PACKET_LOOPBACK;
        newskb->ip_summed = CHECKSUM_UNNECESSARY;
        BUG_TRAP(newskb->dst);

        netif_rx(newskb);
        return 0;
}

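/*
 * Device-level output.  For multicast destinations a clone may be
 * looped back through netfilter first, so that local listeners on the
 * sending node receive their copy.
 */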
static int ip6_output2(struct sk_buff *skb)
{
        struct dst_entry *dst = skb->dst;
        struct net_device *dev = dst->dev;

        skb->protocol = htons(ETH_P_IPV6);
        skb->dev = dev;

        if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
                struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;

                if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
                    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
                                &skb->nh.ipv6h->saddr)) {
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

                        /* Do not check for IFF_ALLMULTI; multicast routing
                           is not supported in any case.
                         */
                        if (newskb)
                                NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
                                        newskb->dev,
                                        ip6_dev_loopback_xmit);

                        if (skb->nh.ipv6h->hop_limit == 0) {
                                IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
                                kfree_skb(skb);
                                return 0;
                        }
                }

                IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
        }

        return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev,
                       ip6_output_finish);
}

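/*
 * Fragment when the packet exceeds the path MTU (unless the hardware
 * will segment it, i.e. GSO), or when dst_allfrag() says every packet
 * on this path must carry a fragment header (path MTU below
 * IPV6_MIN_MTU).
 */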
int ip6_output(struct sk_buff *skb)
{
        if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
                                dst_allfrag(skb->dst))
                return ip6_fragment(skb, ip6_output2);
        else
                return ip6_output2(skb);
}

/*
 *      xmit an sk_buff (used by TCP)
 */

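/*
 * ip6_xmit pushes the extension headers given in @opt plus the IPv6
 * header onto @skb and sends it through NF_IP6_LOCAL_OUT.  If the
 * result would exceed the path MTU and @ipfragok is not set, the
 * packet is dropped and an ICMPV6_PKT_TOOBIG is delivered locally.
 */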
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
             struct ipv6_txoptions *opt, int ipfragok)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *first_hop = &fl->fl6_dst;
        struct dst_entry *dst = skb->dst;
        struct ipv6hdr *hdr;
        u8  proto = fl->proto;
        int seg_len = skb->len;
        int hlimit, tclass;
        u32 mtu;

        if (opt) {
                int head_room;

                /* First: exthdrs may take lots of space (~8K for now)
                   MAX_HEADER is not enough.
                 */
                head_room = opt->opt_nflen + opt->opt_flen;
                seg_len += head_room;
                head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

                if (skb_headroom(skb) < head_room) {
                        struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
                        kfree_skb(skb);
                        skb = skb2;
                        if (skb == NULL) {
                                IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
                                return -ENOBUFS;
                        }
                        if (sk)
                                skb_set_owner_w(skb, sk);
                }
                if (opt->opt_flen)
                        ipv6_push_frag_opts(skb, opt, &proto);
                if (opt->opt_nflen)
                        ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
        }

        hdr = skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, sizeof(struct ipv6hdr));

        /*
         *      Fill in the IPv6 header
         */

        hlimit = -1;
        if (np)
                hlimit = np->hop_limit;
        if (hlimit < 0)
                hlimit = dst_metric(dst, RTAX_HOPLIMIT);
        if (hlimit < 0)
                hlimit = ipv6_get_hoplimit(dst->dev);

        tclass = -1;
        if (np)
                tclass = np->tclass;
        if (tclass < 0)
                tclass = 0;

        *(u32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

        hdr->payload_len = htons(seg_len);
        hdr->nexthdr = proto;
        hdr->hop_limit = hlimit;

        ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
        ipv6_addr_copy(&hdr->daddr, first_hop);

        skb->priority = sk->sk_priority;

        mtu = dst_mtu(dst);
        if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
                IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
                return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
                                dst_output);
        }

        if (net_ratelimit())
                printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
        skb->dev = dst->dev;
        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
        IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
        kfree_skb(skb);
        return -EMSGSIZE;
}

/*
 *      To avoid extra problems ND packets are sent through this
 *      routine. It's code duplication but I really want to avoid
 *      extra checks since ipv6_build_header is used by TCP (which
 *      is performance-critical for us)
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
               struct in6_addr *saddr, struct in6_addr *daddr,
               int proto, int len)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6hdr *hdr;
        int totlen;

        skb->protocol = htons(ETH_P_IPV6);
        skb->dev = dev;

        totlen = len + sizeof(struct ipv6hdr);

        hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
        skb->nh.ipv6h = hdr;

        *(u32*)hdr = htonl(0x60000000);

        hdr->payload_len = htons(len);
        hdr->nexthdr = proto;
        hdr->hop_limit = np->hop_limit;

        ipv6_addr_copy(&hdr->saddr, saddr);
        ipv6_addr_copy(&hdr->daddr, daddr);

        return 0;
}

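/*
 * Deliver a packet carrying a Router Alert option to every raw socket
 * that registered for this alert value.  Each listener but the last
 * gets a clone; the original skb goes to the last match, so a return
 * value of 1 means the packet has been consumed.
 */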
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
        struct ip6_ra_chain *ra;
        struct sock *last = NULL;

        read_lock(&ip6_ra_lock);
        for (ra = ip6_ra_chain; ra; ra = ra->next) {
                struct sock *sk = ra->sk;
                if (sk && ra->sel == sel &&
                    (!sk->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == skb->dev->ifindex)) {
                        if (last) {
                                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                                if (skb2)
                                        rawv6_rcv(last, skb2);
                        }
                        last = sk;
                }
        }

        if (last) {
                rawv6_rcv(last, skb);
                read_unlock(&ip6_ra_lock);
                return 1;
        }
        read_unlock(&ip6_ra_lock);
        return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
        return dst_output(skb);
}

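/*
 * Forward one packet: XFRM policy check, Router Alert delivery,
 * hop-limit and MTU enforcement, redirect generation, then
 * NF_IP6_FORWARD.  The hop limit is only decremented after skb_cow(),
 * once we are sure we own the header.
 */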
int ip6_forward(struct sk_buff *skb)
{
        struct dst_entry *dst = skb->dst;
        struct ipv6hdr *hdr = skb->nh.ipv6h;
        struct inet6_skb_parm *opt = IP6CB(skb);

        if (ipv6_devconf.forwarding == 0)
                goto error;

        if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
                IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
                goto drop;
        }

        skb->ip_summed = CHECKSUM_NONE;

        /*
         *      We do NOT process RA packets; we push them to user level
         *      AS IS, with no guarantee that the application will be
         *      able to interpret them. The reason is that we cannot do
         *      anything clever here.
         *
         *      We are not an end node, so if the packet contains AH/ESP
         *      we cannot do anything with it. Defragmentation would
         *      also be a mistake: RA packets cannot be fragmented,
         *      because there is no guarantee that different fragments
         *      will go along one path. --ANK
         */
        if (opt->ra) {
                u8 *ptr = skb->nh.raw + opt->ra;
                if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
                        return 0;
        }

        /*
         *      check and decrement ttl
         */
        if (hdr->hop_limit <= 1) {
                /* Force OUTPUT device used as source address */
                skb->dev = dst->dev;
                icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
                            0, skb->dev);
                IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);

                kfree_skb(skb);
                return -ETIMEDOUT;
        }

        if (!xfrm6_route_forward(skb)) {
                IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
                goto drop;
        }
        dst = skb->dst;

        /* The IPv6 specs say nothing about it, but it is clear that we
           cannot send redirects for source-routed frames.
         */
        if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
                struct in6_addr *target = NULL;
                struct rt6_info *rt;
                struct neighbour *n = dst->neighbour;

                /*
                 *      incoming and outgoing devices are the same
                 *      send a redirect.
                 */

                rt = (struct rt6_info *) dst;
                if ((rt->rt6i_flags & RTF_GATEWAY))
                        target = (struct in6_addr*)&n->primary_key;
                else
                        target = &hdr->daddr;

                /* Limit redirects both by destination (here)
                   and by source (inside ndisc_send_redirect)
                 */
                if (xrlim_allow(dst, 1*HZ))
                        ndisc_send_redirect(skb, n, target);
        } else if (ipv6_addr_type(&hdr->saddr) & (IPV6_ADDR_MULTICAST|
                   IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) {
                /* This check is security critical. */
                goto error;
        }

        if (skb->len > dst_mtu(dst)) {
                /* Again, force OUTPUT device used as source address */
                skb->dev = dst->dev;
                icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
                IP6_INC_STATS_BH(IPSTATS_MIB_INTOOBIGERRORS);
                IP6_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        if (skb_cow(skb, dst->dev->hard_header_len)) {
                IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
                goto drop;
        }

        hdr = skb->nh.ipv6h;

        /* Mangling the hop limit is delayed until after the skb COW */
        hdr->hop_limit--;

        IP6_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
        return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev,
                       ip6_forward_finish);

error:
        IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
drop:
        kfree_skb(skb);
        return -EINVAL;
}

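/*
 * Copy per-packet metadata (packet type, priority, dst, traffic-control
 * index, netfilter/conntrack state) from the original skb to a fragment
 * so each fragment traverses the rest of the stack as the original
 * would have.
 */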
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
        to->pkt_type = from->pkt_type;
        to->priority = from->priority;
        to->protocol = from->protocol;
        dst_release(to->dst);
        to->dst = dst_clone(from->dst);
        to->dev = from->dev;

#ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
        to->nfmark = from->nfmark;
        /* Connection association is same as pre-frag packet */
        nf_conntrack_put(to->nfct);
        to->nfct = from->nfct;
        nf_conntrack_get(to->nfct);
        to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        nf_conntrack_put_reasm(to->nfct_reasm);
        to->nfct_reasm = from->nfct_reasm;
        nf_conntrack_get_reasm(to->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(to->nf_bridge);
        to->nf_bridge = from->nf_bridge;
        nf_bridge_get(to->nf_bridge);
#endif
#endif
        skb_copy_secmark(to, from);
}

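/*
 * Find the length of the unfragmentable part of the packet, i.e. the
 * offset of the first extension header that may go into the
 * fragmentable part, and return a pointer to the nexthdr byte that
 * gets rewritten to NEXTHDR_FRAGMENT.  Hop-by-hop and routing headers
 * always stay in the unfragmentable part.  A destination options
 * header normally ends the scan, but when it carries a Home Address
 * option (Mobile IPv6) it precedes the fragment header and must be
 * skipped as well; that is what the IPV6_TLV_HAO probe below is for.
 */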
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
        u16 offset = sizeof(struct ipv6hdr);
        struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1);
        unsigned int packet_len = skb->tail - skb->nh.raw;
        int found_rhdr = 0;
        *nexthdr = &skb->nh.ipv6h->nexthdr;

        while (offset + 1 <= packet_len) {

                switch (**nexthdr) {

                case NEXTHDR_HOP:
                        break;
                case NEXTHDR_ROUTING:
                        found_rhdr = 1;
                        break;
                case NEXTHDR_DEST:
#ifdef CONFIG_IPV6_MIP6
                        if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
                                break;
#endif
                        if (found_rhdr)
                                return offset;
                        break;
                default:
                        return offset;
                }

                offset += ipv6_optlen(exthdr);
                *nexthdr = &exthdr->nexthdr;
                exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
        }

        return offset;
}
EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);

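/*
 * Fragment @skb and feed each fragment to @output.  Fast path: if the
 * skb already carries a frag_list whose pieces fit the MTU on 8-byte
 * boundaries, only per-fragment headers are prepended.  Otherwise the
 * slow path allocates a fresh skb per fragment and copies the data.
 */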
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
        struct net_device *dev;
        struct sk_buff *frag;
        struct rt6_info *rt = (struct rt6_info*)skb->dst;
        struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
        struct ipv6hdr *tmp_hdr;
        struct frag_hdr *fh;
        unsigned int mtu, hlen, left, len;
        u32 frag_id = 0;
        int ptr, offset = 0, err = 0;
        u8 *prevhdr, nexthdr = 0;

        dev = rt->u.dst.dev;
        hlen = ip6_find_1stfragopt(skb, &prevhdr);
        nexthdr = *prevhdr;

        mtu = dst_mtu(&rt->u.dst);
        if (np && np->frag_size < mtu) {
                if (np->frag_size)
                        mtu = np->frag_size;
        }
        mtu -= hlen + sizeof(struct frag_hdr);

        if (skb_shinfo(skb)->frag_list) {
                int first_len = skb_pagelen(skb);

                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
                    skb_cloned(skb))
                        goto slow_path;

                for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
                        /* Correct geometry. */
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
                                goto slow_path;

                        /* Partially cloned skb? */
                        if (skb_shared(frag))
                                goto slow_path;

                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                sock_hold(skb->sk);
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
                                skb->truesize -= frag->truesize;
                        }
                }

                err = 0;
                offset = 0;
                frag = skb_shinfo(skb)->frag_list;
                skb_shinfo(skb)->frag_list = NULL;
                /* BUILD HEADER */

                tmp_hdr = kmalloc(hlen, GFP_ATOMIC);
                if (!tmp_hdr) {
                        IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
                        return -ENOMEM;
                }

                *prevhdr = NEXTHDR_FRAGMENT;
                memcpy(tmp_hdr, skb->nh.raw, hlen);
                __skb_pull(skb, hlen);
                fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
                skb->nh.raw = __skb_push(skb, hlen);
                memcpy(skb->nh.raw, tmp_hdr, hlen);

                ipv6_select_ident(skb, fh);
                fh->nexthdr = nexthdr;
                fh->reserved = 0;
                fh->frag_off = htons(IP6_MF);
                frag_id = fh->identification;

                first_len = skb_pagelen(skb);
                skb->data_len = first_len - skb_headlen(skb);
                skb->len = first_len;
                skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr));

                for (;;) {
                        /* Prepare header of the next frame,
                         * before previous one went down. */
                        if (frag) {
                                frag->ip_summed = CHECKSUM_NONE;
                                frag->h.raw = frag->data;
                                fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
                                frag->nh.raw = __skb_push(frag, hlen);
                                memcpy(frag->nh.raw, tmp_hdr, hlen);
                                offset += skb->len - hlen - sizeof(struct frag_hdr);
                                fh->nexthdr = nexthdr;
                                fh->reserved = 0;
                                fh->frag_off = htons(offset);
                                if (frag->next != NULL)
                                        fh->frag_off |= htons(IP6_MF);
                                fh->identification = frag_id;
                                frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
                                ip6_copy_metadata(frag, skb);
                        }

                        err = output(skb);
                        if (!err)
                                IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);

                        if (err || !frag)
                                break;

                        skb = frag;
                        frag = skb->next;
                        skb->next = NULL;
                }

                kfree(tmp_hdr);

                if (err == 0) {
                        IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
                        return 0;
                }

                while (frag) {
                        skb = frag->next;
                        kfree_skb(frag);
                        frag = skb;
                }

                IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
                return err;
        }

slow_path:
        left = skb->len - hlen;         /* Space per frame */
        ptr = hlen;                     /* Where to start from */

        /*
         *      Fragment the datagram.
         */

        *prevhdr = NEXTHDR_FRAGMENT;

        /*
         *      Keep copying data until we run out.
         */
        while (left > 0) {
                len = left;
                /* IF: it doesn't fit, use 'mtu' - the data space left */
                if (len > mtu)
                        len = mtu;
                /* IF: we are not sending up to and including the packet end
                   then align the next start on an eight byte boundary */
                if (len < left) {
                        len &= ~7;
                }
                /*
                 *      Allocate buffer.
                 */

                if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
                                      LL_RESERVED_SPACE(rt->u.dst.dev),
                                      GFP_ATOMIC)) == NULL) {
                        NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
                        IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
                        err = -ENOMEM;
                        goto fail;
                }

                /*
                 *      Set up data on packet
                 */

                ip6_copy_metadata(frag, skb);
                skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
                skb_put(frag, len + hlen + sizeof(struct frag_hdr));
                frag->nh.raw = frag->data;
                fh = (struct frag_hdr*)(frag->data + hlen);
                frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);

                /*
                 *      Charge the memory for the fragment to any owner
                 *      it might possess
                 */
                if (skb->sk)
                        skb_set_owner_w(frag, skb->sk);

                /*
                 *      Copy the packet header into the new buffer.
                 */
                memcpy(frag->nh.raw, skb->data, hlen);

                /*
                 *      Build fragment header.
                 */
                fh->nexthdr = nexthdr;
                fh->reserved = 0;
                if (!frag_id) {
                        ipv6_select_ident(skb, fh);
                        frag_id = fh->identification;
                } else
                        fh->identification = frag_id;

                /*
                 *      Copy a block of the IP datagram.
                 */
                if (skb_copy_bits(skb, ptr, frag->h.raw, len))
                        BUG();
                left -= len;

                fh->frag_off = htons(offset);
                if (left > 0)
                        fh->frag_off |= htons(IP6_MF);
                frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));

                ptr += len;
                offset += len;

                /*
                 *      Put this fragment into the sending queue.
                 */
                err = output(frag);
                if (err)
                        goto fail;

                IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);
        }
        kfree_skb(skb);
        IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
        return err;

fail:
        kfree_skb(skb);
        IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
        return err;
}

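/*
 * Returns nonzero when the cached route cannot be trusted for this
 * flow address: the route is not a host route pinning fl_addr, and the
 * saved last-used address does not match either.
 */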
static inline int ip6_rt_check(struct rt6key *rt_key,
                               struct in6_addr *fl_addr,
                               struct in6_addr *addr_cache)
{
        return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
                (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
                                          struct dst_entry *dst,
                                          struct flowi *fl)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct rt6_info *rt = (struct rt6_info *)dst;

        if (!dst)
                goto out;

        /* Yes, checking route validity in the not-connected case is
         * not very simple. Take into account that we do not support
         * routing by source, TOS, and MSG_DONTROUTE      --ANK (980726)
         *
         * 1. ip6_rt_check(): If the route was a host route, check that
         *    the cached destination is current. If it is a network
         *    route, we still may check its validity using the saved
         *    pointer to the last used address: daddr_cache. We do not
         *    want to save the whole address now (because the main
         *    consumer of this service is TCP, which does not have this
         *    problem), so the last trick works only on connected
         *    sockets.
         * 2. oif also should be the same.
         */
        if (ip6_rt_check(&rt->rt6i_dst, &fl->fl6_dst, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
            ip6_rt_check(&rt->rt6i_src, &fl->fl6_src, np->saddr_cache) ||
#endif
            (fl->oif && fl->oif != dst->dev->ifindex)) {
                dst_release(dst);
                dst = NULL;
        }

out:
        return dst;
}

static int ip6_dst_lookup_tail(struct sock *sk,
                               struct dst_entry **dst, struct flowi *fl)
{
        int err;

        if (*dst == NULL)
                *dst = ip6_route_output(sk, fl);

        if ((err = (*dst)->error))
                goto out_err_release;

        if (ipv6_addr_any(&fl->fl6_src)) {
                err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
                if (err)
                        goto out_err_release;
        }

        return 0;

out_err_release:
        dst_release(*dst);
        *dst = NULL;
        return err;
}

/**
 *      ip6_dst_lookup - perform route lookup on flow
 *      @sk: socket which provides route info
 *      @dst: pointer to dst_entry * for result
 *      @fl: flow to lookup
 *
 *      This function performs a route lookup on the given flow.
 *
 *      It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
        *dst = NULL;
        return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *      ip6_sk_dst_lookup - perform socket cached route lookup on flow
 *      @sk: socket which provides the dst cache and route info
 *      @dst: pointer to dst_entry * for result
 *      @fl: flow to lookup
 *
 *      This function performs a route lookup on the given flow with the
 *      possibility of using the cached route in the socket if it is valid.
 *      It will take the socket dst lock when operating on the dst cache.
 *      As a result, this function can only be used in process context.
 *
 *      It returns zero on success, or a standard errno code on error.
 */
int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
        *dst = NULL;
        if (sk) {
                *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
                *dst = ip6_sk_dst_check(sk, *dst, fl);
        }

        return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);

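/*
 * UFO path: queue the whole datagram as one large skb and let the
 * device segment it.  gso_size records the payload carried per
 * on-wire fragment, and the fragment ID is selected up front so all
 * segments produced by the device share it.
 */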
static inline int ip6_ufo_append_data(struct sock *sk,
                        int getfrag(void *from, char *to, int offset, int len,
                        int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
                        int transhdrlen, int mtu, unsigned int flags)
{
        struct sk_buff *skb;
        int err;

        /* The network device supports UDP large send offload, so create
         * one single skb packet containing the complete UDP datagram.
         */
        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);
                if (skb == NULL)
                        return -ENOMEM;

                /* reserve space for Hardware header */
                skb_reserve(skb, hh_len);

                /* create space for UDP/IP header */
                skb_put(skb, fragheaderlen + transhdrlen);

                /* initialize network header pointer */
                skb->nh.raw = skb->data;

                /* initialize protocol header pointer */
                skb->h.raw = skb->data + fragheaderlen;

                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
                sk->sk_sndmsg_off = 0;
        }

        err = skb_append_datato_frags(sk, skb, getfrag, from,
                                      (length - transhdrlen));
        if (!err) {
                struct frag_hdr fhdr;

                /* specify the length of each IP datagram fragment */
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
                                            sizeof(struct frag_hdr);
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                ipv6_select_ident(skb, &fhdr);
                skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
                __skb_queue_tail(&sk->sk_write_queue, skb);

                return 0;
        }
        /* There is not enough support to do UDP LSO,
         * so follow the normal path.
         */
        kfree_skb(skb);

        return err;
}

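/*
 * Append data to the pending (corked) output queue of @sk.  The first
 * call records the cork state (options, route, hop limit, fragment
 * size); subsequent calls reuse it.  Each queued skb is sized to a
 * fragment boundary so ip6_fragment can later take its fast path.
 */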
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
        int offset, int len, int odd, struct sk_buff *skb),
        void *from, int length, int transhdrlen,
        int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
        struct rt6_info *rt, unsigned int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *skb;
        unsigned int maxfraglen, fragheaderlen;
        int exthdrlen;
        int hh_len;
        int mtu;
        int copy;
        int err;
        int offset = 0;
        int csummode = CHECKSUM_NONE;

        if (flags&MSG_PROBE)
                return 0;
        if (skb_queue_empty(&sk->sk_write_queue)) {
                /*
                 * setup for corking
                 */
                if (opt) {
                        if (np->cork.opt == NULL) {
                                np->cork.opt = kmalloc(opt->tot_len,
                                                       sk->sk_allocation);
                                if (unlikely(np->cork.opt == NULL))
                                        return -ENOBUFS;
                        } else if (np->cork.opt->tot_len < opt->tot_len) {
                                printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
                                return -EINVAL;
                        }
                        memcpy(np->cork.opt, opt, opt->tot_len);
                        inet->cork.flags |= IPCORK_OPT;
                        /* need source address above miyazawa */
                }
                dst_hold(&rt->u.dst);
                np->cork.rt = rt;
                inet->cork.fl = *fl;
                np->cork.hop_limit = hlimit;
                np->cork.tclass = tclass;
                mtu = dst_mtu(rt->u.dst.path);
                if (np->frag_size < mtu) {
                        if (np->frag_size)
                                mtu = np->frag_size;
                }
                inet->cork.fragsize = mtu;
                if (dst_allfrag(rt->u.dst.path))
                        inet->cork.flags |= IPCORK_ALLFRAG;
                inet->cork.length = 0;
                sk->sk_sndmsg_page = NULL;
                sk->sk_sndmsg_off = 0;
                exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
                length += exthdrlen;
                transhdrlen += exthdrlen;
        } else {
                rt = np->cork.rt;
                fl = &inet->cork.fl;
                if (inet->cork.flags & IPCORK_OPT)
                        opt = np->cork.opt;
                transhdrlen = 0;
                exthdrlen = 0;
                mtu = inet->cork.fragsize;
        }

        hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

        fragheaderlen = sizeof(struct ipv6hdr) + rt->u.dst.nfheader_len +
                        (opt ? opt->opt_nflen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
                     sizeof(struct frag_hdr);

        if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
                if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
                        ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
                        return -EMSGSIZE;
                }
        }

        /*
         * Let's try using as much space as possible.
         * Use MTU if total length of the message fits into the MTU.
         * Otherwise, we need to reserve fragment header and
         * fragment alignment (= 8-15 octets, in total).
         *
         * Note that we may need to "move" the data from the tail
         * of the buffer to the new fragment when we split
         * the message.
         *
         * FIXME: It may be fragmented into multiple chunks
         *        at once if non-fragmentable extension headers
         *        are too large.
         * --yoshfuji
         */

        inet->cork.length += length;
        if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
            (rt->u.dst.dev->features & NETIF_F_UFO)) {

                err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
                                          fragheaderlen, transhdrlen, mtu,
                                          flags);
                if (err)
                        goto error;
                return 0;
        }

        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
                goto alloc_new_skb;

        while (length > 0) {
                /* Check if the remaining data fits into current packet. */
                copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
                if (copy < length)
                        copy = maxfraglen - skb->len;

                if (copy <= 0) {
                        char *data;
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen;
                        struct sk_buff *skb_prev;
alloc_new_skb:
                        skb_prev = skb;

                        /* There's no room in the current skb */
                        if (skb_prev)
                                fraggap = skb_prev->len - maxfraglen;
                        else
                                fraggap = 0;

                        /*
                         * If remaining data exceeds the mtu,
                         * we know we need more fragment(s).
                         */
                        datalen = length + fraggap;
                        if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
                                datalen = maxfraglen - fragheaderlen;

                        fraglen = datalen + fragheaderlen;
                        if ((flags & MSG_MORE) &&
                            !(rt->u.dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
                        else
                                alloclen = datalen + fragheaderlen;

                        /*
                         * The last fragment gets additional space at tail.
                         * Note: we overallocate on fragments with MSG_MORE
                         * because we have no idea if we're the last one.
                         */
                        if (datalen == length + fraggap)
                                alloclen += rt->u.dst.trailer_len;

                        /*
                         * We just reserve space for fragment header.
                         * Note: this may be overallocation if the message
                         * (without MSG_MORE) fits into the MTU.
                         */
                        alloclen += sizeof(struct frag_hdr);

                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk,
                                                alloclen + hh_len,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (atomic_read(&sk->sk_wmem_alloc) <=
                                    2 * sk->sk_sndbuf)
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len, 1,
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
                        }
                        if (skb == NULL)
                                goto error;
                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        /* reserve for fragmentation */
                        skb_reserve(skb, hh_len + sizeof(struct frag_hdr));

                        /*
                         *      Find where to start putting bytes
                         */
                        data = skb_put(skb, fraglen);
                        skb->nh.raw = data + exthdrlen;
                        data += fragheaderlen;
                        skb->h.raw = data + exthdrlen;

                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data + transhdrlen, fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                data += fraggap;
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }
                        copy = datalen - transhdrlen - fraggap;
                        if (copy < 0) {
                                err = -EINVAL;
                                kfree_skb(skb);
                                goto error;
                        } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
                                err = -EFAULT;
                                kfree_skb(skb);
                                goto error;
                        }

                        offset += copy;
                        length -= datalen - fraggap;
                        transhdrlen = 0;
                        exthdrlen = 0;
                        csummode = CHECKSUM_NONE;

                        /*
                         * Put the packet on the pending queue
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                if (copy > length)
                        copy = length;

                if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
                        unsigned int off;

                        off = skb->len;
                        if (getfrag(from, skb_put(skb, copy),
                                                offset, copy, off, skb) < 0) {
                                __skb_trim(skb, off);
                                err = -EFAULT;
                                goto error;
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
                        struct page *page = sk->sk_sndmsg_page;
                        int off = sk->sk_sndmsg_off;
                        unsigned int left;

                        if (page && (left = PAGE_SIZE - off) > 0) {
                                if (copy >= left)
                                        copy = left;
                                if (page != frag->page) {
                                        if (i == MAX_SKB_FRAGS) {
                                                err = -EMSGSIZE;
                                                goto error;
                                        }
                                        get_page(page);
                                        skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
                                        frag = &skb_shinfo(skb)->frags[i];
                                }
                        } else if (i < MAX_SKB_FRAGS) {
                                if (copy > PAGE_SIZE)
                                        copy = PAGE_SIZE;
                                page = alloc_pages(sk->sk_allocation, 0);
                                if (page == NULL) {
                                        err = -ENOMEM;
                                        goto error;
                                }
                                sk->sk_sndmsg_page = page;
                                sk->sk_sndmsg_off = 0;

                                skb_fill_page_desc(skb, i, page, 0, 0);
                                frag = &skb_shinfo(skb)->frags[i];
                                skb->truesize += PAGE_SIZE;
                                atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
                        } else {
                                err = -EMSGSIZE;
                                goto error;
                        }
                        if (getfrag(from, page_address(frag->page) +
                                    frag->page_offset + frag->size,
                                    offset, copy, skb->len, skb) < 0) {
                                err = -EFAULT;
                                goto error;
                        }
                        sk->sk_sndmsg_off += copy;
                        frag->size += copy;
                        skb->len += copy;
                        skb->data_len += copy;
                }
                offset += copy;
                length -= copy;
        }
        return 0;
error:
        inet->cork.length -= length;
        IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
        return err;
}

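/*
 * Splice all skbs queued by ip6_append_data into one packet (the tail
 * skbs become a frag_list), push the corked extension headers and the
 * IPv6 header, then send through NF_IP6_LOCAL_OUT.
 */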
int ip6_push_pending_frames(struct sock *sk)
{
        struct sk_buff *skb, *tmp_skb;
        struct sk_buff **tail_skb;
        struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6hdr *hdr;
        struct ipv6_txoptions *opt = np->cork.opt;
        struct rt6_info *rt = np->cork.rt;
        struct flowi *fl = &inet->cork.fl;
        unsigned char proto = fl->proto;
        int err = 0;

        if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);

        /* move skb->data to ip header from ext header */
        if (skb->data < skb->nh.raw)
                __skb_pull(skb, skb->nh.raw - skb->data);
        while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
                __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
                *tail_skb = tmp_skb;
                tail_skb = &(tmp_skb->next);
                skb->len += tmp_skb->len;
                skb->data_len += tmp_skb->len;
                skb->truesize += tmp_skb->truesize;
                __sock_put(tmp_skb->sk);
                tmp_skb->destructor = NULL;
                tmp_skb->sk = NULL;
        }

        ipv6_addr_copy(final_dst, &fl->fl6_dst);
        __skb_pull(skb, skb->h.raw - skb->nh.raw);
        if (opt && opt->opt_flen)
                ipv6_push_frag_opts(skb, opt, &proto);
        if (opt && opt->opt_nflen)
                ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

        skb->nh.ipv6h = hdr = (struct ipv6hdr*) skb_push(skb, sizeof(struct ipv6hdr));

        *(u32*)hdr = fl->fl6_flowlabel |
                     htonl(0x60000000 | ((int)np->cork.tclass << 20));

        if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
                hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
        else
                hdr->payload_len = 0;
        hdr->hop_limit = np->cork.hop_limit;
        hdr->nexthdr = proto;
        ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
        ipv6_addr_copy(&hdr->daddr, final_dst);

        skb->priority = sk->sk_priority;

        skb->dst = dst_clone(&rt->u.dst);
        IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
        err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
        if (err) {
                if (err > 0)
                        err = np->recverr ? net_xmit_errno(err) : 0;
                if (err)
                        goto error;
        }

out:
        inet->cork.flags &= ~IPCORK_OPT;
        kfree(np->cork.opt);
        np->cork.opt = NULL;
        if (np->cork.rt) {
                dst_release(&np->cork.rt->u.dst);
                np->cork.rt = NULL;
                inet->cork.flags &= ~IPCORK_ALLFRAG;
        }
        memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
        return err;
error:
        goto out;
}

void ip6_flush_pending_frames(struct sock *sk)
{
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *skb;

        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
                IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
                kfree_skb(skb);
        }

        inet->cork.flags &= ~IPCORK_OPT;

        kfree(np->cork.opt);
        np->cork.opt = NULL;
        if (np->cork.rt) {
                dst_release(&np->cork.rt->u.dst);
                np->cork.rt = NULL;
                inet->cork.flags &= ~IPCORK_ALLFRAG;
        }
        memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}