net: return operator cleanup
[linux-3.10.git] / net / ipv6 / tcp_ipv6.c
1 /*
2  *      TCP over IPv6
3  *      Linux INET6 implementation
4  *
5  *      Authors:
6  *      Pedro Roque             <roque@di.fc.ul.pt>
7  *
8  *      Based on:
9  *      linux/net/ipv4/tcp.c
10  *      linux/net/ipv4/tcp_input.c
11  *      linux/net/ipv4/tcp_output.c
12  *
13  *      Fixes:
14  *      Hideaki YOSHIFUJI       :       sin6_scope_id support
15  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
16  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
17  *                                      a single port at the same time.
18  *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
19  *
20  *      This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
64
65 #include <asm/uaccess.h>
66
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
69
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
72
73 static void     tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
74 static void     tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
75                                       struct request_sock *req);
76
77 static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78 static void     __tcp_v6_send_check(struct sk_buff *skb,
79                                     struct in6_addr *saddr,
80                                     struct in6_addr *daddr);
81
static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
/*
 * Stub used when TCP-MD5 support is compiled out: callers treat a NULL
 * return as "no MD5 key configured for this peer", so all MD5 branches
 * become dead code.
 */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
                                                   struct in6_addr *addr)
{
        return NULL;
}
#endif
94
95 static void tcp_v6_hash(struct sock *sk)
96 {
97         if (sk->sk_state != TCP_CLOSE) {
98                 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
99                         tcp_prot.hash(sk);
100                         return;
101                 }
102                 local_bh_disable();
103                 __inet6_hash(sk, NULL);
104                 local_bh_enable();
105         }
106 }
107
/*
 * Compute the final TCP checksum for an IPv6 segment: folds the IPv6
 * pseudo-header (saddr, daddr, len, IPPROTO_TCP) into @base, which is
 * the partial checksum over the TCP header and payload.
 */
static __inline__ __sum16 tcp_v6_check(int len,
                                   struct in6_addr *saddr,
                                   struct in6_addr *daddr,
                                   __wsum base)
{
        return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
115
116 static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
117 {
118         return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
119                                             ipv6_hdr(skb)->saddr.s6_addr32,
120                                             tcp_hdr(skb)->dest,
121                                             tcp_hdr(skb)->source);
122 }
123
/*
 * connect() handler for IPv6 TCP sockets.
 *
 * Validates the sockaddr, resolves flow labels and link-local scope,
 * hands v4-mapped destinations off to tcp_v4_connect(), otherwise
 * routes the flow, picks a source address, hashes the socket and sends
 * the SYN.  Returns 0 on success or a negative errno; on failure the
 * destination port and route capabilities are cleared.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                          int addr_len)
{
        struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
        struct inet_sock *inet = inet_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p, final;
        struct flowi fl;
        struct dst_entry *dst;
        int addr_type;
        int err;

        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;

        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;

        memset(&fl, 0, sizeof(fl));

        /* If the app sends flow info, honour its flow label; a label the
         * socket has no lease on is rejected. */
        if (np->sndflow) {
                fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
                IP6_ECN_flow_init(fl.fl6_flowlabel);
                if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
                        struct ip6_flowlabel *flowlabel;
                        flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
                        /* The flow label lease fixes the destination. */
                        ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
                        fl6_sock_release(flowlabel);
                }
        }

        /*
         *      connect() to INADDR_ANY means loopback (BSD'ism).
         */

        if(ipv6_addr_any(&usin->sin6_addr))
                usin->sin6_addr.s6_addr[15] = 0x1;

        addr_type = ipv6_addr_type(&usin->sin6_addr);

        if(addr_type & IPV6_ADDR_MULTICAST)
                return -ENETUNREACH;

        if (addr_type&IPV6_ADDR_LINKLOCAL) {
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    usin->sin6_scope_id) {
                        /* If interface is set while binding, indices
                         * must coincide.
                         */
                        if (sk->sk_bound_dev_if &&
                            sk->sk_bound_dev_if != usin->sin6_scope_id)
                                return -EINVAL;

                        sk->sk_bound_dev_if = usin->sin6_scope_id;
                }

                /* Connect to link-local address requires an interface */
                if (!sk->sk_bound_dev_if)
                        return -EINVAL;
        }

        /* Destination changed since the last connect on this socket:
         * drop cached PAWS timestamps and the write sequence so state
         * from the old peer is not reused against the new one. */
        if (tp->rx_opt.ts_recent_stamp &&
            !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
                tp->rx_opt.ts_recent = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                tp->write_seq = 0;
        }

        ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
        np->flow_label = fl.fl6_flowlabel;

        /*
         *      TCP over IPv4
         */

        if (addr_type == IPV6_ADDR_MAPPED) {
                u32 exthdrlen = icsk->icsk_ext_hdr_len;
                struct sockaddr_in sin;

                SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

                if (__ipv6_only_sock(sk))
                        return -ENETUNREACH;

                sin.sin_family = AF_INET;
                sin.sin_port = usin->sin6_port;
                sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

                /* Switch the socket onto the IPv4 operation vectors and
                 * let tcp_v4_connect() do the work; undo on failure. */
                icsk->icsk_af_ops = &ipv6_mapped;
                sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

                err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

                if (err) {
                        icsk->icsk_ext_hdr_len = exthdrlen;
                        icsk->icsk_af_ops = &ipv6_specific;
                        sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                        tp->af_specific = &tcp_sock_ipv6_specific;
#endif
                        goto failure;
                } else {
                        /* Mirror the chosen IPv4 addresses as v4-mapped
                         * IPv6 addresses on the inet6 side. */
                        ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
                        ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
                                               &np->rcv_saddr);
                }

                return err;
        }

        if (!ipv6_addr_any(&np->rcv_saddr))
                saddr = &np->rcv_saddr;

        fl.proto = IPPROTO_TCP;
        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
        ipv6_addr_copy(&fl.fl6_src,
                       (saddr ? saddr : &np->saddr));
        fl.oif = sk->sk_bound_dev_if;
        fl.mark = sk->sk_mark;
        fl.fl_ip_dport = usin->sin6_port;
        fl.fl_ip_sport = inet->inet_sport;

        /* A routing-header option may reroute via an intermediate hop;
         * @final remembers the true destination for after the lookup. */
        final_p = fl6_update_dst(&fl, np->opt, &final);

        security_sk_classify_flow(sk, &fl);

        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto failure;
        if (final_p)
                ipv6_addr_copy(&fl.fl6_dst, final_p);

        /* -EREMOTE means xfrm wants a larger-than-route lookup; retry
         * through the blackhole route path. */
        err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
        if (err < 0) {
                if (err == -EREMOTE)
                        err = ip6_dst_blackhole(sk, &dst, &fl);
                if (err < 0)
                        goto failure;
        }

        if (saddr == NULL) {
                saddr = &fl.fl6_src;
                ipv6_addr_copy(&np->rcv_saddr, saddr);
        }

        /* set the source address */
        ipv6_addr_copy(&np->saddr, saddr);
        inet->inet_rcv_saddr = LOOPBACK4_IPV6;

        sk->sk_gso_type = SKB_GSO_TCPV6;
        __ip6_dst_store(sk, dst, NULL, NULL);

        icsk->icsk_ext_hdr_len = 0;
        if (np->opt)
                icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
                                          np->opt->opt_nflen);

        tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

        inet->inet_dport = usin->sin6_port;

        tcp_set_state(sk, TCP_SYN_SENT);
        err = inet6_hash_connect(&tcp_death_row, sk);
        if (err)
                goto late_failure;

        if (!tp->write_seq)
                tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
                                                             np->daddr.s6_addr32,
                                                             inet->inet_sport,
                                                             inet->inet_dport);

        err = tcp_connect(sk);
        if (err)
                goto late_failure;

        return 0;

late_failure:
        tcp_set_state(sk, TCP_CLOSE);
        __sk_dst_reset(sk);
failure:
        inet->inet_dport = 0;
        sk->sk_route_caps = 0;
        return err;
}
317
/*
 * ICMPv6 error handler for TCP.
 *
 * @skb quotes the offending (our) segment starting at the IPv6 header;
 * @offset locates the quoted TCP header.  Looks up the matching socket,
 * validates the quoted sequence number, handles PKT_TOOBIG by updating
 * path MTU, drops pending request socks on hard errors, and otherwise
 * reports the converted errno to the socket owner.
 */
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                u8 type, u8 code, int offset, __be32 info)
{
        struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
        const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
        struct ipv6_pinfo *np;
        struct sock *sk;
        int err;
        struct tcp_sock *tp;
        __u32 seq;
        struct net *net = dev_net(skb->dev);

        sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
                        th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

        if (sk == NULL) {
                /* No such connection: count it as an ICMP in-error. */
                ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
                                   ICMP6_MIB_INERRORS);
                return;
        }

        /* TIME_WAIT minisocks ignore ICMP errors; just drop the ref. */
        if (sk->sk_state == TCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return;
        }

        bh_lock_sock(sk);
        /* The socket may be owned by the user; processing continues but
         * the contended case is accounted for. */
        if (sock_owned_by_user(sk))
                NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == TCP_CLOSE)
                goto out;

        /* TTL-security (min_hopcount) filter, cf. RFC 5082 GTSM. */
        if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
                NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
                goto out;
        }

        /* The quoted sequence must fall inside the unacked window,
         * else the ICMP is stale or forged. */
        tp = tcp_sk(sk);
        seq = ntohl(th->seq);
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, tp->snd_una, tp->snd_nxt)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        np = inet6_sk(sk);

        if (type == ICMPV6_PKT_TOOBIG) {
                struct dst_entry *dst = NULL;

                if (sock_owned_by_user(sk))
                        goto out;
                if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
                        goto out;

                /* icmp should have updated the destination cache entry */
                dst = __sk_dst_check(sk, np->dst_cookie);

                if (dst == NULL) {
                        struct inet_sock *inet = inet_sk(sk);
                        struct flowi fl;

                        /* BUGGG_FUTURE: Again, it is not clear how
                           to handle rthdr case. Ignore this complexity
                           for now.
                         */
                        memset(&fl, 0, sizeof(fl));
                        fl.proto = IPPROTO_TCP;
                        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
                        ipv6_addr_copy(&fl.fl6_src, &np->saddr);
                        fl.oif = sk->sk_bound_dev_if;
                        fl.mark = sk->sk_mark;
                        fl.fl_ip_dport = inet->inet_dport;
                        fl.fl_ip_sport = inet->inet_sport;
                        security_skb_classify_flow(skb, &fl);

                        if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }

                        if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }

                } else
                        dst_hold(dst);

                /* Shrink the MSS and retransmit only if the new path
                 * MTU is actually smaller than what we were using. */
                if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
                        tcp_sync_mss(sk, dst_mtu(dst));
                        tcp_simple_retransmit(sk);
                } /* else let the usual retransmit timer handle it */
                dst_release(dst);
                goto out;
        }

        icmpv6_err_convert(type, code, &err);

        /* Might be for an request_sock */
        switch (sk->sk_state) {
                struct request_sock *req, **prev;
        case TCP_LISTEN:
                if (sock_owned_by_user(sk))
                        goto out;

                req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
                                           &hdr->saddr, inet6_iif(skb));
                if (!req)
                        goto out;

                /* ICMPs are not backlogged, hence we cannot get
                 * an established socket here.
                 */
                WARN_ON(req->sk != NULL);

                if (seq != tcp_rsk(req)->snt_isn) {
                        NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }

                inet_csk_reqsk_queue_drop(sk, req, prev);
                goto out;

        case TCP_SYN_SENT:
        case TCP_SYN_RECV:  /* Cannot happen.
                               It can, it SYNs are crossed. --ANK */
                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;
                        sk->sk_error_report(sk);                /* Wake people up to see the error (see connect in sock.c) */

                        tcp_done(sk);
                } else
                        sk->sk_err_soft = err;
                goto out;
        }

        /* Established states: hard error only if the app asked for
         * error reporting (IPV6_RECVERR); otherwise soft error. */
        if (!sock_owned_by_user(sk) && np->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else
                sk->sk_err_soft = err;

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}
466
467
/*
 * Build and transmit a SYN|ACK answering the connection request @req.
 *
 * Routes via the addresses/ports saved in the request sock, applies the
 * listener's IPv6 tx options, and returns 0 on success or a negative
 * errno from routing/xfrm/transmit (via net_xmit_eval()).
 */
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
                              struct request_values *rvp)
{
        struct inet6_request_sock *treq = inet6_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff * skb;
        struct ipv6_txoptions *opt = NULL;
        struct in6_addr * final_p, final;
        struct flowi fl;
        struct dst_entry *dst;
        int err = -1;

        memset(&fl, 0, sizeof(fl));
        fl.proto = IPPROTO_TCP;
        ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
        ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
        fl.fl6_flowlabel = 0;
        fl.oif = treq->iif;
        fl.mark = sk->sk_mark;
        fl.fl_ip_dport = inet_rsk(req)->rmt_port;
        fl.fl_ip_sport = inet_rsk(req)->loc_port;
        security_req_classify_flow(req, &fl);

        opt = np->opt;
        /* A routing header may redirect the first hop; @final keeps the
         * real destination so it can be restored after the lookup. */
        final_p = fl6_update_dst(&fl, opt, &final);

        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto done;
        if (final_p)
                ipv6_addr_copy(&fl.fl6_dst, final_p);
        if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
                goto done;

        skb = tcp_make_synack(sk, dst, req, rvp);
        if (skb) {
                __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

                ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
                err = ip6_xmit(sk, skb, &fl, opt);
                err = net_xmit_eval(err);
        }

done:
        /* opt always aliases np->opt here, so this conditional free is a
         * safeguard that never fires in the current code. */
        if (opt && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        /* NOTE(review): on the early ip6_dst_lookup() failure path this
         * relies on the lookup having NULLed dst — confirm against
         * ip6_dst_lookup()'s contract. */
        dst_release(dst);
        return err;
}
517
/*
 * Retransmit a SYN|ACK for @req: account the retransmission in the TCP
 * MIB, then reuse the normal synack transmit path.
 */
static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
                             struct request_values *rvp)
{
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
        return tcp_v6_send_synack(sk, req, rvp);
}
524
525 static inline void syn_flood_warning(struct sk_buff *skb)
526 {
527 #ifdef CONFIG_SYN_COOKIES
528         if (sysctl_tcp_syncookies)
529                 printk(KERN_INFO
530                        "TCPv6: Possible SYN flooding on port %d. "
531                        "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
532         else
533 #endif
534                 printk(KERN_INFO
535                        "TCPv6: Possible SYN flooding on port %d. "
536                        "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
537 }
538
/* Free the cached IPv6 rx-options skb attached to a dying request sock. */
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
        kfree_skb(inet6_rsk(req)->pktopts);
}
543
544 #ifdef CONFIG_TCP_MD5SIG
545 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
546                                                    struct in6_addr *addr)
547 {
548         struct tcp_sock *tp = tcp_sk(sk);
549         int i;
550
551         BUG_ON(tp == NULL);
552
553         if (!tp->md5sig_info || !tp->md5sig_info->entries6)
554                 return NULL;
555
556         for (i = 0; i < tp->md5sig_info->entries6; i++) {
557                 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
558                         return &tp->md5sig_info->keys6[i].base;
559         }
560         return NULL;
561 }
562
/* tcp_sock_af_ops hook: key for the peer that @addr_sk is connected to. */
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
                                                struct sock *addr_sk)
{
        return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}
568
/* Key for the remote address of a pending connection request. */
static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
                                                      struct request_sock *req)
{
        return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}
574
/*
 * Install (or replace) MD5 key @newkey for peer @peer on socket @sk.
 *
 * Takes ownership of @newkey: it is linked into the key list on success
 * and kfree()d on every failure path.  Returns 0 or -ENOMEM.
 */
static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
                             char *newkey, u8 newkeylen)
{
        /* Add key to the list */
        struct tcp_md5sig_key *key;
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp6_md5sig_key *keys;

        key = tcp_v6_md5_do_lookup(sk, peer);
        if (key) {
                /* modify existing entry - just update that one */
                kfree(key->key);
                key->key = newkey;
                key->keylen = newkeylen;
        } else {
                /* reallocate new list if current one is full. */
                if (!tp->md5sig_info) {
                        tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
                        if (!tp->md5sig_info) {
                                kfree(newkey);
                                return -ENOMEM;
                        }
                        /* MD5 signing is incompatible with GSO. */
                        sk_nocaps_add(sk, NETIF_F_GSO_MASK);
                }
                if (tcp_alloc_md5sig_pool(sk) == NULL) {
                        kfree(newkey);
                        return -ENOMEM;
                }
                if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
                        /* Grow the array by exactly one slot, copying the
                         * existing entries into the new allocation. */
                        keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
                                       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

                        if (!keys) {
                                tcp_free_md5sig_pool();
                                kfree(newkey);
                                return -ENOMEM;
                        }

                        if (tp->md5sig_info->entries6)
                                memmove(keys, tp->md5sig_info->keys6,
                                        (sizeof (tp->md5sig_info->keys6[0]) *
                                         tp->md5sig_info->entries6));

                        kfree(tp->md5sig_info->keys6);
                        tp->md5sig_info->keys6 = keys;
                        tp->md5sig_info->alloced6++;
                }

                ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
                               peer);
                tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
                tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

                tp->md5sig_info->entries6++;
        }
        return 0;
}
632
/* tcp_sock_af_ops hook: add an MD5 key for the peer of @addr_sk. */
static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
                               u8 *newkey, __u8 newkeylen)
{
        return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
                                 newkey, newkeylen);
}
639
/*
 * Remove the MD5 key for peer @peer from @sk's IPv6 key list.
 *
 * Frees the key material, compacts the array (or frees it entirely when
 * it becomes empty), and drops the md5sig pool reference.  Returns 0 on
 * success, -ENOENT when no key for @peer exists.
 */
static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int i;

        for (i = 0; i < tp->md5sig_info->entries6; i++) {
                if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
                        /* Free the key */
                        kfree(tp->md5sig_info->keys6[i].base.key);
                        tp->md5sig_info->entries6--;

                        if (tp->md5sig_info->entries6 == 0) {
                                kfree(tp->md5sig_info->keys6);
                                tp->md5sig_info->keys6 = NULL;
                                tp->md5sig_info->alloced6 = 0;
                        } else {
                                /* shrink the database */
                                if (tp->md5sig_info->entries6 != i)
                                        memmove(&tp->md5sig_info->keys6[i],
                                                &tp->md5sig_info->keys6[i+1],
                                                (tp->md5sig_info->entries6 - i)
                                                * sizeof (tp->md5sig_info->keys6[0]));
                        }
                        tcp_free_md5sig_pool();
                        return 0;
                }
        }
        return -ENOENT;
}
669
/*
 * Release every IPv6 and IPv4 MD5 key attached to @sk and drop the
 * md5sig pool references taken for them.  The md5sig_info structure
 * itself remains allocated, with both key arrays freed and zeroed.
 */
static void tcp_v6_clear_md5_list (struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int i;

        if (tp->md5sig_info->entries6) {
                for (i = 0; i < tp->md5sig_info->entries6; i++)
                        kfree(tp->md5sig_info->keys6[i].base.key);
                tp->md5sig_info->entries6 = 0;
                tcp_free_md5sig_pool();
        }

        kfree(tp->md5sig_info->keys6);
        tp->md5sig_info->keys6 = NULL;
        tp->md5sig_info->alloced6 = 0;

        /* A dual-stack socket may also hold IPv4 keys. */
        if (tp->md5sig_info->entries4) {
                for (i = 0; i < tp->md5sig_info->entries4; i++)
                        kfree(tp->md5sig_info->keys4[i].base.key);
                tp->md5sig_info->entries4 = 0;
                tcp_free_md5sig_pool();
        }

        kfree(tp->md5sig_info->keys4);
        tp->md5sig_info->keys4 = NULL;
        tp->md5sig_info->alloced4 = 0;
}
697
/*
 * setsockopt(TCP_MD5SIG) handler: copy a struct tcp_md5sig from
 * userspace and add (or, with tcpm_keylen == 0, delete) the key for the
 * given peer.  v4-mapped peers are routed to the IPv4 key table.
 * Returns 0 or a negative errno.
 */
static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
                                  int optlen)
{
        struct tcp_md5sig cmd;
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
        u8 *newkey;

        if (optlen < sizeof(cmd))
                return -EINVAL;

        if (copy_from_user(&cmd, optval, sizeof(cmd)))
                return -EFAULT;

        if (sin6->sin6_family != AF_INET6)
                return -EINVAL;

        /* Zero key length means "delete the key for this peer". */
        if (!cmd.tcpm_keylen) {
                if (!tcp_sk(sk)->md5sig_info)
                        return -ENOENT;
                if (ipv6_addr_v4mapped(&sin6->sin6_addr))
                        return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
                return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
        }

        if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
                return -EINVAL;

        if (!tcp_sk(sk)->md5sig_info) {
                struct tcp_sock *tp = tcp_sk(sk);
                struct tcp_md5sig_info *p;

                p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
                if (!p)
                        return -ENOMEM;

                tp->md5sig_info = p;
                /* MD5 signing is incompatible with GSO. */
                sk_nocaps_add(sk, NETIF_F_GSO_MASK);
        }

        /* Ownership of newkey passes to tcp_v{4,6}_md5_do_add(), which
         * free it on failure (verified for the v6 variant above;
         * presumably the v4 one matches — see tcp_ipv4.c). */
        newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
        if (!newkey)
                return -ENOMEM;
        if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
                return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
                                         newkey, cmd.tcpm_keylen);
        }
        return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}
746
/*
 * Feed the IPv6 pseudo-header (saddr, daddr, segment length @nbytes,
 * next-header = TCP; cf. RFC 2460) into the running MD5 hash in @hp.
 * Returns the crypto_hash_update() result (0 on success).
 */
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
                                        struct in6_addr *daddr,
                                        struct in6_addr *saddr, int nbytes)
{
        struct tcp6_pseudohdr *bp;
        struct scatterlist sg;

        bp = &hp->md5_blk.ip6;
        /* 1. TCP pseudo-header (RFC2460) */
        ipv6_addr_copy(&bp->saddr, saddr);
        ipv6_addr_copy(&bp->daddr, daddr);
        bp->protocol = cpu_to_be32(IPPROTO_TCP);
        bp->len = cpu_to_be32(nbytes);

        sg_init_one(&sg, bp, sizeof(*bp));
        return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
764
/*
 * Compute the TCP MD5 signature over the pseudo-header and the TCP
 * header (th->doff << 2 bytes — no payload) with @key.
 *
 * Writes the 16-byte digest into @md5_hash and returns 0; on any
 * crypto failure the digest is zeroed and 1 is returned.
 */
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
                               struct in6_addr *daddr, struct in6_addr *saddr,
                               struct tcphdr *th)
{
        struct tcp_md5sig_pool *hp;
        struct hash_desc *desc;

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        desc = &hp->md5_desc;

        if (crypto_hash_init(desc))
                goto clear_hash;
        if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_header(hp, th))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        if (crypto_hash_final(desc, md5_hash))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}
797
/*
 * Compute the TCP MD5 signature over pseudo-header, TCP header and
 * payload of @skb with @key.
 *
 * The addresses are taken from @sk if given, else from @req, else from
 * the skb's own IPv6 header.  Writes the 16-byte digest into @md5_hash
 * and returns 0; on any crypto failure the digest is zeroed and 1 is
 * returned.
 */
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
                               struct sock *sk, struct request_sock *req,
                               struct sk_buff *skb)
{
        struct in6_addr *saddr, *daddr;
        struct tcp_md5sig_pool *hp;
        struct hash_desc *desc;
        struct tcphdr *th = tcp_hdr(skb);

        if (sk) {
                saddr = &inet6_sk(sk)->saddr;
                daddr = &inet6_sk(sk)->daddr;
        } else if (req) {
                saddr = &inet6_rsk(req)->loc_addr;
                daddr = &inet6_rsk(req)->rmt_addr;
        } else {
                struct ipv6hdr *ip6h = ipv6_hdr(skb);
                saddr = &ip6h->saddr;
                daddr = &ip6h->daddr;
        }

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        desc = &hp->md5_desc;

        if (crypto_hash_init(desc))
                goto clear_hash;

        if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
                goto clear_hash;
        if (tcp_md5_hash_header(hp, th))
                goto clear_hash;
        if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        if (crypto_hash_final(desc, md5_hash))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}
847
848 static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
849 {
850         __u8 *hash_location = NULL;
851         struct tcp_md5sig_key *hash_expected;
852         struct ipv6hdr *ip6h = ipv6_hdr(skb);
853         struct tcphdr *th = tcp_hdr(skb);
854         int genhash;
855         u8 newhash[16];
856
857         hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
858         hash_location = tcp_parse_md5sig_option(th);
859
860         /* We've parsed the options - do we have a hash? */
861         if (!hash_expected && !hash_location)
862                 return 0;
863
864         if (hash_expected && !hash_location) {
865                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
866                 return 1;
867         }
868
869         if (!hash_expected && hash_location) {
870                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
871                 return 1;
872         }
873
874         /* check the signature */
875         genhash = tcp_v6_md5_hash_skb(newhash,
876                                       hash_expected,
877                                       NULL, NULL, skb);
878
879         if (genhash || memcmp(hash_location, newhash, 16) != 0) {
880                 if (net_ratelimit()) {
881                         printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
882                                genhash ? "failed" : "mismatch",
883                                &ip6h->saddr, ntohs(th->source),
884                                &ip6h->daddr, ntohs(th->dest));
885                 }
886                 return 1;
887         }
888         return 0;
889 }
890 #endif
891
/* Callbacks used by the generic request-sock machinery for embryonic
 * IPv6 TCP connections (SYN received, not yet accepted).
 */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
901
902 #ifdef CONFIG_TCP_MD5SIG
/* MD5 signature hooks attached to IPv6 request socks. */
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
907 #endif
908
/* Callbacks for IPv6 TCP sockets in TIME_WAIT / FIN_WAIT2 substates. */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
914
915 static void __tcp_v6_send_check(struct sk_buff *skb,
916                                 struct in6_addr *saddr, struct in6_addr *daddr)
917 {
918         struct tcphdr *th = tcp_hdr(skb);
919
920         if (skb->ip_summed == CHECKSUM_PARTIAL) {
921                 th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
922                 skb->csum_start = skb_transport_header(skb) - skb->head;
923                 skb->csum_offset = offsetof(struct tcphdr, check);
924         } else {
925                 th->check = tcp_v6_check(skb->len, saddr, daddr,
926                                          csum_partial(th, th->doff << 2,
927                                                       skb->csum));
928         }
929 }
930
931 static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
932 {
933         struct ipv6_pinfo *np = inet6_sk(sk);
934
935         __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
936 }
937
938 static int tcp_v6_gso_send_check(struct sk_buff *skb)
939 {
940         struct ipv6hdr *ipv6h;
941         struct tcphdr *th;
942
943         if (!pskb_may_pull(skb, sizeof(*th)))
944                 return -EINVAL;
945
946         ipv6h = ipv6_hdr(skb);
947         th = tcp_hdr(skb);
948
949         th->check = 0;
950         skb->ip_summed = CHECKSUM_PARTIAL;
951         __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
952         return 0;
953 }
954
955 static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
956                                          struct sk_buff *skb)
957 {
958         struct ipv6hdr *iph = skb_gro_network_header(skb);
959
960         switch (skb->ip_summed) {
961         case CHECKSUM_COMPLETE:
962                 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
963                                   skb->csum)) {
964                         skb->ip_summed = CHECKSUM_UNNECESSARY;
965                         break;
966                 }
967
968                 /* fall through */
969         case CHECKSUM_NONE:
970                 NAPI_GRO_CB(skb)->flush = 1;
971                 return NULL;
972         }
973
974         return tcp_gro_receive(head, skb);
975 }
976
977 static int tcp6_gro_complete(struct sk_buff *skb)
978 {
979         struct ipv6hdr *iph = ipv6_hdr(skb);
980         struct tcphdr *th = tcp_hdr(skb);
981
982         th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
983                                   &iph->saddr, &iph->daddr, 0);
984         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
985
986         return tcp_gro_complete(skb);
987 }
988
/* Send an unattached TCP reply (RST or pure ACK) mirroring @skb.
 *
 * The reply swaps the addresses and ports of the incoming segment and is
 * transmitted through the per-netns control socket.  @rst selects a RST;
 * otherwise a bare ACK is sent.  A timestamp option is appended when @ts
 * is non-zero and an MD5 signature option when @key is set.
 */
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi fl;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	/* Account for optional TCP options in the header length. */
	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	/* A RST replying to an ACK carries no ACK bit itself. */
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	/* Options start right after the fixed header. */
	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	/* Flow is the reverse of the received packet. */
	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl.fl6_src, &fl.fl6_dst);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	/* Pass the control socket to ip6_dst_lookup even for a RST;
	 * the underlying function uses it to retrieve the network
	 * namespace.
	 */
	if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
		if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
			skb_dst_set(buff, dst);
			ip6_xmit(ctl_sk, buff, &fl, NULL);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
			if (rst)
				TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
			return;
		}
	}

	/* Route or policy lookup failed: drop the reply. */
	kfree_skb(buff);
}
1080
/* Send a RST in reply to @skb (RFC 793 reset generation).
 *
 * Never resets a RST, and only replies to unicast destinations.  If the
 * incoming segment carried an ACK, the RST's sequence number mirrors it;
 * otherwise the RST acknowledges everything the segment consumed.
 */
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	/* Sign the RST when a key exists for the peer. */
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		/* SYN and FIN each consume one sequence number. */
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}
1106
/* Send an unattached pure ACK mirroring @skb (timewait and request-sock
 * replies); thin wrapper around tcp_v6_send_response() with rst == 0.
 */
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}
1112
/* ACK a segment received for a TIME_WAIT socket using the state saved in
 * the timewait sock, then drop the reference taken by the caller.
 */
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}
1124
1125 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1126                                   struct request_sock *req)
1127 {
1128         tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1129                         tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
1130 }
1131
1132
1133 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1134 {
1135         struct request_sock *req, **prev;
1136         const struct tcphdr *th = tcp_hdr(skb);
1137         struct sock *nsk;
1138
1139         /* Find possible connection requests. */
1140         req = inet6_csk_search_req(sk, &prev, th->source,
1141                                    &ipv6_hdr(skb)->saddr,
1142                                    &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1143         if (req)
1144                 return tcp_check_req(sk, skb, req, prev);
1145
1146         nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1147                         &ipv6_hdr(skb)->saddr, th->source,
1148                         &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1149
1150         if (nsk) {
1151                 if (nsk->sk_state != TCP_TIME_WAIT) {
1152                         bh_lock_sock(nsk);
1153                         return nsk;
1154                 }
1155                 inet_twsk_put(inet_twsk(nsk));
1156                 return NULL;
1157         }
1158
1159 #ifdef CONFIG_SYN_COOKIES
1160         if (!th->syn)
1161                 sk = cookie_v6_check(sk, skb);
1162 #endif
1163         return sk;
1164 }
1165
1166 /* FIXME: this is substantially similar to the ipv4 code.
1167  * Can some kind of merge be done? -- erics
1168  */
/* Handle a SYN arriving on a listening IPv6 socket: allocate and queue a
 * request sock, choose an ISN (or fall back to SYN cookies under flood)
 * and send the SYN-ACK.  v4-mapped traffic is diverted to
 * tcp_v4_conn_request().  Always returns 0 so no RST is sent on drop.
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/* Request queue full: use SYN cookies if enabled, else drop. */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	/* TCP cookie transactions: mix the initiator cookie with both
	 * addresses when the option is present and locally enabled.
	 */
	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (!isn) {
		/* Keep the SYN skb around if the listener asked for
		 * received IPv6 packet options.
		 */
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);
		if (!want_cookie) {
			isn = tcp_v6_init_sequence(skb);
		} else {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
		}
	}
	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	/* Under SYN cookies the request sock is not kept: free it right
	 * after the SYN-ACK has been sent.
	 */
	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}
1312
/* Create the child socket for a completed handshake (third ACK arrived).
 *
 * v4-mapped connections are built by tcp_v4_syn_recv_sock() and then
 * get their IPv6-specific fields patched up; native IPv6 connections
 * are created from scratch, including route, pktoptions, IPv6 options
 * and (optionally) the peer's MD5 key.  Returns the new socket or NULL
 * on failure (queue overflow, route/alloc failure).
 */
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		/* Store the IPv4 addresses in v4-mapped IPv6 form. */
		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		/* The child speaks IPv4 from here on. */
		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	/* Route the child if the SYN-ACK's route was not handed in. */
	if (dst == NULL) {
		struct in6_addr *final_p, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		final_p = fl6_update_dst(&fl, opt, &final);
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.mark = sk->sk_mark;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_rsk(req)->loc_port;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet6_hash(newsk, NULL);
	__inet_inherit_port(sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	/* Free a duplicated options block we would otherwise leak. */
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}
1514
1515 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1516 {
1517         if (skb->ip_summed == CHECKSUM_COMPLETE) {
1518                 if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1519                                   &ipv6_hdr(skb)->daddr, skb->csum)) {
1520                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1521                         return 0;
1522                 }
1523         }
1524
1525         skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1526                                               &ipv6_hdr(skb)->saddr,
1527                                               &ipv6_hdr(skb)->daddr, 0));
1528
1529         if (skb->len <= 76) {
1530                 return __skb_checksum_complete(skb);
1531         }
1532         return 0;
1533 }
1534
1535 /* The socket must have it's spinlock held when we get
1536  * here.
1537  *
1538  * We have a potential double-lock case here, so even when
1539  * doing backlog processing we use the BH locking scheme.
1540  * This is because we cannot sleep with the original spinlock
1541  * held.
1542  */
1543 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1544 {
1545         struct ipv6_pinfo *np = inet6_sk(sk);
1546         struct tcp_sock *tp;
1547         struct sk_buff *opt_skb = NULL;
1548
1549         /* Imagine: socket is IPv6. IPv4 packet arrives,
1550            goes to IPv4 receive handler and backlogged.
1551            From backlog it always goes here. Kerboom...
1552            Fortunately, tcp_rcv_established and rcv_established
1553            handle them correctly, but it is not case with
1554            tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1555          */
1556
1557         if (skb->protocol == htons(ETH_P_IP))
1558                 return tcp_v4_do_rcv(sk, skb);
1559
1560 #ifdef CONFIG_TCP_MD5SIG
1561         if (tcp_v6_inbound_md5_hash (sk, skb))
1562                 goto discard;
1563 #endif
1564
1565         if (sk_filter(sk, skb))
1566                 goto discard;
1567
1568         /*
1569          *      socket locking is here for SMP purposes as backlog rcv
1570          *      is currently called with bh processing disabled.
1571          */
1572
1573         /* Do Stevens' IPV6_PKTOPTIONS.
1574
1575            Yes, guys, it is the only place in our code, where we
1576            may make it not affecting IPv4.
1577            The rest of code is protocol independent,
1578            and I do not like idea to uglify IPv4.
1579
1580            Actually, all the idea behind IPV6_PKTOPTIONS
1581            looks not very well thought. For now we latch
1582            options, received in the last packet, enqueued
1583            by tcp. Feel free to propose better solution.
1584                                                --ANK (980728)
1585          */
1586         if (np->rxopt.all)
1587                 opt_skb = skb_clone(skb, GFP_ATOMIC);
1588
1589         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1590                 TCP_CHECK_TIMER(sk);
1591                 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1592                         goto reset;
1593                 TCP_CHECK_TIMER(sk);
1594                 if (opt_skb)
1595                         goto ipv6_pktoptions;
1596                 return 0;
1597         }
1598
1599         if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1600                 goto csum_err;
1601
1602         if (sk->sk_state == TCP_LISTEN) {
1603                 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1604                 if (!nsk)
1605                         goto discard;
1606
1607                 /*
1608                  * Queue it on the new socket if the new socket is active,
1609                  * otherwise we just shortcircuit this and continue with
1610                  * the new socket..
1611                  */
1612                 if(nsk != sk) {
1613                         if (tcp_child_process(sk, nsk, skb))
1614                                 goto reset;
1615                         if (opt_skb)
1616                                 __kfree_skb(opt_skb);
1617                         return 0;
1618                 }
1619         }
1620
1621         TCP_CHECK_TIMER(sk);
1622         if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1623                 goto reset;
1624         TCP_CHECK_TIMER(sk);
1625         if (opt_skb)
1626                 goto ipv6_pktoptions;
1627         return 0;
1628
1629 reset:
1630         tcp_v6_send_reset(sk, skb);
1631 discard:
1632         if (opt_skb)
1633                 __kfree_skb(opt_skb);
1634         kfree_skb(skb);
1635         return 0;
1636 csum_err:
1637         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1638         goto discard;
1639
1640
1641 ipv6_pktoptions:
1642         /* Do you ask, what is it?
1643
1644            1. skb was enqueued by tcp.
1645            2. skb is added to tail of read queue, rather than out of order.
1646            3. socket is not in passive state.
1647            4. Finally, it really contains options, which user wants to receive.
1648          */
1649         tp = tcp_sk(sk);
1650         if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1651             !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1652                 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1653                         np->mcast_oif = inet6_iif(opt_skb);
1654                 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1655                         np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1656                 if (ipv6_opt_accepted(sk, opt_skb)) {
1657                         skb_set_owner_r(opt_skb, sk);
1658                         opt_skb = xchg(&np->pktoptions, opt_skb);
1659                 } else {
1660                         __kfree_skb(opt_skb);
1661                         opt_skb = xchg(&np->pktoptions, NULL);
1662                 }
1663         }
1664
1665         kfree_skb(opt_skb);
1666         return 0;
1667 }
1668
/*
 * Main receive entry point for IPv6 TCP, registered as the inet6
 * protocol handler for IPPROTO_TCP.
 *
 * Validates the TCP header and checksum, fills in the per-skb control
 * block, looks up the owning socket and delivers the segment to it:
 * directly via tcp_v6_do_rcv(), through the prequeue, or onto the
 * socket backlog when the socket is owned by a user context.
 * TIME_WAIT sockets and segments with no matching socket are handled
 * in the labelled tails below.
 *
 * Returns 0 when the skb has been consumed; -1 tells the caller the
 * packet must be resubmitted (ret from tcp_v6_do_rcv() is non-zero
 * for the IPv4-mapped handoff path).
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	/* Only packets addressed to this host are ours to process. */
	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	/* Make sure at least the fixed-size TCP header is linear. */
	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	/* doff is in 32-bit words; smaller than the base header is bogus. */
	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	/* Pull the full header including TCP options. */
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	/* Re-read header pointers: pskb_may_pull() may have moved data. */
	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	/* end_seq counts SYN and FIN as one sequence unit each. */
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	/* Generalized TTL security mechanism: drop segments whose hop
	 * limit is below the socket's configured minimum.
	 */
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			/* Try the prequeue first; process inline if it
			 * refuses the skb.
			 */
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		/* Backlog full: drop the segment and account it. */
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		/* Valid segment for a nonexistent connection: answer
		 * with a RST per RFC 793.
		 */
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		/* A new SYN may legitimately reopen the connection: if a
		 * listener exists, retire the timewait socket and restart
		 * processing with the listener.
		 */
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
1816
static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Peer timestamp caching is not implemented for IPv6 yet,
	 * so unconditionally report that nothing was remembered.
	 */
	return 0;
}
1822
/* Address-family operations for native IPv6 TCP sockets: dispatches
 * transmit, header rebuild, connection setup and sockopt handling to
 * the IPv6 implementations.
 */
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v6_remember_stamp,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1841
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 (RFC 2385) operations for native IPv6 sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1850
/*
 *	TCP over IPv4 via INET6 API
 */

/* Address-family operations installed on an AF_INET6 socket once it is
 * known to carry an IPv4-mapped connection: transmit and header
 * handling use the IPv4 paths, while sockopt handling stays IPv6.
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1873
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 operations for IPv4-mapped connections on AF_INET6 sockets:
 * hashing uses the IPv4 pseudo-header, key management stays v6.
 */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1882
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
/*
 * Per-socket initialization for AF_INET6 TCP sockets, called through
 * tcpv6_prot.init when the socket is created.  Sets up queues, timers,
 * RTO/cwnd defaults and installs the IPv6 af_ops.  Returns 0.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	/* Native IPv6 ops by default; switched to ipv6_mapped later if
	 * the socket connects to an IPv4-mapped address.
	 */
	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		/* Allocation failure is non-fatal: the socket simply
		 * runs without cookie transactions.
		 */
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}
1948
/*
 * Per-socket teardown for AF_INET6 TCP sockets: releases MD5 keys,
 * runs the shared TCP destroy path, then the inet6-specific cleanup.
 */
static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1959
1960 #ifdef CONFIG_PROC_FS
1961 /* Proc filesystem TCPv6 sock list dumping. */
/*
 * Emit one /proc/net/tcp6 line for a pending open request (SYN_RECV)
 * at slot @i, attributed to owner @uid.  The column layout must stay
 * byte-compatible with the established-socket and timewait printers.
 */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	/* Remaining lifetime of the request in jiffies, clamped at 0. */
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0,0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
1992
/*
 * Emit one /proc/net/tcp6 line for a full socket (listening or
 * established) at slot @i: addresses, state, queue depths, active
 * timer and congestion information.
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	/* Pick the timer code reported in the "tr" column:
	 * 1 = retransmit, 2 = keepalive, 4 = zero-window probe, 0 = none.
	 */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   /* rx_queue: listen backlog for listeners, unread bytes otherwise */
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   /* -1 marks "still in initial slow start" */
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}
2048
/*
 * Emit one /proc/net/tcp6 line for a TIME_WAIT minisocket at slot @i.
 * Most columns are fixed zeros since a timewait socket carries no
 * queues or congestion state; timer code 3 marks the timewait timer.
 */
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	/* Remaining timewait lifetime in jiffies, clamped at 0. */
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
2077
2078 static int tcp6_seq_show(struct seq_file *seq, void *v)
2079 {
2080         struct tcp_iter_state *st;
2081
2082         if (v == SEQ_START_TOKEN) {
2083                 seq_puts(seq,
2084                          "  sl  "
2085                          "local_address                         "
2086                          "remote_address                        "
2087                          "st tx_queue rx_queue tr tm->when retrnsmt"
2088                          "   uid  timeout inode\n");
2089                 goto out;
2090         }
2091         st = seq->private;
2092
2093         switch (st->state) {
2094         case TCP_SEQ_STATE_LISTENING:
2095         case TCP_SEQ_STATE_ESTABLISHED:
2096                 get_tcp6_sock(seq, v, st->num);
2097                 break;
2098         case TCP_SEQ_STATE_OPENREQ:
2099                 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2100                 break;
2101         case TCP_SEQ_STATE_TIME_WAIT:
2102                 get_timewait6_sock(seq, v, st->num);
2103                 break;
2104         }
2105 out:
2106         return 0;
2107 }
2108
/* Descriptor tying the AF_INET6 socket family to the shared TCP
 * /proc seq_file machinery; unset fops/ops fields are filled in by
 * tcp_proc_register().
 */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
2119
2120 int __net_init tcp6_proc_init(struct net *net)
2121 {
2122         return tcp_proc_register(net, &tcp6_seq_afinfo);
2123 }
2124
/* Remove /proc/net/tcp6 for namespace @net; pairs with tcp6_proc_init(). */
void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
2129 #endif
2130
/* Transport protocol descriptor for AF_INET6 TCP sockets: mixes the
 * shared TCP implementation with the IPv6-specific init/destroy/rcv
 * hooks defined in this file.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
2171
/* inet6 layer handler for IPPROTO_TCP: receive/error entry points plus
 * GSO/GRO offload callbacks.
 */
static const struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check =	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
2181
/* Maps socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP) onto tcpv6_prot and
 * the inet6 stream socket operations.
 */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
2191
2192 static int __net_init tcpv6_net_init(struct net *net)
2193 {
2194         return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2195                                     SOCK_RAW, IPPROTO_TCP, net);
2196 }
2197
/* Per-namespace teardown: destroy the control socket made by
 * tcpv6_net_init().
 */
static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
2202
/* Batched namespace teardown: purge lingering IPv6 timewait sockets
 * for all namespaces being dismantled in this batch.
 */
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}
2207
/* Network-namespace lifecycle hooks for IPv6 TCP. */
static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
2213
2214 int __init tcpv6_init(void)
2215 {
2216         int ret;
2217
2218         ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2219         if (ret)
2220                 goto out;
2221
2222         /* register inet6 protocol */
2223         ret = inet6_register_protosw(&tcpv6_protosw);
2224         if (ret)
2225                 goto out_tcpv6_protocol;
2226
2227         ret = register_pernet_subsys(&tcpv6_net_ops);
2228         if (ret)
2229                 goto out_tcpv6_protosw;
2230 out:
2231         return ret;
2232
2233 out_tcpv6_protocol:
2234         inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2235 out_tcpv6_protosw:
2236         inet6_unregister_protosw(&tcpv6_protosw);
2237         goto out;
2238 }
2239
/*
 * Unregister IPv6 TCP from the inet6 stack, in strict reverse order of
 * the registrations performed by tcpv6_init().
 */
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}