net: Pass optional SKB and SK arguments to dst_ops->{update_pmtu,redirect}()
[linux-3.10.git] / net / ipv6 / tcp_ipv6.c
1 /*
2  *      TCP over IPv6
3  *      Linux INET6 implementation
4  *
5  *      Authors:
6  *      Pedro Roque             <roque@di.fc.ul.pt>
7  *
8  *      Based on:
9  *      linux/net/ipv4/tcp.c
10  *      linux/net/ipv4/tcp_input.c
11  *      linux/net/ipv4/tcp_output.c
12  *
13  *      Fixes:
14  *      Hideaki YOSHIFUJI       :       sin6_scope_id support
15  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
16  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
17  *                                      a single port at the same time.
18  *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
19  *
20  *      This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
64 #include <net/secure_seq.h>
65 #include <net/tcp_memcontrol.h>
66
67 #include <asm/uaccess.h>
68
69 #include <linux/proc_fs.h>
70 #include <linux/seq_file.h>
71
72 #include <linux/crypto.h>
73 #include <linux/scatterlist.h>
74
75 static void     tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
76 static void     tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
77                                       struct request_sock *req);
78
79 static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
80 static void     __tcp_v6_send_check(struct sk_buff *skb,
81                                     const struct in6_addr *saddr,
82                                     const struct in6_addr *daddr);
83
84 static const struct inet_connection_sock_af_ops ipv6_mapped;
85 static const struct inet_connection_sock_af_ops ipv6_specific;
86 #ifdef CONFIG_TCP_MD5SIG
87 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
88 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
89 #else
/* !CONFIG_TCP_MD5SIG stub: no MD5 keys can be configured, so every
 * lookup fails.  Lets callers test the result without #ifdefs.
 */
90 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
91                                                    const struct in6_addr *addr)
92 {
93         return NULL;
94 }
95 #endif
96
97 static void tcp_v6_hash(struct sock *sk)
98 {
99         if (sk->sk_state != TCP_CLOSE) {
100                 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
101                         tcp_prot.hash(sk);
102                         return;
103                 }
104                 local_bh_disable();
105                 __inet6_hash(sk, NULL);
106                 local_bh_enable();
107         }
108 }
109
110 static __inline__ __sum16 tcp_v6_check(int len,
111                                    const struct in6_addr *saddr,
112                                    const struct in6_addr *daddr,
113                                    __wsum base)
114 {
115         return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
116 }
117
118 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
119 {
120         return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
121                                             ipv6_hdr(skb)->saddr.s6_addr32,
122                                             tcp_hdr(skb)->dest,
123                                             tcp_hdr(skb)->source);
124 }
125
/*
 * tcp_v6_connect() - connect an AF_INET6 TCP socket to @uaddr.
 *
 * Validates the destination, resolves flow labels and scope ids,
 * handles v4-mapped destinations by switching the socket to the IPv4
 * ops and delegating to tcp_v4_connect(), otherwise routes the flow,
 * binds source/destination addresses, picks a port via
 * inet6_hash_connect() and sends the SYN with tcp_connect().
 *
 * Returns 0 on success or a negative errno; on late failure the socket
 * is moved back to TCP_CLOSE and its cached route is dropped.
 */
126 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
127                           int addr_len)
128 {
129         struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
130         struct inet_sock *inet = inet_sk(sk);
131         struct inet_connection_sock *icsk = inet_csk(sk);
132         struct ipv6_pinfo *np = inet6_sk(sk);
133         struct tcp_sock *tp = tcp_sk(sk);
134         struct in6_addr *saddr = NULL, *final_p, final;
135         struct rt6_info *rt;
136         struct flowi6 fl6;
137         struct dst_entry *dst;
138         int addr_type;
139         int err;
140
141         if (addr_len < SIN6_LEN_RFC2133)
142                 return -EINVAL;
143
144         if (usin->sin6_family != AF_INET6)
145                 return -EAFNOSUPPORT;
146
147         memset(&fl6, 0, sizeof(fl6));
148
            /* IPV6_FLOWINFO_SEND: honour a caller-supplied flow label; a
             * non-zero label must name an existing flow on this socket.
             */
149         if (np->sndflow) {
150                 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
151                 IP6_ECN_flow_init(fl6.flowlabel);
152                 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
153                         struct ip6_flowlabel *flowlabel;
154                         flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
155                         if (flowlabel == NULL)
156                                 return -EINVAL;
157                         usin->sin6_addr = flowlabel->dst;
158                         fl6_sock_release(flowlabel);
159                 }
160         }
161
162         /*
163          *      connect() to INADDR_ANY means loopback (BSD'ism).
164          */
165
166         if(ipv6_addr_any(&usin->sin6_addr))
167                 usin->sin6_addr.s6_addr[15] = 0x1;
168
169         addr_type = ipv6_addr_type(&usin->sin6_addr);
170
171         if(addr_type & IPV6_ADDR_MULTICAST)
172                 return -ENETUNREACH;
173
174         if (addr_type&IPV6_ADDR_LINKLOCAL) {
175                 if (addr_len >= sizeof(struct sockaddr_in6) &&
176                     usin->sin6_scope_id) {
177                         /* If interface is set while binding, indices
178                          * must coincide.
179                          */
180                         if (sk->sk_bound_dev_if &&
181                             sk->sk_bound_dev_if != usin->sin6_scope_id)
182                                 return -EINVAL;
183
184                         sk->sk_bound_dev_if = usin->sin6_scope_id;
185                 }
186
187                 /* Connect to link-local address requires an interface */
188                 if (!sk->sk_bound_dev_if)
189                         return -EINVAL;
190         }
191
            /* Reconnecting to a different peer: forget stale timestamp
             * state so PAWS does not reject the new connection.
             */
192         if (tp->rx_opt.ts_recent_stamp &&
193             !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
194                 tp->rx_opt.ts_recent = 0;
195                 tp->rx_opt.ts_recent_stamp = 0;
196                 tp->write_seq = 0;
197         }
198
199         np->daddr = usin->sin6_addr;
200         np->flow_label = fl6.flowlabel;
201
202         /*
203          *      TCP over IPv4
204          */
205
206         if (addr_type == IPV6_ADDR_MAPPED) {
207                 u32 exthdrlen = icsk->icsk_ext_hdr_len;
208                 struct sockaddr_in sin;
209
210                 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
211
212                 if (__ipv6_only_sock(sk))
213                         return -ENETUNREACH;
214
215                 sin.sin_family = AF_INET;
216                 sin.sin_port = usin->sin6_port;
217                 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
218
                    /* Switch to the v4-mapped ops before delegating so the
                     * IPv4 code paths are used for this socket from now on.
                     */
219                 icsk->icsk_af_ops = &ipv6_mapped;
220                 sk->sk_backlog_rcv = tcp_v4_do_rcv;
221 #ifdef CONFIG_TCP_MD5SIG
222                 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
223 #endif
224
225                 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
226
227                 if (err) {
                            /* Delegation failed: restore the IPv6 ops. */
228                         icsk->icsk_ext_hdr_len = exthdrlen;
229                         icsk->icsk_af_ops = &ipv6_specific;
230                         sk->sk_backlog_rcv = tcp_v6_do_rcv;
231 #ifdef CONFIG_TCP_MD5SIG
232                         tp->af_specific = &tcp_sock_ipv6_specific;
233 #endif
234                         goto failure;
235                 } else {
236                         ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
237                         ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
238                                                &np->rcv_saddr);
239                 }
240
241                 return err;
242         }
243
244         if (!ipv6_addr_any(&np->rcv_saddr))
245                 saddr = &np->rcv_saddr;
246
247         fl6.flowi6_proto = IPPROTO_TCP;
248         fl6.daddr = np->daddr;
249         fl6.saddr = saddr ? *saddr : np->saddr;
250         fl6.flowi6_oif = sk->sk_bound_dev_if;
251         fl6.flowi6_mark = sk->sk_mark;
252         fl6.fl6_dport = usin->sin6_port;
253         fl6.fl6_sport = inet->inet_sport;
254
255         final_p = fl6_update_dst(&fl6, np->opt, &final);
256
257         security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
258
259         dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
260         if (IS_ERR(dst)) {
261                 err = PTR_ERR(dst);
262                 goto failure;
263         }
264
            /* No source address bound yet: adopt the one routing chose. */
265         if (saddr == NULL) {
266                 saddr = &fl6.saddr;
267                 np->rcv_saddr = *saddr;
268         }
269
270         /* set the source address */
271         np->saddr = *saddr;
272         inet->inet_rcv_saddr = LOOPBACK4_IPV6;
273
274         sk->sk_gso_type = SKB_GSO_TCPV6;
275         __ip6_dst_store(sk, dst, NULL, NULL);
276
277         rt = (struct rt6_info *) dst;
278         if (tcp_death_row.sysctl_tw_recycle &&
279             !tp->rx_opt.ts_recent_stamp &&
280             ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
281                 tcp_fetch_timewait_stamp(sk, dst);
282
283         icsk->icsk_ext_hdr_len = 0;
284         if (np->opt)
285                 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
286                                           np->opt->opt_nflen);
287
288         tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
289
290         inet->inet_dport = usin->sin6_port;
291
292         tcp_set_state(sk, TCP_SYN_SENT);
293         err = inet6_hash_connect(&tcp_death_row, sk);
294         if (err)
295                 goto late_failure;
296
297         if (!tp->write_seq)
298                 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
299                                                              np->daddr.s6_addr32,
300                                                              inet->inet_sport,
301                                                              inet->inet_dport);
302
303         err = tcp_connect(sk);
304         if (err)
305                 goto late_failure;
306
307         return 0;
308
309 late_failure:
310         tcp_set_state(sk, TCP_CLOSE);
311         __sk_dst_reset(sk);
312 failure:
313         inet->inet_dport = 0;
314         sk->sk_route_caps = 0;
315         return err;
316 }
317
/*
 * tcp_v6_err() - ICMPv6 error handler for TCP.
 *
 * Looks up the socket the quoted TCP header belongs to, then acts on
 * the ICMPv6 @type: NDISC_REDIRECT updates the cached route,
 * ICMPV6_PKT_TOOBIG performs PMTU discovery, and other types are
 * converted to an errno and reported to the socket (or its pending
 * request sock).  Called in softirq context; the socket is bh-locked
 * and put before returning.
 */
318 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
319                 u8 type, u8 code, int offset, __be32 info)
320 {
321         const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
322         const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
323         struct ipv6_pinfo *np;
324         struct sock *sk;
325         int err;
326         struct tcp_sock *tp;
327         __u32 seq;
328         struct net *net = dev_net(skb->dev);
329
330         sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
331                         th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
332
333         if (sk == NULL) {
334                 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
335                                    ICMP6_MIB_INERRORS);
336                 return;
337         }
338
            /* Timewait sockets only need their reference dropped. */
339         if (sk->sk_state == TCP_TIME_WAIT) {
340                 inet_twsk_put(inet_twsk(sk));
341                 return;
342         }
343
344         bh_lock_sock(sk);
345         if (sock_owned_by_user(sk))
346                 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
347
348         if (sk->sk_state == TCP_CLOSE)
349                 goto out;
350
            /* IP_MINTTL-style protection: drop errors from too-far hosts. */
351         if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
352                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
353                 goto out;
354         }
355
            /* The quoted sequence must fall inside the send window,
             * otherwise the ICMP is stale or spoofed.
             */
356         tp = tcp_sk(sk);
357         seq = ntohl(th->seq);
358         if (sk->sk_state != TCP_LISTEN &&
359             !between(seq, tp->snd_una, tp->snd_nxt)) {
360                 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
361                 goto out;
362         }
363
364         np = inet6_sk(sk);
365
366         if (type == NDISC_REDIRECT) {
367                 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
368
369                 if (dst)
370                         dst->ops->redirect(dst, sk, skb);
371         }
372
373         if (type == ICMPV6_PKT_TOOBIG) {
374                 struct dst_entry *dst;
375
376                 if (sock_owned_by_user(sk))
377                         goto out;
378                 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
379                         goto out;
380
381                 dst = inet6_csk_update_pmtu(sk, ntohl(info));
382                 if (!dst)
383                         goto out;
384
                    /* Shrink the MSS and retransmit if our cached PMTU
                     * was larger than the new path MTU.
                     */
385                 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
386                         tcp_sync_mss(sk, dst_mtu(dst));
387                         tcp_simple_retransmit(sk);
388                 }
389                 goto out;
390         }
391
392         icmpv6_err_convert(type, code, &err);
393
394         /* Might be for an request_sock */
395         switch (sk->sk_state) {
396                 struct request_sock *req, **prev;
397         case TCP_LISTEN:
398                 if (sock_owned_by_user(sk))
399                         goto out;
400
401                 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
402                                            &hdr->saddr, inet6_iif(skb));
403                 if (!req)
404                         goto out;
405
406                 /* ICMPs are not backlogged, hence we cannot get
407                  * an established socket here.
408                  */
409                 WARN_ON(req->sk != NULL);
410
411                 if (seq != tcp_rsk(req)->snt_isn) {
412                         NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
413                         goto out;
414                 }
415
416                 inet_csk_reqsk_queue_drop(sk, req, prev);
417                 goto out;
418
419         case TCP_SYN_SENT:
420         case TCP_SYN_RECV:  /* Cannot happen.
421                                It can, it SYNs are crossed. --ANK */
422                 if (!sock_owned_by_user(sk)) {
423                         sk->sk_err = err;
424                         sk->sk_error_report(sk);                /* Wake people up to see the error (see connect in sock.c) */
425
426                         tcp_done(sk);
427                 } else
428                         sk->sk_err_soft = err;
429                 goto out;
430         }
431
432         if (!sock_owned_by_user(sk) && np->recverr) {
433                 sk->sk_err = err;
434                 sk->sk_error_report(sk);
435         } else
436                 sk->sk_err_soft = err;
437
438 out:
439         bh_unlock_sock(sk);
440         sock_put(sk);
441 }
442
443
/*
 * tcp_v6_send_synack() - build and transmit a SYN-ACK for @req.
 *
 * If @dst is NULL, a route is looked up via inet6_csk_route_req()
 * using @fl6.  Returns 0 on success, -ENOMEM if no route or no skb,
 * or the net_xmit_eval()-folded transmit result.
 */
444 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
445                               struct flowi6 *fl6,
446                               struct request_sock *req,
447                               struct request_values *rvp,
448                               u16 queue_mapping)
449 {
450         struct inet6_request_sock *treq = inet6_rsk(req);
451         struct ipv6_pinfo *np = inet6_sk(sk);
452         struct sk_buff * skb;
453         int err = -ENOMEM;
454
455         /* First, grab a route. */
456         if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
457                 goto done;
458
459         skb = tcp_make_synack(sk, dst, req, rvp);
460
461         if (skb) {
462                 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
463
                    /* Route lookup may have rewritten fl6->daddr; restore
                     * the peer address before transmitting.
                     */
464                 fl6->daddr = treq->rmt_addr;
465                 skb_set_queue_mapping(skb, queue_mapping);
466                 err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
467                 err = net_xmit_eval(err);
468         }
469
470 done:
471         return err;
472 }
473
474 static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
475                              struct request_values *rvp)
476 {
477         struct flowi6 fl6;
478
479         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
480         return tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
481 }
482
483 static void tcp_v6_reqsk_destructor(struct request_sock *req)
484 {
485         kfree_skb(inet6_rsk(req)->pktopts);
486 }
487
488 #ifdef CONFIG_TCP_MD5SIG
489 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
490                                                    const struct in6_addr *addr)
491 {
492         return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
493 }
494
495 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
496                                                 struct sock *addr_sk)
497 {
498         return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
499 }
500
501 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
502                                                       struct request_sock *req)
503 {
504         return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
505 }
506
/*
 * tcp_v6_parse_md5_keys() - TCP_MD5SIG setsockopt handler.
 *
 * Copies a struct tcp_md5sig from userspace and adds or (when
 * tcpm_keylen is zero) deletes the key for the given peer.  A
 * v4-mapped IPv6 address is stored as an AF_INET key so it matches
 * traffic handled by the IPv4 code paths.
 */
507 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
508                                   int optlen)
509 {
510         struct tcp_md5sig cmd;
511         struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
512
513         if (optlen < sizeof(cmd))
514                 return -EINVAL;
515
516         if (copy_from_user(&cmd, optval, sizeof(cmd)))
517                 return -EFAULT;
518
519         if (sin6->sin6_family != AF_INET6)
520                 return -EINVAL;
521
            /* Zero key length means "delete the key for this peer". */
522         if (!cmd.tcpm_keylen) {
523                 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
524                         return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
525                                               AF_INET);
526                 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
527                                       AF_INET6);
528         }
529
530         if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
531                 return -EINVAL;
532
533         if (ipv6_addr_v4mapped(&sin6->sin6_addr))
534                 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
535                                       AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
536
537         return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
538                               AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
539 }
540
/*
 * Feed the TCPv6 pseudo-header (RFC 2460 layout: saddr, daddr,
 * upper-layer length, IPPROTO_TCP) into the per-cpu MD5 hash state.
 * Returns the crypto_hash_update() result (0 on success).
 */
541 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
542                                         const struct in6_addr *daddr,
543                                         const struct in6_addr *saddr, int nbytes)
544 {
545         struct tcp6_pseudohdr *bp;
546         struct scatterlist sg;
547
548         bp = &hp->md5_blk.ip6;
549         /* 1. TCP pseudo-header (RFC2460) */
550         bp->saddr = *saddr;
551         bp->daddr = *daddr;
552         bp->protocol = cpu_to_be32(IPPROTO_TCP);
553         bp->len = cpu_to_be32(nbytes);
554
555         sg_init_one(&sg, bp, sizeof(*bp));
556         return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
557 }
558
/*
 * Compute the TCP-MD5 digest over pseudo-header + TCP header + key
 * into @md5_hash (16 bytes).  Used for reply segments built from a
 * header only (no socket/payload).  Returns 0 on success, 1 on any
 * crypto failure (md5_hash is then zeroed).
 */
559 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
560                                const struct in6_addr *daddr, struct in6_addr *saddr,
561                                const struct tcphdr *th)
562 {
563         struct tcp_md5sig_pool *hp;
564         struct hash_desc *desc;
565
            /* tcp_get_md5sig_pool() disables preemption; every exit path
             * below must go through tcp_put_md5sig_pool().
             */
566         hp = tcp_get_md5sig_pool();
567         if (!hp)
568                 goto clear_hash_noput;
569         desc = &hp->md5_desc;
570
571         if (crypto_hash_init(desc))
572                 goto clear_hash;
573         if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
574                 goto clear_hash;
575         if (tcp_md5_hash_header(hp, th))
576                 goto clear_hash;
577         if (tcp_md5_hash_key(hp, key))
578                 goto clear_hash;
579         if (crypto_hash_final(desc, md5_hash))
580                 goto clear_hash;
581
582         tcp_put_md5sig_pool();
583         return 0;
584
585 clear_hash:
586         tcp_put_md5sig_pool();
587 clear_hash_noput:
588         memset(md5_hash, 0, 16);
589         return 1;
590 }
591
/*
 * Compute the TCP-MD5 digest for a full segment (pseudo-header, TCP
 * header, payload, key) into @md5_hash.  The address pair is taken
 * from @sk, else @req, else the skb's own IPv6 header — exactly one
 * of sk/req may be set.  Returns 0 on success, 1 on crypto failure
 * (md5_hash zeroed).
 */
592 static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
593                                const struct sock *sk,
594                                const struct request_sock *req,
595                                const struct sk_buff *skb)
596 {
597         const struct in6_addr *saddr, *daddr;
598         struct tcp_md5sig_pool *hp;
599         struct hash_desc *desc;
600         const struct tcphdr *th = tcp_hdr(skb);
601
602         if (sk) {
603                 saddr = &inet6_sk(sk)->saddr;
604                 daddr = &inet6_sk(sk)->daddr;
605         } else if (req) {
606                 saddr = &inet6_rsk(req)->loc_addr;
607                 daddr = &inet6_rsk(req)->rmt_addr;
608         } else {
609                 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
610                 saddr = &ip6h->saddr;
611                 daddr = &ip6h->daddr;
612         }
613
            /* Pool get/put brackets the whole digest computation. */
614         hp = tcp_get_md5sig_pool();
615         if (!hp)
616                 goto clear_hash_noput;
617         desc = &hp->md5_desc;
618
619         if (crypto_hash_init(desc))
620                 goto clear_hash;
621
622         if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
623                 goto clear_hash;
624         if (tcp_md5_hash_header(hp, th))
625                 goto clear_hash;
626         if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
627                 goto clear_hash;
628         if (tcp_md5_hash_key(hp, key))
629                 goto clear_hash;
630         if (crypto_hash_final(desc, md5_hash))
631                 goto clear_hash;
632
633         tcp_put_md5sig_pool();
634         return 0;
635
636 clear_hash:
637         tcp_put_md5sig_pool();
638 clear_hash_noput:
639         memset(md5_hash, 0, 16);
640         return 1;
641 }
642
/*
 * Validate the MD5 signature option of an inbound segment against the
 * key configured for its source address.  Returns 0 when the segment
 * is acceptable (no key and no option, or signature matches) and 1
 * when it must be dropped (missing, unexpected or wrong signature).
 */
643 static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
644 {
645         const __u8 *hash_location = NULL;
646         struct tcp_md5sig_key *hash_expected;
647         const struct ipv6hdr *ip6h = ipv6_hdr(skb);
648         const struct tcphdr *th = tcp_hdr(skb);
649         int genhash;
650         u8 newhash[16];
651
652         hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
653         hash_location = tcp_parse_md5sig_option(th);
654
655         /* We've parsed the options - do we have a hash? */
656         if (!hash_expected && !hash_location)
657                 return 0;
658
659         if (hash_expected && !hash_location) {
660                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
661                 return 1;
662         }
663
664         if (!hash_expected && hash_location) {
665                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
666                 return 1;
667         }
668
669         /* check the signature */
670         genhash = tcp_v6_md5_hash_skb(newhash,
671                                       hash_expected,
672                                       NULL, NULL, skb);
673
674         if (genhash || memcmp(hash_location, newhash, 16) != 0) {
675                 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
676                                      genhash ? "failed" : "mismatch",
677                                      &ip6h->saddr, ntohs(th->source),
678                                      &ip6h->daddr, ntohs(th->dest));
679                 return 1;
680         }
681         return 0;
682 }
683 #endif
684
/* request_sock operations for IPv6 TCP connection requests (SYN_RECV
 * minisockets): SYN-ACK (re)transmission, ACK/RST replies and cleanup.
 */
685 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
686         .family         =       AF_INET6,
687         .obj_size       =       sizeof(struct tcp6_request_sock),
688         .rtx_syn_ack    =       tcp_v6_rtx_synack,
689         .send_ack       =       tcp_v6_reqsk_send_ack,
690         .destructor     =       tcp_v6_reqsk_destructor,
691         .send_reset     =       tcp_v6_send_reset,
692         .syn_ack_timeout =      tcp_syn_ack_timeout,
693 };
694
695 #ifdef CONFIG_TCP_MD5SIG
/* MD5 helpers used while a connection is still a request sock. */
696 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
697         .md5_lookup     =       tcp_v6_reqsk_md5_lookup,
698         .calc_md5_hash  =       tcp_v6_md5_hash_skb,
699 };
700 #endif
701
/*
 * Set the TCP checksum of @skb for the (@saddr, @daddr) pair.
 * CHECKSUM_PARTIAL: store only the inverted pseudo-header sum plus the
 * csum_start/csum_offset hints so hardware (or the software fallback)
 * completes it; otherwise compute the full checksum right away.
 */
702 static void __tcp_v6_send_check(struct sk_buff *skb,
703                                 const struct in6_addr *saddr, const struct in6_addr *daddr)
704 {
705         struct tcphdr *th = tcp_hdr(skb);
706
707         if (skb->ip_summed == CHECKSUM_PARTIAL) {
708                 th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
709                 skb->csum_start = skb_transport_header(skb) - skb->head;
710                 skb->csum_offset = offsetof(struct tcphdr, check);
711         } else {
712                 th->check = tcp_v6_check(skb->len, saddr, daddr,
713                                          csum_partial(th, th->doff << 2,
714                                                       skb->csum));
715         }
716 }
717
718 static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
719 {
720         struct ipv6_pinfo *np = inet6_sk(sk);
721
722         __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
723 }
724
725 static int tcp_v6_gso_send_check(struct sk_buff *skb)
726 {
727         const struct ipv6hdr *ipv6h;
728         struct tcphdr *th;
729
730         if (!pskb_may_pull(skb, sizeof(*th)))
731                 return -EINVAL;
732
733         ipv6h = ipv6_hdr(skb);
734         th = tcp_hdr(skb);
735
736         th->check = 0;
737         skb->ip_summed = CHECKSUM_PARTIAL;
738         __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
739         return 0;
740 }
741
/*
 * GRO receive hook for TCPv6: verify the checksum when the device
 * provided one (CHECKSUM_COMPLETE), then hand off to the generic TCP
 * GRO engine.  Unverifiable packets are flushed out of GRO.
 */
742 static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
743                                          struct sk_buff *skb)
744 {
745         const struct ipv6hdr *iph = skb_gro_network_header(skb);
746
747         switch (skb->ip_summed) {
748         case CHECKSUM_COMPLETE:
749                 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
750                                   skb->csum)) {
751                         skb->ip_summed = CHECKSUM_UNNECESSARY;
752                         break;
753                 }
754
                    /* Bad checksum: treat like CHECKSUM_NONE and flush. */
755                 /* fall through */
756         case CHECKSUM_NONE:
757                 NAPI_GRO_CB(skb)->flush = 1;
758                 return NULL;
759         }
760
761         return tcp_gro_receive(head, skb);
762 }
763
764 static int tcp6_gro_complete(struct sk_buff *skb)
765 {
766         const struct ipv6hdr *iph = ipv6_hdr(skb);
767         struct tcphdr *th = tcp_hdr(skb);
768
769         th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
770                                   &iph->saddr, &iph->daddr, 0);
771         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
772
773         return tcp_gro_complete(skb);
774 }
775
/*
 * tcp_v6_send_response() - build and send a bare control segment
 * (RST or ACK) in reply to @skb, without any full socket.
 *
 * Addresses and ports are swapped from the incoming segment, optional
 * timestamp and MD5 options are appended, and the reply is routed and
 * transmitted through the per-netns control socket.  @rst selects a
 * RST (tcp6_request_sock_ops.send_reset path) versus a pure ACK.
 */
776 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
777                                  u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
778 {
779         const struct tcphdr *th = tcp_hdr(skb);
780         struct tcphdr *t1;
781         struct sk_buff *buff;
782         struct flowi6 fl6;
783         struct net *net = dev_net(skb_dst(skb)->dev);
784         struct sock *ctl_sk = net->ipv6.tcp_sk;
785         unsigned int tot_len = sizeof(struct tcphdr);
786         struct dst_entry *dst;
787         __be32 *topt;
788
            /* Grow the header for the options we are going to emit. */
789         if (ts)
790                 tot_len += TCPOLEN_TSTAMP_ALIGNED;
791 #ifdef CONFIG_TCP_MD5SIG
792         if (key)
793                 tot_len += TCPOLEN_MD5SIG_ALIGNED;
794 #endif
795
796         buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
797                          GFP_ATOMIC);
798         if (buff == NULL)
799                 return;
800
801         skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
802
803         t1 = (struct tcphdr *) skb_push(buff, tot_len);
804         skb_reset_transport_header(buff);
805
806         /* Swap the send and the receive. */
807         memset(t1, 0, sizeof(*t1));
808         t1->dest = th->source;
809         t1->source = th->dest;
810         t1->doff = tot_len / 4;
811         t1->seq = htonl(seq);
812         t1->ack_seq = htonl(ack);
813         t1->ack = !rst || !th->ack;
814         t1->rst = rst;
815         t1->window = htons(win);
816
817         topt = (__be32 *)(t1 + 1);
818
819         if (ts) {
820                 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
821                                 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
822                 *topt++ = htonl(tcp_time_stamp);
823                 *topt++ = htonl(ts);
824         }
825
826 #ifdef CONFIG_TCP_MD5SIG
827         if (key) {
828                 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
829                                 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
                    /* Sign with the incoming skb's addresses (already the
                     * reverse of the reply's flow).
                     */
830                 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
831                                     &ipv6_hdr(skb)->saddr,
832                                     &ipv6_hdr(skb)->daddr, t1);
833         }
834 #endif
835
836         memset(&fl6, 0, sizeof(fl6));
837         fl6.daddr = ipv6_hdr(skb)->saddr;
838         fl6.saddr = ipv6_hdr(skb)->daddr;
839
840         buff->ip_summed = CHECKSUM_PARTIAL;
841         buff->csum = 0;
842
843         __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
844
845         fl6.flowi6_proto = IPPROTO_TCP;
846         fl6.flowi6_oif = inet6_iif(skb);
847         fl6.fl6_dport = t1->dest;
848         fl6.fl6_sport = t1->source;
849         security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
850
851         /* Pass a socket to ip6_dst_lookup either it is for RST
852          * Underlying function will use this to retrieve the network
853          * namespace
854          */
855         dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
856         if (!IS_ERR(dst)) {
857                 skb_dst_set(buff, dst);
858                 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
859                 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
860                 if (rst)
861                         TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
862                 return;
863         }
864
            /* Routing failed: drop the reply we built. */
865         kfree_skb(buff);
866 }
867
/*
 * Send a RST in response to @skb.
 *
 * @sk may be NULL (no matching socket was found for the segment).  With
 * CONFIG_TCP_MD5SIG, if the offending segment carried an MD5 option and
 * there is no socket, the listener is looked up by the segment's source
 * port so the RST can be signed with the correct key; if the incoming
 * hash does not verify, no RST is generated at all.
 */
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif

	/* Never answer a RST with a RST (would allow RST wars). */
	if (th->rst)
		return;

	/* RSTs are only sent for unicast destinations. */
	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->daddr,
					   ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		/* Verify the incoming segment's MD5 hash before replying. */
		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	/*
	 * RFC 793 reset generation: if the segment had an ACK, reuse its
	 * ack number as our sequence; otherwise ACK everything it occupied
	 * in sequence space (data + SYN/FIN flags).
	 */
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
932
/*
 * Send a bare ACK built from @skb's headers: thin wrapper around
 * tcp_v6_send_response() with rst == 0.
 */
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key, u8 tclass)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
}
938
939 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
940 {
941         struct inet_timewait_sock *tw = inet_twsk(sk);
942         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
943
944         tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
945                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
946                         tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
947                         tw->tw_tclass);
948
949         inet_twsk_put(tw);
950 }
951
952 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
953                                   struct request_sock *req)
954 {
955         tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
956                         tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
957 }
958
959
960 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
961 {
962         struct request_sock *req, **prev;
963         const struct tcphdr *th = tcp_hdr(skb);
964         struct sock *nsk;
965
966         /* Find possible connection requests. */
967         req = inet6_csk_search_req(sk, &prev, th->source,
968                                    &ipv6_hdr(skb)->saddr,
969                                    &ipv6_hdr(skb)->daddr, inet6_iif(skb));
970         if (req)
971                 return tcp_check_req(sk, skb, req, prev);
972
973         nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
974                         &ipv6_hdr(skb)->saddr, th->source,
975                         &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
976
977         if (nsk) {
978                 if (nsk->sk_state != TCP_TIME_WAIT) {
979                         bh_lock_sock(nsk);
980                         return nsk;
981                 }
982                 inet_twsk_put(inet_twsk(nsk));
983                 return NULL;
984         }
985
986 #ifdef CONFIG_SYN_COOKIES
987         if (!th->syn)
988                 sk = cookie_v6_check(sk, skb);
989 #endif
990         return sk;
991 }
992
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
/*
 * Handle an incoming SYN (or the ACK of a syncookie) on a listening
 * socket: allocate a request sock, parse and validate TCP options,
 * pick an initial sequence number and send the SYN-ACK.
 *
 * Always returns 0 ("do not send a reset"); failures simply drop.
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;	/* non-zero: came via timewait recycling */
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;
	bool want_cookie = false;

	/* v4-mapped SYN: hand off to the IPv4 path. */
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/* SYN queue full: fall back to syncookies or drop. */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	/* Clamp advertised MSS to what fits in a minimum-MTU IPv6 packet. */
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	/* TCP cookie transactions (TCPCT) extension handling. */
	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	/* Syncookies can only encode state when timestamps are available. */
	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	treq->rmt_addr = ipv6_hdr(skb)->saddr;
	treq->loc_addr = ipv6_hdr(skb)->daddr;
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (!isn) {
		/* Latch the SYN's IPv6 options if the user asked for them. */
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_release;

	/* For a cookie reply we do not keep the request around. */
	if (tcp_v6_send_synack(sk, dst, &fl6, req,
			       (struct request_values *)&tmp_ext,
			       skb_get_queue_mapping(skb)) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}
1173
/*
 * Create the child socket for a completed handshake.
 *
 * Handles two cases: a v4-mapped connection (delegates to the IPv4 code
 * and then retrofits the IPv6 pinfo/ops), and a native IPv6 connection
 * (clones the listener state, copies request-sock addresses and options,
 * sizes the MSS from the route, and inherits any MD5 key).
 * Returns the new socket, or NULL on failure (overflow/route/alloc).
 */
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		/* Expose the IPv4 addresses as ::ffff:a.b.c.d. */
		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newnp->rcv_saddr = newnp->saddr;

		/* Switch the child to the IPv4 transmit/receive ops. */
		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_tclass  = ipv6_tclass(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	/* Addresses come from the request sock, not the listener. */
	newnp->daddr = treq->rmt_addr;
	newnp->saddr = treq->loc_addr;
	newnp->rcv_saddr = treq->loc_addr;
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		consume_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	/* Seed the RTT estimator from the SYN/SYN-ACK exchange. */
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
			       AF_INET6, key->key, key->keylen, GFP_ATOMIC);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
1365
1366 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1367 {
1368         if (skb->ip_summed == CHECKSUM_COMPLETE) {
1369                 if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1370                                   &ipv6_hdr(skb)->daddr, skb->csum)) {
1371                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1372                         return 0;
1373                 }
1374         }
1375
1376         skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1377                                               &ipv6_hdr(skb)->saddr,
1378                                               &ipv6_hdr(skb)->daddr, 0));
1379
1380         if (skb->len <= 76) {
1381                 return __skb_checksum_complete(skb);
1382         }
1383         return 0;
1384 }
1385
1386 /* The socket must have it's spinlock held when we get
1387  * here.
1388  *
1389  * We have a potential double-lock case here, so even when
1390  * doing backlog processing we use the BH locking scheme.
1391  * This is because we cannot sleep with the original spinlock
1392  * held.
1393  */
1394 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1395 {
1396         struct ipv6_pinfo *np = inet6_sk(sk);
1397         struct tcp_sock *tp;
1398         struct sk_buff *opt_skb = NULL;
1399
1400         /* Imagine: socket is IPv6. IPv4 packet arrives,
1401            goes to IPv4 receive handler and backlogged.
1402            From backlog it always goes here. Kerboom...
1403            Fortunately, tcp_rcv_established and rcv_established
1404            handle them correctly, but it is not case with
1405            tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1406          */
1407
1408         if (skb->protocol == htons(ETH_P_IP))
1409                 return tcp_v4_do_rcv(sk, skb);
1410
1411 #ifdef CONFIG_TCP_MD5SIG
1412         if (tcp_v6_inbound_md5_hash (sk, skb))
1413                 goto discard;
1414 #endif
1415
1416         if (sk_filter(sk, skb))
1417                 goto discard;
1418
1419         /*
1420          *      socket locking is here for SMP purposes as backlog rcv
1421          *      is currently called with bh processing disabled.
1422          */
1423
1424         /* Do Stevens' IPV6_PKTOPTIONS.
1425
1426            Yes, guys, it is the only place in our code, where we
1427            may make it not affecting IPv4.
1428            The rest of code is protocol independent,
1429            and I do not like idea to uglify IPv4.
1430
1431            Actually, all the idea behind IPV6_PKTOPTIONS
1432            looks not very well thought. For now we latch
1433            options, received in the last packet, enqueued
1434            by tcp. Feel free to propose better solution.
1435                                                --ANK (980728)
1436          */
1437         if (np->rxopt.all)
1438                 opt_skb = skb_clone(skb, GFP_ATOMIC);
1439
1440         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1441                 sock_rps_save_rxhash(sk, skb);
1442                 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1443                         goto reset;
1444                 if (opt_skb)
1445                         goto ipv6_pktoptions;
1446                 return 0;
1447         }
1448
1449         if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1450                 goto csum_err;
1451
1452         if (sk->sk_state == TCP_LISTEN) {
1453                 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1454                 if (!nsk)
1455                         goto discard;
1456
1457                 /*
1458                  * Queue it on the new socket if the new socket is active,
1459                  * otherwise we just shortcircuit this and continue with
1460                  * the new socket..
1461                  */
1462                 if(nsk != sk) {
1463                         sock_rps_save_rxhash(nsk, skb);
1464                         if (tcp_child_process(sk, nsk, skb))
1465                                 goto reset;
1466                         if (opt_skb)
1467                                 __kfree_skb(opt_skb);
1468                         return 0;
1469                 }
1470         } else
1471                 sock_rps_save_rxhash(sk, skb);
1472
1473         if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1474                 goto reset;
1475         if (opt_skb)
1476                 goto ipv6_pktoptions;
1477         return 0;
1478
1479 reset:
1480         tcp_v6_send_reset(sk, skb);
1481 discard:
1482         if (opt_skb)
1483                 __kfree_skb(opt_skb);
1484         kfree_skb(skb);
1485         return 0;
1486 csum_err:
1487         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1488         goto discard;
1489
1490
1491 ipv6_pktoptions:
1492         /* Do you ask, what is it?
1493
1494            1. skb was enqueued by tcp.
1495            2. skb is added to tail of read queue, rather than out of order.
1496            3. socket is not in passive state.
1497            4. Finally, it really contains options, which user wants to receive.
1498          */
1499         tp = tcp_sk(sk);
1500         if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1501             !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1502                 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1503                         np->mcast_oif = inet6_iif(opt_skb);
1504                 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1505                         np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1506                 if (np->rxopt.bits.rxtclass)
1507                         np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1508                 if (ipv6_opt_accepted(sk, opt_skb)) {
1509                         skb_set_owner_r(opt_skb, sk);
1510                         opt_skb = xchg(&np->pktoptions, opt_skb);
1511                 } else {
1512                         __kfree_skb(opt_skb);
1513                         opt_skb = xchg(&np->pktoptions, NULL);
1514                 }
1515         }
1516
1517         kfree_skb(opt_skb);
1518         return 0;
1519 }
1520
/*
 * Main IPv6 TCP receive entry point (the protocol handler).
 *
 * Validates the header and checksum, fills in the TCP control block,
 * looks up the owning socket and dispatches: direct/prequeue/backlog
 * processing for live sockets, RST for unmatched segments, and the
 * TIME-WAIT state machine for timewait hits.
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	/* Not addressed to us (promiscuous capture etc). */
	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	/* Data offset smaller than the fixed header is malformed. */
	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	/* Re-read header pointers: pskb_may_pull may have moved data. */
	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	/* IP_MINTTL-style protection (RFC 5082 GTSM) for IPv6. */
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		/* Owner is busy and the backlog is full: drop. */
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		/* Valid segment for a nonexistent connection: reset it. */
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		/* New SYN on a timewait: reuse the pair via the listener. */
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
1669
/* TIME-WAIT socket operations for IPv6 TCP sockets. */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
1675
/* Address-family specific connection-socket ops for native IPv6 TCP. */
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header    = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len    = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1694
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 (RFC 2385) key lookup/hash/parse hooks for IPv6 sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1702
/*
 *	TCP over IPv4 via INET6 API
 *
 *	Used when an AF_INET6 socket falls back to a v4-mapped peer:
 *	transmit, checksum and header rebuild use the IPv4 helpers, while
 *	sockopt/addressing stay on the IPv6 side.
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1724
#ifdef CONFIG_TCP_MD5SIG
/* MD5 hooks for v4-mapped sockets: hash over the IPv4 pseudo-header
 * (v4 lookup/hash) but keys are still parsed via the v6 sockopt path.
 */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1732
1733 /* NOTE: A lot of things set to zero explicitly by call to
1734  *       sk_alloc() so need not be done here.
1735  */
1736 static int tcp_v6_init_sock(struct sock *sk)
1737 {
1738         struct inet_connection_sock *icsk = inet_csk(sk);
1739
1740         tcp_init_sock(sk);
1741
1742         icsk->icsk_af_ops = &ipv6_specific;
1743
1744 #ifdef CONFIG_TCP_MD5SIG
1745         tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1746 #endif
1747
1748         return 0;
1749 }
1750
/* Socket teardown: run the common (v4) TCP destroy first, then release
 * the IPv6-specific socket state.
 */
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1756
1757 #ifdef CONFIG_PROC_FS
1758 /* Proc filesystem TCPv6 sock list dumping. */
1759 static void get_openreq6(struct seq_file *seq,
1760                          const struct sock *sk, struct request_sock *req, int i, int uid)
1761 {
1762         int ttd = req->expires - jiffies;
1763         const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1764         const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1765
1766         if (ttd < 0)
1767                 ttd = 0;
1768
1769         seq_printf(seq,
1770                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1771                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1772                    i,
1773                    src->s6_addr32[0], src->s6_addr32[1],
1774                    src->s6_addr32[2], src->s6_addr32[3],
1775                    ntohs(inet_rsk(req)->loc_port),
1776                    dest->s6_addr32[0], dest->s6_addr32[1],
1777                    dest->s6_addr32[2], dest->s6_addr32[3],
1778                    ntohs(inet_rsk(req)->rmt_port),
1779                    TCP_SYN_RECV,
1780                    0,0, /* could print option size, but that is af dependent. */
1781                    1,   /* timers active (only the expire timer) */
1782                    jiffies_to_clock_t(ttd),
1783                    req->retrans,
1784                    uid,
1785                    0,  /* non standard timer */
1786                    0, /* open_requests have no inode */
1787                    0, req);
1788 }
1789
1790 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1791 {
1792         const struct in6_addr *dest, *src;
1793         __u16 destp, srcp;
1794         int timer_active;
1795         unsigned long timer_expires;
1796         const struct inet_sock *inet = inet_sk(sp);
1797         const struct tcp_sock *tp = tcp_sk(sp);
1798         const struct inet_connection_sock *icsk = inet_csk(sp);
1799         const struct ipv6_pinfo *np = inet6_sk(sp);
1800
1801         dest  = &np->daddr;
1802         src   = &np->rcv_saddr;
1803         destp = ntohs(inet->inet_dport);
1804         srcp  = ntohs(inet->inet_sport);
1805
1806         if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1807                 timer_active    = 1;
1808                 timer_expires   = icsk->icsk_timeout;
1809         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1810                 timer_active    = 4;
1811                 timer_expires   = icsk->icsk_timeout;
1812         } else if (timer_pending(&sp->sk_timer)) {
1813                 timer_active    = 2;
1814                 timer_expires   = sp->sk_timer.expires;
1815         } else {
1816                 timer_active    = 0;
1817                 timer_expires = jiffies;
1818         }
1819
1820         seq_printf(seq,
1821                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1822                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
1823                    i,
1824                    src->s6_addr32[0], src->s6_addr32[1],
1825                    src->s6_addr32[2], src->s6_addr32[3], srcp,
1826                    dest->s6_addr32[0], dest->s6_addr32[1],
1827                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
1828                    sp->sk_state,
1829                    tp->write_seq-tp->snd_una,
1830                    (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1831                    timer_active,
1832                    jiffies_to_clock_t(timer_expires - jiffies),
1833                    icsk->icsk_retransmits,
1834                    sock_i_uid(sp),
1835                    icsk->icsk_probes_out,
1836                    sock_i_ino(sp),
1837                    atomic_read(&sp->sk_refcnt), sp,
1838                    jiffies_to_clock_t(icsk->icsk_rto),
1839                    jiffies_to_clock_t(icsk->icsk_ack.ato),
1840                    (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
1841                    tp->snd_cwnd,
1842                    tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1843                    );
1844 }
1845
1846 static void get_timewait6_sock(struct seq_file *seq,
1847                                struct inet_timewait_sock *tw, int i)
1848 {
1849         const struct in6_addr *dest, *src;
1850         __u16 destp, srcp;
1851         const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1852         int ttd = tw->tw_ttd - jiffies;
1853
1854         if (ttd < 0)
1855                 ttd = 0;
1856
1857         dest = &tw6->tw_v6_daddr;
1858         src  = &tw6->tw_v6_rcv_saddr;
1859         destp = ntohs(tw->tw_dport);
1860         srcp  = ntohs(tw->tw_sport);
1861
1862         seq_printf(seq,
1863                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1864                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1865                    i,
1866                    src->s6_addr32[0], src->s6_addr32[1],
1867                    src->s6_addr32[2], src->s6_addr32[3], srcp,
1868                    dest->s6_addr32[0], dest->s6_addr32[1],
1869                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
1870                    tw->tw_substate, 0, 0,
1871                    3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
1872                    atomic_read(&tw->tw_refcnt), tw);
1873 }
1874
1875 static int tcp6_seq_show(struct seq_file *seq, void *v)
1876 {
1877         struct tcp_iter_state *st;
1878
1879         if (v == SEQ_START_TOKEN) {
1880                 seq_puts(seq,
1881                          "  sl  "
1882                          "local_address                         "
1883                          "remote_address                        "
1884                          "st tx_queue rx_queue tr tm->when retrnsmt"
1885                          "   uid  timeout inode\n");
1886                 goto out;
1887         }
1888         st = seq->private;
1889
1890         switch (st->state) {
1891         case TCP_SEQ_STATE_LISTENING:
1892         case TCP_SEQ_STATE_ESTABLISHED:
1893                 get_tcp6_sock(seq, v, st->num);
1894                 break;
1895         case TCP_SEQ_STATE_OPENREQ:
1896                 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1897                 break;
1898         case TCP_SEQ_STATE_TIME_WAIT:
1899                 get_timewait6_sock(seq, v, st->num);
1900                 break;
1901         }
1902 out:
1903         return 0;
1904 }
1905
/* File operations backing /proc/net/tcp6 (generic TCP seq_file open,
 * per-net release).
 */
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};
1913
/* AF_INET6 descriptor for the shared TCP /proc infrastructure; only
 * ->show is set here, the remaining seq_ops are presumably filled in by
 * the generic registration code (see tcp_proc_register).
 */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
1922
/* Register /proc/net/tcp6 for @net.  Returns 0 or a negative errno. */
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}
1927
/* Remove /proc/net/tcp6 for @net; pairs with tcp6_proc_init(). */
void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
1932 #endif
1933
/* struct proto for AF_INET6/SOCK_STREAM sockets: mostly the generic TCP
 * entry points, with v6-specific init/destroy/connect/backlog handlers
 * and the v6 timewait and request-sock ops plugged in.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
1977
/* IPv6 protocol hooks for IPPROTO_TCP: receive and error handlers plus
 * the GSO/GRO offload callbacks.
 */
static const struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check =	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
1987
/* Protocol switch entry mapping SOCK_STREAM/IPPROTO_TCP sockets on the
 * inet6 family to tcpv6_prot and the generic inet6 stream ops.
 */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
1997
/* Per-netns init: create the PF_INET6 raw control socket used by this
 * netns for TCPv6 (stored in net->ipv6.tcp_sk).
 */
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}
2003
/* Per-netns exit: destroy the control socket created in tcpv6_net_init(). */
static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
2008
/* Batch exit: purge any remaining AF_INET6 time-wait sockets when a set
 * of network namespaces is torn down together.
 */
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}
2013
/* Per-network-namespace lifecycle hooks for TCPv6. */
static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
2019
2020 int __init tcpv6_init(void)
2021 {
2022         int ret;
2023
2024         ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2025         if (ret)
2026                 goto out;
2027
2028         /* register inet6 protocol */
2029         ret = inet6_register_protosw(&tcpv6_protosw);
2030         if (ret)
2031                 goto out_tcpv6_protocol;
2032
2033         ret = register_pernet_subsys(&tcpv6_net_ops);
2034         if (ret)
2035                 goto out_tcpv6_protosw;
2036 out:
2037         return ret;
2038
2039 out_tcpv6_protocol:
2040         inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2041 out_tcpv6_protosw:
2042         inet6_unregister_protosw(&tcpv6_protosw);
2043         goto out;
2044 }
2045
/* Module exit for TCPv6: undo tcpv6_init() in reverse order of
 * registration.
 */
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}