086deffff9c9c3100c3975ab9292bf25e8ca44eb
[linux-3.10.git] / net / ipv6 / tcp_ipv6.c
1 /*
2  *      TCP over IPv6
3  *      Linux INET6 implementation
4  *
5  *      Authors:
6  *      Pedro Roque             <roque@di.fc.ul.pt>
7  *
8  *      $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
9  *
10  *      Based on:
11  *      linux/net/ipv4/tcp.c
12  *      linux/net/ipv4/tcp_input.c
13  *      linux/net/ipv4/tcp_output.c
14  *
15  *      Fixes:
16  *      Hideaki YOSHIFUJI       :       sin6_scope_id support
17  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
18  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
19  *                                      a single port at the same time.
20  *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
21  *
22  *      This program is free software; you can redistribute it and/or
23  *      modify it under the terms of the GNU General Public License
24  *      as published by the Free Software Foundation; either version
25  *      2 of the License, or (at your option) any later version.
26  */
27
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/types.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/jiffies.h>
35 #include <linux/in.h>
36 #include <linux/in6.h>
37 #include <linux/netdevice.h>
38 #include <linux/init.h>
39 #include <linux/jhash.h>
40 #include <linux/ipsec.h>
41 #include <linux/times.h>
42
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63
64 #include <asm/uaccess.h>
65
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
68
69 #include <linux/crypto.h>
70 #include <linux/scatterlist.h>
71
72 static void     tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
73 static void     tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
74 static void     tcp_v6_send_check(struct sock *sk, int len,
75                                   struct sk_buff *skb);
76
77 static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78
79 static struct inet_connection_sock_af_ops ipv6_mapped;
80 static struct inet_connection_sock_af_ops ipv6_specific;
81 #ifdef CONFIG_TCP_MD5SIG
82 static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
83 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
84 #endif
85
86 static void tcp_v6_hash(struct sock *sk)
87 {
88         if (sk->sk_state != TCP_CLOSE) {
89                 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
90                         tcp_prot.hash(sk);
91                         return;
92                 }
93                 local_bh_disable();
94                 __inet6_hash(sk);
95                 local_bh_enable();
96         }
97 }
98
/* Finish the TCP checksum by folding in the IPv6 pseudo-header
 * (saddr, daddr, length, IPPROTO_TCP) on top of @base, the partial
 * checksum the caller already computed over the TCP header + payload.
 * Note: the @th parameter is unused here.
 */
static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
				   struct in6_addr *saddr,
				   struct in6_addr *daddr,
				   __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
106
107 static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
108 {
109         return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
110                                             ipv6_hdr(skb)->saddr.s6_addr32,
111                                             tcp_hdr(skb)->dest,
112                                             tcp_hdr(skb)->source);
113 }
114
/* Set up an active (client) TCP connection on an IPv6 socket.
 *
 * Validates the destination, resolves any flow label, delegates to
 * tcp_v4_connect() for IPv4-mapped destinations, performs the route and
 * xfrm lookups, binds a local port via inet6_hash_connect(), selects an
 * initial sequence number and sends the SYN through tcp_connect().
 *
 * Returns 0 on success or a negative errno.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return(-EAFNOSUPPORT);

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		/* Caller supplied flow info: honour the flow label and, if
		 * the label carries a destination, use that address.
		 */
		fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if(ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if(addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		/* Different destination than last time: stale timestamp
		 * state must not leak into the new connection.
		 */
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Switch the socket onto the IPv4-mapped op tables before
		 * delegating to tcp_v4_connect(); undone below on failure.
		 */
		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			/* Record the chosen v4 addresses as v4-mapped v6. */
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		/* Source routing: route towards the first hop, remember
		 * the real destination and restore it after the lookup.
		 */
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = __xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT)) < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto failure;
	}

	if (saddr == NULL) {
		/* No bound source address: take the one the route chose. */
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	/* Failed after entering SYN_SENT: drop back to CLOSE and forget
	 * the cached route before the common cleanup.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
312
/* ICMPv6 error handler for TCP.
 *
 * Called for an ICMPv6 error that quotes a TCP segment we sent.  Looks
 * up the owning socket, validates the quoted sequence number against the
 * send window, handles path-MTU discovery (ICMPV6_PKT_TOOBIG), and
 * delivers other errors to the socket or to a pending request_sock.
 */
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;

	sk = inet6_lookup(dev_net(skb->dev), &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		/* Nothing useful to report on a time-wait socket. */
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* The quoted sequence number must lie inside the current send
	 * window, otherwise the ICMP is stale or forged.
	 */
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		/* Path MTU discovery: sync the MSS to the (possibly
		 * updated) route MTU and retransmit what no longer fits.
		 */
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		/* Error may be for an embryonic connection still queued
		 * on the listener; drop that request if the ISN matches.
		 */
		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, it SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	/* Established etc.: hard error only if the app asked for it. */
	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
453
454
/* Build and transmit a SYN+ACK for the pending connection request @req.
 *
 * Performs the route/xfrm lookup for the peer, builds the SYN+ACK with
 * tcp_make_synack(), checksums it over the IPv6 pseudo-header and sends
 * it with ip6_xmit().  Returns a net_xmit_* value, or a negative errno
 * if the route lookup failed.
 */
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;
	security_req_classify_flow(req, &fl);

	opt = np->opt;
	if (opt && opt->srcrt) {
		/* Source routing: route via the first hop; the true
		 * destination is restored into fl below before xmit.
		 */
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto done;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);
	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto done;

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		/* Full software checksum: pseudo-header over the partial
		 * csum of the TCP header + options.
		 */
		th->check = tcp_v6_check(th, skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);
	}

done:
	/* opt always aliases np->opt in this function, so this free is
	 * currently a no-op kept for symmetry with option substitution.
	 */
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
511
512 static inline void syn_flood_warning(struct sk_buff *skb)
513 {
514 #ifdef CONFIG_SYN_COOKIES
515         if (sysctl_tcp_syncookies)
516                 printk(KERN_INFO
517                        "TCPv6: Possible SYN flooding on port %d. "
518                        "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
519         else
520 #endif
521                 printk(KERN_INFO
522                        "TCPv6: Possible SYN flooding on port %d. "
523                        "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
524 }
525
526 static void tcp_v6_reqsk_destructor(struct request_sock *req)
527 {
528         if (inet6_rsk(req)->pktopts)
529                 kfree_skb(inet6_rsk(req)->pktopts);
530 }
531
532 #ifdef CONFIG_TCP_MD5SIG
533 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
534                                                    struct in6_addr *addr)
535 {
536         struct tcp_sock *tp = tcp_sk(sk);
537         int i;
538
539         BUG_ON(tp == NULL);
540
541         if (!tp->md5sig_info || !tp->md5sig_info->entries6)
542                 return NULL;
543
544         for (i = 0; i < tp->md5sig_info->entries6; i++) {
545                 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
546                         return &tp->md5sig_info->keys6[i].base;
547         }
548         return NULL;
549 }
550
/* Look up the MD5 key for the peer of the connected socket @addr_sk. */
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}
556
/* Look up the MD5 key for the remote peer of request sock @req. */
static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}
562
/* Install or replace the MD5 signature key for IPv6 peer @peer.
 *
 * Takes ownership of @newkey (kmalloc'd by the caller): the pointer is
 * either stored in the key table or kfree'd on every failure path.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			/* First key on this socket: create the database. */
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			/* GSO caps are cleared once MD5 is in use —
			 * presumably offload can't produce signed
			 * segments; mirrors tcp_v6_parse_md5_keys().
			 */
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool() == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			/* Table full: grow by exactly one slot and copy
			 * the existing entries across.
			 */
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		/* Append the new entry in the free slot at the end. */
		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}
620
/* Add an MD5 key for the peer of connected socket @addr_sk; ownership
 * of @newkey passes to tcp_v6_md5_do_add().
 */
static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}
627
/* Remove the MD5 key for IPv6 peer @peer.
 *
 * Returns 0 on success, -ENOENT if no entry matches.  Callers must
 * ensure tp->md5sig_info is non-NULL (see tcp_v6_parse_md5_keys()).
 */
static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				/* entries6 was decremented above, so
				 * (entries6 - i) is exactly the number of
				 * slots that follow slot i.
				 */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			/* Drop this entry's reference on the MD5 pool. */
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}
657
/* Free every MD5 key attached to the socket — both the IPv6 and the
 * IPv4 tables — and reset the per-family bookkeeping.  Each non-empty
 * table drops one reference on the global MD5 pool.  Assumes
 * tp->md5sig_info is non-NULL.
 */
static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}
685
/* setsockopt(TCP_MD5SIG) handler: add or delete an MD5 signature key.
 *
 * A zero tcpm_keylen means "delete the key for this peer".  IPv4-mapped
 * IPv6 addresses are routed to the IPv4 key table helpers.  Returns 0
 * or a negative errno.
 */
static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		/* Zero key length: delete.  The md5sig_info check here
		 * also protects tcp_v6_md5_do_del(), which assumes it.
		 */
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		/* First key on this socket: allocate the key database. */
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	/* Copy the key out of user memory; ownership of newkey passes to
	 * the do_add helpers (stored or freed there).
	 */
	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}
734
/* Compute the RFC 2385 TCP-MD5 digest into @md5_hash (16 bytes).
 *
 * The digest covers, in order: (1) an IPv6 pseudo-header, (2) the TCP
 * header with a zeroed checksum field, (3) the segment payload if any,
 * and (4) the shared key.  The checksum field of @th is temporarily
 * cleared and restored on success.
 *
 * NOTE(review): the function returns 0 even when hashing fails — the
 * digest is zero-filled instead, and on the clear_hash paths th->check
 * is left zeroed rather than restored.  Callers cannot distinguish
 * failure from success.
 */
static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
				   struct in6_addr *saddr,
				   struct in6_addr *daddr,
				   struct tcphdr *th, int protocol,
				   unsigned int tcplen)
{
	struct scatterlist sg[4];
	__u16 data_len;
	int block = 0;
	__sum16 cksum;
	struct tcp_md5sig_pool *hp;
	struct tcp6_pseudohdr *bp;
	struct hash_desc *desc;
	int err;
	unsigned int nbytes = 0;

	hp = tcp_get_md5sig_pool();
	if (!hp) {
		printk(KERN_WARNING "%s(): hash pool not found...\n", __func__);
		goto clear_hash_noput;
	}
	bp = &hp->md5_blk.ip6;
	desc = &hp->md5_desc;

	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->len = htonl(tcplen);
	bp->protocol = htonl(protocol);

	sg_init_table(sg, 4);

	sg_set_buf(&sg[block++], bp, sizeof(*bp));
	nbytes += sizeof(*bp);

	/* 2. TCP header, excluding options */
	cksum = th->check;
	th->check = 0;	/* checksum field must be zero while digesting */
	sg_set_buf(&sg[block++], th, sizeof(*th));
	nbytes += sizeof(*th);

	/* 3. TCP segment data (if any) */
	data_len = tcplen - (th->doff << 2);
	if (data_len > 0) {
		u8 *data = (u8 *)th + (th->doff << 2);
		sg_set_buf(&sg[block++], data, data_len);
		nbytes += data_len;
	}

	/* 4. shared key */
	sg_set_buf(&sg[block++], key->key, key->keylen);
	nbytes += key->keylen;

	sg_mark_end(&sg[block - 1]);

	/* Now store the hash into the packet */
	err = crypto_hash_init(desc);
	if (err) {
		printk(KERN_WARNING "%s(): hash_init failed\n", __func__);
		goto clear_hash;
	}
	err = crypto_hash_update(desc, sg, nbytes);
	if (err) {
		printk(KERN_WARNING "%s(): hash_update failed\n", __func__);
		goto clear_hash;
	}
	err = crypto_hash_final(desc, md5_hash);
	if (err) {
		printk(KERN_WARNING "%s(): hash_final failed\n", __func__);
		goto clear_hash;
	}

	/* Reset header, and free up the crypto */
	tcp_put_md5sig_pool();
	th->check = cksum;	/* restore the caller's checksum */
out:
	return 0;
clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	/* Failure: hand back an all-zero digest (still "success"). */
	memset(md5_hash, 0, 16);
	goto out;
}
818
819 static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
820                                 struct sock *sk,
821                                 struct dst_entry *dst,
822                                 struct request_sock *req,
823                                 struct tcphdr *th, int protocol,
824                                 unsigned int tcplen)
825 {
826         struct in6_addr *saddr, *daddr;
827
828         if (sk) {
829                 saddr = &inet6_sk(sk)->saddr;
830                 daddr = &inet6_sk(sk)->daddr;
831         } else {
832                 saddr = &inet6_rsk(req)->loc_addr;
833                 daddr = &inet6_rsk(req)->rmt_addr;
834         }
835         return tcp_v6_do_calc_md5_hash(md5_hash, key,
836                                        saddr, daddr,
837                                        th, protocol, tcplen);
838 }
839
/* Verify the TCP MD5 signature option (RFC 2385) on an inbound segment.
 *
 * Returns 0 when the segment is acceptable, 1 when it must be dropped:
 * either a signature was expected but missing, present but not expected,
 * or present but not matching the locally recomputed hash.
 */
static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
{
        __u8 *hash_location = NULL;
        struct tcp_md5sig_key *hash_expected;
        struct ipv6hdr *ip6h = ipv6_hdr(skb);
        struct tcphdr *th = tcp_hdr(skb);
        /* number of option bytes following the fixed TCP header */
        int length = (th->doff << 2) - sizeof (*th);
        int genhash;
        u8 *ptr;
        u8 newhash[16];

        /* Is an MD5 key configured for this peer's source address? */
        hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);

        /* If the TCP option is too short, we can short cut */
        if (length < TCPOLEN_MD5SIG)
                return hash_expected ? 1 : 0;

        /* parse options */
        ptr = (u8*)(th + 1);
        while (length > 0) {
                int opcode = *ptr++;
                int opsize;

                switch(opcode) {
                case TCPOPT_EOL:
                        goto done_opts;
                case TCPOPT_NOP:
                        /* NOP is one byte with no length field */
                        length--;
                        continue;
                default:
                        opsize = *ptr++;
                        /* malformed option length: stop parsing */
                        if (opsize < 2 || opsize > length)
                                goto done_opts;
                        if (opcode == TCPOPT_MD5SIG) {
                                hash_location = ptr;
                                goto done_opts;
                        }
                }
                ptr += opsize - 2;
                length -= opsize;
        }

done_opts:
        /* do we have a hash as expected? */
        if (!hash_expected) {
                if (!hash_location)
                        return 0;
                if (net_ratelimit()) {
                        printk(KERN_INFO "MD5 Hash NOT expected but found "
                               "(" NIP6_FMT ", %u)->"
                               "(" NIP6_FMT ", %u)\n",
                               NIP6(ip6h->saddr), ntohs(th->source),
                               NIP6(ip6h->daddr), ntohs(th->dest));
                }
                return 1;
        }

        if (!hash_location) {
                if (net_ratelimit()) {
                        printk(KERN_INFO "MD5 Hash expected but NOT found "
                               "(" NIP6_FMT ", %u)->"
                               "(" NIP6_FMT ", %u)\n",
                               NIP6(ip6h->saddr), ntohs(th->source),
                               NIP6(ip6h->daddr), ntohs(th->dest));
                }
                return 1;
        }

        /* check the signature */
        /* Recompute the digest over the whole segment and compare it
         * against the 16 bytes carried in the option. */
        genhash = tcp_v6_do_calc_md5_hash(newhash,
                                          hash_expected,
                                          &ip6h->saddr, &ip6h->daddr,
                                          th, sk->sk_protocol,
                                          skb->len);
        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                if (net_ratelimit()) {
                        printk(KERN_INFO "MD5 Hash %s for "
                               "(" NIP6_FMT ", %u)->"
                               "(" NIP6_FMT ", %u)\n",
                               genhash ? "failed" : "mismatch",
                               NIP6(ip6h->saddr), ntohs(th->source),
                               NIP6(ip6h->daddr), ntohs(th->dest));
                }
                return 1;
        }
        return 0;
}
927 #endif
928
/* Request-sock operations for IPv6 TCP: how to (re)transmit a SYN-ACK,
 * ACK a pending request, destroy a request, and reset a bogus attempt. */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
        .family         =       AF_INET6,
        .obj_size       =       sizeof(struct tcp6_request_sock),
        .rtx_syn_ack    =       tcp_v6_send_synack,
        .send_ack       =       tcp_v6_reqsk_send_ack,
        .destructor     =       tcp_v6_reqsk_destructor,
        .send_reset     =       tcp_v6_send_reset
};
937
#ifdef CONFIG_TCP_MD5SIG
/* MD5 key lookup hook used while a connection is still a request sock. */
static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
        .md5_lookup     =       tcp_v6_reqsk_md5_lookup,
};
#endif
943
/* Timewait-sock operations for IPv6 TCP (size, uniqueness check when a
 * new connection collides with TIME_WAIT state, and teardown). */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
        .twsk_obj_size  = sizeof(struct tcp6_timewait_sock),
        .twsk_unique    = tcp_twsk_unique,
        .twsk_destructor= tcp_twsk_destructor,
};
949
950 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
951 {
952         struct ipv6_pinfo *np = inet6_sk(sk);
953         struct tcphdr *th = tcp_hdr(skb);
954
955         if (skb->ip_summed == CHECKSUM_PARTIAL) {
956                 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,  0);
957                 skb->csum_start = skb_transport_header(skb) - skb->head;
958                 skb->csum_offset = offsetof(struct tcphdr, check);
959         } else {
960                 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
961                                             csum_partial((char *)th, th->doff<<2,
962                                                          skb->csum));
963         }
964 }
965
966 static int tcp_v6_gso_send_check(struct sk_buff *skb)
967 {
968         struct ipv6hdr *ipv6h;
969         struct tcphdr *th;
970
971         if (!pskb_may_pull(skb, sizeof(*th)))
972                 return -EINVAL;
973
974         ipv6h = ipv6_hdr(skb);
975         th = tcp_hdr(skb);
976
977         th->check = 0;
978         th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
979                                      IPPROTO_TCP, 0);
980         skb->csum_start = skb_transport_header(skb) - skb->head;
981         skb->csum_offset = offsetof(struct tcphdr, check);
982         skb->ip_summed = CHECKSUM_PARTIAL;
983         return 0;
984 }
985
/* Send a RST in response to @skb.
 *
 * The reply is built on a fresh skb and transmitted via the per-netns
 * control socket, since @sk may be NULL (no matching local socket).
 * Addresses and ports are swapped, sequence numbers mirrored per
 * RFC 793, and the segment is MD5-signed when a key exists for the
 * peer (looked up via @sk, so unsigned when @sk is NULL).
 */
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
        struct tcphdr *th = tcp_hdr(skb), *t1;
        struct sk_buff *buff;
        struct flowi fl;
        struct net *net = dev_net(skb->dst->dev);
        struct sock *ctl_sk = net->ipv6.tcp_sk;
        unsigned int tot_len = sizeof(*th);
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key;
#endif

        /* Never answer a RST with a RST (RFC 793) */
        if (th->rst)
                return;

        if (!ipv6_unicast_destination(skb))
                return;

#ifdef CONFIG_TCP_MD5SIG
        if (sk)
                key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
        else
                key = NULL;

        if (key)
                tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

        /*
         * We need to grab some memory, and put together an RST,
         * and then put it into the queue to be sent.
         */

        buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
                         GFP_ATOMIC);
        if (buff == NULL)
                return;

        skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

        t1 = (struct tcphdr *) skb_push(buff, tot_len);

        /* Swap the send and the receive. */
        memset(t1, 0, sizeof(*t1));
        t1->dest = th->source;
        t1->source = th->dest;
        t1->doff = tot_len / 4;
        t1->rst = 1;

        if(th->ack) {
                /* RST in response to an ACK: seq = their ack_seq */
                t1->seq = th->ack_seq;
        } else {
                /* Otherwise ACK everything they sent, counting SYN/FIN
                 * flags as one sequence number each. */
                t1->ack = 1;
                t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
                                    + skb->len - (th->doff<<2));
        }

#ifdef CONFIG_TCP_MD5SIG
        if (key) {
                /* Option header: NOP NOP MD5SIG <len>, hash follows. */
                __be32 *opt = (__be32*)(t1 + 1);
                opt[0] = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_MD5SIG << 8) |
                               TCPOLEN_MD5SIG);
                /* Sign from our point of view: incoming daddr is the
                 * outgoing source. */
                tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key,
                                        &ipv6_hdr(skb)->daddr,
                                        &ipv6_hdr(skb)->saddr,
                                        t1, IPPROTO_TCP, tot_len);
        }
#endif

        buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

        memset(&fl, 0, sizeof(fl));
        /* Reply flow: destinations/sources reversed from the input skb. */
        ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
        ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

        t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
                                    sizeof(*t1), IPPROTO_TCP,
                                    buff->csum);

        fl.proto = IPPROTO_TCP;
        fl.oif = inet6_iif(skb);
        fl.fl_ip_dport = t1->dest;
        fl.fl_ip_sport = t1->source;
        security_skb_classify_flow(skb, &fl);

        /* Pass a socket to ip6_dst_lookup either it is for RST
         * Underlying function will use this to retrieve the network
         * namespace
         */
        if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {

                if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
                        ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
                        TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
                        TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
                        return;
                }
        }

        /* Route or policy lookup failed: drop our reply. */
        kfree_skb(buff);
}
1089
/* Send a bare ACK in response to @skb, outside the normal data path.
 *
 * Used for TIME_WAIT re-ACKs (@tw set) and for ACKing SYN_RECV
 * requests (@tw NULL).  The segment carries the given @seq/@ack/@win,
 * an echoed timestamp option when @ts is nonzero, and an MD5 signature
 * when a key is available (from skb->sk, or from the timewait state's
 * stored key).  Transmitted via the per-netns control socket.
 */
static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
                            struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
{
        struct tcphdr *th = tcp_hdr(skb), *t1;
        struct sk_buff *buff;
        struct flowi fl;
        struct net *net = dev_net(skb->dev);
        struct sock *ctl_sk = net->ipv6.tcp_sk;
        unsigned int tot_len = sizeof(struct tcphdr);
        __be32 *topt;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key;
        /* stack copy used to present the timewait key via the
         * tcp_md5sig_key interface */
        struct tcp_md5sig_key tw_key;
#endif

#ifdef CONFIG_TCP_MD5SIG
        if (!tw && skb->sk) {
                key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr);
        } else if (tw && tw->tw_md5_keylen) {
                tw_key.key = tw->tw_md5_key;
                tw_key.keylen = tw->tw_md5_keylen;
                key = &tw_key;
        } else {
                key = NULL;
        }
#endif

        /* Grow the header for each option we will emit. */
        if (ts)
                tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
        if (key)
                tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

        buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
                         GFP_ATOMIC);
        if (buff == NULL)
                return;

        skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

        t1 = (struct tcphdr *) skb_push(buff,tot_len);

        /* Swap the send and the receive. */
        memset(t1, 0, sizeof(*t1));
        t1->dest = th->source;
        t1->source = th->dest;
        t1->doff = tot_len/4;
        t1->seq = htonl(seq);
        t1->ack_seq = htonl(ack);
        t1->ack = 1;
        t1->window = htons(win);

        /* Options start right after the fixed header. */
        topt = (__be32 *)(t1 + 1);

        if (ts) {
                /* NOP NOP TIMESTAMP <len> <tsval> <tsecr> */
                *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
                *topt++ = htonl(tcp_time_stamp);
                *topt = htonl(ts);
        }

#ifdef CONFIG_TCP_MD5SIG
        if (key) {
                *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
                /* Sign from our point of view: incoming daddr becomes
                 * the outgoing source address. */
                tcp_v6_do_calc_md5_hash((__u8 *)topt, key,
                                        &ipv6_hdr(skb)->daddr,
                                        &ipv6_hdr(skb)->saddr,
                                        t1, IPPROTO_TCP, tot_len);
        }
#endif

        buff->csum = csum_partial((char *)t1, tot_len, 0);

        memset(&fl, 0, sizeof(fl));
        /* Reply flow: reverse the addresses of the input skb. */
        ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
        ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

        t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
                                    tot_len, IPPROTO_TCP,
                                    buff->csum);

        fl.proto = IPPROTO_TCP;
        fl.oif = inet6_iif(skb);
        fl.fl_ip_dport = t1->dest;
        fl.fl_ip_sport = t1->source;
        security_skb_classify_flow(skb, &fl);

        if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
                if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
                        ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
                        TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
                        return;
                }
        }

        /* Route or policy lookup failed: drop our reply. */
        kfree_skb(buff);
}
1189
1190 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1191 {
1192         struct inet_timewait_sock *tw = inet_twsk(sk);
1193         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1194
1195         tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1196                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1197                         tcptw->tw_ts_recent);
1198
1199         inet_twsk_put(tw);
1200 }
1201
1202 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
1203 {
1204         tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
1205 }
1206
1207
1208 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1209 {
1210         struct request_sock *req, **prev;
1211         const struct tcphdr *th = tcp_hdr(skb);
1212         struct sock *nsk;
1213
1214         /* Find possible connection requests. */
1215         req = inet6_csk_search_req(sk, &prev, th->source,
1216                                    &ipv6_hdr(skb)->saddr,
1217                                    &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1218         if (req)
1219                 return tcp_check_req(sk, skb, req, prev);
1220
1221         nsk = __inet6_lookup_established(sk->sk_net, &tcp_hashinfo,
1222                         &ipv6_hdr(skb)->saddr, th->source,
1223                         &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1224
1225         if (nsk) {
1226                 if (nsk->sk_state != TCP_TIME_WAIT) {
1227                         bh_lock_sock(nsk);
1228                         return nsk;
1229                 }
1230                 inet_twsk_put(inet_twsk(nsk));
1231                 return NULL;
1232         }
1233
1234 #ifdef CONFIG_SYN_COOKIES
1235         if (!th->rst && !th->syn && th->ack)
1236                 sk = cookie_v6_check(sk, skb);
1237 #endif
1238         return sk;
1239 }
1240
1241 /* FIXME: this is substantially similar to the ipv4 code.
1242  * Can some kind of merge be done? -- erics
1243  */
/* Handle an incoming SYN on a listening IPv6 socket.
 *
 * Allocates and queues a request sock, parses the SYN's TCP options,
 * picks an initial sequence number (or issues a SYN cookie under
 * queue pressure), and sends the SYN-ACK.  v6-mapped IPv4 SYNs are
 * diverted to tcp_v4_conn_request().  Always returns 0: failures are
 * silent drops, never resets.
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct inet6_request_sock *treq;
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_options_received tmp_opt;
        struct tcp_sock *tp = tcp_sk(sk);
        struct request_sock *req = NULL;
        /* nonzero when this is a retransmit handled via timewait recycle */
        __u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
        int want_cookie = 0;
#else
        /* compile the cookie branches away when syncookies are off */
#define want_cookie 0
#endif

        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                goto drop;

        /* SYN queue full: fall back to cookies or drop (SYN flood). */
        if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
                if (net_ratelimit())
                        syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
                if (sysctl_tcp_syncookies)
                        want_cookie = 1;
                else
#endif
                goto drop;
        }

        /* Accept queue full and too many young requests: drop. */
        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;

        req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
        if (req == NULL)
                goto drop;

#ifdef CONFIG_TCP_MD5SIG
        tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

        tcp_clear_options(&tmp_opt);
        tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
        tmp_opt.user_mss = tp->rx_opt.user_mss;

        tcp_parse_options(skb, &tmp_opt, 0);

        /* Cookies cannot encode options: discard what we parsed. */
        if (want_cookie) {
                tcp_clear_options(&tmp_opt);
                tmp_opt.saw_tstamp = 0;
        }

        tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
        tcp_openreq_init(req, &tmp_opt, skb);

        treq = inet6_rsk(req);
        ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
        ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
        treq->pktopts = NULL;
        if (!want_cookie)
                TCP_ECN_create_request(req, tcp_hdr(skb));

        if (want_cookie) {
                isn = cookie_v6_init_sequence(sk, skb, &req->mss);
        } else if (!isn) {
                /* Latch the SYN's packet options if the user asked for
                 * any of them; the skb is held until accept time. */
                if (ipv6_opt_accepted(sk, skb) ||
                    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
                    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
                        atomic_inc(&skb->users);
                        treq->pktopts = skb;
                }
                treq->iif = sk->sk_bound_dev_if;

                /* So that link locals have meaning */
                if (!sk->sk_bound_dev_if &&
                    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
                        treq->iif = inet6_iif(skb);

                isn = tcp_v6_init_sequence(skb);
        }

        tcp_rsk(req)->snt_isn = isn;

        security_inet_conn_request(sk, skb, req);

        if (tcp_v6_send_synack(sk, req))
                goto drop;

        /* Cookie requests are stateless: only hash real requests. */
        if (!want_cookie) {
                inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
                return 0;
        }

drop:
        if (req)
                reqsk_free(req);

        return 0; /* don't send reset */
}
1344
/* Create the child socket that completes a three-way handshake.
 *
 * Two paths: a v6-mapped IPv4 connection (skb->protocol == ETH_P_IP)
 * delegates to tcp_v4_syn_recv_sock() and then patches the child's
 * IPv6 bookkeeping; a native IPv6 connection routes (if @dst is NULL),
 * clones the listener state, copies addresses/options from the request,
 * inherits any MD5 key, and hashes the child in.  Returns the new
 * socket or NULL on failure (accept-queue overflow, route/xfrm
 * failure, or allocation failure).
 */
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                                          struct request_sock *req,
                                          struct dst_entry *dst)
{
        struct inet6_request_sock *treq = inet6_rsk(req);
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct tcp6_sock *newtcp6sk;
        struct inet_sock *newinet;
        struct tcp_sock *newtp;
        struct sock *newsk;
        struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key;
#endif

        if (skb->protocol == htons(ETH_P_IP)) {
                /*
                 *      v6 mapped
                 */

                newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

                if (newsk == NULL)
                        return NULL;

                newtcp6sk = (struct tcp6_sock *)newsk;
                inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

                newinet = inet_sk(newsk);
                newnp = inet6_sk(newsk);
                newtp = tcp_sk(newsk);

                memcpy(newnp, np, sizeof(struct ipv6_pinfo));

                /* Express the IPv4 endpoints as ::ffff:a.b.c.d */
                ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
                              newinet->daddr);

                ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
                              newinet->saddr);

                ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

                /* From here on the child behaves as an IPv4 socket. */
                inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
                newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
                newnp->mcast_oif   = inet6_iif(skb);
                newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
                 * here, tcp_create_openreq_child now does this for us, see the comment in
                 * that function for the gory details. -acme
                 */

                /* It is tricky place. Until this moment IPv4 tcp
                   worked with IPv6 icsk.icsk_af_ops.
                   Sync it now.
                 */
                tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

                return newsk;
        }

        opt = np->opt;

        if (sk_acceptq_is_full(sk))
                goto out_overflow;

        /* No cached route from the request: look one up now. */
        if (dst == NULL) {
                struct in6_addr *final_p = NULL, final;
                struct flowi fl;

                memset(&fl, 0, sizeof(fl));
                fl.proto = IPPROTO_TCP;
                ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
                /* Source routing: route to the first hop, remember the
                 * real final destination. */
                if (opt && opt->srcrt) {
                        struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
                        ipv6_addr_copy(&final, &fl.fl6_dst);
                        ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                        final_p = &final;
                }
                ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
                fl.oif = sk->sk_bound_dev_if;
                fl.fl_ip_dport = inet_rsk(req)->rmt_port;
                fl.fl_ip_sport = inet_sk(sk)->sport;
                security_req_classify_flow(req, &fl);

                if (ip6_dst_lookup(sk, &dst, &fl))
                        goto out;

                if (final_p)
                        ipv6_addr_copy(&fl.fl6_dst, final_p);

                if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
                        goto out;
        }

        newsk = tcp_create_openreq_child(sk, req, skb);
        if (newsk == NULL)
                goto out;

        /*
         * No need to charge this sock to the relevant IPv6 refcnt debug socks
         * count here, tcp_create_openreq_child now does this for us, see the
         * comment in that function for the gory details. -acme
         */

        newsk->sk_gso_type = SKB_GSO_TCPV6;
        /* The child takes ownership of the dst reference. */
        __ip6_dst_store(newsk, dst, NULL, NULL);

        newtcp6sk = (struct tcp6_sock *)newsk;
        inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

        newtp = tcp_sk(newsk);
        newinet = inet_sk(newsk);
        newnp = inet6_sk(newsk);

        memcpy(newnp, np, sizeof(struct ipv6_pinfo));

        ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
        ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
        ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
        newsk->sk_bound_dev_if = treq->iif;

        /* Now IPv6 options...

           First: no IPv4 options.
         */
        newinet->opt = NULL;
        newnp->ipv6_fl_list = NULL;

        /* Clone RX bits */
        newnp->rxopt.all = np->rxopt.all;

        /* Clone pktoptions received with SYN */
        newnp->pktoptions = NULL;
        if (treq->pktopts != NULL) {
                newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
                kfree_skb(treq->pktopts);
                treq->pktopts = NULL;
                if (newnp->pktoptions)
                        skb_set_owner_r(newnp->pktoptions, newsk);
        }
        newnp->opt        = NULL;
        newnp->mcast_oif  = inet6_iif(skb);
        newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

        /* Clone native IPv6 options from listening socket (if any)

           Yes, keeping reference count would be much more clever,
           but we make one more one thing there: reattach optmem
           to newsk.
         */
        if (opt) {
                newnp->opt = ipv6_dup_options(newsk, opt);
                if (opt != np->opt)
                        sock_kfree_s(sk, opt, opt->tot_len);
        }

        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (newnp->opt)
                inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
                                                     newnp->opt->opt_flen);

        tcp_mtup_init(newsk);
        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
        tcp_initialize_rcv_mss(newsk);

        /* IPv4 view of this socket is the loopback placeholder. */
        newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
        /* Copy over the MD5 key from the original socket */
        if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
                /* We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
                 * memory, then we end up not copying the key
                 * across. Shucks.
                 */
                char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
                if (newkey != NULL)
                        tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
                                          newkey, key->keylen);
        }
#endif

        __inet6_hash(newsk);
        inet_inherit_port(sk, newsk);

        return newsk;

out_overflow:
        NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
        NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
        /* Free a source-routing-modified copy of the listener options. */
        if (opt && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
        return NULL;
}
1550
1551 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1552 {
1553         if (skb->ip_summed == CHECKSUM_COMPLETE) {
1554                 if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
1555                                   &ipv6_hdr(skb)->daddr, skb->csum)) {
1556                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1557                         return 0;
1558                 }
1559         }
1560
1561         skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
1562                                               &ipv6_hdr(skb)->saddr,
1563                                               &ipv6_hdr(skb)->daddr, 0));
1564
1565         if (skb->len <= 76) {
1566                 return __skb_checksum_complete(skb);
1567         }
1568         return 0;
1569 }
1570
1571 /* The socket must have it's spinlock held when we get
1572  * here.
1573  *
1574  * We have a potential double-lock case here, so even when
1575  * doing backlog processing we use the BH locking scheme.
1576  * This is because we cannot sleep with the original spinlock
1577  * held.
1578  */
/* Per-socket receive path for IPv6 TCP (socket spinlock held).
 *
 * Dispatches by socket state: fast path for ESTABLISHED, request
 * handling for LISTEN, and the generic state machine otherwise.
 * v6-mapped IPv4 packets are diverted to tcp_v4_do_rcv().  When the
 * user enabled IPV6_PKTOPTIONS, a clone of the skb is latched into
 * np->pktoptions once the segment is accepted in order.
 * Always returns 0; bad segments are reset or silently discarded.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp;
        /* clone held for the IPV6_PKTOPTIONS latch; freed if unused */
        struct sk_buff *opt_skb = NULL;

        /* Imagine: socket is IPv6. IPv4 packet arrives,
           goes to IPv4 receive handler and backlogged.
           From backlog it always goes here. Kerboom...
           Fortunately, tcp_rcv_established and rcv_established
           handle them correctly, but it is not case with
           tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
         */

        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
        /* Drop segments that fail RFC 2385 MD5 verification. */
        if (tcp_v6_inbound_md5_hash (sk, skb))
                goto discard;
#endif

        if (sk_filter(sk, skb))
                goto discard;

        /*
         *      socket locking is here for SMP purposes as backlog rcv
         *      is currently called with bh processing disabled.
         */

        /* Do Stevens' IPV6_PKTOPTIONS.

           Yes, guys, it is the only place in our code, where we
           may make it not affecting IPv4.
           The rest of code is protocol independent,
           and I do not like idea to uglify IPv4.

           Actually, all the idea behind IPV6_PKTOPTIONS
           looks not very well thought. For now we latch
           options, received in the last packet, enqueued
           by tcp. Feel free to propose better solution.
                                               --ANK (980728)
         */
        if (np->rxopt.all)
                opt_skb = skb_clone(skb, GFP_ATOMIC);

        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                TCP_CHECK_TIMER(sk);
                if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
                        goto reset;
                TCP_CHECK_TIMER(sk);
                if (opt_skb)
                        goto ipv6_pktoptions;
                return 0;
        }

        if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
                goto csum_err;

        if (sk->sk_state == TCP_LISTEN) {
                struct sock *nsk = tcp_v6_hnd_req(sk, skb);
                if (!nsk)
                        goto discard;

                /*
                 * Queue it on the new socket if the new socket is active,
                 * otherwise we just shortcircuit this and continue with
                 * the new socket..
                 */
                if(nsk != sk) {
                        if (tcp_child_process(sk, nsk, skb))
                                goto reset;
                        if (opt_skb)
                                __kfree_skb(opt_skb);
                        return 0;
                }
        }

        TCP_CHECK_TIMER(sk);
        if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
                goto reset;
        TCP_CHECK_TIMER(sk);
        if (opt_skb)
                goto ipv6_pktoptions;
        return 0;

reset:
        tcp_v6_send_reset(sk, skb);
discard:
        if (opt_skb)
                __kfree_skb(opt_skb);
        kfree_skb(skb);
        return 0;
csum_err:
        TCP_INC_STATS_BH(TCP_MIB_INERRS);
        goto discard;


ipv6_pktoptions:
        /* Do you ask, what is it?

           1. skb was enqueued by tcp.
           2. skb is added to tail of read queue, rather than out of order.
           3. socket is not in passive state.
           4. Finally, it really contains options, which user wants to receive.
         */
        tp = tcp_sk(sk);
        if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
            !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
                if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
                        np->mcast_oif = inet6_iif(opt_skb);
                if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
                        np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
                if (ipv6_opt_accepted(sk, opt_skb)) {
                        /* Swap in the new options skb; the old latched
                         * one (if any) falls out and is freed below. */
                        skb_set_owner_r(opt_skb, sk);
                        opt_skb = xchg(&np->pktoptions, opt_skb);
                } else {
                        __kfree_skb(opt_skb);
                        opt_skb = xchg(&np->pktoptions, NULL);
                }
        }

        if (opt_skb)
                kfree_skb(opt_skb);
        return 0;
}
1705
/*
 * Main IPv6 TCP receive entry point, invoked from the inet6 protocol
 * handler for every incoming TCP segment.
 *
 * Validates the TCP header and checksum, fills in the per-skb control
 * block, looks up the owning socket and then either processes the
 * segment in softirq context, hands it to the prequeue, or appends it
 * to the socket backlog when a process context owns the socket lock.
 *
 * Returns 0 when the skb has been consumed, -1 when processing failed.
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	/* Only packets unicast to this host are of interest. */
	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	/* Make sure the fixed-size TCP header is in linear skb data. */
	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	/* doff counts 32-bit words; it must cover at least the base header. */
	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	/* Pull the options area into linear data as well. */
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	/* pskb_may_pull() may have reallocated; reload the header pointer. */
	th = tcp_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	/* SYN and FIN each consume one unit of sequence space. */
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup(dev_net(skb->dev), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest),
			inet6_iif(skb));

	if (!sk)
		goto no_tcp_socket;

process:
	/* TIME-WAIT sockets get their own reduced state machine. */
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	/* Apply any attached socket filter (BPF). */
	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = get_softnet_dma();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			/* Try the prequeue first; fall back to direct receive. */
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else
		/* Socket locked by a process context: defer to the backlog. */
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		/* Valid segment, no matching connection: answer with RST. */
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		/* A new SYN may legitimately reopen the connection; if a
		 * matching listener exists, retire the TIME-WAIT socket
		 * and restart processing on the listener.
		 */
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
1846
/* Remember peer timestamp state for TIME-WAIT recycling.
 * Not implemented for IPv6; always reports "nothing stored".
 */
static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}
1852
/* Address-family-specific connection-socket operations used by
 * native (non-mapped) IPv6 TCP sockets.
 */
static struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header    = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp    = tcp_v6_remember_stamp,
	.net_header_len    = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1871
#ifdef CONFIG_TCP_MD5SIG
/* TCP MD5 signature operations for native IPv6 sockets. */
static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_calc_md5_hash,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1880
/*
 *	TCP over IPv4 via INET6 API
 */

/* Operations used when an AF_INET6 socket carries an IPv4-mapped
 * connection: transmit, checksum and header handling go through the
 * IPv4 routines, while socket-option handling stays with IPv6.
 */
static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header    = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp    = tcp_v4_remember_stamp,
	.net_header_len    = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1903
#ifdef CONFIG_TCP_MD5SIG
/* TCP MD5 signature operations for IPv4-mapped sockets: lookup and
 * hash computation use the IPv4 routines, key add/parse remain IPv6.
 */
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_calc_md5_hash,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1912
/* NOTE: A lot of things set to zero explicitly by call to
 *	 sk_alloc() so need not be done here.
 */
/* ->init hook of tcpv6_prot: initialise queues, timers and default
 * TCP parameters for a freshly allocated AF_INET6 TCP socket.
 * Always returns 0.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	/* Initial RTO and smoothed mean deviation before any RTT samples. */
	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;	/* effectively "infinite" ssthresh */
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;		/* conservative default MSS */

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	/* Native IPv6 ops by default; may be switched to ipv6_mapped
	 * when the socket ends up carrying an IPv4-mapped connection.
	 */
	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	/* Default buffer sizes from the tcp_{w,r}mem sysctls. */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}
1963
/* ->destroy hook of tcpv6_prot: release MD5 keys (if configured),
 * run the shared TCP destructor, then the generic IPv6 socket cleanup.
 */
static int tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}
1974
1975 #ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */

/* Emit one /proc/net/tcp6 row for a pending open request (SYN_RECV).
 * @i is the row index, @uid the uid of the owning listener @sk.
 */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;	/* ticks until SYN-ACK rexmit */
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	/* Clamp an already-expired timer to zero for display. */
	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_sk(sk)->sport),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0,0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
2007
/* Emit one /proc/net/tcp6 row for a listening or established socket.
 * @i is the row index.
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp  = ntohs(inet->sport);

	/* Pick the most relevant pending timer for the "tr" column:
	 * 1 = retransmit, 4 = zero-window probe, 2 = keepalive, 0 = none.
	 */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;	/* prints as zero remaining */
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   /* rx_queue: backlog for listeners, unread bytes otherwise */
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   icsk->icsk_rto,
		   icsk->icsk_ack.ato,
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
		   );
}
2062
/* Emit one /proc/net/tcp6 row for a TIME-WAIT socket.
 * @i is the row index.
 */
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;	/* ticks until the tw socket dies */

	/* Clamp an already-expired timer to zero for display. */
	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
2091
2092 static int tcp6_seq_show(struct seq_file *seq, void *v)
2093 {
2094         struct tcp_iter_state *st;
2095
2096         if (v == SEQ_START_TOKEN) {
2097                 seq_puts(seq,
2098                          "  sl  "
2099                          "local_address                         "
2100                          "remote_address                        "
2101                          "st tx_queue rx_queue tr tm->when retrnsmt"
2102                          "   uid  timeout inode\n");
2103                 goto out;
2104         }
2105         st = seq->private;
2106
2107         switch (st->state) {
2108         case TCP_SEQ_STATE_LISTENING:
2109         case TCP_SEQ_STATE_ESTABLISHED:
2110                 get_tcp6_sock(seq, v, st->num);
2111                 break;
2112         case TCP_SEQ_STATE_OPENREQ:
2113                 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2114                 break;
2115         case TCP_SEQ_STATE_TIME_WAIT:
2116                 get_timewait6_sock(seq, v, st->num);
2117                 break;
2118         }
2119 out:
2120         return 0;
2121 }
2122
/* Storage for the /proc/net/tcp6 file operations; handed to the
 * shared TCP proc code via tcp6_seq_afinfo below.
 */
static struct file_operations tcp6_seq_fops;
/* Glue describing the "tcp6" seq_file to the shared TCP proc code. */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_show	= tcp6_seq_show,
	.seq_fops	= &tcp6_seq_fops,
};
2131
/* Create the per-namespace /proc/net/tcp6 entry.
 * Returns 0 on success or a negative errno from tcp_proc_register().
 */
int tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}
2136
/* Remove the per-namespace /proc/net/tcp6 entry. */
void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
2141 #endif
2142
DEFINE_PROTO_INUSE(tcpv6)

/* Transport protocol descriptor for AF_INET6 SOCK_STREAM sockets.
 * Mostly reuses the generic/IPv4 TCP entry points; only init/destroy,
 * the backlog receive path and hashing are IPv6-specific.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	REF_PROTO_INUSE(tcpv6)
};
2182
/* inet6 layer hooks for IPPROTO_TCP: receive, ICMPv6 error handling
 * and GSO.  INET6_PROTO_NOPOLICY: the xfrm policy check is performed
 * inside tcp_v6_rcv() itself rather than by the inet6 layer.
 */
static struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check =	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
2190
/* inetsw6 entry mapping AF_INET6 SOCK_STREAM/IPPROTO_TCP sockets to
 * tcpv6_prot and the generic inet6 stream socket operations.
 */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.capability	=	-1,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
2201
2202 static int tcpv6_net_init(struct net *net)
2203 {
2204         int err;
2205         struct socket *sock;
2206         struct sock *sk;
2207
2208         err = inet_csk_ctl_sock_create(&sock, PF_INET6, SOCK_RAW, IPPROTO_TCP);
2209         if (err)
2210                 return err;
2211
2212         net->ipv6.tcp_sk = sk = sock->sk;
2213         sk_change_net(sk, net);
2214         return err;
2215 }
2216
/* Per-namespace teardown: release the namespace's TCP control socket. */
static void tcpv6_net_exit(struct net *net)
{
	sk_release_kernel(net->ipv6.tcp_sk);
}
2221
/* Per-network-namespace init/exit hooks for TCPv6. */
static struct pernet_operations tcpv6_net_ops = {
	.init = tcpv6_net_init,
	.exit = tcpv6_net_exit,
};
2226
2227 int __init tcpv6_init(void)
2228 {
2229         int ret;
2230
2231         ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2232         if (ret)
2233                 goto out;
2234
2235         /* register inet6 protocol */
2236         ret = inet6_register_protosw(&tcpv6_protosw);
2237         if (ret)
2238                 goto out_tcpv6_protocol;
2239
2240         ret = register_pernet_subsys(&tcpv6_net_ops);
2241         if (ret)
2242                 goto out_tcpv6_protosw;
2243 out:
2244         return ret;
2245
2246 out_tcpv6_protocol:
2247         inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2248 out_tcpv6_protosw:
2249         inet6_unregister_protosw(&tcpv6_protosw);
2250         goto out;
2251 }
2252
/* Unregister IPv6 TCP, in the reverse order of tcpv6_init(). */
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}