tcp: TCP Fast Open Server - support TFO listeners
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 85a3b17..4f70ef0 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -149,6 +149,11 @@ void inet_sock_destruct(struct sock *sk)
                pr_err("Attempt to release alive inet socket %p\n", sk);
                return;
        }
+       if (sk->sk_type == SOCK_STREAM) {
+               struct fastopen_queue *fastopenq =
+                       inet_csk(sk)->icsk_accept_queue.fastopenq;
+               kfree(fastopenq);
+       }
 
        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
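kfree(NULL) is a no-op, so listeners that never enabled TFO pass through this unchanged. The free lives in the destructor rather than in close() because a listener that was shutdown() can re-enter TCP_LISTEN and must keep its queue; the allocation side is the fastopen_init_queue() call in the inet_listen() hunk below. A sketch of that helper, reconstructed from its call sites here rather than quoted verbatim:

    /* sketch: allocate the TFO queue once per listener; only
     * inet_sock_destruct() above ever frees it */
    static inline int fastopen_init_queue(struct sock *sk, int backlog)
    {
            struct request_sock_queue *queue =
                &inet_csk(sk)->icsk_accept_queue;

            if (queue->fastopenq == NULL) {
                    queue->fastopenq = kzalloc(sizeof(struct fastopen_queue),
                                               sk->sk_allocation);
                    if (queue->fastopenq == NULL)
                            return -ENOMEM;
                    spin_lock_init(&queue->fastopenq->lock);
            }
            queue->fastopenq->max_qlen = backlog;   /* cap on pending TFO reqs */
            return 0;
    }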
@@ -157,6 +162,7 @@ void inet_sock_destruct(struct sock *sk)
 
        kfree(rcu_dereference_protected(inet->inet_opt, 1));
        dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
+       dst_release(sk->sk_rx_dst);
        sk_refcnt_debug_dec(sk);
 }
 EXPORT_SYMBOL(inet_sock_destruct);
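The added dst_release() drops the reference on the input route that early demux (wired into tcp_protocol at the end of this patch) caches on the socket; dst_release() accepts NULL, so sockets that never took the early-demux path are unaffected. The pairing, schematically:

    /* sketch: sk_rx_dst holds a counted reference to the input route */
    sk->sk_rx_dst = dst;             /* demux path caches the route */
    ...
    dst_release(sk->sk_rx_dst);      /* inet_sock_destruct() drops it */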
@@ -211,6 +217,26 @@ int inet_listen(struct socket *sock, int backlog)
         * we can only allow the backlog to be adjusted.
         */
        if (old_state != TCP_LISTEN) {
+               /* Check special setups for testing purposes to enable TFO w/o
+                * requiring TCP_FASTOPEN sockopt.
+                * Note that only TCP sockets (SOCK_STREAM) will reach here.
+                * Also fastopenq may already have been allocated because this
+                * socket was in TCP_LISTEN state previously but was
+                * shutdown() (rather than close()).
+                */
+               if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
+                   inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) {
+                       if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
+                               err = fastopen_init_queue(sk, backlog);
+                       else if ((sysctl_tcp_fastopen &
+                                 TFO_SERVER_WO_SOCKOPT2) != 0)
+                               err = fastopen_init_queue(sk,
+                                   ((uint)sysctl_tcp_fastopen) >> 16);
+                       else
+                               err = 0;
+                       if (err)
+                               goto out;
+               }
                err = inet_csk_listen_start(sk, backlog);
                if (err)
                        goto out;
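The sysctl bits used above (values from net/tcp.h in this series): TFO_SERVER_ENABLE (0x2) gates the server side as a whole, TFO_SERVER_WO_SOCKOPT1 (0x400) reuses the listen() backlog as the TFO queue length, and TFO_SERVER_WO_SOCKOPT2 (0x800) takes the length from the sysctl's upper 16 bits, hence the >> 16. Those two are testing shortcuts; a production server opts in per socket:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    /* per-socket opt-in: set the TFO queue length before listen();
     * the server bit in net.ipv4.tcp_fastopen (0x2) must also be set */
    static int listen_tfo(int lfd)
    {
            int qlen = 16;          /* max outstanding TFO requests */

            if (setsockopt(lfd, IPPROTO_TCP, TCP_FASTOPEN,
                           &qlen, sizeof(qlen)) < 0)
                    return -1;
            return listen(lfd, 128);
    }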
@@ -555,11 +581,12 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
 }
 EXPORT_SYMBOL(inet_dgram_connect);
 
-static long inet_wait_for_connect(struct sock *sk, long timeo)
+static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
 {
        DEFINE_WAIT(wait);
 
        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+       sk->sk_write_pending += writebias;
 
        /* Basic assumption: if someone sets sk->sk_err, he _must_
         * change state of the socket from TCP_SYN_*.
@@ -575,6 +602,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        }
        finish_wait(sk_sleep(sk), &wait);
+       sk->sk_write_pending -= writebias;
        return timeo;
 }
 
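The bias makes a Fast Open connect with queued data look like a pending write for the duration of the wait. That matters on the receive side: when the SYN-ACK arrives, tcp_rcv_synsent_state_process() checks sk->sk_write_pending and, if set, delays the final ACK of the handshake so the queued data can ride on it instead of a bare ACK going out first. Roughly (a sketch of that existing receive-side check, not part of this patch):

    /* sketch: a pending write lets the stack hold the third ACK
     * briefly so queued data can be sent along with it */
    if (sk->sk_write_pending || icsk->icsk_ack.pingpong)
            inet_csk_schedule_ack(sk);   /* delay; data follows shortly */
    else
            tcp_send_ack(sk);            /* nothing queued, ACK now */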
@@ -582,8 +610,8 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
  *     Connect to a remote host. There is regrettably still a little
  *     TCP 'magic' in here.
  */
-int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
-                       int addr_len, int flags)
+int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+                         int addr_len, int flags)
 {
        struct sock *sk = sock->sk;
        int err;
@@ -592,8 +620,6 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
        if (addr_len < sizeof(uaddr->sa_family))
                return -EINVAL;
 
-       lock_sock(sk);
-
        if (uaddr->sa_family == AF_UNSPEC) {
                err = sk->sk_prot->disconnect(sk, flags);
                sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
@@ -633,8 +659,12 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
        timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
 
        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+               int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
+                               tcp_sk(sk)->fastopen_req &&
+                               tcp_sk(sk)->fastopen_req->data ? 1 : 0;
+
                /* Error code is set above */
-               if (!timeo || !inet_wait_for_connect(sk, timeo))
+               if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
                        goto out;
 
                err = sock_intr_errno(timeo);
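writebias is nonzero only when a Fast Open request with data is pending, i.e. when this connect was triggered implicitly by sendmsg()/sendto() with MSG_FASTOPEN. The client side of that, as a minimal userspace fragment:

    #include <sys/socket.h>

    /* client: data travels in the SYN and connect() is implicit
     * (MSG_FASTOPEN, Linux 3.7+); fails with EOPNOTSUPP on kernels
     * without client-side TFO */
    static ssize_t tfo_send(int fd, const void *buf, size_t len,
                            const struct sockaddr *addr, socklen_t alen)
    {
            return sendto(fd, buf, len, MSG_FASTOPEN, addr, alen);
    }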
@@ -656,7 +686,6 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
        sock->state = SS_CONNECTED;
        err = 0;
 out:
-       release_sock(sk);
        return err;
 
 sock_error:
@@ -666,6 +695,18 @@ sock_error:
                sock->state = SS_DISCONNECTING;
        goto out;
 }
+EXPORT_SYMBOL(__inet_stream_connect);
+
+int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+                       int addr_len, int flags)
+{
+       int err;
+
+       lock_sock(sock->sk);
+       err = __inet_stream_connect(sock, uaddr, addr_len, flags);
+       release_sock(sock->sk);
+       return err;
+}
 EXPORT_SYMBOL(inet_stream_connect);
 
 /*
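Hoisting lock_sock()/release_sock() into a thin wrapper leaves __inet_stream_connect() callable by code that already holds the socket lock; the motivating caller is the MSG_FASTOPEN path in tcp_sendmsg(), which starts the connect from inside sendmsg. A sketch of such a caller (modeled on the Fast Open client series, not verbatim):

    /* sketch: tcp_sendmsg() already did lock_sock(sk) before the
     * MSG_FASTOPEN path kicks off the implicit connect */
    err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
                                msg->msg_namelen, msg->msg_flags);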
@@ -685,7 +726,8 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
 
        sock_rps_record_flow(sk2);
        WARN_ON(!((1 << sk2->sk_state) &
-                 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
+                 (TCPF_ESTABLISHED | TCPF_SYN_RECV |
+                 TCPF_CLOSE_WAIT | TCPF_CLOSE)));
 
        sock_graft(sk2, newsock);
 
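TCPF_SYN_RECV joins the expected states because a TFO listener can hand out a child socket as soon as a valid SYN with data is accepted, before the three-way handshake completes. The server's code path is unchanged; only the timing moves up by one RTT:

    /* server loop (lfd = listening socket): with TFO the child may
     * still be in SYN_RECV here, but the data carried in the SYN is
     * already queued for reading */
    int cfd = accept(lfd, NULL, NULL);
    char buf[4096];
    ssize_t n = read(cfd, buf, sizeof(buf));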
@@ -1348,7 +1390,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
        if (*(u8 *)iph != 0x45)
                goto out_unlock;
 
-       if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
+       if (unlikely(ip_fast_csum((u8 *)iph, 5)))
                goto out_unlock;
 
        id = ntohl(*(__be32 *)&iph->id);
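The preceding check already insists on a version/IHL byte of 0x45, so the header is known to be exactly five 32-bit words; passing the constant 5 lets ip_fast_csum() unroll completely instead of looping on iph->ihl. What it verifies in that case, as a plain C sketch:

    #include <stdint.h>

    /* RFC 1071 ones'-complement sum over a 20-byte (ihl == 5) IPv4
     * header; a header whose checksum field is correct folds to 0 */
    static uint16_t ip_hdr_fold(const uint16_t hdr[10])
    {
            uint32_t sum = 0;
            int i;

            for (i = 0; i < 10; i++)
                    sum += hdr[i];
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;     /* 0 => header is consistent */
    }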
@@ -1364,7 +1406,6 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
                iph2 = ip_hdr(p);
 
                if ((iph->protocol ^ iph2->protocol) |
-                   (iph->tos ^ iph2->tos) |
                    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
                    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
@@ -1374,6 +1415,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
                /* All fields must match except length and checksum. */
                NAPI_GRO_CB(p)->flush |=
                        (iph->ttl ^ iph2->ttl) |
+                       (iph->tos ^ iph2->tos) |
                        ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
 
                NAPI_GRO_CB(p)->flush |= flush;
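Taken together, the two ToS hunks change its role: a ToS mismatch no longer marks the packets as belonging to different flows, it forces a flush instead, so segments with differing ToS are still never merged while the flow match stays keyed on protocol and addresses alone. Schematically:

    /* sketch of the resulting GRO matching split */
    same_flow = iph->protocol == iph2->protocol &&
                iph->saddr == iph2->saddr &&
                iph->daddr == iph2->daddr;
    /* must-match fields that are not part of the key raise flush */
    flush = (iph->ttl != iph2->ttl) || (iph->tos != iph2->tos) ||
            ((uint16_t)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) != id);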
@@ -1518,14 +1560,15 @@ static const struct net_protocol igmp_protocol = {
 #endif
 
 static const struct net_protocol tcp_protocol = {
-       .handler =      tcp_v4_rcv,
-       .err_handler =  tcp_v4_err,
-       .gso_send_check = tcp_v4_gso_send_check,
-       .gso_segment =  tcp_tso_segment,
-       .gro_receive =  tcp4_gro_receive,
-       .gro_complete = tcp4_gro_complete,
-       .no_policy =    1,
-       .netns_ok =     1,
+       .early_demux    =       tcp_v4_early_demux,
+       .handler        =       tcp_v4_rcv,
+       .err_handler    =       tcp_v4_err,
+       .gso_send_check =       tcp_v4_gso_send_check,
+       .gso_segment    =       tcp_tso_segment,
+       .gro_receive    =       tcp4_gro_receive,
+       .gro_complete   =       tcp4_gro_complete,
+       .no_policy      =       1,
+       .netns_ok       =       1,
 };
 
 static const struct net_protocol udp_protocol = {
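The new early_demux hook is what makes the sk_rx_dst changes above pay off: ip_rcv can look up an established socket straight from the 4-tuple before making a routing decision and reuse the input route the socket cached earlier. A condensed sketch of the idea (details hedged, not the exact kernel code):

    /* sketch: skip the per-packet route lookup for established sockets */
    sk = __inet_lookup_established(net, &tcp_hashinfo,
                                   iph->saddr, th->source,
                                   iph->daddr, ntohs(th->dest),
                                   skb->skb_iif);
    if (sk && sk->sk_state != TCP_TIME_WAIT) {
            struct dst_entry *dst = sk->sk_rx_dst;

            if (dst && dst_check(dst, 0))
                    skb_dst_set_noref(skb, dst);   /* reuse cached route */
    }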