tcp: TCP Fast Open Server - support TFO listeners
[linux-3.10.git] / net / ipv4 / af_inet.c
index edc414625be28a5f8a19a90ee65efb39a83c7264..4f70ef0b946dba1b7efa71ad9b0d6debb2f713c9 100644 (file)
@@ -149,6 +149,11 @@ void inet_sock_destruct(struct sock *sk)
                pr_err("Attempt to release alive inet socket %p\n", sk);
                return;
        }
+       if (sk->sk_type == SOCK_STREAM) {
+               struct fastopen_queue *fastopenq =
+                       inet_csk(sk)->icsk_accept_queue.fastopenq;
+               kfree(fastopenq);
+       }
 
        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
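The kfree() added above is unconditional for SOCK_STREAM sockets, which is safe: the fastopenq pointer starts out NULL for sockets that never enabled Fast Open, and kfree(NULL) is a no-op. Freeing in the destructor (rather than in close()) also covers listeners torn down via shutdown(). For orientation, a rough sketch of the object being released; the field names are assumptions inferred from how this series uses the queue, not a verbatim kernel definition:

/* Sketch only -- plausible shape of the per-listener TFO queue. */
struct fastopen_queue {
	spinlock_t	lock;
	int		qlen;		/* current # of pending TFO requests */
	int		max_qlen;	/* cap chosen at listen() time */
	/* ... request bookkeeping elided ... */
};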
@@ -212,6 +217,26 @@ int inet_listen(struct socket *sock, int backlog)
         * we can only allow the backlog to be adjusted.
         */
        if (old_state != TCP_LISTEN) {
+               /* Check special setups for testing purposes to enable TFO
+                * w/o requiring the TCP_FASTOPEN sockopt.
+                * Note that only TCP sockets (SOCK_STREAM) will reach here.
+                * Also, fastopenq may already have been allocated because
+                * this socket was in TCP_LISTEN state previously but was
+                * shutdown() (rather than close()).
+                */
+               if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
+                   inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) {
+                       if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
+                               err = fastopen_init_queue(sk, backlog);
+                       else if ((sysctl_tcp_fastopen &
+                                 TFO_SERVER_WO_SOCKOPT2) != 0)
+                               err = fastopen_init_queue(sk,
+                                   ((uint)sysctl_tcp_fastopen) >> 16);
+                       else
+                               err = 0;
+                       if (err)
+                               goto out;
+               }
                err = inet_csk_listen_start(sk, backlog);
                if (err)
                        goto out;
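fastopen_init_queue() itself is defined elsewhere in the series. The branch above encodes the two "no sockopt needed" test modes: with TFO_SERVER_WO_SOCKOPT1 the TFO queue length is simply the listen() backlog, and with TFO_SERVER_WO_SOCKOPT2 it is taken from the upper 16 bits of the sysctl_tcp_fastopen value. A plausible sketch of the helper, assuming a lazily kzalloc()'d queue with the layout assumed in the sketch after the first hunk:

static inline int fastopen_init_queue(struct sock *sk, int backlog)
{
	struct request_sock_queue *queue =
		&inet_csk(sk)->icsk_accept_queue;

	if (queue->fastopenq == NULL) {
		queue->fastopenq = kzalloc(sizeof(struct fastopen_queue),
					   sk->sk_allocation);
		if (queue->fastopenq == NULL)
			return -ENOMEM;
		spin_lock_init(&queue->fastopenq->lock);
	}
	queue->fastopenq->max_qlen = backlog;
	return 0;
}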
@@ -585,8 +610,8 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
  *     Connect to a remote host. There is regrettably still a little
  *     TCP 'magic' in here.
  */
-int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
-                       int addr_len, int flags)
+int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+                         int addr_len, int flags)
 {
        struct sock *sk = sock->sk;
        int err;
@@ -595,8 +620,6 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
        if (addr_len < sizeof(uaddr->sa_family))
                return -EINVAL;
 
-       lock_sock(sk);
-
        if (uaddr->sa_family == AF_UNSPEC) {
                err = sk->sk_prot->disconnect(sk, flags);
                sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
@@ -663,7 +686,6 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
        sock->state = SS_CONNECTED;
        err = 0;
 out:
-       release_sock(sk);
        return err;
 
 sock_error:
@@ -673,6 +695,18 @@ sock_error:
                sock->state = SS_DISCONNECTING;
        goto out;
 }
+EXPORT_SYMBOL(__inet_stream_connect);
+
+int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+                       int addr_len, int flags)
+{
+       int err;
+
+       lock_sock(sock->sk);
+       err = __inet_stream_connect(sock, uaddr, addr_len, flags);
+       release_sock(sock->sk);
+       return err;
+}
 EXPORT_SYMBOL(inet_stream_connect);
 
 /*
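The refactor in the three hunks above pulls lock_sock()/release_sock() out of the connect path: __inet_stream_connect() now assumes the caller holds the socket lock, and the exported inet_stream_connect() becomes a thin locking wrapper around it. The likely motivation (inferred, not shown in this diff) is to let other in-kernel paths, such as a Fast Open sendmsg() that carries data in the SYN, reuse the connect logic while already holding the lock. A hypothetical caller sketch; the function name is made up for illustration:

/* Hypothetical: run the connect logic under an already-held lock. */
static int example_fastopen_connect(struct sock *sk,
				    struct sockaddr *uaddr, int addr_len)
{
	int err;

	lock_sock(sk);
	/* ... stage the data to be carried in the SYN ... */
	err = __inet_stream_connect(sk->sk_socket, uaddr, addr_len,
				    O_NONBLOCK);
	release_sock(sk);
	return err;
}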
@@ -692,7 +726,8 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
 
        sock_rps_record_flow(sk2);
        WARN_ON(!((1 << sk2->sk_state) &
-                 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
+                 (TCPF_ESTABLISHED | TCPF_SYN_RECV |
+                 TCPF_CLOSE_WAIT | TCPF_CLOSE)));
 
        sock_graft(sk2, newsock);
 
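TCPF_SYN_RECV joins the expected-state mask because a Fast Open listener can hand a child socket to accept() before the three-way handshake completes, i.e. while it is still in SYN_RECV. From userspace, the server side is enabled per listener with the Linux-specific TCP_FASTOPEN socket option; a minimal sketch, error handling elided (the constant may be missing from older libc headers):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int tfo_listen(unsigned short port)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int qlen = 16;	/* max outstanding SYN-data requests */
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port   = htons(port),
	};

	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	listen(fd, 128);
	/* accept() may now return a child still in SYN_RECV, with the
	 * data that arrived in the SYN already queued for reading. */
	return fd;
}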
@@ -1355,7 +1390,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
        if (*(u8 *)iph != 0x45)
                goto out_unlock;
 
-       if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
+       if (unlikely(ip_fast_csum((u8 *)iph, 5)))
                goto out_unlock;
 
        id = ntohl(*(__be32 *)&iph->id);
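Replacing iph->ihl with the constant 5 is justified by the earlier *(u8 *)iph != 0x45 check: the first octet of an IPv4 header holds the version in the high nibble and the header length (IHL, in 32-bit words) in the low nibble, so 0x45 means version 4 with a 20-byte, option-less header. With a compile-time-constant length, arch implementations of ip_fast_csum() can be fully unrolled. The relevant layout, roughly as declared in the kernel's <linux/ip.h>:

struct iphdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8	ihl:4,		/* header length in 32-bit words */
		version:4;	/* 4 for IPv4 */
#elif defined(__BIG_ENDIAN_BITFIELD)
	__u8	version:4,
		ihl:4;
#endif
	__u8	tos;
	/* ... */
};

So 0x45 decodes as ihl == 5 (5 * 4 == 20 bytes) and version == 4.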
@@ -1371,7 +1406,6 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
                iph2 = ip_hdr(p);
 
                if ((iph->protocol ^ iph2->protocol) |
-                   (iph->tos ^ iph2->tos) |
                    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
                    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
@@ -1381,6 +1415,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
                /* All fields must match except length and checksum. */
                NAPI_GRO_CB(p)->flush |=
                        (iph->ttl ^ iph2->ttl) |
+                       (iph->tos ^ iph2->tos) |
                        ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
 
                NAPI_GRO_CB(p)->flush |= flush;
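Together, the last two hunks change how a tos difference is treated: it no longer marks the packets as belonging to different flows (which would have let both sit on the GRO list), but it does set the flush bit, so the held packet is pushed up the stack instead of being extended. Segments differing only in tos therefore still match the flow, yet are never coalesced into one super-packet. An illustrative reduction of the two verdicts; the names are invented and the IP-ID continuity test is omitted:

#include <linux/ip.h>

enum gro_verdict { GRO_DIFFERENT_FLOW, GRO_FLUSH_HELD, GRO_CAN_MERGE };

static enum gro_verdict verdict(const struct iphdr *held,
				const struct iphdr *incoming)
{
	/* Flow key is now protocol + addresses only; tos was removed. */
	if (held->protocol != incoming->protocol ||
	    held->saddr != incoming->saddr ||
	    held->daddr != incoming->daddr)
		return GRO_DIFFERENT_FLOW;	/* keep both queued */

	/* Same flow, but a ttl or tos change forbids merging
	 * (the real code also requires consecutive IP IDs). */
	if (held->ttl != incoming->ttl || held->tos != incoming->tos)
		return GRO_FLUSH_HELD;		/* deliver held pkt now */

	return GRO_CAN_MERGE;
}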