Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes...

diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 98440ad..6afb6d8 100644
 #include <linux/cache.h>
 #include <linux/err.h>
 #include <linux/crypto.h>
+#include <linux/time.h>
 
 #include <net/icmp.h>
 #include <net/tcp.h>
@@ -428,7 +429,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
                if (tp->urg_seq == tp->copied_seq &&
                    !sock_flag(sk, SOCK_URGINLINE) &&
                    tp->urg_data)
-                       target--;
+                       target++;
 
                /* Potential race condition. If read of tp below will
                 * escape above sk->sk_state, we can be illegally awaken
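(The target adjustment above changes how many readable bytes tcp_poll() requires before reporting POLLIN while an urgent byte is pending out-of-line. As a reminder of the path involved, a minimal userspace sketch; fd is an assumed connected TCP socket and error handling is omitted:)

        /* Sender: mark a single byte as urgent. */
        send(fd, "!", 1, MSG_OOB);

        /* Receiver: without SO_OOBINLINE the urgent byte is fetched
         * out-of-line; ordinary recv() skips past the urgent mark. */
        char c;
        recv(fd, &c, 1, MSG_OOB);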
@@ -535,8 +536,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
                tp->nonagle &= ~TCP_NAGLE_PUSH;
 }
 
-static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
-                               struct sk_buff *skb)
+static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
 {
        if (flags & MSG_OOB)
                tp->snd_up = tp->write_seq;
@@ -545,13 +545,13 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
 static inline void tcp_push(struct sock *sk, int flags, int mss_now,
                            int nonagle)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
-
        if (tcp_send_head(sk)) {
-               struct sk_buff *skb = tcp_write_queue_tail(sk);
+               struct tcp_sock *tp = tcp_sk(sk);
+
                if (!(flags & MSG_MORE) || forced_push(tp))
-                       tcp_mark_push(tp, skb);
-               tcp_mark_urg(tp, flags, skb);
+                       tcp_mark_push(tp, tcp_write_queue_tail(sk));
+
+               tcp_mark_urg(tp, flags);
                __tcp_push_pending_frames(sk, mss_now,
                                          (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
        }
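(The MSG_MORE test above services sender-side corking; roughly, from userspace, with hypothetical buffers and no error handling:)

        /* Hold back a partial frame: more data is promised. */
        send(fd, hdr, hdr_len, MSG_MORE);

        /* A send without MSG_MORE lets the pending frame be pushed. */
        send(fd, body, body_len, 0);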
@@ -876,12 +876,12 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
 #define TCP_PAGE(sk)   (sk->sk_sndmsg_page)
 #define TCP_OFF(sk)    (sk->sk_sndmsg_off)
 
-static inline int select_size(struct sock *sk)
+static inline int select_size(struct sock *sk, int sg)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int tmp = tp->mss_cache;
 
-       if (sk->sk_route_caps & NETIF_F_SG) {
+       if (sg) {
                if (sk_can_gso(sk))
                        tmp = 0;
                else {
@@ -905,7 +905,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
        struct sk_buff *skb;
        int iovlen, flags;
        int mss_now, size_goal;
-       int err, copied;
+       int sg, err, copied;
        long timeo;
 
        lock_sock(sk);
@@ -933,6 +933,8 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                goto out_err;
 
+       sg = sk->sk_route_caps & NETIF_F_SG;
+
        while (--iovlen >= 0) {
                int seglen = iov->iov_len;
                unsigned char __user *from = iov->iov_base;
@@ -958,8 +960,9 @@ new_segment:
                                if (!sk_stream_memory_free(sk))
                                        goto wait_for_sndbuf;
 
-                               skb = sk_stream_alloc_skb(sk, select_size(sk),
-                                               sk->sk_allocation);
+                               skb = sk_stream_alloc_skb(sk,
+                                                         select_size(sk, sg),
+                                                         sk->sk_allocation);
                                if (!skb)
                                        goto wait_for_memory;
 
@@ -996,9 +999,7 @@ new_segment:
                                        /* We can extend the last page
                                         * fragment. */
                                        merge = 1;
-                               } else if (i == MAX_SKB_FRAGS ||
-                                          (!i &&
-                                          !(sk->sk_route_caps & NETIF_F_SG))) {
+                               } else if (i == MAX_SKB_FRAGS || !sg) {
                                        /* Need to add new fragment and cannot
                                         * do this because interface is non-SG,
                                         * or because all the page slots are
@@ -1183,7 +1184,9 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 #if TCP_DEBUG
        struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
-       WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
+       WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
+            KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+            tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
 #endif
 
        if (inet_csk_ack_scheduled(sk)) {
@@ -1251,6 +1254,39 @@ static void tcp_prequeue_process(struct sock *sk)
        tp->ucopy.memory = 0;
 }
 
+#ifdef CONFIG_NET_DMA
+static void tcp_service_net_dma(struct sock *sk, bool wait)
+{
+       dma_cookie_t done, used;
+       dma_cookie_t last_issued;
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       if (!tp->ucopy.dma_chan)
+               return;
+
+       last_issued = tp->ucopy.dma_cookie;
+       dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+
+       do {
+               if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
+                                             last_issued, &done,
+                                             &used) == DMA_SUCCESS) {
+                       /* Safe to free early-copied skbs now */
+                       __skb_queue_purge(&sk->sk_async_wait_queue);
+                       break;
+               } else {
+                       struct sk_buff *skb;
+                       while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
+                              (dma_async_is_complete(skb->dma_cookie, done,
+                                                     used) == DMA_SUCCESS)) {
+                               __skb_dequeue(&sk->sk_async_wait_queue);
+                               kfree_skb(skb);
+                       }
+               }
+       } while (wait);
+}
+#endif
+
 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 {
        struct sk_buff *skb;
@@ -1430,11 +1466,13 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        /* Now that we have two receive queues this
                         * shouldn't happen.
                         */
-                       if (before(*seq, TCP_SKB_CB(skb)->seq)) {
-                               printk(KERN_INFO "recvmsg bug: copied %X "
-                                      "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
+                       if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
+                            KERN_INFO "recvmsg bug: copied %X "
+                                      "seq %X rcvnxt %X fl %X\n", *seq,
+                                      TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
+                                      flags))
                                break;
-                       }
+
                        offset = *seq - TCP_SKB_CB(skb)->seq;
                        if (tcp_hdr(skb)->syn)
                                offset--;
@@ -1443,8 +1481,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        if (tcp_hdr(skb)->fin)
                                goto found_fin_ok;
                        WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
-                                       "copied %X seq %X\n", *seq,
-                                       TCP_SKB_CB(skb)->seq);
+                                       "copied %X seq %X rcvnxt %X fl %X\n",
+                                       *seq, TCP_SKB_CB(skb)->seq,
+                                       tp->rcv_nxt, flags);
                }
 
                /* Well, if we have backlog, try to process it now yet. */
@@ -1540,6 +1579,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        /* __ Set realtime policy in scheduler __ */
                }
 
+#ifdef CONFIG_NET_DMA
+               if (tp->ucopy.dma_chan)
+                       dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+#endif
                if (copied >= target) {
                        /* Do not sleep, just process backlog. */
                        release_sock(sk);
@@ -1548,6 +1591,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        sk_wait_data(sk, &timeo);
 
 #ifdef CONFIG_NET_DMA
+               tcp_service_net_dma(sk, false);  /* Don't block */
                tp->ucopy.wakeup = 0;
 #endif
 
@@ -1627,6 +1671,9 @@ do_prequeue:
                                                copied = -EFAULT;
                                        break;
                                }
+
+                               dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+
                                if ((offset + used) == skb->len)
                                        copied_early = 1;
 
@@ -1696,27 +1743,9 @@ skip_copy:
        }
 
 #ifdef CONFIG_NET_DMA
-       if (tp->ucopy.dma_chan) {
-               dma_cookie_t done, used;
-
-               dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
-
-               while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
-                                                tp->ucopy.dma_cookie, &done,
-                                                &used) == DMA_IN_PROGRESS) {
-                       /* do partial cleanup of sk_async_wait_queue */
-                       while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
-                              (dma_async_is_complete(skb->dma_cookie, done,
-                                                     used) == DMA_SUCCESS)) {
-                               __skb_dequeue(&sk->sk_async_wait_queue);
-                               kfree_skb(skb);
-                       }
-               }
+       tcp_service_net_dma(sk, true);  /* Wait for queue to drain */
+       tp->ucopy.dma_chan = NULL;
 
-               /* Safe to free early-copied skbs now */
-               __skb_queue_purge(&sk->sk_async_wait_queue);
-               tp->ucopy.dma_chan = NULL;
-       }
        if (tp->ucopy.pinned_list) {
                dma_unpin_iovec_pages(tp->ucopy.pinned_list);
                tp->ucopy.pinned_list = NULL;
@@ -2037,7 +2066,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        __skb_queue_purge(&sk->sk_async_wait_queue);
 #endif
 
-       inet->dport = 0;
+       inet->inet_dport = 0;
 
        if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
                inet_reset_saddr(sk);
@@ -2054,6 +2083,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        tp->snd_cwnd_cnt = 0;
        tp->bytes_acked = 0;
+       tp->window_clamp = 0;
        tcp_set_ca_state(sk, TCP_CA_Open);
        tcp_clear_retrans(tp);
        inet_csk_delack_init(sk);
@@ -2061,7 +2091,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
        __sk_dst_reset(sk);
 
-       WARN_ON(inet->num && !icsk->icsk_bind_hash);
+       WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
 
        sk->sk_error_report(sk);
        return err;
@@ -2078,8 +2108,9 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
        int val;
        int err = 0;
 
-       /* This is a string value all the others are int's */
-       if (optname == TCP_CONGESTION) {
+       /* These are data/string values, all the others are ints */
+       switch (optname) {
+       case TCP_CONGESTION: {
                char name[TCP_CA_NAME_MAX];
 
                if (optlen < 1)
@@ -2096,6 +2127,93 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                release_sock(sk);
                return err;
        }
+       case TCP_COOKIE_TRANSACTIONS: {
+               struct tcp_cookie_transactions ctd;
+               struct tcp_cookie_values *cvp = NULL;
+
+               if (sizeof(ctd) > optlen)
+                       return -EINVAL;
+               if (copy_from_user(&ctd, optval, sizeof(ctd)))
+                       return -EFAULT;
+
+               if (ctd.tcpct_used > sizeof(ctd.tcpct_value) ||
+                   ctd.tcpct_s_data_desired > TCP_MSS_DESIRED)
+                       return -EINVAL;
+
+               if (ctd.tcpct_cookie_desired == 0) {
+                       /* default to global value */
+               } else if ((0x1 & ctd.tcpct_cookie_desired) ||
+                          ctd.tcpct_cookie_desired > TCP_COOKIE_MAX ||
+                          ctd.tcpct_cookie_desired < TCP_COOKIE_MIN) {
+                       return -EINVAL;
+               }
+
+               if (TCP_COOKIE_OUT_NEVER & ctd.tcpct_flags) {
+                       /* Supersedes all other values */
+                       lock_sock(sk);
+                       if (tp->cookie_values != NULL) {
+                               kref_put(&tp->cookie_values->kref,
+                                        tcp_cookie_values_release);
+                               tp->cookie_values = NULL;
+                       }
+                       tp->rx_opt.cookie_in_always = 0; /* false */
+                       tp->rx_opt.cookie_out_never = 1; /* true */
+                       release_sock(sk);
+                       return err;
+               }
+
+               /* Allocate ancillary memory before locking.
+                */
+               if (ctd.tcpct_used > 0 ||
+                   (tp->cookie_values == NULL &&
+                    (sysctl_tcp_cookie_size > 0 ||
+                     ctd.tcpct_cookie_desired > 0 ||
+                     ctd.tcpct_s_data_desired > 0))) {
+                       cvp = kzalloc(sizeof(*cvp) + ctd.tcpct_used,
+                                     GFP_KERNEL);
+                       if (cvp == NULL)
+                               return -ENOMEM;
+               }
+               lock_sock(sk);
+               tp->rx_opt.cookie_in_always =
+                       (TCP_COOKIE_IN_ALWAYS & ctd.tcpct_flags);
+               tp->rx_opt.cookie_out_never = 0; /* false */
+
+               if (tp->cookie_values != NULL) {
+                       if (cvp != NULL) {
+                               /* Changed values are recorded by a changed
+                                * pointer, ensuring the cookie will differ,
+                                * without separately hashing each value later.
+                                */
+                               kref_put(&tp->cookie_values->kref,
+                                        tcp_cookie_values_release);
+                               kref_init(&cvp->kref);
+                               tp->cookie_values = cvp;
+                       } else {
+                               cvp = tp->cookie_values;
+                       }
+               }
+               if (cvp != NULL) {
+                       cvp->cookie_desired = ctd.tcpct_cookie_desired;
+
+                       if (ctd.tcpct_used > 0) {
+                               memcpy(cvp->s_data_payload, ctd.tcpct_value,
+                                      ctd.tcpct_used);
+                               cvp->s_data_desired = ctd.tcpct_used;
+                               cvp->s_data_constant = 1; /* true */
+                       } else {
+                               /* No constant payload data. */
+                               cvp->s_data_desired = ctd.tcpct_s_data_desired;
+                               cvp->s_data_constant = 0; /* false */
+                       }
+               }
+               release_sock(sk);
+               return err;
+       }
+       default:
+               /* fallthru */
+               break;
+       }
 
        if (optlen < sizeof(int))
                return -EINVAL;
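(For the new struct-valued option above, a hedged userspace sketch, assuming the struct tcp_cookie_transactions layout and TCP_COOKIE_* flags this patch series adds to linux/tcp.h; fd is an assumed TCP socket:)

        #include <string.h>
        #include <sys/socket.h>
        #include <netinet/in.h>
        #include <linux/tcp.h>  /* struct tcp_cookie_transactions, TCP_COOKIE_* */

        struct tcp_cookie_transactions ctd;

        memset(&ctd, 0, sizeof(ctd));
        ctd.tcpct_flags = TCP_COOKIE_IN_ALWAYS; /* require inbound cookies */
        ctd.tcpct_cookie_desired = 0;           /* 0 = take the global default */
        if (setsockopt(fd, IPPROTO_TCP, TCP_COOKIE_TRANSACTIONS,
                       &ctd, sizeof(ctd)) < 0)
                ;       /* -EINVAL, -EFAULT, or -ENOMEM per the handler above */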
@@ -2134,6 +2252,20 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                }
                break;
 
+       case TCP_THIN_LINEAR_TIMEOUTS:
+               if (val < 0 || val > 1)
+                       err = -EINVAL;
+               else
+                       tp->thin_lto = val;
+               break;
+
+       case TCP_THIN_DUPACK:
+               if (val < 0 || val > 1)
+                       err = -EINVAL;
+               else
+                       tp->thin_dupack = val;
+               break;
+
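(Both new knobs are plain booleans; enabling them from userspace looks like the sketch below. fd is an assumed connected TCP socket; the option numbers are defined locally in case the installed headers predate these patches:)

        #include <sys/socket.h>
        #include <netinet/in.h>
        #include <netinet/tcp.h>

        #ifndef TCP_THIN_LINEAR_TIMEOUTS
        #define TCP_THIN_LINEAR_TIMEOUTS 16  /* values from linux/tcp.h */
        #define TCP_THIN_DUPACK          17
        #endif

        int one = 1;
        setsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS, &one, sizeof(one));
        setsockopt(fd, IPPROTO_TCP, TCP_THIN_DUPACK, &one, sizeof(one));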
        case TCP_CORK:
                /* When set indicates to always queue non-full frames.
                 * Later the user clears this option and we transmit
@@ -2420,6 +2552,42 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
                if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
                        return -EFAULT;
                return 0;
+
+       case TCP_COOKIE_TRANSACTIONS: {
+               struct tcp_cookie_transactions ctd;
+               struct tcp_cookie_values *cvp = tp->cookie_values;
+
+               if (get_user(len, optlen))
+                       return -EFAULT;
+               if (len < sizeof(ctd))
+                       return -EINVAL;
+
+               memset(&ctd, 0, sizeof(ctd));
+               ctd.tcpct_flags = (tp->rx_opt.cookie_in_always ?
+                                  TCP_COOKIE_IN_ALWAYS : 0)
+                               | (tp->rx_opt.cookie_out_never ?
+                                  TCP_COOKIE_OUT_NEVER : 0);
+
+               if (cvp != NULL) {
+                       ctd.tcpct_flags |= (cvp->s_data_in ?
+                                           TCP_S_DATA_IN : 0)
+                                        | (cvp->s_data_out ?
+                                           TCP_S_DATA_OUT : 0);
+
+                       ctd.tcpct_cookie_desired = cvp->cookie_desired;
+                       ctd.tcpct_s_data_desired = cvp->s_data_desired;
+
+                       memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0],
+                              cvp->cookie_pair_size);
+                       ctd.tcpct_used = cvp->cookie_pair_size;
+               }
+
+               if (put_user(sizeof(ctd), optlen))
+                       return -EFAULT;
+               if (copy_to_user(optval, &ctd, sizeof(ctd)))
+                       return -EFAULT;
+               return 0;
+       }
        default:
                return -ENOPROTOOPT;
        }
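(Reading a string-valued option back is symmetric; a short sketch for TCP_CONGESTION, with fd an assumed TCP socket:)

        #include <sys/socket.h>
        #include <netinet/in.h>
        #include <netinet/tcp.h>

        char name[16];                  /* TCP_CA_NAME_MAX */
        socklen_t len = sizeof(name);

        if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
                ;       /* name now holds e.g. "cubic" or "reno" */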
@@ -2657,10 +2825,10 @@ EXPORT_SYMBOL(tcp_gro_complete);
 
 #ifdef CONFIG_TCP_MD5SIG
 static unsigned long tcp_md5sig_users;
-static struct tcp_md5sig_pool **tcp_md5sig_pool;
+static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool;
 static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
 
-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
+static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
 {
        int cpu;
        for_each_possible_cpu(cpu) {
@@ -2677,7 +2845,7 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
 
 void tcp_free_md5sig_pool(void)
 {
-       struct tcp_md5sig_pool **pool = NULL;
+       struct tcp_md5sig_pool * __percpu *pool = NULL;
 
        spin_lock_bh(&tcp_md5sig_pool_lock);
        if (--tcp_md5sig_users == 0) {
@@ -2691,10 +2859,11 @@ void tcp_free_md5sig_pool(void)
 
 EXPORT_SYMBOL(tcp_free_md5sig_pool);
 
-static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(struct sock *sk)
+static struct tcp_md5sig_pool * __percpu *
+__tcp_alloc_md5sig_pool(struct sock *sk)
 {
        int cpu;
-       struct tcp_md5sig_pool **pool;
+       struct tcp_md5sig_pool * __percpu *pool;
 
        pool = alloc_percpu(struct tcp_md5sig_pool *);
        if (!pool)
@@ -2721,9 +2890,9 @@ out_free:
        return NULL;
 }
 
-struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *sk)
+struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
 {
-       struct tcp_md5sig_pool **pool;
+       struct tcp_md5sig_pool * __percpu *pool;
        int alloc = 0;
 
 retry:
@@ -2742,7 +2911,9 @@ retry:
 
        if (alloc) {
                /* we cannot hold spinlock here because this may sleep. */
-               struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(sk);
+               struct tcp_md5sig_pool * __percpu *p;
+
+               p = __tcp_alloc_md5sig_pool(sk);
                spin_lock_bh(&tcp_md5sig_pool_lock);
                if (!p) {
                        tcp_md5sig_users--;
@@ -2766,7 +2937,7 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
 
 struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
 {
-       struct tcp_md5sig_pool **p;
+       struct tcp_md5sig_pool * __percpu *p;
        spin_lock_bh(&tcp_md5sig_pool_lock);
        p = tcp_md5sig_pool;
        if (p)
@@ -2842,6 +3013,135 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
 
 #endif
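(The __percpu annotations added throughout the MD5 pool code are sparse address-space markers for pointers handed out by alloc_percpu(); an illustrative kernel-side sketch of the pattern, not part of this patch:)

        struct tcp_md5sig_pool * __percpu *pool;
        int cpu;

        pool = alloc_percpu(struct tcp_md5sig_pool *); /* one slot per CPU */
        for_each_possible_cpu(cpu) {
                /* per_cpu_ptr() resolves the marker to this CPU's slot */
                struct tcp_md5sig_pool **slot = per_cpu_ptr(pool, cpu);
                *slot = NULL;
        }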
 
+/**
+ * Each Responder maintains up to two secret values concurrently for
+ * efficient secret rollover.  Each secret value has 4 states:
+ *
+ * Generating.  (tcp_secret_generating != tcp_secret_primary)
+ *    Generates new Responder-Cookies, but not yet used for primary
+ *    verification.  This is a short-term state, typically lasting only
+ *    one round trip time (RTT).
+ *
+ * Primary.  (tcp_secret_generating == tcp_secret_primary)
+ *    Used both for generation and primary verification.
+ *
+ * Retiring.  (tcp_secret_retiring != tcp_secret_secondary)
+ *    Used for verification, until the first failure that can be
+ *    verified by the newer Generating secret.  At that time, this
+ *    cookie's state is changed to Secondary, and the Generating
+ *    cookie's state is changed to Primary.  This is a short-term state,
+ *    typically lasting only one round trip time (RTT).
+ *
+ * Secondary.  (tcp_secret_retiring == tcp_secret_secondary)
+ *    Used for secondary verification, after primary verification
+ *    failures.  This state lasts no more than twice the Maximum Segment
+ *    Lifetime (2MSL).  Then, the secret is discarded.
+ */
+struct tcp_cookie_secret {
+       /* The secret is divided into two parts.  The digest part is the
+        * equivalent of previously hashing a secret and saving the state,
+        * and serves as an initialization vector (IV).  The message part
+        * serves as the trailing secret.
+        */
+       u32                             secrets[COOKIE_WORKSPACE_WORDS];
+       unsigned long                   expires;
+};
+
+#define TCP_SECRET_1MSL (HZ * TCP_PAWS_MSL)
+#define TCP_SECRET_2MSL (HZ * TCP_PAWS_MSL * 2)
+#define TCP_SECRET_LIFE (HZ * 600)
+
+static struct tcp_cookie_secret tcp_secret_one;
+static struct tcp_cookie_secret tcp_secret_two;
+
+/* Essentially a circular list, without dynamic allocation. */
+static struct tcp_cookie_secret *tcp_secret_generating;
+static struct tcp_cookie_secret *tcp_secret_primary;
+static struct tcp_cookie_secret *tcp_secret_retiring;
+static struct tcp_cookie_secret *tcp_secret_secondary;
+
+static DEFINE_SPINLOCK(tcp_secret_locker);
+
+/* Select a pseudo-random word in the cookie workspace.
+ */
+static inline u32 tcp_cookie_work(const u32 *ws, const int n)
+{
+       return ws[COOKIE_DIGEST_WORDS + ((COOKIE_MESSAGE_WORDS-1) & ws[n])];
+}
+
+/* Fill bakery[COOKIE_WORKSPACE_WORDS] with generator, updating as needed.
+ * Called in softirq context.
+ * Returns: 0 for success.
+ */
+int tcp_cookie_generator(u32 *bakery)
+{
+       unsigned long jiffy = jiffies;
+
+       if (unlikely(time_after_eq(jiffy, tcp_secret_generating->expires))) {
+               spin_lock_bh(&tcp_secret_locker);
+               if (!time_after_eq(jiffy, tcp_secret_generating->expires)) {
+                       /* refreshed by another */
+                       memcpy(bakery,
+                              &tcp_secret_generating->secrets[0],
+                              COOKIE_WORKSPACE_WORDS);
+               } else {
+                       /* still needs refreshing */
+                       get_random_bytes(bakery, COOKIE_WORKSPACE_WORDS);
+
+                       /* The first time, paranoia assumes that the
+                        * randomization function isn't as strong.  But,
+                        * this secret initialization is delayed until
+                        * the last possible moment (packet arrival).
+                        * Although that time is observable, it is
+                        * unpredictably variable.  Mash in the most
+                        * volatile clock bits available, and expire the
+                        * secret extra quickly.
+                        */
+                       if (unlikely(tcp_secret_primary->expires ==
+                                    tcp_secret_secondary->expires)) {
+                               struct timespec tv;
+
+                               getnstimeofday(&tv);
+                               bakery[COOKIE_DIGEST_WORDS+0] ^=
+                                       (u32)tv.tv_nsec;
+
+                               tcp_secret_secondary->expires = jiffy
+                                       + TCP_SECRET_1MSL
+                                       + (0x0f & tcp_cookie_work(bakery, 0));
+                       } else {
+                               tcp_secret_secondary->expires = jiffy
+                                       + TCP_SECRET_LIFE
+                                       + (0xff & tcp_cookie_work(bakery, 1));
+                               tcp_secret_primary->expires = jiffy
+                                       + TCP_SECRET_2MSL
+                                       + (0x1f & tcp_cookie_work(bakery, 2));
+                       }
+                       memcpy(&tcp_secret_secondary->secrets[0],
+                              bakery, COOKIE_WORKSPACE_WORDS);
+
+                       rcu_assign_pointer(tcp_secret_generating,
+                                          tcp_secret_secondary);
+                       rcu_assign_pointer(tcp_secret_retiring,
+                                          tcp_secret_primary);
+                       /*
+                        * Neither call_rcu() nor synchronize_rcu() needed.
+                        * Retiring data is not freed.  It is replaced after
+                        * further (locked) pointer updates, and a quiet time
+                        * (minimum 1MSL, maximum LIFE - 2MSL).
+                        */
+               }
+               spin_unlock_bh(&tcp_secret_locker);
+       } else {
+               rcu_read_lock_bh();
+               memcpy(bakery,
+                      &rcu_dereference(tcp_secret_generating)->secrets[0],
+                      COOKIE_WORKSPACE_WORDS);
+               rcu_read_unlock_bh();
+       }
+       return 0;
+}
+EXPORT_SYMBOL(tcp_cookie_generator);
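(A hedged sketch of the call side; the real consumers live in the TCP option-building paths, outside this hunk:)

        u32 bakery[COOKIE_WORKSPACE_WORDS];

        if (tcp_cookie_generator(bakery) == 0) {
                /* bakery[0 .. COOKIE_DIGEST_WORDS-1] acts as the IV-like
                 * digest part; the remaining COOKIE_MESSAGE_WORDS carry
                 * the trailing secret, per the struct comment above. */
        }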
+
 void tcp_done(struct sock *sk)
 {
        if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
@@ -2876,6 +3176,7 @@ void __init tcp_init(void)
        struct sk_buff *skb = NULL;
        unsigned long nr_pages, limit;
        int order, i, max_share;
+       unsigned long jiffy = jiffies;
 
        BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
 
@@ -2898,11 +3199,10 @@ void __init tcp_init(void)
                                        (totalram_pages >= 128 * 1024) ?
                                        13 : 15,
                                        0,
-                                       &tcp_hashinfo.ehash_size,
                                        NULL,
+                                       &tcp_hashinfo.ehash_mask,
                                        thash_entries ? 0 : 512 * 1024);
-       tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
-       for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
+       for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) {
                INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
                INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
        }
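(Keeping the mask rather than the size suits the power-of-two table: lookups index with a cheap AND, and the size is recovered as ehash_mask + 1. Roughly, mirroring the established inet_ehash_bucket() pattern:)

        struct inet_ehash_bucket *head =
                &tcp_hashinfo.ehash[hash & tcp_hashinfo.ehash_mask];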
@@ -2911,7 +3211,7 @@ void __init tcp_init(void)
        tcp_hashinfo.bhash =
                alloc_large_system_hash("TCP bind",
                                        sizeof(struct inet_bind_hashbucket),
-                                       tcp_hashinfo.ehash_size,
+                                       tcp_hashinfo.ehash_mask + 1,
                                        (totalram_pages >= 128 * 1024) ?
                                        13 : 15,
                                        0,
@@ -2966,10 +3266,19 @@ void __init tcp_init(void)
        sysctl_tcp_rmem[2] = max(87380, max_share);
 
        printk(KERN_INFO "TCP: Hash tables configured "
-              "(established %d bind %d)\n",
-              tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);
+              "(established %u bind %u)\n",
+              tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
 
        tcp_register_congestion_control(&tcp_reno);
+
+       memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets));
+       memset(&tcp_secret_two.secrets[0], 0, sizeof(tcp_secret_two.secrets));
+       tcp_secret_one.expires = jiffy; /* past due */
+       tcp_secret_two.expires = jiffy; /* past due */
+       tcp_secret_generating = &tcp_secret_one;
+       tcp_secret_primary = &tcp_secret_one;
+       tcp_secret_retiring = &tcp_secret_two;
+       tcp_secret_secondary = &tcp_secret_two;
 }
 
 EXPORT_SYMBOL(tcp_close);