Merge branch 'master' of github.com:davem330/net
[linux-3.10.git] / net / ipv4 / tcp_input.c
index e25827719e702badc546bbe39dc69519bf9b954a..81cae641c9a927f03109c6c37617914f6eae4739 100644 (file)
  */
 
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/sysctl.h>
+#include <linux/kernel.h>
 #include <net/dst.h>
 #include <net/tcp.h>
 #include <net/inet_common.h>
@@ -76,10 +78,13 @@ int sysctl_tcp_window_scaling __read_mostly = 1;
 int sysctl_tcp_sack __read_mostly = 1;
 int sysctl_tcp_fack __read_mostly = 1;
 int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
-int sysctl_tcp_ecn __read_mostly;
+EXPORT_SYMBOL(sysctl_tcp_reordering);
+int sysctl_tcp_ecn __read_mostly = 2;
+EXPORT_SYMBOL(sysctl_tcp_ecn);
 int sysctl_tcp_dsack __read_mostly = 1;
 int sysctl_tcp_app_win __read_mostly = 31;
 int sysctl_tcp_adv_win_scale __read_mostly = 2;
+EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 
 int sysctl_tcp_stdurg __read_mostly;
 int sysctl_tcp_rfc1337 __read_mostly;
@@ -88,6 +93,8 @@ int sysctl_tcp_frto __read_mostly = 2;
 int sysctl_tcp_frto_response __read_mostly;
 int sysctl_tcp_nometrics_save __read_mostly;
 
+int sysctl_tcp_thin_dupack __read_mostly;
+
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
 int sysctl_tcp_abc __read_mostly;
 
@@ -139,7 +146,7 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
                 * "len" is invariant segment length, including TCP header.
                 */
                len += skb->data - skb_transport_header(skb);
-               if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
+               if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
                    /* If PSH is not set, packet should be
                     * full sized, provided peer TCP is not badly broken.
                     * This observation (if it is correct 8)) allows
@@ -175,7 +182,7 @@ static void tcp_incr_quickack(struct sock *sk)
                icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
 }
 
-void tcp_enter_quickack_mode(struct sock *sk)
+static void tcp_enter_quickack_mode(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        tcp_incr_quickack(sk);
@@ -210,16 +217,25 @@ static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
        tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
 }
 
-static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
+static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
 {
-       if (tp->ecn_flags & TCP_ECN_OK) {
-               if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags))
-                       tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+       if (!(tp->ecn_flags & TCP_ECN_OK))
+               return;
+
+       switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
+       case INET_ECN_NOT_ECT:
                /* Funny extension: if ECT is not set on a segment,
-                * it is surely retransmit. It is not in ECN RFC,
-                * but Linux follows this rule. */
-               else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags)))
+                * and we have already seen ECT on a previous segment,
+                * it is probably a retransmit.
+                */
+               if (tp->ecn_flags & TCP_ECN_SEEN)
                        tcp_enter_quickack_mode((struct sock *)tp);
+               break;
+       case INET_ECN_CE:
+               tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+               /* fall through */
+       default:
+               tp->ecn_flags |= TCP_ECN_SEEN;
        }
 }
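
The rewritten TCP_ECN_check_ce() keys off the two ECN bits of the IP DS field rather than the skb control-block flags. A minimal userspace sketch of the same codepoint dispatch (the INET_ECN_* values below match include/net/inet_ecn.h):

    #include <stdio.h>

    /* ECN codepoints in the low two bits of the IP DS field
     * (values as in include/net/inet_ecn.h). */
    enum {
            INET_ECN_NOT_ECT = 0,   /* sender is not ECN-capable  */
            INET_ECN_ECT_1   = 1,   /* ECN-capable transport, (1) */
            INET_ECN_ECT_0   = 2,   /* ECN-capable transport, (0) */
            INET_ECN_CE      = 3,   /* congestion experienced     */
            INET_ECN_MASK    = 3,
    };

    int main(void)
    {
            unsigned char dsfield = 0x03;   /* example DS byte marked CE */

            switch (dsfield & INET_ECN_MASK) {
            case INET_ECN_NOT_ECT:
                    puts("not ECT: probably a retransmit if ECT was seen before");
                    break;
            case INET_ECN_CE:
                    puts("CE: demand CWR from the peer");
                    break;
            default:
                    puts("ECT(0)/ECT(1): ECN-capable transport");
            }
            return 0;
    }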
 
@@ -252,8 +268,11 @@ static void tcp_fixup_sndbuf(struct sock *sk)
        int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
                     sizeof(struct sk_buff);
 
-       if (sk->sk_sndbuf < 3 * sndmem)
-               sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]);
+       if (sk->sk_sndbuf < 3 * sndmem) {
+               sk->sk_sndbuf = 3 * sndmem;
+               if (sk->sk_sndbuf > sysctl_tcp_wmem[2])
+                       sk->sk_sndbuf = sysctl_tcp_wmem[2];
+       }
 }
 
 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -389,7 +408,7 @@ static void tcp_clamp_window(struct sock *sk)
        if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
            !tcp_memory_pressure &&
-           atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
+           atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
                sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
                                    sysctl_tcp_rmem[2]);
        }
@@ -410,20 +429,21 @@ void tcp_initialize_rcv_mss(struct sock *sk)
        unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
 
        hint = min(hint, tp->rcv_wnd / 2);
-       hint = min(hint, TCP_MIN_RCVMSS);
+       hint = min(hint, TCP_MSS_DEFAULT);
        hint = max(hint, TCP_MIN_MSS);
 
        inet_csk(sk)->icsk_ack.rcv_mss = hint;
 }
+EXPORT_SYMBOL(tcp_initialize_rcv_mss);
 
 /* Receiver "autotuning" code.
  *
  * The algorithm for RTT estimation w/o timestamps is based on
  * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
- * <http://www.lanl.gov/radiant/website/pubs/drs/lacsi2001.ps>
+ * <http://public.lanl.gov/radiant/pubs.html#DRS>
  *
  * More detail on this code can be found at
- * <http://www.psc.edu/~jheffner/senior_thesis.ps>,
+ * <http://staff.psc.edu/jheffner/>,
  * though this reference is out of date.  A new paper
  * is pending.
  */
@@ -596,16 +616,6 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
                tcp_grow_window(sk, skb);
 }
 
-static u32 tcp_rto_min(struct sock *sk)
-{
-       struct dst_entry *dst = __sk_dst_get(sk);
-       u32 rto_min = TCP_RTO_MIN;
-
-       if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
-               rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
-       return rto_min;
-}
-
 /* Called to compute a smoothed rtt estimate. The data fed to this
  * routine either comes from timestamps, or from segments that were
  * known _not_ to have been retransmitted [see Karn/Partridge
@@ -694,22 +704,18 @@ static inline void tcp_set_rto(struct sock *sk)
         *    is invisible. Actually, Linux-2.4 also generates erratic
         *    ACKs in some circumstances.
         */
-       inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;
+       inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);
 
        /* 2. Fixups made earlier cannot be right.
         *    If we do not estimate RTO correctly without them,
         *    all the algo is pure shit and should be replaced
         *    with a correct one. That is exactly what we pretend to do.
         */
-}
 
-/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
- * guarantees that rto is higher.
- */
-static inline void tcp_bound_rto(struct sock *sk)
-{
-       if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
-               inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
+       /* NOTE: clamping at TCP_RTO_MIN is not required, current algo
+        * guarantees that rto is higher.
+        */
+       tcp_bound_rto(sk);
 }
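
__tcp_set_rto() folds the old open-coded formula and the TCP_RTO_MAX clamp together: srtt is kept left-shifted by 3 (fixed-point eighths), so the RTO is srtt/8 + rttvar, bounded above. A minimal sketch, assuming millisecond units for readability (the kernel works in jiffies):

    #include <stdio.h>

    #define TCP_RTO_MAX (120 * 1000)   /* upper clamp; ms here, jiffies in the kernel */

    /* srtt is stored left-shifted by 3 (units of 1/8 RTT), so
     * (srtt >> 3) + rttvar is RTO = smoothed RTT + variance.
     * tcp_bound_rto() then clamps at TCP_RTO_MAX; no lower clamp is
     * needed because rttvar never drops below the RTO minimum. */
    static unsigned int set_rto(unsigned int srtt_shifted, unsigned int rttvar)
    {
            unsigned int rto = (srtt_shifted >> 3) + rttvar;

            return rto > TCP_RTO_MAX ? TCP_RTO_MAX : rto;
    }

    int main(void)
    {
            /* Smoothed RTT 200 ms (stored as 1600), rttvar 50 ms. */
            printf("rto = %u ms\n", set_rto(200 << 3, 50));   /* 250 */
            return 0;
    }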
 
 /* Save metrics learned by this TCP session.
@@ -737,7 +743,7 @@ void tcp_update_metrics(struct sock *sk)
                         * Reset our results.
                         */
                        if (!(dst_metric_locked(dst, RTAX_RTT)))
-                               dst->metrics[RTAX_RTT - 1] = 0;
+                               dst_metric_set(dst, RTAX_RTT, 0);
                        return;
                }
 
@@ -774,62 +780,53 @@ void tcp_update_metrics(struct sock *sk)
                        set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
                }
 
-               if (tp->snd_ssthresh >= 0xFFFF) {
+               if (tcp_in_initial_slowstart(tp)) {
                        /* Slow start still did not finish. */
                        if (dst_metric(dst, RTAX_SSTHRESH) &&
                            !dst_metric_locked(dst, RTAX_SSTHRESH) &&
                            (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
-                               dst->metrics[RTAX_SSTHRESH-1] = tp->snd_cwnd >> 1;
+                               dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
                        if (!dst_metric_locked(dst, RTAX_CWND) &&
                            tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
-                               dst->metrics[RTAX_CWND - 1] = tp->snd_cwnd;
+                               dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
                } else if (tp->snd_cwnd > tp->snd_ssthresh &&
                           icsk->icsk_ca_state == TCP_CA_Open) {
                        /* Cong. avoidance phase, cwnd is reliable. */
                        if (!dst_metric_locked(dst, RTAX_SSTHRESH))
-                               dst->metrics[RTAX_SSTHRESH-1] =
-                                       max(tp->snd_cwnd >> 1, tp->snd_ssthresh);
+                               dst_metric_set(dst, RTAX_SSTHRESH,
+                                              max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
                        if (!dst_metric_locked(dst, RTAX_CWND))
-                               dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_cwnd) >> 1;
+                               dst_metric_set(dst, RTAX_CWND,
+                                              (dst_metric(dst, RTAX_CWND) +
+                                               tp->snd_cwnd) >> 1);
                } else {
                        /* Else slow start did not finish, cwnd is nonsense,
                           ssthresh may also be invalid.
                         */
                        if (!dst_metric_locked(dst, RTAX_CWND))
-                               dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_ssthresh) >> 1;
+                               dst_metric_set(dst, RTAX_CWND,
+                                              (dst_metric(dst, RTAX_CWND) +
+                                               tp->snd_ssthresh) >> 1);
                        if (dst_metric(dst, RTAX_SSTHRESH) &&
                            !dst_metric_locked(dst, RTAX_SSTHRESH) &&
                            tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
-                               dst->metrics[RTAX_SSTHRESH-1] = tp->snd_ssthresh;
+                               dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
                }
 
                if (!dst_metric_locked(dst, RTAX_REORDERING)) {
                        if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
                            tp->reordering != sysctl_tcp_reordering)
-                               dst->metrics[RTAX_REORDERING-1] = tp->reordering;
+                               dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
                }
        }
 }
 
-/* Numbers are taken from RFC3390.
- *
- * John Heffner states:
- *
- *     The RFC specifies a window of no more than 4380 bytes
- *     unless 2*MSS > 4380.  Reading the pseudocode in the RFC
- *     is a bit misleading because they use a clamp at 4380 bytes
- *     rather than use a multiplier in the relevant range.
- */
 __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
 {
        __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
 
-       if (!cwnd) {
-               if (tp->mss_cache > 1460)
-                       cwnd = 2;
-               else
-                       cwnd = (tp->mss_cache > 1095) ? 3 : 4;
-       }
+       if (!cwnd)
+               cwnd = TCP_INIT_CWND;
        return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
 }
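
The deleted branch implemented the RFC3390 table (2, 3 or 4 segments depending on MSS); the replacement defers to a single TCP_INIT_CWND constant. A sketch contrasting the two policies, where the value 10 is an assumption taken from kernels that adopted the IW10 proposal, not shown in this hunk:

    #include <stdio.h>

    #define TCP_INIT_CWND 10    /* assumption: IW10 value from later kernels */

    /* Old policy, straight from the removed RFC3390 branch. */
    static unsigned int rfc3390_init_cwnd(unsigned int mss)
    {
            if (mss > 1460)
                    return 2;
            return mss > 1095 ? 3 : 4;
    }

    int main(void)
    {
            unsigned int mss[] = { 536, 1095, 1096, 1460, 1461 };
            unsigned int i;

            for (i = 0; i < sizeof(mss) / sizeof(mss[0]); i++)
                    printf("mss=%4u old=%u new=%u\n", mss[i],
                           rfc3390_init_cwnd(mss[i]), TCP_INIT_CWND);
            return 0;
    }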
 
@@ -892,6 +889,11 @@ static void tcp_init_metrics(struct sock *sk)
                tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
                if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
                        tp->snd_ssthresh = tp->snd_cwnd_clamp;
+       } else {
+               /* ssthresh may have been reduced unnecessarily during
+                * the 3WHS. Restore it to its initial default.
+                */
+               tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        }
        if (dst_metric(dst, RTAX_REORDERING) &&
            tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
@@ -899,10 +901,7 @@ static void tcp_init_metrics(struct sock *sk)
                tp->reordering = dst_metric(dst, RTAX_REORDERING);
        }
 
-       if (dst_metric(dst, RTAX_RTT) == 0)
-               goto reset;
-
-       if (!tp->srtt && dst_metric_rtt(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
+       if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
                goto reset;
 
        /* Initial rtt is determined from SYN,SYN-ACK.
@@ -928,23 +927,27 @@ static void tcp_init_metrics(struct sock *sk)
                tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
        }
        tcp_set_rto(sk);
-       tcp_bound_rto(sk);
-       if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
-               goto reset;
-       tp->snd_cwnd = tcp_init_cwnd(tp, dst);
-       tp->snd_cwnd_stamp = tcp_time_stamp;
-       return;
-
 reset:
-       /* Play conservative. If timestamps are not
-        * supported, TCP will fail to recalculate correct
-        * rtt, if initial rto is too small. FORGET ALL AND RESET!
-        */
-       if (!tp->rx_opt.saw_tstamp && tp->srtt) {
-               tp->srtt = 0;
-               tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
-               inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
+       if (tp->srtt == 0) {
+               /* RFC2988bis: We've failed to get a valid RTT sample from
+                * 3WHS. This is most likely due to retransmission,
+                * including spurious one. Reset the RTO back to 3secs
+                * from the more aggressive 1sec to avoid more spurious
+                * retransmission.
+                */
+               tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
+               inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
        }
+       /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
+        * retransmitted. In light of RFC2988bis' more aggressive 1sec
+        * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
+        * retransmission has occurred.
+        */
+       if (tp->total_retrans > 1)
+               tp->snd_cwnd = 1;
+       else
+               tp->snd_cwnd = tcp_init_cwnd(tp, dst);
+       tp->snd_cwnd_stamp = tcp_time_stamp;
 }
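
tcp_init_metrics() now falls back to a conservative RTO when the 3WHS produced no usable RTT sample, and cuts cwnd to 1 only after more than one SYN/SYN-ACK retransmission. A minimal sketch of that decision, assuming TCP_TIMEOUT_FALLBACK is the 3 s named in the comment and that tcp_init_cwnd() yields 10:

    #include <stdio.h>

    #define TCP_TIMEOUT_FALLBACK 3      /* seconds; assumed per the "3 secs" above */
    #define TCP_INIT_CWND        10     /* assumed tcp_init_cwnd() result */

    /* Sketch of the reset path: pick the RTO fallback and initial cwnd. */
    static void init_metrics_reset(unsigned int srtt, unsigned int total_retrans,
                                   unsigned int *rto, unsigned int *cwnd)
    {
            /* No valid RTT sample from the 3WHS: back off from the
             * aggressive 1 s initRTO to the conservative 3 s. */
            if (srtt == 0)
                    *rto = TCP_TIMEOUT_FALLBACK;

            /* RFC5681 says cwnd = 1 after a retransmitted SYN/SYN-ACK;
             * with the 1 s initRTO we only do so past the first one. */
            *cwnd = total_retrans > 1 ? 1 : TCP_INIT_CWND;
    }

    int main(void)
    {
            unsigned int rto = 1, cwnd;

            init_metrics_reset(0, 2, &rto, &cwnd);
            printf("rto=%us cwnd=%u\n", rto, cwnd);     /* rto=3s cwnd=1 */
            return 0;
    }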
 
 static void tcp_update_reordering(struct sock *sk, const int metric,
@@ -1130,7 +1133,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
                return 0;
 
        /* ...Then it's D-SACK, and must reside below snd_una completely */
-       if (!after(end_seq, tp->snd_una))
+       if (after(end_seq, tp->snd_una))
                return 0;
 
        if (!before(start_seq, tp->undo_marker))
@@ -1182,10 +1185,18 @@ static void tcp_mark_lost_retrans(struct sock *sk)
                if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
                        continue;
 
-               if (after(received_upto, ack_seq) &&
-                   (tcp_is_fack(tp) ||
-                    !before(received_upto,
-                            ack_seq + tp->reordering * tp->mss_cache))) {
+               /* TODO: We would like to get rid of the tcp_is_fack(tp)-only
+                * constraint here (see above), but figuring out that at
+                * least tp->reordering SACK blocks reside between ack_seq
+                * and received_upto is no easy task to do cheaply with
+                * the available data structures.
+                *
+                * Whether FACK should check here for tp->reordering segs
+                * in-between could be argued either way (it would be
+                * rather simple to implement, as we could count fack_count
+                * during the walk and do tp->fackets_out - fack_count).
+                */
+               if (after(received_upto, ack_seq)) {
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                        tp->retrans_out -= tcp_skb_pcount(skb);
 
@@ -1229,7 +1240,7 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
        }
 
        /* D-SACK for already forgotten data... Do dumb counting. */
-       if (dup_sack &&
+       if (dup_sack && tp->undo_marker && tp->undo_retrans &&
            !after(end_seq_0, prior_snd_una) &&
            after(end_seq_0, tp->undo_marker))
                tp->undo_retrans--;
@@ -1306,7 +1317,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
 
        /* Account D-SACK for retransmitted packet. */
        if (dup_sack && (sacked & TCPCB_RETRANS)) {
-               if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
+               if (tp->undo_marker && tp->undo_retrans &&
+                   after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
                        tp->undo_retrans--;
                if (sacked & TCPCB_SACKED_ACKED)
                        state->reord = min(fack_count, state->reord);
@@ -1376,18 +1388,17 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
        return sacked;
 }
 
-static int tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
-                          struct sk_buff *skb,
+static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
                           struct tcp_sacktag_state *state,
-                          unsigned int pcount, int shifted, int mss)
+                          unsigned int pcount, int shifted, int mss,
+                          int dup_sack)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
 
        BUG_ON(!pcount);
 
-       /* Tweak before seqno plays */
-       if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint &&
-           !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
+       if (skb == tp->lost_skb_hint)
                tp->lost_cnt_hint += pcount;
 
        TCP_SKB_CB(prev)->end_seq += shifted;
@@ -1414,7 +1425,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
        }
 
        /* We discard results */
-       tcp_sacktag_one(skb, sk, state, 0, pcount);
+       tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
 
        /* Difference in this won't matter, both ACKed by the same cumul. ACK */
        TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1436,7 +1447,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
                tp->lost_cnt_hint -= tcp_skb_pcount(prev);
        }
 
-       TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(prev)->flags;
+       TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
        if (skb == tcp_highest_sack(sk))
                tcp_advance_highest_sack(sk, skb);
 
@@ -1565,7 +1576,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 
        if (!skb_shift(prev, skb, len))
                goto fallback;
-       if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss))
+       if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
                goto out;
 
        /* Hole filled allows collapsing with the next as well, this is very
@@ -1584,8 +1595,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
        len = skb->len;
        if (skb_shift(prev, skb, len)) {
                pcount += tcp_skb_pcount(skb);
-               tcp_shifted_skb(sk, prev, skb, state, tcp_skb_pcount(skb), len,
-                               mss);
+               tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
        }
 
 out:
@@ -1798,11 +1808,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
        for (i = used_sacks - 1; i > 0; i--) {
                for (j = 0; j < i; j++) {
                        if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
-                               struct tcp_sack_block tmp;
-
-                               tmp = sp[j];
-                               sp[j] = sp[j + 1];
-                               sp[j + 1] = tmp;
+                               swap(sp[j], sp[j + 1]);
 
                                /* Track where the first SACK block goes to */
                                if (j == first_sack_index)
@@ -2307,14 +2313,14 @@ static inline int tcp_fackets_out(struct tcp_sock *tp)
  * they differ. Since neither occurs due to loss, TCP should really
  * ignore them.
  */
-static inline int tcp_dupack_heurestics(struct tcp_sock *tp)
+static inline int tcp_dupack_heuristics(struct tcp_sock *tp)
 {
        return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
 }
 
 static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
 {
-       return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
+       return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
 }
 
 static inline int tcp_head_timedout(struct sock *sk)
@@ -2432,7 +2438,7 @@ static int tcp_time_to_recover(struct sock *sk)
                return 1;
 
        /* Not-A-Trick#2 : Classic rule... */
-       if (tcp_dupack_heurestics(tp) > tp->reordering)
+       if (tcp_dupack_heuristics(tp) > tp->reordering)
                return 1;
 
        /* Trick#3 : when we use RFC2988 timer restart, fast
@@ -2454,13 +2460,61 @@ static int tcp_time_to_recover(struct sock *sk)
                return 1;
        }
 
+       /* If a thin stream is detected, retransmit after the first
+        * received dupack. Employ this only if SACK is supported, in order
+        * to avoid a possible corner-case series of spurious retransmissions.
+        * Use only if there is no unsent data.
+        */
+       if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
+           tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
+           tcp_is_sack(tp) && !tcp_send_head(sk))
+               return 1;
+
        return 0;
 }
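
tcp_stream_is_thin() itself is not part of this hunk; in the same patch series it is roughly "fewer than four packets in flight once initial slow start is over". A hedged userspace rendering of the full predicate guarding the early retransmit, under that assumption:

    #include <stdbool.h>
    #include <stdio.h>

    struct flow {
            unsigned int packets_out;
            bool in_initial_slowstart;
            bool sack_ok;
            unsigned int dupack_heuristics;  /* fackets_out, or sacked_out + 1 */
            bool has_unsent_data;
            bool thin_dupack;                /* per-socket flag or sysctl */
    };

    /* Assumption: mirrors tcp_stream_is_thin() from include/net/tcp.h. */
    static bool stream_is_thin(const struct flow *f)
    {
            return f->packets_out < 4 && !f->in_initial_slowstart;
    }

    static bool thin_retransmit_now(const struct flow *f)
    {
            return f->thin_dupack && stream_is_thin(f) &&
                   f->dupack_heuristics > 1 && f->sack_ok &&
                   !f->has_unsent_data;
    }

    int main(void)
    {
            struct flow f = { 2, false, true, 2, false, true };

            printf("fast retransmit now: %s\n",
                   thin_retransmit_now(&f) ? "yes" : "no");
            return 0;
    }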
 
+/* New heuristics: it is possible only after we switched to restarting the
+ * timer each time something is ACKed. Hence, we can detect timed-out packets
+ * during fast retransmit without falling back to slow start.
+ *
+ * The usefulness of this as-is is questionable, since we should know which
+ * of the segments is the next to time out, which is relatively expensive to
+ * find in the general case unless we add some data structure just for that.
+ * The current approach certainly won't find the right one too often, and
+ * when it finally does find _something_ it usually marks a large part of the
+ * window right away (because a retransmission with a larger timestamp blocks
+ * the loop from advancing). -ij
+ */
+static void tcp_timeout_skbs(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *skb;
+
+       if (!tcp_is_fack(tp) || !tcp_head_timedout(sk))
+               return;
+
+       skb = tp->scoreboard_skb_hint;
+       if (tp->scoreboard_skb_hint == NULL)
+               skb = tcp_write_queue_head(sk);
+
+       tcp_for_write_queue_from(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
+               if (!tcp_skb_timedout(sk, skb))
+                       break;
+
+               tcp_skb_mark_lost(tp, skb);
+       }
+
+       tp->scoreboard_skb_hint = skb;
+
+       tcp_verify_left_out(tp);
+}
+
 /* Mark head of queue up as lost. With RFC3517 SACK, the packet count
  * is against sacked "cnt", otherwise it's against facked "cnt"
  */
-static void tcp_mark_head_lost(struct sock *sk, int packets)
+static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
@@ -2472,6 +2526,9 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
        if (tp->lost_skb_hint) {
                skb = tp->lost_skb_hint;
                cnt = tp->lost_cnt_hint;
+               /* Head already handled? */
+               if (mark_head && skb != tcp_write_queue_head(sk))
+                       return;
        } else {
                skb = tcp_write_queue_head(sk);
                cnt = 0;
@@ -2494,7 +2551,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
                        cnt += tcp_skb_pcount(skb);
 
                if (cnt > packets) {
-                       if (tcp_is_sack(tp) || (oldcnt >= packets))
+                       if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
+                           (oldcnt >= packets))
                                break;
 
                        mss = skb_shinfo(skb)->gso_size;
@@ -2505,6 +2563,9 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
                }
 
                tcp_skb_mark_lost(tp, skb);
+
+               if (mark_head)
+                       break;
        }
        tcp_verify_left_out(tp);
 }
@@ -2516,43 +2577,21 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
        struct tcp_sock *tp = tcp_sk(sk);
 
        if (tcp_is_reno(tp)) {
-               tcp_mark_head_lost(sk, 1);
+               tcp_mark_head_lost(sk, 1, 1);
        } else if (tcp_is_fack(tp)) {
                int lost = tp->fackets_out - tp->reordering;
                if (lost <= 0)
                        lost = 1;
-               tcp_mark_head_lost(sk, lost);
+               tcp_mark_head_lost(sk, lost, 0);
        } else {
                int sacked_upto = tp->sacked_out - tp->reordering;
-               if (sacked_upto < fast_rexmit)
-                       sacked_upto = fast_rexmit;
-               tcp_mark_head_lost(sk, sacked_upto);
+               if (sacked_upto >= 0)
+                       tcp_mark_head_lost(sk, sacked_upto, 0);
+               else if (fast_rexmit)
+                       tcp_mark_head_lost(sk, 1, 1);
        }
 
-       /* New heuristics: it is possible only after we switched
-        * to restart timer each time when something is ACKed.
-        * Hence, we can detect timed out packets during fast
-        * retransmit without falling to slow start.
-        */
-       if (tcp_is_fack(tp) && tcp_head_timedout(sk)) {
-               struct sk_buff *skb;
-
-               skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
-                       : tcp_write_queue_head(sk);
-
-               tcp_for_write_queue_from(skb, sk) {
-                       if (skb == tcp_send_head(sk))
-                               break;
-                       if (!tcp_skb_timedout(sk, skb))
-                               break;
-
-                       tcp_skb_mark_lost(tp, skb);
-               }
-
-               tp->scoreboard_skb_hint = skb;
-
-               tcp_verify_left_out(tp);
-       }
+       tcp_timeout_skbs(sk);
 }
 
 /* CWND moderation, preventing bursts due to too big ACKs
@@ -2615,7 +2654,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
        if (sk->sk_family == AF_INET) {
                printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
                       msg,
-                      &inet->daddr, ntohs(inet->dport),
+                      &inet->inet_daddr, ntohs(inet->inet_dport),
                       tp->snd_cwnd, tcp_left_out(tp),
                       tp->snd_ssthresh, tp->prior_ssthresh,
                       tp->packets_out);
@@ -2625,7 +2664,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
                struct ipv6_pinfo *np = inet6_sk(sk);
                printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
                       msg,
-                      &np->daddr, ntohs(inet->dport),
+                      &np->daddr, ntohs(inet->inet_dport),
                       tp->snd_cwnd, tcp_left_out(tp),
                       tp->snd_ssthresh, tp->prior_ssthresh,
                       tp->packets_out);
@@ -2636,7 +2675,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 #define DBGUNDO(x...) do { } while (0)
 #endif
 
-static void tcp_undo_cwr(struct sock *sk, const int undo)
+static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2648,14 +2687,13 @@ static void tcp_undo_cwr(struct sock *sk, const int undo)
                else
                        tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);
 
-               if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
+               if (undo_ssthresh && tp->prior_ssthresh > tp->snd_ssthresh) {
                        tp->snd_ssthresh = tp->prior_ssthresh;
                        TCP_ECN_withdraw_cwr(tp);
                }
        } else {
                tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
        }
-       tcp_moderate_cwnd(tp);
        tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -2676,7 +2714,7 @@ static int tcp_try_undo_recovery(struct sock *sk)
                 * or our original transmission succeeded.
                 */
                DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
-               tcp_undo_cwr(sk, 1);
+               tcp_undo_cwr(sk, true);
                if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
                        mib_idx = LINUX_MIB_TCPLOSSUNDO;
                else
@@ -2703,12 +2741,41 @@ static void tcp_try_undo_dsack(struct sock *sk)
 
        if (tp->undo_marker && !tp->undo_retrans) {
                DBGUNDO(sk, "D-SACK");
-               tcp_undo_cwr(sk, 1);
+               tcp_undo_cwr(sk, true);
                tp->undo_marker = 0;
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
        }
 }
 
+/* We can clear retrans_stamp when there are no retransmissions in the
+ * window. It would seem that it is trivially available for us in
+ * tp->retrans_out; however, that kind of assumption doesn't consider
+ * what will happen if errors occur when sending a retransmission for the
+ * second time. ...It could be that such a segment has only
+ * TCPCB_EVER_RETRANS set at the present time. It seems that checking
+ * the head skb is enough except for some reneging corner cases that
+ * are not worth the effort.
+ *
+ * Main reason for all this complexity is the fact that connection dying
+ * time now depends on the validity of the retrans_stamp, in particular,
+ * that successive retransmissions of a segment must not advance
+ * retrans_stamp under any conditions.
+ */
+static int tcp_any_retrans_done(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *skb;
+
+       if (tp->retrans_out)
+               return 1;
+
+       skb = tcp_write_queue_head(sk);
+       if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
+               return 1;
+
+       return 0;
+}
+
 /* Undo during fast recovery after partial ACK. */
 
 static int tcp_try_undo_partial(struct sock *sk, int acked)
@@ -2721,13 +2788,13 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
                /* Plain luck! Hole is filled with delayed
                 * packet, rather than with a retransmit.
                 */
-               if (tp->retrans_out == 0)
+               if (!tcp_any_retrans_done(sk))
                        tp->retrans_stamp = 0;
 
                tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
 
                DBGUNDO(sk, "Hoe");
-               tcp_undo_cwr(sk, 0);
+               tcp_undo_cwr(sk, false);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 
                /* So... Do not make Hoe's retransmit yet.
@@ -2756,7 +2823,7 @@ static int tcp_try_undo_loss(struct sock *sk)
 
                DBGUNDO(sk, "partial loss");
                tp->lost_out = 0;
-               tcp_undo_cwr(sk, 1);
+               tcp_undo_cwr(sk, true);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
                inet_csk(sk)->icsk_retransmits = 0;
                tp->undo_marker = 0;
@@ -2770,8 +2837,15 @@ static int tcp_try_undo_loss(struct sock *sk)
 static inline void tcp_complete_cwr(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
-       tp->snd_cwnd_stamp = tcp_time_stamp;
+
+       /* Do not moderate cwnd if it's already undone in cwr or recovery. */
+       if (tp->undo_marker) {
+               if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
+                       tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
+               else /* PRR */
+                       tp->snd_cwnd = tp->snd_ssthresh;
+               tp->snd_cwnd_stamp = tcp_time_stamp;
+       }
        tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
@@ -2780,7 +2854,7 @@ static void tcp_try_keep_open(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        int state = TCP_CA_Open;
 
-       if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker)
+       if (tcp_left_out(tp) || tcp_any_retrans_done(sk) || tp->undo_marker)
                state = TCP_CA_Disorder;
 
        if (inet_csk(sk)->icsk_ca_state != state) {
@@ -2795,7 +2869,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
 
        tcp_verify_left_out(tp);
 
-       if (!tp->frto_counter && tp->retrans_out == 0)
+       if (!tp->frto_counter && !tcp_any_retrans_done(sk))
                tp->retrans_stamp = 0;
 
        if (flag & FLAG_ECE)
@@ -2817,7 +2891,7 @@ static void tcp_mtup_probe_failed(struct sock *sk)
        icsk->icsk_mtup.probe_size = 0;
 }
 
-static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
+static void tcp_mtup_probe_success(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2829,7 +2903,7 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
                       icsk->icsk_mtup.probe_size;
        tp->snd_cwnd_cnt = 0;
        tp->snd_cwnd_stamp = tcp_time_stamp;
-       tp->rcv_ssthresh = tcp_current_ssthresh(sk);
+       tp->snd_ssthresh = tcp_current_ssthresh(sk);
 
        icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
        icsk->icsk_mtup.probe_size = 0;
@@ -2845,7 +2919,7 @@ void tcp_simple_retransmit(struct sock *sk)
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       unsigned int mss = tcp_current_mss(sk, 0);
+       unsigned int mss = tcp_current_mss(sk);
        u32 prior_lost = tp->lost_out;
 
        tcp_for_write_queue(skb, sk) {
@@ -2885,6 +2959,39 @@ void tcp_simple_retransmit(struct sock *sk)
        }
        tcp_xmit_retransmit_queue(sk);
 }
+EXPORT_SYMBOL(tcp_simple_retransmit);
+
+/* This function implements the PRR algorithm, specifically the PRR-SSRB
+ * (proportional rate reduction with slow start reduction bound) as described in
+ * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
+ * It computes the number of packets to send (sndcnt) based on packets newly
+ * delivered:
+ *   1) If the number of packets in flight is larger than ssthresh, PRR
+ *      spreads the cwnd reductions across a full RTT.
+ *   2) If the number of packets in flight is lower than ssthresh (such as
+ *      due to excess losses and/or application stalls), do not perform any
+ *      further cwnd reductions, but instead slow start up to ssthresh.
+ */
+static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
+                                       int fast_rexmit, int flag)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       int sndcnt = 0;
+       int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
+
+       if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
+               u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
+                              tp->prior_cwnd - 1;
+               sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
+       } else {
+               sndcnt = min_t(int, delta,
+                              max_t(int, tp->prr_delivered - tp->prr_out,
+                                    newly_acked_sacked) + 1);
+       }
+
+       sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
+       tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
+}
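
Worked numbers make the two branches easier to follow. The sketch below repeats the same arithmetic in plain C: while packets in flight exceed ssthresh, the dividend term paces the reduction in proportion to delivered data; once in flight drops below ssthresh, growth back toward ssthresh is bounded by delta:

    #include <stdio.h>

    /* Same arithmetic as tcp_update_cwnd_in_recovery(), with plain ints. */
    static int prr_sndcnt(int ssthresh, int prior_cwnd, int in_flight,
                          int prr_delivered, int prr_out,
                          int newly_acked_sacked, int fast_rexmit)
    {
            int sndcnt;

            if (in_flight > ssthresh) {
                    /* Proportional branch: pace the reduction so it is
                     * spread across roughly one RTT. */
                    long long dividend = (long long)ssthresh * prr_delivered +
                                         prior_cwnd - 1;
                    sndcnt = (int)(dividend / prior_cwnd) - prr_out;
            } else {
                    /* Slow start reduction bound: climb back to ssthresh. */
                    int delta = ssthresh - in_flight;
                    int bound = prr_delivered - prr_out;

                    if (bound < newly_acked_sacked)
                            bound = newly_acked_sacked;
                    sndcnt = delta < bound + 1 ? delta : bound + 1;
            }
            if (sndcnt < (fast_rexmit ? 1 : 0))
                    sndcnt = fast_rexmit ? 1 : 0;
            return sndcnt;
    }

    int main(void)
    {
            /* prior_cwnd 10, ssthresh 7, first ACK in recovery delivered
             * 2 segments, 9 still in flight: send 2 more; as recovery
             * proceeds, cwnd = in_flight + sndcnt converges on ssthresh. */
            printf("sndcnt=%d\n", prr_sndcnt(7, 10, 9, 2, 0, 2, 1));
            return 0;
    }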
 
 /* Process an event, which can update packets-in-flight not trivially.
  * Main goal of this function is to calculate new estimate for left_out,
@@ -2897,7 +3004,8 @@ void tcp_simple_retransmit(struct sock *sk)
  * It does _not_ decide what to send, it is made in function
  * tcp_xmit_retransmit_queue().
  */
-static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
+static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
+                                 int newly_acked_sacked, int flag)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
@@ -2925,7 +3033,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
            before(tp->snd_una, tp->high_seq) &&
            icsk->icsk_ca_state != TCP_CA_Open &&
            tp->fackets_out > tp->reordering) {
-               tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering);
+               tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
        }
 
@@ -3047,16 +3155,28 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 
                tp->bytes_acked = 0;
                tp->snd_cwnd_cnt = 0;
+               tp->prior_cwnd = tp->snd_cwnd;
+               tp->prr_delivered = 0;
+               tp->prr_out = 0;
                tcp_set_ca_state(sk, TCP_CA_Recovery);
                fast_rexmit = 1;
        }
 
        if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
                tcp_update_scoreboard(sk, fast_rexmit);
-       tcp_cwnd_down(sk, flag);
+       tp->prr_delivered += newly_acked_sacked;
+       tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag);
        tcp_xmit_retransmit_queue(sk);
 }
 
+void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt)
+{
+       tcp_rtt_estimator(sk, seq_rtt);
+       tcp_set_rto(sk);
+       inet_csk(sk)->icsk_backoff = 0;
+}
+EXPORT_SYMBOL(tcp_valid_rtt_meas);
+
 /* Read draft-ietf-tcplw-high-performance before mucking
  * with this code. (Supersedes RFC1323)
  */
@@ -3078,11 +3198,8 @@ static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
         * in window is lost... Voila.                          --ANK (010210)
         */
        struct tcp_sock *tp = tcp_sk(sk);
-       const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
-       tcp_rtt_estimator(sk, seq_rtt);
-       tcp_set_rto(sk);
-       inet_csk(sk)->icsk_backoff = 0;
-       tcp_bound_rto(sk);
+
+       tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr);
 }
 
 static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
@@ -3099,10 +3216,7 @@ static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
        if (flag & FLAG_RETRANS_DATA_ACKED)
                return;
 
-       tcp_rtt_estimator(sk, seq_rtt);
-       tcp_set_rto(sk);
-       inet_csk(sk)->icsk_backoff = 0;
-       tcp_bound_rto(sk);
+       tcp_valid_rtt_meas(sk, seq_rtt);
 }
 
 static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
@@ -3181,7 +3295,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
        while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
                struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
-               u32 end_seq;
                u32 acked_pcount;
                u8 sacked = scb->sacked;
 
@@ -3196,16 +3309,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                                break;
 
                        fully_acked = 0;
-                       end_seq = tp->snd_una;
                } else {
                        acked_pcount = tcp_skb_pcount(skb);
-                       end_seq = scb->end_seq;
-               }
-
-               /* MTU probing checks */
-               if (fully_acked && icsk->icsk_mtup.probe_size &&
-                   !after(tp->mtu_probe.probe_seq_end, scb->end_seq)) {
-                       tcp_mtup_probe_success(sk, skb);
                }
 
                if (sacked & TCPCB_RETRANS) {
@@ -3241,7 +3346,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                 * connection startup slow start one packet too
                 * quickly.  This is severely frowned upon behavior.
                 */
-               if (!(scb->flags & TCPCB_FLAG_SYN)) {
+               if (!(scb->tcp_flags & TCPHDR_SYN)) {
                        flag |= FLAG_DATA_ACKED;
                } else {
                        flag |= FLAG_SYN_ACKED;
@@ -3270,24 +3375,26 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                const struct tcp_congestion_ops *ca_ops
                        = inet_csk(sk)->icsk_ca_ops;
 
+               if (unlikely(icsk->icsk_mtup.probe_size &&
+                            !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
+                       tcp_mtup_probe_success(sk);
+               }
+
                tcp_ack_update_rtt(sk, flag, seq_rtt);
                tcp_rearm_rto(sk);
 
                if (tcp_is_reno(tp)) {
                        tcp_remove_reno_sacks(sk, pkts_acked);
                } else {
+                       int delta;
+
                        /* Non-retransmitted hole got filled? That's reordering */
                        if (reord < prior_fackets)
                                tcp_update_reordering(sk, tp->fackets_out - reord, 0);
 
-                       /* No need to care for underflows here because
-                        * the lost_skb_hint gets NULLed if we're past it
-                        * (or something non-trivial happened)
-                        */
-                       if (tcp_is_fack(tp))
-                               tp->lost_cnt_hint -= pkts_acked;
-                       else
-                               tp->lost_cnt_hint -= prior_sacked - tp->sacked_out;
+                       delta = tcp_is_fack(tp) ? pkts_acked :
+                                                 prior_sacked - tp->sacked_out;
+                       tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
                }
 
                tp->fackets_out -= min(pkts_acked, tp->fackets_out);
@@ -3303,7 +3410,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                                                 net_invalid_timestamp()))
                                        rtt_us = ktime_us_delta(ktime_get_real(),
                                                                last_ackt);
-                               else if (ca_seq_rtt > 0)
+                               else if (ca_seq_rtt >= 0)
                                        rtt_us = jiffies_to_usecs(ca_seq_rtt);
                        }
 
@@ -3359,8 +3466,8 @@ static void tcp_ack_probe(struct sock *sk)
 
 static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
 {
-       return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
-               inet_csk(sk)->icsk_ca_state != TCP_CA_Open);
+       return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
+               inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
 }
 
 static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
@@ -3377,9 +3484,9 @@ static inline int tcp_may_update_window(const struct tcp_sock *tp,
                                        const u32 ack, const u32 ack_seq,
                                        const u32 nwin)
 {
-       return (after(ack, tp->snd_una) ||
+       return  after(ack, tp->snd_una) ||
                after(ack_seq, tp->snd_wl1) ||
-               (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd));
+               (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
 }
 
 /* Update our send window.
@@ -3399,7 +3506,7 @@ static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
 
        if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
                flag |= FLAG_WIN_UPDATE;
-               tcp_update_wl(tp, ack, ack_seq);
+               tcp_update_wl(tp, ack_seq);
 
                if (tp->snd_wnd != nwin) {
                        tp->snd_wnd = nwin;
@@ -3447,7 +3554,7 @@ static void tcp_undo_spur_to_response(struct sock *sk, int flag)
        if (flag & FLAG_ECE)
                tcp_ratehalving_spur_to_response(sk);
        else
-               tcp_undo_cwr(sk, 1);
+               tcp_undo_cwr(sk, true);
 }
 
 /* F-RTO spurious RTO detection algorithm (RFC4138)
@@ -3573,17 +3680,22 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
        u32 prior_in_flight;
        u32 prior_fackets;
        int prior_packets;
+       int prior_sacked = tp->sacked_out;
+       int newly_acked_sacked = 0;
        int frto_cwnd = 0;
 
-       /* If the ack is newer than sent or older than previous acks
+       /* If the ack is older than previous acks
         * then we can probably ignore it.
         */
-       if (after(ack, tp->snd_nxt))
-               goto uninteresting_ack;
-
        if (before(ack, prior_snd_una))
                goto old_ack;
 
+       /* If the ack includes data we haven't sent yet, discard
+        * this segment (RFC793 Section 3.9).
+        */
+       if (after(ack, tp->snd_nxt))
+               goto invalid_ack;
+
        if (after(ack, prior_snd_una))
                flag |= FLAG_SND_UNA_ADVANCED;
 
@@ -3604,7 +3716,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
                 * No more checks are required.
                 * Note, we use the fact that SND.UNA>=SND.WL2.
                 */
-               tcp_update_wl(tp, ack, ack_seq);
+               tcp_update_wl(tp, ack_seq);
                tp->snd_una = ack;
                flag |= FLAG_WIN_UPDATE;
 
@@ -3641,6 +3753,9 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
        /* See if we can take anything off of the retransmit queue. */
        flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
 
+       newly_acked_sacked = (prior_packets - prior_sacked) -
+                            (tp->packets_out - tp->sacked_out);
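
newly_acked_sacked counts how many packets this ACK removed from the network, whether cumulatively ACKed or newly SACKed; it is what feeds tp->prr_delivered in tcp_fastretrans_alert(). A quick check of the identity:

    #include <stdio.h>

    int main(void)
    {
            /* Before this ACK: 12 packets out, 3 of them SACKed.
             * The ACK cumulatively acks 2 packets and SACKs 1 more. */
            int prior_packets = 12, prior_sacked = 3;
            int packets_out = 10, sacked_out = 4;

            /* Un-SACKed packets that left the window, plus newly SACKed
             * ones: 2 acked + 1 sacked = 3 newly delivered. */
            int newly_acked_sacked = (prior_packets - prior_sacked) -
                                     (packets_out - sacked_out);

            printf("newly_acked_sacked = %d\n", newly_acked_sacked);  /* 3 */
            return 0;
    }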
+
        if (tp->frto_counter)
                frto_cwnd = tcp_process_frto(sk, flag);
        /* Guarantee sacktag reordering detection against wrap-arounds */
@@ -3653,14 +3768,14 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
                    tcp_may_raise_cwnd(sk, flag))
                        tcp_cong_avoid(sk, ack, prior_in_flight);
                tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
-                                     flag);
+                                     newly_acked_sacked, flag);
        } else {
                if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
                        tcp_cong_avoid(sk, ack, prior_in_flight);
        }
 
        if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
-               dst_confirm(sk->sk_dst_cache);
+               dst_confirm(__sk_dst_get(sk));
 
        return 1;
 
@@ -3673,6 +3788,10 @@ no_queue:
                tcp_ack_probe(sk);
        return 1;
 
+invalid_ack:
+       SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
+       return -1;
+
 old_ack:
        if (TCP_SKB_CB(skb)->sacked) {
                tcp_sacktag_write_queue(sk, skb, prior_snd_una);
@@ -3680,8 +3799,7 @@ old_ack:
                        tcp_try_keep_open(sk);
        }
 
-uninteresting_ack:
-       SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
+       SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
        return 0;
 }
 
@@ -3690,7 +3808,7 @@ uninteresting_ack:
  * the fast version below fails.
  */
 void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
-                      int estab)
+                      u8 **hvpp, int estab)
 {
        unsigned char *ptr;
        struct tcphdr *th = tcp_hdr(skb);
@@ -3774,6 +3892,30 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                 */
                                break;
 #endif
+                       case TCPOPT_COOKIE:
+                               /* This option is variable length.
+                                */
+                               switch (opsize) {
+                               case TCPOLEN_COOKIE_BASE:
+                                       /* not yet implemented */
+                                       break;
+                               case TCPOLEN_COOKIE_PAIR:
+                                       /* not yet implemented */
+                                       break;
+                               case TCPOLEN_COOKIE_MIN+0:
+                               case TCPOLEN_COOKIE_MIN+2:
+                               case TCPOLEN_COOKIE_MIN+4:
+                               case TCPOLEN_COOKIE_MIN+6:
+                               case TCPOLEN_COOKIE_MAX:
+                                       /* 16-bit multiple */
+                                       opt_rx->cookie_plus = opsize;
+                                       *hvpp = ptr;
+                                       break;
+                               default:
+                                       /* ignore option */
+                                       break;
+                               }
+                               break;
                        }
 
                        ptr += opsize-2;
@@ -3781,6 +3923,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                }
        }
 }
+EXPORT_SYMBOL(tcp_parse_options);
 
 static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
 {
@@ -3802,17 +3945,20 @@ static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
  * If it is wrong it falls back on tcp_parse_options().
  */
 static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
-                                 struct tcp_sock *tp)
+                                 struct tcp_sock *tp, u8 **hvpp)
 {
-       if (th->doff == sizeof(struct tcphdr) >> 2) {
+       /* In the spirit of fast parsing, compare doff directly to constant
+        * values.  Because equality is used, short doff can be ignored here.
+        */
+       if (th->doff == (sizeof(*th) / 4)) {
                tp->rx_opt.saw_tstamp = 0;
                return 0;
        } else if (tp->rx_opt.tstamp_ok &&
-                  th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
+                  th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
                if (tcp_parse_aligned_timestamp(tp, th))
                        return 1;
        }
-       tcp_parse_options(skb, &tp->rx_opt, 1);
+       tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
        return 1;
 }
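
The doff comparisons work because doff counts the TCP header in 32-bit words. A quick check of the two constants being compared (TCPOLEN_TSTAMP_ALIGNED is the 12-byte NOP-NOP-timestamp layout):

    #include <stdio.h>

    #define TCP_HDR_LEN            20   /* sizeof(struct tcphdr) */
    #define TCPOLEN_TSTAMP_ALIGNED 12   /* NOP + NOP + 10-byte timestamp option */

    int main(void)
    {
            /* doff is in 32-bit words, so divide byte lengths by 4. */
            printf("bare header:      doff == %d\n", TCP_HDR_LEN / 4);
            printf("header + tstamp:  doff == %d\n",
                   (TCP_HDR_LEN + TCPOLEN_TSTAMP_ALIGNED) / 4);
            return 0;
    }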
 
@@ -3844,13 +3990,14 @@ u8 *tcp_parse_md5sig_option(struct tcphdr *th)
                        if (opsize < 2 || opsize > length)
                                return NULL;
                        if (opcode == TCPOPT_MD5SIG)
-                               return ptr;
+                               return opsize == TCPOLEN_MD5SIG ? ptr : NULL;
                }
                ptr += opsize - 2;
                length -= opsize;
        }
        return NULL;
 }
+EXPORT_SYMBOL(tcp_parse_md5sig_option);
 #endif
 
 static inline void tcp_store_ts_recent(struct tcp_sock *tp)
@@ -3869,8 +4016,7 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
                 * Not only, also it occurs for expired timestamps.
                 */
 
-               if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
-                  get_seconds() >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
+               if (tcp_paws_check(&tp->rx_opt, 0))
                        tcp_store_ts_recent(tp);
        }
 }
@@ -3922,9 +4068,9 @@ static inline int tcp_paws_discard(const struct sock *sk,
                                   const struct sk_buff *skb)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
-       return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
-               get_seconds() < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
-               !tcp_disordered_ack(sk, skb));
+
+       return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) &&
+              !tcp_disordered_ack(sk, skb);
 }
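
Both the ts_recent update and the discard test now funnel through tcp_paws_check(), which keeps the signed 32-bit timestamp comparison and the 24-day staleness escape in one place. A hedged sketch of what that helper is assumed to do, reconstructed from the two open-coded tests this hunk replaces:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)

    /* Assumed shape of tcp_paws_check(): returns nonzero if the received
     * timestamp is acceptable within the given window. */
    static int paws_check(uint32_t ts_recent, uint32_t rcv_tsval,
                          time_t ts_recent_stamp, time_t now, int paws_win)
    {
            /* Wraparound-safe signed comparison of 32-bit timestamps. */
            if ((int32_t)(ts_recent - rcv_tsval) <= paws_win)
                    return 1;
            /* ts_recent is older than 24 days: too stale to trust. */
            if (now >= ts_recent_stamp + TCP_PAWS_24DAYS)
                    return 1;
            return 0;
    }

    int main(void)
    {
            time_t now = time(NULL);

            /* Timestamp moved forward: passes. */
            printf("%d\n", paws_check(1000, 1005, now, now, 0));  /* 1 */
            /* Timestamp stepped back beyond the window: fails. */
            printf("%d\n", paws_check(1000, 990, now, now, 1));   /* 0 */
            return 0;
    }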
 
 /* Check segment sequence number for validity.
@@ -3962,6 +4108,8 @@ static void tcp_reset(struct sock *sk)
        default:
                sk->sk_err = ECONNRESET;
        }
+       /* This barrier is coupled with smp_rmb() in tcp_poll() */
+       smp_wmb();
 
        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_error_report(sk);
@@ -4082,7 +4230,6 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
                tp->rx_opt.dsack = 1;
                tp->duplicate_sack[0].start_seq = seq;
                tp->duplicate_sack[0].end_seq = end_seq;
-               tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + 1;
        }
 }
 
@@ -4137,8 +4284,6 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
                         * Decrease num_sacks.
                         */
                        tp->rx_opt.num_sacks--;
-                       tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
-                                              tp->rx_opt.dsack;
                        for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
                                sp[i] = sp[i + 1];
                        continue;
@@ -4147,20 +4292,6 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
        }
 }
 
-static inline void tcp_sack_swap(struct tcp_sack_block *sack1,
-                                struct tcp_sack_block *sack2)
-{
-       __u32 tmp;
-
-       tmp = sack1->start_seq;
-       sack1->start_seq = sack2->start_seq;
-       sack2->start_seq = tmp;
-
-       tmp = sack1->end_seq;
-       sack1->end_seq = sack2->end_seq;
-       sack2->end_seq = tmp;
-}
-
 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
 {
        struct tcp_sock *tp = tcp_sk(sk);
@@ -4175,7 +4306,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
                if (tcp_sack_extend(sp, seq, end_seq)) {
                        /* Rotate this_sack to the first one. */
                        for (; this_sack > 0; this_sack--, sp--)
-                               tcp_sack_swap(sp, sp - 1);
+                               swap(*sp, *(sp - 1));
                        if (cur_sacks > 1)
                                tcp_sack_maybe_coalesce(tp);
                        return;
@@ -4201,7 +4332,6 @@ new_sack:
        sp->start_seq = seq;
        sp->end_seq = end_seq;
        tp->rx_opt.num_sacks++;
-       tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 }
 
 /* RCV.NXT advances, some SACKs should be eaten. */
@@ -4215,7 +4345,6 @@ static void tcp_sack_remove(struct tcp_sock *tp)
        /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
        if (skb_queue_empty(&tp->out_of_order_queue)) {
                tp->rx_opt.num_sacks = 0;
-               tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
                return;
        }
 
@@ -4236,11 +4365,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
                this_sack++;
                sp++;
        }
-       if (num_sacks != tp->rx_opt.num_sacks) {
-               tp->rx_opt.num_sacks = num_sacks;
-               tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
-                                      tp->rx_opt.dsack;
-       }
+       tp->rx_opt.num_sacks = num_sacks;
 }
 
 /* This one checks to see if we can put data from the
@@ -4264,7 +4389,7 @@ static void tcp_ofo_queue(struct sock *sk)
                }
 
                if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
-                       SOCK_DEBUG(sk, "ofo packet was already received \n");
+                       SOCK_DEBUG(sk, "ofo packet was already received\n");
                        __skb_unlink(skb, &tp->out_of_order_queue);
                        __kfree_skb(skb);
                        continue;
@@ -4312,14 +4437,12 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
        if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
                goto drop;
 
+       skb_dst_drop(skb);
        __skb_pull(skb, th->doff * 4);
 
        TCP_ECN_accept_cwr(tp, skb);
 
-       if (tp->rx_opt.dsack) {
-               tp->rx_opt.dsack = 0;
-               tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks;
-       }
+       tp->rx_opt.dsack = 0;
 
        /*  Queue data for delivery to the user.
         *  Packets in sequence go to the receive queue.
@@ -4342,7 +4465,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
                        if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
                                tp->ucopy.len -= chunk;
                                tp->copied_seq += chunk;
-                               eaten = (chunk == skb->len && !th->fin);
+                               eaten = (chunk == skb->len);
                                tcp_rcv_space_adjust(sk);
                        }
                        local_bh_disable();
@@ -4438,15 +4561,13 @@ drop:
                /* Initial out of order segment, build 1 SACK. */
                if (tcp_is_sack(tp)) {
                        tp->rx_opt.num_sacks = 1;
-                       tp->rx_opt.dsack     = 0;
-                       tp->rx_opt.eff_sacks = 1;
                        tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
                        tp->selective_acks[0].end_seq =
                                                TCP_SKB_CB(skb)->end_seq;
                }
                __skb_queue_head(&tp->out_of_order_queue, skb);
        } else {
-               struct sk_buff *skb1 = tp->out_of_order_queue.prev;
+               struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue);
                u32 seq = TCP_SKB_CB(skb)->seq;
                u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
@@ -4463,15 +4584,18 @@ drop:
                }
 
                /* Find place to insert this segment. */
-               do {
+               while (1) {
                        if (!after(TCP_SKB_CB(skb1)->seq, seq))
                                break;
-               } while ((skb1 = skb1->prev) !=
-                        (struct sk_buff *)&tp->out_of_order_queue);
+                       if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
+                               skb1 = NULL;
+                               break;
+                       }
+                       skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
+               }
 
                /* Does skb overlap the previous one? */
-               if (skb1 != (struct sk_buff *)&tp->out_of_order_queue &&
-                   before(seq, TCP_SKB_CB(skb1)->end_seq)) {
+               if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
                        if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
                                /* All the bits are present. Drop. */
                                __kfree_skb(skb);
@@ -4483,15 +4607,26 @@ drop:
                                tcp_dsack_set(sk, seq,
                                              TCP_SKB_CB(skb1)->end_seq);
                        } else {
-                               skb1 = skb1->prev;
+                               if (skb_queue_is_first(&tp->out_of_order_queue,
+                                                      skb1))
+                                       skb1 = NULL;
+                               else
+                                       skb1 = skb_queue_prev(
+                                               &tp->out_of_order_queue,
+                                               skb1);
                        }
                }
-               __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
+               if (!skb1)
+                       __skb_queue_head(&tp->out_of_order_queue, skb);
+               else
+                       __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
 
                /* And remove segments wholly covered by the new one. */
-               while ((skb1 = skb->next) !=
-                      (struct sk_buff *)&tp->out_of_order_queue &&
-                      after(end_seq, TCP_SKB_CB(skb1)->seq)) {
+               while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
+                       skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
+
+                       if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
+                               break;
                        if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
                                tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
                                                 end_seq);
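[Reviewer note] The insertion path above now walks the out-of-order queue strictly through the skb queue helpers, with NULL standing in for "before the first element". The core of the search, reduced to a self-contained sketch (the helper name and framing are ours, not from the patch):

	/* Find the last queued skb whose seq does not exceed @seq,
	 * or NULL if the new segment sorts before everything queued.
	 */
	static struct sk_buff *ofo_find_prev(struct sk_buff_head *list, u32 seq)
	{
		struct sk_buff *skb1 = skb_peek_tail(list);

		while (skb1 && after(TCP_SKB_CB(skb1)->seq, seq)) {
			if (skb_queue_is_first(list, skb1))
				return NULL;
			skb1 = skb_queue_prev(list, skb1);
		}
		return skb1;
	}

The NULL result is what selects __skb_queue_head() over __skb_queue_after() above.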
@@ -4512,7 +4647,10 @@ add_sack:
 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
                                        struct sk_buff_head *list)
 {
-       struct sk_buff *next = skb->next;
+       struct sk_buff *next = NULL;
+
+       if (!skb_queue_is_last(list, skb))
+               next = skb_queue_next(list, skb);
 
        __skb_unlink(skb, list);
        __kfree_skb(skb);
@@ -4523,6 +4661,9 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
 
 /* Collapse contiguous sequence of skbs head..tail with
  * sequence numbers start..end.
+ *
+ * If tail is NULL, collapsing runs to the end of the list.
+ *
  * Segments with FIN/SYN are not collapsed (only because this
  * simplifies code)
  */
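[Reviewer note] tcp_collapse_one() returning NULL at the end of the list, and tail == NULL meaning "to the end", both exist so the rewritten loop below can rely on skb_queue_walk_from_safe(). From memory of the skbuff.h macro, it expands to roughly:

	#define skb_queue_walk_from_safe(queue, skb, tmp)		\
		for (tmp = skb->next;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

The pre-loaded tmp pointer is what makes unlinking and freeing the current skb safe mid-walk; the goto restart handles the case where tcp_collapse_one() hands back a successor the cached tmp no longer matches.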
@@ -4531,15 +4672,23 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
             struct sk_buff *head, struct sk_buff *tail,
             u32 start, u32 end)
 {
-       struct sk_buff *skb;
+       struct sk_buff *skb, *n;
+       bool end_of_skbs;
 
        /* First, check that queue is collapsible and find
         * the point where collapsing can be useful. */
-       for (skb = head; skb != tail;) {
+       skb = head;
+restart:
+       end_of_skbs = true;
+       skb_queue_walk_from_safe(list, skb, n) {
+               if (skb == tail)
+                       break;
                /* No new bits? It is possible on ofo queue. */
                if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
                        skb = tcp_collapse_one(sk, skb, list);
-                       continue;
+                       if (!skb)
+                               break;
+                       goto restart;
                }
 
                /* The first skb to collapse is:
@@ -4549,16 +4698,24 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                 */
                if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
                    (tcp_win_from_space(skb->truesize) > skb->len ||
-                    before(TCP_SKB_CB(skb)->seq, start) ||
-                    (skb->next != tail &&
-                     TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq)))
+                    before(TCP_SKB_CB(skb)->seq, start))) {
+                       end_of_skbs = false;
                        break;
+               }
+
+               if (!skb_queue_is_last(list, skb)) {
+                       struct sk_buff *next = skb_queue_next(list, skb);
+                       if (next != tail &&
+                           TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) {
+                               end_of_skbs = false;
+                               break;
+                       }
+               }
 
                /* Decided to skip this, advance start seq. */
                start = TCP_SKB_CB(skb)->end_seq;
-               skb = skb->next;
        }
-       if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
+       if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
                return;
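[Reviewer note] With the sentinel comparisons gone, the scan can no longer detect "ran off the end" by testing skb against the queue head, so a boolean carries that outcome out of the loop. The pattern, reduced to a sketch (found_collapse_point() is a hypothetical stand-in for the conditions above):

	struct sk_buff *skb = head, *n;
	bool end_of_skbs = true;

	skb_queue_walk_from_safe(list, skb, n) {
		if (found_collapse_point(skb)) {	/* hypothetical predicate */
			end_of_skbs = false;		/* skb is a real element */
			break;
		}
	}
	if (end_of_skbs)
		return;		/* scan ran off the end: nothing to collapse */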
 
        while (before(start, end)) {
@@ -4603,7 +4760,8 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                        }
                        if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
                                skb = tcp_collapse_one(sk, skb, list);
-                               if (skb == tail ||
+                               if (!skb ||
+                                   skb == tail ||
                                    tcp_hdr(skb)->syn ||
                                    tcp_hdr(skb)->fin)
                                        return;
@@ -4630,17 +4788,21 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
        head = skb;
 
        for (;;) {
-               skb = skb->next;
+               struct sk_buff *next = NULL;
+
+               if (!skb_queue_is_last(&tp->out_of_order_queue, skb))
+                       next = skb_queue_next(&tp->out_of_order_queue, skb);
+               skb = next;
 
                /* Segment is terminated when we see a gap or when
                 * we reach the end of the queue. */
-               if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
+               if (!skb ||
                    after(TCP_SKB_CB(skb)->seq, end) ||
                    before(TCP_SKB_CB(skb)->end_seq, start)) {
                        tcp_collapse(sk, &tp->out_of_order_queue,
                                     head, skb, start, end);
                        head = skb;
-                       if (skb == (struct sk_buff *)&tp->out_of_order_queue)
+                       if (!skb)
                                break;
                        /* Start new segment */
                        start = TCP_SKB_CB(skb)->seq;
@@ -4701,10 +4863,11 @@ static int tcp_prune_queue(struct sock *sk)
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
        tcp_collapse_ofo_queue(sk);
-       tcp_collapse(sk, &sk->sk_receive_queue,
-                    sk->sk_receive_queue.next,
-                    (struct sk_buff *)&sk->sk_receive_queue,
-                    tp->copied_seq, tp->rcv_nxt);
+       if (!skb_queue_empty(&sk->sk_receive_queue))
+               tcp_collapse(sk, &sk->sk_receive_queue,
+                            skb_peek(&sk->sk_receive_queue),
+                            NULL,
+                            tp->copied_seq, tp->rcv_nxt);
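[Reviewer note] tcp_collapse() now expects head to be a real skb rather than the queue-head cast, and skb_peek() returns NULL on an empty list, hence the new emptiness guard. skb_peek() is essentially:

	static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
	{
		struct sk_buff *list = ((struct sk_buff *)list_)->next;

		if (list == (struct sk_buff *)list_)
			list = NULL;	/* empty queue */
		return list;
	}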
        sk_mem_reclaim(sk);
 
        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
@@ -4766,7 +4929,7 @@ static int tcp_should_expand_sndbuf(struct sock *sk)
                return 0;
 
        /* If we are under soft global TCP memory pressure, do not expand.  */
-       if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
+       if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
                return 0;
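[Reviewer note] tcp_memory_allocated is read with atomic_long_read() here because the counter was widened; a plain atomic_t tracking pages can wrap on large-memory 64-bit machines. Presumably the declaration now reads along the lines of:

	atomic_long_t tcp_memory_allocated;	/* pages, may exceed 2^31 */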
 
        /* If we filled the congestion window, do not expand.  */
@@ -4824,11 +4987,11 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
        struct tcp_sock *tp = tcp_sk(sk);
 
            /* More than one full frame received... */
-       if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss
+       if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
             /* ... and right edge of window advances far enough.
              * (tcp_recvmsg() will send ACK otherwise). Or...
              */
-            && __tcp_select_window(sk) >= tp->rcv_wnd) ||
+            __tcp_select_window(sk) >= tp->rcv_wnd) ||
            /* We ACK each frame or... */
            tcp_in_quickack_mode(sk) ||
            /* We have out of order data. */
@@ -5009,7 +5172,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
                return 0;
 
        if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-               tp->ucopy.dma_chan = get_softnet_dma();
+               tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
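[Reviewer note] get_softnet_dma() gives way to the dmaengine lookup dma_find_channel(DMA_MEMCPY), which hands back a per-CPU channel reference that is not individually refcounted, so there is no matching put on this path. Usage pattern (a sketch, under that assumption):

	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);

	if (chan) {
		/* channel stays valid while this CPU holds it; issue the
		 * async copy without a per-use get/put pair */
	}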
 
        if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
@@ -5049,10 +5212,12 @@ out:
 static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
                              struct tcphdr *th, int syn_inerr)
 {
+       u8 *hash_location;
        struct tcp_sock *tp = tcp_sk(sk);
 
        /* RFC1323: H1. Apply PAWS check first. */
-       if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
+       if (tcp_fast_parse_options(skb, th, tp, &hash_location) &&
+           tp->rx_opt.saw_tstamp &&
            tcp_paws_discard(sk, skb)) {
                if (!th->rst) {
                        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
@@ -5160,7 +5325,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
         */
 
        if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
-           TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
+           TCP_SKB_CB(skb)->seq == tp->rcv_nxt &&
+           !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
                int tcp_header_len = tp->tcp_header_len;
 
                /* Timestamp header prediction: tcp_header_len
@@ -5313,8 +5479,8 @@ slow_path:
                return -res;
 
 step5:
-       if (th->ack)
-               tcp_ack(sk, skb, FLAG_SLOWPATH);
+       if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
+               goto discard;
 
        tcp_rcv_rtt_measure_ts(sk, skb);
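[Reviewer note] This hunk, together with the later "acceptable = tcp_ack(...) > 0" change, assumes a three-way return convention from tcp_ack(). As we read it (a sketch of the contract, not a quote):

	/*  < 0 : the ACK field is invalid (e.g. it acks data that was
	 *        never sent) - the segment is discarded;
	 *    0 : an old duplicate ACK - harmless, fall through;
	 *  > 0 : an acceptable ACK that was processed.
	 */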
 
@@ -5335,15 +5501,18 @@ discard:
        __kfree_skb(skb);
        return 0;
 }
+EXPORT_SYMBOL(tcp_rcv_established);
 
 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                                         struct tcphdr *th, unsigned len)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
+       u8 *hash_location;
        struct inet_connection_sock *icsk = inet_csk(sk);
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct tcp_cookie_values *cvp = tp->cookie_values;
        int saved_clamp = tp->rx_opt.mss_clamp;
 
-       tcp_parse_options(skb, &tp->rx_opt, 0);
+       tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
 
        if (th->ack) {
                /* rfc793:
@@ -5412,7 +5581,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                 * never scaled.
                 */
                tp->snd_wnd = ntohs(th->window);
-               tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq);
+               tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
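[Reviewer note] tcp_init_wl() has lost its ack_seq argument; only the sequence number feeds the window-update bookkeeping. We believe the helper is now simply:

	static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
	{
		tp->snd_wl1 = seq;
	}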
 
                if (!tp->rx_opt.wscale_ok) {
                        tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
@@ -5440,6 +5609,31 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                 * Change state from SYN-SENT only after copied_seq
                 * is initialized. */
                tp->copied_seq = tp->rcv_nxt;
+
+               if (cvp != NULL &&
+                   cvp->cookie_pair_size > 0 &&
+                   tp->rx_opt.cookie_plus > 0) {
+                       int cookie_size = tp->rx_opt.cookie_plus
+                                       - TCPOLEN_COOKIE_BASE;
+                       int cookie_pair_size = cookie_size
+                                            + cvp->cookie_desired;
+
+                       /* A cookie extension option was sent and returned.
+                        * Note that each incoming SYNACK replaces the
+                        * Responder cookie.  The initial exchange is most
+                        * fragile, as protection against spoofing relies
+                        * entirely upon the sequence and timestamp (above).
+                        * This replacement strategy allows the correct pair to
+                        * pass through, while any others will be filtered via
+                        * Responder verification later.
+                        */
+                       if (sizeof(cvp->cookie_pair) >= cookie_pair_size) {
+                               memcpy(&cvp->cookie_pair[cvp->cookie_desired],
+                                      hash_location, cookie_size);
+                               cvp->cookie_pair_size = cookie_pair_size;
+                       }
+               }
+
                smp_mb();
                tcp_set_state(sk, TCP_ESTABLISHED);
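[Reviewer note] The cookie-pair copy above assumes a split buffer: the initiator half occupies the first cookie_desired bytes, and the responder half, taken from the SYNACK option at hash_location (the pointer the reworked option parser now reports), is appended after it. Sketch of the assumed layout:

	/*  cookie_pair[0 .. cookie_desired-1]           our (initiator) half
	 *  cookie_pair[cookie_desired ..
	 *              cookie_desired+cookie_size-1]    responder half, copied
	 *                                               from the SYNACK option
	 */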
 
@@ -5513,7 +5707,7 @@ discard:
 
        /* PAWS check. */
        if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
-           tcp_paws_check(&tp->rx_opt, 0))
+           tcp_paws_reject(&tp->rx_opt, 0))
                goto discard_and_undo;
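[Reviewer note] tcp_paws_check() here becomes tcp_paws_reject(); as we understand the rename, the predicate's polarity is now in the name, so callers no longer have to remember which way the old check pointed:

	/* Assumed contract (sketch):
	 *   tcp_paws_reject() != 0  =>  PAWS says drop the segment
	 */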
 
        if (th->syn) {
@@ -5651,7 +5845,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
        /* step 5: check the ACK field */
        if (th->ack) {
-               int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);
+               int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
 
                switch (sk->sk_state) {
                case TCP_SYN_RECV:
@@ -5673,16 +5867,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                                tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
                                tp->snd_wnd = ntohs(th->window) <<
                                              tp->rx_opt.snd_wscale;
-                               tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq,
-                                           TCP_SKB_CB(skb)->seq);
-
-                               /* tcp_ack considers this ACK as duplicate
-                                * and does not calculate rtt.
-                                * Fix it at least with timestamps.
-                                */
-                               if (tp->rx_opt.saw_tstamp &&
-                                   tp->rx_opt.rcv_tsecr && !tp->srtt)
-                                       tcp_ack_saw_tstamp(sk, 0);
+                               tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
                                if (tp->rx_opt.tstamp_ok)
                                        tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
@@ -5714,7 +5899,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        if (tp->snd_una == tp->write_seq) {
                                tcp_set_state(sk, TCP_FIN_WAIT2);
                                sk->sk_shutdown |= SEND_SHUTDOWN;
-                               dst_confirm(sk->sk_dst_cache);
+                               dst_confirm(__sk_dst_get(sk));
 
                                if (!sock_flag(sk, SOCK_DEAD))
                                        /* Wake up lingering close() */
@@ -5810,14 +5995,4 @@ discard:
        }
        return 0;
 }
-
-EXPORT_SYMBOL(sysctl_tcp_ecn);
-EXPORT_SYMBOL(sysctl_tcp_reordering);
-EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
-EXPORT_SYMBOL(tcp_parse_options);
-#ifdef CONFIG_TCP_MD5SIG
-EXPORT_SYMBOL(tcp_parse_md5sig_option);
-#endif
-EXPORT_SYMBOL(tcp_rcv_established);
 EXPORT_SYMBOL(tcp_rcv_state_process);
-EXPORT_SYMBOL(tcp_initialize_rcv_mss);