[TCP]: Move to new TSO segmenting scheme.
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1dba7fd..8de2f10 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -740,10 +740,10 @@ __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
        __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
 
        if (!cwnd) {
-               if (tp->mss_cache_std > 1460)
+               if (tp->mss_cache > 1460)
                        cwnd = 2;
                else
-                       cwnd = (tp->mss_cache_std > 1095) ? 3 : 4;
+                       cwnd = (tp->mss_cache > 1095) ? 3 : 4;
        }
        return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
 }
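
With the old dual-MSS bookkeeping gone, the initial window above keys purely off tp->mss_cache. As a standalone illustration, here is a minimal user-space sketch of the same rule; tcp_initial_cwnd() is a hypothetical stand-in for tcp_init_cwnd() without the dst metric lookup, with the thresholds copied from the hunk. The 1095/1460 cut-offs keep the initial burst near the RFC 3390 budget of about 4380 bytes at any MSS.

    /* Sketch of the rewritten rule: the initial congestion window
     * shrinks as the MSS grows. tcp_initial_cwnd() is a hypothetical
     * stand-in, not the kernel function.
     */
    #include <stdio.h>

    static unsigned int tcp_initial_cwnd(unsigned int mss, unsigned int clamp)
    {
            unsigned int cwnd;

            if (mss > 1460)
                    cwnd = 2;
            else
                    cwnd = (mss > 1095) ? 3 : 4;

            return cwnd < clamp ? cwnd : clamp;     /* min_t(__u32, cwnd, clamp) */
    }

    int main(void)
    {
            unsigned int sizes[] = { 536, 1095, 1096, 1460, 1461, 8960 };
            unsigned int i;

            for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    printf("mss %5u -> initial cwnd %u segments\n",
                           sizes[i], tcp_initial_cwnd(sizes[i], ~0u));
            return 0;
    }
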
@@ -914,7 +914,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        if (sk->sk_route_caps & NETIF_F_TSO) {
                sk->sk_route_caps &= ~NETIF_F_TSO;
                sock_set_flag(sk, SOCK_NO_LARGESEND);
-               tp->mss_cache = tp->mss_cache_std;
        }
 
        if (!tp->sacked_out)
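
The hunk above drops what a mechanical mss_cache_std -> mss_cache rename would otherwise leave behind as a self-assignment: under the new scheme tp->mss_cache always holds the real per-segment MSS (the separate TSO "super-packet" size it used to carry is gone), so there is nothing to restore when TSO gets disabled. Per-segment accounting instead works off tcp_skb_pcount(); a sketch of that relationship, with tso_pcount() as a hypothetical helper rather than the kernel's code:

    /* Per-segment accounting sketch: one TSO skb of len bytes stands in
     * for roughly len / mss real segments, so SACK and retransmit
     * bookkeeping can stay segment-based. tso_pcount() is hypothetical.
     */
    #include <stdio.h>

    static unsigned int tso_pcount(unsigned int len, unsigned int mss)
    {
            return (len + mss - 1) / mss;   /* round up: a short tail still counts */
    }

    int main(void)
    {
            unsigned int mss = 1448;        /* assumed Ethernet MSS with timestamps */
            unsigned int len = 64 * 1024;   /* one 64 KB TSO skb */

            printf("one %u-byte TSO skb ~ %u segments at mss %u\n",
                   len, tso_pcount(len, mss), mss);
            return 0;
    }
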
@@ -1077,7 +1076,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                            (IsFack(tp) ||
                             !before(lost_retrans,
                                     TCP_SKB_CB(skb)->ack_seq + tp->reordering *
-                                    tp->mss_cache_std))) {
+                                    tp->mss_cache))) {
                                TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                                tp->retrans_out -= tcp_skb_pcount(skb);
 
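The lost_retrans test compares raw 32-bit sequence numbers, so it depends on the wraparound-safe before()/after() helpers. A self-contained sketch, modeled on the kernel's before()/after() definitions:

    /* Wraparound-safe sequence comparison: the signed subtraction
     * treats the 32-bit sequence space as a circle.
     */
    #include <stdio.h>
    #include <stdint.h>

    static int before(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq1 - seq2) < 0;
    }

    static int after(uint32_t seq1, uint32_t seq2)
    {
            return before(seq2, seq1);
    }

    int main(void)
    {
            uint32_t a = 0xffffff00u;       /* just below the 32-bit wrap */
            uint32_t b = 0x00000200u;       /* just past the wrap */

            /* both print 1: b is "later" than a although b < a numerically */
            printf("before(a, b) = %d, after(b, a) = %d\n",
                   before(a, b), after(b, a));
            return 0;
    }

That is what lets reordering * tp->mss_cache be added to ack_seq in the hunk above without a wraparound special case.
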
@@ -2038,7 +2037,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
                 * the other end.
                 */
                if (after(scb->end_seq, tp->snd_una)) {
-                       if (tcp_skb_pcount(skb) > 1)
+                       if (tcp_skb_pcount(skb) > 1 &&
+                           after(tp->snd_una, scb->seq))
                                acked |= tcp_tso_acked(sk, skb,
                                                       now, &seq_rtt);
                        break;
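
The extra after(tp->snd_una, scb->seq) test means tcp_tso_acked() is only invoked when the cumulative ACK actually lands inside the multi-segment skb; if snd_una has not moved past scb->seq, nothing in the skb is covered and there is nothing to trim. A sketch of the arithmetic involved, with acked_segments() as a hypothetical helper rather than the kernel's tcp_tso_acked():

    /* Partial-ACK arithmetic sketch: when snd_una lands inside a
     * multi-segment skb (seq < snd_una < end_seq), only the whole
     * leading segments it covers can be retired.
     */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t acked_segments(uint32_t seq, uint32_t snd_una, uint32_t mss)
    {
            return (snd_una - seq) / mss;   /* whole segments covered by the ACK */
    }

    int main(void)
    {
            uint32_t mss = 1448;
            uint32_t seq = 1000;                    /* start of a 10-segment TSO skb */
            uint32_t end_seq = seq + 10 * mss;
            uint32_t snd_una = seq + 3 * mss + 100; /* cumulative ACK lands mid-skb */

            printf("%u of %u segments retired; the skb stays queued\n",
                   acked_segments(seq, snd_una, mss), (end_seq - seq) / mss);
            return 0;
    }
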
@@ -3299,6 +3299,28 @@ void tcp_cwnd_application_limited(struct sock *sk)
        tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
+static inline int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp)
+{
+       /* If the user specified an explicit send buffer setting, do
+        * not modify it.
+        */
+       if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
+               return 0;
+
+       /* If we are under global TCP memory pressure, do not expand.  */
+       if (tcp_memory_pressure)
+               return 0;
+
+       /* If we are under soft global TCP memory pressure, do not expand.  */
+       if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
+               return 0;
+
+       /* If we filled the congestion window, do not expand.  */
+       if (tp->packets_out >= tp->snd_cwnd)
+               return 0;
+
+       return 1;
+}
 
 /* When an incoming ACK allows us to free some skb from the write_queue,
  * we remember this event in the SOCK_QUEUE_SHRUNK flag and wake up the socket
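
Factoring the four guards into tcp_should_expand_sndbuf() leaves tcp_new_space() (next hunk) with a single readable test. The shape of that refactor as a user-space sketch; struct sock_state and its fields are illustrative stand-ins for the kernel state, not its API:

    /* Each guard names one reason not to grow the send buffer. */
    #include <stdbool.h>
    #include <stdio.h>

    struct sock_state {
            bool sndbuf_locked;     /* user pinned SO_SNDBUF (SOCK_SNDBUF_LOCK) */
            bool memory_pressure;   /* global tcp_memory_pressure flag */
            long allocated;         /* tcp_memory_allocated */
            long soft_limit;        /* sysctl_tcp_mem[0] */
            unsigned int packets_out;
            unsigned int snd_cwnd;
    };

    static bool should_expand_sndbuf(const struct sock_state *s)
    {
            if (s->sndbuf_locked)
                    return false;   /* the user asked for a fixed size */
            if (s->memory_pressure)
                    return false;   /* hard global pressure */
            if (s->allocated >= s->soft_limit)
                    return false;   /* soft global pressure */
            if (s->packets_out >= s->snd_cwnd)
                    return false;   /* cwnd, not the buffer, is the limit */
            return true;
    }

    int main(void)
    {
            struct sock_state s = {
                    .allocated = 1000, .soft_limit = 4096,
                    .packets_out = 5, .snd_cwnd = 10,
            };

            printf("expand sndbuf: %s\n", should_expand_sndbuf(&s) ? "yes" : "no");
            return 0;
    }

Each early return names one reason not to grow the buffer, which is easier to extend than the old compound conditional.
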
@@ -3310,11 +3332,8 @@ static void tcp_new_space(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (tp->packets_out < tp->snd_cwnd &&
-           !(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
-           !tcp_memory_pressure &&
-           atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
-               int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache_std) +
+       if (tcp_should_expand_sndbuf(sk, tp)) {
+               int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
                        MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
                    demanded = max_t(unsigned int, tp->snd_cwnd,
                                                   tp->reordering + 1);
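
When the predicate allows expansion, the sndbuf target is derived from the true memory cost of one queued packet (payload plus header and sk_buff overhead) and the number of packets the window may demand. A sketch of that arithmetic with assumed constants; MAX_TCP_HEADER and SKB_OVERHEAD below are illustrative values, not the kernel's:

    /* Buffer-budget sketch: per-packet cost times the larger of the
     * congestion window and the reordering degree.
     */
    #include <stdio.h>

    #define MAX_TCP_HEADER  192     /* assumed: TCP/IP headers + headroom */
    #define SKB_OVERHEAD    256     /* stand-in for sizeof(struct sk_buff) */

    int main(void)
    {
            unsigned int mss_clamp = 1460, mss_cache = 1448;
            unsigned int snd_cwnd = 40, reordering = 3;

            unsigned int payload = mss_clamp > mss_cache ? mss_clamp : mss_cache;
            unsigned int sndmem = payload + MAX_TCP_HEADER + 16 + SKB_OVERHEAD;
            unsigned int demanded = snd_cwnd > reordering + 1
                                  ? snd_cwnd : reordering + 1;

            printf("per-skb cost %u bytes, budget for %u packets = %u bytes\n",
                   sndmem, demanded, sndmem * demanded);
            return 0;
    }
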