| author | Ben Hutchings <bhutchings@solarflare.com> | 2012-07-30 16:11:42 +0000 | 
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2012-08-02 00:19:17 -0700 | 
| commit | 1485348d2424e1131ea42efc033cbd9366462b01 (patch) | |
| tree | 706d69b398cbd109fd3a9806fa485150d043cc52 /net/ipv4/tcp_output.c | |
| parent | 7e6d06f0de3f74ca929441add094518ae332257c (diff) | |
tcp: Apply device TSO segment limit earlier
Cache the device gso_max_segs in sock::sk_gso_max_segs and use it to
limit the size of TSO skbs.  This avoids the need to fall back to
software GSO for local TCP senders.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
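For context, the caching half of this change lands outside the diffstat shown below, which is limited to tcp_output.c. The following is a minimal sketch in plain userspace C of what the commit message describes, assuming the copy sits beside the existing sk_gso_max_size assignment in sk_setup_caps(); the structs are simplified stand-ins and the device limits are invented:

```c
/* Hedged userspace sketch, not kernel code: the sock snapshots the
 * egress device's TSO limits so the TCP output path can read them
 * without dereferencing the device on every transmit. */
#include <stdio.h>

struct net_device {
	unsigned int gso_max_size;	/* max bytes per TSO skb */
	unsigned int gso_max_segs;	/* max segments per TSO skb */
};

struct sock {
	unsigned int sk_gso_max_size;	/* already cached before this commit */
	unsigned int sk_gso_max_segs;	/* newly cached by this commit */
};

/* Mirrors the idea of sk_setup_caps(): copy device limits into the sock. */
static void sk_setup_caps(struct sock *sk, const struct net_device *dev)
{
	sk->sk_gso_max_size = dev->gso_max_size;
	sk->sk_gso_max_segs = dev->gso_max_segs;
}

int main(void)
{
	struct net_device dev = { .gso_max_size = 65536, .gso_max_segs = 100 };
	struct sock sk;

	sk_setup_caps(&sk, &dev);
	printf("TSO capped at %u bytes / %u segments per skb\n",
	       sk.sk_gso_max_size, sk.sk_gso_max_segs);
	return 0;
}
```

With the limit cached on the socket, the TCP output path below can enforce it per skb rather than building oversized skbs that would have to be segmented in software at the device layer.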
Diffstat (limited to 'net/ipv4/tcp_output.c')
| -rw-r--r-- | net/ipv4/tcp_output.c | 21 |
|---|---|---|

1 file changed, 12 insertions(+), 9 deletions(-)
```diff
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3f1bcff0b10..a7b3ec9b6c3 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1522,21 +1522,21 @@ static void tcp_cwnd_validate(struct sock *sk)
  * when we would be allowed to send the split-due-to-Nagle skb fully.
  */
 static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
-					unsigned int mss_now, unsigned int cwnd)
+					unsigned int mss_now, unsigned int max_segs)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
-	u32 needed, window, cwnd_len;
+	u32 needed, window, max_len;
 
 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
-	cwnd_len = mss_now * cwnd;
+	max_len = mss_now * max_segs;
 
-	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
-		return cwnd_len;
+	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
+		return max_len;
 
 	needed = min(skb->len, window);
 
-	if (cwnd_len <= needed)
-		return cwnd_len;
+	if (max_len <= needed)
+		return max_len;
 
 	return needed - needed % mss_now;
 }
@@ -1765,7 +1765,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 	limit = min(send_win, cong_win);
 
 	/* If a full-sized TSO skb can be sent, do it. */
-	if (limit >= sk->sk_gso_max_size)
+	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
+			   sk->sk_gso_max_segs * tp->mss_cache))
 		goto send_now;
 
 	/* Middle in queue won't get any more data, full sendable already? */
@@ -1999,7 +2000,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		limit = mss_now;
 		if (tso_segs > 1 && !tcp_urg_mode(tp))
 			limit = tcp_mss_split_point(sk, skb, mss_now,
-						    cwnd_quota);
+						    min_t(unsigned int,
+							  cwnd_quota,
+							  sk->sk_gso_max_segs));
 
 		if (skb->len > limit &&
 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
```
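To make the new clamp concrete, here is a hedged, self-contained userspace sketch of the arithmetic above: tcp_write_xmit() now passes min(cwnd_quota, sk_gso_max_segs) as the segment budget, and tcp_mss_split_point() converts that budget to bytes. mss_split_point() and min_u() are local stand-ins, not kernel APIs; all values are invented, and the tcp_write_queue_tail() fast-path check is omitted for brevity:

```c
/* Hedged userspace sketch of the clamping above; values are invented. */
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Mirrors tcp_mss_split_point() after the patch, minus the
 * tcp_write_queue_tail() fast-path check. */
static unsigned int mss_split_point(unsigned int window, unsigned int skb_len,
				    unsigned int mss_now, unsigned int max_segs)
{
	unsigned int max_len = mss_now * max_segs;
	unsigned int needed;

	if (max_len <= window)
		return max_len;

	needed = min_u(skb_len, window);
	if (max_len <= needed)
		return max_len;

	return needed - needed % mss_now;	/* whole segments only */
}

int main(void)
{
	unsigned int mss_now = 1448;		/* bytes per segment */
	unsigned int cwnd_quota = 45;		/* segments allowed by cwnd */
	unsigned int gso_max_segs = 24;		/* hypothetical device limit */
	unsigned int window = 256 * 1024;	/* send window in bytes */
	unsigned int skb_len = 64 * 1024;

	/* As in tcp_write_xmit() after the patch: clamp the segment
	 * budget to the device limit before splitting. */
	unsigned int limit = mss_split_point(window, skb_len, mss_now,
					     min_u(cwnd_quota, gso_max_segs));

	printf("skb limited to %u bytes (%u segments)\n",
	       limit, limit / mss_now);	/* 34752 bytes, 24 segments */
	return 0;
}
```

With a hypothetical device limit of 24 segments, the 45-segment congestion quota no longer yields a TSO skb larger than the hardware can handle: the skb is capped at 24 × 1448 = 34752 bytes instead of being built oversized and segmented in software.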