Diffstat (limited to 'net/ipv4/tcp_output.c')

 net/ipv4/tcp_output.c | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 80147ba4414..59505ce0c8f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -722,8 +722,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
 				 unsigned int mss_now)
 {
-	if (skb->len <= mss_now || !sk_can_gso(sk) ||
-	    tcp_urg_mode(tcp_sk(sk))) {
+	if (skb->len <= mss_now || !sk_can_gso(sk)) {
 		/* Avoid the costly divide in the normal
 		 * non-TSO case.
 		 */
@@ -1029,10 +1028,6 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
 
 /* Compute the current effective MSS, taking SACKs and IP options,
  * and even PMTU discovery events into account.
- *
- * LARGESEND note: !tcp_urg_mode is overkill, only frames up to snd_up
- * cannot be large. However, taking into account rare use of URG, this
- * is not a big flaw.
  */
 unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 {
@@ -1047,7 +1042,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 
 	mss_now = tp->mss_cache;
 
-	if (large_allowed && sk_can_gso(sk) && !tcp_urg_mode(tp))
+	if (large_allowed && sk_can_gso(sk))
 		doing_tso = 1;
 
 	if (dst) {
@@ -1164,9 +1159,7 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
 {
 	int tso_segs = tcp_skb_pcount(skb);
 
-	if (!tso_segs ||
-	    (tso_segs > 1 && (tcp_skb_mss(skb) != mss_now ||
-			      tcp_urg_mode(tcp_sk(sk))))) {
+	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
 		tcp_set_skb_tso_segs(sk, skb, mss_now);
 		tso_segs = tcp_skb_pcount(skb);
 	}
@@ -1519,6 +1512,10 @@ static int tcp_mtu_probe(struct sock *sk)
  * send_head.  This happens as incoming acks open up the remote
  * window for us.
  *
+ * LARGESEND note: !tcp_urg_mode is overkill, only frames between
+ * snd_up-64k-mss .. snd_up cannot be large. However, taking into
+ * account rare use of URG, this is not a big flaw.
+ *
  * Returns 1, if no segments are in flight and we have queued segments, but
  * cannot send anything now because of SWS or another problem.
  */
@@ -1563,7 +1560,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 		}
 
 		limit = mss_now;
-		if (tso_segs > 1)
+		if (tso_segs > 1 && !tcp_urg_mode(tp))
 			limit = tcp_mss_split_point(sk, skb, mss_now,
 						    cwnd_quota);
 
@@ -1620,6 +1617,7 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
  */
 void tcp_push_one(struct sock *sk, unsigned int mss_now)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = tcp_send_head(sk);
 	unsigned int tso_segs, cwnd_quota;
 
@@ -1634,7 +1632,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
 		BUG_ON(!tso_segs);
 
 		limit = mss_now;
-		if (tso_segs > 1)
+		if (tso_segs > 1 && !tcp_urg_mode(tp))
 			limit = tcp_mss_split_point(sk, skb, mss_now,
 						    cwnd_quota);
 
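For context, the sketch below models the shape of this change: tcp_urg_mode() reports whether urgent data is still outstanding, and after this patch that condition no longer disables TSO inside the MSS/segmentation helpers; it only caps the send limit to one MSS at the two transmit-path call sites. This is a minimal standalone sketch, assuming tcp_urg_mode() compares snd_una against the urgent pointer snd_up (as the helper in this era's tcp_output.c does); tcp_sock_stub, xmit_limit() and the test harness are hypothetical simplifications for illustration, not kernel code.

/* Sketch of the reworked urgent-mode gating, modelled on the hunks
 * above. tcp_sock_stub is a hypothetical stand-in for struct tcp_sock. */
#include <stdio.h>

struct tcp_sock_stub {
	unsigned int snd_una;	/* oldest unacknowledged sequence number */
	unsigned int snd_up;	/* urgent pointer */
};

/* Mirrors the kernel helper: urgent mode is active while urgent data
 * (snd_up) has not yet been acknowledged (snd_una caught up to it). */
static int tcp_urg_mode(const struct tcp_sock_stub *tp)
{
	return tp->snd_una != tp->snd_up;
}

/* After this patch the urgent check lives in the transmit path: a
 * multi-segment (TSO) skb may exceed one MSS only when no urgent data
 * is pending. split_point stands in for tcp_mss_split_point(). */
static unsigned int xmit_limit(const struct tcp_sock_stub *tp,
			       int tso_segs, unsigned int mss_now,
			       unsigned int split_point)
{
	unsigned int limit = mss_now;

	if (tso_segs > 1 && !tcp_urg_mode(tp))
		limit = split_point;
	return limit;
}

int main(void)
{
	struct tcp_sock_stub tp = { .snd_una = 1000, .snd_up = 1000 };

	/* No urgent data pending: the large TSO limit is allowed. */
	printf("limit=%u\n", xmit_limit(&tp, 4, 1460, 4 * 1460));

	/* Urgent data outstanding: fall back to one MSS per send. */
	tp.snd_up = 1500;
	printf("limit=%u\n", xmit_limit(&tp, 4, 1460, 4 * 1460));
	return 0;
}

The design point, per the LARGESEND note this diff moves into the tcp_write_xmit() comment block: only segments near the urgent pointer (snd_up-64k-mss .. snd_up) actually have to stay small, so unconditionally disabling TSO whenever urgent mode is active was overkill. Checking the condition once, where the split point is chosen, keeps the helpers simple and is precise enough given how rarely URG is used.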