diff options
| author | Stephen Hemminger <shemminger@linux-foundation.org> | 2007-07-16 18:35:52 -0700 | 
|---|---|---|
| committer | David S. Miller <davem@sunset.davemloft.net> | 2007-07-18 01:46:58 -0700 | 
| commit | 16751347a060a10c09b11593bb179fd5b0240c04 (patch) | |
| tree | 40399ba217d9e4c45060c4d12334c53f931ca635 /net | |
| parent | 44beac008631d1b8a52f103e04eacba2bda81511 (diff) | |
| download | olio-linux-3.10-16751347a060a10c09b11593bb179fd5b0240c04.tar.xz olio-linux-3.10-16751347a060a10c09b11593bb179fd5b0240c04.zip  | |
[TCP]: remove unused argument to cong_avoid op
None of the existing TCP congestion controls use the rtt value passed
in the ca_ops->cong_avoid interface.  Which is lucky because seq_rtt
could have been -1 when handling a duplicate ack.
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
| -rw-r--r-- | net/ipv4/tcp_bic.c | 2 | ||||
| -rw-r--r-- | net/ipv4/tcp_cong.c | 3 | ||||
| -rw-r--r-- | net/ipv4/tcp_cubic.c | 2 | ||||
| -rw-r--r-- | net/ipv4/tcp_highspeed.c | 2 | ||||
| -rw-r--r-- | net/ipv4/tcp_htcp.c | 2 | ||||
| -rw-r--r-- | net/ipv4/tcp_hybla.c | 4 | ||||
| -rw-r--r-- | net/ipv4/tcp_illinois.c | 2 | ||||
| -rw-r--r-- | net/ipv4/tcp_input.c | 8 | ||||
| -rw-r--r-- | net/ipv4/tcp_lp.c | 5 | ||||
| -rw-r--r-- | net/ipv4/tcp_scalable.c | 2 | ||||
| -rw-r--r-- | net/ipv4/tcp_vegas.c | 6 | ||||
| -rw-r--r-- | net/ipv4/tcp_veno.c | 6 | ||||
| -rw-r--r-- | net/ipv4/tcp_yeah.c | 2 | 
13 files changed, 22 insertions, 24 deletions
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c index dd9ef65ad3f..519de091a94 100644 --- a/net/ipv4/tcp_bic.c +++ b/net/ipv4/tcp_bic.c @@ -137,7 +137,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)  }  static void bictcp_cong_avoid(struct sock *sk, u32 ack, -			      u32 seq_rtt, u32 in_flight, int data_acked) +			      u32 in_flight, int data_acked)  {  	struct tcp_sock *tp = tcp_sk(sk);  	struct bictcp *ca = inet_csk_ca(sk); diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 1260e52ad77..55fca1820c3 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -324,8 +324,7 @@ EXPORT_SYMBOL_GPL(tcp_slow_start);  /* This is Jacobson's slow start and congestion avoidance.   * SIGCOMM '88, p. 328.   */ -void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight, -			 int flag) +void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag)  {  	struct tcp_sock *tp = tcp_sk(sk); diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index ebfaac2f9f4..d17da30d82d 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -270,7 +270,7 @@ static inline void measure_delay(struct sock *sk)  }  static void bictcp_cong_avoid(struct sock *sk, u32 ack, -			      u32 seq_rtt, u32 in_flight, int data_acked) +			      u32 in_flight, int data_acked)  {  	struct tcp_sock *tp = tcp_sk(sk);  	struct bictcp *ca = inet_csk_ca(sk); diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c index 43d624e5043..14a073d8b60 100644 --- a/net/ipv4/tcp_highspeed.c +++ b/net/ipv4/tcp_highspeed.c @@ -109,7 +109,7 @@ static void hstcp_init(struct sock *sk)  	tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);  } -static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt, +static void hstcp_cong_avoid(struct sock *sk, u32 adk,  			     u32 in_flight, int data_acked)  {  	struct tcp_sock *tp = tcp_sk(sk); diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c index 4ba4a7ae0a8..632c05a7588 100644 
--- a/net/ipv4/tcp_htcp.c +++ b/net/ipv4/tcp_htcp.c @@ -225,7 +225,7 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)  	return max((tp->snd_cwnd * ca->beta) >> 7, 2U);  } -static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, +static void htcp_cong_avoid(struct sock *sk, u32 ack, s32 rtt,  			    u32 in_flight, int data_acked)  {  	struct tcp_sock *tp = tcp_sk(sk); diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c index e5be3511722..b3e55cf5617 100644 --- a/net/ipv4/tcp_hybla.c +++ b/net/ipv4/tcp_hybla.c @@ -85,7 +85,7 @@ static inline u32 hybla_fraction(u32 odds)   *     o Give cwnd a new value based on the model proposed   *     o remember increments <1   */ -static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt, +static void hybla_cong_avoid(struct sock *sk, u32 ack,  			    u32 in_flight, int flag)  {  	struct tcp_sock *tp = tcp_sk(sk); @@ -103,7 +103,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,  		return;  	if (!ca->hybla_en) -		return tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag); +		return tcp_reno_cong_avoid(sk, ack, in_flight, flag);  	if (ca->rho == 0)  		hybla_recalc_param(sk); diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index b2b2256d3b8..cc5de6f69d4 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c @@ -258,7 +258,7 @@ static void tcp_illinois_state(struct sock *sk, u8 new_state)  /*   * Increase window in response to successful acknowledgment.   
*/ -static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 rtt, +static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack,  				    u32 in_flight, int flag)  {  	struct tcp_sock *tp = tcp_sk(sk); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 4e5884ac8f2..fec8a7a4dba 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2323,11 +2323,11 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,  		tcp_ack_no_tstamp(sk, seq_rtt, flag);  } -static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, +static void tcp_cong_avoid(struct sock *sk, u32 ack,  			   u32 in_flight, int good)  {  	const struct inet_connection_sock *icsk = inet_csk(sk); -	icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good); +	icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight, good);  	tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;  } @@ -2826,11 +2826,11 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)  		/* Advance CWND, if state allows this. */  		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&  		    tcp_may_raise_cwnd(sk, flag)) -			tcp_cong_avoid(sk, ack,  seq_rtt, prior_in_flight, 0); +			tcp_cong_avoid(sk, ack, prior_in_flight, 0);  		tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);  	} else {  		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) -			tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1); +			tcp_cong_avoid(sk, ack, prior_in_flight, 1);  	}  	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP)) diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c index e49836ce012..80e140e3ec2 100644 --- a/net/ipv4/tcp_lp.c +++ b/net/ipv4/tcp_lp.c @@ -115,13 +115,12 @@ static void tcp_lp_init(struct sock *sk)   * Will only call newReno CA when away from inference.   * From TCP-LP's paper, this will be handled in additive increasement.   
*/ -static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight, -			      int flag) +static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag)  {  	struct lp *lp = inet_csk_ca(sk);  	if (!(lp->flag & LP_WITHIN_INF)) -		tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag); +		tcp_reno_cong_avoid(sk, ack, in_flight, flag);  }  /** diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c index 4624501e968..be27a33a1c6 100644 --- a/net/ipv4/tcp_scalable.c +++ b/net/ipv4/tcp_scalable.c @@ -15,7 +15,7 @@  #define TCP_SCALABLE_AI_CNT	50U  #define TCP_SCALABLE_MD_SCALE	3 -static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 rtt, +static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack,  				    u32 in_flight, int flag)  {  	struct tcp_sock *tp = tcp_sk(sk); diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index e218a51cece..914e0307f7a 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c @@ -163,13 +163,13 @@ void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)  EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);  static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, -				 u32 seq_rtt, u32 in_flight, int flag) +				 u32 in_flight, int flag)  {  	struct tcp_sock *tp = tcp_sk(sk);  	struct vegas *vegas = inet_csk_ca(sk);  	if (!vegas->doing_vegas_now) -		return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag); +		return tcp_reno_cong_avoid(sk, ack, in_flight, flag);  	/* The key players are v_beg_snd_una and v_beg_snd_nxt.  	 * @@ -228,7 +228,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,  			/* We don't have enough RTT samples to do the Vegas  			 * calculation, so we'll behave like Reno.  			 
*/ -			tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag); +			tcp_reno_cong_avoid(sk, ack, in_flight, flag);  		} else {  			u32 rtt, target_cwnd, diff; diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c index ec854cc5fad..7a55ddf8603 100644 --- a/net/ipv4/tcp_veno.c +++ b/net/ipv4/tcp_veno.c @@ -115,13 +115,13 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)  }  static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, -				u32 seq_rtt, u32 in_flight, int flag) +				u32 in_flight, int flag)  {  	struct tcp_sock *tp = tcp_sk(sk);  	struct veno *veno = inet_csk_ca(sk);  	if (!veno->doing_veno_now) -		return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag); +		return tcp_reno_cong_avoid(sk, ack, in_flight, flag);  	/* limited by applications */  	if (!tcp_is_cwnd_limited(sk, in_flight)) @@ -132,7 +132,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack,  		/* We don't have enough rtt samples to do the Veno  		 * calculation, so we'll behave like Reno.  		 */ -		tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag); +		tcp_reno_cong_avoid(sk, ack, in_flight, flag);  	} else {  		u32 rtt, target_cwnd; diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c index 545ed237ab5..c04b7c6ec70 100644 --- a/net/ipv4/tcp_yeah.c +++ b/net/ipv4/tcp_yeah.c @@ -70,7 +70,7 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)  }  static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, -				u32 seq_rtt, u32 in_flight, int flag) +				u32 in_flight, int flag)  {  	struct tcp_sock *tp = tcp_sk(sk);  	struct yeah *yeah = inet_csk_ca(sk);  |