diff options
| author | Eric Dumazet <edumazet@google.com> | 2012-06-20 05:02:19 +0000 | 
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2012-06-22 21:47:33 -0700 | 
| commit | 7586eceb0abc0ea1c2b023e3e5d4dfd4ff40930a (patch) | |
| tree | 79fc35a3afa23896ab3e6e00b4d9d1178bfee1df /net/ipv4/tcp_ipv4.c | |
| parent | 24ea818e305b92ad1fadcca015ae3b0c1222c497 (diff) | |
| download | olio-linux-3.10-7586eceb0abc0ea1c2b023e3e5d4dfd4ff40930a.tar.xz olio-linux-3.10-7586eceb0abc0ea1c2b023e3e5d4dfd4ff40930a.zip  | |
ipv4: tcp: don't cache output dst for syncookies
Don't cache output dst for syncookies, as this adds pressure on IP route
cache and rcu subsystem for no gain.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Hans Schillstrom <hans.schillstrom@ericsson.com>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
| -rw-r--r-- | net/ipv4/tcp_ipv4.c | 12 | 
1 file changed, 7 insertions, 5 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 21e22a00481..b52934f5334 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -825,7 +825,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,  static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,  			      struct request_sock *req,  			      struct request_values *rvp, -			      u16 queue_mapping) +			      u16 queue_mapping, +			      bool nocache)  {  	const struct inet_request_sock *ireq = inet_rsk(req);  	struct flowi4 fl4; @@ -833,7 +834,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,  	struct sk_buff * skb;  	/* First, grab a route. */ -	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) +	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req, nocache)) == NULL)  		return -1;  	skb = tcp_make_synack(sk, dst, req, rvp); @@ -855,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,  			      struct request_values *rvp)  {  	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); -	return tcp_v4_send_synack(sk, NULL, req, rvp, 0); +	return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);  }  /* @@ -1388,7 +1389,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)  		 */  		if (tmp_opt.saw_tstamp &&  		    tcp_death_row.sysctl_tw_recycle && -		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL && +		    (dst = inet_csk_route_req(sk, &fl4, req, want_cookie)) != NULL &&  		    fl4.daddr == saddr &&  		    (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {  			inet_peer_refcheck(peer); @@ -1424,7 +1425,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)  	if (tcp_v4_send_synack(sk, dst, req,  			       (struct request_values *)&tmp_ext, -			       skb_get_queue_mapping(skb)) || +			       skb_get_queue_mapping(skb), +			       want_cookie) ||  	    want_cookie)  		goto drop_and_free;  |