Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	112
1 file changed, 91 insertions, 21 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1d723de1868..eec8cf7c024 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -5,8 +5,6 @@
  *
  *		Implementation of the Transmission Control Protocol(TCP).
  *
- * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
- *
  * Authors:	Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
@@ -318,10 +316,10 @@ int tcp_memory_pressure __read_mostly;
 
 EXPORT_SYMBOL(tcp_memory_pressure);
 
-void tcp_enter_memory_pressure(void)
+void tcp_enter_memory_pressure(struct sock *sk)
 {
 	if (!tcp_memory_pressure) {
-		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
 		tcp_memory_pressure = 1;
 	}
 }
@@ -346,8 +344,8 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 		return inet_csk_listen_poll(sk);
 
 	/* Socket is not locked. We are protected from async events
-	   by poll logic and correct handling of state changes
-	   made by another threads is impossible in any case.
+	 * by poll logic and correct handling of state changes
+	 * made by other threads is impossible in any case.
 	 */
 
 	mask = 0;
@@ -373,10 +371,10 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
 	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
-	 * solve this dilemma. I would prefer, if PULLHUP were maskable,
+	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
 	 * then we could set it on SND_SHUTDOWN. BTW examples given
 	 * in Stevens' books assume exactly this behaviour, it explains
-	 * why PULLHUP is incompatible with POLLOUT.	--ANK
+	 * why POLLHUP is incompatible with POLLOUT.	--ANK
 	 *
 	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
 	 * blocking on fresh not-connected or disconnected socket. --ANK
@@ -651,7 +649,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 		}
 		__kfree_skb(skb);
 	} else {
-		sk->sk_prot->enter_memory_pressure();
+		sk->sk_prot->enter_memory_pressure(sk);
 		sk_stream_moderate_sndbuf(sk);
 	}
 	return NULL;
@@ -1155,7 +1153,7 @@ static void tcp_prequeue_process(struct sock *sk)
 	struct sk_buff *skb;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
+	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
 
 	/* RX process wants to run with disabled BHs, though it is not
 	 * necessary */
@@ -1477,7 +1475,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			/* __ Restore normal policy in scheduler __ */
 
 			if ((chunk = len - tp->ucopy.len) != 0) {
-				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
+				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
 				len -= chunk;
 				copied += chunk;
 			}
@@ -1488,7 +1486,7 @@ do_prequeue:
 				tcp_prequeue_process(sk);
 
 				if ((chunk = len - tp->ucopy.len) != 0) {
-					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 					len -= chunk;
 					copied += chunk;
 				}
@@ -1603,7 +1601,7 @@ skip_copy:
 			tcp_prequeue_process(sk);
 
 			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
-				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 				len -= chunk;
 				copied += chunk;
 			}
@@ -1670,12 +1668,12 @@ void tcp_set_state(struct sock *sk, int state)
 	switch (state) {
 	case TCP_ESTABLISHED:
 		if (oldstate != TCP_ESTABLISHED)
-			TCP_INC_STATS(TCP_MIB_CURRESTAB);
+			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
 		break;
 
 	case TCP_CLOSE:
 		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
-			TCP_INC_STATS(TCP_MIB_ESTABRESETS);
+			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
 
 		sk->sk_prot->unhash(sk);
 		if (inet_csk(sk)->icsk_bind_hash &&
@@ -1684,7 +1682,7 @@ void tcp_set_state(struct sock *sk, int state)
 		/* fall through */
 	default:
 		if (oldstate==TCP_ESTABLISHED)
-			TCP_DEC_STATS(TCP_MIB_CURRESTAB);
+			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
 	}
 
 	/* Change state AFTER socket is unhashed to avoid closed
@@ -1795,13 +1793,13 @@ void tcp_close(struct sock *sk, long timeout)
 	 */
 	if (data_was_unread) {
 		/* Unread data was tossed, zap the connection. */
-		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
+		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
 		tcp_set_state(sk, TCP_CLOSE);
 		tcp_send_active_reset(sk, GFP_KERNEL);
 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
 		/* Check zero linger _after_ checking for unread data. */
 		sk->sk_prot->disconnect(sk, 0);
-		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
+		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 	} else if (tcp_close_state(sk)) {
 		/* We FIN if the application ate all the data before
 		 * zapping the connection.
@@ -1873,7 +1871,8 @@ adjudge_to_death:
 		if (tp->linger2 < 0) {
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
-			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
+			NET_INC_STATS_BH(sock_net(sk),
+					LINUX_MIB_TCPABORTONLINGER);
 		} else {
 			const int tmo = tcp_fin_time(sk);
 
@@ -1895,7 +1894,8 @@ adjudge_to_death:
 				       "sockets\n");
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
-			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
+			NET_INC_STATS_BH(sock_net(sk),
+					LINUX_MIB_TCPABORTONMEMORY);
 		}
 	}
 
@@ -2467,6 +2467,76 @@ static unsigned long tcp_md5sig_users;
 static struct tcp_md5sig_pool **tcp_md5sig_pool;
 static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
 
+int tcp_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+		      int bplen,
+		      struct tcphdr *th, unsigned int tcplen,
+		      struct tcp_md5sig_pool *hp)
+{
+	struct scatterlist sg[4];
+	__u16 data_len;
+	int block = 0;
+	__sum16 cksum;
+	struct hash_desc *desc = &hp->md5_desc;
+	int err;
+	unsigned int nbytes = 0;
+
+	sg_init_table(sg, 4);
+
+	/* 1. The TCP pseudo-header */
+	sg_set_buf(&sg[block++], &hp->md5_blk, bplen);
+	nbytes += bplen;
+
+	/* 2. The TCP header, excluding options, and assuming a
+	 * checksum of zero
+	 */
+	cksum = th->check;
+	th->check = 0;
+	sg_set_buf(&sg[block++], th, sizeof(*th));
+	nbytes += sizeof(*th);
+
+	/* 3. The TCP segment data (if any) */
+	data_len = tcplen - (th->doff << 2);
+	if (data_len > 0) {
+		u8 *data = (u8 *)th + (th->doff << 2);
+		sg_set_buf(&sg[block++], data, data_len);
+		nbytes += data_len;
+	}
+
+	/* 4. an independently-specified key or password, known to both
+	 * TCPs and presumably connection-specific
+	 */
+	sg_set_buf(&sg[block++], key->key, key->keylen);
+	nbytes += key->keylen;
+
+	sg_mark_end(&sg[block - 1]);
+
+	/* Now store the hash into the packet */
+	err = crypto_hash_init(desc);
+	if (err) {
+		if (net_ratelimit())
+			printk(KERN_WARNING "%s(): hash_init failed\n", __func__);
+		return -1;
+	}
+	err = crypto_hash_update(desc, sg, nbytes);
+	if (err) {
+		if (net_ratelimit())
+			printk(KERN_WARNING "%s(): hash_update failed\n", __func__);
+		return -1;
+	}
+	err = crypto_hash_final(desc, md5_hash);
+	if (err) {
+		if (net_ratelimit())
+			printk(KERN_WARNING "%s(): hash_final failed\n", __func__);
+		return -1;
+	}
+
+	/* Reset header */
+	th->check = cksum;
+
+	return 0;
+}
+EXPORT_SYMBOL(tcp_calc_md5_hash);
+
 static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
 {
 	int cpu;
@@ -2595,7 +2665,7 @@ EXPORT_SYMBOL(__tcp_put_md5sig_pool);
 void tcp_done(struct sock *sk)
 {
 	if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
-		TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 
 	tcp_set_state(sk, TCP_CLOSE);
 	tcp_clear_xmit_timers(sk);
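The common thread in the hunks above is that the TCP SNMP/MIB macros (NET_INC_STATS, NET_ADD_STATS_USER, TCP_INC_STATS and friends) now take the network namespace, looked up through sock_net(sk); that is also why tcp_enter_memory_pressure() and the enter_memory_pressure callback gain a struct sock * argument. Below is a minimal userspace sketch of that pattern, not kernel code: the struct layouts and the counter field are invented for illustration, and only the sock_net()-style indirection mirrors the diff.

/*
 * Simplified userspace model, not kernel code: why the callbacks in this
 * diff gain a "struct sock *sk" argument.  The SNMP counters move from a
 * single global set to per-network-namespace storage, so the stat macros
 * need a socket to reach the right "struct net" via sock_net(sk).  The
 * struct layouts and the counter field below are invented for the sketch.
 */
#include <stdio.h>

struct net {
	unsigned long mib_tcp_memory_pressures;	/* stand-in for LINUX_MIB_TCPMEMORYPRESSURES */
};

struct sock {
	struct net *sk_net;			/* namespace this socket belongs to */
};

static struct net *sock_net(const struct sock *sk)
{
	return sk->sk_net;
}

/* Modeled on the new tcp_enter_memory_pressure(sk): the socket locates
 * the per-namespace counters instead of an implicit global set. */
static void tcp_enter_memory_pressure(struct sock *sk)
{
	sock_net(sk)->mib_tcp_memory_pressures++;
}

int main(void)
{
	struct net init_net = { 0 }, other_net = { 0 };
	struct sock a = { &init_net }, b = { &other_net };

	tcp_enter_memory_pressure(&a);
	tcp_enter_memory_pressure(&b);
	tcp_enter_memory_pressure(&b);

	printf("init_net:  %lu\n", init_net.mib_tcp_memory_pressures);
	printf("other_net: %lu\n", other_net.mib_tcp_memory_pressures);
	return 0;
}

Passing the socket rather than relying on one global counter set is what lets each network namespace account for its own TCP statistics.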
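The newly added tcp_calc_md5_hash() builds its scatterlist in the segment order prescribed by RFC 2385: the pseudo-header, the TCP header with the checksum field zeroed, the segment payload (if any), and finally the connection key. The sketch below is a hypothetical userspace illustration of that byte ordering only; the struct tcp4_pseudohdr layout and all values are made up, and a real signer would feed the assembled bytes (or, as in the kernel, the scatterlist) to the MD5 transform.

/*
 * Hypothetical userspace illustration (not kernel code) of the RFC 2385
 * digest input order used by tcp_calc_md5_hash() above.  The pieces are
 * simply appended to one buffer here; the kernel hands them to the
 * crypto layer as a four-entry scatterlist instead.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tcp4_pseudohdr {			/* invented layout for the sketch */
	uint32_t saddr;
	uint32_t daddr;
	uint8_t  pad;
	uint8_t  protocol;		/* IPPROTO_TCP = 6 */
	uint16_t len;			/* TCP segment length (header + data) */
};

static size_t append(uint8_t *dst, size_t off, const void *src, size_t len)
{
	memcpy(dst + off, src, len);
	return off + len;
}

int main(void)
{
	/* Placeholder values; byte order is ignored for the illustration. */
	struct tcp4_pseudohdr bp = { 0x0100007f, 0x0100007f, 0, 6, 24 };
	uint8_t tcph[20] = { 0 };	/* base TCP header, checksum already zeroed */
	const uint8_t payload[] = "data";
	const uint8_t key[] = "secret";
	uint8_t digest_input[512];
	size_t off = 0;

	off = append(digest_input, off, &bp, sizeof(bp));		/* 1. pseudo-header */
	off = append(digest_input, off, tcph, sizeof(tcph));		/* 2. TCP header */
	off = append(digest_input, off, payload, sizeof(payload) - 1);	/* 3. segment data */
	off = append(digest_input, off, key, sizeof(key) - 1);		/* 4. shared key */

	printf("digest input length: %zu bytes\n", off);
	return 0;
}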