Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	101
1 files changed, 70 insertions, 31 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5901010fad5..296150b2a62 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -265,6 +265,7 @@
 #include <linux/err.h>
 #include <linux/crypto.h>
 #include <linux/time.h>
+#include <linux/slab.h>
 
 #include <net/icmp.h>
 #include <net/tcp.h>
@@ -429,7 +430,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 		if (tp->urg_seq == tp->copied_seq &&
 		    !sock_flag(sk, SOCK_URGINLINE) &&
 		    tp->urg_data)
-			target--;
+			target++;
 
 		/* Potential race condition. If read of tp below will
 		 * escape above sk->sk_state, we can be illegally awaken
@@ -1254,6 +1255,39 @@ static void tcp_prequeue_process(struct sock *sk)
 	tp->ucopy.memory = 0;
 }
 
+#ifdef CONFIG_NET_DMA
+static void tcp_service_net_dma(struct sock *sk, bool wait)
+{
+	dma_cookie_t done, used;
+	dma_cookie_t last_issued;
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (!tp->ucopy.dma_chan)
+		return;
+
+	last_issued = tp->ucopy.dma_cookie;
+	dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+
+	do {
+		if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
+					      last_issued, &done,
+					      &used) == DMA_SUCCESS) {
+			/* Safe to free early-copied skbs now */
+			__skb_queue_purge(&sk->sk_async_wait_queue);
+			break;
+		} else {
+			struct sk_buff *skb;
+			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
+			       (dma_async_is_complete(skb->dma_cookie, done,
+						      used) == DMA_SUCCESS)) {
+				__skb_dequeue(&sk->sk_async_wait_queue);
+				kfree_skb(skb);
+			}
+		}
+	} while (wait);
+}
+#endif
+
 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 {
 	struct sk_buff *skb;
@@ -1335,6 +1369,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 		sk_eat_skb(sk, skb, 0);
 		if (!desc->count)
 			break;
+		tp->copied_seq = seq;
 	}
 	tp->copied_seq = seq;
 
@@ -1546,6 +1581,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			/* __ Set realtime policy in scheduler __ */
 		}
 
+#ifdef CONFIG_NET_DMA
+		if (tp->ucopy.dma_chan)
+			dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+#endif
 		if (copied >= target) {
 			/* Do not sleep, just process backlog. */
 			release_sock(sk);
@@ -1554,6 +1593,7 @@
 			sk_wait_data(sk, &timeo);
 
 #ifdef CONFIG_NET_DMA
+		tcp_service_net_dma(sk, false);  /* Don't block */
 		tp->ucopy.wakeup = 0;
 #endif
 
@@ -1633,6 +1673,9 @@ do_prequeue:
 						copied = -EFAULT;
 					break;
 				}
+
+				dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+
 				if ((offset + used) == skb->len)
 					copied_early = 1;
 
@@ -1702,27 +1745,9 @@ skip_copy:
 	}
 
 #ifdef CONFIG_NET_DMA
-	if (tp->ucopy.dma_chan) {
-		dma_cookie_t done, used;
-
-		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
-
-		while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
-						 tp->ucopy.dma_cookie, &done,
-						 &used) == DMA_IN_PROGRESS) {
-			/* do partial cleanup of sk_async_wait_queue */
-			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
-			       (dma_async_is_complete(skb->dma_cookie, done,
-						      used) == DMA_SUCCESS)) {
-				__skb_dequeue(&sk->sk_async_wait_queue);
-				kfree_skb(skb);
-			}
-		}
+	tcp_service_net_dma(sk, true);  /* Wait for queue to drain */
+	tp->ucopy.dma_chan = NULL;
 
-		/* Safe to free early-copied skbs now */
-		__skb_queue_purge(&sk->sk_async_wait_queue);
-		tp->ucopy.dma_chan = NULL;
-	}
 	if (tp->ucopy.pinned_list) {
 		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
 		tp->ucopy.pinned_list = NULL;
@@ -2814,7 +2839,6 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
 			if (p->md5_desc.tfm)
 				crypto_free_hash(p->md5_desc.tfm);
 			kfree(p);
-			p = NULL;
 		}
 	}
 	free_percpu(pool);
@@ -2912,25 +2936,40 @@ retry:
 
 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
 
-struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
+
+/**
+ *	tcp_get_md5sig_pool - get md5sig_pool for this user
+ *
+ *	We use percpu structure, so if we succeed, we exit with preemption
+ *	and BH disabled, to make sure another thread or softirq handling
+ *	wont try to get same context.
+ */
+struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 {
 	struct tcp_md5sig_pool * __percpu *p;
-	spin_lock_bh(&tcp_md5sig_pool_lock);
+
+	local_bh_disable();
+
+	spin_lock(&tcp_md5sig_pool_lock);
 	p = tcp_md5sig_pool;
 	if (p)
 		tcp_md5sig_users++;
-	spin_unlock_bh(&tcp_md5sig_pool_lock);
-	return (p ? *per_cpu_ptr(p, cpu) : NULL);
-}
+	spin_unlock(&tcp_md5sig_pool_lock);
 
-EXPORT_SYMBOL(__tcp_get_md5sig_pool);
+	if (p)
+		return *per_cpu_ptr(p, smp_processor_id());
 
-void __tcp_put_md5sig_pool(void)
+	local_bh_enable();
+	return NULL;
+}
+EXPORT_SYMBOL(tcp_get_md5sig_pool);
+
+void tcp_put_md5sig_pool(void)
 {
+	local_bh_enable();
 	tcp_free_md5sig_pool();
 }
-
-EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+EXPORT_SYMBOL(tcp_put_md5sig_pool);
 
 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
 			struct tcphdr *th)
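
For readers following the md5sig pool API change at the end of the diff: the new kernel-doc comment states that a successful tcp_get_md5sig_pool() returns the current CPU's pool with preemption and BH disabled, so the caller must do its hashing without sleeping and then release the pool with tcp_put_md5sig_pool(). The caller-side sketch below illustrates that contract; the function name tcp_md5_hash_example() and its error paths are hypothetical, loosely modeled on the hashing helpers in this file (tcp_md5_hash_header() appears at the end of the diff), and assume the 2.6.33-era struct tcp_md5sig_pool with an embedded hash_desc.

/* Hypothetical example of the tcp_get_md5sig_pool()/tcp_put_md5sig_pool()
 * calling pattern described in the kernel-doc comment above.  Between the
 * get and the put, preemption and BH are disabled, so no sleeping calls.
 */
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <net/tcp.h>

static int tcp_md5_hash_example(struct tcphdr *th, u8 *md5_hash)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();	/* on success: BH and preemption off */
	if (!hp)
		return -ENOMEM;		/* pool was never allocated */
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();		/* re-enables BH, drops pool user ref */
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
	memset(md5_hash, 0, 16);
	return 1;
}

The local_bh_disable()/local_bh_enable() pairing inside the get/put helpers is what lets the function hand back the current CPU's pool entry safely, which is why the old __tcp_get_md5sig_pool(int cpu) signature taking an explicit CPU number goes away.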