diff options
Diffstat (limited to 'net/sctp/socket.c')
| -rw-r--r-- | net/sctp/socket.c | 44 | 
1 file changed, 27 insertions, 17 deletions
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index c1941276f6e..ba1add0b13c 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3719,12 +3719,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)  	sp->hmac = NULL;  	SCTP_DBG_OBJCNT_INC(sock); -	percpu_counter_inc(&sctp_sockets_allocated); - -	/* Set socket backlog limit. */ -	sk->sk_backlog.limit = sysctl_sctp_rmem[1];  	local_bh_disable(); +	percpu_counter_inc(&sctp_sockets_allocated);  	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);  	local_bh_enable(); @@ -3741,8 +3738,8 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)  	/* Release our hold on the endpoint. */  	ep = sctp_sk(sk)->ep;  	sctp_endpoint_free(ep); -	percpu_counter_dec(&sctp_sockets_allocated);  	local_bh_disable(); +	percpu_counter_dec(&sctp_sockets_allocated);  	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);  	local_bh_enable();  } @@ -4387,7 +4384,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,  				transports) {  		memcpy(&temp, &from->ipaddr, sizeof(temp));  		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); -		addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len; +		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;  		if (space_left < addrlen)  			return -ENOMEM;  		if (copy_to_user(to, &temp, addrlen)) @@ -5702,7 +5699,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)  	struct sctp_sock *sp = sctp_sk(sk);  	unsigned int mask; -	poll_wait(file, sk->sk_sleep, wait); +	poll_wait(file, sk_sleep(sk), wait);  	/* A TCP-style listening socket becomes readable when the accept queue  	 * is not empty. @@ -5943,7 +5940,7 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)  	int error;  	DEFINE_WAIT(wait); -	prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); +	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);  	/* Socket errors? 
*/  	error = sock_error(sk); @@ -5980,14 +5977,14 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)  	sctp_lock_sock(sk);  ready: -	finish_wait(sk->sk_sleep, &wait); +	finish_wait(sk_sleep(sk), &wait);  	return 0;  interrupted:  	error = sock_intr_errno(*timeo_p);  out: -	finish_wait(sk->sk_sleep, &wait); +	finish_wait(sk_sleep(sk), &wait);  	*err = error;  	return error;  } @@ -6061,14 +6058,14 @@ static void __sctp_write_space(struct sctp_association *asoc)  			wake_up_interruptible(&asoc->wait);  		if (sctp_writeable(sk)) { -			if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) -				wake_up_interruptible(sk->sk_sleep); +			if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) +				wake_up_interruptible(sk_sleep(sk));  			/* Note that we try to include the Async I/O support  			 * here by modeling from the current TCP/UDP code.  			 * We have not tested with it yet.  			 */ -			if (sock->fasync_list && +			if (sock->wq->fasync_list &&  			    !(sk->sk_shutdown & SEND_SHUTDOWN))  				sock_wake_async(sock,  						SOCK_WAKE_SPACE, POLL_OUT); @@ -6188,6 +6185,19 @@ do_nonblock:  	goto out;  } +void sctp_data_ready(struct sock *sk, int len) +{ +	struct socket_wq *wq; + +	rcu_read_lock(); +	wq = rcu_dereference(sk->sk_wq); +	if (wq_has_sleeper(wq)) +		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | +						POLLRDNORM | POLLRDBAND); +	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); +	rcu_read_unlock(); +} +  /* If socket sndbuf has changed, wake up all per association waiters.  
*/  void sctp_write_space(struct sock *sk)  { @@ -6296,7 +6306,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)  	for (;;) { -		prepare_to_wait_exclusive(sk->sk_sleep, &wait, +		prepare_to_wait_exclusive(sk_sleep(sk), &wait,  					  TASK_INTERRUPTIBLE);  		if (list_empty(&ep->asocs)) { @@ -6322,7 +6332,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)  			break;  	} -	finish_wait(sk->sk_sleep, &wait); +	finish_wait(sk_sleep(sk), &wait);  	return err;  } @@ -6332,7 +6342,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)  	DEFINE_WAIT(wait);  	do { -		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); +		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);  		if (list_empty(&sctp_sk(sk)->ep->asocs))  			break;  		sctp_release_sock(sk); @@ -6340,7 +6350,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)  		sctp_lock_sock(sk);  	} while (!signal_pending(current) && timeout); -	finish_wait(sk->sk_sleep, &wait); +	finish_wait(sk_sleep(sk), &wait);  }  static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)  |