Diffstat (limited to 'net/sctp')
-rw-r--r--	net/sctp/associola.c	 2
-rw-r--r--	net/sctp/endpointola.c	 2
-rw-r--r--	net/sctp/sm_statefuns.c	 2
-rw-r--r--	net/sctp/socket.c	 6
-rw-r--r--	net/sctp/ssnmap.c	 8
-rw-r--r--	net/sctp/tsnmap.c	13
-rw-r--r--	net/sctp/ulpqueue.c	87
7 files changed, 87 insertions, 33 deletions
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 43cd0dd9149..d2709e2b7be 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1079,7 +1079,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
 			transports) {
 
 		if (transport == active)
-			break;
+			continue;
 		list_for_each_entry(chunk, &transport->transmitted,
 				transmitted_list) {
 			if (key == chunk->subh.data_hdr->tsn) {
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 2b3ef03c609..12ed45dbe75 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -155,7 +155,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 
 	/* SCTP-AUTH extensions*/
 	INIT_LIST_HEAD(&ep->endpoint_shared_keys);
-	null_key = sctp_auth_shkey_create(0, GFP_KERNEL);
+	null_key = sctp_auth_shkey_create(0, gfp);
 	if (!null_key)
 		goto nomem;
 
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 5131fcfedb0..de1a0138317 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2082,7 +2082,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
 	}
 
 	/* Delete the tempory new association. */
-	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
 	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
 	/* Restore association pointer to provide SCTP command interpeter
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c99458df3f3..b9070736b8d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5653,6 +5653,9 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
 	if (len < sizeof(sctp_assoc_t))
 		return -EINVAL;
 
+	/* Allow the struct to grow and fill in as much as possible */
+	len = min_t(size_t, len, sizeof(sas));
+
 	if (copy_from_user(&sas, optval, len))
 		return -EFAULT;
 
@@ -5686,9 +5689,6 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
 	/* Mark beginning of a new observation period */
 	asoc->stats.max_obs_rto = asoc->rto_min;
 
-	/* Allow the struct to grow and fill in as much as possible */
-	len = min_t(size_t, len, sizeof(sas));
-
 	if (put_user(len, optlen))
 		return -EFAULT;
 
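The socket.c hunk above moves the min_t() clamp ahead of copy_from_user(), so a user-supplied optlen is bounded before any bytes land in the on-stack sctp_assoc_stats struct. A minimal stand-alone sketch of the same clamp-before-copy pattern; the struct and function names here are invented for illustration, and memcpy() stands in for copy_from_user():

	#include <stddef.h>
	#include <string.h>

	/* Hypothetical option struct standing in for sctp_assoc_stats. */
	struct opt_stats {
		int  assoc_id;
		long counters[4];
	};

	static int get_option(struct opt_stats *dst, const void *optval, size_t len)
	{
		if (len < sizeof(int))
			return -1;		/* not even an id: reject */

		/* Clamp first: never copy more than the kernel-side struct,
		 * no matter how large a length the caller passed in.
		 */
		if (len > sizeof(*dst))
			len = sizeof(*dst);

		memcpy(dst, optval, len);	/* copy_from_user() in the kernel */
		return 0;
	}

Clamping rather than rejecting oversized lengths also lets the struct grow over time, per the hunk's own comment: old callers pass a short length and get a prefix, new callers get as much as the kernel can fill in.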
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c
index 442ad4ed631..825ea94415b 100644
--- a/net/sctp/ssnmap.c
+++ b/net/sctp/ssnmap.c
@@ -41,8 +41,6 @@
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
-#define MAX_KMALLOC_SIZE	131072
-
 static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in,
 					    __u16 out);
 
@@ -65,7 +63,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
 	int size;
 
 	size = sctp_ssnmap_size(in, out);
-	if (size <= MAX_KMALLOC_SIZE)
+	if (size <= KMALLOC_MAX_SIZE)
 		retval = kmalloc(size, gfp);
 	else
 		retval = (struct sctp_ssnmap *)
@@ -82,7 +80,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
 	return retval;
 
 fail_map:
-	if (size <= MAX_KMALLOC_SIZE)
+	if (size <= KMALLOC_MAX_SIZE)
 		kfree(retval);
 	else
 		free_pages((unsigned long)retval, get_order(size));
@@ -124,7 +122,7 @@ void sctp_ssnmap_free(struct sctp_ssnmap *map)
 		int size;
 
 		size = sctp_ssnmap_size(map->in.len, map->out.len);
-		if (size <= MAX_KMALLOC_SIZE)
+		if (size <= KMALLOC_MAX_SIZE)
			kfree(map);
 		else
 			free_pages((unsigned long)map, get_order(size));
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 5f25e0c92c3..396c45174e5 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -51,7 +51,7 @@
 static void sctp_tsnmap_update(struct sctp_tsnmap *map);
 static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off,
 				     __u16 len, __u16 *start, __u16 *end);
-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap);
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size);
 
 /* Initialize a block of memory as a tsnmap.  */
 struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len,
@@ -124,7 +124,7 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
 
 	gap = tsn - map->base_tsn;
 
-	if (gap >= map->len && !sctp_tsnmap_grow(map, gap))
+	if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1))
 		return -ENOMEM;
 
 	if (!sctp_tsnmap_has_gap(map) && gap == 0) {
@@ -360,23 +360,24 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
 	return ngaps;
 }
 
-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap)
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size)
 {
 	unsigned long *new;
 	unsigned long inc;
 	u16  len;
 
-	if (gap >= SCTP_TSN_MAP_SIZE)
+	if (size > SCTP_TSN_MAP_SIZE)
 		return 0;
 
-	inc = ALIGN((gap - map->len),BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
+	inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
 	len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE);
 
 	new = kzalloc(len>>3, GFP_ATOMIC);
 	if (!new)
 		return 0;
 
-	bitmap_copy(new, map->tsn_map, map->max_tsn_seen - map->base_tsn);
+	bitmap_copy(new, map->tsn_map,
+		map->max_tsn_seen - map->cumulative_tsn_ack_point);
 	kfree(map->tsn_map);
 	map->tsn_map = new;
 	map->len = len;
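The sctp_tsnmap_grow() change above reworks the argument from a raw bit offset into a required capacity: marking offset gap needs room for gap + 1 bits, so the caller now passes gap + 1 and the bound check becomes size > SCTP_TSN_MAP_SIZE. A toy version of the corrected grow logic; the struct and the MAP_MAX_BITS cap are invented for this sketch, playing the role of the TSN map and SCTP_TSN_MAP_SIZE:

	#include <stdlib.h>
	#include <string.h>

	#define MAP_MAX_BITS 4096	/* stands in for SCTP_TSN_MAP_SIZE */

	struct toy_map {
		unsigned char *bits;
		size_t len;		/* current capacity, in bits */
	};

	/* Grow to hold at least `size` bits; returns 0 on failure, like
	 * sctp_tsnmap_grow().  Callers marking bit offset `gap` must pass
	 * gap + 1: a map of len bits only covers offsets 0 .. len - 1.
	 */
	static int toy_map_grow(struct toy_map *m, size_t size)
	{
		unsigned char *nbits;
		size_t nbytes;

		if (size > MAP_MAX_BITS)
			return 0;

		nbytes = (size + 7) / 8;
		nbits = calloc(nbytes, 1);
		if (!nbits)
			return 0;

		if (m->bits)
			memcpy(nbits, m->bits, m->len / 8);	/* keep marked bits */
		free(m->bits);
		m->bits = nbits;
		m->len = nbytes * 8;
		return 1;
	}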
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index ada17464b65..0fd5b3d2df0 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 {
 	struct sk_buff_head temp;
 	struct sctp_ulpevent *event;
+	int event_eor = 0;
 
 	/* Create an event from the incoming chunk. */
 	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	/* Send event to the ULP.  'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 
-	return 0;
+	return event_eor;
 }
 
 /* Add a new event for propagation to the ULP.  */
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 		ctsn = cevent->tsn;
 
 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (!first_frag)
+				return NULL;
+			goto done;
 		case SCTP_DATA_MIDDLE_FRAG:
 			if (!first_frag) {
 				first_frag = pos;
 				next_tsn = ctsn + 1;
 				last_frag = pos;
-			} else if (next_tsn == ctsn)
+			} else if (next_tsn == ctsn) {
 				next_tsn++;
-			else
+				last_frag = pos;
+			} else
 				goto done;
 			break;
 		case SCTP_DATA_LAST_FRAG:
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 			} else
 				goto done;
 			break;
+
+		case SCTP_DATA_LAST_FRAG:
+			if (!first_frag)
+				return NULL;
+			else
+				goto done;
+			break;
+
 		default:
 			return NULL;
 		}
@@ -962,20 +978,43 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
 		struct sk_buff_head *list, __u16 needed)
 {
 	__u16 freed = 0;
-	__u32 tsn;
-	struct sk_buff *skb;
+	__u32 tsn, last_tsn;
+	struct sk_buff *skb, *flist, *last;
 	struct sctp_ulpevent *event;
 	struct sctp_tsnmap *tsnmap;
 
 	tsnmap = &ulpq->asoc->peer.tsn_map;
 
-	while ((skb = __skb_dequeue_tail(list)) != NULL) {
-		freed += skb_headlen(skb);
+	while ((skb = skb_peek_tail(list)) != NULL) {
 		event = sctp_skb2event(skb);
 		tsn = event->tsn;
 
+		/* Don't renege below the Cumulative TSN ACK Point. */
+		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
+			break;
+
+		/* Events in ordering queue may have multiple fragments
+		 * corresponding to additional TSNs.  Sum the total
+		 * freed space; find the last TSN.
+		 */
+		freed += skb_headlen(skb);
+		flist = skb_shinfo(skb)->frag_list;
+		for (last = flist; flist; flist = flist->next) {
+			last = flist;
+			freed += skb_headlen(last);
+		}
+		if (last)
+			last_tsn = sctp_skb2event(last)->tsn;
+		else
+			last_tsn = tsn;
+
+		/* Unlink the event, then renege all applicable TSNs. */
+		__skb_unlink(skb, list);
 		sctp_ulpevent_free(event);
-		sctp_tsnmap_renege(tsnmap, tsn);
+		while (TSN_lte(tsn, last_tsn)) {
+			sctp_tsnmap_renege(tsnmap, tsn);
+			tsn++;
+		}
+
 		if (freed >= needed)
 			return freed;
 	}
@@ -1002,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 	struct sctp_ulpevent *event;
 	struct sctp_association *asoc;
 	struct sctp_sock *sp;
+	__u32 ctsn;
+	struct sk_buff *skb;
 
 	asoc = ulpq->asoc;
 	sp = sctp_sk(asoc->base.sk);
 
 	/* If the association is already in Partial Delivery mode
-	 * we have noting to do.
+	 * we have nothing to do.
 	 */
 	if (ulpq->pd_mode)
 		return;
 
+	/* Data must be at or below the Cumulative TSN ACK Point to
+	 * start partial delivery.
+	 */
+	skb = skb_peek(&asoc->ulpq.reasm);
+	if (skb != NULL) {
+		ctsn = sctp_skb2event(skb)->tsn;
+		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+			return;
+	}
+
 	/* If the user enabled fragment interleave socket option,
 	 * multiple associations can enter partial delivery.
 	 * Otherwise, we can only enter partial delivery if the
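Both the partial-delivery gate just above and the renege loop compare TSNs with TSN_lte(), which is serial-number arithmetic (RFC 1982) rather than a plain integer compare, so the checks stay correct when the 32-bit TSN space wraps. A sketch of a comparison in the style of the kernel's TSN_lte() macro:

	#include <stdint.h>

	/* Serial-number compare in the style of the kernel's TSN_lte():
	 * the difference is evaluated as signed 32-bit, so e.g.
	 * tsn_lte(0xfffffffe, 0x00000001) is true across the wrap.
	 */
	static int tsn_lte(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) <= 0;
	}

With a compare like this, "at or below the Cumulative TSN ACK Point" remains meaningful even after newly arrived TSNs have wrapped past zero.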
@@ -1054,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	}
 
 	/* If able to free enough room, accept this chunk. */
 	if (chunk && (freed >= needed)) {
-		__u32 tsn;
-		tsn = ntohl(chunk->subh.data_hdr->tsn);
-		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
-		sctp_ulpq_tail_data(ulpq, chunk, gfp);
-
-		sctp_ulpq_partial_delivery(ulpq, gfp);
+		int retval;
+		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+		/*
+		 * Enter partial delivery if chunk has not been
+		 * delivered; otherwise, drain the reassembly queue.
+		 */
+		if (retval <= 0)
+			sctp_ulpq_partial_delivery(ulpq, gfp);
+		else if (retval == 1)
+			sctp_ulpq_reasm_drain(ulpq);
 	}
 
 	sk_mem_reclaim(asoc->base.sk);
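The reworked sctp_ulpq_renege_list() earlier in this patch has to account for events whose skb carries a frag_list: each chained fragment represents a further TSN, so the loop sums the head length plus every fragment's length and then reneges the whole TSN range up to the last fragment. A toy walk over such a chain; the frag struct below is invented to mirror the skb/frag_list shape, not a kernel type:

	#include <stddef.h>

	/* Invented stand-in for an sk_buff holding one fragment/TSN. */
	struct frag {
		unsigned int tsn;
		size_t       len;
		struct frag *next;	/* frag_list chaining */
	};

	/* Sum the bytes a head event and its fragment chain would free,
	 * and report the last TSN in the chain, mirroring what the
	 * patched renege loop computes before calling
	 * sctp_tsnmap_renege() for every TSN in [head->tsn, last_tsn].
	 */
	static size_t chain_freed(const struct frag *head, unsigned int *last_tsn)
	{
		const struct frag *f;
		size_t freed = head->len;

		*last_tsn = head->tsn;
		for (f = head->next; f; f = f->next) {
			freed += f->len;
			*last_tsn = f->tsn;
		}
		return freed;
	}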