| author | Frederic Weisbecker <fweisbec@gmail.com> | 2013-05-02 17:37:49 +0200 |
|---|---|---|
| committer | Frederic Weisbecker <fweisbec@gmail.com> | 2013-05-02 17:54:19 +0200 |
| commit | c032862fba51a3ca504752d3a25186b324c5ce83 (patch) | |
| tree | 955dc2ba4ab3df76ecc2bb780ee84aca04967e8d /net/sctp/ulpqueue.c | |
| parent | fda76e074c7737fc57855dd17c762e50ed526052 (diff) | |
| parent | 8700c95adb033843fc163d112b9d21d4fda78018 (diff) | |
| download | olio-linux-3.10-c032862fba51a3ca504752d3a25186b324c5ce83.tar.xz olio-linux-3.10-c032862fba51a3ca504752d3a25186b324c5ce83.zip | |
Merge commit '8700c95adb03' into timers/nohz
The full dynticks tree needs the latest RCU and sched
upstream updates in order to fix some dependencies.
Merge a common upstream merge point that has these
updates.
Conflicts:
	include/linux/perf_event.h
	kernel/rcutree.h
	kernel/rcutree_plugin.h
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Diffstat (limited to 'net/sctp/ulpqueue.c')
| -rw-r--r-- | net/sctp/ulpqueue.c | 87 | 
1 file changed, 71 insertions, 16 deletions
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index ada17464b65..0fd5b3d2df0 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 {
 	struct sk_buff_head temp;
 	struct sctp_ulpevent *event;
+	int event_eor = 0;
 
 	/* Create an event from the incoming chunk. */
 	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	/* Send event to the ULP.  'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 
-	return 0;
+	return event_eor;
 }
 
 /* Add a new event for propagation to the ULP.  */
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 		ctsn = cevent->tsn;
 
 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (!first_frag)
+				return NULL;
+			goto done;
 		case SCTP_DATA_MIDDLE_FRAG:
 			if (!first_frag) {
 				first_frag = pos;
 				next_tsn = ctsn + 1;
 				last_frag = pos;
-			} else if (next_tsn == ctsn)
+			} else if (next_tsn == ctsn) {
 				next_tsn++;
-			else
+				last_frag = pos;
+			} else
 				goto done;
 			break;
 		case SCTP_DATA_LAST_FRAG:
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 			} else
 				goto done;
 			break;
+
+		case SCTP_DATA_LAST_FRAG:
+			if (!first_frag)
+				return NULL;
+			else
+				goto done;
+			break;
+
 		default:
 			return NULL;
 		}
@@ -962,20 +978,43 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
 		struct sk_buff_head *list, __u16 needed)
 {
 	__u16 freed = 0;
-	__u32 tsn;
-	struct sk_buff *skb;
+	__u32 tsn, last_tsn;
+	struct sk_buff *skb, *flist, *last;
 	struct sctp_ulpevent *event;
 	struct sctp_tsnmap *tsnmap;
 
 	tsnmap = &ulpq->asoc->peer.tsn_map;
 
-	while ((skb = __skb_dequeue_tail(list)) != NULL) {
-		freed += skb_headlen(skb);
+	while ((skb = skb_peek_tail(list)) != NULL) {
 		event = sctp_skb2event(skb);
 		tsn = event->tsn;
 
+		/* Don't renege below the Cumulative TSN ACK Point. */
+		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
+			break;
+
+		/* Events in ordering queue may have multiple fragments
+		 * corresponding to additional TSNs.  Sum the total
+		 * freed space; find the last TSN.
+		 */
+		freed += skb_headlen(skb);
+		flist = skb_shinfo(skb)->frag_list;
+		for (last = flist; flist; flist = flist->next) {
+			last = flist;
+			freed += skb_headlen(last);
+		}
+		if (last)
+			last_tsn = sctp_skb2event(last)->tsn;
+		else
+			last_tsn = tsn;
+
+		/* Unlink the event, then renege all applicable TSNs. */
+		__skb_unlink(skb, list);
 		sctp_ulpevent_free(event);
-		sctp_tsnmap_renege(tsnmap, tsn);
+		while (TSN_lte(tsn, last_tsn)) {
+			sctp_tsnmap_renege(tsnmap, tsn);
+			tsn++;
+		}
 		if (freed >= needed)
 			return freed;
 	}
@@ -1002,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 	struct sctp_ulpevent *event;
 	struct sctp_association *asoc;
 	struct sctp_sock *sp;
+	__u32 ctsn;
+	struct sk_buff *skb;
 
 	asoc = ulpq->asoc;
 	sp = sctp_sk(asoc->base.sk);
 
 	/* If the association is already in Partial Delivery mode
-	 * we have noting to do.
+	 * we have nothing to do.
 	 */
 	if (ulpq->pd_mode)
 		return;
 
+	/* Data must be at or below the Cumulative TSN ACK Point to
+	 * start partial delivery.
+	 */
+	skb = skb_peek(&asoc->ulpq.reasm);
+	if (skb != NULL) {
+		ctsn = sctp_skb2event(skb)->tsn;
+		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+			return;
+	}
+
 	/* If the user enabled fragment interleave socket option,
 	 * multiple associations can enter partial delivery.
 	 * Otherwise, we can only enter partial delivery if the
@@ -1054,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	}
 	/* If able to free enough room, accept this chunk. */
 	if (chunk && (freed >= needed)) {
-		__u32 tsn;
-		tsn = ntohl(chunk->subh.data_hdr->tsn);
-		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
-		sctp_ulpq_tail_data(ulpq, chunk, gfp);
-
-		sctp_ulpq_partial_delivery(ulpq, gfp);
+		int retval;
+		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+		/*
+		 * Enter partial delivery if chunk has not been
+		 * delivered; otherwise, drain the reassembly queue.
+		 */
+		if (retval <= 0)
+			sctp_ulpq_partial_delivery(ulpq, gfp);
+		else if (retval == 1)
+			sctp_ulpq_reasm_drain(ulpq);
 	}
 
 	sk_mem_reclaim(asoc->base.sk);
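Several of the checks added above (the "Don't renege below the Cumulative TSN ACK Point" test in sctp_ulpq_renege_list() and the new guard in sctp_ulpq_partial_delivery()) compare TSNs with TSN_lte() rather than a plain `<=`, because SCTP TSNs are 32-bit sequence numbers that wrap around. The snippet below is a minimal user-space sketch of that style of serial-number comparison; `tsn_lte()` here is a hypothetical stand-in written only to show why the wrap-safe form matters, not the kernel's actual macro.

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical illustration (not the kernel's definition): casting the
 * unsigned difference to a signed 32-bit value keeps "less than or equal"
 * correct even when the TSN counter wraps past 0xffffffff.
 */
static int tsn_lte(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}

int main(void)
{
	uint32_t ctsn = 0xfffffffe;	/* cumulative TSN ACK point, near the wrap */
	uint32_t tsn  = 0x00000001;	/* a TSN that arrived just after the wrap  */

	printf("plain <=   : %d\n", tsn <= ctsn);        /* 1: wrongly looks old   */
	printf("tsn_lte    : %d\n", tsn_lte(tsn, ctsn)); /* 0: correctly newer     */
	printf("reverse    : %d\n", tsn_lte(ctsn, tsn)); /* 1: ctsn precedes tsn   */
	return 0;
}
```

Near the wrap point a plain integer comparison would treat a freshly arrived TSN as already at or below the cumulative ACK point, which is exactly the misclassification the TSN_lte()-based checks in the patch avoid.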