diff options
| author | Pavel Emelyanov <xemul@openvz.org> | 2007-10-23 20:30:25 -0700 | 
|---|---|---|
| committer | David S. Miller <davem@sunset.davemloft.net> | 2007-10-23 21:27:52 -0700 | 
| commit | 16d14ef9f29dfa9b1d99f3eff860e9f15bc99f39 (patch) | |
| tree | 8f13d343e807b79a23706d10d291f62f5a0a958f /net/sctp/ulpqueue.c | |
| parent | 5c58298c2536252ab95aa2b1497ab47eb878ca5d (diff) | |
| download | olio-linux-3.10-16d14ef9f29dfa9b1d99f3eff860e9f15bc99f39.tar.xz olio-linux-3.10-16d14ef9f29dfa9b1d99f3eff860e9f15bc99f39.zip  | |
[SCTP]: Consolidate sctp_ulpq_renege_xxx functions
Both functions are identical except for the list they traverse.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Acked-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sctp/ulpqueue.c')
| -rw-r--r-- | net/sctp/ulpqueue.c | 34 | 
1 file changed, 10 insertions, 24 deletions
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index b9370956b18..4be92d0a2ca 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -908,8 +908,8 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)  	return;  } -/* Renege 'needed' bytes from the ordering queue. */ -static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed) +static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, +		struct sk_buff_head *list, __u16 needed)  {  	__u16 freed = 0;  	__u32 tsn; @@ -919,7 +919,7 @@ static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)  	tsnmap = &ulpq->asoc->peer.tsn_map; -	while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) { +	while ((skb = __skb_dequeue_tail(list)) != NULL) {  		freed += skb_headlen(skb);  		event = sctp_skb2event(skb);  		tsn = event->tsn; @@ -933,30 +933,16 @@ static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)  	return freed;  } +/* Renege 'needed' bytes from the ordering queue. */ +static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed) +{ +	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed); +} +  /* Renege 'needed' bytes from the reassembly queue. */  static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)  { -	__u16 freed = 0; -	__u32 tsn; -	struct sk_buff *skb; -	struct sctp_ulpevent *event; -	struct sctp_tsnmap *tsnmap; - -	tsnmap = &ulpq->asoc->peer.tsn_map; - -	/* Walk backwards through the list, reneges the newest tsns. */ -	while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) { -		freed += skb_headlen(skb); -		event = sctp_skb2event(skb); -		tsn = event->tsn; - -		sctp_ulpevent_free(event); -		sctp_tsnmap_renege(tsnmap, tsn); -		if (freed >= needed) -			return freed; -	} - -	return freed; +	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);  }  /* Partial deliver the first message as there is pressure on rwnd. */  |