sctp: Pass sk_buff_head explicitly to sctp_ulpq_tail_event().
Now the SKB list implementation assumption can be removed.

And now that we know that the list head is always non-NULL we can
remove the code blocks dealing with that as well.

Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 178ca044aa
commit 013b96ec64
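
For orientation before the diff: every call site below adopts the same
pattern, staging the event's skb on a caller-owned sk_buff_head and
passing that list explicitly. A minimal sketch of the convention
follows; the wrapper name deliver_one_event() is hypothetical and not
from the patch, while ulpq, event and temp match the names in the diff.

/* Hypothetical helper illustrating the new calling convention;
 * deliver_one_event() does not exist in the tree.
 */
static int deliver_one_event(struct sctp_ulpq *ulpq,
			     struct sctp_ulpevent *event)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);			/* empty local list */
	__skb_queue_tail(&temp, sctp_event2skb(event));	/* exactly one skb */
	/* The callee now receives the list head directly rather than
	 * deriving it from the skb's link pointers.
	 */
	return sctp_ulpq_tail_event(ulpq, &temp);
}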
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -59,7 +59,7 @@ void sctp_ulpq_free(struct sctp_ulpq *);
 int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
 
 /* Add a new event for propagation to the ULP. */
-int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev);
+int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sk_buff_head *skb_list);
 
 /* Renege previously received chunks. */
 void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -1317,7 +1317,7 @@ static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *even
 
 	skb_queue_head_init(&temp);
 	__skb_queue_tail(&temp, sctp_event2skb(event));
-	return sctp_ulpq_tail_event(ulpq, event);
+	return sctp_ulpq_tail_event(ulpq, &temp);
 }
 
 static struct sctp_stream_interleave sctp_stream_interleave_0 = {
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -130,7 +130,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	 */
 	if (event) {
 		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
-		sctp_ulpq_tail_event(ulpq, event);
+		sctp_ulpq_tail_event(ulpq, &temp);
 	}
 
 	return event_eor;
@@ -194,18 +194,17 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
 }
 
-/* If the SKB of 'event' is on a list, it is the first such member
- * of that list.
- */
-int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
+int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
 {
 	struct sock *sk = ulpq->asoc->base.sk;
 	struct sctp_sock *sp = sctp_sk(sk);
-	struct sk_buff_head *queue, *skb_list;
-	struct sk_buff *skb = sctp_event2skb(event);
+	struct sctp_ulpevent *event;
+	struct sk_buff_head *queue;
+	struct sk_buff *skb;
 	int clear_pd = 0;
 
-	skb_list = (struct sk_buff_head *) skb->prev;
+	skb = __skb_peek(skb_list);
+	event = sctp_skb2event(skb);
 
 	/* If the socket is just going to throw this away, do not
 	 * even try to deliver it.
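
A note on the hunk above (explanatory sketch, not part of the patch):
the old code leaned on the fact that struct sk_buff_head and struct
sk_buff begin with the same next/prev pair, so the first skb on a list
has prev pointing back at the list head. Passing the head explicitly
removes that layout assumption, and __skb_peek(), which must only be
used on a non-empty list, is safe because every caller queues exactly
one skb before calling in.

	/* Old derivation: works only because sk_buff_head and sk_buff
	 * share their leading next/prev members, an implementation
	 * detail of the SKB list code.
	 */
	skb_list = (struct sk_buff_head *)skb->prev;

	/* New form: the head arrives as an argument, so no layout
	 * trick and no NULL check is needed.  __skb_peek() returns
	 * the first skb without unlinking it.
	 */
	skb = __skb_peek(skb_list);
	event = sctp_skb2event(skb);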
@@ -258,13 +257,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 		}
 	}
 
-	/* If we are harvesting multiple skbs they will be
-	 * collected on a list.
-	 */
-	if (skb_list)
-		skb_queue_splice_tail_init(skb_list, queue);
-	else
-		__skb_queue_tail(queue, skb);
+	skb_queue_splice_tail_init(skb_list, queue);
 
 	/* Did we just complete partial delivery and need to get
 	 * rolling again?  Move pending data to the receive
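
Another aside on the simplification above (explanatory only):
skb_queue_splice_tail_init() moves the entire contents of skb_list to
the tail of queue and re-initializes skb_list to empty, so the branch
collapses once the head can no longer be NULL.

	/* Splice the caller's whole list (one or many skbs) onto the
	 * delivery queue; skb_list is left re-initialized and empty.
	 * For the single-skb lists the callers build, this matches
	 * the removed __skb_queue_tail(queue, skb) arm exactly.
	 */
	skb_queue_splice_tail_init(skb_list, queue);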
@@ -757,7 +750,7 @@ static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
 		 * sctp_ulpevent for very first SKB on the temp' list.
 		 */
 		if (event)
-			sctp_ulpq_tail_event(ulpq, event);
+			sctp_ulpq_tail_event(ulpq, &temp);
 	}
 }
 
@@ -957,7 +950,7 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 	if (event) {
 		/* see if we have more ordered that we can deliver */
 		sctp_ulpq_retrieve_ordered(ulpq, event);
-		sctp_ulpq_tail_event(ulpq, event);
+		sctp_ulpq_tail_event(ulpq, &temp);
 	}
 }
 
@@ -1087,7 +1080,7 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 
 		skb_queue_head_init(&temp);
 		__skb_queue_tail(&temp, sctp_event2skb(event));
-		sctp_ulpq_tail_event(ulpq, event);
+		sctp_ulpq_tail_event(ulpq, &temp);
 		sctp_ulpq_set_pd(ulpq);
 		return;
 	}