@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 {
 	struct sk_buff_head temp;
 	struct sctp_ulpevent *event;
+	int event_eor = 0;

 	/* Create an event from the incoming chunk. */
 	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	/* Send event to the ULP. 'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
 		sctp_ulpq_tail_event(ulpq, event);
+	}

-	return 0;
+	return event_eor;
 }

 /* Add a new event for propagation to the ULP. */
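The two hunks above change sctp_ulpq_tail_data() from always returning 0 to reporting whether the delivered event carried MSG_EOR, i.e. whether a complete message (rather than a fragment still awaiting reassembly) reached the socket. A minimal userspace sketch of that flag test, assuming only the standard MSG_EOR definition from <sys/socket.h>; the deliver() helper is hypothetical, not a kernel function:

#include <stdio.h>
#include <sys/socket.h>		/* MSG_EOR */

/* Hypothetical helper mirroring the new return convention:
 * 1 = a complete message (MSG_EOR) was handed to the socket,
 * 0 = only a fragment was queued, nothing complete delivered. */
static int deliver(int msg_flags)
{
	return (msg_flags & MSG_EOR) ? 1 : 0;
}

int main(void)
{
	printf("fragment: %d, complete message: %d\n",
	       deliver(0), deliver(MSG_EOR));
	return 0;
}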
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 		ctsn = cevent->tsn;

 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (!first_frag)
+				return NULL;
+			goto done;
 		case SCTP_DATA_MIDDLE_FRAG:
 			if (!first_frag) {
 				first_frag = pos;
 				next_tsn = ctsn + 1;
 				last_frag = pos;
-			} else if (next_tsn == ctsn)
+			} else if (next_tsn == ctsn) {
 				next_tsn++;
-			else
+				last_frag = pos;
+			} else
 				goto done;
 			break;
 		case SCTP_DATA_LAST_FRAG:
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 		} else
 			goto done;
 		break;
+
+		case SCTP_DATA_LAST_FRAG:
+			if (!first_frag)
+				return NULL;
+			else
+				goto done;
+			break;
+
 		default:
 			return NULL;
 		}
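The two reassembly-walk hunks above add explicit fragment-boundary handling: sctp_ulpq_retrieve_partial() now stops when it meets a SCTP_DATA_FIRST_FRAG (a new message begins there, so either the collected run is delivered or, if nothing was collected yet, NULL is returned), and sctp_ulpq_retrieve_first() handles SCTP_DATA_LAST_FRAG explicitly, delivering any FIRST/MIDDLE run already collected instead of returning NULL through the default case. A loose userspace model of such a walk, using hypothetical types rather than the kernel's sk_buff queue, assuming each fragment carries a TSN and a FIRST/MIDDLE/LAST flag:

#include <stdio.h>
#include <stdint.h>

enum frag { FIRST, MIDDLE, LAST };
struct ev { uint32_t tsn; enum frag flag; };

/* Return how many fragments form the partial run at the head of the
 * queue: a contiguous-TSN sequence of MIDDLE fragments, optionally
 * closed by a LAST fragment.  Meeting a FIRST fragment means another
 * message begins, so the walk stops there. */
static int partial_run_len(const struct ev *q, int n)
{
	int len = 0;
	uint32_t next_tsn = 0;

	for (int i = 0; i < n; i++) {
		if (q[i].flag == FIRST)
			break;			/* new message: stop */
		if (len && q[i].tsn != next_tsn)
			break;			/* TSN gap: stop */
		next_tsn = q[i].tsn + 1;
		len++;
		if (q[i].flag == LAST)
			break;			/* message complete */
	}
	return len;
}

int main(void)
{
	struct ev q[] = { {11, MIDDLE}, {12, MIDDLE}, {13, FIRST} };

	printf("%d\n", partial_run_len(q, 3));	/* 2: stops at FIRST */
	return 0;
}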
@@ -1025,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 	struct sctp_ulpevent *event;
 	struct sctp_association *asoc;
 	struct sctp_sock *sp;
+	__u32 ctsn;
+	struct sk_buff *skb;

 	asoc = ulpq->asoc;
 	sp = sctp_sk(asoc->base.sk);

 	/* If the association is already in Partial Delivery mode
-	 * we have noting to do.
+	 * we have nothing to do.
 	 */
 	if (ulpq->pd_mode)
 		return;

+	/* Data must be at or below the Cumulative TSN ACK Point to
+	 * start partial delivery.
+	 */
+	skb = skb_peek(&asoc->ulpq.reasm);
+	if (skb != NULL) {
+		ctsn = sctp_skb2event(skb)->tsn;
+		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+			return;
+	}
+
 	/* If the user enabled fragment interleave socket option,
 	 * multiple associations can enter partial delivery.
 	 * Otherwise, we can only enter partial delivery if the
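The partial-delivery hunk above refuses to start partial delivery unless the first event on the reassembly queue sits at or below the peer's Cumulative TSN ACK Point; the comparison uses the kernel's TSN_lte() serial-number macro. A small userspace sketch of that comparison, assuming RFC 1982-style serial arithmetic on 32-bit TSNs (tsn_lte() below is an illustration, not the kernel macro):

#include <stdio.h>
#include <stdint.h>

/* Serial-number "less than or equal": the signed difference handles
 * wrap-around of the 32-bit TSN space. */
static int tsn_lte(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}

int main(void)
{
	printf("%d\n", tsn_lte(5, 10));			/* 1 */
	printf("%d\n", tsn_lte(0xfffffffeu, 3));	/* 1: wraps around */
	printf("%d\n", tsn_lte(10, 5));			/* 0 */
	return 0;
}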
@@ -1077,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	}
 	/* If able to free enough room, accept this chunk. */
 	if (chunk && (freed >= needed)) {
-		__u32 tsn;
-		tsn = ntohl(chunk->subh.data_hdr->tsn);
-		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
-		sctp_ulpq_tail_data(ulpq, chunk, gfp);
-
-		sctp_ulpq_partial_delivery(ulpq, gfp);
+		int retval;
+		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+		/*
+		 * Enter partial delivery if chunk has not been
+		 * delivered; otherwise, drain the reassembly queue.
+		 */
+		if (retval <= 0)
+			sctp_ulpq_partial_delivery(ulpq, gfp);
+		else if (retval == 1)
+			sctp_ulpq_reasm_drain(ulpq);
 	}

 	sk_mem_reclaim(asoc->base.sk);
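With these changes, the renege path keys off the new return value of sctp_ulpq_tail_data(): retval <= 0 (nothing complete reached the socket, including any error return) still triggers partial delivery to free up the receive window, while retval == 1 (a complete message carrying MSG_EOR was already delivered) only drains what remains on the reassembly queue via sctp_ulpq_reasm_drain().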