Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/vxy/lksctp-dev
author    David S. Miller <davem@sunset.davemloft.net>
          Fri, 31 Aug 2007 05:11:31 +0000 (22:11 -0700)
committer David S. Miller <davem@sunset.davemloft.net>
          Fri, 31 Aug 2007 05:11:31 +0000 (22:11 -0700)
include/net/sctp/sm.h
include/net/sctp/structs.h
include/net/sctp/ulpqueue.h
net/sctp/associola.c
net/sctp/outqueue.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/sctp/ulpqueue.c

diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 73cb994..991c85b 100644
@@ -214,7 +214,7 @@ struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
                                          const struct sctp_chunk *);
 struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *,
                                          const struct sctp_chunk *);
-void sctp_init_cause(struct sctp_chunk *, __be16 cause, const void *, size_t);
+void sctp_init_cause(struct sctp_chunk *, __be16 cause, size_t);
 struct sctp_chunk *sctp_make_abort(const struct sctp_association *,
                              const struct sctp_chunk *,
                              const size_t hint);
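
The sm.h change above drops the payload pointer from sctp_init_cause(): the cause header is now written with the full length up front, and the payload is appended afterwards with sctp_addto_chunk() (or the new sctp_addto_param(), declared below), so a cause can be assembled from more than one piece. A minimal userspace sketch of that two-step pattern, assuming the RFC 4960 error-cause layout and a flat buffer standing in for struct sctp_chunk (names here are illustrative, not the kernel API):

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>

struct err_cause_hdr {
	uint16_t cause;		/* cause code, network byte order */
	uint16_t length;	/* header + payload, network byte order */
};

/* Step 1: write the cause header, declaring the total length up front. */
static size_t init_cause(uint8_t *buf, uint16_t cause, size_t paylen)
{
	struct err_cause_hdr hdr;

	hdr.cause  = htons(cause);
	hdr.length = htons(sizeof(hdr) + paylen);
	memcpy(buf, &hdr, sizeof(hdr));
	return sizeof(hdr);
}

int main(void)
{
	uint8_t buf[64];
	uint32_t tsn = htonl(0x12345678);	/* e.g. the "no user data" payload */
	size_t off;

	off = init_cause(buf, 9 /* No User Data, RFC 4960 */, sizeof(tsn));
	/* Step 2: append the payload separately, as the callers now do. */
	memcpy(buf + off, &tsn, sizeof(tsn));
	off += sizeof(tsn);
	printf("cause TLV is %zu bytes\n", off);
	return 0;
}

The real helpers operate on the chunk's sk_buff and also adjust the chunk length; the sketch only shows the ordering of the two steps.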
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index ee4559b..c0d5848 100644
@@ -726,6 +726,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
                          struct iovec *data);
 void sctp_chunk_free(struct sctp_chunk *);
 void  *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
+void  *sctp_addto_param(struct sctp_chunk *, int len, const void *data);
 struct sctp_chunk *sctp_chunkify(struct sk_buff *,
                                 const struct sctp_association *,
                                 struct sock *);
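
sctp_addto_param() is introduced because sctp_addto_chunk() pads the existing chunk out to a 4-byte boundary before appending, which would leave pad bytes inside an error cause whose declared length already covers a trailing parameter header (see sctp_make_abort_violation() below). A rough userspace sketch of the two append behaviours, with a plain byte buffer in place of the sk_buff and illustrative helper names:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define WORD_ROUND(s)	(((s) + 3) & ~3u)

/* Pads the existing data to a 4-byte boundary, then appends. */
static size_t addto_chunk(uint8_t *buf, size_t len, const void *data, size_t dlen)
{
	size_t pad = WORD_ROUND(len) - len;

	memset(buf + len, 0, pad);
	memcpy(buf + len + pad, data, dlen);
	return len + pad + dlen;
}

/* Appends raw bytes with no padding in between. */
static size_t addto_param(uint8_t *buf, size_t len, const void *data, size_t dlen)
{
	memcpy(buf + len, data, dlen);
	return len + dlen;
}

int main(void)
{
	uint8_t buf[64];
	uint8_t payload[5] = { 1, 2, 3, 4, 5 };
	uint8_t phdr[4]    = { 0, 1, 0, 4 };	/* stand-in parameter header */
	size_t len = 0;

	len = addto_chunk(buf, len, payload, sizeof(payload));
	/* Using addto_chunk() here would insert 3 pad bytes inside the
	 * cause; addto_param() keeps the parameter header contiguous.
	 */
	len = addto_param(buf, len, phdr, sizeof(phdr));
	printf("total %zu bytes\n", len);
	return 0;
}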
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index 39ea3f4..cd33270 100644
@@ -83,6 +83,7 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
 /* Skip over an SSN. */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
 
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
 #endif /* __sctp_ulpqueue_h__ */
 
 
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 498edb0..2ad1caf 100644
@@ -727,7 +727,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
                break;
 
        case SCTP_TRANSPORT_DOWN:
-               transport->state = SCTP_INACTIVE;
+               /* If the transport was never confirmed, do not transition it
+                * to the inactive state.
+                */
+               if (transport->state != SCTP_UNCONFIRMED)
+                       transport->state = SCTP_INACTIVE;
+
                spc_state = SCTP_ADDR_UNREACHABLE;
                break;
 
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 992f361..28f4fe7 100644
@@ -421,6 +421,13 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                 */
                if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
                   (!fast_retransmit && !chunk->tsn_gap_acked)) {
+                       /* If this chunk was sent less than one RTO ago, do not
+                        * retransmit this chunk, but give the peer time
+                        * to acknowledge it.
+                        */
+                       if ((jiffies - chunk->sent_at) < transport->rto)
+                               continue;
+
                        /* RFC 2960 6.2.1 Processing a Received SACK
                         *
                         * C) Any time a DATA chunk is marked for
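
The outqueue.c hunk above suppresses retransmission of chunks sent within the last RTO so the peer has a chance to SACK them first. A small standalone sketch of that rule, with plain integers standing in for jiffies and simplified fields in place of struct sctp_chunk (everything here is illustrative):

#include <stdio.h>

struct fake_chunk {
	unsigned long sent_at;		/* when the chunk was last sent */
	int tsn_gap_acked;		/* already covered by a gap ack block */
};

static int should_retransmit(const struct fake_chunk *c,
			     unsigned long now, unsigned long rto)
{
	if (c->tsn_gap_acked)
		return 0;		/* already acknowledged via gap ack */
	if (now - c->sent_at < rto)
		return 0;		/* sent less than one RTO ago: wait */
	return 1;
}

int main(void)
{
	struct fake_chunk recent = { .sent_at = 990, .tsn_gap_acked = 0 };
	struct fake_chunk stale  = { .sent_at = 500, .tsn_gap_acked = 0 };
	unsigned long now = 1000, rto = 300;

	printf("recent: %d, stale: %d\n",
	       should_retransmit(&recent, now, rto),
	       should_retransmit(&stale, now, rto));
	return 0;
}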
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 51c4d7f..79856c9 100644
@@ -110,7 +110,7 @@ static const struct sctp_paramhdr prsctp_param = {
  * abort chunk.
  */
 void  sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
-                     const void *payload, size_t paylen)
+                     size_t paylen)
 {
        sctp_errhdr_t err;
        __u16 len;
@@ -120,7 +120,6 @@ void  sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
        len = sizeof(sctp_errhdr_t) + paylen;
        err.length  = htons(len);
        chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
-       sctp_addto_chunk(chunk, paylen, payload);
 }
 
 /* 3.3.2 Initiation (INIT) (1)
@@ -780,8 +779,8 @@ struct sctp_chunk *sctp_make_abort_no_data(
 
        /* Put the tsn back into network byte order.  */
        payload = htonl(tsn);
-       sctp_init_cause(retval, SCTP_ERROR_NO_DATA, (const void *)&payload,
-                       sizeof(payload));
+       sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload));
+       sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload);
 
        /* RFC 2960 6.4 Multi-homed SCTP Endpoints
         *
@@ -823,7 +822,8 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
                        goto err_copy;
        }
 
-       sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
+       sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen);
+       sctp_addto_chunk(retval, paylen, payload);
 
        if (paylen)
                kfree(payload);
@@ -850,15 +850,17 @@ struct sctp_chunk *sctp_make_abort_violation(
        struct sctp_paramhdr phdr;
 
        retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen
-                                       + sizeof(sctp_chunkhdr_t));
+                                       + sizeof(sctp_paramhdr_t));
        if (!retval)
                goto end;
 
-       sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, payload, paylen);
+       sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen
+                                       + sizeof(sctp_paramhdr_t));
 
        phdr.type = htons(chunk->chunk_hdr->type);
        phdr.length = chunk->chunk_hdr->length;
-       sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &phdr);
+       sctp_addto_chunk(retval, paylen, payload);
+       sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr);
 
 end:
        return retval;
@@ -955,7 +957,8 @@ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
        if (!retval)
                goto nodata;
 
-       sctp_init_cause(retval, cause_code, payload, paylen);
+       sctp_init_cause(retval, cause_code, paylen);
+       sctp_addto_chunk(retval, paylen, payload);
 
 nodata:
        return retval;
@@ -1128,7 +1131,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
        void *target;
        void *padding;
        int chunklen = ntohs(chunk->chunk_hdr->length);
-       int padlen = chunklen % 4;
+       int padlen = WORD_ROUND(chunklen) - chunklen;
 
        padding = skb_put(chunk->skb, padlen);
        target = skb_put(chunk->skb, len);
@@ -1143,6 +1146,25 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
        return target;
 }
 
+/* Append bytes to the end of a parameter.  Will panic if chunk is not big
+ * enough.
+ */
+void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data)
+{
+       void *target;
+       int chunklen = ntohs(chunk->chunk_hdr->length);
+
+       target = skb_put(chunk->skb, len);
+
+       memcpy(target, data, len);
+
+       /* Adjust the chunk length field.  */
+       chunk->chunk_hdr->length = htons(chunklen + len);
+       chunk->chunk_end = skb_tail_pointer(chunk->skb);
+
+       return target;
+}
+
 /* Append bytes from user space to the end of a chunk.  Will panic if
  * chunk is not big enough.
  * Returns a kernel err value.
@@ -1174,25 +1196,36 @@ out:
  */
 void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
 {
+       struct sctp_datamsg *msg;
+       struct sctp_chunk *lchunk;
+       struct sctp_stream *stream;
        __u16 ssn;
        __u16 sid;
 
        if (chunk->has_ssn)
                return;
 
-       /* This is the last possible instant to assign a SSN. */
-       if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
-               ssn = 0;
-       } else {
-               sid = ntohs(chunk->subh.data_hdr->stream);
-               if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
-                       ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid);
-               else
-                       ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid);
-       }
+       /* All fragments will be on the same stream */
+       sid = ntohs(chunk->subh.data_hdr->stream);
+       stream = &chunk->asoc->ssnmap->out;
 
-       chunk->subh.data_hdr->ssn = htons(ssn);
-       chunk->has_ssn = 1;
+       /* Now assign the sequence number to the entire message.
+        * All fragments must have the same stream sequence number.
+        */
+       msg = chunk->msg;
+       list_for_each_entry(lchunk, &msg->chunks, frag_list) {
+               if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+                       ssn = 0;
+               } else {
+                       if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
+                               ssn = sctp_ssn_next(stream, sid);
+                       else
+                               ssn = sctp_ssn_peek(stream, sid);
+               }
+
+               lchunk->subh.data_hdr->ssn = htons(ssn);
+               lchunk->has_ssn = 1;
+       }
 }
 
 /* Helper function to assign a TSN if needed.  This assumes that both
@@ -1466,7 +1499,8 @@ no_hmac:
                        __be32 n = htonl(usecs);
 
                        sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
-                                       &n, sizeof(n));
+                                       sizeof(n));
+                       sctp_addto_chunk(*errp, sizeof(n), &n);
                        *error = -SCTP_IERROR_STALE_COOKIE;
                } else
                        *error = -SCTP_IERROR_NOMEM;
@@ -1556,7 +1590,8 @@ static int sctp_process_missing_param(const struct sctp_association *asoc,
                report.num_missing = htonl(1);
                report.type = paramtype;
                sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
-                               &report, sizeof(report));
+                               sizeof(report));
+               sctp_addto_chunk(*errp, sizeof(report), &report);
        }
 
        /* Stop processing this chunk. */
@@ -1574,7 +1609,7 @@ static int sctp_process_inv_mandatory(const struct sctp_association *asoc,
                *errp = sctp_make_op_error_space(asoc, chunk, 0);
 
        if (*errp)
-               sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0);
+               sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0);
 
        /* Stop processing this chunk. */
        return 0;
@@ -1595,9 +1630,10 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
                *errp = sctp_make_op_error_space(asoc, chunk, payload_len);
 
        if (*errp) {
-               sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, error,
-                               sizeof(error));
-               sctp_addto_chunk(*errp, sizeof(sctp_paramhdr_t), param);
+               sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
+                               sizeof(error) + sizeof(sctp_paramhdr_t));
+               sctp_addto_chunk(*errp, sizeof(error), error);
+               sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param);
        }
 
        return 0;
@@ -1618,9 +1654,10 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
        if (!*errp)
                *errp = sctp_make_op_error_space(asoc, chunk, len);
 
-       if (*errp)
-               sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED,
-                               param.v, len);
+       if (*errp) {
+               sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
+               sctp_addto_chunk(*errp, len, param.v);
+       }
 
        /* Stop processing this chunk. */
        return 0;
@@ -1672,10 +1709,13 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
                        *errp = sctp_make_op_error_space(asoc, chunk,
                                        ntohs(chunk->chunk_hdr->length));
 
-               if (*errp)
+               if (*errp) {
                        sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-                                       param.v,
                                        WORD_ROUND(ntohs(param.p->length)));
+                       sctp_addto_chunk(*errp,
+                                       WORD_ROUND(ntohs(param.p->length)),
+                                       param.v);
+               }
 
                break;
        case SCTP_PARAM_ACTION_SKIP:
@@ -1690,8 +1730,10 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
 
                if (*errp) {
                        sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-                                       param.v,
                                        WORD_ROUND(ntohs(param.p->length)));
+                       sctp_addto_chunk(*errp,
+                                       WORD_ROUND(ntohs(param.p->length)),
+                                       param.v);
                } else {
                        /* If there is no memory for generating the ERROR
                         * report as specified, an ABORT will be triggered
@@ -1791,7 +1833,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
         * VIOLATION error.  We build the ERROR chunk here and let the normal
         * error handling code build and send the packet.
         */
-       if (param.v < (void*)chunk->chunk_end - sizeof(sctp_paramhdr_t)) {
+       if (param.v != (void*)chunk->chunk_end) {
                sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
                return 0;
        }
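
One fix buried in the sm_make_chunk.c hunks is the padding calculation in sctp_addto_chunk(): chunklen % 4 yields the remainder, not the number of bytes needed to reach the next 4-byte boundary, so a length such as 17 was padded with 1 byte instead of 3. A quick standalone demonstration of the difference (WORD_ROUND() here mirrors the kernel macro, rounding up to a multiple of 4):

#include <stdio.h>

#define WORD_ROUND(s)	(((s) + 3) & ~3u)

int main(void)
{
	unsigned int chunklen;

	for (chunklen = 16; chunklen <= 19; chunklen++) {
		unsigned int old_pad = chunklen % 4;			/* buggy */
		unsigned int new_pad = WORD_ROUND(chunklen) - chunklen;	/* fixed */

		printf("len %u: old pad %u, correct pad %u\n",
		       chunklen, old_pad, new_pad);
	}
	return 0;
}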
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d9fad4f..8d78900 100644
@@ -1013,8 +1013,9 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
                break;
 
        case SCTP_DISPOSITION_VIOLATION:
-               printk(KERN_ERR "sctp protocol violation state %d "
-                      "chunkid %d\n", state, subtype.chunk);
+               if (net_ratelimit())
+                       printk(KERN_ERR "sctp protocol violation state %d "
+                              "chunkid %d\n", state, subtype.chunk);
                break;
 
        case SCTP_DISPOSITION_NOT_IMPL:
@@ -1130,6 +1131,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                        /* Move the Cumulative TSN Ack ahead. */
                        sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
 
+                       /* purge the fragmentation queue */
+                       sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
+
                        /* Abort any in progress partial delivery. */
                        sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
                        break;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 71cad56..177528e 100644
@@ -264,7 +264,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
        struct sctp_chunk *err_chunk;
        struct sctp_packet *packet;
        sctp_unrecognized_param_t *unk_param;
-       struct sock *sk;
        int len;
 
        /* 6.10 Bundling
@@ -285,16 +284,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
        if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
                return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
 
-       sk = ep->base.sk;
-       /* If the endpoint is not listening or if the number of associations
-        * on the TCP-style socket exceed the max backlog, respond with an
-        * ABORT.
-        */
-       if (!sctp_sstate(sk, LISTENING) ||
-           (sctp_style(sk, TCP) &&
-            sk_acceptq_is_full(sk)))
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
-
        /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
         * Tag.
         */
@@ -590,6 +579,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
        struct sctp_ulpevent *ev, *ai_ev = NULL;
        int error = 0;
        struct sctp_chunk *err_chk_p;
+       struct sock *sk;
 
        /* If the packet is an OOTB packet which is temporarily on the
         * control endpoint, respond with an ABORT.
@@ -605,6 +595,15 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
                return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 
+       /* If the endpoint is not listening or if the number of associations
+        * on the TCP-style socket exceeds the max backlog, respond with an
+        * ABORT.
+        */
+       sk = ep->base.sk;
+       if (!sctp_sstate(sk, LISTENING) ||
+           (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
+               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+
        /* "Decode" the chunk.  We have no optional parameters so we
         * are in good shape.
         */
@@ -1032,19 +1031,21 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
        /* This should never happen, but lets log it if so.  */
        if (unlikely(!link)) {
                if (from_addr.sa.sa_family == AF_INET6) {
-                       printk(KERN_WARNING
-                              "%s association %p could not find address "
-                              NIP6_FMT "\n",
-                              __FUNCTION__,
-                              asoc,
-                              NIP6(from_addr.v6.sin6_addr));
+                       if (net_ratelimit())
+                               printk(KERN_WARNING
+                                   "%s association %p could not find address "
+                                   NIP6_FMT "\n",
+                                   __FUNCTION__,
+                                   asoc,
+                                   NIP6(from_addr.v6.sin6_addr));
                } else {
-                       printk(KERN_WARNING
-                              "%s association %p could not find address "
-                              NIPQUAD_FMT "\n",
-                              __FUNCTION__,
-                              asoc,
-                              NIPQUAD(from_addr.v4.sin_addr.s_addr));
+                       if (net_ratelimit())
+                               printk(KERN_WARNING
+                                   "%s association %p could not find address "
+                                   NIPQUAD_FMT "\n",
+                                   __FUNCTION__,
+                                   asoc,
+                                   NIPQUAD(from_addr.v4.sin_addr.s_addr));
                }
                return SCTP_DISPOSITION_DISCARD;
        }
@@ -3362,7 +3363,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
                abort = sctp_make_abort(asoc, asconf_ack,
                                        sizeof(sctp_errhdr_t));
                if (abort) {
-                       sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, NULL, 0);
+                       sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
                        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                                        SCTP_CHUNK(abort));
                }
@@ -3392,7 +3393,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
                abort = sctp_make_abort(asoc, asconf_ack,
                                        sizeof(sctp_errhdr_t));
                if (abort) {
-                       sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, NULL, 0);
+                       sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
                        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                                        SCTP_CHUNK(abort));
                }
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 01c6364..3335460 100644
@@ -353,6 +353,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
         * The function sctp_get_port_local() does duplicate address
         * detection.
         */
+       addr->v4.sin_port = htons(snum);
        if ((ret = sctp_get_port_local(sk, addr))) {
                if (ret == (long) sk) {
                        /* This endpoint has a conflicting address. */
@@ -5202,6 +5203,7 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
 
                sctp_unhash_endpoint(ep);
                sk->sk_state = SCTP_SS_CLOSED;
+               return 0;
        }
 
        /* Return if we are already listening. */
@@ -5249,6 +5251,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
 
                sctp_unhash_endpoint(ep);
                sk->sk_state = SCTP_SS_CLOSED;
+               return 0;
        }
 
        if (sctp_sstate(sk, LISTENING))
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 34eb977..fa0ba2a 100644
@@ -659,6 +659,46 @@ done:
        return retval;
 }
 
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue.  The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+       struct sk_buff *pos, *tmp;
+       struct sctp_ulpevent *event;
+       __u32 tsn;
+
+       if (skb_queue_empty(&ulpq->reasm))
+               return;
+
+       skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+               event = sctp_skb2event(pos);
+               tsn = event->tsn;
+
+               /* Since the entire message must be abandoned by the
+                * sender (item A3 in Section 3.5, RFC 3758), we can
+                * free all fragments on the list that are less than
+                * or equal to ctsn_point
+                */
+               if (TSN_lte(tsn, fwd_tsn)) {
+                       __skb_unlink(pos, &ulpq->reasm);
+                       sctp_ulpevent_free(event);
+               } else
+                       break;
+       }
+}
+
 /* Helper function to gather skbs that have possibly become
  * ordered by an incoming chunk.
  */
@@ -794,7 +834,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 /* Helper function to gather skbs that have possibly become
  * ordered by forward tsn skipping their dependencies.
  */
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 {
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
@@ -813,31 +853,40 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
                csid = cevent->stream;
                cssn = cevent->ssn;
 
-               if (cssn != sctp_ssn_peek(in, csid))
+               /* Have we gone too far?  */
+               if (csid > sid)
                        break;
 
-               /* Found it, so mark in the ssnmap. */
-               sctp_ssn_next(in, csid);
+               /* Have we not gone far enough?  */
+               if (csid < sid)
+                       continue;
+
+               /* see if this ssn has been marked by skipping */
+               if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+                       break;
 
                __skb_unlink(pos, &ulpq->lobby);
-               if (!event) {
+               if (!event)
                        /* Create a temporary list to collect chunks on.  */
                        event = sctp_skb2event(pos);
-                       __skb_queue_tail(&temp, sctp_event2skb(event));
-               } else {
-                       /* Attach all gathered skbs to the event.  */
-                       __skb_queue_tail(&temp, pos);
-               }
+
+               /* Attach all gathered skbs to the event.  */
+               __skb_queue_tail(&temp, pos);
        }
 
        /* Send event to the ULP.  'event' is the sctp_ulpevent for
         * very first SKB on the 'temp' list.
         */
-       if (event)
+       if (event) {
+               /* see if we have more ordered data that we can deliver */
+               sctp_ulpq_retrieve_ordered(ulpq, event);
                sctp_ulpq_tail_event(ulpq, event);
+       }
 }
 
-/* Skip over an SSN. */
+/* Skip over an SSN. This is used during the processing of
+ * a Forward TSN chunk to skip over the abandoned ordered data.
+ */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 {
        struct sctp_stream *in;
@@ -855,7 +904,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
        /* Go find any other chunks that were waiting for
         * ordering and deliver them if needed.
         */
-       sctp_ulpq_reap_ordered(ulpq);
+       sctp_ulpq_reap_ordered(ulpq, sid);
        return;
 }
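
The flush and reap loops above depend on TSN_lte() and SSN_lt(), which compare 32-bit TSNs and 16-bit SSNs as wrapping serial numbers rather than plain integers. A standalone sketch of that comparison semantics (the helper names are illustrative; the kernel's own macros may be written differently):

#include <stdint.h>
#include <stdio.h>

/* "a <= b" in serial-number arithmetic: the signed difference wraps. */
static int tsn_lte(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}

static int ssn_lt(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) < 0;
}

int main(void)
{
	/* 0xfffffffe precedes 0x00000001 even though it is numerically
	 * larger, and the wrapping compare reflects that.
	 */
	printf("tsn_lte: %d\n", tsn_lte(0xfffffffeu, 0x00000001u));
	printf("ssn_lt:  %d\n", ssn_lt(0xfffe, 0x0001));
	return 0;
}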