/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement SCTP stream message interleaving, mostly
 * the processing of I-DATA and I-FORWARD-TSN chunks.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Xin Long <lucien.xin@gmail.com>
 */

#include <net/busy_poll.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/ulpevent.h>
#include <linux/sctp.h>

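/* Build an empty I-DATA chunk (RFC 8260) to carry a fragment of @len
 * payload bytes.  Unlike the DATA header, the I-DATA header uses a
 * 32-bit MID plus FSN instead of the 16-bit SSN; those are filled in
 * later by sctp_chunk_assign_mid(), so only the stream id is set here.
 */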
static struct sctp_chunk *sctp_make_idatafrag_empty(
					const struct sctp_association *asoc,
					const struct sctp_sndrcvinfo *sinfo,
					int len, __u8 flags, gfp_t gfp)
{
	struct sctp_chunk *retval;
	struct sctp_idatahdr dp;

	memset(&dp, 0, sizeof(dp));
	dp.stream = htons(sinfo->sinfo_stream);

	if (sinfo->sinfo_flags & SCTP_UNORDERED)
		flags |= SCTP_DATA_UNORDERED;

	retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
	if (!retval)
		return NULL;

	retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
	memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));

	return retval;
}

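/* Assign the 32-bit MID (Message Identifier) to every fragment of a
 * message, and a running FSN (Fragment Sequence Number) to all but the
 * first fragment, whose I-DATA header field carries the PPID instead.
 * The stream's MID counter is only advanced (next vs. peek) once the
 * last fragment is seen; ordered and unordered messages draw from
 * separate counters.
 */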
static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	struct sctp_chunk *lchunk;
	__u32 cfsn = 0;
	__u16 sid;

	if (chunk->has_mid)
		return;

	sid = sctp_chunk_stream_no(chunk);
	stream = &chunk->asoc->stream;

	list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
		struct sctp_idatahdr *hdr;
		__u32 mid;

		lchunk->has_mid = 1;

		hdr = lchunk->subh.idata_hdr;

		if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
			hdr->ppid = lchunk->sinfo.sinfo_ppid;
		else
			hdr->fsn = htonl(cfsn++);

		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_uo_next(stream, out, sid) :
				sctp_mid_uo_peek(stream, out, sid);
		} else {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_next(stream, out, sid) :
				sctp_mid_peek(stream, out, sid);
		}
		hdr->mid = htonl(mid);
	}
}

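/* Receive-side sanity checks, used through the ops tables below: reject
 * a chunk whose type does not match the negotiated mode (DATA here,
 * I-DATA in sctp_validate_idata()), and reject an ordered chunk whose
 * SSN (resp. MID) is already behind the stream's next expected value.
 */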
static bool sctp_validate_data(struct sctp_chunk *chunk)
{
	const struct sctp_stream *stream;
	__u16 sid, ssn;

	if (chunk->chunk_hdr->type != SCTP_CID_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	ssn = ntohs(chunk->subh.data_hdr->ssn);

	return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
}

static bool sctp_validate_idata(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	__u32 mid;
	__u16 sid;

	if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	mid = ntohl(chunk->subh.idata_hdr->mid);

	return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
}

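/* Insert @event into the reassembly queue, which is kept sorted by
 * stream id, then MID (in serial-number arithmetic), then FSN, with a
 * FIRST_FRAG placed ahead of the other fragments of its message.  The
 * common in-order arrival case is caught by the two tail checks before
 * the full walk.
 */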
static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos, *loc;

	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	loc = NULL;
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid))) {
			loc = pos;
			break;
		}
		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn)) {
			loc = pos;
			break;
		}
	}

	if (!loc)
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
	else
		__skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event));
}

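/* Partial delivery is already running on this stream: pull the next
 * run of in-sequence fragments (starting at sin->fsn within message
 * sin->mid) off the reassembly queue and hand it up.  Reaching a
 * LAST_FRAG completes the message and leaves partial-delivery mode.
 */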
static struct sctp_ulpevent *sctp_intl_retrieve_partial(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;

		if (cevent->stream > event->stream ||
		    cevent->mid != sin->mid)
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode = 0;
		}
	}

	return retval;
}

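/* Look for a complete message (FIRST_FRAG..LAST_FRAG with consecutive
 * FSNs) matching @event's MID.  If none is found but the fragments of
 * the next expected message already amount to at least the socket's
 * partial-delivery point (pd_point), deliver that prefix and put the
 * stream into partial-delivery mode.
 */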
static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (cevent->mid == sin->mid) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
						     &ulpq->reasm,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn = next_fsn;
			sin->pd_mode = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
					     &ulpq->reasm,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm(ulpq, event);

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
	if (sin->pd_mode && event->mid == sin->mid &&
	    event->fsn == sin->fsn)
		retval = sctp_intl_retrieve_partial(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled(ulpq, event);

	return retval;
}

static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	cevent = (struct sctp_ulpevent *)pos->cb;
	if (event->stream == cevent->stream &&
	    MID_lt(cevent->mid, event->mid)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if (event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > event->stream)
			break;

		if (cevent->stream == event->stream &&
		    MID_lt(event->mid, cevent->mid))
			break;
	}

	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sctp_stream *stream;
	struct sk_buff *pos, *tmp;
	__u16 sid = event->stream;

	stream = &ulpq->asoc->stream;
	event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;

	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > sid)
			break;

		if (cevent->stream < sid)
			continue;

		if (cevent->mid != sctp_mid_peek(stream, in, sid))
			break;

		sctp_mid_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		__skb_queue_tail(event_list, pos);
	}
}

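/* Ordered delivery: a message whose MID is not the next expected one
 * for its stream is parked in the lobby; otherwise it is delivered,
 * the expected MID is advanced, and any messages from the lobby that
 * are now in order are chained behind it by
 * sctp_intl_retrieve_ordered().
 */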
static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_stream *stream;
	__u16 sid;

	stream = &ulpq->asoc->stream;
	sid = event->stream;

	if (event->mid != sctp_mid_peek(stream, in, sid)) {
		sctp_intl_store_ordered(ulpq, event);
		return NULL;
	}

	sctp_mid_next(stream, in, sid);

	sctp_intl_retrieve_ordered(ulpq, event);

	return event;
}

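/* Queue a reassembled (and, if ordered, reordered) event on the socket
 * receive queue; skb->prev may point at a whole list of events to be
 * spliced in one go.  Honours the socket shutdown state and the event
 * subscription mask.  Returns 1 if something was queued, 0 if the
 * event was dropped and freed.
 */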
static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
			      struct sctp_ulpevent *event)
{
	struct sk_buff *skb = sctp_event2skb(event);
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff_head *skb_list;

	skb_list = (struct sk_buff_head *)skb->prev;

	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
		goto out_free;

	if (skb_list)
		skb_queue_splice_tail_init(skb_list,
					   &sk->sk_receive_queue);
	else
		__skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}

	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

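/* The *_uo helpers below mirror sctp_intl_store_reasm(),
 * sctp_intl_retrieve_partial() and sctp_intl_retrieve_reassembled(),
 * but work on the separate reasm_uo queue and the per-stream
 * mid_uo/fsn_uo/pd_mode_uo state: unordered I-DATA messages are
 * reassembled independently of the ordered ones.
 */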
static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
				     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->reasm_uo);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid)))
			break;

		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn))
			break;
	}

	__skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, sin->mid_uo))
			continue;
		if (MID_lt(sin->mid_uo, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode_uo = 0;
		}
	}

	return retval;
}

static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!sin->pd_mode_uo) {
				sin->mid_uo = cevent->mid;
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
						     &ulpq->reasm_uo,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn_uo = next_fsn;
			sin->pd_mode_uo = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
					     &ulpq->reasm_uo,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm_uo(ulpq, event);

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
	if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
	    event->fsn == sin->fsn_uo)
		retval = sctp_intl_retrieve_partial_uo(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);

	return retval;
}

static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode_uo)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			first_frag = pos;
			last_frag = pos;
			next_fsn = 0;
			sin = csin;
			sid = cevent->stream;
			sin->mid_uo = cevent->mid;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid_uo &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		sin->pd_mode_uo = 1;
	}

	return retval;
}

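/* Entry point for a received I-DATA chunk (.ulpevent_data): wrap it in
 * an ulpevent, run it through reassembly and, for ordered messages,
 * through MID ordering, then queue whatever became deliverable.
 * Returns 1 if a complete message (MSG_EOR) was queued, 0 if data is
 * still pending, or -ENOMEM.
 */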
static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
			       struct sctp_chunk *chunk, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;
	int event_eor = 0;

	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->mid = ntohl(chunk->subh.idata_hdr->mid);
	if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
		event->ppid = chunk->subh.idata_hdr->ppid;
	else
		event->fsn = ntohl(chunk->subh.idata_hdr->fsn);

	if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
		event = sctp_intl_reasm(ulpq, event);
		if (event && event->msg_flags & MSG_EOR) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_intl_order(ulpq, event);
		}
	} else {
		event = sctp_intl_reasm_uo(ulpq, event);
	}

	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_enqueue_event(ulpq, event);
	}

	return event_eor;
}

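/* Forced partial delivery: find the first stream in the reassembly
 * queue that is not already in partial-delivery mode and whose next
 * expected message has its leading fragments queued in sequence,
 * deliver that prefix and put the stream into partial-delivery mode.
 */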
static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			if (cevent->mid == csin->mid) {
				first_frag = pos;
				last_frag = pos;
				next_fsn = 0;
				sin = csin;
				sid = cevent->stream;
			}
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		sin->pd_mode = 1;
	}

	return retval;
}

static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *event;

	if (!skb_queue_empty(&ulpq->reasm)) {
		do {
			event = sctp_intl_retrieve_first(ulpq);
			if (event)
				sctp_enqueue_event(ulpq, event);
		} while (event);
	}

	if (!skb_queue_empty(&ulpq->reasm_uo)) {
		do {
			event = sctp_intl_retrieve_first_uo(ulpq);
			if (event)
				sctp_enqueue_event(ulpq, event);
		} while (event);
	}
}

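/* Memory-pressure path (.renege_events): try to free enough receive
 * buffer space for the newly arrived chunk by purging events that were
 * queued but not yet delivered (lobby first, then both reassembly
 * queues).  If the chunk still does not complete a message, fall back
 * to partial delivery.
 */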
static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			       gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	needed = ntohs(chunk->chunk_hdr->length) -
		 sizeof(struct sctp_idata_chunk);

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
						       needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
						       needed);
	}

	if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
		sctp_intl_start_pd(ulpq, gfp);

	sk_mem_reclaim(asoc->base.sk);
}

static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
				      __u32 mid, __u16 flags, gfp_t gfp)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_ulpevent *ev = NULL;

	if (!sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
					&sctp_sk(sk)->subscribe))
		return;

	ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
				      sid, mid, flags, gfp);
	if (ev) {
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

		if (!sctp_sk(sk)->data_ready_signalled) {
			sctp_sk(sk)->data_ready_signalled = 1;
			sk->sk_data_ready(sk);
		}
	}
}

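/* After the expected MID of @sid has been moved forward (by
 * sctp_mid_skip()), drain from the lobby every event on that stream
 * whose MID is now behind the expected one, plus the event matching
 * the new expected MID if it is queued next, and deliver the batch.
 */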
static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	struct sctp_ulpevent *cevent, *event = NULL;
	struct sk_buff_head *lobby = &ulpq->lobby;
	struct sk_buff *pos, *tmp;
	struct sk_buff_head temp;
	__u16 csid;
	__u32 cmid;

	skb_queue_head_init(&temp);
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid > sid)
			break;

		if (csid < sid)
			continue;

		if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			event = sctp_skb2event(pos);

		__skb_queue_tail(&temp, pos);
	}

	if (!event && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
			sctp_mid_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	if (event) {
		sctp_intl_retrieve_ordered(ulpq, event);
		sctp_enqueue_event(ulpq, event);
	}
}

static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	__u16 sid;

	for (sid = 0; sid < stream->incnt; sid++) {
		struct sctp_stream_in *sin = SCTP_SI(stream, sid);
		__u32 mid;

		if (sin->pd_mode_uo) {
			sin->pd_mode_uo = 0;

			mid = sin->mid_uo;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
		}

		if (sin->pd_mode) {
			sin->pd_mode = 0;

			mid = sin->mid;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
			sctp_mid_skip(stream, in, sid, mid);

			sctp_intl_reap_ordered(ulpq, sid);
		}
	}

	/* intl abort pd happens only when all data needs to be cleaned */
	sctp_ulpq_flush(ulpq);
}

static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
				    int nskips, __be16 stream, __u8 flags)
{
	int i;

	for (i = 0; i < nskips; i++)
		if (skiplist[i].stream == stream &&
		    skiplist[i].flags == flags)
			return i;

	return i;
}

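/* Build an I-FORWARD-TSN chunk (.generate_ftsn): advance the advanced
 * peer ack point across abandoned chunks and collect up to ten skip
 * entries, one per (stream, flags) pair, each holding the highest
 * abandoned MID.  The U bit flags a skip entry for an unordered
 * message.
 */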
#define SCTP_FTSN_U_BIT	0x1
static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct list_head *lchunk, *temp;
	int nskips = 0, skip_pos;
	struct sctp_chunk *chunk;
	__u32 tsn;

	if (!asoc->peer.prsctp_capable)
		return;

	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
			__be16 sid = chunk->subh.idata_hdr->stream;
			__be32 mid = chunk->subh.idata_hdr->mid;
			__u8 flags = 0;

			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				flags |= SCTP_FTSN_U_BIT;

			asoc->adv_peer_ack_point = tsn;
			skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
						     sid, flags);
			ftsn_skip_arr[skip_pos].stream = sid;
			ftsn_skip_arr[skip_pos].reserved = 0;
			ftsn_skip_arr[skip_pos].flags = flags;
			ftsn_skip_arr[skip_pos].mid = mid;
			if (skip_pos == nskips)
				nskips++;
			if (nskips == 10)
				break;
		} else {
			break;
		}
	}

	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
					       nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
	}
}

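/* Walk the skip list of an I-FORWARD-TSN chunk.  The two validators
 * below differ only in the expected chunk type and skip entry layout;
 * both reject a chunk that references a nonexistent incoming stream.
 */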
#define _sctp_walk_ifwdtsn(pos, chunk, end) \
	for (pos = chunk->subh.ifwdtsn_hdr->skip; \
	     (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)

#define sctp_walk_ifwdtsn(pos, ch) \
	_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
					sizeof(struct sctp_ifwdtsn_chunk))

static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_fwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_ifwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* purge the fragmentation queue */
	sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
	/* Abort any in progress partial delivery. */
	sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	struct sk_buff *pos, *tmp;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		}
	}

	skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm_uo);
			sctp_ulpevent_free(event);
		}
	}
}

static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* purge the fragmentation queue */
	sctp_intl_reasm_flushtsn(ulpq, ftsn);
	/* abort only when it's for all data */
	if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
		sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;

	/* Walk through all the skipped SSNs */
	sctp_walk_fwdtsn(skip, chunk)
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
}

static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
			   __u8 flags)
{
	struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid);
	struct sctp_stream *stream = &ulpq->asoc->stream;

	if (flags & SCTP_FTSN_U_BIT) {
		if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
			sin->pd_mode_uo = 0;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
						  GFP_ATOMIC);
		}
		return;
	}

	if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
		return;

	if (sin->pd_mode) {
		sin->pd_mode = 0;
		sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
	}

	sctp_mid_skip(stream, in, sid, mid);

	sctp_intl_reap_ordered(ulpq, sid);
}

static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;

	/* Walk through all the skipped MIDs and abort stream pd if possible */
	sctp_walk_ifwdtsn(skip, chunk)
		sctp_intl_skip(ulpq, ntohs(skip->stream),
			       ntohl(skip->mid), skip->flags);
}

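/* The two ops tables: sctp_stream_interleave_0 is the classic
 * DATA/FORWARD-TSN machinery implemented elsewhere in the SCTP stack,
 * sctp_stream_interleave_1 the I-DATA/I-FORWARD-TSN machinery from
 * this file.  Which table an association uses is fixed at init time
 * from asoc->intl_enable.
 */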
static struct sctp_stream_interleave sctp_stream_interleave_0 = {
	.data_chunk_len		= sizeof(struct sctp_data_chunk),
	.ftsn_chunk_len		= sizeof(struct sctp_fwdtsn_chunk),
	/* DATA process functions */
	.make_datafrag		= sctp_make_datafrag_empty,
	.assign_number		= sctp_chunk_assign_ssn,
	.validate_data		= sctp_validate_data,
	.ulpevent_data		= sctp_ulpq_tail_data,
	.enqueue_event		= sctp_ulpq_tail_event,
	.renege_events		= sctp_ulpq_renege,
	.start_pd		= sctp_ulpq_partial_delivery,
	.abort_pd		= sctp_ulpq_abort_pd,
	/* FORWARD-TSN process functions */
	.generate_ftsn		= sctp_generate_fwdtsn,
	.validate_ftsn		= sctp_validate_fwdtsn,
	.report_ftsn		= sctp_report_fwdtsn,
	.handle_ftsn		= sctp_handle_fwdtsn,
};

static struct sctp_stream_interleave sctp_stream_interleave_1 = {
	.data_chunk_len		= sizeof(struct sctp_idata_chunk),
	.ftsn_chunk_len		= sizeof(struct sctp_ifwdtsn_chunk),
	/* I-DATA process functions */
	.make_datafrag		= sctp_make_idatafrag_empty,
	.assign_number		= sctp_chunk_assign_mid,
	.validate_data		= sctp_validate_idata,
	.ulpevent_data		= sctp_ulpevent_idata,
	.enqueue_event		= sctp_enqueue_event,
	.renege_events		= sctp_renege_events,
	.start_pd		= sctp_intl_start_pd,
	.abort_pd		= sctp_intl_abort_pd,
	/* I-FORWARD-TSN process functions */
	.generate_ftsn		= sctp_generate_iftsn,
	.validate_ftsn		= sctp_validate_iftsn,
	.report_ftsn		= sctp_report_iftsn,
	.handle_ftsn		= sctp_handle_iftsn,
};

void sctp_stream_interleave_init(struct sctp_stream *stream)
{
	struct sctp_association *asoc;

	asoc = container_of(stream, struct sctp_association, stream);
	stream->si = asoc->intl_enable ? &sctp_stream_interleave_1
				       : &sctp_stream_interleave_0;
}