1use std::collections::VecDeque;
2
/// Minimal RTO when nodelay mode is on.
pub const IKCP_RTO_NDL: u32 = 30;
/// Normal minimal RTO.
pub const IKCP_RTO_MIN: u32 = 100;
/// Initial/default RTO.
pub const IKCP_RTO_DEF: u32 = 200;
/// Upper bound for the RTO.
pub const IKCP_RTO_MAX: u32 = 60000;
/// Command byte: data push.
pub const IKCP_CMD_PUSH: u8 = 81;
/// Command byte: acknowledgement.
pub const IKCP_CMD_ACK: u8 = 82;
/// Command byte: window probe ask.
pub const IKCP_CMD_WASK: u8 = 83;
/// Command byte: window size reply.
pub const IKCP_CMD_WINS: u8 = 84;
/// Probe flag: need to send IKCP_CMD_WASK.
pub const IKCP_ASK_SEND: u32 = 1;
/// Probe flag: need to send IKCP_CMD_WINS.
pub const IKCP_ASK_TELL: u32 = 2;
/// Default send window (segments).
pub const IKCP_WND_SND: u32 = 32;
/// Default receive window (segments).
pub const IKCP_WND_RCV: u32 = 128;
/// Default MTU in bytes.
pub const IKCP_MTU_DEF: u32 = 1400;
/// Duplicate-ack threshold (unused default for fast resend).
pub const IKCP_ACK_FAST: u32 = 3;
/// Default internal flush interval (ms).
pub const IKCP_INTERVAL: u32 = 100;
/// Wire overhead of one segment header, in bytes.
pub const IKCP_OVERHEAD: u32 = 24;
/// Retransmission count after which the link is declared dead.
pub const IKCP_DEADLINK: u32 = 20;
/// Initial slow-start threshold (segments).
pub const IKCP_THRESH_INIT: u32 = 2;
/// Minimal slow-start threshold (segments).
pub const IKCP_THRESH_MIN: u32 = 2;
/// Initial window-probe wait (ms).
pub const IKCP_PROBE_INIT: u32 = 7000;
/// Maximum window-probe wait (ms).
pub const IKCP_PROBE_LIMIT: u32 = 120000;
/// Maximum transmissions before fast-resend is suppressed.
pub const IKCP_FASTACK_LIMIT: u32 = 5;

/// Copy the first `len` bytes of `src` into the front of `dst`.
///
/// Panics if either slice is shorter than `len` (same crash-on-misuse
/// behavior as the C `memcpy` it replaces, but bounds-checked).
pub(crate) fn memcpy(dst: &mut [u8], src: &[u8], len: usize) {
    let target = &mut dst[..len];
    target.copy_from_slice(&src[..len]);
}
29
/// Byte offset of `dst`'s start from the base address `src`.
///
/// Used by `ikcp_flush` to recover how many bytes have been written into the
/// flush buffer so far (`dst` is the advancing cursor, `src` the buffer base).
pub(crate) fn memoffset(dst: &mut [u8], src: usize) -> usize {
    let cursor = dst.as_ptr() as usize;
    cursor - src
}
33
/// Write one byte at the front of `p` and return the remaining slice.
pub fn ikcp_encode8u(p: &mut [u8], c: u8) -> &mut [u8] {
    let (head, rest) = p.split_at_mut(1);
    head[0] = c;
    rest
}
39
/// Read one byte from the front of `p` into `c`, returning the remainder.
pub fn ikcp_decode8u<'a>(p: &'a [u8], c: &mut u8) -> &'a [u8] {
    let (head, rest) = p.split_at(1);
    *c = head[0];
    rest
}
45
/// Write `w` little-endian at the front of `p`, returning the remainder.
pub fn ikcp_encode16u(p: &mut [u8], w: u16) -> &mut [u8] {
    let (head, rest) = p.split_at_mut(2);
    head.copy_from_slice(&w.to_le_bytes());
    rest
}
51
/// Read a little-endian u16 from the front of `p` into `w`, returning the remainder.
pub fn ikcp_decode16u<'a>(p: &'a [u8], w: &mut u16) -> &'a [u8] {
    let (head, rest) = p.split_at(2);
    *w = u16::from_le_bytes([head[0], head[1]]);
    rest
}
57
/// Write `l` little-endian at the front of `p`, returning the remainder.
pub fn ikcp_encode32u(p: &mut [u8], l: u32) -> &mut [u8] {
    let (head, rest) = p.split_at_mut(4);
    head.copy_from_slice(&l.to_le_bytes());
    rest
}
63
/// Read a little-endian u32 from the front of `p` into `l`, returning the remainder.
pub fn ikcp_decode32u<'a>(p: &'a [u8], l: &mut u32) -> &'a [u8] {
    let (head, rest) = p.split_at(4);
    *l = u32::from_le_bytes([head[0], head[1], head[2], head[3]]);
    rest
}
69
/// Signed difference `later - earlier` between two wrapping 32-bit timestamps
/// (or sequence numbers). Negative means `later` is actually behind `earlier`.
pub fn ikcp_timediff(later: u32, earlier: u32) -> i32 {
    let delta = later.wrapping_sub(earlier);
    delta as i32
}
73
74pub(crate) fn ikcp_segment_new(size: i32) -> IKCPSEG {
76 IKCPSEG {
77 conv: 0,
78 cmd: 0,
79 frg: 0,
80 wnd: 0,
81 ts: 0,
82 sn: 0,
83 una: 0,
84 resendts: 0,
85 rto: 0,
86 fastack: 0,
87 xmit: 0,
88 data: vec![0; size as usize],
89 }
90}
91
92pub(crate) fn ikcp_output<T, F>(
94 kcp: &IKCPCB,
95 size: i32,
96 data: &mut [u8],
97 user: &mut T,
98 output: &mut F,
99) where
100 F: FnMut(&mut [u8], i32, &IKCPCB, &mut T),
101{
102 if size == 0 {
103 return;
104 }
105
106 (*output)(data, size, kcp, user);
107}
108
109pub fn ikcp_create(conv: u32) -> IKCPCB {
111 IKCPCB {
112 conv,
113 snd_una: 0,
114 snd_nxt: 0,
115 rcv_nxt: 0,
116 ts_probe: 0,
117 probe_wait: 0,
118 snd_wnd: IKCP_WND_SND,
119 rcv_wnd: IKCP_WND_RCV,
120 rmt_wnd: IKCP_WND_RCV,
121 cwnd: 0,
122 incr: 0,
123 probe: 0,
124 mtu: IKCP_MTU_DEF,
125 mss: IKCP_MTU_DEF - IKCP_OVERHEAD,
126 stream: false,
127 snd_queue: VecDeque::new(),
128 rcv_queue: VecDeque::new(),
129 snd_buf: VecDeque::new(),
130 rcv_buf: VecDeque::new(),
131 state: false,
132 acklist: vec![],
133 rx_srtt: 0,
134 rx_rttval: 0,
135 rx_rto: IKCP_RTO_DEF as i32,
136 rx_minrto: IKCP_RTO_MIN as i32,
137 current: 0,
138 interval: IKCP_INTERVAL,
139 ts_flush: IKCP_INTERVAL,
140 nodelay: 0,
141 updated: false,
142 ssthresh: IKCP_THRESH_INIT,
143 fastresend: 0,
144 fastlimit: IKCP_FASTACK_LIMIT as i32,
145 nocwnd: false,
146 xmit: 0,
147 dead_link: IKCP_DEADLINK,
148 }
149}
150
151pub fn ikcp_recv(kcp: &mut IKCPCB, mut buffer: Option<&mut [u8]>, mut len: i32) -> i32 {
153 let ispeek = len < 0;
154 let mut recover = false;
155
156 if kcp.rcv_queue.is_empty() {
157 return -1;
158 }
159
160 len = len.abs();
161
162 let peeksize = ikcp_peeksize(kcp);
163
164 match peeksize {
165 x if x < 0 => return -2,
166 x if x > len => return -3,
167 _ => {}
168 }
169
170 if (kcp.rcv_queue.len() as u32) >= kcp.rcv_wnd {
171 recover = true;
172 }
173
174 len = 0;
176 while let Some(seg) = kcp.rcv_queue.front() {
177 if let Some(data) = buffer {
178 memcpy(data, &seg.data, seg.data.len());
179 buffer = Some(&mut data[..seg.data.len()]);
180 }
181
182 len += seg.data.len() as i32;
183 let fragment = seg.frg as i32;
184
185 if !ispeek {
186 kcp.rcv_queue.pop_front();
187 }
188
189 if fragment == 0 {
190 break;
191 }
192 }
193
194 while let Some(seg) = kcp.rcv_buf.front() {
196 if seg.sn == kcp.rcv_nxt && (kcp.rcv_queue.len() as u32) < kcp.rcv_wnd {
197 let x = kcp.rcv_buf.pop_front().unwrap();
198 kcp.rcv_queue.push_back(x);
199 kcp.rcv_nxt += 1;
200 } else {
201 break;
202 }
203 }
204
205 if (kcp.rcv_queue.len() as u32) < kcp.rcv_wnd && recover {
207 kcp.probe |= IKCP_ASK_TELL;
210 }
211
212 len
213}
214
215pub fn ikcp_peeksize(kcp: &IKCPCB) -> i32 {
217 let mut length = 0;
218
219 let seg = match kcp.rcv_queue.front() {
220 Some(x) => x,
221 None => return -1,
222 };
223
224 if seg.frg == 0 {
225 return seg.data.len() as i32;
226 }
227
228 if (kcp.rcv_queue.len() as u32) < (seg.frg + 1) {
229 return -1;
230 }
231
232 for seg in &kcp.rcv_queue {
233 length += seg.data.len() as i32;
234 if seg.frg == 0 {
235 break;
236 }
237 }
238
239 length
240}
241
242pub fn ikcp_send(kcp: &mut IKCPCB, mut buffer: Option<&mut [u8]>, mut len: i32) -> i32 {
244 let mut count: i32;
245
246 let mut sent = 0;
247
248 if len < 0 {
249 return -1;
250 }
251
252 if kcp.stream {
254 if let Some(old) = kcp.snd_queue.back() {
255 if (old.data.len() as u32) < kcp.mss {
256 let capacity = (kcp.mss - (old.data.len() as u32)) as i32;
257 let extend = len.min(capacity);
258 let mut seg = ikcp_segment_new(((old.data.len() as i64) + (extend as i64)) as i32);
259
260 memcpy(&mut seg.data, &old.data, old.data.len());
261
262 if let Some(data) = buffer {
263 memcpy(&mut seg.data[old.data.len()..], data, extend as usize);
264 buffer = Some(&mut data[extend as usize..]);
265 }
266
267 seg.frg = 0;
268 len -= extend;
269 kcp.snd_queue.pop_back();
270 kcp.snd_queue.push_back(seg);
271 sent = extend;
272 }
273
274 if len <= 0 {
275 return sent;
276 }
277 }
278 }
279
280 count = match len <= (kcp.mss as i32) {
281 true => 1,
282 false => (((len as i64) + (kcp.mss as i64) - 1) / (kcp.mss as i64)) as i32,
283 };
284
285 if count >= (kcp.rcv_wnd as i32).min(255) {
286 if kcp.stream && sent > 0 {
287 return sent;
288 }
289
290 return -2;
291 }
292
293 if count == 0 {
294 count = 1;
295 }
296
297 for i in 0..count {
299 let size = len.min(kcp.mss as i32);
300
301 let mut seg = ikcp_segment_new(size);
302
303 if let Some(data) = buffer {
304 if size > 0 {
305 memcpy(&mut seg.data, data, size as usize);
306 }
307
308 buffer = Some(&mut data[..size as usize]);
309 }
310
311 seg.frg = match !kcp.stream {
312 true => (count - i - 1) as u32,
313 false => 0,
314 };
315
316 kcp.snd_queue.push_back(seg);
317
318 len -= size;
319 sent += size;
320 }
321
322 sent
323}
324
/// Fold one RTT sample into the smoothed RTT estimate and recompute the RTO.
///
/// Uses exponential smoothing: srtt moves 1/8 toward the sample, rttval 1/4
/// toward the deviation; rto = srtt + max(interval, 4 * rttval), clamped to
/// [rx_minrto, IKCP_RTO_MAX].
pub(crate) fn ikcp_update_ack(kcp: &mut IKCPCB, rtt: i32) {
    if kcp.rx_srtt == 0 {
        // First sample seeds the estimator directly.
        kcp.rx_srtt = rtt;
        kcp.rx_rttval = rtt / 2;
    } else {
        // Deviation computed in i64 to avoid overflow before taking abs().
        let mut delta = (rtt - kcp.rx_srtt) as i64;
        delta = delta.abs();
        kcp.rx_rttval = ((((3 * kcp.rx_rttval) as i64) + delta) / 4) as i32;
        kcp.rx_srtt = (7 * kcp.rx_srtt + rtt) / 8;
        // Keep srtt strictly positive so the RTO can never collapse to 0.
        kcp.rx_srtt = kcp.rx_srtt.max(1);
    }

    let rto = ((kcp.rx_srtt as i64) + (kcp.interval.max((4 * kcp.rx_rttval) as u32) as i64)) as i32;
    kcp.rx_rto = (rto as u32).clamp(kcp.rx_minrto as u32, IKCP_RTO_MAX) as i32;
}
341
342pub(crate) fn ikcp_shrink_buf(kcp: &mut IKCPCB) {
343 kcp.snd_una = match kcp.snd_buf.front() {
344 Some(seg) => seg.sn,
345 None => kcp.snd_nxt,
346 }
347}
348
349pub(crate) fn ikcp_parse_ack(kcp: &mut IKCPCB, sn: u32) {
350 if ikcp_timediff(sn, kcp.snd_una) < 0 || ikcp_timediff(sn, kcp.snd_nxt) >= 0 {
351 return;
352 }
353
354 for (i, seg) in kcp.snd_buf.iter().enumerate() {
355 if sn == seg.sn {
356 kcp.snd_buf.remove(i);
357 break;
358 }
359
360 if ikcp_timediff(sn, seg.sn) < 0 {
361 break;
362 }
363 }
364}
365
366pub(crate) fn ikcp_parse_una(kcp: &mut IKCPCB, una: u32) {
367 for (i, seg) in kcp.snd_buf.iter().enumerate() {
368 if ikcp_timediff(una, seg.sn) <= 0 {
369 kcp.snd_buf.drain(0..i);
370 return;
371 }
372 }
373
374 kcp.snd_buf.clear();
375}
376
377pub(crate) fn ikcp_parse_fastack(kcp: &mut IKCPCB, sn: u32, ts: u32) {
378 if ikcp_timediff(sn, kcp.snd_una) < 0 || ikcp_timediff(sn, kcp.snd_nxt) >= 0 {
379 return;
380 }
381
382 for seg in &mut kcp.snd_buf {
383 if ikcp_timediff(sn, seg.sn) < 0 {
384 break;
385 }
386
387 if sn != seg.sn && ikcp_timediff(ts, seg.ts) >= 0 {
388 seg.fastack += 1;
389 }
390 }
391}
392
393pub(crate) fn ikcp_ack_push(kcp: &mut IKCPCB, sn: u32, ts: u32) {
395 kcp.acklist.push((sn, ts));
396}
397
398pub(crate) fn ikcp_ack_get(kcp: &IKCPCB, p: i32, sn: &mut u32, ts: &mut u32) {
399 (*sn, *ts) = kcp.acklist[p as usize];
400}
401
/// Insert an incoming data segment into `rcv_buf` (kept ordered by sn,
/// duplicates dropped), then move any now-contiguous segments to `rcv_queue`.
pub(crate) fn ikcp_parse_data(kcp: &mut IKCPCB, newseg: IKCPSEG) {
    let sn = newseg.sn;
    let mut repeat = false;

    // Discard segments outside the receive window [rcv_nxt, rcv_nxt + rcv_wnd).
    if ikcp_timediff(sn, kcp.rcv_nxt + kcp.rcv_wnd) >= 0 || ikcp_timediff(sn, kcp.rcv_nxt) < 0 {
        return;
    }

    // Scan from the back (new segments usually sort near the end) to find
    // either a duplicate or the insertion point that keeps rcv_buf sorted.
    let mut index = kcp.rcv_buf.len();
    for (i, seg) in kcp.rcv_buf.iter().enumerate().rev() {
        if seg.sn == sn {
            repeat = true;
            break;
        }

        if ikcp_timediff(sn, seg.sn) > 0 {
            // First existing segment older than sn: insert right after it.
            index = i + 1;
            break;
        }
    }

    if !repeat {
        kcp.rcv_buf.insert(index, newseg);
    }

    // Promote the contiguous run starting at rcv_nxt into the application
    // queue, as long as the receive window has room.
    while let Some(seg) = kcp.rcv_buf.front() {
        if seg.sn == kcp.rcv_nxt && (kcp.rcv_queue.len() as u32) < kcp.rcv_wnd {
            let x = kcp.rcv_buf.pop_front().unwrap();
            kcp.rcv_queue.push_back(x);
            kcp.rcv_nxt += 1;
        } else {
            break;
        }
    }
}
439
/// Feed a raw datagram (one or more back-to-back KCP segments) into the
/// protocol state machine.
///
/// Returns 0 on success, -1 for a truncated packet or wrong conversation id,
/// -2 for a corrupt length field, -3 for an unknown command byte.
pub fn ikcp_input(kcp: &mut IKCPCB, mut data: &[u8], mut size: i64) -> i32 {
    let prev_una = kcp.snd_una;

    // Highest ack sn (and its ts) seen in this datagram, used afterwards for
    // fast-retransmit accounting.
    let mut maxack: u32 = 0;
    let mut latest_ts: u32 = 0;

    let mut flag = false;

    if size < (IKCP_OVERHEAD as i64) {
        return -1;
    }

    // Scratch header fields, reused for each segment in the datagram.
    let mut ts: u32 = 0;
    let mut sn: u32 = 0;
    let mut len: u32 = 0;
    let mut una: u32 = 0;
    let mut conv: u32 = 0;

    let mut wnd: u16 = 0;
    let mut cmd: u8 = 0;
    let mut frg: u8 = 0;

    loop {
        let mut seg: IKCPSEG;

        // Not enough bytes left for another 24-byte header: done.
        if size < (IKCP_OVERHEAD as i64) {
            break;
        }

        data = ikcp_decode32u(data, &mut conv);

        if conv != kcp.conv {
            return -1;
        }

        // Header layout: conv | cmd | frg | wnd | ts | sn | una | len.
        data = ikcp_decode8u(data, &mut cmd);
        data = ikcp_decode8u(data, &mut frg);
        data = ikcp_decode16u(data, &mut wnd);
        data = ikcp_decode32u(data, &mut ts);
        data = ikcp_decode32u(data, &mut sn);
        data = ikcp_decode32u(data, &mut una);
        data = ikcp_decode32u(data, &mut len);

        size -= IKCP_OVERHEAD as i64;

        // Payload must fit in the remaining bytes and be non-negative as i32.
        if size < (len as i64) || (len as i32) < 0 {
            return -2;
        }

        if !matches!(
            cmd,
            IKCP_CMD_PUSH | IKCP_CMD_ACK | IKCP_CMD_WASK | IKCP_CMD_WINS
        ) {
            return -3;
        }

        // Every segment carries the peer's receive window and cumulative una.
        kcp.rmt_wnd = wnd as u32;
        ikcp_parse_una(kcp, una);
        ikcp_shrink_buf(kcp);

        match cmd {
            IKCP_CMD_ACK => {
                // Update the RTT estimate only if the echoed ts is not ahead
                // of our clock.
                if ikcp_timediff(kcp.current, ts) >= 0 {
                    ikcp_update_ack(kcp, ikcp_timediff(kcp.current, ts));
                }

                ikcp_parse_ack(kcp, sn);
                ikcp_shrink_buf(kcp);
                // Track the largest acked sn with the freshest timestamp for
                // the fastack pass after the loop.
                if !flag {
                    flag = true;
                    maxack = sn;
                    latest_ts = ts;
                } else if ikcp_timediff(sn, maxack) > 0 && ikcp_timediff(ts, latest_ts) > 0 {
                    maxack = sn;
                    latest_ts = ts;
                }
            }

            IKCP_CMD_PUSH => {
                // Only accept data inside the receive window; always queue an
                // ack so the peer stops retransmitting.
                if ikcp_timediff(sn, kcp.rcv_nxt + kcp.rcv_wnd) < 0 {
                    ikcp_ack_push(kcp, sn, ts);
                    if ikcp_timediff(sn, kcp.rcv_nxt) >= 0 {
                        seg = ikcp_segment_new(len as i32);
                        seg.conv = conv;
                        seg.cmd = cmd as u32;
                        seg.frg = frg as u32;
                        seg.wnd = wnd as u32;
                        seg.ts = ts;
                        seg.sn = sn;
                        seg.una = una;

                        if len > 0 {
                            memcpy(&mut seg.data, data, len as usize);
                        }

                        ikcp_parse_data(kcp, seg);
                    }
                }
            }

            IKCP_CMD_WASK => {
                // Peer asked for our window size: answer on the next flush.
                kcp.probe |= IKCP_ASK_TELL;
            }

            IKCP_CMD_WINS => {
                // Window announcement: rmt_wnd was already updated above.
            }

            _ => return -3,
        }

        // Skip past this segment's payload to the next header.
        data = &data[len as usize..];
        size -= len as i64;
    }

    if flag {
        ikcp_parse_fastack(kcp, maxack, latest_ts);
    }

    // Congestion window growth: slow start below ssthresh, then
    // congestion avoidance, capped by the peer's advertised window.
    if ikcp_timediff(kcp.snd_una, prev_una) > 0 && kcp.cwnd < kcp.rmt_wnd {
        let mss = kcp.mss;
        if kcp.cwnd < kcp.ssthresh {
            kcp.cwnd += 1;
            kcp.incr += mss;
        } else {
            kcp.incr = kcp.incr.max(mss);
            kcp.incr += (mss * mss) / kcp.incr + (mss / 16);
            if (kcp.cwnd + 1) * mss <= kcp.incr {
                kcp.cwnd = (kcp.incr + mss - 1) / mss.max(1);
            }
        }

        if kcp.cwnd > kcp.rmt_wnd {
            kcp.cwnd = kcp.rmt_wnd;
            kcp.incr = kcp.rmt_wnd * mss;
        }
    }

    0
}
583
584pub(crate) fn ikcp_encode_seg<'a>(mut ptr: &'a mut [u8], seg: &IKCPSEG) -> &'a mut [u8] {
586 ptr = ikcp_encode32u(ptr, seg.conv);
587 ptr = ikcp_encode8u(ptr, seg.cmd as u8);
588 ptr = ikcp_encode8u(ptr, seg.frg as u8);
589 ptr = ikcp_encode16u(ptr, seg.wnd as u16);
590 ptr = ikcp_encode32u(ptr, seg.ts);
591 ptr = ikcp_encode32u(ptr, seg.sn);
592 ptr = ikcp_encode32u(ptr, seg.una);
593 ptr = ikcp_encode32u(ptr, seg.data.len() as u32);
594 ptr
595}
596
597pub(crate) fn ikcp_wnd_unused(kcp: &IKCPCB) -> i32 {
598 kcp.rcv_wnd.saturating_sub(kcp.rcv_queue.len() as u32) as i32
599}
600
/// Flush pending acks, window probes/replies, and data segments to the wire
/// via `output`. `buffer` is the staging area (must hold at least one MTU);
/// it is handed to `output` each time it would overflow the MTU.
pub fn ikcp_flush<T, F>(kcp: &mut IKCPCB, buffer: &mut [u8], user: &mut T, output: &mut F)
where
    F: FnMut(&mut [u8], i32, &IKCPCB, &mut T),
{
    let current = kcp.current;
    // Base address of `buffer`; memoffset(ptr, position) recovers how many
    // bytes have been staged so far.
    let position = buffer.as_ptr() as usize;
    let mut ptr = &mut *buffer;

    let mut size: i32;

    let mut cwnd: u32;

    // change: fast-retransmit happened; lost: an RTO retransmission happened.
    let mut change = 0;
    let mut lost = false;

    let mut seg: IKCPSEG;

    // ikcp_update must run at least once before flushing.
    if !kcp.updated {
        return;
    }

    // Template segment reused for acks and window probe/answer commands.
    seg = IKCPSEG {
        conv: kcp.conv,
        cmd: IKCP_CMD_ACK as u32,
        frg: 0,
        wnd: ikcp_wnd_unused(kcp) as u32,
        ts: 0,
        sn: 0,
        una: kcp.rcv_nxt,
        resendts: 0,
        rto: 0,
        fastack: 0,
        xmit: 0,
        data: vec![],
    };

    // Emit every queued acknowledgement, flushing whenever the next header
    // would not fit in the MTU.
    for i in 0..(kcp.acklist.len() as i32) {
        size = memoffset(ptr, position) as i32;
        if size + (IKCP_OVERHEAD as i32) > (kcp.mtu as i32) {
            ikcp_output(kcp, size, buffer, user, output);
            ptr = &mut *buffer;
        }

        ikcp_ack_get(kcp, i, &mut seg.sn, &mut seg.ts);
        ptr = ikcp_encode_seg(ptr, &seg);
    }

    kcp.acklist.clear();

    // Peer advertised a zero window: schedule probes with exponential backoff
    // between IKCP_PROBE_INIT and IKCP_PROBE_LIMIT.
    if kcp.rmt_wnd == 0 {
        if kcp.probe_wait == 0 {
            kcp.probe_wait = IKCP_PROBE_INIT;
            kcp.ts_probe = kcp.current + kcp.probe_wait;
        } else if ikcp_timediff(kcp.current, kcp.ts_probe) >= 0 {
            kcp.probe_wait = kcp.probe_wait.max(IKCP_PROBE_INIT);
            kcp.probe_wait += kcp.probe_wait / 2;
            kcp.probe_wait = kcp.probe_wait.min(IKCP_PROBE_LIMIT);
            kcp.ts_probe = kcp.current + kcp.probe_wait;
            kcp.probe |= IKCP_ASK_SEND;
        }
    } else {
        kcp.ts_probe = 0;
        kcp.probe_wait = 0;
    }

    // Window-probe request (ask the peer for its window size).
    if (kcp.probe & IKCP_ASK_SEND) != 0 {
        seg.cmd = IKCP_CMD_WASK as u32;
        size = memoffset(ptr, position) as i32;
        if size + (IKCP_OVERHEAD as i32) > (kcp.mtu as i32) {
            ikcp_output(kcp, size, buffer, user, output);
            ptr = &mut *buffer;
        }

        ptr = ikcp_encode_seg(ptr, &seg);
    }

    // Window-size answer (tell the peer our window size).
    if (kcp.probe & IKCP_ASK_TELL) != 0 {
        seg.cmd = IKCP_CMD_WINS as u32;
        size = memoffset(ptr, position) as i32;
        if size + (IKCP_OVERHEAD as i32) > (kcp.mtu as i32) {
            ikcp_output(kcp, size, buffer, user, output);
            ptr = &mut *buffer;
        }

        ptr = ikcp_encode_seg(ptr, &seg);
    }

    kcp.probe = 0;

    // Effective window: min of our send window, the peer's window, and
    // (unless disabled) the congestion window.
    cwnd = kcp.snd_wnd.min(kcp.rmt_wnd);
    if !kcp.nocwnd {
        cwnd = kcp.cwnd.min(cwnd);
    }

    // Move segments from snd_queue into the in-flight buffer while the
    // window has room, stamping them with fresh header state.
    while ikcp_timediff(kcp.snd_nxt, kcp.snd_una + cwnd) < 0 {
        if let Some(mut newseg) = kcp.snd_queue.pop_front() {
            newseg.conv = kcp.conv;
            newseg.cmd = IKCP_CMD_PUSH as u32;
            newseg.wnd = seg.wnd;
            newseg.ts = current;
            newseg.sn = kcp.snd_nxt;
            kcp.snd_nxt += 1;
            newseg.una = kcp.rcv_nxt;
            newseg.resendts = current;
            newseg.rto = kcp.rx_rto as u32;
            newseg.fastack = 0;
            newseg.xmit = 0;
            kcp.snd_buf.push_back(newseg);
        } else {
            break;
        }
    }

    // fastresend <= 0 disables fast retransmit (threshold effectively infinite).
    let resent = match kcp.fastresend > 0 {
        true => kcp.fastresend as u32,
        false => 0xffffffff,
    };

    // In normal mode, pad the first RTO by rto/8; nodelay mode does not.
    let rtomin = match kcp.nodelay == 0 {
        true => (kcp.rx_rto >> 3) as u32,
        false => 0,
    };

    // Transmit / retransmit every in-flight segment that is due.
    for i in 0..kcp.snd_buf.len() {
        let mut segment = &mut kcp.snd_buf[i];
        let mut needsend = false;
        if segment.xmit == 0 {
            // First transmission.
            needsend = true;
            segment.xmit += 1;
            segment.rto = kcp.rx_rto as u32;
            segment.resendts = current + segment.rto + rtomin;
        } else if ikcp_timediff(current, segment.resendts) >= 0 {
            // RTO expired: retransmit with backoff (doubling in normal mode,
            // +rto/2 or +rx_rto/2 in nodelay modes).
            needsend = true;
            segment.xmit += 1;
            kcp.xmit += 1;
            if kcp.nodelay == 0 {
                segment.rto += segment.rto.max(kcp.rx_rto as u32);
            } else {
                let step = match kcp.nodelay < 2 {
                    true => segment.rto as i32,
                    false => kcp.rx_rto,
                };

                segment.rto += (step / 2) as u32;
            }

            segment.resendts = current + segment.rto;
            lost = true;
        } else if segment.fastack >= resent
            && (segment.xmit <= (kcp.fastlimit as u32) || kcp.fastlimit <= 0)
        {
            // Enough later packets were acked past this one: fast retransmit.
            needsend = true;
            segment.xmit += 1;
            segment.fastack = 0;
            segment.resendts = current + segment.rto;
            change += 1;
        }

        if needsend {
            segment.ts = current;
            segment.wnd = seg.wnd;
            segment.una = kcp.rcv_nxt;

            size = memoffset(ptr, position) as i32;
            let need = (IKCP_OVERHEAD as i32) + (segment.data.len() as i32);

            if (size + need) > (kcp.mtu as i32) {
                ikcp_output(kcp, size, buffer, user, output);
                ptr = &mut *buffer;

                // Re-borrow: the output call above required releasing the
                // mutable borrow of the segment.
                segment = &mut kcp.snd_buf[i];
            }

            ptr = ikcp_encode_seg(ptr, segment);

            if !segment.data.is_empty() {
                memcpy(ptr, &segment.data, segment.data.len());
                ptr = &mut ptr[segment.data.len()..];
            }

            // Too many retransmissions of one segment: declare the link dead.
            if segment.xmit >= kcp.dead_link {
                kcp.state = true;
            }
        }
    }

    // Flush whatever is still staged in the buffer.
    size = memoffset(ptr, position) as i32;
    if size > 0 {
        ikcp_output(kcp, size, buffer, user, output);
    }

    // Fast retransmit occurred: halve ssthresh against in-flight count and
    // restart cwnd just above it.
    if change != 0 {
        let inflight = kcp.snd_nxt - kcp.snd_una;
        kcp.ssthresh = inflight / 2;
        kcp.ssthresh = kcp.ssthresh.max(IKCP_THRESH_MIN);
        kcp.cwnd = kcp.ssthresh + resent;
        kcp.incr = kcp.cwnd * kcp.mss;
    }

    // Timeout loss: collapse the congestion window to 1.
    if lost {
        kcp.ssthresh = cwnd / 2;
        kcp.ssthresh = kcp.ssthresh.max(IKCP_THRESH_MIN);
        kcp.cwnd = 1;
        kcp.incr = kcp.mss;
    }

    if kcp.cwnd < 1 {
        kcp.cwnd = 1;
        kcp.incr = kcp.mss;
    }
}
824
825pub fn ikcp_update<T, F>(
829 kcp: &mut IKCPCB,
830 current: u32,
831 buffer: &mut [u8],
832 user: &mut T,
833 output: &mut F,
834) where
835 F: FnMut(&mut [u8], i32, &IKCPCB, &mut T),
836{
837 let mut slap: i32;
838
839 kcp.current = current;
840
841 if !kcp.updated {
842 kcp.updated = true;
843 kcp.ts_flush = kcp.current;
844 }
845
846 slap = ikcp_timediff(kcp.current, kcp.ts_flush);
847
848 if !(-10000..10000).contains(&slap) {
849 kcp.ts_flush = kcp.current;
850 slap = 0;
851 }
852
853 if slap >= 0 {
854 kcp.ts_flush += kcp.interval;
855 if ikcp_timediff(kcp.current, kcp.ts_flush) >= 0 {
856 kcp.ts_flush = kcp.current + kcp.interval;
857 }
858
859 ikcp_flush(kcp, buffer, user, output);
860 }
861}
862
/// Earliest time (ms, same clock as `current`) at which `ikcp_update` should
/// next be called. Returns `current` itself when an update is already due.
pub fn ikcp_check(kcp: &IKCPCB, current: u32) -> u32 {
    let mut ts_flush = kcp.ts_flush;
    // Time until the earliest per-segment retransmission deadline.
    let mut tm_packet = i32::MAX;

    // Before the first ikcp_update there is no schedule: update immediately.
    if !kcp.updated {
        return current;
    }

    match ikcp_timediff(current, ts_flush) {
        // Clock drifted by 10 s or more: treat the flush as due now.
        diff if !(-10000..10000).contains(&diff) => ts_flush = current,
        // Flush deadline already passed.
        diff if diff >= 0 => return current,
        _ => {}
    }

    let tm_flush = ikcp_timediff(ts_flush, current);

    for seg in &kcp.snd_buf {
        match ikcp_timediff(seg.resendts, current) {
            // A retransmission is already overdue.
            diff if diff <= 0 => return current,
            diff if diff < tm_packet => tm_packet = diff,
            _ => {}
        }
    }

    // Wake at the nearest deadline, but never later than one interval.
    let minimal = (tm_packet.min(tm_flush) as u32).min(kcp.interval);

    current + minimal
}
899
900pub fn ikcp_setmtu(kcp: &mut IKCPCB, mtu: i32) -> bool {
902 if mtu < 50 || mtu < (IKCP_OVERHEAD as i32) {
903 return false;
904 }
905
906 kcp.mtu = mtu as u32;
907 kcp.mss = kcp.mtu - IKCP_OVERHEAD;
908 true
909}
910
911pub(crate) fn ikcp_interval(kcp: &mut IKCPCB, mut interval: i32) {
912 interval = interval.clamp(10, 5000);
913 kcp.interval = interval as u32;
914}
915
916pub fn ikcp_nodelay(kcp: &mut IKCPCB, nodelay: i32, interval: i32, resend: i32, nc: bool) {
922 if nodelay >= 0 {
923 kcp.nodelay = nodelay as u32;
924 kcp.rx_minrto = match nodelay {
925 0 => IKCP_RTO_MIN as i32,
926 _ => IKCP_RTO_NDL as i32,
927 };
928 }
929
930 if interval >= 0 {
931 ikcp_interval(kcp, interval);
932 }
933
934 if resend >= 0 {
935 kcp.fastresend = resend;
936 }
937
938 kcp.nocwnd = nc;
939}
940
941pub fn ikcp_wndsize(kcp: &mut IKCPCB, sndwnd: i32, rcvwnd: i32) {
943 if sndwnd > 0 {
944 kcp.snd_wnd = sndwnd as u32;
945 }
946
947 if rcvwnd > 0 {
948 kcp.rcv_wnd = (rcvwnd as u32).max(IKCP_WND_RCV);
950 }
951}
952
953pub fn ikcp_waitsnd(kcp: &IKCPCB) -> i32 {
955 (kcp.snd_buf.len() + kcp.snd_queue.len()) as i32
956}
957
958pub fn ikcp_getconv(kcp: &IKCPCB) -> u32 {
960 kcp.conv
961}
962
963pub fn ikcp_fastlimit(kcp: &mut IKCPCB, fastlimit: i32) {
964 kcp.fastlimit = fastlimit
965}
966
967pub fn ikcp_stream(kcp: &mut IKCPCB, stream: bool) {
968 kcp.stream = stream
969}
970
/// One KCP segment: a 24-byte wire header plus payload, also carrying
/// retransmission bookkeeping while it sits in `snd_buf`.
pub struct IKCPSEG {
    pub(crate) conv: u32,     // conversation id (must match on both peers)
    pub(crate) cmd: u32,      // command: PUSH / ACK / WASK / WINS
    pub(crate) frg: u32,      // fragments remaining after this one (0 = last)
    pub(crate) wnd: u32,      // sender's free receive-window slots
    pub(crate) ts: u32,       // timestamp when (re)sent, echoed back in acks
    pub(crate) sn: u32,       // sequence number
    pub(crate) una: u32,      // sender's rcv_nxt (cumulative ack)
    pub(crate) resendts: u32, // next retransmission deadline (local only)
    pub(crate) rto: u32,      // current per-segment RTO (local only)
    pub(crate) fastack: u32,  // acks seen for later segments (local only)
    pub(crate) xmit: u32,     // transmission count (local only)
    pub(crate) data: Vec<u8>, // payload bytes
}
985
// Read-only accessors mirroring the segment's fields; see the field comments
// on `IKCPSEG` for their meanings.
impl IKCPSEG {
    /// Conversation id.
    pub fn conv(&self) -> u32 {
        self.conv
    }
    /// Command byte (PUSH / ACK / WASK / WINS).
    pub fn cmd(&self) -> u32 {
        self.cmd
    }
    /// Fragments remaining after this one (0 = final fragment).
    pub fn frg(&self) -> u32 {
        self.frg
    }
    /// Advertised free receive-window slots.
    pub fn wnd(&self) -> u32 {
        self.wnd
    }
    /// Send timestamp echoed in acknowledgements.
    pub fn ts(&self) -> u32 {
        self.ts
    }
    /// Sequence number.
    pub fn sn(&self) -> u32 {
        self.sn
    }
    /// Sender's cumulative acknowledgement (its rcv_nxt).
    pub fn una(&self) -> u32 {
        self.una
    }
    /// Next retransmission deadline.
    pub fn resendts(&self) -> u32 {
        self.resendts
    }
    /// Current per-segment retransmission timeout.
    pub fn rto(&self) -> u32 {
        self.rto
    }
    /// How many acks for later segments have skipped this one.
    pub fn fastack(&self) -> u32 {
        self.fastack
    }
    /// Number of times this segment has been transmitted.
    pub fn xmit(&self) -> u32 {
        self.xmit
    }
    /// Payload bytes.
    pub fn data(&self) -> &Vec<u8> {
        &self.data
    }
}
1024
/// The KCP control block: all state for one reliable conversation.
pub struct IKCPCB {
    pub(crate) conv: u32, // conversation id; input packets with another id are rejected
    pub(crate) mtu: u32,  // maximum transmission unit for outgoing datagrams
    pub(crate) mss: u32,  // maximum segment payload = mtu - IKCP_OVERHEAD
    pub(crate) state: bool, // true once a segment hit dead_link retransmissions

    pub(crate) snd_una: u32, // first unacknowledged sequence number
    pub(crate) snd_nxt: u32, // next sequence number to assign
    pub(crate) rcv_nxt: u32, // next sequence number expected from the peer

    pub(crate) ssthresh: u32, // slow-start threshold (segments)

    pub(crate) rx_rttval: i32, // smoothed RTT deviation
    pub(crate) rx_srtt: i32,   // smoothed round-trip time
    pub(crate) rx_rto: i32,    // current retransmission timeout
    pub(crate) rx_minrto: i32, // lower bound for the RTO

    pub(crate) snd_wnd: u32, // configured send window (segments)
    pub(crate) rcv_wnd: u32, // configured receive window (segments)
    pub(crate) rmt_wnd: u32, // peer's last advertised receive window
    pub(crate) cwnd: u32,    // congestion window (segments)
    pub(crate) probe: u32,   // pending probe flags (IKCP_ASK_SEND / IKCP_ASK_TELL)

    pub(crate) current: u32,  // last clock value passed to ikcp_update (ms)
    pub(crate) interval: u32, // internal flush interval (ms)
    pub(crate) ts_flush: u32, // next scheduled flush time
    pub(crate) xmit: u32,     // total retransmission count

    pub(crate) nodelay: u32,  // 0 = off, 1/2 = progressively gentler RTO backoff
    pub(crate) updated: bool, // whether ikcp_update has been called yet

    pub(crate) ts_probe: u32,   // when to send the next zero-window probe
    pub(crate) probe_wait: u32, // current probe backoff (ms)

    pub(crate) dead_link: u32, // retransmissions per segment before state = true
    pub(crate) incr: u32,      // congestion-avoidance byte accumulator (cwnd * mss)

    pub(crate) snd_queue: VecDeque<IKCPSEG>, // user data awaiting a window slot
    pub(crate) rcv_queue: VecDeque<IKCPSEG>, // in-order data ready for ikcp_recv
    pub(crate) snd_buf: VecDeque<IKCPSEG>,   // in-flight segments, ordered by sn
    pub(crate) rcv_buf: VecDeque<IKCPSEG>,   // out-of-order segments, ordered by sn
    pub(crate) acklist: Vec<(u32, u32)>,     // pending (sn, ts) acks for next flush
    pub(crate) fastresend: i32, // skipped-ack threshold for fast retransmit (<=0 off)
    pub(crate) fastlimit: i32,  // max fast retransmissions per segment (<=0 unlimited)

    pub(crate) nocwnd: bool, // true disables congestion control
    pub(crate) stream: bool, // true enables stream (coalescing) mode
}
1073
// Read-only accessors mirroring the control block's fields; see the field
// comments on `IKCPCB` for their meanings.
impl IKCPCB {
    /// Conversation id.
    pub fn conv(&self) -> u32 {
        self.conv
    }
    /// Maximum transmission unit.
    pub fn mtu(&self) -> u32 {
        self.mtu
    }
    /// Maximum segment payload size.
    pub fn mss(&self) -> u32 {
        self.mss
    }
    /// True once the link has been declared dead (too many retransmissions).
    pub fn state(&self) -> bool {
        self.state
    }

    /// First unacknowledged sequence number.
    pub fn snd_una(&self) -> u32 {
        self.snd_una
    }
    /// Next sequence number to assign.
    pub fn snd_nxt(&self) -> u32 {
        self.snd_nxt
    }
    /// Next sequence number expected from the peer.
    pub fn rcv_nxt(&self) -> u32 {
        self.rcv_nxt
    }

    /// Slow-start threshold.
    pub fn ssthresh(&self) -> u32 {
        self.ssthresh
    }

    /// Smoothed RTT deviation.
    pub fn rx_rttval(&self) -> i32 {
        self.rx_rttval
    }
    /// Smoothed round-trip time.
    pub fn rx_srtt(&self) -> i32 {
        self.rx_srtt
    }
    /// Current retransmission timeout.
    pub fn rx_rto(&self) -> i32 {
        self.rx_rto
    }
    /// Lower bound for the RTO.
    pub fn rx_minrto(&self) -> i32 {
        self.rx_minrto
    }

    /// Configured send window (segments).
    pub fn snd_wnd(&self) -> u32 {
        self.snd_wnd
    }
    /// Configured receive window (segments).
    pub fn rcv_wnd(&self) -> u32 {
        self.rcv_wnd
    }
    /// Peer's last advertised receive window.
    pub fn rmt_wnd(&self) -> u32 {
        self.rmt_wnd
    }
    /// Congestion window (segments).
    pub fn cwnd(&self) -> u32 {
        self.cwnd
    }
    /// Pending window-probe flags.
    pub fn probe(&self) -> u32 {
        self.probe
    }

    /// Last clock value passed to `ikcp_update`.
    pub fn current(&self) -> u32 {
        self.current
    }
    /// Internal flush interval (ms).
    pub fn interval(&self) -> u32 {
        self.interval
    }
    /// Next scheduled flush time.
    pub fn ts_flush(&self) -> u32 {
        self.ts_flush
    }
    /// Total retransmission count.
    pub fn xmit(&self) -> u32 {
        self.xmit
    }

    /// Nodelay mode (0 off, 1/2 increasingly aggressive).
    pub fn nodelay(&self) -> u32 {
        self.nodelay
    }
    /// Whether `ikcp_update` has been called yet.
    pub fn updated(&self) -> bool {
        self.updated
    }

    /// When the next zero-window probe is due.
    pub fn ts_probe(&self) -> u32 {
        self.ts_probe
    }
    /// Current probe backoff (ms).
    pub fn probe_wait(&self) -> u32 {
        self.probe_wait
    }

    /// Retransmission limit per segment before the link is declared dead.
    pub fn dead_link(&self) -> u32 {
        self.dead_link
    }
    /// Congestion-avoidance byte accumulator.
    pub fn incr(&self) -> u32 {
        self.incr
    }

    /// User data awaiting a window slot.
    pub fn snd_queue(&self) -> &VecDeque<IKCPSEG> {
        &self.snd_queue
    }
    /// In-order data ready for `ikcp_recv`.
    pub fn rcv_queue(&self) -> &VecDeque<IKCPSEG> {
        &self.rcv_queue
    }
    /// In-flight segments.
    pub fn snd_buf(&self) -> &VecDeque<IKCPSEG> {
        &self.snd_buf
    }
    /// Out-of-order segments not yet promoted to `rcv_queue`.
    pub fn rcv_buf(&self) -> &VecDeque<IKCPSEG> {
        &self.rcv_buf
    }
    /// Pending (sn, ts) acknowledgements.
    pub fn acklist(&self) -> &Vec<(u32, u32)> {
        &self.acklist
    }
    /// Skipped-ack threshold for fast retransmit.
    pub fn fastresend(&self) -> i32 {
        self.fastresend
    }
    /// Maximum fast retransmissions per segment.
    pub fn fastlimit(&self) -> i32 {
        self.fastlimit
    }

    /// True when congestion control is disabled.
    pub fn nocwnd(&self) -> bool {
        self.nocwnd
    }
    /// True when stream (coalescing) mode is enabled.
    pub fn stream(&self) -> bool {
        self.stream
    }
}