// kcp_rust/kcp.rs — KCP reliable-ARQ protocol implementation in Rust.

1use std::collections::VecDeque;
2
pub const IKCP_RTO_NDL: u32 = 30; // no delay min rto
pub const IKCP_RTO_MIN: u32 = 100; // normal min rto
pub const IKCP_RTO_DEF: u32 = 200; // initial/default rto
pub const IKCP_RTO_MAX: u32 = 60000; // hard upper bound for rto
pub const IKCP_CMD_PUSH: u8 = 81; // cmd: push data
pub const IKCP_CMD_ACK: u8 = 82; // cmd: ack
pub const IKCP_CMD_WASK: u8 = 83; // cmd: window probe (ask)
pub const IKCP_CMD_WINS: u8 = 84; // cmd: window size (tell)
pub const IKCP_ASK_SEND: u32 = 1; // need to send IKCP_CMD_WASK
pub const IKCP_ASK_TELL: u32 = 2; // need to send IKCP_CMD_WINS
pub const IKCP_WND_SND: u32 = 32; // default send window (in segments)
pub const IKCP_WND_RCV: u32 = 128; // must >= max fragment size
pub const IKCP_MTU_DEF: u32 = 1400; // default MTU in bytes
pub const IKCP_ACK_FAST: u32 = 3; // fast-ack threshold (not referenced in this file)
pub const IKCP_INTERVAL: u32 = 100; // default internal flush interval (ms)
pub const IKCP_OVERHEAD: u32 = 24; // wire size of one segment header in bytes
pub const IKCP_DEADLINK: u32 = 20; // retransmissions before a link is declared dead
pub const IKCP_THRESH_INIT: u32 = 2; // initial slow-start threshold
pub const IKCP_THRESH_MIN: u32 = 2; // minimum slow-start threshold
pub const IKCP_PROBE_INIT: u32 = 7000; // 7 secs to probe window size
pub const IKCP_PROBE_LIMIT: u32 = 120000; // up to 120 secs to probe window
pub const IKCP_FASTACK_LIMIT: u32 = 5; // max times to trigger fastack
25
// C-style memcpy: copy exactly `len` bytes from `src` into `dst`.
// Panics if either slice is shorter than `len`.
pub(crate) fn ikcp_memcpy(dst: &mut [u8], src: &[u8], len: usize) {
    let target = &mut dst[..len];
    target.copy_from_slice(&src[..len]);
}
29
// Byte distance from the base address `src` to the start of `dst`;
// mirrors C pointer subtraction (dst must point inside the buffer at `src`).
pub(crate) fn ikcp_memoffset(dst: &[u8], src: usize) -> usize {
    let here = dst.as_ptr() as usize;
    here - src
}
33
34// encode 8 bits unsigned int
// encode 8 bits unsigned int: write one byte, return the rest of the buffer
pub fn ikcp_encode8u(p: &mut [u8], c: u8) -> &mut [u8] {
    let (head, tail) = p.split_at_mut(1);
    head[0] = c;
    tail
}
39
40// decode 8 bits unsigned int
// decode 8 bits unsigned int: read one byte, return the advanced slice
pub fn ikcp_decode8u<'a>(p: &'a [u8], c: &mut u8) -> &'a [u8] {
    let (head, tail) = p.split_at(1);
    *c = head[0];
    tail
}
45
46// encode 16 bits unsigned int (lsb)
// encode 16 bits unsigned int (little endian), return the rest of the buffer
pub fn ikcp_encode16u(p: &mut [u8], w: u16) -> &mut [u8] {
    let (head, tail) = p.split_at_mut(2);
    head.copy_from_slice(&w.to_le_bytes());
    tail
}
51
52// decode 16 bits unsigned int (lsb)
// decode 16 bits unsigned int (little endian), return the advanced slice
pub fn ikcp_decode16u<'a>(p: &'a [u8], w: &mut u16) -> &'a [u8] {
    let mut raw = [0u8; 2];
    raw.copy_from_slice(&p[..2]);
    *w = u16::from_le_bytes(raw);
    &p[2..]
}
57
58// encode 32 bits unsigned int (lsb)
// encode 32 bits unsigned int (little endian), return the rest of the buffer
pub fn ikcp_encode32u(p: &mut [u8], l: u32) -> &mut [u8] {
    let (head, tail) = p.split_at_mut(4);
    head.copy_from_slice(&l.to_le_bytes());
    tail
}
63
64// decode 32 bits unsigned int (lsb)
// decode 32 bits unsigned int (little endian), return the advanced slice
pub fn ikcp_decode32u<'a>(p: &'a [u8], l: &mut u32) -> &'a [u8] {
    let mut raw = [0u8; 4];
    raw.copy_from_slice(&p[..4]);
    *l = u32::from_le_bytes(raw);
    &p[4..]
}
69
// Signed difference of two wrapping 32-bit timestamps / sequence numbers.
pub const fn ikcp_timediff(later: u32, earlier: u32) -> i32 {
    let delta = later.wrapping_sub(earlier);
    delta as i32
}
73
74// allocate a new kcp segment
75pub(crate) fn ikcp_segment_new(size: i32) -> IKCPSEG {
76    IKCPSEG {
77        conv: 0,
78        cmd: 0,
79        frg: 0,
80        wnd: 0,
81        ts: 0,
82        sn: 0,
83        una: 0,
84        resendts: 0,
85        rto: 0,
86        fastack: 0,
87        xmit: 0,
88        data: vec![0; size as usize],
89    }
90}
91
92// output segment
93pub(crate) fn ikcp_output<T, F>(
94    kcp: &IKCPCB,
95    size: i32,
96    data: &mut [u8],
97    user: &mut T,
98    output: &mut F,
99) where
100    F: FnMut(&mut [u8], i32, &IKCPCB, &mut T),
101{
102    if size == 0 {
103        return;
104    }
105
106    (*output)(data, size, kcp, user);
107}
108
109// create a new kcp control object, 'conv' must equal in two endpoint
110pub const fn ikcp_create(conv: u32) -> IKCPCB {
111    IKCPCB {
112        conv,
113        snd_una: 0,
114        snd_nxt: 0,
115        rcv_nxt: 0,
116        ts_probe: 0,
117        probe_wait: 0,
118        snd_wnd: IKCP_WND_SND,
119        rcv_wnd: IKCP_WND_RCV,
120        rmt_wnd: IKCP_WND_RCV,
121        cwnd: 0,
122        incr: 0,
123        probe: 0,
124        mtu: IKCP_MTU_DEF,
125        mss: IKCP_MTU_DEF - IKCP_OVERHEAD,
126        stream: false,
127        snd_queue: VecDeque::new(),
128        rcv_queue: VecDeque::new(),
129        snd_buf: VecDeque::new(),
130        rcv_buf: VecDeque::new(),
131        state: false,
132        acklist: vec![],
133        rx_srtt: 0,
134        rx_rttval: 0,
135        rx_rto: IKCP_RTO_DEF as i32,
136        rx_minrto: IKCP_RTO_MIN as i32,
137        current: 0,
138        interval: IKCP_INTERVAL,
139        ts_flush: IKCP_INTERVAL,
140        nodelay: 0,
141        updated: false,
142        ssthresh: IKCP_THRESH_INIT,
143        fastresend: 0,
144        fastlimit: IKCP_FASTACK_LIMIT as i32,
145        nocwnd: false,
146        xmit: 0,
147        dead_link: IKCP_DEADLINK,
148    }
149}
150
151// user/upper level recv: returns size, returns below zero for EAGAIN
152pub fn ikcp_recv(kcp: &mut IKCPCB, mut buffer: Option<&mut [u8]>, mut len: i32) -> i32 {
153    let ispeek = len < 0;
154    let peeksize: i32;
155    let mut recover = false;
156
157    if kcp.rcv_queue.is_empty() {
158        return -1;
159    }
160
161    len = len.abs();
162
163    peeksize = ikcp_peeksize(kcp);
164
165    match peeksize {
166        x if x < 0 => return -2,
167        x if x > len => return -3,
168        _ => {}
169    }
170
171    if (kcp.rcv_queue.len() as u32) >= kcp.rcv_wnd {
172        recover = true;
173    }
174
175    // merge fragment
176    len = 0;
177    while let Some(seg) = kcp.rcv_queue.front() {
178        let fragment: i32;
179
180        if let Some(data) = buffer {
181            ikcp_memcpy(data, &seg.data, seg.data.len());
182            buffer = Some(&mut data[..seg.data.len()]);
183        }
184
185        len += seg.data.len() as i32;
186        fragment = seg.frg as i32;
187
188        if !ispeek {
189            kcp.rcv_queue.pop_front();
190        }
191
192        if fragment == 0 {
193            break;
194        }
195    }
196
197    // move available data from rcv_buf -> rcv_queue
198    while let Some(seg) = kcp.rcv_buf.front() {
199        if seg.sn == kcp.rcv_nxt && (kcp.rcv_queue.len() as u32) < kcp.rcv_wnd {
200            let x = kcp.rcv_buf.pop_front().unwrap();
201            kcp.rcv_queue.push_back(x);
202            kcp.rcv_nxt += 1;
203        } else {
204            break;
205        }
206    }
207
208    // fast recover
209    if (kcp.rcv_queue.len() as u32) < kcp.rcv_wnd && recover {
210        // ready to send back IKCP_CMD_WINS in ikcp_flush
211        // tell remote my window size
212        kcp.probe |= IKCP_ASK_TELL;
213    }
214
215    len
216}
217
218// check the size of next message in the recv queue
219pub fn ikcp_peeksize(kcp: &IKCPCB) -> i32 {
220    let mut length = 0;
221
222    let seg = match kcp.rcv_queue.front() {
223        Some(x) => x,
224        None => return -1,
225    };
226
227    if seg.frg == 0 {
228        return seg.data.len() as i32;
229    }
230
231    if (kcp.rcv_queue.len() as u32) < (seg.frg + 1) {
232        return -1;
233    }
234
235    for seg in &kcp.rcv_queue {
236        length += seg.data.len() as i32;
237        if seg.frg == 0 {
238            break;
239        }
240    }
241
242    length
243}
244
245// user/upper level send, returns below zero for error
246pub fn ikcp_send(kcp: &mut IKCPCB, mut buffer: Option<&mut [u8]>, mut len: i32) -> i32 {
247    let mut count: i32;
248
249    let mut sent = 0;
250
251    if len < 0 {
252        return -1;
253    }
254
255    // append to previous segment in streaming mode (if possible)
256    if kcp.stream {
257        if let Some(old) = kcp.snd_queue.back() {
258            if (old.data.len() as u32) < kcp.mss {
259                let capacity = (kcp.mss - (old.data.len() as u32)) as i32;
260                let extend = len.min(capacity);
261                let mut seg = ikcp_segment_new(((old.data.len() as i64) + (extend as i64)) as i32);
262
263                ikcp_memcpy(&mut seg.data, &old.data, old.data.len());
264
265                if let Some(data) = buffer {
266                    ikcp_memcpy(&mut seg.data[old.data.len()..], data, extend as usize);
267                    buffer = Some(&mut data[extend as usize..]);
268                }
269
270                seg.frg = 0;
271                len -= extend;
272                kcp.snd_queue.pop_back();
273                kcp.snd_queue.push_back(seg);
274                sent = extend;
275            }
276
277            if len <= 0 {
278                return sent;
279            }
280        }
281    }
282
283    count = match len <= (kcp.mss as i32) {
284        true => 1,
285        false => (((len as i64) + (kcp.mss as i64) - 1) / (kcp.mss as i64)) as i32,
286    };
287
288    if count >= (kcp.rcv_wnd as i32).min(255) {
289        if kcp.stream && sent > 0 {
290            return sent;
291        }
292
293        return -2;
294    }
295
296    if count == 0 {
297        count = 1;
298    }
299
300    // fragment
301    for i in 0..count {
302        let size = len.min(kcp.mss as i32);
303
304        let mut seg = ikcp_segment_new(size);
305
306        if let Some(data) = buffer {
307            if size > 0 {
308                ikcp_memcpy(&mut seg.data, data, size as usize);
309            }
310
311            buffer = Some(&mut data[..size as usize]);
312        }
313
314        seg.frg = match !kcp.stream {
315            true => (count - i - 1) as u32,
316            false => 0,
317        };
318
319        kcp.snd_queue.push_back(seg);
320
321        len -= size;
322        sent += size;
323    }
324
325    sent
326}
327
328// parse ack
329pub(crate) fn ikcp_update_ack(kcp: &mut IKCPCB, rtt: i32) {
330    let rto: i32;
331    if kcp.rx_srtt == 0 {
332        kcp.rx_srtt = rtt;
333        kcp.rx_rttval = rtt / 2;
334    } else {
335        let mut delta = (rtt - kcp.rx_srtt) as i64;
336        delta = delta.abs();
337        kcp.rx_rttval = ((((3 * kcp.rx_rttval) as i64) + delta) / 4) as i32;
338        kcp.rx_srtt = (7 * kcp.rx_srtt + rtt) / 8;
339        kcp.rx_srtt = kcp.rx_srtt.max(1);
340    }
341
342    rto = ((kcp.rx_srtt as i64) + (kcp.interval.max((4 * kcp.rx_rttval) as u32) as i64)) as i32;
343    kcp.rx_rto = (rto as u32).clamp(kcp.rx_minrto as u32, IKCP_RTO_MAX) as i32;
344}
345
346pub(crate) fn ikcp_shrink_buf(kcp: &mut IKCPCB) {
347    kcp.snd_una = match kcp.snd_buf.front() {
348        Some(seg) => seg.sn,
349        None => kcp.snd_nxt,
350    }
351}
352
353pub(crate) fn ikcp_parse_ack(kcp: &mut IKCPCB, sn: u32) {
354    if ikcp_timediff(sn, kcp.snd_una) < 0 || ikcp_timediff(sn, kcp.snd_nxt) >= 0 {
355        return;
356    }
357
358    for (i, seg) in kcp.snd_buf.iter().enumerate() {
359        if sn == seg.sn {
360            kcp.snd_buf.remove(i);
361            break;
362        }
363
364        if ikcp_timediff(sn, seg.sn) < 0 {
365            break;
366        }
367    }
368}
369
370pub(crate) fn ikcp_parse_una(kcp: &mut IKCPCB, una: u32) {
371    for (i, seg) in kcp.snd_buf.iter().enumerate() {
372        if ikcp_timediff(una, seg.sn) <= 0 {
373            kcp.snd_buf.drain(0..i);
374            return;
375        }
376    }
377
378    kcp.snd_buf.clear();
379}
380
381pub(crate) fn ikcp_parse_fastack(kcp: &mut IKCPCB, sn: u32, ts: u32) {
382    if ikcp_timediff(sn, kcp.snd_una) < 0 || ikcp_timediff(sn, kcp.snd_nxt) >= 0 {
383        return;
384    }
385
386    for seg in &mut kcp.snd_buf {
387        if ikcp_timediff(sn, seg.sn) < 0 {
388            break;
389        }
390
391        if sn != seg.sn && ikcp_timediff(ts, seg.ts) >= 0 {
392            seg.fastack += 1;
393        }
394    }
395}
396
397// ack append
// Queue an (sn, ts) acknowledgement to be emitted by the next ikcp_flush.
pub(crate) fn ikcp_ack_push(kcp: &mut IKCPCB, sn: u32, ts: u32) {
    kcp.acklist.push((sn, ts));
}
401
402pub(crate) fn ikcp_ack_get(kcp: &IKCPCB, p: i32, sn: &mut u32, ts: &mut u32) {
403    (*sn, *ts) = kcp.acklist[p as usize];
404}
405
406// parse data
407pub(crate) fn ikcp_parse_data(kcp: &mut IKCPCB, newseg: IKCPSEG) {
408    let sn = newseg.sn;
409    let mut repeat = false;
410
411    if ikcp_timediff(sn, kcp.rcv_nxt + kcp.rcv_wnd) >= 0 || ikcp_timediff(sn, kcp.rcv_nxt) < 0 {
412        return;
413    }
414
415    let mut index = kcp.rcv_buf.len();
416    for (i, seg) in kcp.rcv_buf.iter().enumerate().rev() {
417        if seg.sn == sn {
418            repeat = true;
419            break;
420        }
421
422        if ikcp_timediff(sn, seg.sn) > 0 {
423            index = i + 1;
424            break;
425        }
426    }
427
428    if !repeat {
429        kcp.rcv_buf.insert(index, newseg);
430    }
431
432    // move available data from rcv_buf -> rcv_queue
433    while let Some(seg) = kcp.rcv_buf.front() {
434        if seg.sn == kcp.rcv_nxt && (kcp.rcv_queue.len() as u32) < kcp.rcv_wnd {
435            let x = kcp.rcv_buf.pop_front().unwrap();
436            kcp.rcv_queue.push_back(x);
437            kcp.rcv_nxt += 1;
438        } else {
439            break;
440        }
441    }
442}
443
444// when you received a low level packet (for example. UDP packet), call it
pub fn ikcp_input(kcp: &mut IKCPCB, mut data: &[u8], mut size: i64) -> i32 {
    // snd_una before processing; compared afterwards to detect newly acked data.
    let prev_una = kcp.snd_una;

    // Highest acked sn (and its ts) seen in this datagram, for fast-ack counting.
    let mut maxack: u32 = 0;
    let mut latest_ts: u32 = 0;

    let mut flag = false;

    if size < (IKCP_OVERHEAD as i64) {
        return -1;
    }

    // Header fields decoded per segment.
    let mut ts: u32 = 0;
    let mut sn: u32 = 0;
    let mut len: u32 = 0;
    let mut una: u32 = 0;
    let mut conv: u32 = 0;

    let mut wnd: u16 = 0;
    let mut cmd: u8 = 0;
    let mut frg: u8 = 0;

    // One UDP datagram may carry several KCP segments back to back.
    loop {
        let mut seg: IKCPSEG;

        if size < (IKCP_OVERHEAD as i64) {
            break;
        }

        data = ikcp_decode32u(data, &mut conv);

        // Every segment must belong to this conversation.
        if conv != kcp.conv {
            return -1;
        }

        data = ikcp_decode8u(data, &mut cmd);
        data = ikcp_decode8u(data, &mut frg);
        data = ikcp_decode16u(data, &mut wnd);
        data = ikcp_decode32u(data, &mut ts);
        data = ikcp_decode32u(data, &mut sn);
        data = ikcp_decode32u(data, &mut una);
        data = ikcp_decode32u(data, &mut len);

        size -= IKCP_OVERHEAD as i64;

        // Truncated payload or absurd length: malformed packet.
        if size < (len as i64) || (len as i32) < 0 {
            return -2;
        }

        if !matches!(
            cmd,
            IKCP_CMD_PUSH | IKCP_CMD_ACK | IKCP_CMD_WASK | IKCP_CMD_WINS
        ) {
            return -3;
        }

        // Every segment carries the peer's receive window and cumulative una.
        kcp.rmt_wnd = wnd as u32;
        ikcp_parse_una(kcp, una);
        ikcp_shrink_buf(kcp);

        match cmd {
            IKCP_CMD_ACK => {
                // Update the RTT estimate unless the timestamp is from the future.
                if ikcp_timediff(kcp.current, ts) >= 0 {
                    ikcp_update_ack(kcp, ikcp_timediff(kcp.current, ts));
                }

                ikcp_parse_ack(kcp, sn);
                ikcp_shrink_buf(kcp);
                // Track the highest acked sn in this datagram for fast-ack.
                if !flag {
                    flag = true;
                    maxack = sn;
                    latest_ts = ts;
                } else if ikcp_timediff(sn, maxack) > 0 && ikcp_timediff(ts, latest_ts) > 0 {
                    maxack = sn;
                    latest_ts = ts;
                }
            }

            IKCP_CMD_PUSH => {
                // Accept data only if it falls inside the receive window;
                // everything inside the window gets acked, even duplicates.
                if ikcp_timediff(sn, kcp.rcv_nxt + kcp.rcv_wnd) < 0 {
                    ikcp_ack_push(kcp, sn, ts);
                    if ikcp_timediff(sn, kcp.rcv_nxt) >= 0 {
                        seg = ikcp_segment_new(len as i32);
                        seg.conv = conv;
                        seg.cmd = cmd as u32;
                        seg.frg = frg as u32;
                        seg.wnd = wnd as u32;
                        seg.ts = ts;
                        seg.sn = sn;
                        seg.una = una;

                        if len > 0 {
                            ikcp_memcpy(&mut seg.data, data, len as usize);
                        }

                        ikcp_parse_data(kcp, seg);
                    }
                }
            }

            IKCP_CMD_WASK => {
                // ready to send back IKCP_CMD_WINS in ikcp_flush
                // tell remote my window size
                kcp.probe |= IKCP_ASK_TELL;
            }

            IKCP_CMD_WINS => {
                // do nothing: rmt_wnd was already updated above
            }

            _ => return -3,
        }

        // Skip the payload and continue with the next segment in the datagram.
        data = &data[len as usize..];
        size -= len as i64;
    }

    // Apply fast-ack bookkeeping once for the whole datagram.
    if flag {
        ikcp_parse_fastack(kcp, maxack, latest_ts);
    }

    // Congestion window growth when new data was acked: slow start below
    // ssthresh, congestion avoidance above it, never exceeding the remote window.
    if ikcp_timediff(kcp.snd_una, prev_una) > 0 && kcp.cwnd < kcp.rmt_wnd {
        let mss = kcp.mss;
        if kcp.cwnd < kcp.ssthresh {
            kcp.cwnd += 1;
            kcp.incr += mss;
        } else {
            kcp.incr = kcp.incr.max(mss);
            kcp.incr += (mss * mss) / kcp.incr + (mss / 16);
            if (kcp.cwnd + 1) * mss <= kcp.incr {
                kcp.cwnd = (kcp.incr + mss - 1) / mss.max(1);
            }
        }

        if kcp.cwnd > kcp.rmt_wnd {
            kcp.cwnd = kcp.rmt_wnd;
            kcp.incr = kcp.rmt_wnd * mss;
        }
    }

    0
}
587
588// ikcp_encode_seg
589pub(crate) fn ikcp_encode_seg<'a>(mut ptr: &'a mut [u8], seg: &IKCPSEG) -> &'a mut [u8] {
590    ptr = ikcp_encode32u(ptr, seg.conv);
591    ptr = ikcp_encode8u(ptr, seg.cmd as u8);
592    ptr = ikcp_encode8u(ptr, seg.frg as u8);
593    ptr = ikcp_encode16u(ptr, seg.wnd as u16);
594    ptr = ikcp_encode32u(ptr, seg.ts);
595    ptr = ikcp_encode32u(ptr, seg.sn);
596    ptr = ikcp_encode32u(ptr, seg.una);
597    ptr = ikcp_encode32u(ptr, seg.data.len() as u32);
598    ptr
599}
600
601pub(crate) fn ikcp_wnd_unused(kcp: &IKCPCB) -> i32 {
602    kcp.rcv_wnd.saturating_sub(kcp.rcv_queue.len() as u32) as i32
603}
604
605// flush pending data
pub fn ikcp_flush<T, F>(kcp: &mut IKCPCB, buffer: &mut [u8], user: &mut T, output: &mut F)
where
    F: FnMut(&mut [u8], i32, &IKCPCB, &mut T),
{
    let current = kcp.current;
    // Base address of `buffer`; combined with ikcp_memoffset it tells how
    // many bytes have been written so far through `ptr`.
    let position = buffer.as_ptr() as usize;
    let mut ptr = &mut *buffer;

    let mut size: i32;

    let resent: u32;
    let mut cwnd: u32;

    let rtomin: u32;

    // change: a fast resend happened; lost: a timeout retransmission happened.
    let mut change = 0;
    let mut lost = false;

    let mut seg: IKCPSEG;

    // 'ikcp_update' hasn't been called yet; nothing to flush.
    if !kcp.updated {
        return;
    }

    // Template segment reused for the ACK / WASK / WINS control packets.
    seg = IKCPSEG {
        conv: kcp.conv,
        cmd: IKCP_CMD_ACK as u32,
        frg: 0,
        wnd: ikcp_wnd_unused(kcp) as u32,
        ts: 0,
        sn: 0,
        una: kcp.rcv_nxt,
        resendts: 0,
        rto: 0,
        fastack: 0,
        xmit: 0,
        data: vec![],
    };

    // flush acknowledges
    for i in 0..(kcp.acklist.len() as i32) {
        size = ikcp_memoffset(ptr, position) as i32;
        // Emit the packet first if another header would overflow the MTU.
        if size + (IKCP_OVERHEAD as i32) > (kcp.mtu as i32) {
            ikcp_output(kcp, size, buffer, user, output);
            ptr = &mut *buffer;
        }

        ikcp_ack_get(kcp, i, &mut seg.sn, &mut seg.ts);
        ptr = ikcp_encode_seg(ptr, &seg);
    }

    kcp.acklist.clear();

    // probe window size (if remote window size equals zero)
    if kcp.rmt_wnd == 0 {
        if kcp.probe_wait == 0 {
            kcp.probe_wait = IKCP_PROBE_INIT;
            kcp.ts_probe = kcp.current + kcp.probe_wait;
        } else if ikcp_timediff(kcp.current, kcp.ts_probe) >= 0 {
            // Back off the probe interval by 1.5x, capped at IKCP_PROBE_LIMIT.
            kcp.probe_wait = kcp.probe_wait.max(IKCP_PROBE_INIT);
            kcp.probe_wait += kcp.probe_wait / 2;
            kcp.probe_wait = kcp.probe_wait.min(IKCP_PROBE_LIMIT);
            kcp.ts_probe = kcp.current + kcp.probe_wait;
            kcp.probe |= IKCP_ASK_SEND;
        }
    } else {
        kcp.ts_probe = 0;
        kcp.probe_wait = 0;
    }

    // flush window probing command (ask)
    if (kcp.probe & IKCP_ASK_SEND) != 0 {
        seg.cmd = IKCP_CMD_WASK as u32;
        size = ikcp_memoffset(ptr, position) as i32;
        if size + (IKCP_OVERHEAD as i32) > (kcp.mtu as i32) {
            ikcp_output(kcp, size, buffer, user, output);
            ptr = &mut *buffer;
        }

        ptr = ikcp_encode_seg(ptr, &seg);
    }

    // flush window size announcement (tell)
    if (kcp.probe & IKCP_ASK_TELL) != 0 {
        seg.cmd = IKCP_CMD_WINS as u32;
        size = ikcp_memoffset(ptr, position) as i32;
        if size + (IKCP_OVERHEAD as i32) > (kcp.mtu as i32) {
            ikcp_output(kcp, size, buffer, user, output);
            ptr = &mut *buffer;
        }

        ptr = ikcp_encode_seg(ptr, &seg);
    }

    kcp.probe = 0;

    // calculate window size
    cwnd = kcp.snd_wnd.min(kcp.rmt_wnd);
    if !kcp.nocwnd {
        cwnd = kcp.cwnd.min(cwnd);
    }

    // move data from snd_queue to snd_buf while the window has room
    while ikcp_timediff(kcp.snd_nxt, kcp.snd_una + cwnd) < 0 {
        if let Some(mut newseg) = kcp.snd_queue.pop_front() {
            newseg.conv = kcp.conv;
            newseg.cmd = IKCP_CMD_PUSH as u32;
            newseg.wnd = seg.wnd;
            newseg.ts = current;
            newseg.sn = kcp.snd_nxt;
            kcp.snd_nxt += 1;
            newseg.una = kcp.rcv_nxt;
            newseg.resendts = current;
            newseg.rto = kcp.rx_rto as u32;
            newseg.fastack = 0;
            newseg.xmit = 0;
            kcp.snd_buf.push_back(newseg);
        } else {
            break;
        }
    }

    // calculate resent: fast-resend threshold (u32::MAX disables it)
    resent = match kcp.fastresend > 0 {
        true => kcp.fastresend as u32,
        false => u32::MAX,
    };

    rtomin = match kcp.nodelay == 0 {
        true => (kcp.rx_rto >> 3) as u32,
        false => 0,
    };

    // flush data segments
    for i in 0..kcp.snd_buf.len() {
        let mut segment = &mut kcp.snd_buf[i];
        let mut needsend = false;
        if segment.xmit == 0 {
            // First transmission of this segment.
            needsend = true;
            segment.xmit += 1;
            segment.rto = kcp.rx_rto as u32;
            segment.resendts = current + segment.rto + rtomin;
        } else if ikcp_timediff(current, segment.resendts) >= 0 {
            // Retransmission timeout: back off the per-segment RTO.
            needsend = true;
            segment.xmit += 1;
            kcp.xmit += 1;
            if kcp.nodelay == 0 {
                segment.rto += segment.rto.max(kcp.rx_rto as u32);
            } else {
                let step = match kcp.nodelay < 2 {
                    true => segment.rto as i32,
                    false => kcp.rx_rto,
                };

                segment.rto += (step / 2) as u32;
            }

            segment.resendts = current + segment.rto;
            lost = true;
        } else if segment.fastack >= resent
            && (segment.xmit <= (kcp.fastlimit as u32) || kcp.fastlimit <= 0)
        {
            // Fast retransmit: skipped by enough later acks.
            needsend = true;
            segment.xmit += 1;
            segment.fastack = 0;
            segment.resendts = current + segment.rto;
            change += 1;
        }

        if needsend {
            let need: i32;
            segment.ts = current;
            segment.wnd = seg.wnd;
            segment.una = kcp.rcv_nxt;

            size = ikcp_memoffset(ptr, position) as i32;
            need = (IKCP_OVERHEAD as i32) + (segment.data.len() as i32);

            // Emit first if header + payload would overflow the MTU.
            if (size + need) > (kcp.mtu as i32) {
                ikcp_output(kcp, size, buffer, user, output);
                ptr = &mut *buffer;

                // re-borrow the segment after `buffer`/`kcp` were released
                segment = &mut kcp.snd_buf[i];
            }

            ptr = ikcp_encode_seg(ptr, segment);

            if !segment.data.is_empty() {
                ikcp_memcpy(ptr, &segment.data, segment.data.len());
                ptr = &mut ptr[segment.data.len()..];
            }

            // Too many retransmissions of a single segment: link is dead.
            if segment.xmit >= kcp.dead_link {
                kcp.state = true;
            }
        }
    }

    // flush any remaining bytes in the packet buffer
    size = ikcp_memoffset(ptr, position) as i32;
    if size > 0 {
        ikcp_output(kcp, size, buffer, user, output);
    }

    // update ssthresh after a fast resend: halve relative to in-flight data
    if change != 0 {
        let inflight = kcp.snd_nxt - kcp.snd_una;
        kcp.ssthresh = inflight / 2;
        kcp.ssthresh = kcp.ssthresh.max(IKCP_THRESH_MIN);
        kcp.cwnd = kcp.ssthresh + resent;
        kcp.incr = kcp.cwnd * kcp.mss;
    }

    // timeout loss: collapse the congestion window to one segment
    if lost {
        kcp.ssthresh = cwnd / 2;
        kcp.ssthresh = kcp.ssthresh.max(IKCP_THRESH_MIN);
        kcp.cwnd = 1;
        kcp.incr = kcp.mss;
    }

    if kcp.cwnd < 1 {
        kcp.cwnd = 1;
        kcp.incr = kcp.mss;
    }
}
832
833// update state (call it repeatedly, every 10ms-100ms), or you can ask
834// ikcp_check when to call it again (without ikcp_input/_send calling).
835// 'current' - current timestamp in millisec.
836pub fn ikcp_update<T, F>(
837    kcp: &mut IKCPCB,
838    current: u32,
839    buffer: &mut [u8],
840    user: &mut T,
841    output: &mut F,
842) where
843    F: FnMut(&mut [u8], i32, &IKCPCB, &mut T),
844{
845    let mut slap: i32;
846
847    kcp.current = current;
848
849    if !kcp.updated {
850        kcp.updated = true;
851        kcp.ts_flush = kcp.current;
852    }
853
854    slap = ikcp_timediff(kcp.current, kcp.ts_flush);
855
856    if !(-10000..10000).contains(&slap) {
857        kcp.ts_flush = kcp.current;
858        slap = 0;
859    }
860
861    if slap >= 0 {
862        kcp.ts_flush += kcp.interval;
863        if ikcp_timediff(kcp.current, kcp.ts_flush) >= 0 {
864            kcp.ts_flush = kcp.current + kcp.interval;
865        }
866
867        ikcp_flush(kcp, buffer, user, output);
868    }
869}
870
// Determine when you should invoke ikcp_update:
// returns the time (in millisec) at which ikcp_update should next be called,
// assuming no intervening ikcp_input/_send calls.
// You can call ikcp_update at that time instead of calling it repeatedly.
// Important to reduce unnecessary ikcp_update invocations.
// Use it to schedule ikcp_update (for example, when implementing an
// epoll-like mechanism, or to optimize ikcp_update when handling massive
// numbers of kcp connections).
879pub fn ikcp_check(kcp: &IKCPCB, current: u32) -> u32 {
880    let mut ts_flush = kcp.ts_flush;
881    let tm_flush: i32;
882    let mut tm_packet = i32::MAX;
883    let minimal: u32;
884
885    if !kcp.updated {
886        return current;
887    }
888
889    match ikcp_timediff(current, ts_flush) {
890        diff if !(-10000..10000).contains(&diff) => ts_flush = current,
891        diff if diff >= 0 => return current,
892        _ => {}
893    }
894
895    tm_flush = ikcp_timediff(ts_flush, current);
896
897    for seg in &kcp.snd_buf {
898        match ikcp_timediff(seg.resendts, current) {
899            diff if diff <= 0 => return current,
900            diff if diff < tm_packet => tm_packet = diff,
901            _ => {}
902        }
903    }
904
905    minimal = (tm_packet.min(tm_flush) as u32).min(kcp.interval);
906
907    current + minimal
908}
909
910// change MTU size, default is 1400
911pub fn ikcp_setmtu(kcp: &mut IKCPCB, mtu: i32) -> bool {
912    if mtu < 50 || mtu < (IKCP_OVERHEAD as i32) {
913        return false;
914    }
915
916    kcp.mtu = mtu as u32;
917    kcp.mss = kcp.mtu - IKCP_OVERHEAD;
918    true
919}
920
921pub fn ikcp_interval(kcp: &mut IKCPCB, mut interval: i32) {
922    if interval >= 0 {
923        interval = interval.clamp(10, 5000);
924        kcp.interval = interval as u32;
925    }
926}
927
928// fastest: ikcp_nodelay(kcp, 1, 20, 2, 1)
929// nodelay: 0: disable(default), 1: enable
930// interval: internal update timer interval in millisec, default is 100ms
931// resend: 0: disable fast resend(default), 1: enable fast resend
932// nc: false: normal congestion control(default), true: disable congestion control
933pub fn ikcp_nodelay(kcp: &mut IKCPCB, nodelay: i32, interval: i32, resend: i32, nc: bool) {
934    if nodelay >= 0 {
935        kcp.nodelay = nodelay as u32;
936        kcp.rx_minrto = match nodelay {
937            0 => IKCP_RTO_MIN as i32,
938            _ => IKCP_RTO_NDL as i32,
939        };
940    }
941
942    ikcp_interval(kcp, interval);
943
944    if resend >= 0 {
945        kcp.fastresend = resend;
946    }
947
948    kcp.nocwnd = nc;
949}
950
951// set maximum window size: sndwnd = 32, rcvwnd = 32 by default
952pub fn ikcp_wndsize(kcp: &mut IKCPCB, sndwnd: i32, rcvwnd: i32) {
953    if sndwnd > 0 {
954        kcp.snd_wnd = sndwnd as u32;
955    }
956
957    if rcvwnd > 0 {
958        // must >= max fragment size
959        kcp.rcv_wnd = (rcvwnd as u32).max(IKCP_WND_RCV);
960    }
961}
962
963// get how many packet is waiting to be sent
964pub fn ikcp_waitsnd(kcp: &IKCPCB) -> i32 {
965    (kcp.snd_buf.len() + kcp.snd_queue.len()) as i32
966}
967
968// read conv
// Returns the conversation id this control block was created with.
pub const fn ikcp_getconv(kcp: &IKCPCB) -> u32 {
    kcp.conv
}
972
// Manually set the dead-link state flag (true marks the link as dead).
pub fn ikcp_state(kcp: &mut IKCPCB, state: bool) {
    kcp.state = state;
}
976
977pub fn ikcp_rx_minrto(kcp: &mut IKCPCB, rx_minrto: i32) {
978    kcp.rx_minrto = rx_minrto.max(1);
979}
980
// Set the retransmission count after which the link is considered dead.
pub fn ikcp_dead_link(kcp: &mut IKCPCB, dead_link: u32) {
    kcp.dead_link = dead_link;
}
984
// Set the maximum number of fast-resend attempts per segment
// (values <= 0 remove the limit; see ikcp_flush).
pub fn ikcp_fastlimit(kcp: &mut IKCPCB, fastlimit: i32) {
    kcp.fastlimit = fastlimit;
}
988
// Toggle stream mode: sends are coalesced and message framing is dropped.
pub fn ikcp_stream(kcp: &mut IKCPCB, stream: bool) {
    kcp.stream = stream;
}
992
// A single KCP segment: wire header fields plus the payload bytes.
pub struct IKCPSEG {
    pub(crate) conv: u32,     // conversation id (must match on both peers)
    pub(crate) cmd: u32,      // command: push / ack / wask / wins
    pub(crate) frg: u32,      // fragment countdown (0 = last fragment)
    pub(crate) wnd: u32,      // sender's receive-window size
    pub(crate) ts: u32,       // timestamp when (re)sent
    pub(crate) sn: u32,       // sequence number
    pub(crate) una: u32,      // sender's unacknowledged sequence number
    pub(crate) resendts: u32, // next retransmission deadline
    pub(crate) rto: u32,      // per-segment retransmission timeout
    pub(crate) fastack: u32,  // times skipped by later acks (fast resend)
    pub(crate) xmit: u32,     // transmission attempts so far
    pub(crate) data: Vec<u8>, // payload
}
1007
// Read-only accessors exposing the segment's header fields and payload.
impl IKCPSEG {
    pub const fn conv(&self) -> u32 {
        self.conv
    }
    pub const fn cmd(&self) -> u32 {
        self.cmd
    }
    pub const fn frg(&self) -> u32 {
        self.frg
    }
    pub const fn wnd(&self) -> u32 {
        self.wnd
    }
    pub const fn ts(&self) -> u32 {
        self.ts
    }
    pub const fn sn(&self) -> u32 {
        self.sn
    }
    pub const fn una(&self) -> u32 {
        self.una
    }
    pub const fn resendts(&self) -> u32 {
        self.resendts
    }
    pub const fn rto(&self) -> u32 {
        self.rto
    }
    pub const fn fastack(&self) -> u32 {
        self.fastack
    }
    pub const fn xmit(&self) -> u32 {
        self.xmit
    }
    pub const fn data(&self) -> &Vec<u8> {
        &self.data
    }
}
1046
// The KCP control block: all connection state for one conversation.
pub struct IKCPCB {
    pub(crate) conv: u32,   // conversation id (must match on both peers)
    pub(crate) mtu: u32,    // maximum transmission unit in bytes
    pub(crate) mss: u32,    // maximum segment size = mtu - IKCP_OVERHEAD
    pub(crate) state: bool, // true once the link is considered dead

    pub(crate) snd_una: u32, // first unacknowledged sequence number
    pub(crate) snd_nxt: u32, // next sequence number to send
    pub(crate) rcv_nxt: u32, // next sequence number expected in order

    pub(crate) ssthresh: u32, // slow-start threshold

    pub(crate) rx_rttval: i32, // smoothed RTT deviation
    pub(crate) rx_srtt: i32,   // smoothed round-trip time
    pub(crate) rx_rto: i32,    // current retransmission timeout
    pub(crate) rx_minrto: i32, // lower bound for rx_rto

    pub(crate) snd_wnd: u32, // configured send window (segments)
    pub(crate) rcv_wnd: u32, // configured receive window (segments)
    pub(crate) rmt_wnd: u32, // last window size advertised by the peer
    pub(crate) cwnd: u32,    // congestion window (segments)
    pub(crate) probe: u32,   // pending probe flags (IKCP_ASK_SEND / IKCP_ASK_TELL)

    pub(crate) current: u32,  // latest clock value passed to ikcp_update
    pub(crate) interval: u32, // internal flush interval (ms)
    pub(crate) ts_flush: u32, // next scheduled flush time
    pub(crate) xmit: u32,     // total retransmission count

    pub(crate) nodelay: u32,  // nodelay mode (0 = off)
    pub(crate) updated: bool, // whether ikcp_update has been called yet

    pub(crate) ts_probe: u32,   // next window-probe time
    pub(crate) probe_wait: u32, // current probe backoff (ms)

    pub(crate) dead_link: u32, // xmit count after which the link is dead
    pub(crate) incr: u32,      // congestion-window growth accumulator (bytes)

    pub(crate) snd_queue: VecDeque<IKCPSEG>, // user data waiting to enter snd_buf
    pub(crate) rcv_queue: VecDeque<IKCPSEG>, // in-order data ready for ikcp_recv
    pub(crate) snd_buf: VecDeque<IKCPSEG>,   // segments in flight
    pub(crate) rcv_buf: VecDeque<IKCPSEG>,   // out-of-order segments received
    pub(crate) acklist: Vec<(u32, u32)>,     // pending (sn, ts) acks to send
    pub(crate) fastresend: i32, // duplicate-ack threshold for fast resend (0 = off)
    pub(crate) fastlimit: i32,  // max fast-resend attempts per segment

    pub(crate) nocwnd: bool, // true disables congestion control
    pub(crate) stream: bool, // stream mode: coalesce sends, no message framing
}
1095
// Read-only accessors mirroring the control block's internal fields;
// field meanings are documented on the struct definition.
impl IKCPCB {
    pub const fn conv(&self) -> u32 {
        self.conv
    }
    pub const fn mtu(&self) -> u32 {
        self.mtu
    }
    pub const fn mss(&self) -> u32 {
        self.mss
    }
    pub const fn state(&self) -> bool {
        self.state
    }

    pub const fn snd_una(&self) -> u32 {
        self.snd_una
    }
    pub const fn snd_nxt(&self) -> u32 {
        self.snd_nxt
    }
    pub const fn rcv_nxt(&self) -> u32 {
        self.rcv_nxt
    }

    pub const fn ssthresh(&self) -> u32 {
        self.ssthresh
    }

    pub const fn rx_rttval(&self) -> i32 {
        self.rx_rttval
    }
    pub const fn rx_srtt(&self) -> i32 {
        self.rx_srtt
    }
    pub const fn rx_rto(&self) -> i32 {
        self.rx_rto
    }
    pub const fn rx_minrto(&self) -> i32 {
        self.rx_minrto
    }

    pub const fn snd_wnd(&self) -> u32 {
        self.snd_wnd
    }
    pub const fn rcv_wnd(&self) -> u32 {
        self.rcv_wnd
    }
    pub const fn rmt_wnd(&self) -> u32 {
        self.rmt_wnd
    }
    pub const fn cwnd(&self) -> u32 {
        self.cwnd
    }
    pub const fn probe(&self) -> u32 {
        self.probe
    }

    pub const fn current(&self) -> u32 {
        self.current
    }
    pub const fn interval(&self) -> u32 {
        self.interval
    }
    pub const fn ts_flush(&self) -> u32 {
        self.ts_flush
    }
    pub const fn xmit(&self) -> u32 {
        self.xmit
    }

    pub const fn nodelay(&self) -> u32 {
        self.nodelay
    }
    pub const fn updated(&self) -> bool {
        self.updated
    }

    pub const fn ts_probe(&self) -> u32 {
        self.ts_probe
    }
    pub const fn probe_wait(&self) -> u32 {
        self.probe_wait
    }

    pub const fn dead_link(&self) -> u32 {
        self.dead_link
    }
    pub const fn incr(&self) -> u32 {
        self.incr
    }

    pub const fn snd_queue(&self) -> &VecDeque<IKCPSEG> {
        &self.snd_queue
    }
    pub const fn rcv_queue(&self) -> &VecDeque<IKCPSEG> {
        &self.rcv_queue
    }
    pub const fn snd_buf(&self) -> &VecDeque<IKCPSEG> {
        &self.snd_buf
    }
    pub const fn rcv_buf(&self) -> &VecDeque<IKCPSEG> {
        &self.rcv_buf
    }
    pub const fn acklist(&self) -> &Vec<(u32, u32)> {
        &self.acklist
    }
    pub const fn fastresend(&self) -> i32 {
        self.fastresend
    }
    pub const fn fastlimit(&self) -> i32 {
        self.fastlimit
    }

    pub const fn nocwnd(&self) -> bool {
        self.nocwnd
    }
    pub const fn stream(&self) -> bool {
        self.stream
    }
}