// mctp_estack/lib.rs
1// SPDX-License-Identifier: MIT OR Apache-2.0
2/*
3 * Copyright (c) 2024-2025 Code Construct
4 */
5
6//! # MCTP Stack
7//!
8//! This crate provides a MCTP stack that can be embedded in other programs
9//! or devices.
10//!
11//! A [`Router`] object lets programs use a [`Stack`] with
12//! MCTP transport binding links. Each *Port* handles transmitting and receiving
13//! packets independently. Messages destined for the stack's own EID will
14//! be passed to applications.
15//!
16//! Applications can create [`router::RouterAsyncListener`] and [`router::RouterAsyncReqChannel`]
17//! instances to communicate over MCTP. Those implement the standard [`mctp` crate](mctp)
18//! async traits.
19//!
20//! The IO-less [`Stack`] handles MCTP message formatting and parsing, independent
21//! of any particular MCTP transport binding.
22//!
23//! ## Configuration
24//!
25//! `mctp-estack` uses fixed sizes to be suitable on no-alloc platforms.
26//! These can be configured at build time, see [`config`]
27
28#![cfg_attr(not(any(feature = "std", test)), no_std)]
29#![forbid(unsafe_code)]
30#![allow(clippy::int_plus_one)]
31#![allow(clippy::too_many_arguments)]
32
33/// Re-exported so that callers can use the same `heapless` version.
34///
35/// `heapless::Vec` is currently an argument of `send_fill()` in transports.
36///
37/// TODO: will be replaced with something else, maybe `heapless::VecView` once
38/// released.
39pub use heapless::Vec;
40
41use heapless::FnvIndexMap;
42
43use mctp::{Eid, Error, MsgIC, MsgType, Result, Tag, TagValue};
44
45pub mod control;
46pub mod fragment;
47pub mod i2c;
48mod reassemble;
49pub mod router;
50pub mod serial;
51pub mod usb;
52#[macro_use]
53mod util;
54
55use fragment::{Fragmenter, SendOutput};
56use reassemble::Reassembler;
57pub use router::Router;
58
59use crate::fmt::*;
60pub(crate) use config::*;
61
/// Timeout for message reassembly.
///
/// Incomplete reassemblies older than this are dropped by [`Stack::update`].
/// In milliseconds.
const REASSEMBLY_EXPIRY_TIMEOUT: u32 = 6000;

/// Timeout for [`get_deferred()`](Stack::get_deferred).
///
/// Reassembled messages will remain available for this length of time
/// unless `finished_receive` etc is called.
/// In milliseconds.
pub const DEFERRED_TIMEOUT: u32 = 6000;

/// Timeout granularity.
///
/// Timeouts will be checked no more often than this interval (in milliseconds).
/// See [`Stack::update()`].
pub const TIMEOUT_INTERVAL: u32 = 100;

/// Size in bytes of the fixed MCTP transport header.
pub(crate) const HEADER_LEN: usize = 4;
81
/// Build-time configuration and defaults
///
/// To set a non-default value, set the `MCTP_ESTACK_...` environment variable
/// during the build. Those variables can be set in the `[env]`
/// section of `.cargo/config.toml`.
pub mod config {
    /// Maximum size of a MCTP message payload in bytes, default 1032
    ///
    /// This does not include the MCTP type byte.
    ///
    /// Customise with `MCTP_ESTACK_MAX_MESSAGE` environment variable.
    pub const MAX_PAYLOAD: usize =
        get_build_var!("MCTP_ESTACK_MAX_MESSAGE", 1032);

    /// Number of concurrent receive messages, default 4
    ///
    /// The number of in-progress message reassemblies is limited to `NUM_RECEIVE`.
    /// Total memory used for reassembly buffers is roughly
    /// `MAX_PAYLOAD` * `NUM_RECEIVE` bytes.
    ///
    /// Customise with `MCTP_ESTACK_NUM_RECEIVE` environment variable.
    pub const NUM_RECEIVE: usize = get_build_var!("MCTP_ESTACK_NUM_RECEIVE", 4);

    /// Number of outstanding waiting responses, default 64
    ///
    /// After a message is sent with Tag Owner (TO) bit set, the stack will accept
    /// response messages with the same tag and TO _unset_. `FLOWS` defines
    /// the number of slots available for pending responses.
    ///
    /// Customise with `MCTP_ESTACK_FLOWS` environment variable.
    /// Must be a power of two.
    pub const FLOWS: usize = get_build_var!("MCTP_ESTACK_FLOWS", 64);

    /// Maximum allowed MTU, default 255
    ///
    /// The largest MTU allowed for any link.
    ///
    /// Customise with `MCTP_ESTACK_MAX_MTU` environment variable.
    pub const MAX_MTU: usize = get_build_var!("MCTP_ESTACK_MAX_MTU", 255);
    // Compile-time sanity check: an MTU must fit a header plus a type byte.
    const _: () =
        assert!(MAX_MTU >= crate::HEADER_LEN + 1, "MAX_MTU too small");
}
123
/// A tag-owner flow entry, keyed by `(peer EID, tag)` in `Stack::flows`.
///
/// Tracks an outstanding request so that the matching response
/// (same tag value, TO bit unset) is accepted by [`Stack::receive`].
#[derive(Debug)]
struct Flow {
    // preallocated flows have None expiry
    expiry_stamp: Option<EventStamp>,
    // Application identifier from the original `start_send()`; copied
    // onto the reply message's `cookie` field.
    cookie: Option<AppCookie>,
}
130
/// An opaque identifier that applications can use to associate responses.
///
/// The stack never interprets the inner value; it is stored and returned as-is.
#[derive(Debug, Eq, PartialEq, Clone, Copy, Hash, PartialOrd, Ord)]
pub struct AppCookie(pub usize);
134
// Typed view over the fixed 4-byte MCTP transport header.
type Header = libmctp::base_packet::MCTPTransportHeader<[u8; HEADER_LEN]>;
136
/// A handle to a received message.
///
/// Must be returned to the stack with [`finished_receive`](Stack::finished_receive)
/// or [`fetch_message_with`](Stack::fetch_message_with)
/// otherwise the reassembly slot will not be released for further messages.
#[must_use]
// This is an opaque index into `Stack.reassemblers`. Is deliberately not `Copy`,
// so that it can't be held longer than the reassembler is valid.
#[derive(Debug)]
pub struct ReceiveHandle(usize);
147
/// Low level MCTP stack.
///
/// This is an IO-less MCTP stack, independent of any particular transport.
#[derive(Debug)]
pub struct Stack {
    /// This stack's own endpoint ID.
    own_eid: Eid,

    // flows where we own the tag
    flows: FnvIndexMap<(Eid, TagValue), Flow, FLOWS>,

    // The buffer is kept outside of the Reassembler, in case it is borrowed
    // from other storage locations in future.
    // This is [Option<>] rather than Vec so that indices remain stable
    // for the ReceiveHandle. Could use a Map instead?
    reassemblers: [Option<(Reassembler, Vec<u8, MAX_PAYLOAD>)>; NUM_RECEIVE],

    /// monotonic time and counter.
    now: EventStamp,
    /// cached next expiry time from update()
    next_timeout: u64,

    /// Default MTU for `start_send()`; per-call overrides may shrink it.
    mtu: usize,

    // Arbitrary counter to make tag allocation more variable.
    next_tag: u8,

    // Arbitrary next sequence number to start a fragmenter
    next_seq: u8,
}
177
178impl Stack {
179 /// Create a new `Stack`.
180 ///
181 /// `own_eid` is the EID for this stack. It may be 0 (`MCTP_ADDR_NULL`).
182 ///
183 /// `now_millis` is the current timestamp, the same style as would be
184 /// passed to `update_clock()`.
185 ///
186 /// `mtu` is the default MTU of the stack. Specific [`start_send()`](Self::start_send)
187 /// calls may use a smaller MTU if needed (for example a per-link or per-EID MTU).
188 /// `new()` will panic if a MTU smaller than 5 is given (minimum MCTP header and type byte).
189 pub fn new(own_eid: Eid, mtu: usize, now_millis: u64) -> Self {
190 let now = EventStamp {
191 clock: now_millis,
192 counter: 0,
193 };
194 assert!(mtu >= HEADER_LEN + 1);
195 Self {
196 own_eid,
197 now,
198 next_timeout: 0,
199 mtu,
200 flows: Default::default(),
201 reassemblers: Default::default(),
202 next_tag: 0,
203 next_seq: 0,
204 }
205 }
206
207 /// Update the internal timestamp of the stack.
208 ///
209 /// This is used for expiring flows and reassembly.
210 ///
211 /// Returns [`Error::InvalidInput`] if time goes backwards.
212 fn update_clock(&mut self, now_millis: u64) -> Result<()> {
213 if now_millis < self.now.clock {
214 Err(Error::InvalidInput)
215 } else {
216 if now_millis > self.now.clock {
217 self.now.clock = now_millis;
218 self.now.counter = 0;
219 } else {
220 // update_clock was called with the same millisecond as previously.
221 // Don't do anything.
222 }
223 Ok(())
224 }
225 }
226
    /// Updates timeouts and returns the next timeout in milliseconds
    ///
    /// Must be called regularly to update the current clock value.
    /// Returns [`Error::InvalidInput`] if time goes backwards.
    ///
    /// Returns `(next_timeout, any_expired)`.
    /// `next_timeout` is a suitable interval (milliseconds) for the next
    /// call to `update()`, currently a maximum of 100 ms.
    ///
    /// `any_expired` is set true if any message receive timeouts expired with this call.
    pub fn update(&mut self, now_millis: u64) -> Result<(u64, bool)> {
        self.update_clock(now_millis)?;

        // `next_timeout` caches the deadline computed by the previous call.
        if let Some(remain) = self.next_timeout.checked_sub(now_millis) {
            if remain > 0 {
                // Skip timeout checks if within previous interval
                return Ok((remain, false));
            }
        }

        // Start from the coarsest granularity; shrink to the soonest expiry.
        let mut timeout = TIMEOUT_INTERVAL;
        let mut any_expired = false;

        // Check reassembler expiry for incomplete packets
        for r in self.reassemblers.iter_mut() {
            if let Some((re, _buf)) = r {
                match re.check_expired(
                    &self.now,
                    REASSEMBLY_EXPIRY_TIMEOUT,
                    DEFERRED_TIMEOUT,
                ) {
                    None => {
                        trace!("Expired");
                        any_expired = true;
                        // Drop the reassembler and its buffer, freeing the slot.
                        *r = None;
                    }
                    // Not expired, update the timeout
                    Some(t) => timeout = timeout.min(t),
                }
            }
        }

        // Expire reply-packet flows
        self.flows.retain(|_k, flow| {
            match flow.expiry_stamp {
                // no expiry
                None => true,
                Some(stamp) => {
                    match stamp
                        .check_timeout(&self.now, REASSEMBLY_EXPIRY_TIMEOUT)
                    {
                        // expired, remove it
                        None => {
                            any_expired = true;
                            false
                        }
                        Some(t) => {
                            // still time left
                            timeout = timeout.min(t);
                            true
                        }
                    }
                }
            }
        });

        // Cache the absolute deadline for the early-out check above.
        self.next_timeout = timeout as u64 + now_millis;

        Ok((timeout as u64, any_expired))
    }
297
    /// Initiates a MCTP message send.
    ///
    /// Returns a [`Fragmenter`] that will packetize the message.
    ///
    /// `mtu` is an optional override, will be the min of the stack MTU and the argument.
    ///
    /// The provided cookie will be returned when `send_fill()` completes.
    ///
    /// When sending with `tag.is_owner() == true`,
    /// the cookie will be stored with the flow, and the reply [`MctpMessage`] `cookie`
    /// field will be set.
    ///
    /// A `tag` of `None` allocates an owned tag for the destination;
    /// [`Error::TagUnavailable`] is returned if no tag or flow slot is free.
    pub fn start_send(
        &mut self,
        dest: Eid,
        typ: MsgType,
        tag: Option<Tag>,
        tag_expires: bool,
        ic: MsgIC,
        mtu: Option<usize>,
        cookie: Option<AppCookie>,
    ) -> Result<Fragmenter> {
        // Add an entry to the flow table for owned tags
        let tag = match tag {
            None => {
                // allocate a tag
                Tag::Owned(self.set_flow(dest, None, tag_expires, cookie)?)
            }
            Some(Tag::Owned(tv)) => {
                // Caller supplied a preallocated owned tag; verify it exists.
                let check =
                    self.set_flow(dest, Some(tv), tag_expires, cookie)?;
                debug_assert!(check == tv);
                Tag::Owned(tv)
            }
            // A response (unowned tag) needs no flow entry.
            Some(Tag::Unowned(tv)) => Tag::Unowned(tv),
        };

        // Per-call MTU may only shrink the stack default, never grow it.
        let mut frag_mtu = self.mtu;
        if let Some(m) = mtu {
            frag_mtu = frag_mtu.min(m);
        }

        // Vary the starting seq
        self.next_seq = (self.next_seq + 1) & mctp::MCTP_SEQ_MASK;

        Fragmenter::new(
            typ,
            self.own_eid,
            dest,
            tag,
            frag_mtu,
            cookie,
            ic,
            self.next_seq,
        )
    }
353
    /// Receive a packet.
    ///
    /// Returns `Ok(Some(_))` when a full message is reassembled.
    /// Returns `Ok(None)` on success when the message is incomplete.
    /// Callers must call [`finished_receive`](Stack::finished_receive)
    /// or [`fetch_message_with`](Stack::fetch_message_with)
    /// for any returned [`ReceiveHandle`].
    ///
    /// Returns [`Error::NoSpace`] when all reassembly slots are busy, and
    /// [`Error::Unreachable`] for a response packet with no matching flow.
    pub fn receive(
        &mut self,
        packet: &[u8],
    ) -> Result<Option<(MctpMessage<'_>, ReceiveHandle)>> {
        // Get or insert a reassembler for this packet
        let idx = self.get_reassembler(packet)?;
        let (re, buf) = if let Some(r) = &mut self.reassemblers[idx] {
            r
        } else {
            // Create a new one
            let mut re =
                Reassembler::new(self.own_eid, packet, self.now.increment())?;

            if !re.tag.is_owner() {
                // Only allow it if we had an existing flow
                if let Some(f) = self.lookup_flow(re.peer, re.tag.tag()) {
                    // Carry the requester's cookie over to the response.
                    re.set_cookie(f.cookie);
                } else {
                    return Err(Error::Unreachable);
                }
            }
            self.reassemblers[idx].insert((re, Vec::new()))
        };

        // Feed the packet to the reassembler
        match re.receive(packet, buf, self.now.increment()) {
            // Received a complete message
            Ok(Some(_msg)) => {
                // Have received a "response", flow is finished.
                // TODO preallocated tags won't remove the flow.
                if !re.tag.is_owner() {
                    let (peer, tv) = (re.peer, re.tag.tag());
                    self.remove_flow(peer, tv);
                }

                // Required to reborrow `re` and `buf`. Otherwise
                // we hit lifetime problems setting `= None` in the Err case.
                // These two lines can be removed once Rust "polonius" borrow
                // checker is added.
                let (re, buf) = self.reassemblers[idx].as_mut().unwrap();
                // NOTE(review): a failure here propagates with `?` but leaves
                // the reassembler slot occupied — presumably cleaned up by
                // expiry in update(); confirm.
                let msg = re.message(buf)?;

                let handle = re.take_handle(idx);
                Ok(Some((msg, handle)))
            }
            // Message isn't complete, no error
            Ok(None) => Ok(None),
            // Error
            Err(e) => {
                // Something went wrong, release the reassembler.
                self.reassemblers[idx] = None;
                Err(e)
            }
        }
    }
416
    /// Retrieves a MCTP message for a receive handle.
    ///
    /// The message is provided to a closure.
    /// This allows using a closure that takes ownership of non-copyable objects.
    ///
    /// The handle is consumed; the reassembly slot is released afterwards.
    pub fn fetch_message_with<F>(&mut self, handle: ReceiveHandle, f: F)
    where
        F: FnOnce(MctpMessage),
    {
        let m = self.fetch_message(&handle);
        f(m);

        // Always call finished_receive() regardless of errors
        self.finished_receive(handle);
    }
431
    /// Provides a message previously returned from [`receive`](Self::receive)
    ///
    /// Panics (via `unreachable!`) on an invalid handle; handles are only
    /// handed out by this stack for completed messages, so that indicates a bug.
    pub fn fetch_message(&mut self, handle: &ReceiveHandle) -> MctpMessage<'_> {
        let Some(Some((re, buf))) = self.reassemblers.get_mut(handle.0) else {
            // ReceiveHandle can only be constructed when
            // a completed message exists, so this should be impossible.
            unreachable!("Bad ReceiveHandle");
        };

        let Ok(msg) = re.message(buf) else {
            unreachable!("Bad ReceiveHandle");
        };
        msg
    }
445
446 /// Returns a handle to the `Stack` and complete the message
447 pub fn finished_receive(&mut self, handle: ReceiveHandle) {
448 if let Some(r) = self.reassemblers.get_mut(handle.0) {
449 if let Some((re, _buf)) = r {
450 re.return_handle(handle);
451 *r = None;
452 return;
453 }
454 }
455 unreachable!("Bad ReceiveHandle");
456 }
457
    /// Returns a handle to the `Stack`, the message will be kept (until timeouts)
    ///
    /// The message may later be retrieved again with
    /// [`get_deferred`](Self::get_deferred) or
    /// [`get_deferred_bycookie`](Self::get_deferred_bycookie).
    pub fn return_handle(&mut self, handle: ReceiveHandle) {
        // OK unwrap: handle can't be invalid
        let (re, _buf) = self.reassemblers[handle.0].as_mut().unwrap();
        re.return_handle(handle);
    }
464
    /// Retrieves a message deferred from a previous [`receive`](Self::receive) callback.
    ///
    /// Messages are selected by `(source_eid, tag)`.
    /// If multiple match the earliest is returned.
    ///
    /// Messages are only available for [`DEFERRED_TIMEOUT`], after
    /// that time they will be discarded and the message slot/tag may
    /// be reused.
    pub fn get_deferred(
        &mut self,
        source: Eid,
        tag: Tag,
    ) -> Option<ReceiveHandle> {
        // Find the earliest matching entry ("earliest" by EventStamp order)
        self.done_reassemblers()
            .filter(|(_i, re)| re.tag == tag && re.peer == source)
            .min_by_key(|(_i, re)| re.stamp)
            .map(|(i, re)| re.take_handle(i))
    }
484
485 /// Retrieves a message deferred from a previous [`receive`](Self::receive) callback.
486 ///
487 /// If multiple match the earliest is returned.
488 /// Multiple cookies to match may be provided.
489 ///
490 /// Messages are only available for [`DEFERRED_TIMEOUT`], after
491 /// that time they will be discarded and the message slot may
492 /// be reused.
493 pub fn get_deferred_bycookie(
494 &mut self,
495 cookies: &[AppCookie],
496 ) -> Option<ReceiveHandle> {
497 // Find the earliest matching entry
498 self.done_reassemblers()
499 .filter(|(_i, re)| {
500 if let Some(c) = re.cookie {
501 if cookies.contains(&c) {
502 return true;
503 }
504 }
505 false
506 })
507 .min_by_key(|(_i, re)| re.stamp)
508 .map(|(i, re)| re.take_handle(i))
509 }
510
    /// Returns an iterator over completed reassemblers.
    ///
    /// The Item is (enumerate_index, reassembler).
    /// The index is the slot position in `self.reassemblers`, suitable
    /// for constructing a `ReceiveHandle` via `take_handle()`.
    fn done_reassemblers(
        &mut self,
    ) -> impl Iterator<Item = (usize, &mut Reassembler)> {
        self.reassemblers
            .iter_mut()
            .enumerate()
            .filter_map(|(i, r)| {
                // re must be Some and is_done
                r.as_mut()
                    .and_then(|(re, _buf)| re.is_done().then_some((i, re)))
            })
    }
526
    /// Sets (or clears) the [`AppCookie`] of a held received message.
    ///
    /// `handle` must refer to a message currently held by the caller.
    pub fn set_cookie(
        &mut self,
        handle: &ReceiveHandle,
        cookie: Option<AppCookie>,
    ) {
        // OK unwrap: handle can't be invalid
        let (re, _buf) = self.reassemblers[handle.0].as_mut().unwrap();
        re.set_cookie(cookie)
    }
536
    /// Sets the local Endpoint ID.
    ///
    /// Returns an error (propagated from `Eid::new_normal`) if `eid` is not
    /// a valid normal EID.
    pub fn set_eid(&mut self, eid: u8) -> Result<()> {
        self.own_eid = Eid::new_normal(eid)
            .inspect_err(|_e| warn!("Invalid Set EID {}", eid))?;
        info!("Set EID to {}", eid);
        Ok(())
    }
544
    /// Retrieves the local Endpoint ID.
    pub fn eid(&self) -> Eid {
        self.own_eid
    }
549
    /// Returns whether `packet` is addressed to this stack's own EID.
    ///
    /// The determination is delegated to `Reassembler::is_local_dest`.
    pub fn is_local_dest(&self, packet: &[u8]) -> bool {
        Reassembler::is_local_dest(self.own_eid, packet)
    }
553
554 /// Returns an index in to the `reassemblers` array
555 fn get_reassembler(&mut self, packet: &[u8]) -> Result<usize> {
556 // Look for an existing match
557 let pos = self.reassemblers.iter().position(|r| {
558 r.as_ref()
559 .is_some_and(|(re, _buf)| re.matches_packet(packet))
560 });
561 if let Some(pos) = pos {
562 return Ok(pos);
563 }
564
565 // Find a spare slot
566 let pos = self.reassemblers.iter().position(|r| r.is_none());
567 if let Some(pos) = pos {
568 return Ok(pos);
569 }
570
571 trace!("out of reassemblers");
572 Err(Error::NoSpace)
573 }
574
    /// Allocates an unused tag value for `peer`, or `None` if all are in use.
    ///
    /// Tag search starts from a rotating `next_tag` position so allocations
    /// vary between calls.
    fn alloc_tag(&mut self, peer: Eid) -> Option<TagValue> {
        // Find used tags as a bitmask
        let mut used = 0u8;
        for (_fpeer, tag) in
            self.flows.keys().filter(|(fpeer, _tag)| *fpeer == peer)
        {
            debug_assert!(tag.0 <= mctp::MCTP_TAG_MAX);
            let bit = 1u8 << tag.0;
            // Map keys are unique, so each tag bit is seen at most once.
            debug_assert!(used & bit == 0);
            used |= bit;
        }

        let mut tag = None;

        // Find an unset bit, scanning one full cycle of tag values
        // starting from the rotated position.
        self.next_tag = (self.next_tag + 1) & mctp::MCTP_TAG_MAX;
        let end = self.next_tag + mctp::MCTP_TAG_MAX;
        for t in self.next_tag..=end {
            let t = t & mctp::MCTP_TAG_MAX;
            let tagmask = 1 << t;
            if used & tagmask == 0 {
                tag = Some(TagValue(t));
                break;
            }
        }

        tag
    }
603
    /// Inserts a new flow. Called when we are the tag owner.
    ///
    /// A tag will be allocated if fixedtag = None
    /// Returns [`Error::TagUnavailable`] if all tags or flows are used.
    ///
    /// When `flow_expires` is false the flow has no expiry stamp
    /// (a preallocated flow that persists until explicitly removed).
    fn new_flow(
        &mut self,
        peer: Eid,
        fixedtag: Option<TagValue>,
        flow_expires: bool,
        cookie: Option<AppCookie>,
    ) -> Result<TagValue> {
        let tag = fixedtag.or_else(|| self.alloc_tag(peer));
        trace!("new flow tag {}", peer);

        let Some(tag) = tag else {
            return Err(Error::TagUnavailable);
        };

        // `None` expiry marks a non-expiring (preallocated) flow.
        let expiry_stamp = flow_expires.then(|| self.now.increment());

        let f = Flow {
            expiry_stamp,
            cookie,
        };
        // Map insertion can fail when FLOWS capacity is exhausted.
        let r = self
            .flows
            .insert((peer, tag), f)
            .map_err(|_| Error::TagUnavailable)?;
        debug_assert!(r.is_none(), "Duplicate flow insertion");
        trace!("new flow {}", peer);
        Ok(tag)
    }
636
    /// Creates a new tag, or ensures that an existing one matches.
    ///
    /// Used by `start_send()`: a `None` tag allocates a fresh flow; a
    /// caller-supplied owned tag must refer to an existing non-expiring flow.
    fn set_flow(
        &mut self,
        peer: Eid,
        tag: Option<TagValue>,
        flow_expires: bool,
        cookie: Option<AppCookie>,
    ) -> Result<TagValue> {
        trace!("set flow {}", peer);

        if let Some(tv) = tag {
            if let Some(f) = self.flows.get_mut(&(peer, tv)) {
                if f.expiry_stamp.is_some() {
                    // An Owned tag given to start_send() must have been initially created
                    // tag_expires=false.
                    trace!("Can't specify an owned tag that didn't have tag_expires=false");
                    return Err(Error::BadArgument);
                }

                if f.cookie != cookie {
                    // Not an error, but unusual enough to note.
                    trace!("varying app for flow");
                }
                return Ok(tv);
            }
        }

        self.new_flow(peer, tag, flow_expires, cookie)
    }
665
    /// Looks up an owned-tag flow by `(peer, tag value)`.
    fn lookup_flow(&self, peer: Eid, tv: TagValue) -> Option<&Flow> {
        self.flows.get(&(peer, tv))
    }
669
    /// Removes an owned-tag flow; callers must only pass existing flows.
    fn remove_flow(&mut self, peer: Eid, tv: TagValue) {
        trace!("remove flow");
        let r = self.flows.remove(&(peer, tv));

        debug_assert!(r.is_some(), "non-existent remove_flow");
    }
676
    /// Cancels reassembly state and the flow for `(source, tv)`.
    ///
    /// Returns [`Error::BadArgument`] if an application still holds a
    /// [`ReceiveHandle`] for a matching message; nothing is removed then.
    pub fn cancel_flow(&mut self, source: Eid, tv: TagValue) -> Result<()> {
        trace!("cancel flow {}", source);
        // Matching reassemblies carry the tag with the TO bit unset.
        let tag = Tag::Unowned(tv);
        let mut removed = false;
        for r in self.reassemblers.iter_mut() {
            if let Some((re, _buf)) = r.as_mut() {
                if re.tag == tag && re.peer == source {
                    if re.handle_taken() {
                        // An app holds this message; refuse to pull it away.
                        trace!("Outstanding handle");
                        return Err(Error::BadArgument);
                    } else {
                        *r = None;
                        removed = true;
                    }
                }
            }
        }

        trace!("removed flow");
        let r = self.flows.remove(&(source, tv));
        if removed {
            // A reassembly existed, so the flow entry must have too.
            debug_assert!(r.is_some());
        }
        Ok(())
    }
702}
703
/// For received reassembled messages
pub struct MctpMessage<'a> {
    /// Source endpoint ID of the message.
    pub source: Eid,
    /// Destination endpoint ID.
    pub dest: Eid,
    /// Message tag, including tag-owner state.
    pub tag: Tag,

    /// MCTP message type.
    pub typ: MsgType,
    /// Message integrity check indicator.
    pub ic: MsgIC,
    /// Message payload, borrowed from the stack's reassembly buffer.
    pub payload: &'a [u8],

    /// Set for response messages when the request had `cookie` set in the [`Stack::start_send`] call.
    /// "Response" message refers having `TO` bit unset.
    pub cookie: Option<AppCookie>,
}
718
719impl core::fmt::Debug for MctpMessage<'_> {
720 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
721 f.debug_struct("Mctpmessage")
722 .field("source", &self.source)
723 .field("dest", &self.dest)
724 .field("tag", &self.tag)
725 .field("typ", &self.typ)
726 .field("ic", &self.ic)
727 .field("cookie", &self.cookie)
728 .field("payload length", &self.payload.len())
729 .finish_non_exhaustive()
730 }
731}
732
/// Orders events by millisecond clock, then by an intra-millisecond counter.
#[derive(Default, Debug, Ord, PartialOrd, PartialEq, Eq, Copy, Clone)]
pub(crate) struct EventStamp {
    // Ordering of members matters here for `Ord` derive
    /// Monotonic real clock in milliseconds
    pub clock: u64,
    /// A counter to order events having the same realclock value
    pub counter: u32,
}

impl EventStamp {
    // Bumps `counter` and returns the incremented stamp.
    // `clock` is unmodified.
    fn increment(&mut self) -> Self {
        self.counter += 1;
        // `EventStamp` is `Copy`; hand back a snapshot of the new state.
        *self
    }

    /// Check timeout
    ///
    /// Returns `None` if expired, or `Some(time_remaining)`.
    /// Times are in milliseconds.
    pub fn check_timeout(&self, now: &EventStamp, timeout: u32) -> Option<u32> {
        match now.clock.checked_sub(self.clock) {
            None => {
                debug_assert!(false, "Timestamp backwards");
                None
            }
            Some(elapsed) => match u32::try_from(elapsed) {
                // Longer than 49 days elapsed. It's expired.
                Err(_) => None,
                Ok(elapsed) => timeout.checked_sub(elapsed),
            },
        }
    }
}
769
// A logging backend is mandatory so the log macros below resolve.
#[cfg(not(any(feature = "log", feature = "defmt")))]
compile_error!("Either log or defmt feature must be enabled");

/// Re-exports of the selected logging backend's macros.
// NOTE(review): enabling both `log` and `defmt` would produce conflicting
// re-exports — presumably the features are mutually exclusive; confirm.
pub(crate) mod fmt {
    #[cfg(feature = "defmt")]
    pub use defmt::{debug, error, info, trace, warn};

    #[cfg(feature = "log")]
    pub use log::{debug, error, info, trace, warn};
}
780
#[cfg(test)]
mod tests {
    // Placeholder; integration-style tests live elsewhere in the crate.

    // TODO:
    // back to back fragmenter/reassembler

    // back to back stacks?
}