#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub use alloc_mod::*;
use core::fmt::{Display, Formatter};
use delegate::delegate;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use spacepackets::ByteConversionError;
#[cfg(feature = "std")]
use std::error::Error;

type NumBlocks = u16;
pub type StoreAddr = u64;

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct StaticPoolAddr {
    pub(crate) pool_idx: u16,
    pub(crate) packet_idx: NumBlocks,
}

impl StaticPoolAddr {
    pub const INVALID_ADDR: u32 = 0xFFFFFFFF;

    pub fn raw(&self) -> u32 {
        ((self.pool_idx as u32) << 16) | self.packet_idx as u32
    }
}

impl From<StaticPoolAddr> for StoreAddr {
    fn from(value: StaticPoolAddr) -> Self {
        ((value.pool_idx as u64) << 16) | value.packet_idx as u64
    }
}

impl From<StoreAddr> for StaticPoolAddr {
    fn from(value: StoreAddr) -> Self {
        // Both indices are 16 bit wide, so mask with 0xffff to avoid truncating
        // pool or packet indices above 255.
        Self {
            pool_idx: ((value >> 16) & 0xffff) as u16,
            packet_idx: (value & 0xffff) as u16,
        }
    }
}

impl Display for StaticPoolAddr {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        write!(
            f,
            "StoreAddr(pool index: {}, packet index: {})",
            self.pool_idx, self.packet_idx
        )
    }
}

#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum StoreIdError {
    InvalidSubpool(u16),
    InvalidPacketIdx(u16),
}

impl Display for StoreIdError {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        match self {
            StoreIdError::InvalidSubpool(pool) => {
                write!(f, "invalid subpool, index: {pool}")
            }
            StoreIdError::InvalidPacketIdx(packet_idx) => {
                write!(f, "invalid packet index: {packet_idx}")
            }
        }
    }
}

#[cfg(feature = "std")]
impl Error for StoreIdError {}

#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum StoreError {
    /// The requested data block is too large for this pool.
    DataTooLarge(usize),
    /// The store is full. Contains the index of the full subpool.
    StoreFull(u16),
    /// The store ID is invalid. This also covers partial errors where only the subpool is invalid.
    InvalidStoreId(StoreIdError, Option<StoreAddr>),
    /// Valid subpool and packet index, but no data is stored at the given address.
    DataDoesNotExist(StoreAddr),
    ByteConversionError(spacepackets::ByteConversionError),
    LockError,
    /// Internal error which should not occur, reported with an internal error code.
    InternalError(u32),
}

impl Display for StoreError {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        match self {
            StoreError::DataTooLarge(size) => {
                write!(f, "data to store with size {size} is too large")
            }
            StoreError::StoreFull(subpool) => {
                write!(f, "store is full, index of full subpool: {subpool}")
            }
            StoreError::InvalidStoreId(id_e, addr) => {
                write!(f, "invalid store ID: {id_e}, address: {addr:?}")
            }
            StoreError::DataDoesNotExist(addr) => {
                write!(f, "no data exists at address {addr:?}")
            }
            StoreError::InternalError(e) => {
                write!(f, "internal error: {e}")
            }
            StoreError::ByteConversionError(e) => {
                write!(f, "store error: {e}")
            }
            StoreError::LockError => {
                write!(f, "lock error")
            }
        }
    }
}

impl From<ByteConversionError> for StoreError {
    fn from(value: ByteConversionError) -> Self {
        Self::ByteConversionError(value)
    }
}

#[cfg(feature = "std")]
impl Error for StoreError {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        if let StoreError::InvalidStoreId(e, _) = self {
            return Some(e);
        }
        None
    }
}

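/// Generic abstraction for a memory pool which stores packets or other byte blobs as memory
/// blocks and addresses them via a [StoreAddr].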
pub trait PoolProvider {
    /// Add new data to the pool. The provider should attempt to reserve a memory block with the
    /// appropriate size, copy the given data into it and yield a [StoreAddr] which can be used
    /// to access the data stored in the pool.
    fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError>;

    /// The provider should attempt to reserve a free memory block with the appropriate size and
    /// then pass a mutable reference to that block to the given writer closure, which allows
    /// writing data into the block directly. Yields a [StoreAddr] which can be used to access
    /// the data stored in the pool.
    fn free_element<W: FnMut(&mut [u8])>(
        &mut self,
        len: usize,
        writer: W,
    ) -> Result<StoreAddr, StoreError>;

    /// Modify data added previously using a given [StoreAddr] by yielding a mutable reference
    /// to it inside the given updater closure.
    fn modify<U: FnMut(&mut [u8])>(
        &mut self,
        addr: &StoreAddr,
        updater: U,
    ) -> Result<(), StoreError>;

    /// Read the data stored at the given [StoreAddr] by copying it into the provided buffer.
    /// Returns the number of bytes which were copied.
    fn read(&self, addr: &StoreAddr, buf: &mut [u8]) -> Result<usize, StoreError>;

    /// Delete data inside the pool given a [StoreAddr].
    fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError>;

    /// Check whether a data block exists at the given address.
    fn has_element_at(&self, addr: &StoreAddr) -> Result<bool, StoreError>;

    /// Retrieve the length of the data stored at the given address.
    fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError>;

    #[cfg(feature = "alloc")]
    fn read_as_vec(&self, addr: &StoreAddr) -> Result<alloc::vec::Vec<u8>, StoreError> {
        let mut vec = alloc::vec![0; self.len_of_data(addr)?];
        self.read(addr, &mut vec)?;
        Ok(vec)
    }
}

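/// Extension trait for pool providers which can hand out RAII guards for read and modify
/// operations. The guards delete the data behind the wrapped address when they are dropped,
/// unless their `release` method is called first.
///
/// The following sketch is illustrative only and assumes this module is reachable as
/// `my_crate::pool`; adjust the import path to your crate layout.
///
/// ```ignore
/// use my_crate::pool::{PoolProvider, PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig};
///
/// let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(1, 8)], false));
/// let addr = pool.add(&[1, 2, 3, 4]).unwrap();
/// {
///     let guard = pool.read_with_guard(addr);
///     let mut buf = [0u8; 8];
///     let read_len = guard.read(&mut buf).unwrap();
///     assert_eq!(&buf[..read_len], &[1, 2, 3, 4]);
///     // `guard.release()` is not called, so the data is deleted when the guard is dropped.
/// }
/// assert!(!pool.has_element_at(&addr).unwrap());
/// ```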
pub trait PoolProviderWithGuards: PoolProvider {
    /// Behaves like [PoolProvider::read], but returns a RAII guard which deletes the data at the
    /// given address on drop unless [PoolGuard::release] is called.
    fn read_with_guard(&mut self, addr: StoreAddr) -> PoolGuard<Self>;

    /// Behaves like [PoolProvider::modify], but returns a RAII guard which deletes the data at
    /// the given address on drop unless [PoolRwGuard::release] is called.
    fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard<Self>;
}

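/// RAII guard around a pool address. Unless [Self::release] is called, the data at the wrapped
/// address is deleted when the guard is dropped.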
pub struct PoolGuard<'a, MemProvider: PoolProvider + ?Sized> {
    pool: &'a mut MemProvider,
    pub addr: StoreAddr,
    no_deletion: bool,
    deletion_failed_error: Option<StoreError>,
}

impl<'a, MemProvider: PoolProvider> PoolGuard<'a, MemProvider> {
    pub fn new(pool: &'a mut MemProvider, addr: StoreAddr) -> Self {
        Self {
            pool,
            addr,
            no_deletion: false,
            deletion_failed_error: None,
        }
    }

    pub fn read(&self, buf: &mut [u8]) -> Result<usize, StoreError> {
        self.pool.read(&self.addr, buf)
    }

    #[cfg(feature = "alloc")]
    pub fn read_as_vec(&self) -> Result<alloc::vec::Vec<u8>, StoreError> {
        self.pool.read_as_vec(&self.addr)
    }

    /// Releasing the guard prevents the data from being deleted when the guard is dropped.
    pub fn release(&mut self) {
        self.no_deletion = true;
    }
}

impl<MemProvider: PoolProvider + ?Sized> Drop for PoolGuard<'_, MemProvider> {
    fn drop(&mut self) {
        if !self.no_deletion {
            if let Err(e) = self.pool.delete(self.addr) {
                self.deletion_failed_error = Some(e);
            }
        }
    }
}

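/// RAII guard which wraps a [PoolGuard] and additionally allows modifying the data behind the
/// wrapped address via [Self::update].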
pub struct PoolRwGuard<'a, MemProvider: PoolProvider + ?Sized> {
    guard: PoolGuard<'a, MemProvider>,
}

impl<'a, MemProvider: PoolProvider> PoolRwGuard<'a, MemProvider> {
    pub fn new(pool: &'a mut MemProvider, addr: StoreAddr) -> Self {
        Self {
            guard: PoolGuard::new(pool, addr),
        }
    }

    pub fn update<U: FnMut(&mut [u8])>(&mut self, updater: &mut U) -> Result<(), StoreError> {
        self.guard.pool.modify(&self.guard.addr, updater)
    }

    delegate!(
        to self.guard {
            pub fn read(&self, buf: &mut [u8]) -> Result<usize, StoreError>;
            pub fn release(&mut self);
        }
    );
}

#[cfg(feature = "alloc")]
mod alloc_mod {
    use super::{PoolGuard, PoolProvider, PoolProviderWithGuards, PoolRwGuard, StaticPoolAddr};
    use crate::pool::{NumBlocks, StoreAddr, StoreError, StoreIdError};
    use alloc::vec;
    use alloc::vec::Vec;
    use spacepackets::ByteConversionError;
    #[cfg(feature = "std")]
    use std::sync::{Arc, RwLock};

    #[cfg(feature = "std")]
    pub type SharedStaticMemoryPool = Arc<RwLock<StaticMemoryPool>>;

    type PoolSize = usize;
    const STORE_FREE: PoolSize = PoolSize::MAX;
    pub const POOL_MAX_SIZE: PoolSize = STORE_FREE - 1;

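    /// Configuration for a [StaticMemoryPool]. Each `(num_blocks, block_size)` tuple describes
    /// one subpool consisting of `num_blocks` memory blocks which are each `block_size` bytes
    /// large. If `spill_to_higher_subpools` is set, a store request whose best matching subpool
    /// is full is forwarded to the next larger subpool instead of failing with
    /// [StoreError::StoreFull].
    ///
    /// Illustrative example (the concrete numbers are arbitrary):
    ///
    /// ```ignore
    /// // Three subpools: ten 32-byte blocks, four 64-byte blocks and two 128-byte blocks.
    /// let cfg = StaticPoolConfig::new(vec![(10, 32), (4, 64), (2, 128)], false);
    /// ```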
    #[derive(Clone)]
    pub struct StaticPoolConfig {
        cfg: Vec<(NumBlocks, usize)>,
        spill_to_higher_subpools: bool,
    }

    impl StaticPoolConfig {
        pub fn new(cfg: Vec<(NumBlocks, usize)>, spill_to_higher_subpools: bool) -> Self {
            StaticPoolConfig {
                cfg,
                spill_to_higher_subpools,
            }
        }

        pub fn cfg(&self) -> &Vec<(NumBlocks, usize)> {
            &self.cfg
        }

        /// Sanitize the configuration by dropping degenerate subpools (zero blocks or oversized
        /// blocks) and sorting the remaining subpools by ascending block size. Returns the
        /// number of subpools which remain after sanitization.
        pub fn sanitize(&mut self) -> usize {
            self.cfg
                .retain(|&(bucket_num, size)| bucket_num > 0 && size < POOL_MAX_SIZE);
            self.cfg
                .sort_unstable_by(|(_, sz0), (_, sz1)| sz0.cmp(sz1));
            self.cfg.len()
        }
    }

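    /// Static memory pool which manages a fixed set of subpools. Each subpool consists of a
    /// configurable number of equally sized memory blocks, as specified by the
    /// [StaticPoolConfig] passed to [StaticMemoryPool::new]. Stored data is addressed by the
    /// [StoreAddr] returned from the store operations.
    ///
    /// The following usage sketch is illustrative and assumes this module is reachable as
    /// `my_crate::pool`; adjust the import path to your crate layout.
    ///
    /// ```ignore
    /// use my_crate::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};
    ///
    /// // Two subpools: four 4-byte blocks and two 8-byte blocks, no spilling.
    /// let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8)], false);
    /// let mut pool = StaticMemoryPool::new(pool_cfg);
    ///
    /// // Store a small packet and read it back.
    /// let addr = pool.add(&[1, 2, 3, 4]).expect("adding data failed");
    /// let mut buf = [0u8; 4];
    /// let read_len = pool.read(&addr, &mut buf).expect("reading data failed");
    /// assert_eq!(read_len, 4);
    /// assert_eq!(buf, [1, 2, 3, 4]);
    ///
    /// // Deleting the data frees the block again.
    /// pool.delete(addr).expect("deleting data failed");
    ///
    /// // Reserve a block and write into it directly via the writer closure.
    /// let addr2 = pool.free_element(8, |buf| buf.fill(0xAA)).expect("reserving failed");
    /// ```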
    pub struct StaticMemoryPool {
        pool_cfg: StaticPoolConfig,
        pool: Vec<Vec<u8>>,
        sizes_lists: Vec<Vec<PoolSize>>,
    }

    impl StaticMemoryPool {
        /// Create a new pool from the given configuration. The configuration is sanitized with
        /// [StaticPoolConfig::sanitize] before the subpools are allocated.
        pub fn new(mut cfg: StaticPoolConfig) -> StaticMemoryPool {
            let subpools_num = cfg.sanitize();
            let mut local_pool = StaticMemoryPool {
                pool_cfg: cfg,
                pool: Vec::with_capacity(subpools_num),
                sizes_lists: Vec::with_capacity(subpools_num),
            };
            for &(num_elems, elem_size) in local_pool.pool_cfg.cfg.iter() {
                let next_pool_len = elem_size * num_elems as usize;
                local_pool.pool.push(vec![0; next_pool_len]);
                let next_sizes_list_len = num_elems as usize;
                local_pool
                    .sizes_lists
                    .push(vec![STORE_FREE; next_sizes_list_len]);
            }
            local_pool
        }

        fn addr_check(&self, addr: &StaticPoolAddr) -> Result<usize, StoreError> {
            self.validate_addr(addr)?;
            let pool_idx = addr.pool_idx as usize;
            let size_list = self.sizes_lists.get(pool_idx).unwrap();
            let curr_size = size_list[addr.packet_idx as usize];
            if curr_size == STORE_FREE {
                return Err(StoreError::DataDoesNotExist(StoreAddr::from(*addr)));
            }
            Ok(curr_size)
        }

        fn validate_addr(&self, addr: &StaticPoolAddr) -> Result<(), StoreError> {
            let pool_idx = addr.pool_idx as usize;
            if pool_idx >= self.pool_cfg.cfg.len() {
                return Err(StoreError::InvalidStoreId(
                    StoreIdError::InvalidSubpool(addr.pool_idx),
                    Some(StoreAddr::from(*addr)),
                ));
            }
            if addr.packet_idx >= self.pool_cfg.cfg[addr.pool_idx as usize].0 {
                return Err(StoreError::InvalidStoreId(
                    StoreIdError::InvalidPacketIdx(addr.packet_idx),
                    Some(StoreAddr::from(*addr)),
                ));
            }
            Ok(())
        }

        fn reserve(&mut self, data_len: usize) -> Result<StaticPoolAddr, StoreError> {
            let mut subpool_idx = self.find_subpool(data_len, 0)?;

            if self.pool_cfg.spill_to_higher_subpools {
                while let Err(StoreError::StoreFull(_)) = self.find_empty(subpool_idx) {
                    if (subpool_idx + 1) as usize == self.sizes_lists.len() {
                        return Err(StoreError::StoreFull(subpool_idx));
                    }
                    subpool_idx += 1;
                }
            }

            let (slot, size_slot_ref) = self.find_empty(subpool_idx)?;
            *size_slot_ref = data_len;
            Ok(StaticPoolAddr {
                pool_idx: subpool_idx,
                packet_idx: slot,
            })
        }

        fn find_subpool(&self, req_size: usize, start_at_subpool: u16) -> Result<u16, StoreError> {
            for (i, &(_, elem_size)) in self.pool_cfg.cfg.iter().enumerate() {
                if i < start_at_subpool as usize {
                    continue;
                }
                if elem_size >= req_size {
                    return Ok(i as u16);
                }
            }
            Err(StoreError::DataTooLarge(req_size))
        }

        fn write(&mut self, addr: &StaticPoolAddr, data: &[u8]) -> Result<(), StoreError> {
            let packet_pos = self.raw_pos(addr).ok_or(StoreError::InternalError(0))?;
            let subpool = self
                .pool
                .get_mut(addr.pool_idx as usize)
                .ok_or(StoreError::InternalError(1))?;
            let pool_slice = &mut subpool[packet_pos..packet_pos + data.len()];
            pool_slice.copy_from_slice(data);
            Ok(())
        }

        fn find_empty(&mut self, subpool: u16) -> Result<(u16, &mut usize), StoreError> {
            if let Some(size_list) = self.sizes_lists.get_mut(subpool as usize) {
                for (i, elem_size) in size_list.iter_mut().enumerate() {
                    if *elem_size == STORE_FREE {
                        return Ok((i as u16, elem_size));
                    }
                }
            } else {
                return Err(StoreError::InvalidStoreId(
                    StoreIdError::InvalidSubpool(subpool),
                    None,
                ));
            }
            Err(StoreError::StoreFull(subpool))
        }

        fn raw_pos(&self, addr: &StaticPoolAddr) -> Option<usize> {
            let (_, size) = self.pool_cfg.cfg.get(addr.pool_idx as usize)?;
            Some(addr.packet_idx as usize * size)
        }
    }


    impl PoolProvider for StaticMemoryPool {
        fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError> {
            let data_len = data.len();
            if data_len > POOL_MAX_SIZE {
                return Err(StoreError::DataTooLarge(data_len));
            }
            let addr = self.reserve(data_len)?;
            self.write(&addr, data)?;
            Ok(addr.into())
        }

        fn free_element<W: FnMut(&mut [u8])>(
            &mut self,
            len: usize,
            mut writer: W,
        ) -> Result<StoreAddr, StoreError> {
            if len > POOL_MAX_SIZE {
                return Err(StoreError::DataTooLarge(len));
            }
            let addr = self.reserve(len)?;
            let raw_pos = self.raw_pos(&addr).unwrap();
            let block =
                &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + len];
            writer(block);
            Ok(addr.into())
        }

        fn modify<U: FnMut(&mut [u8])>(
            &mut self,
            addr: &StoreAddr,
            mut updater: U,
        ) -> Result<(), StoreError> {
            let addr = StaticPoolAddr::from(*addr);
            let curr_size = self.addr_check(&addr)?;
            let raw_pos = self.raw_pos(&addr).unwrap();
            let block = &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()
                [raw_pos..raw_pos + curr_size];
            updater(block);
            Ok(())
        }

        fn read(&self, addr: &StoreAddr, buf: &mut [u8]) -> Result<usize, StoreError> {
            let addr = StaticPoolAddr::from(*addr);
            let curr_size = self.addr_check(&addr)?;
            if buf.len() < curr_size {
                return Err(ByteConversionError::ToSliceTooSmall {
                    found: buf.len(),
                    expected: curr_size,
                }
                .into());
            }
            let raw_pos = self.raw_pos(&addr).unwrap();
            let block =
                &self.pool.get(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + curr_size];
            buf[..curr_size].copy_from_slice(block);
            Ok(curr_size)
        }

        fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError> {
            let addr = StaticPoolAddr::from(addr);
            self.addr_check(&addr)?;
            let block_size = self.pool_cfg.cfg.get(addr.pool_idx as usize).unwrap().1;
            let raw_pos = self.raw_pos(&addr).unwrap();
            let block = &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()
                [raw_pos..raw_pos + block_size];
            let size_list = self.sizes_lists.get_mut(addr.pool_idx as usize).unwrap();
            size_list[addr.packet_idx as usize] = STORE_FREE;
            block.fill(0);
            Ok(())
        }

        fn has_element_at(&self, addr: &StoreAddr) -> Result<bool, StoreError> {
            let addr = StaticPoolAddr::from(*addr);
            self.validate_addr(&addr)?;
            let pool_idx = addr.pool_idx as usize;
            let size_list = self.sizes_lists.get(pool_idx).unwrap();
            let curr_size = size_list[addr.packet_idx as usize];
            if curr_size == STORE_FREE {
                return Ok(false);
            }
            Ok(true)
        }

        fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError> {
            let addr = StaticPoolAddr::from(*addr);
            self.validate_addr(&addr)?;
            let pool_idx = addr.pool_idx as usize;
            let size_list = self.sizes_lists.get(pool_idx).unwrap();
            let size = size_list[addr.packet_idx as usize];
            Ok(match size {
                STORE_FREE => 0,
                _ => size,
            })
        }
    }

    impl PoolProviderWithGuards for StaticMemoryPool {
        fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard<Self> {
            PoolRwGuard::new(self, addr)
        }

        fn read_with_guard(&mut self, addr: StoreAddr) -> PoolGuard<Self> {
            PoolGuard::new(self, addr)
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::pool::{
        PoolGuard, PoolProvider, PoolProviderWithGuards, PoolRwGuard, StaticMemoryPool,
        StaticPoolAddr, StaticPoolConfig, StoreError, StoreIdError, POOL_MAX_SIZE,
    };
    use std::vec;

    fn basic_small_pool() -> StaticMemoryPool {
        let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)], false);
        StaticMemoryPool::new(pool_cfg)
    }

    #[test]
    fn test_cfg() {
        let mut pool_cfg = StaticPoolConfig::new(vec![(0, 0), (1, 0), (2, POOL_MAX_SIZE)], false);
        pool_cfg.sanitize();
        assert_eq!(*pool_cfg.cfg(), vec![(1, 0)]);
        pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)], false);
        pool_cfg.sanitize();
        assert_eq!(*pool_cfg.cfg(), vec![(32, 3), (16, 6), (8, 12)]);
        pool_cfg = StaticPoolConfig::new(vec![(12, 12), (14, 16), (10, 12)], false);
        pool_cfg.sanitize();
        assert!(
            *pool_cfg.cfg() == vec![(12, 12), (10, 12), (14, 16)]
                || *pool_cfg.cfg() == vec![(10, 12), (12, 12), (14, 16)]
        );
    }

    #[test]
    fn test_add_and_read() {
        let mut local_pool = basic_small_pool();
        let mut test_buf: [u8; 16] = [0; 16];
        for (i, val) in test_buf.iter_mut().enumerate() {
            *val = i as u8;
        }
        let mut other_buf: [u8; 16] = [0; 16];
        let addr = local_pool.add(&test_buf).expect("Adding data failed");
        let res = local_pool.read(&addr, &mut other_buf);
        assert!(res.is_ok());
        let read_len = res.unwrap();
        assert_eq!(read_len, 16);
        for (i, &val) in other_buf.iter().enumerate() {
            assert_eq!(val, i as u8);
        }
    }

    #[test]
    fn test_add_smaller_than_full_slot() {
        let mut local_pool = basic_small_pool();
        let test_buf: [u8; 12] = [0; 12];
        let addr = local_pool.add(&test_buf).expect("Adding data failed");
        let res = local_pool
            .read(&addr, &mut [0; 12])
            .expect("Read back failed");
        assert_eq!(res, 12);
    }

    #[test]
    fn test_delete() {
        let mut local_pool = basic_small_pool();
        let test_buf: [u8; 16] = [0; 16];
        let addr = local_pool.add(&test_buf).expect("Adding data failed");
        let res = local_pool.delete(addr);
        assert!(res.is_ok());
        let mut writer = |buf: &mut [u8]| {
            assert_eq!(buf.len(), 12);
        };
        let res = local_pool.free_element(12, &mut writer);
        assert!(res.is_ok());
        let addr = res.unwrap();
        assert_eq!(
            addr,
            u64::from(StaticPoolAddr {
                pool_idx: 2,
                packet_idx: 0
            })
        );
    }

    #[test]
    fn test_modify() {
        let mut local_pool = basic_small_pool();
        let mut test_buf: [u8; 16] = [0; 16];
        for (i, val) in test_buf.iter_mut().enumerate() {
            *val = i as u8;
        }
        let addr = local_pool.add(&test_buf).expect("Adding data failed");

        {
            local_pool
                .modify(&addr, &mut |buf: &mut [u8]| {
                    buf[0] = 0;
                    buf[1] = 0x42;
                })
                .expect("Modifying data failed");
        }

        local_pool
            .read(&addr, &mut test_buf)
            .expect("Reading back data failed");
        assert_eq!(test_buf[0], 0);
        assert_eq!(test_buf[1], 0x42);
        assert_eq!(test_buf[2], 2);
        assert_eq!(test_buf[3], 3);
    }

    #[test]
    fn test_consecutive_reservation() {
        let mut local_pool = basic_small_pool();
        let res = local_pool.free_element(8, |_| {});
        assert!(res.is_ok());
        let addr0 = res.unwrap();
        let res = local_pool.free_element(8, |_| {});
        assert!(res.is_ok());
        let addr1 = res.unwrap();
        let res = local_pool.free_element(8, |_| {});
        assert!(res.is_err());
        let err = res.unwrap_err();
        assert_eq!(err, StoreError::StoreFull(1));

        assert!(local_pool.delete(addr0).is_ok());
        assert!(local_pool.delete(addr1).is_ok());
    }

    #[test]
    fn test_read_does_not_exist() {
        let local_pool = basic_small_pool();
        let res = local_pool.read(
            &StaticPoolAddr {
                packet_idx: 0,
                pool_idx: 0,
            }
            .into(),
            &mut [],
        );
        assert!(res.is_err());
        assert!(matches!(
            res.unwrap_err(),
            StoreError::DataDoesNotExist { .. }
        ));
    }

    #[test]
    fn test_store_full() {
        let mut local_pool = basic_small_pool();
        let test_buf: [u8; 16] = [0; 16];
        assert!(local_pool.add(&test_buf).is_ok());
        let res = local_pool.add(&test_buf);
        assert!(res.is_err());
        let err = res.unwrap_err();
        assert!(matches!(err, StoreError::StoreFull { .. }));
        if let StoreError::StoreFull(subpool) = err {
            assert_eq!(subpool, 2);
        }
    }

    #[test]
    fn test_invalid_pool_idx() {
        let local_pool = basic_small_pool();
        let addr = StaticPoolAddr {
            pool_idx: 3,
            packet_idx: 0,
        }
        .into();
        let res = local_pool.read(&addr, &mut []);
        assert!(res.is_err());
        let err = res.unwrap_err();
        assert!(matches!(
            err,
            StoreError::InvalidStoreId(StoreIdError::InvalidSubpool(3), Some(_))
        ));
    }

    #[test]
    fn test_invalid_packet_idx() {
        let local_pool = basic_small_pool();
        let addr = StaticPoolAddr {
            pool_idx: 2,
            packet_idx: 1,
        };
        assert_eq!(addr.raw(), 0x00020001);
        let res = local_pool.read(&addr.into(), &mut []);
        assert!(res.is_err());
        let err = res.unwrap_err();
        assert!(matches!(
            err,
            StoreError::InvalidStoreId(StoreIdError::InvalidPacketIdx(1), Some(_))
        ));
    }

    #[test]
    fn test_add_too_large() {
        let mut local_pool = basic_small_pool();
        let data_too_large = [0; 20];
        let res = local_pool.add(&data_too_large);
        assert!(res.is_err());
        let err = res.unwrap_err();
        assert_eq!(err, StoreError::DataTooLarge(20));
    }

    #[test]
    fn test_data_too_large_1() {
        let mut local_pool = basic_small_pool();
        let res = local_pool.free_element(POOL_MAX_SIZE + 1, |_| {});
        assert!(res.is_err());
        assert_eq!(
            res.unwrap_err(),
            StoreError::DataTooLarge(POOL_MAX_SIZE + 1)
        );
    }

    #[test]
    fn test_free_element_too_large() {
        let mut local_pool = basic_small_pool();
        let res = local_pool.free_element(20, |_| {});
        assert!(res.is_err());
        assert_eq!(res.unwrap_err(), StoreError::DataTooLarge(20));
    }

    #[test]
    fn test_pool_guard_deletion_man_creation() {
        let mut local_pool = basic_small_pool();
        let test_buf: [u8; 16] = [0; 16];
        let addr = local_pool.add(&test_buf).expect("Adding data failed");
        let read_guard = PoolGuard::new(&mut local_pool, addr);
        drop(read_guard);
        assert!(!local_pool.has_element_at(&addr).expect("Invalid address"));
    }

    #[test]
    fn test_pool_guard_deletion() {
        let mut local_pool = basic_small_pool();
        let test_buf: [u8; 16] = [0; 16];
        let addr = local_pool.add(&test_buf).expect("Adding data failed");
        let read_guard = local_pool.read_with_guard(addr);
        drop(read_guard);
        assert!(!local_pool.has_element_at(&addr).expect("Invalid address"));
    }

    #[test]
    fn test_pool_guard_with_release() {
        let mut local_pool = basic_small_pool();
        let test_buf: [u8; 16] = [0; 16];
        let addr = local_pool.add(&test_buf).expect("Adding data failed");
        let mut read_guard = PoolGuard::new(&mut local_pool, addr);
        read_guard.release();
        drop(read_guard);
        assert!(local_pool.has_element_at(&addr).expect("Invalid address"));
    }

    #[test]
    fn test_pool_modify_guard_man_creation() {
        let mut local_pool = basic_small_pool();
        let test_buf: [u8; 16] = [0; 16];
        let addr = local_pool.add(&test_buf).expect("Adding data failed");
        let mut rw_guard = PoolRwGuard::new(&mut local_pool, addr);
        rw_guard.update(&mut |_| {}).expect("modify failed");
        drop(rw_guard);
        assert!(!local_pool.has_element_at(&addr).expect("Invalid address"));
    }

    #[test]
    fn test_pool_modify_guard() {
        let mut local_pool = basic_small_pool();
        let test_buf: [u8; 16] = [0; 16];
        let addr = local_pool.add(&test_buf).expect("Adding data failed");
        let mut rw_guard = local_pool.modify_with_guard(addr);
        rw_guard.update(&mut |_| {}).expect("modify failed");
        drop(rw_guard);
        assert!(!local_pool.has_element_at(&addr).expect("Invalid address"));
    }

    #[test]
    fn modify_pool_index_above_0() {
        let mut local_pool = basic_small_pool();
        let test_buf_0: [u8; 4] = [1; 4];
        let test_buf_1: [u8; 4] = [2; 4];
        let test_buf_2: [u8; 4] = [3; 4];
        let test_buf_3: [u8; 4] = [4; 4];
        let addr0 = local_pool.add(&test_buf_0).expect("Adding data failed");
        let addr1 = local_pool.add(&test_buf_1).expect("Adding data failed");
        let addr2 = local_pool.add(&test_buf_2).expect("Adding data failed");
        let addr3 = local_pool.add(&test_buf_3).expect("Adding data failed");
        local_pool
            .modify(&addr0, |buf| {
                assert_eq!(buf, test_buf_0);
            })
            .expect("Modifying data failed");
        local_pool
            .modify(&addr1, |buf| {
                assert_eq!(buf, test_buf_1);
            })
            .expect("Modifying data failed");
        local_pool
            .modify(&addr2, |buf| {
                assert_eq!(buf, test_buf_2);
            })
            .expect("Modifying data failed");
        local_pool
            .modify(&addr3, |buf| {
                assert_eq!(buf, test_buf_3);
            })
            .expect("Modifying data failed");
    }

    #[test]
    fn test_spills_to_higher_subpools() {
        let pool_cfg = StaticPoolConfig::new(vec![(2, 8), (2, 16)], true);
        let mut local_pool = StaticMemoryPool::new(pool_cfg);
        local_pool.free_element(8, |_| {}).unwrap();
        local_pool.free_element(8, |_| {}).unwrap();
        let mut in_larger_subpool_now = local_pool.free_element(8, |_| {});
        assert!(in_larger_subpool_now.is_ok());
        let generic_addr = in_larger_subpool_now.unwrap();
        let pool_addr = StaticPoolAddr::from(generic_addr);
        assert_eq!(pool_addr.pool_idx, 1);
        assert_eq!(pool_addr.packet_idx, 0);
        assert!(local_pool.has_element_at(&generic_addr).unwrap());
        in_larger_subpool_now = local_pool.free_element(8, |_| {});
        assert!(in_larger_subpool_now.is_ok());
        let generic_addr = in_larger_subpool_now.unwrap();
        let pool_addr = StaticPoolAddr::from(generic_addr);
        assert_eq!(pool_addr.pool_idx, 1);
        assert_eq!(pool_addr.packet_idx, 1);
        assert!(local_pool.has_element_at(&generic_addr).unwrap());
    }

    #[test]
    fn test_spillage_fails_as_well() {
        let pool_cfg = StaticPoolConfig::new(vec![(1, 8), (1, 16)], true);
        let mut local_pool = StaticMemoryPool::new(pool_cfg);
        local_pool.free_element(8, |_| {}).unwrap();
        local_pool.free_element(8, |_| {}).unwrap();
        let should_fail = local_pool.free_element(8, |_| {});
        assert!(should_fail.is_err());
        if let Err(err) = should_fail {
            assert_eq!(err, StoreError::StoreFull(1));
        } else {
            panic!("unexpected store address");
        }
    }

    #[test]
    fn test_spillage_works_across_multiple_subpools() {
        let pool_cfg = StaticPoolConfig::new(vec![(1, 8), (1, 12), (1, 16)], true);
        let mut local_pool = StaticMemoryPool::new(pool_cfg);
        local_pool.free_element(8, |_| {}).unwrap();
        local_pool.free_element(12, |_| {}).unwrap();
        let in_larger_subpool_now = local_pool.free_element(8, |_| {});
        assert!(in_larger_subpool_now.is_ok());
        let generic_addr = in_larger_subpool_now.unwrap();
        let pool_addr = StaticPoolAddr::from(generic_addr);
        assert_eq!(pool_addr.pool_idx, 2);
        assert_eq!(pool_addr.packet_idx, 0);
        assert!(local_pool.has_element_at(&generic_addr).unwrap());
    }

    #[test]
    fn test_spillage_fails_across_multiple_subpools() {
        let pool_cfg = StaticPoolConfig::new(vec![(1, 8), (1, 12), (1, 16)], true);
        let mut local_pool = StaticMemoryPool::new(pool_cfg);
        local_pool.free_element(8, |_| {}).unwrap();
        local_pool.free_element(12, |_| {}).unwrap();
        local_pool.free_element(16, |_| {}).unwrap();
        let should_fail = local_pool.free_element(8, |_| {});
        assert!(should_fail.is_err());
        if let Err(err) = should_fail {
            assert_eq!(err, StoreError::StoreFull(2));
        } else {
            panic!("unexpected store address");
        }
    }
}