1use super::{
2 MemStore, MummyItem, MummyObj, Obj, ObjPtr, ObjRef, ShaleError, ShaleStore,
3};
4use std::cell::UnsafeCell;
5use std::io::{Cursor, Write};
6use std::rc::Rc;
7
/// Per-chunk header stored immediately before each payload in the
/// compact (data) space.
pub struct CompactHeader {
    // size in bytes of the payload that follows this header
    payload_size: u64,
    // true when the chunk is currently on the free list
    is_freed: bool,
    // when freed, the address of this chunk's free-list descriptor
    desc_addr: ObjPtr<CompactDescriptor>,
}
13
impl CompactHeader {
    #![allow(dead_code)]

    /// Serialized size: payload_size (8) + is_freed (1) + desc_addr (8).
    pub const MSIZE: u64 = 17;

    /// Whether the chunk guarded by this header is on the free list.
    pub fn is_freed(&self) -> bool {
        self.is_freed
    }

    /// Payload size in bytes (header and footer excluded).
    pub fn payload_size(&self) -> u64 {
        self.payload_size
    }
}
26
27impl MummyItem for CompactHeader {
28 fn hydrate(addr: u64, mem: &dyn MemStore) -> Result<Self, ShaleError> {
29 let raw = mem
30 .get_view(addr, Self::MSIZE)
31 .ok_or(ShaleError::LinearMemStoreError)?;
32 let payload_size = u64::from_le_bytes(raw[..8].try_into().unwrap());
33 let is_freed = if raw[8] == 0 { false } else { true };
34 let desc_addr = u64::from_le_bytes(raw[9..17].try_into().unwrap());
35 Ok(Self {
36 payload_size,
37 is_freed,
38 desc_addr: ObjPtr::new(desc_addr),
39 })
40 }
41
42 fn dehydrated_len(&self) -> u64 {
43 Self::MSIZE
44 }
45
46 fn dehydrate(&self, to: &mut [u8]) {
47 let mut cur = Cursor::new(to);
48 cur.write_all(&self.payload_size.to_le_bytes()).unwrap();
49 cur.write_all(&[if self.is_freed { 1 } else { 0 }]).unwrap();
50 cur.write_all(&self.desc_addr.addr().to_le_bytes()).unwrap();
51 }
52}
53
/// Per-chunk footer; mirrors the header's payload size so the allocator
/// can walk backwards from a chunk boundary to the previous chunk's
/// header when coalescing free neighbors.
struct CompactFooter {
    payload_size: u64,
}
57
impl CompactFooter {
    // serialized size: a single little-endian u64
    const MSIZE: u64 = 8;
}
61
62impl MummyItem for CompactFooter {
63 fn hydrate(addr: u64, mem: &dyn MemStore) -> Result<Self, ShaleError> {
64 let raw = mem
65 .get_view(addr, Self::MSIZE)
66 .ok_or(ShaleError::LinearMemStoreError)?;
67 let payload_size = u64::from_le_bytes(raw.deref().try_into().unwrap());
68 Ok(Self { payload_size })
69 }
70
71 fn dehydrated_len(&self) -> u64 {
72 Self::MSIZE
73 }
74
75 fn dehydrate(&self, to: &mut [u8]) {
76 Cursor::new(to)
77 .write_all(&self.payload_size.to_le_bytes())
78 .unwrap();
79 }
80}
81
/// Free-list entry kept in the meta space: one per free chunk.
#[derive(Clone, Copy)]
struct CompactDescriptor {
    // payload size of the free chunk it describes
    payload_size: u64,
    // address of the free chunk's header in the compact space
    haddr: u64,
}
87
impl CompactDescriptor {
    // serialized size: payload_size (8) + haddr (8)
    const MSIZE: u64 = 16;
}
91
92impl MummyItem for CompactDescriptor {
93 fn hydrate(addr: u64, mem: &dyn MemStore) -> Result<Self, ShaleError> {
94 let raw = mem
95 .get_view(addr, Self::MSIZE)
96 .ok_or(ShaleError::LinearMemStoreError)?;
97 let payload_size = u64::from_le_bytes(raw[..8].try_into().unwrap());
98 let haddr = u64::from_le_bytes(raw[8..].try_into().unwrap());
99 Ok(Self {
100 payload_size,
101 haddr,
102 })
103 }
104
105 fn dehydrated_len(&self) -> u64 {
106 Self::MSIZE
107 }
108
109 fn dehydrate(&self, to: &mut [u8]) {
110 let mut cur = Cursor::new(to);
111 cur.write_all(&self.payload_size.to_le_bytes()).unwrap();
112 cur.write_all(&self.haddr.to_le_bytes()).unwrap();
113 }
114}
115
/// Persistent root of a compact space: bump pointers for both backing
/// stores plus the bounds of the free-chunk descriptor list.
pub struct CompactSpaceHeader {
    // one past the last descriptor byte in the meta space
    meta_space_tail: u64,
    // one past the last allocated byte in the compact space
    compact_space_tail: u64,
    // start of the descriptor list in the meta space
    base_addr: ObjPtr<CompactDescriptor>,
    // descriptor where the next free-list search resumes (round-robin cursor)
    alloc_addr: ObjPtr<CompactDescriptor>,
}
122
/// The same header split into four independently writable field objects,
/// so each u64 can be dirtied and flushed on its own.
struct CompactSpaceHeaderSliced {
    meta_space_tail: Obj<U64Field>,
    compact_space_tail: Obj<U64Field>,
    base_addr: Obj<ObjPtr<CompactDescriptor>>,
    alloc_addr: Obj<ObjPtr<CompactDescriptor>>,
}
129
impl CompactSpaceHeaderSliced {
    /// Flush any dirtied header fields back to the meta space.
    fn flush_dirty(&mut self) {
        self.meta_space_tail.flush_dirty();
        self.compact_space_tail.flush_dirty();
        self.base_addr.flush_dirty();
        self.alloc_addr.flush_dirty();
    }
}
138
impl CompactSpaceHeader {
    // serialized size: four little-endian u64 fields
    pub const MSIZE: u64 = 32;

    /// Fresh header for an empty space: both tails sit at their bases and
    /// the descriptor list is empty (base == alloc == meta_base).
    pub fn new(meta_base: u64, compact_base: u64) -> Self {
        unsafe {
            Self {
                meta_space_tail: meta_base,
                compact_space_tail: compact_base,
                base_addr: ObjPtr::new_from_addr(meta_base),
                alloc_addr: ObjPtr::new_from_addr(meta_base),
            }
        }
    }

    /// Split the header object into four field-sized sub-objects.
    /// Offsets (0, 8, 16, 24) must stay in sync with the serialized layout
    /// used by `hydrate`/`dehydrate` below.
    fn into_fields(
        r: Obj<Self>,
    ) -> Result<CompactSpaceHeaderSliced, ShaleError> {
        unsafe {
            Ok(CompactSpaceHeaderSliced {
                meta_space_tail: MummyObj::slice(
                    &r,
                    0,
                    8,
                    U64Field(r.meta_space_tail),
                )?,
                compact_space_tail: MummyObj::slice(
                    &r,
                    8,
                    8,
                    U64Field(r.compact_space_tail),
                )?,
                base_addr: MummyObj::slice(&r, 16, 8, r.base_addr)?,
                alloc_addr: MummyObj::slice(&r, 24, 8, r.alloc_addr)?,
            })
        }
    }
}
176
177impl MummyItem for CompactSpaceHeader {
178 fn hydrate(addr: u64, mem: &dyn MemStore) -> Result<Self, ShaleError> {
179 let raw = mem
180 .get_view(addr, Self::MSIZE)
181 .ok_or(ShaleError::LinearMemStoreError)?;
182 let meta_space_tail = u64::from_le_bytes(raw[..8].try_into().unwrap());
183 let compact_space_tail =
184 u64::from_le_bytes(raw[8..16].try_into().unwrap());
185 let base_addr = u64::from_le_bytes(raw[16..24].try_into().unwrap());
186 let alloc_addr = u64::from_le_bytes(raw[24..].try_into().unwrap());
187 unsafe {
188 Ok(Self {
189 meta_space_tail,
190 compact_space_tail,
191 base_addr: ObjPtr::new_from_addr(base_addr),
192 alloc_addr: ObjPtr::new_from_addr(alloc_addr),
193 })
194 }
195 }
196
197 fn dehydrated_len(&self) -> u64 {
198 Self::MSIZE
199 }
200
201 fn dehydrate(&self, to: &mut [u8]) {
202 let mut cur = Cursor::new(to);
203 cur.write_all(&self.meta_space_tail.to_le_bytes()).unwrap();
204 cur.write_all(&self.compact_space_tail.to_le_bytes())
205 .unwrap();
206 cur.write_all(&self.base_addr.addr().to_le_bytes()).unwrap();
207 cur.write_all(&self.alloc_addr.addr().to_le_bytes())
208 .unwrap();
209 }
210}
211
/// Newtype wrapper so a bare `ObjPtr` field can be (de)serialized as a
/// standalone `MummyItem` (used for the sliced header fields).
struct ObjPtrField<T>(ObjPtr<T>);

impl<T> ObjPtrField<T> {
    // serialized size: one little-endian u64 address
    const MSIZE: u64 = 8;
}
217
218impl<T> MummyItem for ObjPtrField<T> {
219 fn hydrate(addr: u64, mem: &dyn MemStore) -> Result<Self, ShaleError> {
220 let raw = mem
221 .get_view(addr, Self::MSIZE)
222 .ok_or(ShaleError::LinearMemStoreError)?;
223 unsafe {
224 Ok(Self(ObjPtr::new_from_addr(u64::from_le_bytes(
225 raw.deref().try_into().unwrap(),
226 ))))
227 }
228 }
229
230 fn dehydrate(&self, to: &mut [u8]) {
231 Cursor::new(to)
232 .write_all(&self.0.addr().to_le_bytes())
233 .unwrap()
234 }
235
236 fn dehydrated_len(&self) -> u64 {
237 Self::MSIZE
238 }
239}
240
// Transparent access to the wrapped pointer.
impl<T> std::ops::Deref for ObjPtrField<T> {
    type Target = ObjPtr<T>;
    fn deref(&self) -> &ObjPtr<T> {
        &self.0
    }
}
247
// Transparent mutable access to the wrapped pointer.
impl<T> std::ops::DerefMut for ObjPtrField<T> {
    fn deref_mut(&mut self) -> &mut ObjPtr<T> {
        &mut self.0
    }
}
253
/// Newtype wrapper so a bare u64 field can be (de)serialized as a
/// standalone `MummyItem` (used for the sliced header tails).
struct U64Field(u64);

impl U64Field {
    // serialized size: one little-endian u64
    const MSIZE: u64 = 8;
}
259
260impl MummyItem for U64Field {
261 fn hydrate(addr: u64, mem: &dyn MemStore) -> Result<Self, ShaleError> {
262 let raw = mem
263 .get_view(addr, Self::MSIZE)
264 .ok_or(ShaleError::LinearMemStoreError)?;
265 Ok(Self(u64::from_le_bytes(raw.deref().try_into().unwrap())))
266 }
267
268 fn dehydrated_len(&self) -> u64 {
269 Self::MSIZE
270 }
271
272 fn dehydrate(&self, to: &mut [u8]) {
273 Cursor::new(to).write_all(&self.0.to_le_bytes()).unwrap()
274 }
275}
276
// Transparent access to the wrapped integer.
impl std::ops::Deref for U64Field {
    type Target = u64;
    fn deref(&self) -> &u64 {
        &self.0
    }
}
283
// Transparent mutable access to the wrapped integer.
impl std::ops::DerefMut for U64Field {
    fn deref_mut(&mut self) -> &mut u64 {
        &mut self.0
    }
}
289
/// Allocator state shared by all operations on a `CompactSpace`.
struct CompactSpaceInner<T: MummyItem> {
    // backing store for the space header fields and the descriptor list
    meta_space: Rc<dyn MemStore>,
    // backing store for the chunks themselves (header | payload | footer)
    compact_space: Rc<dyn MemStore>,
    // the space header, pre-sliced into independently flushable fields
    header: CompactSpaceHeaderSliced,
    obj_cache: super::ObjCache<T>,
    // max descriptors examined per allocation before giving up on reuse
    alloc_max_walk: u64,
    // log2 of the region size; chunks never straddle region boundaries
    regn_nbit: u64,
}
298
impl<T: MummyItem> CompactSpaceInner<T> {
    /// Load a free-list descriptor from the meta space.
    fn get_descriptor(
        &self, ptr: ObjPtr<CompactDescriptor>,
    ) -> Result<Obj<CompactDescriptor>, ShaleError> {
        unsafe {
            MummyObj::ptr_to_obj(
                self.meta_space.as_ref(),
                ptr,
                CompactDescriptor::MSIZE,
            )
        }
    }

    /// Load a typed object of at most `len_limit` bytes from the data space.
    fn get_data_ref<U: MummyItem + 'static>(
        &self, ptr: ObjPtr<U>, len_limit: u64,
    ) -> Result<Obj<U>, ShaleError> {
        unsafe {
            MummyObj::ptr_to_obj(self.compact_space.as_ref(), ptr, len_limit)
        }
    }

    /// Load a chunk header from the data space.
    fn get_header(
        &self, ptr: ObjPtr<CompactHeader>,
    ) -> Result<Obj<CompactHeader>, ShaleError> {
        self.get_data_ref::<CompactHeader>(ptr, CompactHeader::MSIZE)
    }

    /// Load a chunk footer from the data space.
    fn get_footer(
        &self, ptr: ObjPtr<CompactFooter>,
    ) -> Result<Obj<CompactFooter>, ShaleError> {
        self.get_data_ref::<CompactFooter>(ptr, CompactFooter::MSIZE)
    }

    /// Remove a descriptor from the free list.
    ///
    /// The list is a contiguous array ending at `meta_space_tail`, so
    /// removal is swap-with-last: shrink the tail, and if the victim was
    /// not the last entry, move the last entry into its slot and repoint
    /// the moved chunk's header at the new descriptor address.
    fn del_desc(
        &mut self, desc_addr: ObjPtr<CompactDescriptor>,
    ) -> Result<(), ShaleError> {
        let desc_size = CompactDescriptor::MSIZE;
        // descriptors are densely packed; the address must be slot-aligned
        debug_assert!(
            (desc_addr.addr - self.header.base_addr.addr) % desc_size == 0
        );
        self.header.meta_space_tail.write(|r| **r -= desc_size);
        if desc_addr.addr != **self.header.meta_space_tail {
            // after the tail shrink, `meta_space_tail` is the old last slot
            let desc_last = self.get_descriptor(unsafe {
                ObjPtr::new_from_addr(**self.header.meta_space_tail)
            })?;
            let mut desc = self.get_descriptor(unsafe {
                ObjPtr::new_from_addr(desc_addr.addr)
            })?;
            desc.write(|r| *r = *desc_last);
            // keep the back-pointer in the moved chunk's header consistent
            let mut header = self.get_header(ObjPtr::new(desc.haddr))?;
            header.write(|h| h.desc_addr = desc_addr);
        }
        Ok(())
    }

    /// Append a fresh (uninitialized) descriptor slot at the meta tail.
    fn new_desc(&mut self) -> Result<ObjPtr<CompactDescriptor>, ShaleError> {
        let addr = **self.header.meta_space_tail;
        self.header
            .meta_space_tail
            .write(|r| **r += CompactDescriptor::MSIZE);
        Ok(unsafe { ObjPtr::new_from_addr(addr) })
    }

    /// Return the payload at `addr` to the free list, coalescing with a
    /// free neighbor on either side when one exists in the same region.
    fn free(&mut self, addr: u64) -> Result<(), ShaleError> {
        let hsize = CompactHeader::MSIZE;
        let fsize = CompactFooter::MSIZE;
        let regn_size = 1 << self.regn_nbit;

        // `addr` points at the payload; the chunk's header sits just before it
        let mut offset = addr - hsize;
        let header_payload_size = {
            let header = self.get_header(ObjPtr::new(offset))?;
            assert!(!header.is_freed);
            header.payload_size
        };
        // h/payload_size track the (possibly grown) merged chunk
        let mut h = offset;
        let mut payload_size = header_payload_size;

        // Merge with the previous chunk, but only if our header is not at the
        // start of a region (chunks never straddle region boundaries).
        if offset & (regn_size - 1) > 0 {
            // the previous chunk's footer ends where our header begins
            offset -= fsize;
            let (pheader_is_freed, pheader_payload_size, pheader_desc_addr) = {
                let pfooter = self.get_footer(ObjPtr::new(offset))?;
                // hop over the previous payload to reach its header
                offset -= pfooter.payload_size + hsize;
                let pheader = self.get_header(ObjPtr::new(offset))?;
                (pheader.is_freed, pheader.payload_size, pheader.desc_addr)
            };
            if pheader_is_freed {
                // absorb it: extend left and drop its descriptor
                h = offset;
                payload_size += hsize + fsize + pheader_payload_size;
                self.del_desc(pheader_desc_addr)?;
            }
        }

        // Merge with the following chunk.
        offset = addr + header_payload_size;
        let mut f = offset;

        // only look right if a next header exists inside the space and the
        // current region has room for a footer + header pair after us
        if offset + fsize < **self.header.compact_space_tail &&
            (regn_size - (offset & (regn_size - 1))) >= fsize + hsize
        {
            offset += fsize;
            let (nheader_is_freed, nheader_payload_size, nheader_desc_addr) = {
                let nheader = self.get_header(ObjPtr::new(offset))?;
                (nheader.is_freed, nheader.payload_size, nheader.desc_addr)
            };
            if nheader_is_freed {
                // absorb it: extend right through its footer
                offset += hsize + nheader_payload_size;
                f = offset;
                {
                    // sanity: header and footer must agree on the size
                    let nfooter = self.get_footer(ObjPtr::new(offset))?;
                    assert!(nheader_payload_size == nfooter.payload_size);
                }
                payload_size += hsize + fsize + nheader_payload_size;
                self.del_desc(nheader_desc_addr)?;
            }
        }

        // Publish the merged chunk: fresh descriptor, then rewrite the
        // surviving header/footer pair to span the whole free range.
        let desc_addr = self.new_desc()?;
        {
            let mut desc = self.get_descriptor(desc_addr)?;
            desc.write(|d| {
                d.payload_size = payload_size;
                d.haddr = h;
            });
        }
        let mut h = self.get_header(ObjPtr::new(h))?;
        let mut f = self.get_footer(ObjPtr::new(f))?;
        h.write(|h| {
            h.payload_size = payload_size;
            h.is_freed = true;
            h.desc_addr = desc_addr;
        });
        f.write(|f| f.payload_size = payload_size);
        Ok(())
    }

    /// First-fit scan of the descriptor list, resuming from the saved
    /// round-robin cursor (`alloc_addr`). Returns the payload address on
    /// success, or `None` if nothing suitable is found within
    /// `alloc_max_walk` probes.
    fn alloc_from_freed(
        &mut self, length: u64,
    ) -> Result<Option<u64>, ShaleError> {
        let tail = **self.header.meta_space_tail;
        // empty descriptor list: nothing to recycle
        if tail == self.header.base_addr.addr {
            return Ok(None)
        }

        let hsize = CompactHeader::MSIZE;
        let fsize = CompactFooter::MSIZE;
        let dsize = CompactDescriptor::MSIZE;

        let mut old_alloc_addr = *self.header.alloc_addr;

        // the saved cursor may lie past the shrunken list; wrap to the start
        if old_alloc_addr.addr >= tail {
            old_alloc_addr = *self.header.base_addr;
        }

        let mut ptr = old_alloc_addr;
        let mut res: Option<u64> = None;
        for _ in 0..self.alloc_max_walk {
            assert!(ptr.addr < tail);
            let (desc_payload_size, desc_haddr) = {
                let desc = self.get_descriptor(ptr)?;
                (desc.payload_size, desc.haddr)
            };
            let exit = if desc_payload_size == length {
                // exact fit: claim the whole chunk as-is
                {
                    let mut header =
                        self.get_header(ObjPtr::new(desc_haddr))?;
                    assert_eq!(header.payload_size, desc_payload_size);
                    assert!(header.is_freed);
                    header.write(|h| h.is_freed = false);
                }
                self.del_desc(ptr)?;
                true
            } else if desc_payload_size > length + hsize + fsize {
                // big enough to split: carve `length` off the front and
                // re-register the remainder as a smaller free chunk
                {
                    let mut lheader =
                        self.get_header(ObjPtr::new(desc_haddr))?;
                    assert_eq!(lheader.payload_size, desc_payload_size);
                    assert!(lheader.is_freed);
                    lheader.write(|h| {
                        h.is_freed = false;
                        h.payload_size = length;
                    });
                }
                {
                    // new footer terminating the allocated front part
                    let mut lfooter = self
                        .get_footer(ObjPtr::new(desc_haddr + hsize + length))?;
                    lfooter.write(|f| f.payload_size = length);
                }

                // remainder chunk starts right after the new footer
                let offset = desc_haddr + hsize + length + fsize;
                let rpayload_size = desc_payload_size - length - fsize - hsize;
                let rdesc_addr = self.new_desc()?;
                {
                    let mut rdesc = self.get_descriptor(rdesc_addr)?;
                    rdesc.write(|rd| {
                        rd.payload_size = rpayload_size;
                        rd.haddr = offset;
                    });
                }
                {
                    let mut rheader = self.get_header(ObjPtr::new(offset))?;
                    rheader.write(|rh| {
                        rh.is_freed = true;
                        rh.payload_size = rpayload_size;
                        rh.desc_addr = rdesc_addr;
                    });
                }
                {
                    let mut rfooter = self.get_footer(ObjPtr::new(
                        offset + hsize + rpayload_size,
                    ))?;
                    rfooter.write(|f| f.payload_size = rpayload_size);
                }
                self.del_desc(ptr)?;
                true
            } else {
                false
            };
            if exit {
                // remember where we stopped so the next call resumes here
                self.header.alloc_addr.write(|r| *r = ptr);
                // callers get the payload address, past the chunk header
                res = Some(desc_haddr + hsize);
                break
            }
            ptr.addr += dsize;
            if ptr.addr >= tail {
                ptr = *self.header.base_addr;
            }
            // full circle: every descriptor has been inspected
            if ptr == old_alloc_addr {
                break
            }
        }
        Ok(res)
    }

    /// Bump-allocate a brand new chunk at the end of the compact space,
    /// padding past the region boundary when the chunk would straddle it.
    fn alloc_new(&mut self, length: u64) -> Result<u64, ShaleError> {
        let regn_size = 1 << self.regn_nbit;
        let total_length = CompactHeader::MSIZE + length + CompactFooter::MSIZE;
        let mut offset = **self.header.compact_space_tail;
        self.header.compact_space_tail.write(|r| {
            // chunks must be fully contained in one region: if the current
            // region cannot fit the whole chunk, skip its remainder
            let rem = regn_size - (offset & (regn_size - 1));
            if rem < total_length {
                offset += rem;
                **r += rem;
            }
            **r += total_length
        });
        let mut h = self.get_header(ObjPtr::new(offset))?;
        let mut f = self
            .get_footer(ObjPtr::new(offset + CompactHeader::MSIZE + length))?;
        h.write(|h| {
            h.payload_size = length;
            h.is_freed = false;
            // not on the free list, so no descriptor to point at
            h.desc_addr = ObjPtr::new(0);
        });
        f.write(|f| f.payload_size = length);
        // return the payload address, just past the header
        Ok(offset + CompactHeader::MSIZE)
    }
}
561
/// A `ShaleStore` backed by the compact chunk allocator.
/// `UnsafeCell` provides interior mutability through `&self` methods;
/// NOTE(review): this looks single-threaded by design — confirm callers
/// never share a `CompactSpace` across threads.
pub struct CompactSpace<T: MummyItem> {
    inner: UnsafeCell<CompactSpaceInner<T>>,
}
565
566impl<T: MummyItem> CompactSpace<T> {
567 pub fn new(
568 meta_space: Rc<dyn MemStore>, compact_space: Rc<dyn MemStore>,
569 header: Obj<CompactSpaceHeader>, obj_cache: super::ObjCache<T>,
570 alloc_max_walk: u64, regn_nbit: u64,
571 ) -> Result<Self, ShaleError> {
572 let cs = CompactSpace {
573 inner: UnsafeCell::new(CompactSpaceInner {
574 meta_space,
575 compact_space,
576 header: CompactSpaceHeader::into_fields(header)?,
577 obj_cache,
578 alloc_max_walk,
579 regn_nbit,
580 }),
581 };
582 Ok(cs)
583 }
584}
585
586impl<T: MummyItem + 'static> ShaleStore<T> for CompactSpace<T> {
    /// Allocate room for `item` plus `extra` spare bytes, write it into the
    /// compact space, and return a cached reference to it.
    fn put_item(&self, item: T, extra: u64) -> Result<ObjRef<T>, ShaleError> {
        let size = item.dehydrated_len() + extra;
        // SAFETY: relies on no other live borrow of `inner`;
        // NOTE(review): assumes single-threaded use — confirm.
        let inner = unsafe { &mut *self.inner.get() };
        let ptr: ObjPtr<T> = unsafe {
            ObjPtr::new_from_addr(
                // prefer recycling a free chunk; fall back to bump allocation
                if let Some(addr) = inner.alloc_from_freed(size)? {
                    addr
                } else {
                    inner.alloc_new(size)?
                },
            )
        };
        let mut u = inner.obj_cache.put(unsafe {
            MummyObj::item_to_obj(
                inner.compact_space.as_ref(),
                ptr.addr(),
                size,
                item,
            )?
        });
        // no-op write marks the freshly created object dirty for flushing
        u.write(|_| {}).unwrap();
        Ok(u)
    }
610
    /// Evict any cached copy and return the chunk to the free list.
    fn free_item(&mut self, ptr: ObjPtr<T>) -> Result<(), ShaleError> {
        let inner = self.inner.get_mut();
        inner.obj_cache.pop(ptr);
        inner.free(ptr.addr())
    }
616
    /// Fetch an item by pointer, serving from the cache when possible.
    fn get_item(&self, ptr: ObjPtr<T>) -> Result<ObjRef<T>, ShaleError> {
        let inner = unsafe { &*self.inner.get() };
        if let Some(r) = inner.obj_cache.get(ptr)? {
            return Ok(r)
        }
        // a valid payload can never live inside the space header area
        if ptr.addr() < CompactSpaceHeader::MSIZE {
            return Err(ShaleError::ObjPtrInvalid)
        }
        // read the chunk header just before the payload to learn its length
        let h =
            inner.get_header(ObjPtr::new(ptr.addr() - CompactHeader::MSIZE))?;
        Ok(inner
            .obj_cache
            .put(inner.get_data_ref(ptr, h.payload_size)?))
    }
631
    /// Write back all dirtied header fields and cached objects.
    fn flush_dirty(&self) -> Option<()> {
        // SAFETY: relies on no other live borrow of `inner`;
        // NOTE(review): assumes single-threaded use — confirm.
        let inner = unsafe { &mut *self.inner.get() };
        inner.header.flush_dirty();
        inner.obj_cache.flush_dirty()
    }
637}