use super::{
aux::Aux,
cigar::{self, CigarOp},
record::{self, DecodeError},
seq,
};
use seqair_types::{BamFlags, Base, BaseQuality, Pos0};
/// Compact per-record index entry: the fixed-size BAM header fields are kept
/// inline, while every variable-length payload (name, bases, cigar, qual, aux)
/// is referenced by offset/length into the owning [`RecordStore`]'s slabs.
pub struct SlimRecord {
/// 0-based start position on the reference.
pub pos: Pos0,
/// 0-based end position, derived from the CIGAR at push time.
pub end_pos: Pos0,
pub flags: BamFlags,
pub n_cigar_ops: u16,
pub mapq: u8,
/// Read length; also the length of the qual slice.
pub seq_len: u32,
/// Tally derived from the CIGAR at push time (see `cigar::calc_matches_indels`).
pub matching_bases: u32,
/// Tally derived from the CIGAR at push time (see `cigar::calc_matches_indels`).
pub indel_bases: u32,
pub tid: i32,
pub next_ref_id: i32,
pub next_pos: i32,
pub template_len: i32,
// Offsets/lengths into the RecordStore slabs. Kept private so payloads can
// only be reached through the bounds-checked accessors below.
name_off: u32,
name_len: u16,
bases_off: u32,
cigar_off: u32,
qual_off: u32,
aux_off: u32,
aux_len: u32,
extras_idx: u32,
}
impl SlimRecord {
fn cigar_len(&self) -> usize {
self.n_cigar_ops as usize
}
pub fn qname<'store, U>(
&self,
store: &'store RecordStore<U>,
) -> Result<&'store [u8], RecordAccessError> {
let start = self.name_off as usize;
let end =
start.checked_add(self.name_len as usize).ok_or(RecordAccessError::OffsetOverflow {
slab: Slab::Names,
offset: self.name_off,
len: self.name_len as usize,
})?;
store.names.get(start..end).ok_or(RecordAccessError::SlabOffsetOutOfRange {
slab: Slab::Names,
offset: self.name_off,
})
}
pub fn seq<'store, U>(
&self,
store: &'store RecordStore<U>,
) -> Result<&'store [Base], RecordAccessError> {
let start = self.bases_off as usize;
let end =
start.checked_add(self.seq_len as usize).ok_or(RecordAccessError::OffsetOverflow {
slab: Slab::Bases,
offset: self.bases_off,
len: self.seq_len as usize,
})?;
store.bases.get(start..end).ok_or(RecordAccessError::SlabOffsetOutOfRange {
slab: Slab::Bases,
offset: self.bases_off,
})
}
pub fn qual<'store, U>(
&self,
store: &'store RecordStore<U>,
) -> Result<&'store [BaseQuality], RecordAccessError> {
let start = self.qual_off as usize;
let end =
start.checked_add(self.seq_len as usize).ok_or(RecordAccessError::OffsetOverflow {
slab: Slab::Qual,
offset: self.qual_off,
len: self.seq_len as usize,
})?;
let q = store.qual.get(start..end).ok_or(RecordAccessError::SlabOffsetOutOfRange {
slab: Slab::Qual,
offset: self.qual_off,
})?;
Ok(BaseQuality::slice_from_bytes(q))
}
pub fn cigar<'store, U>(
&self,
store: &'store RecordStore<U>,
) -> Result<&'store [CigarOp], RecordAccessError> {
let start = self.cigar_off as usize;
let end = start.checked_add(self.cigar_len()).ok_or(RecordAccessError::OffsetOverflow {
slab: Slab::Cigar,
offset: self.cigar_off,
len: self.cigar_len(),
})?;
store.cigar.get(start..end).ok_or(RecordAccessError::SlabOffsetOutOfRange {
slab: Slab::Cigar,
offset: self.cigar_off,
})
}
pub fn aux<'store, U>(
&self,
store: &'store RecordStore<U>,
) -> Result<Aux<'store>, RecordAccessError> {
let start = self.aux_off as usize;
let end =
start.checked_add(self.aux_len as usize).ok_or(RecordAccessError::OffsetOverflow {
slab: Slab::Aux,
offset: self.aux_off,
len: self.aux_len as usize,
})?;
let bytes = store.aux.get(start..end).ok_or(RecordAccessError::SlabOffsetOutOfRange {
slab: Slab::Aux,
offset: self.aux_off,
})?;
Ok(Aux::new(bytes))
}
pub fn extra<'store, U>(
&self,
store: &'store RecordStore<U>,
) -> Result<&'store U, RecordAccessError> {
store.extras.get(self.extras_idx as usize).ok_or(RecordAccessError::SlabOffsetOutOfRange {
slab: Slab::Extras,
offset: self.extras_idx,
})
}
}
// Compile-time guard: SlimRecord entries fill the records slab, so any field
// addition that grows the struct past 72 bytes must be a deliberate decision.
const _: () =
assert!(std::mem::size_of::<SlimRecord>() <= 72, "SlimRecord grew unexpectedly large");
/// Errors returned by the bounds-checked slab accessors on [`SlimRecord`].
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum RecordAccessError {
/// The `offset..offset + len` range does not lie within the named slab.
#[error("Cannot get {slab:?} entry for record: offset {offset} out of range")]
SlabOffsetOutOfRange { slab: Slab, offset: u32 },
/// `offset + len` overflowed `usize` while computing the slice end.
#[error(
"Cannot get {slab:?} entry for record: offset {offset} + length {len} overflows slab limits"
)]
OffsetOverflow { slab: Slab, offset: u32, len: usize },
}
/// Identifies which storage slab a [`RecordAccessError`] refers to.
#[derive(Debug)]
#[non_exhaustive]
pub enum Slab {
Records,
Names,
Bases,
Cigar,
Qual,
Aux,
Extras,
}
/// Structure-of-arrays store for decoded BAM records: one growable "slab" per
/// payload kind, addressed by the offsets recorded in each [`SlimRecord`].
/// `U` is an optional per-record extra produced by a [`CustomizeRecordStore`].
pub struct RecordStore<U = ()> {
records: Vec<SlimRecord>,
names: Vec<u8>,
bases: Vec<Base>,
cigar: Vec<CigarOp>,
qual: Vec<u8>,
aux: Vec<u8>,
extras: Vec<U>,
}
impl<U> RecordStore<U> {
    /// Creates an empty store with no preallocated capacity.
    pub fn new() -> Self {
        Self {
            records: vec![],
            names: vec![],
            bases: vec![],
            cigar: vec![],
            qual: vec![],
            aux: vec![],
            extras: vec![],
        }
    }

    /// Preallocates every slab from the size of the compressed input, using
    /// rough per-record heuristics so typical inputs avoid slab regrowth.
    pub fn with_byte_hint(compressed_bytes: usize) -> Self {
        // Heuristic assumptions: ~5x compression, ~550 uncompressed bytes per
        // record, and typical per-record payload sizes below.
        const EST_COMPRESSION_RATIO: usize = 5;
        const EST_BYTES_PER_RECORD: usize = 550;
        const MIN_RECORD_EST: usize = 64;
        const EST_NAME_BYTES_PER_RECORD: usize = 56;
        const EST_BASES_PER_RECORD: usize = 150;
        const EST_CIGAR_OPS_PER_RECORD: usize = 2;
        const EST_QUAL_BYTES_PER_RECORD: usize = 150;
        const EST_AUX_BYTES_PER_RECORD: usize = 180;

        let uncompressed_est = compressed_bytes.saturating_mul(EST_COMPRESSION_RATIO);
        let record_count_est = (uncompressed_est / EST_BYTES_PER_RECORD).max(MIN_RECORD_EST);
        Self {
            records: Vec::with_capacity(record_count_est),
            names: Vec::with_capacity(record_count_est.saturating_mul(EST_NAME_BYTES_PER_RECORD)),
            bases: Vec::with_capacity(record_count_est.saturating_mul(EST_BASES_PER_RECORD)),
            cigar: Vec::with_capacity(record_count_est.saturating_mul(EST_CIGAR_OPS_PER_RECORD)),
            qual: Vec::with_capacity(record_count_est.saturating_mul(EST_QUAL_BYTES_PER_RECORD)),
            aux: Vec::with_capacity(record_count_est.saturating_mul(EST_AUX_BYTES_PER_RECORD)),
            extras: Vec::with_capacity(record_count_est),
        }
    }
}
impl<U> RecordStore<U> {
// Reverts the store to its state before the most recent push. Correct only
// when called immediately after push_raw/push_fields appended a record: the
// popped record's slab offsets are exactly the pre-push slab lengths.
fn rollback_last_push(&mut self) {
let rec = self.records.pop().expect("rollback_last_push called with empty records Vec");
self.extras
.pop()
.expect("push_raw/push_fields always append a matching () to extras before filter");
self.names.truncate(rec.name_off as usize);
self.bases.truncate(rec.bases_off as usize);
self.cigar.truncate(rec.cigar_off as usize);
self.qual.truncate(rec.qual_off as usize);
self.aux.truncate(rec.aux_off as usize);
}
/// Decodes a raw BAM record body and appends it to the store.
///
/// Returns `Ok(Some(idx))` with the new record's dense index when kept,
/// `Ok(None)` when `customize.filter_raw` or `customize.filter` rejects it
/// (the store is left unchanged in that case), or `Err` on parse failure or
/// slab-index overflow.
pub fn push_raw<E: CustomizeRecordStore<Extra = U>>(
&mut self,
raw: &[u8],
customize: &mut E,
) -> Result<Option<u32>, DecodeError> {
let h = record::parse_header(raw)?;
let idx = u32::try_from(self.records.len()).map_err(|_| DecodeError::SlabOverflow)?;
debug_assert!(h.qual_end <= raw.len(), "qual_end overrun: {} > {}", h.qual_end, raw.len());
#[allow(clippy::indexing_slicing, reason = "all bounds ≤ qual_end ≤ raw.len()")]
let qname_raw = &raw[32..h.var_start];
// Stored name excludes the trailing NUL and anything after it.
let qname_actual_len = qname_raw.iter().position(|&b| b == 0).unwrap_or(qname_raw.len());
#[allow(clippy::indexing_slicing, reason = "all bounds ≤ qual_end ≤ raw.len()")]
let cigar_bytes = &raw[h.var_start..h.cigar_end];
// Early-rejection hook: hand filter_raw borrowed slices of the raw input
// before any slab is touched, so a rejected record costs no copying.
{
#[allow(clippy::indexing_slicing, reason = "qname_actual_len <= qname_raw.len()")]
let qname_stripped = &qname_raw[..qname_actual_len];
#[allow(
clippy::indexing_slicing,
reason = "all bounds \u{2264} qual_end \u{2264} raw.len()"
)]
let qual_slice = &raw[h.seq_end..h.qual_end];
#[allow(clippy::indexing_slicing, reason = "qual_end \u{2264} raw.len()")]
let aux_slice = &raw[h.qual_end..];
#[allow(
clippy::indexing_slicing,
reason = "all bounds \u{2264} qual_end \u{2264} raw.len()"
)]
let packed_seq_slice = &raw[h.cigar_end..h.seq_end];
if !customize.filter_raw(&FilterRawFields {
pos: h.pos,
// NOTE: end_pos / matching_bases / indel_bases are placeholders here —
// the CIGAR has not been decoded yet; the real values are computed below.
end_pos: h.pos, flags: h.flags,
mapq: h.mapq,
n_cigar_ops: h.n_cigar_ops,
seq_len: h.seq_len,
matching_bases: 0,
indel_bases: 0,
tid: h.tid,
next_ref_id: h.next_ref_id,
next_pos: h.next_pos,
template_len: h.template_len,
qname: qname_stripped,
qual_bytes: qual_slice,
aux_bytes: aux_slice,
raw_cigar_bytes: Some(cigar_bytes),
cigar_ops: None,
packed_seq: Some(packed_seq_slice),
bases: None,
}) {
return Ok(None);
}
}
// Decode the CIGAR into its slab first: end_pos and the match/indel
// tallies are derived from the decoded ops.
let cigar_off = u32::try_from(self.cigar.len()).map_err(|_| DecodeError::SlabOverflow)?;
CigarOp::extend_from_bam_bytes(&mut self.cigar, cigar_bytes);
let cigar_start = cigar_off as usize;
#[allow(clippy::indexing_slicing, reason = "just appended; offsets within slab bounds")]
let cigar_ops = &self.cigar[cigar_start..];
// NOTE: an InvalidPosition error here leaves the just-appended cigar ops
// orphaned in the slab (harmless — nothing references them).
let end_pos = cigar::compute_end_pos(h.pos, cigar_ops)
.ok_or(DecodeError::InvalidPosition { value: h.pos.as_i32() })?;
let (matching_bases, indel_bases) = cigar::calc_matches_indels(cigar_ops);
let name_off = u32::try_from(self.names.len()).map_err(|_| DecodeError::SlabOverflow)?;
#[allow(clippy::indexing_slicing, reason = "qname_actual_len ≤ qname_raw.len()")]
self.names.extend_from_slice(&qname_raw[..qname_actual_len]);
let bases_off = u32::try_from(self.bases.len()).map_err(|_| DecodeError::SlabOverflow)?;
#[allow(clippy::indexing_slicing, reason = "all bounds ≤ qual_end ≤ raw.len()")]
let packed_seq = &raw[h.cigar_end..h.seq_end];
let seq_len = h.seq_len as usize;
// Unpack the 4-bit-encoded bases straight into the slab's spare capacity,
// skipping a zero-initialization pass over the destination.
self.bases.reserve(seq_len);
let bases_spare = self.bases.spare_capacity_mut();
// SAFETY: `reserve(seq_len)` guarantees at least `seq_len` spare elements,
// and `decode_bases_into` is expected to fill all `seq_len` bytes before
// `set_len` exposes them. This also treats `Base` as layout-compatible
// with `u8` (presumably `#[repr(u8)]` — confirm in seqair_types before
// changing Base).
unsafe {
let out = std::slice::from_raw_parts_mut(bases_spare.as_mut_ptr() as *mut u8, seq_len);
seq::decode_bases_into(packed_seq, seq_len, out);
self.bases.set_len(
self.bases.len().checked_add(seq_len).expect("bases slab length overflow"),
);
}
let qual_off = u32::try_from(self.qual.len()).map_err(|_| DecodeError::SlabOverflow)?;
#[allow(clippy::indexing_slicing, reason = "all bounds ≤ qual_end ≤ raw.len()")]
self.qual.extend_from_slice(&raw[h.seq_end..h.qual_end]);
let aux_off = u32::try_from(self.aux.len()).map_err(|_| DecodeError::SlabOverflow)?;
#[allow(clippy::indexing_slicing, reason = "qual_end ≤ raw.len()")]
let aux_slice = &raw[h.qual_end..];
self.aux.extend_from_slice(aux_slice);
self.records.push(SlimRecord {
pos: h.pos,
end_pos,
flags: h.flags,
n_cigar_ops: h.n_cigar_ops,
mapq: h.mapq,
seq_len: h.seq_len,
matching_bases,
indel_bases,
tid: h.tid,
next_ref_id: h.next_ref_id,
next_pos: h.next_pos,
template_len: h.template_len,
name_off,
#[expect(
clippy::cast_possible_truncation,
reason = "BAM qname is validated to ≤ 254 bytes by parse_header (l_read_name is u8); fits in u16"
)]
name_len: qname_actual_len as u16,
bases_off,
cigar_off,
qual_off,
aux_off,
#[expect(
clippy::cast_possible_truncation,
reason = "aux data is bounded by slab limits (u32); slab overflow checked above via SlabOverflow"
)]
aux_len: aux_slice.len() as u32,
extras_idx: idx,
});
// The record must already be in the store when the hooks run: both
// compute and filter receive the fully-populated SlimRecord plus the
// store it lives in.
self.extras.push(customize.compute(
self.records.last().expect("just pushed a SlimRecord above; records.last() is Some"),
self,
));
if customize.filter(
self.records.last().expect("just pushed a SlimRecord above; records.last() is Some"),
self,
) {
Ok(Some(idx))
} else {
// Rejected post-push: undo the record and every slab append.
self.rollback_last_push();
Ok(None)
}
}
/// Appends an already-decoded record to the store.
///
/// Mirrors [`Self::push_raw`] but takes decoded fields directly. Returns
/// `Ok(Some(idx))` when kept, `Ok(None)` when a customize hook rejects it
/// (store unchanged), or `Err` when `qual` and `bases` lengths disagree or
/// a slab index overflows `u32`.
#[expect(
clippy::too_many_arguments,
reason = "all fields are needed for zero-copy push into the record store slabs"
)]
pub fn push_fields<E: CustomizeRecordStore<Extra = U>>(
&mut self,
pos: Pos0,
end_pos: Pos0,
flags: BamFlags,
mapq: u8,
matching_bases: u32,
indel_bases: u32,
qname: &[u8],
cigar_ops: &[CigarOp],
bases: &[Base],
qual: &[u8],
aux: &[u8],
tid: i32,
next_ref_id: i32,
next_pos: i32,
template_len: i32,
customize: &mut E,
) -> Result<Option<u32>, DecodeError> {
if qual.len() != bases.len() {
return Err(DecodeError::QualLenMismatch {
qual_len: qual.len(),
seq_len: bases.len(),
});
}
let idx = u32::try_from(self.records.len()).map_err(|_| DecodeError::SlabOverflow)?;
#[expect(
clippy::cast_possible_truncation,
reason = "caller validates cigar op count ≤ 65535 (BAM n_cigar_op is u16); fits in u16"
)]
let n_cigar_ops = cigar_ops.len() as u16;
#[expect(
clippy::cast_possible_truncation,
reason = "seq length is bounded by slab limits (u32); slab overflow checked via SlabOverflow"
)]
let seq_len = bases.len() as u32;
// Early-rejection hook before any slab is written; in this path the
// decoded variants (cigar_ops / bases) are populated.
if !customize.filter_raw(&FilterRawFields {
pos,
end_pos,
flags,
mapq,
n_cigar_ops,
seq_len,
matching_bases,
indel_bases,
tid,
next_ref_id,
next_pos,
template_len,
qname,
qual_bytes: qual,
aux_bytes: aux,
raw_cigar_bytes: None,
cigar_ops: Some(cigar_ops),
packed_seq: None,
bases: Some(bases),
}) {
return Ok(None);
}
// Record each slab's pre-append length; these offsets go into the
// SlimRecord and are what rollback_last_push truncates back to.
let name_off = u32::try_from(self.names.len()).map_err(|_| DecodeError::SlabOverflow)?;
self.names.extend_from_slice(qname);
let bases_off = u32::try_from(self.bases.len()).map_err(|_| DecodeError::SlabOverflow)?;
self.bases.extend_from_slice(bases);
let cigar_off = u32::try_from(self.cigar.len()).map_err(|_| DecodeError::SlabOverflow)?;
self.cigar.extend_from_slice(cigar_ops);
let qual_off = u32::try_from(self.qual.len()).map_err(|_| DecodeError::SlabOverflow)?;
self.qual.extend_from_slice(qual);
let aux_off = u32::try_from(self.aux.len()).map_err(|_| DecodeError::SlabOverflow)?;
self.aux.extend_from_slice(aux);
self.records.push(SlimRecord {
pos,
end_pos,
flags,
n_cigar_ops,
mapq,
seq_len,
matching_bases,
indel_bases,
tid,
next_ref_id,
next_pos,
template_len,
name_off,
#[expect(
clippy::cast_possible_truncation,
reason = "caller validates qname ≤ 254 bytes; fits in u16"
)]
name_len: qname.len() as u16,
bases_off,
cigar_off,
qual_off,
aux_off,
#[expect(
clippy::cast_possible_truncation,
reason = "aux data bounded by slab limits (u32); slab overflow checked via SlabOverflow"
)]
aux_len: aux.len() as u32,
extras_idx: idx,
});
let record =
self.records.last().expect("just pushed a SlimRecord above; records.last() is Some");
self.extras.push(customize.compute(record, self));
if customize.filter(record, self) {
Ok(Some(idx))
} else {
self.rollback_last_push();
Ok(None)
}
}
}
/// Borrowed view of a record handed to [`CustomizeRecordStore::filter_raw`]
/// before anything is written to the store. Path-specific payloads are
/// `Option`s: the raw path fills `raw_cigar_bytes`/`packed_seq`, the decoded
/// path fills `cigar_ops`/`bases`. In the raw path, `end_pos`,
/// `matching_bases` and `indel_bases` are placeholders — the CIGAR has not
/// been decoded at that point.
#[derive(Debug)]
#[non_exhaustive]
pub struct FilterRawFields<'a> {
pub pos: Pos0,
pub end_pos: Pos0,
pub flags: BamFlags,
pub mapq: u8,
pub n_cigar_ops: u16,
pub seq_len: u32,
pub matching_bases: u32,
pub indel_bases: u32,
pub tid: i32,
pub next_ref_id: i32,
pub next_pos: i32,
pub template_len: i32,
pub qname: &'a [u8],
pub qual_bytes: &'a [u8],
pub aux_bytes: &'a [u8],
pub raw_cigar_bytes: Option<&'a [u8]>,
pub cigar_ops: Option<&'a [CigarOp]>,
pub packed_seq: Option<&'a [u8]>,
pub bases: Option<&'a [Base]>,
}
/// Hooks for customizing what a [`RecordStore`] keeps and what per-record
/// extra data (`Extra`) it attaches to each kept record.
pub trait CustomizeRecordStore: Clone {
type Extra;
/// Pre-push filter: return `false` to reject before any slab is written.
#[inline]
fn filter_raw(&mut self, _fields: &FilterRawFields<'_>) -> bool {
true
}
/// Post-push filter: sees the fully-populated record in the store;
/// returning `false` rolls the push back.
#[inline]
fn filter(&mut self, _rec: &SlimRecord, _store: &RecordStore<Self::Extra>) -> bool {
true
}
/// Builds the extra payload stored alongside the just-pushed record.
fn compute(&mut self, rec: &SlimRecord, store: &RecordStore<Self::Extra>) -> Self::Extra;
}
// No-op customization: keeps every record (default filters) and stores no
// per-record extra data.
impl CustomizeRecordStore for () {
type Extra = ();
#[inline]
fn compute(&mut self, _rec: &SlimRecord, _store: &RecordStore<()>) {}
}
impl<U> RecordStore<U> {
/// Sorts records by start position; slab payloads do not move.
pub fn sort_by_pos(&mut self) {
self.records.sort_by_key(|r| r.pos);
}
/// Collapses *adjacent* records that agree on (pos, flags, qname).
/// `dedup_by` only compares neighbours, so sort first (e.g. via
/// [`Self::sort_by_pos`]) for a global dedup. Removed records' slab bytes
/// stay allocated but unreferenced.
pub fn dedup(&mut self) {
let names = &self.names;
self.records.dedup_by(|a, b| {
// Cheap field comparisons first; touch the names slab only on a match.
a.pos == b.pos && a.flags == b.flags && a.name_len == b.name_len && {
let a_start = a.name_off as usize;
let b_start = b.name_off as usize;
let len = a.name_len as usize;
debug_assert!(a_start.saturating_add(len) <= names.len(), "name slice OOB");
debug_assert!(b_start.saturating_add(len) <= names.len(), "name slice OOB");
#[allow(clippy::indexing_slicing, reason = "offsets validated at push time")]
{
names[a_start..a_start.saturating_add(len)]
== names[b_start..b_start.saturating_add(len)]
}
}
});
}
/// Number of records in the store.
pub fn len(&self) -> usize {
self.records.len()
}
/// True when the store holds no records.
pub fn is_empty(&self) -> bool {
self.records.is_empty()
}
/// Returns the record at `idx`; `idx` must be an index previously returned
/// by a push method (out-of-range panics via slice indexing).
#[allow(clippy::indexing_slicing, reason = "idx is always a valid index returned by push_raw")]
pub fn record(&self, idx: u32) -> &SlimRecord {
debug_assert!(
(idx as usize) < self.records.len(),
"record idx {idx} out of bounds (len={})",
self.records.len()
);
&self.records[idx as usize]
}
/// Iterates records in storage order.
pub fn records(&self) -> impl ExactSizeIterator<Item = &SlimRecord> + '_ {
self.records.iter()
}
/// Overwrites the template length of record `idx`; `None` if out of range.
pub fn set_template_len(&mut self, idx: u32, tlen: i32) -> Option<()> {
debug_assert!(
(idx as usize) < self.records.len(),
"set_template_len idx {idx} out of bounds (len={})",
self.records.len()
);
let idx = usize::try_from(idx).ok()?;
self.records.get_mut(idx)?.template_len = tlen;
Some(())
}
/// Overwrites the mate reference/position of record `idx`; `None` if out of range.
pub fn set_mate_info(&mut self, idx: u32, next_ref_id: i32, next_pos: i32) -> Option<()> {
let idx = usize::try_from(idx).ok()?;
let rec = self.records.get_mut(idx)?;
rec.next_ref_id = next_ref_id;
rec.next_pos = next_pos;
Some(())
}
/// Read name of record `idx` (no trailing NUL).
#[allow(clippy::indexing_slicing, reason = "offsets written by push_raw; within slab bounds")]
pub fn qname(&self, idx: u32) -> &[u8] {
let rec = self.record(idx);
let start = rec.name_off as usize;
let end = start.checked_add(rec.name_len as usize).expect("qname end overflow");
debug_assert!(end <= self.names.len(), "qname slab overrun: {end} > {}", self.names.len());
&self.names[start..end]
}
/// Decoded CIGAR ops of record `idx`.
#[allow(clippy::indexing_slicing, reason = "offsets written by push_raw; within slab bounds")]
pub fn cigar(&self, idx: u32) -> &[CigarOp] {
let rec = self.record(idx);
let start = rec.cigar_off as usize;
let end = start.checked_add(rec.cigar_len()).expect("cigar end overflow");
debug_assert!(end <= self.cigar.len(), "cigar slab overrun: {end} > {}", self.cigar.len());
&self.cigar[start..end]
}
/// Decoded bases of record `idx`.
#[allow(clippy::indexing_slicing, reason = "offsets written by push_raw; within slab bounds")]
pub fn seq(&self, idx: u32) -> &[Base] {
let rec = self.record(idx);
let start = rec.bases_off as usize;
let end = start.checked_add(rec.seq_len as usize).expect("seq end overflow");
debug_assert!(end <= self.bases.len(), "bases slab overrun: {end} > {}", self.bases.len());
&self.bases[start..end]
}
/// Base at read offset `pos` of record `idx`, or `Base::Unknown` when
/// `pos` is past the end of the read (or of the slab).
pub fn seq_at(&self, idx: u32, pos: usize) -> Base {
let rec = self.record(idx);
self.bases
.get((rec.bases_off as usize).checked_add(pos).expect("seq_at offset overflow"))
.copied()
.unwrap_or(Base::Unknown)
}
/// Per-base qualities of record `idx` (same length as `seq`).
#[allow(clippy::indexing_slicing, reason = "offsets written by push_raw; within slab bounds")]
pub fn qual(&self, idx: u32) -> &[BaseQuality] {
let rec = self.record(idx);
let start = rec.qual_off as usize;
let end = start.checked_add(rec.seq_len as usize).expect("qual end overflow");
debug_assert!(end <= self.qual.len(), "qual slab overrun: {end} > {}", self.qual.len());
BaseQuality::slice_from_bytes(&self.qual[start..end])
}
/// Raw aux bytes of record `idx`.
#[allow(clippy::indexing_slicing, reason = "offsets written by push_raw; within slab bounds")]
pub fn aux(&self, idx: u32) -> &[u8] {
let rec = self.record(idx);
let start = rec.aux_off as usize;
let end = start.checked_add(rec.aux_len as usize).expect("aux end overflow");
debug_assert!(end <= self.aux.len(), "aux slab overrun: {end} > {}", self.aux.len());
&self.aux[start..end]
}
/// Re-aligns record `idx`: validates the new CIGAR against the read
/// length, appends the new ops to the slab (the old ops remain orphaned
/// but harmless), and refreshes pos/end_pos and the match/indel tallies.
pub fn set_alignment(
&mut self,
idx: u32,
new_pos: Pos0,
new_cigar_ops: &[CigarOp],
) -> Result<(), DecodeError> {
let rec = self.record(idx);
let seq_len = rec.seq_len;
let new_query_len = cigar::calc_query_len(new_cigar_ops);
if new_query_len != seq_len {
return Err(DecodeError::CigarQueryLenMismatch {
cigar_query_len: new_query_len,
seq_len,
});
}
let n_ops = new_cigar_ops.len();
let n_cigar_ops =
u16::try_from(n_ops).map_err(|_| DecodeError::CigarOpCountOverflow { count: n_ops })?;
let end_pos = cigar::compute_end_pos(new_pos, new_cigar_ops)
.ok_or(DecodeError::InvalidPosition { value: new_pos.as_i32() })?;
let (matching_bases, indel_bases) = cigar::calc_matches_indels(new_cigar_ops);
// All validation passed: commit by appending the new ops and mutating
// the record in place.
let new_cigar_off =
u32::try_from(self.cigar.len()).map_err(|_| DecodeError::SlabOverflow)?;
self.cigar.extend_from_slice(new_cigar_ops);
#[allow(clippy::indexing_slicing, reason = "idx validated by self.record() above")]
let rec = &mut self.records[idx as usize];
rec.pos = new_pos;
rec.end_pos = end_pos;
rec.n_cigar_ops = n_cigar_ops;
rec.cigar_off = new_cigar_off;
rec.matching_bases = matching_bases;
rec.indel_bases = indel_bases;
Ok(())
}
/// Extra payload attached to record `idx`.
#[allow(clippy::indexing_slicing, reason = "idx is always a valid index returned by push_raw")]
pub fn extra(&self, idx: u32) -> &U {
let rec = self.record(idx);
let ei = rec.extras_idx as usize;
debug_assert!(
ei < self.extras.len(),
"extras_idx {ei} out of bounds (len={})",
self.extras.len()
);
&self.extras[ei]
}
/// Mutable extra payload attached to record `idx`.
#[allow(clippy::indexing_slicing, reason = "idx is always a valid index returned by push_raw")]
pub fn extra_mut(&mut self, idx: u32) -> &mut U {
let ei = self.record(idx).extras_idx as usize;
debug_assert!(
ei < self.extras.len(),
"extras_idx {ei} out of bounds (len={})",
self.extras.len()
);
&mut self.extras[ei]
}
/// Moves every slab into a new store, leaving `self` empty (each Vec is
/// replaced by a fresh default, so `self` also gives up its allocations).
pub(crate) fn take_contents(&mut self) -> Self {
RecordStore {
records: std::mem::take(&mut self.records),
names: std::mem::take(&mut self.names),
bases: std::mem::take(&mut self.bases),
cigar: std::mem::take(&mut self.cigar),
qual: std::mem::take(&mut self.qual),
aux: std::mem::take(&mut self.aux),
extras: std::mem::take(&mut self.extras),
}
}
/// Empties every slab while keeping allocations for reuse.
pub fn clear(&mut self) {
self.records.clear();
self.names.clear();
self.bases.clear();
self.cigar.clear();
self.qual.clear();
self.aux.clear();
self.extras.clear();
}
// Per-slab capacity inspectors (e.g. for memory accounting).
pub fn records_capacity(&self) -> usize {
self.records.capacity()
}
pub fn names_capacity(&self) -> usize {
self.names.capacity()
}
pub fn bases_capacity(&self) -> usize {
self.bases.capacity()
}
pub fn cigar_capacity(&self) -> usize {
self.cigar.capacity()
}
pub fn qual_capacity(&self) -> usize {
self.qual.capacity()
}
pub fn aux_capacity(&self) -> usize {
self.aux.capacity()
}
pub fn extras_capacity(&self) -> usize {
self.extras.capacity()
}
}
// Equivalent to RecordStore::new(): an empty store with no preallocation.
impl<U> Default for RecordStore<U> {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
#[allow(
clippy::arithmetic_side_effects,
clippy::cast_possible_truncation,
reason = "test code with known small values"
)]
mod tests {
use super::*;
// Test customizer that rejects every record in the post-push `filter` hook,
// forcing the rollback path.
#[derive(Clone, Default)]
struct KeepNone;
impl CustomizeRecordStore for KeepNone {
type Extra = ();
fn filter(&mut self, _: &SlimRecord, _: &RecordStore<()>) -> bool {
false
}
fn compute(&mut self, _: &SlimRecord, _: &RecordStore<()>) {}
}
// Test customizer whose post-push `filter` verdict is fixed at construction.
#[derive(Clone)]
struct AcceptFlag(bool);
impl CustomizeRecordStore for AcceptFlag {
type Extra = ();
fn filter(&mut self, _: &SlimRecord, _: &RecordStore<()>) -> bool {
self.0
}
fn compute(&mut self, _: &SlimRecord, _: &RecordStore<()>) {}
}
/// Builds a minimal synthetic BAM record body: 32-byte fixed header,
/// NUL-terminated qname, `n_cigar_ops` identical ops (op code 0 with length
/// `seq_len`), a zeroed packed-sequence area, and zeroed per-base qualities.
fn make_raw_record(qname: &[u8], seq_len: u32, n_cigar_ops: u16) -> Vec<u8> {
    let name_len = qname.len() as u8 + 1; // +1 for the trailing NUL
    let cigar_section = n_cigar_ops as usize * 4;
    let packed_seq_section = (seq_len as usize).div_ceil(2);
    let total =
        32 + name_len as usize + cigar_section + packed_seq_section + seq_len as usize;
    let mut raw = vec![0u8; total];

    // Fixed-size header; fields not written here stay zero.
    raw[0..4].copy_from_slice(&0i32.to_le_bytes());
    raw[4..8].copy_from_slice(&0i32.to_le_bytes());
    raw[8] = name_len;
    raw[9] = 0;
    raw[12..14].copy_from_slice(&n_cigar_ops.to_le_bytes());
    raw[14..16].copy_from_slice(&0u16.to_le_bytes());
    raw[16..20].copy_from_slice(&seq_len.to_le_bytes());

    // Variable-length section: qname + NUL terminator, then the CIGAR ops.
    let var_start = 32;
    let qname_end = var_start + qname.len();
    raw[var_start..qname_end].copy_from_slice(qname);
    raw[qname_end] = 0;
    let cigar_start = var_start + name_len as usize;
    let op = seq_len << 4; // low nibble 0 = op code M
    for slot in 0..n_cigar_ops as usize {
        let off = cigar_start + slot * 4;
        if off + 4 <= raw.len() {
            raw[off..off + 4].copy_from_slice(&op.to_le_bytes());
        }
    }
    raw
}
// A header whose declared lengths exceed the 32 available bytes must error
// rather than panic or read out of bounds.
#[test]
fn push_raw_rejects_offset_overflow() {
let mut store = RecordStore::new();
let mut raw = [0u8; 32];
raw[8] = 255; raw[12..14].copy_from_slice(&u16::MAX.to_le_bytes()); raw[16..20].copy_from_slice(&u32::MAX.to_le_bytes());
let result = store.push_raw(&raw, &mut ());
assert!(result.is_err());
}
// NOTE(review): despite the name, this asserts that a small consistent
// push_fields call succeeds (no overflow is triggered here).
#[test]
fn push_fields_rejects_offset_overflow() {
use seqair_types::Base;
let mut store = RecordStore::new();
let result: Result<Option<u32>, _> = store.push_fields(
Pos0::new(0).unwrap(),
Pos0::new(0).unwrap(),
BamFlags::empty(),
0,
0,
0,
b"read1",
&[],
&[Base::A],
&[30],
&[],
0, -1, 0, 0, &mut (),
);
assert!(result.is_ok());
}
// Happy path: a well-formed synthetic record lands in the store.
#[test]
fn push_raw_normal_record_succeeds() {
let mut store = RecordStore::new();
let raw = make_raw_record(b"read1", 4, 1);
let result = store.push_raw(&raw, &mut ());
assert!(result.is_ok());
assert_eq!(store.len(), 1);
}
// records() iterates in insertion order and reports an exact length.
#[test]
fn iter_yields_all_records_with_indices() {
let mut store = RecordStore::new();
store.push_raw(&make_raw_record(b"read0", 4, 1), &mut ()).unwrap();
store.push_raw(&make_raw_record(b"read1", 4, 1), &mut ()).unwrap();
store.push_raw(&make_raw_record(b"read2", 4, 1), &mut ()).unwrap();
let qnames: Vec<&[u8]> = store.records().map(|rec| rec.qname(&store).unwrap()).collect();
assert_eq!(qnames, vec![b"read0".as_slice(), b"read1".as_slice(), b"read2".as_slice()]);
assert_eq!(store.records().len(), store.len());
let positions: Vec<usize> = store.records().enumerate().map(|(i, _)| i).collect();
assert_eq!(positions, vec![0, 1, 2]);
}
// Iteration over an empty store yields nothing and reports length 0.
#[test]
fn iter_empty_store() {
let store: RecordStore = RecordStore::new();
assert_eq!(store.records().count(), 0);
assert_eq!(store.records().len(), 0);
}
// Bytes 20..24 of the fixed header (next_ref_id) survive the round trip.
#[test]
fn push_raw_preserves_next_ref_id() {
let mut store = RecordStore::new();
let mut raw = make_raw_record(b"read1", 4, 1);
raw[20..24].copy_from_slice(&7i32.to_le_bytes());
store.push_raw(&raw, &mut ()).unwrap();
assert_eq!(store.record(0).next_ref_id, 7);
}
// A post-push rejection must roll back every slab and must not consume an
// index: the next accepted record gets the rejected record's slot.
#[test]
fn push_raw_with_rejecting_filter_truncates_all_slabs() {
let mut store = RecordStore::new();
let a = make_raw_record(b"read1", 4, 1);
let idx_a = store.push_raw(&a, &mut ()).unwrap();
assert_eq!(idx_a, Some(0));
let after_a = (
store.records.len(),
store.names.len(),
store.bases.len(),
store.cigar.len(),
store.qual.len(),
store.aux.len(),
store.extras.len(),
);
let b = make_raw_record(b"read2", 8, 2);
let idx_b = store.push_raw(&b, &mut KeepNone).unwrap();
assert_eq!(idx_b, None, "rejected record must not yield an index");
let after_b = (
store.records.len(),
store.names.len(),
store.bases.len(),
store.cigar.len(),
store.qual.len(),
store.aux.len(),
store.extras.len(),
);
assert_eq!(after_a, after_b, "slab lengths must match pre-reject state exactly");
let c = make_raw_record(b"read3", 4, 1);
let idx_c = store.push_raw(&c, &mut ()).unwrap();
assert_eq!(idx_c, Some(1), "rejected record must not consume an index");
assert_eq!(store.len(), 2, "store should hold A and C only");
}
// filter_raw rejection in push_raw must short-circuit before any slab write.
#[test]
fn push_raw_filter_raw_rejection_avoids_slab_write() {
#[derive(Clone, Default)]
struct RejectRaw;
impl CustomizeRecordStore for RejectRaw {
type Extra = ();
fn filter_raw(&mut self, _: &FilterRawFields<'_>) -> bool {
false
}
fn compute(&mut self, _: &SlimRecord, _: &RecordStore<()>) {}
}
let mut store = RecordStore::new();
let before = (
store.names.len(),
store.bases.len(),
store.cigar.len(),
store.qual.len(),
store.aux.len(),
);
let raw = make_raw_record(b"readX", 8, 2);
let result = store.push_raw(&raw, &mut RejectRaw).unwrap();
assert_eq!(result, None);
let after = (
store.names.len(),
store.bases.len(),
store.cigar.len(),
store.qual.len(),
store.aux.len(),
);
assert_eq!(before, after, "filter_raw rejection must not extend any slab");
}
// Same guarantee for the pre-decoded push_fields path.
#[test]
fn push_fields_filter_raw_rejection_avoids_slab_write() {
use seqair_types::Base;
#[derive(Clone, Default)]
struct RejectRaw;
impl CustomizeRecordStore for RejectRaw {
type Extra = ();
fn filter_raw(&mut self, _: &FilterRawFields<'_>) -> bool {
false
}
fn compute(&mut self, _: &SlimRecord, _: &RecordStore<()>) {}
}
let mut store = RecordStore::new();
let before = (
store.names.len(),
store.bases.len(),
store.cigar.len(),
store.qual.len(),
store.aux.len(),
);
let result = store
.push_fields(
Pos0::new(0).unwrap(),
Pos0::new(4).unwrap(),
BamFlags::empty(),
42,
4,
0,
b"readY",
&[CigarOp::new(cigar::CigarOpType::Match, 5)],
&[Base::A; 5],
&[30; 5],
b"tagdata",
0,
-1,
0,
0,
&mut RejectRaw,
)
.unwrap();
assert_eq!(result, None);
let after = (
store.names.len(),
store.bases.len(),
store.cigar.len(),
store.qual.len(),
store.aux.len(),
);
assert_eq!(before, after, "filter_raw rejection must not extend any slab");
}
// The post-push filter hook must see the fully-populated record and be able
// to read its payloads back out of the store.
#[test]
fn push_raw_filter_can_read_slim_record_and_store() {
#[derive(Clone, Default)]
struct AssertReadX;
impl CustomizeRecordStore for AssertReadX {
type Extra = ();
fn filter(&mut self, rec: &SlimRecord, store: &RecordStore<()>) -> bool {
assert_eq!(rec.mapq, 0);
let qname = rec.qname(store).expect("qname slab must be readable");
assert_eq!(qname, b"readX");
true
}
fn compute(&mut self, _: &SlimRecord, _: &RecordStore<()>) {}
}
let mut store = RecordStore::new();
let raw = make_raw_record(b"readX", 4, 1);
let kept_idx = store.push_raw(&raw, &mut AssertReadX).unwrap();
assert_eq!(kept_idx, Some(0));
}
// push_fields rejection restores every slab length to its pre-push value.
#[test]
fn push_fields_with_rejecting_filter_rolls_back() {
use seqair_types::Base;
let mut store = RecordStore::new();
store
.push_fields(
Pos0::new(0).unwrap(),
Pos0::new(4).unwrap(),
BamFlags::empty(),
30,
4,
0,
b"kept",
&[CigarOp::new(cigar::CigarOpType::Match, 4)],
&[Base::A, Base::C, Base::G, Base::T],
&[30, 31, 32, 33],
&[],
0,
-1,
0,
0,
&mut (),
)
.unwrap();
let names_len = store.names.len();
let bases_len = store.bases.len();
let cigar_len = store.cigar.len();
let qual_len = store.qual.len();
let rejected = store
.push_fields(
Pos0::new(10).unwrap(),
Pos0::new(14).unwrap(),
BamFlags::empty(),
30,
4,
0,
b"dropped",
&[CigarOp::new(cigar::CigarOpType::Match, 4)],
&[Base::A, Base::C, Base::G, Base::T],
&[30, 31, 32, 33],
b"NM:i:0",
0,
-1,
0,
0,
&mut KeepNone,
)
.unwrap();
assert_eq!(rejected, None);
assert_eq!(store.len(), 1);
assert_eq!(store.names.len(), names_len);
assert_eq!(store.bases.len(), bases_len);
assert_eq!(store.cigar.len(), cigar_len);
assert_eq!(store.qual.len(), qual_len);
}
// Mate fields passed to push_fields are stored verbatim.
#[test]
fn push_fields_preserves_next_ref_id() {
use seqair_types::Base;
let mut store = RecordStore::new();
store
.push_fields(
Pos0::new(100).unwrap(),
Pos0::new(105).unwrap(),
BamFlags::empty(),
30,
5,
0,
b"read1",
&[CigarOp::new(cigar::CigarOpType::Match, 5)],
&[Base::A, Base::C, Base::G, Base::T, Base::A],
&[30, 31, 32, 33, 34],
&[],
0, 3, 500, 200, &mut (),
)
.unwrap();
assert_eq!(store.record(0).next_ref_id, 3);
}
mod rollback_props {
use super::super::*;
use super::AcceptFlag;
use proptest::prelude::*;
use seqair_types::{BamFlags, Base};
// One synthetic push_fields invocation: the payload plus the accept/reject
// verdict the AcceptFlag customizer will return for it.
#[derive(Debug, Clone)]
struct PushInput {
qname: Vec<u8>,
bases: Vec<Base>,
quals: Vec<u8>,
aux: Vec<u8>,
pos: u32,
mapq: u8,
accept: bool,
}
impl PushInput {
// Single match-type op spanning the whole read.
fn cigar_op(&self) -> CigarOp {
#[expect(
clippy::cast_possible_truncation,
reason = "bases.len() is bounded by strategy (≤ 16)"
)]
let len = self.bases.len() as u32;
CigarOp::new(cigar::CigarOpType::Match, len)
}
// Inclusive end position implied by pos + read length.
fn end_pos(&self) -> Pos0 {
#[expect(
clippy::cast_possible_truncation,
clippy::arithmetic_side_effects,
reason = "bases.len() ≥ 1 ≤ 16, pos < 1_000_000; sum fits in u32"
)]
let end = self.pos + self.bases.len() as u32 - 1;
Pos0::new(end).expect("bounded by strategy to < i32::MAX")
}
}
fn arb_base() -> impl Strategy<Value = Base> {
prop_oneof![
Just(Base::A),
Just(Base::C),
Just(Base::G),
Just(Base::T),
Just(Base::Unknown),
]
}
// Strategy for PushInput: printable-ish qname (1..=16 bytes), 1..=16 bases
// with constant quality 30, 0..=32 aux bytes, bounded pos/mapq, random verdict.
fn arb_push_input() -> impl Strategy<Value = PushInput> {
(
prop::collection::vec(1u8..=126, 1..=16),
prop::collection::vec(arb_base(), 1..=16),
prop::collection::vec(any::<u8>(), 0..=32),
0u32..1_000_000,
0u8..=60,
any::<bool>(),
)
.prop_map(|(qname, bases, aux, pos, mapq, accept)| {
let quals = bases.iter().map(|_| 30u8).collect();
PushInput { qname, bases, quals, aux, pos, mapq, accept }
})
}
#[allow(
clippy::expect_used,
clippy::unwrap_in_result,
reason = "proptest synthetic input bounded by strategy; panic on violation is informative"
)]
// Pushes `input` through push_fields with AcceptFlag(input.accept).
fn push_one(store: &mut RecordStore<()>, input: &PushInput) -> Option<u32> {
let cigar = [input.cigar_op()];
#[expect(clippy::cast_possible_truncation, reason = "bases.len() bounded ≤ 16")]
let matching = input.bases.len() as u32;
store
.push_fields(
Pos0::new(input.pos).expect("strategy bounds pos < 1_000_000"),
input.end_pos(),
BamFlags::empty(),
input.mapq,
matching,
0,
&input.qname,
&cigar,
&input.bases,
&input.quals,
&input.aux,
0,
-1,
0,
0,
&mut AcceptFlag(input.accept),
)
.expect("push_fields must not error on synthetic input")
}
// Like push_one but forces acceptance; used to build reference stores.
fn push_kept(store: &mut RecordStore<()>, input: &PushInput) -> u32 {
let mut forced_keep = input.clone();
forced_keep.accept = true;
push_one(store, &forced_keep).expect("accept=true always yields Some")
}
// (record count, names, bases, cigar, qual, aux, extras count) snapshot for
// whole-store equality checks; records/extras captured by length only.
type SlabSnapshot = (usize, Vec<u8>, Vec<Base>, Vec<CigarOp>, Vec<u8>, Vec<u8>, usize);
fn dump_slabs(store: &RecordStore<()>) -> SlabSnapshot {
(
store.records.len(),
store.names.clone(),
store.bases.clone(),
store.cigar.clone(),
store.qual.clone(),
store.aux.clone(),
store.extras.len(),
)
}
proptest! {
// A store built from mixed accept/reject pushes must be byte-identical to
// one built from only the accepted inputs: rollback leaves no trace.
#[test]
fn push_fields_rollback_self_consistency(
inputs in prop::collection::vec(arb_push_input(), 0..=60),
) {
let mut a = RecordStore::new();
for inp in &inputs {
push_one(&mut a, inp);
}
let mut b = RecordStore::new();
for inp in inputs.iter().filter(|i| i.accept) {
push_kept(&mut b, inp);
}
prop_assert_eq!(a.records.len(), b.records.len(), "records len");
for (ra, rb) in a.records.iter().zip(b.records.iter()) {
prop_assert_eq!(ra.pos, rb.pos);
prop_assert_eq!(ra.end_pos, rb.end_pos);
prop_assert_eq!(ra.flags, rb.flags);
prop_assert_eq!(ra.mapq, rb.mapq);
prop_assert_eq!(ra.seq_len, rb.seq_len);
prop_assert_eq!(ra.name_off, rb.name_off, "name_off");
prop_assert_eq!(ra.name_len, rb.name_len, "name_len");
prop_assert_eq!(ra.bases_off, rb.bases_off, "bases_off");
prop_assert_eq!(ra.cigar_off, rb.cigar_off, "cigar_off");
prop_assert_eq!(ra.qual_off, rb.qual_off, "qual_off");
prop_assert_eq!(ra.aux_off, rb.aux_off, "aux_off");
prop_assert_eq!(ra.aux_len, rb.aux_len, "aux_len");
prop_assert_eq!(ra.extras_idx, rb.extras_idx, "extras_idx");
}
prop_assert_eq!(dump_slabs(&a), dump_slabs(&b), "slab bytes");
}
// After every push, slab lengths must equal the running totals over the
// accepted inputs, and a rejected push must leave the snapshot unchanged.
#[test]
fn push_fields_slab_lengths_track_accepted_prefix(
inputs in prop::collection::vec(arb_push_input(), 0..=40),
) {
let mut store = RecordStore::new();
let mut expected_names = 0usize;
let mut expected_bases = 0usize;
let mut expected_cigar = 0usize;
let mut expected_qual = 0usize;
let mut expected_aux = 0usize;
let mut expected_records = 0usize;
for inp in &inputs {
let before = dump_slabs(&store);
let result = push_one(&mut store, inp);
if inp.accept {
prop_assert!(result.is_some(), "accept=true must return Some");
expected_records += 1;
expected_names += inp.qname.len();
expected_bases += inp.bases.len();
expected_cigar += 1; expected_qual += inp.quals.len();
expected_aux += inp.aux.len();
} else {
prop_assert!(result.is_none(), "accept=false must return None");
prop_assert_eq!(before, dump_slabs(&store), "rollback left state altered");
}
prop_assert_eq!(store.records.len(), expected_records, "records len");
prop_assert_eq!(store.names.len(), expected_names, "names len");
prop_assert_eq!(store.bases.len(), expected_bases, "bases len");
prop_assert_eq!(store.cigar.len(), expected_cigar, "cigar len");
prop_assert_eq!(store.qual.len(), expected_qual, "qual len");
prop_assert_eq!(store.aux.len(), expected_aux, "aux len");
prop_assert_eq!(store.extras.len(), expected_records, "extras len");
}
}
// Indices returned for accepted pushes are dense: 0, 1, 2, ...
#[test]
fn push_fields_indices_are_dense_for_accepted(
inputs in prop::collection::vec(arb_push_input(), 0..=40),
) {
let mut store = RecordStore::new();
let mut kept: Vec<u32> = Vec::new();
for inp in &inputs {
if let Some(idx) = push_one(&mut store, inp) {
kept.push(idx);
}
}
let expected: Vec<u32> =
(0..kept.len()).map(|i| u32::try_from(i).unwrap()).collect();
prop_assert_eq!(kept, expected);
}
}
use crate::bam::aux_data::AuxData;
use crate::bam::owned_record::OwnedBamRecord;
use seqair_types::BaseQuality;
use seqair_types::Pos0;
fn build_owned_bam(input: &PushInput) -> OwnedBamRecord {
#[expect(
clippy::cast_possible_truncation,
reason = "bases.len() bounded ≤ 16 by strategy; fits in u32"
)]
let len = input.bases.len() as u32;
OwnedBamRecord::builder(0, Some(Pos0::new(input.pos).unwrap()), input.qname.clone())
.flags(BamFlags::empty())
.mapq(input.mapq)
.cigar(vec![CigarOp::new(cigar::CigarOpType::Match, len)])
.seq(input.bases.clone())
.qual(input.quals.iter().copied().map(BaseQuality::from_byte).collect())
.aux(AuxData::from_bytes(input.aux.clone()))
.build()
.expect("synthetic OwnedBamRecord must build")
}
proptest! {
    // Property: serializing an equivalent OwnedBamRecord and feeding it
    // through push_raw populates the store identically to push_fields.
    #[test]
    fn push_fields_matches_owned_bam_round_trip(
        inputs in prop::collection::vec(arb_push_input(), 0..=20),
    ) {
        let mut via_raw = RecordStore::new();
        let mut bytes = Vec::new();
        for inp in inputs.iter().filter(|i| i.accept) {
            bytes.clear();
            build_owned_bam(inp)
                .to_bam_bytes(&mut bytes)
                .expect("to_bam_bytes must succeed on synthetic input");
            via_raw.push_raw(&bytes, &mut ())
                .expect("push_raw must accept BAM bytes from to_bam_bytes")
                .expect("default filter returns true");
        }
        let mut via_fields = RecordStore::new();
        for inp in inputs.iter().filter(|i| i.accept) {
            push_kept(&mut via_fields, inp);
        }
        prop_assert_eq!(via_raw.records.len(), via_fields.records.len(), "records len");
        for (ra, rb) in via_raw.records.iter().zip(via_fields.records.iter()) {
            prop_assert_eq!(ra.pos, rb.pos, "pos");
            prop_assert_eq!(ra.end_pos, rb.end_pos, "end_pos");
            prop_assert_eq!(ra.flags, rb.flags, "flags");
            prop_assert_eq!(ra.mapq, rb.mapq, "mapq");
            prop_assert_eq!(ra.seq_len, rb.seq_len, "seq_len");
            prop_assert_eq!(ra.n_cigar_ops, rb.n_cigar_ops, "n_cigar_ops");
            prop_assert_eq!(ra.matching_bases, rb.matching_bases, "matching_bases");
            prop_assert_eq!(ra.indel_bases, rb.indel_bases, "indel_bases");
        }
        prop_assert_eq!(dump_slabs(&via_raw), dump_slabs(&via_fields), "slab bytes");
    }
}
proptest! {
    // Property: every accepted input is fully recoverable through the store's
    // getter API at its dense record index.
    #[test]
    fn push_fields_input_is_readable_via_getters(
        inputs in prop::collection::vec(arb_push_input(), 0..=40),
    ) {
        let mut store = RecordStore::new();
        let mut accepted: Vec<&PushInput> = Vec::new();
        for inp in &inputs {
            if let Some(idx) = push_one(&mut store, inp) {
                prop_assert_eq!(idx as usize, accepted.len(), "dense indices");
                accepted.push(inp);
            }
        }
        prop_assert_eq!(store.len(), accepted.len(), "store len matches kept count");
        for (i, inp) in accepted.iter().enumerate() {
            let idx = i as u32;
            // Slab-backed payloads round-trip exactly.
            prop_assert_eq!(store.qname(idx), inp.qname.as_slice(), "qname rec {}", i);
            prop_assert_eq!(store.seq(idx), inp.bases.as_slice(), "seq rec {}", i);
            let qual_bytes = BaseQuality::slice_to_bytes(store.qual(idx));
            prop_assert_eq!(qual_bytes, inp.quals.as_slice(), "qual rec {}", i);
            prop_assert_eq!(store.aux(idx), inp.aux.as_slice(), "aux rec {}", i);
            // push_one writes a single Match op covering the whole sequence.
            let ops = store.cigar(idx);
            prop_assert_eq!(ops.len(), 1, "cigar op count rec {}", i);
            let only_op = ops[0];
            prop_assert_eq!(only_op.op_type(), cigar::CigarOpType::Match, "cigar op type rec {}", i);
            prop_assert_eq!(only_op.len() as usize, inp.bases.len(), "cigar op len rec {}", i);
            // Scalar record fields.
            let rec = store.record(idx);
            #[expect(
                clippy::cast_sign_loss,
                reason = "PushInput.pos is bounded < 1_000_000 so always nonneg"
            )]
            let rec_pos_u32 = rec.pos.as_i32() as u32;
            prop_assert_eq!(rec_pos_u32, inp.pos, "pos rec {}", i);
            prop_assert_eq!(rec.mapq, inp.mapq, "mapq rec {}", i);
            #[expect(
                clippy::cast_possible_truncation,
                reason = "bases.len() bounded ≤ 16; fits in u32"
            )]
            let expected_seq_len = inp.bases.len() as u32;
            prop_assert_eq!(rec.seq_len, expected_seq_len, "seq_len rec {}", i);
        }
    }
}
/// Hand-builds a raw BAM alignment record body (the bytes after `block_size`)
/// with a single `<seq_len>M` cigar op; the packed sequence and quality bytes
/// are left zero-initialized.
fn build_bam_raw(qname: &[u8], seq_len: u32, pos: i32, mapq: u8) -> Vec<u8> {
    // Read name is NUL-terminated, then padded with NULs to a 4-byte multiple.
    let mut name_with_nul: Vec<u8> = qname.to_vec();
    name_with_nul.push(0);
    while !name_with_nul.len().is_multiple_of(4) {
        name_with_nul.push(0);
    }
    let name_len = name_with_nul.len();
    // One cigar op = 4 bytes; sequence is 4-bit packed, two bases per byte.
    let cigar_bytes = 4usize; let seq_bytes = (seq_len as usize).div_ceil(2);
    // 32 fixed-field bytes + name + cigar + packed seq + one qual byte per base.
    let total = 32 + name_len + cigar_bytes + seq_bytes + seq_len as usize;
    let mut raw = vec![0u8; total];
    raw[0..4].copy_from_slice(&0i32.to_le_bytes()); // refID = 0
    raw[4..8].copy_from_slice(&pos.to_le_bytes()); // pos (0-based)
    #[expect(
        clippy::cast_possible_truncation,
        reason = "name_len bounded ≤ 20 by strategy"
    )]
    {
        raw[8] = name_len as u8; // l_read_name, including NUL + padding
    }
    raw[9] = mapq; // mapq; bytes 10..12 (bin) remain zero
    // n_cigar_op = 1, flag = 0, l_seq = seq_len
    raw[12..14].copy_from_slice(&1u16.to_le_bytes()); raw[14..16].copy_from_slice(&0u16.to_le_bytes()); raw[16..20].copy_from_slice(&seq_len.to_le_bytes());
    // next_refID = -1, next_pos = -1, tlen = 0 (unmapped/absent mate)
    raw[20..24].copy_from_slice(&(-1i32).to_le_bytes()); raw[24..28].copy_from_slice(&(-1i32).to_le_bytes()); raw[28..32].copy_from_slice(&0i32.to_le_bytes());
    raw[32..32 + name_len].copy_from_slice(&name_with_nul);
    let cigar_start = 32 + name_len;
    // Cigar op encoding: (length << 4) | op_type, where op_type 0 is 'M'.
    let op = seq_len << 4; raw[cigar_start..cigar_start + 4].copy_from_slice(&op.to_le_bytes());
    raw
}
/// Synthetic input for the `push_raw` property tests.
#[derive(Debug, Clone)]
struct RawInput {
    qname: Vec<u8>, // read name, without the NUL terminator
    seq_len: u32,   // number of bases; drives packed-seq and qual byte counts
    pos: i32,       // 0-based alignment position
    mapq: u8,       // mapping quality
    accept: bool,   // whether the push filter keeps this record
}
/// Strategy producing small, always-parseable `RawInput`s: lowercase names of
/// 1–10 bytes, 1–16 bases, bounded position and mapq, random accept flag.
fn arb_raw_input() -> impl Strategy<Value = RawInput> {
    let name = prop::collection::vec(b'a'..=b'z', 1..=10);
    (name, 1u32..=16, 0i32..=1_000, 0u8..=60, any::<bool>()).prop_map(
        |(qname, seq_len, pos, mapq, accept)| RawInput { qname, seq_len, pos, mapq, accept },
    )
}
/// Serializes `input` to raw BAM bytes and pushes it through the store with a
/// filter that accepts or rejects per `input.accept`.
fn push_raw_one(store: &mut RecordStore<()>, input: &RawInput) -> Option<u32> {
    let raw_bytes = build_bam_raw(&input.qname, input.seq_len, input.pos, input.mapq);
    let mut filter = AcceptFlag(input.accept);
    store.push_raw(&raw_bytes, &mut filter).expect("synthetic BAM record is always parseable")
}
fn push_raw_kept(store: &mut RecordStore<()>, input: &RawInput) -> u32 {
let mut forced_keep = input.clone();
forced_keep.accept = true;
push_raw_one(store, &forced_keep).expect("accept=true always yields Some")
}
proptest! {
    // Property: a stream containing rejected raw pushes leaves exactly the
    // same slab bytes as replaying only the accepted subset.
    #[test]
    fn push_raw_rollback_matches_filtered_replay(
        inputs in prop::collection::vec(arb_raw_input(), 0..=40),
    ) {
        let mut mixed = RecordStore::new();
        for inp in &inputs {
            push_raw_one(&mut mixed, inp);
        }
        let mut filtered = RecordStore::new();
        for inp in inputs.iter().filter(|i| i.accept) {
            push_raw_kept(&mut filtered, inp);
        }
        prop_assert_eq!(dump_slabs(&mixed), dump_slabs(&filtered), "slab bytes");
    }
}
}
}