#![deny(missing_docs)]
pub mod nixl;
pub mod utils;
use utils::*;
use derive_getters::Getters;
use thiserror::Error;
use crate::block_manager::storage::{Storage, StorageAllocator};
use derive_builder::Builder;
use serde::{Deserialize, Serialize};
use tracing::instrument;
use validator::Validate;
use super::storage::StorageType;
/// Errors produced while constructing, serializing, or indexing a block layout.
#[derive(Debug, Error)]
#[allow(missing_docs)]
pub enum LayoutError {
    /// Configuration is structurally invalid (e.g. wrong storage count or size).
    #[error("Invalid configuration: {0}")]
    InvalidConfig(String),
    /// A `LayoutConfig` field failed its `validator` range/custom check.
    #[error("Validation failed: {0}")]
    ValidationError(#[from] validator::ValidationErrors),
    /// `block_idx` was >= the layout's `num_blocks`.
    #[error("Invalid block index: {0}")]
    InvalidBlockIndex(usize),
    /// `layer_idx` was >= the layout's `num_layers`.
    #[error("Invalid layer index: {0}")]
    InvalidLayerIndex(usize),
    /// `outer_idx` was >= the layout's `outer_dim`.
    #[error("Invalid outer index: {0}")]
    InvalidOuterIndex(usize),
    /// Catch-all for runtime failures such as storage allocation errors.
    #[error("Operation failed: {0}")]
    OperationFailed(String),
    /// JSON (de)serialization of layout metadata failed.
    #[error("Serialization error: {0}")]
    SerdeError(#[from] serde_json::Error),
}
/// How KV-cache blocks are arranged in storage.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum LayoutType {
    /// All blocks, layers and outer dims live in one contiguous region.
    FullyContiguous,
    /// One storage region per layer.
    LayerSeparate {
        /// When true the outer dimension is the slower-varying axis within a
        /// layer's region; when false blocks are the slower-varying axis.
        outer_contiguous: bool,
    },
}
impl LayoutType {
    /// Infer a [`LayoutType::LayerSeparate`] variant from a per-layer tensor shape.
    ///
    /// Heuristic: whichever of the two leading dimensions holds at least
    /// `num_device_blocks` entries is assumed to be the block dimension. If
    /// `shape[0]` is the block dimension, blocks vary fastest within the outer
    /// split (`outer_contiguous: false`); if `shape[1]` is, the outer dimension
    /// is contiguous (`outer_contiguous: true`).
    ///
    /// # Errors
    /// Returns an error if `shape` has fewer than two dimensions, or if neither
    /// of the two leading dimensions can hold `num_device_blocks`.
    pub fn layer_separate_auto(shape: &[usize], num_device_blocks: usize) -> anyhow::Result<Self> {
        // Guard: the original indexed shape[0]/shape[1] unconditionally, which
        // panics on shapes with fewer than two dimensions.
        if shape.len() < 2 {
            return Err(anyhow::anyhow!(
                "Unsupported kv cache layout. Expected at least 2 dimensions, got shape: {:?}",
                shape
            ));
        }
        let outer_contiguous = if shape[0] >= num_device_blocks {
            false
        } else if shape[1] >= num_device_blocks {
            true
        } else {
            // anyhow! formats directly; no inner format! needed.
            return Err(anyhow::anyhow!(
                "Unsupported kv cache layout. Got shape: {:?}",
                shape
            ));
        };
        Ok(LayoutType::LayerSeparate { outer_contiguous })
    }

    /// Default `LayerSeparate` variant (outer-contiguous) for callers that
    /// cannot inspect a shape.
    pub fn layer_separate_auto_default() -> Self {
        LayoutType::LayerSeparate {
            outer_contiguous: true,
        }
    }

    /// Explicitly choose the `LayerSeparate` ordering.
    pub fn layer_separate(outer_contiguous: bool) -> Self {
        LayoutType::LayerSeparate { outer_contiguous }
    }
}
/// A resolved (address, size) span inside a storage region for one
/// (block, layer, outer) coordinate. Getters are generated by `derive_getters`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Getters)]
pub struct LocalMemoryRegion {
    // Absolute start address of the region in bytes.
    #[getter(copy)]
    addr: usize,
    // Length of the region in bytes (always `memory_region_size` of the layout).
    #[getter(copy)]
    size: usize,
    // Storage type (device/system/...) the address belongs to.
    #[getter(copy)]
    storage_type: StorageType,
}
/// A block layout with a concrete storage type, exposing its backing storage.
pub trait BlockLayout: GenericBlockLayout {
    /// Concrete storage implementation backing this layout.
    type StorageType: Storage;
    /// The layout variant (fully contiguous vs. layer separate).
    fn layout_type(&self) -> LayoutType;
    /// Shared references to all backing storage regions.
    fn storage(&self) -> Vec<&Self::StorageType>;
    /// Mutable references to all backing storage regions.
    fn storage_mut(&mut self) -> Vec<&mut Self::StorageType>;
}
/// Storage-type-erased view of a block layout: address resolution without
/// knowing the concrete `Storage` implementation.
pub trait GenericBlockLayout: BlockLayoutConfig + Send + Sync {
    /// Storage type of the backing region(s).
    fn storage_type(&self) -> &StorageType;
    /// The user-provided configuration this layout was built from.
    fn config(&self) -> &LayoutConfig;
    /// Resolve a (block, layer, outer) coordinate to an absolute memory span.
    ///
    /// # Errors
    /// Returns the matching `Invalid*Index` error if any index is out of range.
    fn memory_region(
        &self,
        block_idx: usize,
        layer_idx: usize,
        outer_idx: usize,
    ) -> Result<LocalMemoryRegion, LayoutError>;
}
/// Read access to the dimensional parameters of a layout.
///
/// NOTE(review): every default accessor below clones the full `LayoutConfig`
/// via `layout_config()` just to read one field; cheap for this struct, but
/// implementors may override if it ever shows up in a profile.
pub trait BlockLayoutConfig: std::fmt::Debug {
    /// An owned copy of the underlying configuration.
    fn layout_config(&self) -> LayoutConfig;
    /// Number of blocks in the layout.
    fn num_blocks(&self) -> usize {
        self.layout_config().num_blocks
    }
    /// Number of layers in the layout.
    fn num_layers(&self) -> usize {
        self.layout_config().num_layers
    }
    /// Size of the outer dimension (1 or 2, e.g. K/V split).
    fn outer_dim(&self) -> usize {
        self.layout_config().outer_dim
    }
    /// Number of tokens per block/page.
    fn page_size(&self) -> usize {
        self.layout_config().page_size
    }
    /// Size of the innermost (hidden) dimension in elements.
    fn inner_dim(&self) -> usize {
        self.layout_config().inner_dim
    }
    /// Total bytes of layout data (excluding leading alignment padding).
    fn layout_data_bytes(&self) -> usize;
}
/// User-facing layout parameters, validated via the `validator` crate.
#[derive(Debug, Clone, Builder, Validate, Serialize, Deserialize, PartialEq, Eq)]
pub struct LayoutConfig {
    /// Number of blocks; must be >= 1.
    #[validate(range(min = 1))]
    pub num_blocks: usize,
    /// Number of layers; must be >= 1.
    #[validate(range(min = 1))]
    pub num_layers: usize,
    /// Outer dimension (e.g. 1 for fused KV, 2 for split K/V).
    #[validate(range(min = 1, max = 2))]
    pub outer_dim: usize,
    /// Tokens per page/block; must be >= 1.
    #[validate(range(min = 1))]
    pub page_size: usize,
    /// Innermost dimension size in elements; must be >= 1.
    #[validate(range(min = 1))]
    pub inner_dim: usize,
    /// Byte alignment for each block's start address; must be a power of two.
    #[validate(custom(function = "validate_power_of_2"))]
    #[builder(default = "1")]
    pub alignment: usize,
    /// Bytes per element (e.g. 2 for fp16/bf16).
    #[builder(default = "2")]
    pub dtype_width_bytes: usize,
}
impl LayoutConfig {
pub fn builder() -> LayoutConfigBuilder {
LayoutConfigBuilder::default()
}
}
/// `LayoutConfig` plus the precomputed strides for a fully-contiguous layout.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct FullyContiguousConfig {
    // The validated user configuration.
    inner: LayoutConfig,
    // Bytes in one (block, layer, outer) region: page_size * inner_dim * dtype_width.
    memory_region_size: usize,
    // Stride between consecutive outer indices within a layer.
    outer_dim_stride_in_bytes: usize,
    // Stride between consecutive layers within a block.
    layer_stride_in_bytes: usize,
    // Per-block size before alignment padding.
    natural_block_stride: usize,
    // Per-block stride after rounding up to `alignment`.
    block_stride_in_bytes: usize,
    // Total bytes spanned by all blocks (no trailing pad on the last block).
    layout_data_bytes: usize,
}
impl FullyContiguousConfig {
    /// Validate `config` and precompute strides for the ordering
    /// [block][layer][outer][page][inner].
    fn new(config: LayoutConfig) -> Result<Self, LayoutError> {
        config.validate()?;
        let alignment = config.alignment;
        // Contiguous bytes of one (block, layer, outer) memory region.
        let outer_dim_stride_in_bytes =
            config.page_size * config.inner_dim * config.dtype_width_bytes;
        let layer_stride_in_bytes = outer_dim_stride_in_bytes * config.outer_dim;
        let memory_region_size = outer_dim_stride_in_bytes;
        let natural_block_stride = config.num_layers * layer_stride_in_bytes;
        // Round the block stride up so every block starts on an aligned address.
        let block_stride_in_bytes = if alignment > 1 {
            align_up(natural_block_stride, alignment)
        } else {
            natural_block_stride
        };
        // The last block needs no trailing alignment padding, hence
        // (n-1) aligned strides + one natural stride.
        // num_blocks >= 1 is guaranteed by validate(), so the subtraction is safe.
        let layout_data_bytes =
            (config.num_blocks - 1) * block_stride_in_bytes + natural_block_stride;
        Ok(Self {
            inner: config,
            memory_region_size,
            outer_dim_stride_in_bytes,
            layer_stride_in_bytes,
            natural_block_stride,
            block_stride_in_bytes,
            layout_data_bytes,
        })
    }
    /// Bytes to request from an allocator: layout data plus worst-case leading
    /// padding needed to reach the first aligned address.
    pub fn required_allocation_size(&self) -> usize {
        let initial_padding = self.inner.alignment.saturating_sub(1);
        self.layout_data_bytes + initial_padding
    }
}
impl BlockLayoutConfig for FullyContiguousConfig {
    /// Owned copy of the user configuration.
    fn layout_config(&self) -> LayoutConfig {
        self.inner.clone()
    }
    /// Precomputed total data size (excluding leading padding).
    fn layout_data_bytes(&self) -> usize {
        self.layout_data_bytes
    }
}
/// A layout where all blocks live in a single contiguous storage region.
#[derive(Debug)]
pub struct FullyContiguous<S: Storage> {
    // Precomputed strides and the validated user config.
    config: FullyContiguousConfig,
    // The single backing storage region.
    storage: S,
    // Cached storage type of `storage`.
    storage_type: StorageType,
    // Padding from `storage.addr()` to the first aligned address.
    base_offset: usize,
}
impl<S: Storage> FullyContiguous<S> {
    /// Build a fully-contiguous layout over exactly one storage region.
    ///
    /// # Errors
    /// - [`LayoutError::InvalidConfig`] if `storage` does not contain exactly
    ///   one region, or the region fails size/alignment validation.
    /// - [`LayoutError::ValidationError`] if `config` fails field validation.
    #[instrument(level = "debug", skip(storage), fields(config = ?config))]
    pub fn new(config: LayoutConfig, mut storage: Vec<S>) -> Result<Self, LayoutError> {
        let config = FullyContiguousConfig::new(config)?;
        if storage.len() != 1 {
            return Err(LayoutError::InvalidConfig(
                "FullyContiguous layout requires exactly one storage region".to_string(),
            ));
        }
        let storage = storage.remove(0);
        let storage_type = storage.storage_type();
        // Validates capacity/alignment and returns the leading padding needed
        // to reach the first aligned address inside the region.
        let base_offset = validate_storage(&storage, &config)?;
        tracing::debug!(
            config.memory_region_size,
            config.layer_stride_in_bytes,
            config.block_stride_in_bytes,
            config.natural_block_stride,
            alignment = config.inner.alignment,
            base_offset,
            "Calculated layout strides (aligned)"
        );
        Ok(Self {
            config,
            storage,
            storage_type,
            base_offset,
        })
    }
    /// Internal constructor for callers that already have a computed config and
    /// base offset (e.g. when rebuilding from serialized layout metadata).
    /// Performs no validation; currently infallible.
    pub(crate) fn new_internal(
        config: FullyContiguousConfig,
        storage: S,
        storage_type: StorageType,
        base_offset: usize,
    ) -> Result<Self, LayoutError> {
        Ok(Self {
            config,
            storage,
            storage_type,
            base_offset,
        })
    }
    /// Allocate a fresh storage region sized for `config` and build the layout.
    ///
    /// # Errors
    /// Propagates config validation failures and wraps allocator errors in
    /// [`LayoutError::OperationFailed`].
    #[instrument(level = "debug", skip(allocator), fields(config = ?config))]
    pub fn allocate(
        config: LayoutConfig,
        allocator: &dyn StorageAllocator<S>,
    ) -> Result<Self, LayoutError> {
        let config = FullyContiguousConfig::new(config)?;
        let bytes_to_allocate = config.required_allocation_size();
        tracing::debug!(
            bytes_to_allocate,
            alignment = config.inner.alignment,
            "Calculated storage size for allocation (with alignment padding)"
        );
        let storage = allocator.allocate(bytes_to_allocate).map_err(|e| {
            LayoutError::OperationFailed(format!("Storage allocation failed: {}", e))
        })?;
        tracing::debug!(
            allocated_size = storage.size(),
            allocated_addr = storage.addr(),
            "Storage allocated successfully"
        );
        // `new` re-derives FullyContiguousConfig from the inner config; the
        // recomputation is cheap and keeps a single validation path.
        Self::new(config.inner, vec![storage])
    }
}
impl<S: Storage> BlockLayout for FullyContiguous<S> {
    type StorageType = S;
    /// Always [`LayoutType::FullyContiguous`].
    fn layout_type(&self) -> LayoutType {
        LayoutType::FullyContiguous
    }
    /// The single backing region, wrapped in a Vec for trait uniformity.
    fn storage(&self) -> Vec<&Self::StorageType> {
        vec![&self.storage]
    }
    /// Mutable access to the single backing region.
    fn storage_mut(&mut self) -> Vec<&mut Self::StorageType> {
        vec![&mut self.storage]
    }
}
impl<S: Storage> GenericBlockLayout for FullyContiguous<S> {
    /// Storage type of the single backing region.
    fn storage_type(&self) -> &StorageType {
        &self.storage_type
    }
    /// The user-provided layout configuration.
    fn config(&self) -> &LayoutConfig {
        &self.config.inner
    }
    /// Resolve (block, layer, outer) to an absolute address by summing the
    /// three precomputed strides on top of the aligned base address.
    ///
    /// # Errors
    /// Returns the matching `Invalid*Index` error for out-of-range indices.
    fn memory_region(
        &self,
        block_idx: usize,
        layer_idx: usize,
        outer_idx: usize,
    ) -> Result<LocalMemoryRegion, LayoutError> {
        validate_indices(&self.config, block_idx, layer_idx, outer_idx)?;
        let base = self.storage.addr() as usize + self.base_offset;
        let offset = block_idx * self.config.block_stride_in_bytes
            + layer_idx * self.config.layer_stride_in_bytes
            + outer_idx * self.config.outer_dim_stride_in_bytes;
        Ok(LocalMemoryRegion {
            addr: base + offset,
            size: self.config.memory_region_size,
            storage_type: self.storage_type,
        })
    }
}
impl<S: Storage> BlockLayoutConfig for FullyContiguous<S> {
    /// Owned copy of the user configuration.
    fn layout_config(&self) -> LayoutConfig {
        self.config.inner.clone()
    }
    /// Total layout data size (excluding leading padding).
    fn layout_data_bytes(&self) -> usize {
        self.config.layout_data_bytes
    }
}
impl<S: Storage> FullyContiguous<S> {
    /// Run the `WorkerLayoutVerifier` over every region of this layout.
    ///
    /// # Errors
    /// Returns [`LayoutError::InvalidConfig`] if any critical mismatch is found.
    pub fn verify_memory_regions(&self) -> Result<(), LayoutError> {
        use crate::block_manager::layout::utils::WorkerLayoutVerifier;
        let mut verifier = WorkerLayoutVerifier::new();
        let results = verifier.verify_layout_consistency(self)?;
        if verifier.has_critical_mismatches() {
            tracing::error!(
                "FullyContiguous layout verification failed: {} regions checked, {} size mismatches",
                results.len(),
                results.iter().filter(|r| !r.size_matches).count()
            );
            return Err(LayoutError::InvalidConfig(
                "Memory region verification failed".to_string(),
            ));
        }
        tracing::debug!(
            "FullyContiguous layout verification passed: {} regions checked",
            results.len()
        );
        Ok(())
    }
    /// Recompute the address for (block, layer, outer) from first principles.
    ///
    /// NOTE(review): this intentionally mirrors `memory_region`'s arithmetic so
    /// the two can be cross-checked; keep the formulas in sync.
    pub fn expected_memory_address(
        &self,
        block_idx: usize,
        layer_idx: usize,
        outer_idx: usize,
    ) -> Result<usize, LayoutError> {
        validate_indices(&self.config, block_idx, layer_idx, outer_idx)?;
        let aligned_start_addr = self.storage.addr() as usize + self.base_offset;
        let block_offset = block_idx * self.config.block_stride_in_bytes;
        let layer_offset = layer_idx * self.config.layer_stride_in_bytes;
        let outer_offset = outer_idx * self.config.outer_dim_stride_in_bytes;
        Ok(aligned_start_addr + block_offset + layer_offset + outer_offset)
    }
    /// Compare `memory_region` against `expected_memory_address` for one
    /// coordinate; logs a warning and returns `false` on mismatch.
    pub fn verify_memory_region(
        &self,
        block_idx: usize,
        layer_idx: usize,
        outer_idx: usize,
    ) -> Result<bool, LayoutError> {
        let actual_region = self.memory_region(block_idx, layer_idx, outer_idx)?;
        let expected_addr = self.expected_memory_address(block_idx, layer_idx, outer_idx)?;
        let expected_size = self.config.memory_region_size;
        let addr_matches = actual_region.addr == expected_addr;
        let size_matches = actual_region.size == expected_size;
        if !addr_matches || !size_matches {
            tracing::warn!(
                "Memory region mismatch at ({}, {}, {}): addr {} vs {} (expected), size {} vs {} (expected)",
                block_idx,
                layer_idx,
                outer_idx,
                actual_region.addr,
                expected_addr,
                actual_region.size,
                expected_size
            );
        }
        Ok(addr_matches && size_matches)
    }
}
/// `LayoutConfig` plus precomputed strides for a layer-separate layout
/// (one storage region per layer).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct LayerSeparateConfig {
    // The validated user configuration.
    inner: LayoutConfig,
    // Bytes in one (block, layer, outer) region.
    memory_region_size: usize,
    // Stride between consecutive outer indices within a layer's region.
    outer_dim_stride_in_bytes: usize,
    // Stride between consecutive blocks within a layer's region.
    block_stride_in_bytes: usize,
    // Total bytes per layer region (excluding leading padding).
    layout_data_bytes: usize,
    // True: region ordered [outer][block]; false: ordered [block][outer].
    is_outer_contiguous: bool,
}
impl LayerSeparateConfig {
    /// Validate `config` and precompute strides for one of two orderings:
    /// outer-contiguous `[outer][block][page][inner]` or block-contiguous
    /// `[block][outer][page][inner]`.
    fn new(config: LayoutConfig, is_outer_contiguous: bool) -> Result<Self, LayoutError> {
        config.validate()?;
        let alignment = config.alignment;
        // Contiguous bytes of one (block, layer, outer) memory region.
        let memory_region_size = config.page_size * config.inner_dim * config.dtype_width_bytes;
        let outer_dim_stride_in_bytes;
        let block_stride_in_bytes;
        let layout_data_bytes;
        if is_outer_contiguous {
            // Blocks vary fastest: align each region so block starts are aligned.
            block_stride_in_bytes = if alignment > 1 {
                align_up(memory_region_size, alignment)
            } else {
                memory_region_size
            };
            outer_dim_stride_in_bytes = block_stride_in_bytes * config.num_blocks;
            layout_data_bytes = outer_dim_stride_in_bytes * config.outer_dim;
        } else {
            // Outer varies fastest: align the whole per-block span instead.
            outer_dim_stride_in_bytes = memory_region_size;
            let natural_block_stride = outer_dim_stride_in_bytes * config.outer_dim;
            block_stride_in_bytes = if alignment > 1 {
                align_up(natural_block_stride, alignment)
            } else {
                natural_block_stride
            };
            layout_data_bytes = block_stride_in_bytes * config.num_blocks;
        }
        Ok(Self {
            inner: config,
            memory_region_size,
            outer_dim_stride_in_bytes,
            block_stride_in_bytes,
            layout_data_bytes,
            is_outer_contiguous,
        })
    }
    /// Bytes to request per layer region: layout data plus worst-case leading
    /// padding needed to reach the first aligned address.
    pub fn required_allocation_size(&self) -> usize {
        let initial_padding = self.inner.alignment.saturating_sub(1);
        self.layout_data_bytes + initial_padding
    }
}
impl BlockLayoutConfig for LayerSeparateConfig {
    /// Owned copy of the user configuration.
    fn layout_config(&self) -> LayoutConfig {
        self.inner.clone()
    }
    /// Per-layer layout data size (excluding leading padding).
    fn layout_data_bytes(&self) -> usize {
        self.layout_data_bytes
    }
}
/// A layout with one independent storage region per layer.
#[derive(Debug)]
pub struct LayerSeparate<S: Storage> {
    // Precomputed strides and the validated user config.
    config: LayerSeparateConfig,
    // One backing region per layer, indexed by layer_idx.
    storages: Vec<S>,
    // Storage type taken from the first region; all regions are assumed to match.
    storage_type: StorageType,
    // Per-region padding from `addr()` to the first aligned address,
    // parallel to `storages`.
    base_offsets: Vec<usize>,
}
impl<S: Storage> LayerSeparate<S> {
    /// Build a layer-separate layout over exactly one storage region per layer.
    ///
    /// # Errors
    /// - [`LayoutError::InvalidConfig`] if the region count does not match
    ///   `config.num_layers`, or any region fails size/alignment validation.
    /// - [`LayoutError::ValidationError`] if `config` fails field validation.
    #[instrument(level = "debug", skip(storages), fields(config = ?config))]
    pub fn new(
        config: LayoutConfig,
        storages: Vec<S>,
        is_outer_contiguous: bool,
    ) -> Result<Self, LayoutError> {
        if storages.len() != config.num_layers {
            // Include the counts so the mismatch is diagnosable from the error alone.
            return Err(LayoutError::InvalidConfig(format!(
                "LayerSeparate layout requires exactly one storage region per layer (expected {}, got {})",
                config.num_layers,
                storages.len()
            )));
        }
        let config = LayerSeparateConfig::new(config, is_outer_contiguous)?;
        // num_layers >= 1 is enforced by validation above, so index 0 exists.
        // All regions are assumed to share one storage type; take the first.
        let storage_type = storages[0].storage_type();
        let mut base_offsets = Vec::with_capacity(storages.len());
        for storage in &storages {
            // Validates capacity/alignment and yields the leading padding
            // needed to reach the first aligned address in this region.
            let base_offset = validate_storage(storage, &config)?;
            tracing::debug!(
                config.memory_region_size,
                config.block_stride_in_bytes,
                config.outer_dim_stride_in_bytes,
                alignment = config.inner.alignment,
                base_offset,
                "Calculated layout strides (aligned)"
            );
            base_offsets.push(base_offset);
        }
        Ok(Self {
            config,
            storages,
            storage_type,
            base_offsets,
        })
    }
    /// Internal constructor for callers that already have a computed config and
    /// base offsets (e.g. when rebuilding from serialized layout metadata).
    /// Performs no validation; currently infallible.
    pub(crate) fn new_internal(
        config: LayerSeparateConfig,
        storages: Vec<S>,
        storage_type: StorageType,
        base_offsets: Vec<usize>,
    ) -> Result<Self, LayoutError> {
        Ok(Self {
            config,
            storages,
            storage_type,
            base_offsets,
        })
    }
    /// Allocate one fresh storage region per layer and build the layout.
    ///
    /// # Errors
    /// Propagates config validation failures and wraps allocator errors in
    /// [`LayoutError::OperationFailed`].
    // `#[instrument]` added for parity with `FullyContiguous::allocate`.
    #[instrument(level = "debug", skip(allocator), fields(config = ?config))]
    pub fn allocate(
        config: LayoutConfig,
        allocator: &dyn StorageAllocator<S>,
        is_outer_contiguous: bool,
    ) -> Result<Self, LayoutError> {
        let config = LayerSeparateConfig::new(config, is_outer_contiguous)?;
        let bytes_to_allocate = config.required_allocation_size();
        tracing::debug!(
            bytes_to_allocate,
            alignment = config.inner.alignment,
            "Calculated storage size for allocation (with alignment padding)"
        );
        let mut storages = Vec::new();
        for _ in 0..config.inner.num_layers {
            let storage = allocator.allocate(bytes_to_allocate).map_err(|e| {
                LayoutError::OperationFailed(format!("Storage allocation failed: {}", e))
            })?;
            storages.push(storage);
        }
        tracing::debug!(
            allocated_size = storages[0].size(),
            allocated_addr = storages[0].addr(),
            "Storage allocated successfully"
        );
        Self::new(config.inner, storages, is_outer_contiguous)
    }
}
impl<S: Storage> GenericBlockLayout for LayerSeparate<S> {
    /// Storage type shared by all per-layer regions.
    fn storage_type(&self) -> &StorageType {
        &self.storage_type
    }
    /// The user-provided layout configuration.
    fn config(&self) -> &LayoutConfig {
        &self.config.inner
    }
    /// Resolve (block, layer, outer) to an address inside the region owned by
    /// `layer_idx`; layers live in separate regions, so only the block and
    /// outer strides contribute to the offset.
    ///
    /// # Errors
    /// Returns the matching `Invalid*Index` error for out-of-range indices.
    fn memory_region(
        &self,
        block_idx: usize,
        layer_idx: usize,
        outer_idx: usize,
    ) -> Result<LocalMemoryRegion, LayoutError> {
        validate_indices(&self.config, block_idx, layer_idx, outer_idx)?;
        let layer_storage = &self.storages[layer_idx];
        let base = layer_storage.addr() as usize + self.base_offsets[layer_idx];
        let offset = block_idx * self.config.block_stride_in_bytes
            + outer_idx * self.config.outer_dim_stride_in_bytes;
        Ok(LocalMemoryRegion {
            addr: base + offset,
            size: self.config.memory_region_size,
            storage_type: layer_storage.storage_type(),
        })
    }
}
impl<S: Storage> BlockLayout for LayerSeparate<S> {
    type StorageType = S;
    /// `LayerSeparate` with the ordering flag this layout was built with.
    fn layout_type(&self) -> LayoutType {
        LayoutType::LayerSeparate {
            outer_contiguous: self.config.is_outer_contiguous,
        }
    }
    /// References to all per-layer regions, ordered by layer index.
    fn storage(&self) -> Vec<&Self::StorageType> {
        self.storages.iter().collect()
    }
    /// Mutable references to all per-layer regions, ordered by layer index.
    fn storage_mut(&mut self) -> Vec<&mut Self::StorageType> {
        self.storages.iter_mut().collect()
    }
}
impl<S: Storage> BlockLayoutConfig for LayerSeparate<S> {
    /// Owned copy of the user configuration.
    fn layout_config(&self) -> LayoutConfig {
        self.config.inner.clone()
    }
    /// Per-layer layout data size (excluding leading padding).
    fn layout_data_bytes(&self) -> usize {
        self.config.layout_data_bytes
    }
}
impl<S: Storage> LayerSeparate<S> {
    /// Run the `WorkerLayoutVerifier` over every region of this layout.
    ///
    /// # Errors
    /// Returns [`LayoutError::InvalidConfig`] if any critical mismatch is found.
    pub fn verify_memory_regions(&self) -> Result<(), LayoutError> {
        use crate::block_manager::layout::utils::WorkerLayoutVerifier;
        let mut verifier = WorkerLayoutVerifier::new();
        let results = verifier.verify_layout_consistency(self)?;
        if verifier.has_critical_mismatches() {
            tracing::error!(
                "LayerSeparate layout verification failed: {} regions checked, {} size mismatches",
                results.len(),
                results.iter().filter(|r| !r.size_matches).count()
            );
            return Err(LayoutError::InvalidConfig(
                "Memory region verification failed".to_string(),
            ));
        }
        tracing::debug!(
            "LayerSeparate layout verification passed: {} regions checked",
            results.len()
        );
        Ok(())
    }
    /// Recompute the address for (block, layer, outer) from first principles.
    ///
    /// NOTE(review): this intentionally mirrors `memory_region`'s arithmetic so
    /// the two can be cross-checked; keep the formulas in sync.
    pub fn expected_memory_address(
        &self,
        block_idx: usize,
        layer_idx: usize,
        outer_idx: usize,
    ) -> Result<usize, LayoutError> {
        validate_indices(&self.config, block_idx, layer_idx, outer_idx)?;
        let aligned_start_addr =
            self.storages[layer_idx].addr() as usize + self.base_offsets[layer_idx];
        let block_offset = block_idx * self.config.block_stride_in_bytes;
        let outer_offset = outer_idx * self.config.outer_dim_stride_in_bytes;
        Ok(aligned_start_addr + block_offset + outer_offset)
    }
    /// Compare `memory_region` against `expected_memory_address` for one
    /// coordinate; logs a warning and returns `false` on mismatch.
    pub fn verify_memory_region(
        &self,
        block_idx: usize,
        layer_idx: usize,
        outer_idx: usize,
    ) -> Result<bool, LayoutError> {
        let actual_region = self.memory_region(block_idx, layer_idx, outer_idx)?;
        let expected_addr = self.expected_memory_address(block_idx, layer_idx, outer_idx)?;
        let expected_size = self.config.memory_region_size;
        let addr_matches = actual_region.addr == expected_addr;
        let size_matches = actual_region.size == expected_size;
        if !addr_matches || !size_matches {
            tracing::warn!(
                "LayerSeparate memory region mismatch at ({}, {}, {}): addr {} vs {} (expected), size {} vs {} (expected)",
                block_idx,
                layer_idx,
                outer_idx,
                actual_region.addr,
                expected_addr,
                actual_region.size,
                expected_size
            );
        }
        Ok(addr_matches && size_matches)
    }
    /// Check every layer region: its aligned base address must be a multiple
    /// of the configured alignment, and the region must be large enough to
    /// hold the layout data after the base offset.
    ///
    /// # Errors
    /// Returns [`LayoutError::InvalidConfig`] naming the offending layer.
    pub fn verify_storage_alignment(&self) -> Result<(), LayoutError> {
        let alignment = self.config.inner.alignment;
        for (layer_idx, storage) in self.storages.iter().enumerate() {
            let storage_addr = storage.addr() as usize;
            let base_offset = self.base_offsets[layer_idx];
            let aligned_addr = storage_addr + base_offset;
            if alignment > 1 && !aligned_addr.is_multiple_of(alignment) {
                return Err(LayoutError::InvalidConfig(format!(
                    "Layer {} storage not properly aligned: addr {} + offset {} = {} is not {} byte aligned",
                    layer_idx, storage_addr, base_offset, aligned_addr, alignment
                )));
            }
            let required_size = self.config.layout_data_bytes + base_offset;
            if storage.size() < required_size {
                return Err(LayoutError::InvalidConfig(format!(
                    "Layer {} storage too small: {} bytes available, {} bytes required",
                    layer_idx,
                    storage.size(),
                    required_size
                )));
            }
        }
        tracing::debug!(
            "LayerSeparate storage alignment verification passed for {} layers",
            self.storages.len()
        );
        Ok(())
    }
}
#[allow(missing_docs)]
#[cfg(test)]
pub mod tests {
use super::*;
use crate::block_manager::storage::tests::{NullDeviceAllocator, NullDeviceStorage};
use crate::block_manager::storage::{StorageType, SystemAllocator};
use dynamo_runtime::logging::init as init_logging;
const NUM_BLOCKS: usize = 7;
const NUM_LAYERS: usize = 5;
const OUTER_DIM: usize = 2;
const PAGE_SIZE: usize = 4;
const INNER_DIM: usize = 13;
const DTYPE_WIDTH_BYTES: usize = 4;
fn calculate_expected_offset(
base_addr: u64,
block_idx: usize,
layer_idx: usize,
block_stride: usize,
layer_stride: usize,
) -> u64 {
base_addr + (block_idx * block_stride + layer_idx * layer_stride) as u64
}
pub fn setup_layout(
alignment: Option<usize>, ) -> Result<FullyContiguous<NullDeviceStorage>, LayoutError> {
let config = LayoutConfig {
num_blocks: NUM_BLOCKS,
num_layers: NUM_LAYERS,
outer_dim: OUTER_DIM,
page_size: PAGE_SIZE,
inner_dim: INNER_DIM,
alignment: alignment.unwrap_or(1),
dtype_width_bytes: DTYPE_WIDTH_BYTES,
};
FullyContiguous::allocate(config, &NullDeviceAllocator)
}
#[test]
fn test_fc_creation_invalid_alignment() {
let config = LayoutConfig::builder()
.num_blocks(NUM_BLOCKS)
.num_layers(NUM_LAYERS)
.outer_dim(OUTER_DIM)
.page_size(PAGE_SIZE)
.inner_dim(INNER_DIM)
.alignment(3)
.build()
.unwrap();
assert!(config.validate().is_err());
}
#[test]
fn test_fc_creation_success() {
let layout_result = setup_layout(None);
assert!(
layout_result.is_ok(),
"Layout creation failed: {:?}",
layout_result.err()
);
}
#[test]
fn test_fc_creation_insufficient_storage() {
init_logging();
let config = LayoutConfig {
num_blocks: NUM_BLOCKS,
num_layers: NUM_LAYERS,
outer_dim: OUTER_DIM,
page_size: PAGE_SIZE,
inner_dim: INNER_DIM,
alignment: 1,
dtype_width_bytes: DTYPE_WIDTH_BYTES,
};
let fc_config = FullyContiguousConfig::new(config.clone()).unwrap();
let required_size = fc_config.required_allocation_size();
let storage = NullDeviceStorage::new((required_size - 1) as u64);
let layout_result = FullyContiguous::new(config, vec![storage]);
assert!(layout_result.is_err());
match layout_result.err().unwrap() {
LayoutError::InvalidConfig(_) => {} e => panic!("Expected InvalidConfig error, got {:?}", e),
}
}
#[test]
fn test_fc_accessor_methods() {
let layout = setup_layout(None).expect("Layout setup failed");
assert_eq!(layout.num_blocks(), NUM_BLOCKS);
assert_eq!(layout.num_layers(), NUM_LAYERS);
assert_eq!(layout.outer_dim(), OUTER_DIM);
assert_eq!(layout.page_size(), PAGE_SIZE);
assert_eq!(layout.inner_dim(), INNER_DIM);
}
#[test]
fn test_fc_offset_calculation() {
let layout = setup_layout(None).expect("Layout setup failed");
let dims = layout.config.clone();
let block_stride = dims.block_stride_in_bytes;
let layer_stride = dims.layer_stride_in_bytes;
let base_addr = layout.storage.addr() + layout.base_offset as u64;
let expected_offset_0_0 =
calculate_expected_offset(base_addr, 0, 0, block_stride, layer_stride);
assert_eq!(
layout.memory_region(0, 0, 0).unwrap().addr as u64,
expected_offset_0_0
);
let last_layer_idx = NUM_LAYERS - 1;
let expected_offset_0_last =
calculate_expected_offset(base_addr, 0, last_layer_idx, block_stride, layer_stride);
assert_eq!(
layout.memory_region(0, last_layer_idx, 0).unwrap().addr as u64,
expected_offset_0_last
);
let last_block_idx = NUM_BLOCKS - 1;
let expected_offset_last_0 =
calculate_expected_offset(base_addr, last_block_idx, 0, block_stride, layer_stride);
assert_eq!(
layout.memory_region(last_block_idx, 0, 0).unwrap().addr as u64,
expected_offset_last_0
);
let expected_offset_last_last = calculate_expected_offset(
base_addr,
last_block_idx,
last_layer_idx,
block_stride,
layer_stride,
);
assert_eq!(
layout
.memory_region(last_block_idx, last_layer_idx, 0)
.unwrap()
.addr as u64,
expected_offset_last_last
);
let mid_block_idx = NUM_BLOCKS / 2;
let mid_layer_idx = NUM_LAYERS / 2;
let expected_offset_mid_mid = calculate_expected_offset(
base_addr,
mid_block_idx,
mid_layer_idx,
block_stride,
layer_stride,
);
assert_eq!(
layout
.memory_region(mid_block_idx, mid_layer_idx, 0)
.unwrap()
.addr as u64,
expected_offset_mid_mid
);
}
#[test]
fn test_fc_invalid_block_index() {
let layout = setup_layout(None).expect("Layout setup failed");
let result = layout.memory_region(NUM_BLOCKS, 0, 0); assert!(result.is_err());
assert!(matches!(
result.err().unwrap(),
LayoutError::InvalidBlockIndex(NUM_BLOCKS)
));
}
#[test]
fn test_fc_invalid_layer_index() {
let layout = setup_layout(None).expect("Layout setup failed");
let result = layout.memory_region(0, NUM_LAYERS, 0); assert!(result.is_err());
assert!(matches!(
result.err().unwrap(),
LayoutError::InvalidLayerIndex(NUM_LAYERS)
));
}
#[test]
fn test_fc_invalid_outer_index() {
let layout = setup_layout(None).expect("Layout setup failed");
let result = layout.memory_region(0, 0, OUTER_DIM); assert!(result.is_err());
assert!(matches!(
result.err().unwrap(),
LayoutError::InvalidOuterIndex(OUTER_DIM)
));
}
#[test]
fn test_fc_allocation_system() {
init_logging();
let config = LayoutConfig {
num_blocks: NUM_BLOCKS,
num_layers: NUM_LAYERS,
outer_dim: OUTER_DIM,
page_size: PAGE_SIZE,
inner_dim: INNER_DIM,
alignment: 1,
dtype_width_bytes: DTYPE_WIDTH_BYTES,
};
let allocator = SystemAllocator;
let layout_result = FullyContiguous::allocate(config, &allocator);
assert!(layout_result.is_ok());
let layout = layout_result.unwrap();
assert_eq!(layout.num_blocks(), NUM_BLOCKS);
assert_eq!(layout.num_layers(), NUM_LAYERS);
assert_eq!(layout.page_size(), PAGE_SIZE);
assert_eq!(layout.inner_dim(), INNER_DIM);
assert_eq!(layout.storage.storage_type(), StorageType::System);
assert_eq!(
layout.storage.size(),
layout.config.required_allocation_size()
);
assert_eq!(
layout.storage.size(),
NUM_BLOCKS * NUM_LAYERS * OUTER_DIM * PAGE_SIZE * INNER_DIM * DTYPE_WIDTH_BYTES
);
}
#[test]
fn test_fc_alignment() {
init_logging();
const ALIGNMENT: usize = 256;
let config = LayoutConfig {
num_blocks: NUM_BLOCKS,
num_layers: NUM_LAYERS,
outer_dim: OUTER_DIM,
page_size: PAGE_SIZE,
inner_dim: INNER_DIM,
alignment: ALIGNMENT,
dtype_width_bytes: DTYPE_WIDTH_BYTES,
};
let memory_region_size = PAGE_SIZE * INNER_DIM * DTYPE_WIDTH_BYTES;
assert_eq!(memory_region_size, 208);
let natural_block_stride = OUTER_DIM * NUM_LAYERS * memory_region_size;
assert_eq!(natural_block_stride, 2080);
let aligned_block_stride = align_up(natural_block_stride, ALIGNMENT);
assert_eq!(aligned_block_stride, 2304);
let fc_config = FullyContiguousConfig::new(config.clone()).unwrap();
let expected_allocated_size = fc_config.required_allocation_size();
let allocator = SystemAllocator;
let layout_result = FullyContiguous::allocate(config.clone(), &allocator);
assert!(
layout_result.is_ok(),
"Allocation failed: {:?}",
layout_result.err()
);
let layout = layout_result.unwrap();
assert_eq!(
layout.storage.size(),
expected_allocated_size,
"Allocated storage size mismatch"
);
assert_eq!(
layout.config.block_stride_in_bytes, aligned_block_stride,
"Stored block stride mismatch"
);
let addr_block_0 = layout
.memory_region(0, 0, 0)
.expect("Failed to get addr block 0");
let addr_block_1 = layout
.memory_region(1, 0, 0)
.expect("Failed to get addr block 1");
let addr_block_2 = layout
.memory_region(2, 0, 0)
.expect("Failed to get addr block 2");
assert_eq!(
addr_block_0.addr as u64 % ALIGNMENT as u64,
0,
"Block 0 start address is not aligned"
);
assert_eq!(
addr_block_1.addr as u64 % ALIGNMENT as u64,
0,
"Block 1 start address is not aligned"
);
assert_eq!(
addr_block_2.addr as u64 % ALIGNMENT as u64,
0,
"Block 2 start address is not aligned"
);
assert_eq!(
addr_block_1.addr as u64 - addr_block_0.addr as u64,
aligned_block_stride as u64,
"Stride between block 0 and 1 mismatch"
);
assert_eq!(
addr_block_2.addr as u64 - addr_block_1.addr as u64,
aligned_block_stride as u64,
"Stride between block 1 and 2 mismatch"
);
}
pub fn setup_layer_separate_layout(
alignment: Option<usize>,
is_outer_contiguous: bool,
) -> Result<LayerSeparate<NullDeviceStorage>, LayoutError> {
let config = LayoutConfig {
num_blocks: NUM_BLOCKS,
num_layers: NUM_LAYERS,
outer_dim: OUTER_DIM,
page_size: PAGE_SIZE,
inner_dim: INNER_DIM,
alignment: alignment.unwrap_or(1),
dtype_width_bytes: DTYPE_WIDTH_BYTES,
};
let ls_config = LayerSeparateConfig::new(config.clone(), is_outer_contiguous)?;
let required_size = ls_config.required_allocation_size();
let mut storages = Vec::new();
for _ in 0..NUM_LAYERS {
storages.push(NullDeviceStorage::new(required_size as u64));
}
LayerSeparate::new(config, storages, is_outer_contiguous)
}
#[test]
fn test_ls_creation_success_outer_contiguous() {
let layout_result = setup_layer_separate_layout(None, true);
assert!(
layout_result.is_ok(),
"LayerSeparate creation failed: {:?}",
layout_result.err()
);
let layout = layout_result.unwrap();
assert_eq!(
layout.layout_type(),
LayoutType::LayerSeparate {
outer_contiguous: true
}
);
}
#[test]
fn test_ls_creation_success_block_contiguous() {
let layout_result = setup_layer_separate_layout(None, false);
assert!(
layout_result.is_ok(),
"LayerSeparate creation failed: {:?}",
layout_result.err()
);
let layout = layout_result.unwrap();
assert_eq!(
layout.layout_type(),
LayoutType::LayerSeparate {
outer_contiguous: false
}
);
}
#[test]
fn test_ls_creation_wrong_storage_count() {
let config = LayoutConfig {
num_blocks: NUM_BLOCKS,
num_layers: NUM_LAYERS,
outer_dim: OUTER_DIM,
page_size: PAGE_SIZE,
inner_dim: INNER_DIM,
alignment: 1,
dtype_width_bytes: DTYPE_WIDTH_BYTES,
};
let mut storages = Vec::new();
for _ in 0..(NUM_LAYERS - 1) {
storages.push(NullDeviceStorage::new(1000));
}
let layout_result = LayerSeparate::new(config, storages, true);
assert!(layout_result.is_err());
match layout_result.err().unwrap() {
LayoutError::InvalidConfig(_) => {} e => panic!("Expected InvalidConfig error, got {:?}", e),
}
}
#[test]
fn test_ls_accessor_methods() {
let layout = setup_layer_separate_layout(None, true).expect("Layout setup failed");
assert_eq!(layout.num_blocks(), NUM_BLOCKS);
assert_eq!(layout.num_layers(), NUM_LAYERS);
assert_eq!(layout.outer_dim(), OUTER_DIM);
assert_eq!(layout.page_size(), PAGE_SIZE);
assert_eq!(layout.inner_dim(), INNER_DIM);
assert_eq!(layout.storage().len(), NUM_LAYERS);
assert_eq!(layout.storage_type(), &StorageType::Null);
}
#[test]
fn test_ls_memory_region_outer_contiguous() {
let layout = setup_layer_separate_layout(None, true).expect("Layout setup failed");
let region_0_0_0 = layout.memory_region(0, 0, 0).unwrap();
let region_1_0_0 = layout.memory_region(1, 0, 0).unwrap();
let expected_block_stride = layout.config.block_stride_in_bytes;
assert_eq!(
region_1_0_0.addr - region_0_0_0.addr,
expected_block_stride,
"Block stride mismatch in outer_contiguous mode"
);
let region_0_0_1 = layout.memory_region(0, 0, 1).unwrap();
let expected_outer_stride = layout.config.outer_dim_stride_in_bytes;
assert_eq!(
region_0_0_1.addr - region_0_0_0.addr,
expected_outer_stride,
"Outer dimension stride mismatch"
);
let region_0_1_0 = layout.memory_region(0, 1, 0).unwrap();
let region_0_0_0_storage_addr = layout.storages[0].addr() as usize + layout.base_offsets[0];
let region_0_1_0_storage_addr = layout.storages[1].addr() as usize + layout.base_offsets[1];
assert_eq!(region_0_0_0.addr, region_0_0_0_storage_addr);
assert_eq!(region_0_1_0.addr, region_0_1_0_storage_addr);
}
#[test]
fn test_ls_memory_region_block_contiguous() {
let layout = setup_layer_separate_layout(None, false).expect("Layout setup failed");
let region_0_0_0 = layout.memory_region(0, 0, 0).unwrap();
let region_1_0_0 = layout.memory_region(1, 0, 0).unwrap();
let expected_block_stride = layout.config.block_stride_in_bytes;
assert_eq!(
region_1_0_0.addr - region_0_0_0.addr,
expected_block_stride,
"Block stride mismatch in block_contiguous mode"
);
let region_0_0_1 = layout.memory_region(0, 0, 1).unwrap();
let expected_outer_stride = layout.config.outer_dim_stride_in_bytes;
assert_eq!(
region_0_0_1.addr - region_0_0_0.addr,
expected_outer_stride,
"Outer dimension stride mismatch in block_contiguous mode"
);
}
#[test]
fn test_ls_invalid_indices() {
let layout = setup_layer_separate_layout(None, true).expect("Layout setup failed");
let result = layout.memory_region(NUM_BLOCKS, 0, 0);
assert!(result.is_err());
assert!(matches!(
result.err().unwrap(),
LayoutError::InvalidBlockIndex(NUM_BLOCKS)
));
let result = layout.memory_region(0, NUM_LAYERS, 0);
assert!(result.is_err());
assert!(matches!(
result.err().unwrap(),
LayoutError::InvalidLayerIndex(NUM_LAYERS)
));
let result = layout.memory_region(0, 0, OUTER_DIM);
assert!(result.is_err());
assert!(matches!(
result.err().unwrap(),
LayoutError::InvalidOuterIndex(OUTER_DIM)
));
}
#[test]
fn test_ls_memory_region_size() {
let layout = setup_layer_separate_layout(None, true).expect("Layout setup failed");
let region = layout.memory_region(0, 0, 0).unwrap();
let expected_size = PAGE_SIZE * INNER_DIM * DTYPE_WIDTH_BYTES;
assert_eq!(region.size, expected_size);
}
#[test]
fn test_ls_all_blocks_layers_accessible() {
let layout = setup_layer_separate_layout(None, true).expect("Layout setup failed");
for block_idx in 0..NUM_BLOCKS {
for layer_idx in 0..NUM_LAYERS {
for outer_idx in 0..OUTER_DIM {
let result = layout.memory_region(block_idx, layer_idx, outer_idx);
assert!(
result.is_ok(),
"Failed to access block {}, layer {}, outer {}: {:?}",
block_idx,
layer_idx,
outer_idx,
result.err()
);
}
}
}
}
#[test]
fn test_ls_storage_mutability() {
let mut layout = setup_layer_separate_layout(None, true).expect("Layout setup failed");
let mut_storages = layout.storage_mut();
assert_eq!(mut_storages.len(), NUM_LAYERS);
for (i, storage) in mut_storages.iter().enumerate() {
assert!(storage.size() > 0, "Storage {} has zero size", i);
}
}
#[test]
fn test_ls_alignment() {
    init_logging();
    const ALIGNMENT: usize = 128;
    // Request 128-byte alignment for block starts.
    let config = LayoutConfig {
        num_blocks: NUM_BLOCKS,
        num_layers: NUM_LAYERS,
        outer_dim: OUTER_DIM,
        page_size: PAGE_SIZE,
        inner_dim: INNER_DIM,
        alignment: ALIGNMENT,
        dtype_width_bytes: DTYPE_WIDTH_BYTES,
    };
    // Size each per-layer storage exactly as the config requires.
    let required_size = LayerSeparateConfig::new(config.clone(), true)
        .unwrap()
        .required_allocation_size();
    let storages: Vec<_> = (0..NUM_LAYERS)
        .map(|_| NullDeviceStorage::new(required_size as u64))
        .collect();
    let layout_result = LayerSeparate::new(config, storages, true);
    assert!(
        layout_result.is_ok(),
        "Layout creation with alignment failed"
    );
    let layout = layout_result.unwrap();
    // The first two blocks of every layer must start on an aligned address.
    for layer_idx in 0..NUM_LAYERS {
        for block_idx in 0..2 {
            let region = layout.memory_region(block_idx, layer_idx, 0).unwrap();
            assert_eq!(
                region.addr % ALIGNMENT,
                0,
                "Block {} in layer {} is not aligned",
                block_idx,
                layer_idx
            );
        }
    }
}
#[test]
fn test_ls_stride_calculations_outer_contiguous() {
    // Outer-contiguous mode: consecutive blocks are adjacent, so the block
    // stride equals one region and the outer stride spans all blocks.
    let layout = setup_layer_separate_layout(None, true).expect("Layout setup failed");
    let region_size = PAGE_SIZE * INNER_DIM * DTYPE_WIDTH_BYTES;
    assert_eq!(layout.config.memory_region_size, region_size);
    assert_eq!(layout.config.block_stride_in_bytes, region_size);
    assert_eq!(
        layout.config.outer_dim_stride_in_bytes,
        layout.config.block_stride_in_bytes * NUM_BLOCKS
    );
}
#[test]
fn test_ls_stride_calculations_block_contiguous() {
    // Block-contiguous mode: the outer entries of one block sit side by
    // side, so the outer stride equals one region and the block stride
    // spans all outer entries.
    let layout = setup_layer_separate_layout(None, false).expect("Layout setup failed");
    let region_size = PAGE_SIZE * INNER_DIM * DTYPE_WIDTH_BYTES;
    assert_eq!(layout.config.memory_region_size, region_size);
    assert_eq!(layout.config.outer_dim_stride_in_bytes, region_size);
    assert_eq!(layout.config.block_stride_in_bytes, region_size * OUTER_DIM);
}
#[test]
fn test_ls_layout_data_bytes() {
    // Total data footprint equals the dominant stride times its dimension:
    // outer-contiguous scales the outer stride by OUTER_DIM, while
    // block-contiguous scales the block stride by NUM_BLOCKS.
    let outer_layout = setup_layer_separate_layout(None, true).expect("Layout setup failed");
    assert_eq!(
        outer_layout.layout_data_bytes(),
        outer_layout.config.outer_dim_stride_in_bytes * OUTER_DIM
    );
    let block_layout = setup_layer_separate_layout(None, false).expect("Layout setup failed");
    assert_eq!(
        block_layout.layout_data_bytes(),
        block_layout.config.block_stride_in_bytes * NUM_BLOCKS
    );
}
mod layout_correctness_tests {
    use super::*;
    use std::collections::HashSet;

    /// Asserts that no two half-open byte ranges `[start, end)` in `ranges`
    /// overlap. `context` is appended to "Memory regions overlap" in the
    /// failure message to identify the layout/layer under test.
    fn assert_no_overlapping_ranges(ranges: &[(usize, usize)], context: &str) {
        for i in 0..ranges.len() {
            for j in (i + 1)..ranges.len() {
                let (start_i, end_i) = ranges[i];
                let (start_j, end_j) = ranges[j];
                // Half-open intervals overlap unless one ends at or before
                // the other starts.
                let overlaps = !(end_i <= start_j || end_j <= start_i);
                assert!(
                    !overlaps,
                    "Memory regions overlap{}: [{}, {}) and [{}, {})",
                    context, start_i, end_i, start_j, end_j
                );
            }
        }
    }

    /// Every addressable region of a fully-contiguous layout must occupy a
    /// distinct, non-overlapping byte range.
    #[test]
    fn test_fc_memory_regions_no_overlap() {
        let layout = setup_layout(None).expect("Layout setup failed");
        let mut used_ranges = Vec::with_capacity(NUM_BLOCKS * NUM_LAYERS * OUTER_DIM);
        for block_idx in 0..NUM_BLOCKS {
            for layer_idx in 0..NUM_LAYERS {
                for outer_idx in 0..OUTER_DIM {
                    let region = layout
                        .memory_region(block_idx, layer_idx, outer_idx)
                        .unwrap();
                    used_ranges.push((region.addr, region.addr + region.size));
                }
            }
        }
        assert_no_overlapping_ranges(&used_ranges, "");
    }

    /// Regions within each layer of a layer-separate layout must not
    /// overlap. Layers are checked independently because each layer is
    /// backed by its own storage, so cross-layer addresses are unrelated.
    #[test]
    fn test_ls_memory_regions_no_overlap() {
        let layout = setup_layer_separate_layout(None, true).expect("Layout setup failed");
        for layer_idx in 0..NUM_LAYERS {
            let mut used_ranges = Vec::with_capacity(NUM_BLOCKS * OUTER_DIM);
            for block_idx in 0..NUM_BLOCKS {
                for outer_idx in 0..OUTER_DIM {
                    let region = layout
                        .memory_region(block_idx, layer_idx, outer_idx)
                        .unwrap();
                    used_ranges.push((region.addr, region.addr + region.size));
                }
            }
            assert_no_overlapping_ranges(&used_ranges, &format!(" in layer {}", layer_idx));
        }
    }

    /// With an explicit 256-byte alignment, the first region of every block
    /// must start on an aligned address.
    #[test]
    fn test_fc_memory_alignment_correctness() {
        const ALIGNMENT: usize = 256;
        let config = LayoutConfig {
            num_blocks: NUM_BLOCKS,
            num_layers: NUM_LAYERS,
            outer_dim: OUTER_DIM,
            page_size: PAGE_SIZE,
            inner_dim: INNER_DIM,
            alignment: ALIGNMENT,
            dtype_width_bytes: DTYPE_WIDTH_BYTES,
        };
        let layout = FullyContiguous::allocate(config, &SystemAllocator).unwrap();
        for block_idx in 0..NUM_BLOCKS {
            let region = layout.memory_region(block_idx, 0, 0).unwrap();
            assert_eq!(
                region.addr % ALIGNMENT,
                0,
                "Block {} is not aligned to {} bytes",
                block_idx,
                ALIGNMENT
            );
        }
    }

    /// Index triples must map to unique packed patterns and correctly sized
    /// regions for both layout families.
    #[test]
    fn test_layout_data_integrity_patterns() {
        init_logging();
        let fc_layout = setup_layout(None).expect("FC Layout setup failed");
        let ls_layout =
            setup_layer_separate_layout(None, true).expect("LS Layout setup failed");
        test_data_integrity_for_layout(&fc_layout, "FullyContiguous");
        test_data_integrity_for_layout(&ls_layout, "LayerSeparate");
    }

    /// Shared driver: walks every (block, layer, outer) triple, checks the
    /// packed index pattern is unique, and verifies the region size equals
    /// page_size * inner_dim * dtype_width_bytes.
    fn test_data_integrity_for_layout<L: GenericBlockLayout>(layout: &L, layout_name: &str) {
        let mut written_patterns = HashSet::new();
        // Loop-invariant: the expected region size is the same for every
        // triple, so compute it once.
        let expected_size =
            layout.page_size() * layout.inner_dim() * layout.layout_config().dtype_width_bytes;
        for block_idx in 0..layout.num_blocks() {
            for layer_idx in 0..layout.num_layers() {
                for outer_idx in 0..layout.outer_dim() {
                    let region = layout
                        .memory_region(block_idx, layer_idx, outer_idx)
                        .unwrap();
                    // Pack the triple into one integer; `insert` returning
                    // false means two distinct triples collided.
                    let pattern = (block_idx << 16) | (layer_idx << 8) | outer_idx;
                    assert!(
                        written_patterns.insert(pattern),
                        "Duplicate pattern {} in {} layout",
                        pattern,
                        layout_name
                    );
                    assert_eq!(
                        region.size, expected_size,
                        "Region size mismatch in {} layout at ({}, {}, {})",
                        layout_name, block_idx, layer_idx, outer_idx
                    );
                }
            }
        }
    }

    /// Checks stride relationships for all three layout variants.
    #[test]
    fn test_layout_stride_correctness() {
        let fc_layout = setup_layout(None).expect("FC Layout setup failed");
        let ls_outer = setup_layer_separate_layout(None, true).expect("LS outer setup failed");
        let ls_block = setup_layer_separate_layout(None, false).expect("LS block setup failed");
        test_fc_stride_correctness(&fc_layout);
        test_ls_stride_correctness(&ls_outer, true);
        test_ls_stride_correctness(&ls_block, false);
    }

    /// FullyContiguous strides: outer stride = one region, layer stride =
    /// OUTER_DIM regions, block stride = NUM_LAYERS * OUTER_DIM regions.
    fn test_fc_stride_correctness(layout: &FullyContiguous<NullDeviceStorage>) {
        let memory_region_size = PAGE_SIZE * INNER_DIM * DTYPE_WIDTH_BYTES;
        let base = layout.memory_region(0, 0, 0).unwrap();
        let layer_stride = layout.memory_region(0, 1, 0).unwrap().addr - base.addr;
        assert_eq!(layer_stride, memory_region_size * OUTER_DIM);
        let outer_stride = layout.memory_region(0, 0, 1).unwrap().addr - base.addr;
        assert_eq!(outer_stride, memory_region_size);
        let block_stride = layout.memory_region(1, 0, 0).unwrap().addr - base.addr;
        assert_eq!(block_stride, memory_region_size * OUTER_DIM * NUM_LAYERS);
    }

    /// LayerSeparate strides: which stride equals a single region depends on
    /// whether the layout is outer-contiguous or block-contiguous.
    fn test_ls_stride_correctness(
        layout: &LayerSeparate<NullDeviceStorage>,
        is_outer_contiguous: bool,
    ) {
        let memory_region_size = PAGE_SIZE * INNER_DIM * DTYPE_WIDTH_BYTES;
        let base = layout.memory_region(0, 0, 0).unwrap();
        let block_stride = layout.memory_region(1, 0, 0).unwrap().addr - base.addr;
        let outer_stride = layout.memory_region(0, 0, 1).unwrap().addr - base.addr;
        if is_outer_contiguous {
            assert_eq!(block_stride, memory_region_size);
            assert_eq!(outer_stride, memory_region_size * NUM_BLOCKS);
        } else {
            assert_eq!(block_stride, memory_region_size * OUTER_DIM);
            assert_eq!(outer_stride, memory_region_size);
        }
    }

    /// Two layouts built from the same dimensions must report matching
    /// geometry and identically sized regions at every triple, regardless
    /// of layout family.
    #[test]
    fn test_layout_compatibility_scenarios() {
        init_logging();
        let host_fc = setup_layout(None).expect("Host FC setup failed");
        let device_ls =
            setup_layer_separate_layout(None, true).expect("Device LS setup failed");
        assert_eq!(host_fc.num_blocks(), device_ls.num_blocks());
        assert_eq!(host_fc.num_layers(), device_ls.num_layers());
        assert_eq!(host_fc.outer_dim(), device_ls.outer_dim());
        assert_eq!(host_fc.page_size(), device_ls.page_size());
        assert_eq!(host_fc.inner_dim(), device_ls.inner_dim());
        for block_idx in 0..host_fc.num_blocks() {
            for layer_idx in 0..host_fc.num_layers() {
                for outer_idx in 0..host_fc.outer_dim() {
                    let host_region = host_fc
                        .memory_region(block_idx, layer_idx, outer_idx)
                        .unwrap();
                    let device_region = device_ls
                        .memory_region(block_idx, layer_idx, outer_idx)
                        .unwrap();
                    assert_eq!(
                        host_region.size, device_region.size,
                        "Memory region size mismatch at ({}, {}, {})",
                        block_idx, layer_idx, outer_idx
                    );
                }
            }
        }
    }

    /// Degenerate (all-ones) and small multi-dimension configs must both
    /// construct and address cleanly.
    #[test]
    fn test_layout_edge_cases() {
        // Minimal config: every dimension is 1, yielding one 1-byte region.
        let minimal_config = LayoutConfig {
            num_blocks: 1,
            num_layers: 1,
            outer_dim: 1,
            page_size: 1,
            inner_dim: 1,
            alignment: 1,
            dtype_width_bytes: 1,
        };
        let minimal_fc =
            FullyContiguous::allocate(minimal_config.clone(), &SystemAllocator).unwrap();
        let region = minimal_fc.memory_region(0, 0, 0).unwrap();
        assert_eq!(region.size, 1);
        // Small config exercising every dimension > 1.
        let max_outer_config = LayoutConfig {
            num_blocks: 2,
            num_layers: 2,
            outer_dim: 2,
            page_size: 4,
            inner_dim: 4,
            alignment: 1,
            dtype_width_bytes: 2,
        };
        let max_outer_fc =
            FullyContiguous::allocate(max_outer_config, &SystemAllocator).unwrap();
        for block_idx in 0..2 {
            for layer_idx in 0..2 {
                for outer_idx in 0..2 {
                    let region = max_outer_fc.memory_region(block_idx, layer_idx, outer_idx);
                    assert!(
                        region.is_ok(),
                        "Failed to access region ({}, {}, {})",
                        block_idx,
                        layer_idx,
                        outer_idx
                    );
                }
            }
        }
    }
}
#[test]
fn test_ls_allocate() {
    // Allocating through the allocator path should succeed for the standard
    // test dimensions with no extra alignment.
    let config = LayoutConfig {
        num_blocks: NUM_BLOCKS,
        num_layers: NUM_LAYERS,
        outer_dim: OUTER_DIM,
        page_size: PAGE_SIZE,
        inner_dim: INNER_DIM,
        alignment: 1,
        dtype_width_bytes: DTYPE_WIDTH_BYTES,
    };
    let allocated = LayerSeparate::allocate(config, &NullDeviceAllocator, true);
    allocated.expect("Layout allocation failed");
}
mod memory_region_verification_tests {
    use super::*;

    /// FullyContiguous: the layout-wide verification pass must succeed, and
    /// every individual (block, layer, outer) region must report a match
    /// against the layout's own expected calculation.
    #[test]
    fn test_fc_memory_region_verification() {
        let layout = setup_layout(None).expect("Layout setup failed");
        assert!(
            layout.verify_memory_regions().is_ok(),
            "Memory region verification should pass"
        );
        for block_idx in 0..NUM_BLOCKS {
            for layer_idx in 0..NUM_LAYERS {
                for outer_idx in 0..OUTER_DIM {
                    // verify_memory_region returns Ok(bool): Err for invalid
                    // indices, the bool for whether the region matched.
                    let matches = layout
                        .verify_memory_region(block_idx, layer_idx, outer_idx)
                        .expect("Memory region verification failed");
                    assert!(
                        matches,
                        "Memory region ({}, {}, {}) should match expected calculations",
                        block_idx, layer_idx, outer_idx
                    );
                }
            }
        }
    }

    /// FullyContiguous: the address returned by `memory_region` must equal
    /// the independently computed `expected_memory_address` for every triple.
    #[test]
    fn test_fc_expected_address_calculation() {
        let layout = setup_layout(None).expect("Layout setup failed");
        for block_idx in 0..NUM_BLOCKS {
            for layer_idx in 0..NUM_LAYERS {
                for outer_idx in 0..OUTER_DIM {
                    let actual_region = layout
                        .memory_region(block_idx, layer_idx, outer_idx)
                        .unwrap();
                    let expected_addr = layout
                        .expected_memory_address(block_idx, layer_idx, outer_idx)
                        .unwrap();
                    assert_eq!(
                        actual_region.addr, expected_addr,
                        "Address mismatch at ({}, {}, {})",
                        block_idx, layer_idx, outer_idx
                    );
                }
            }
        }
    }

    /// LayerSeparate (outer-contiguous): both the bulk region verification
    /// and the storage alignment verification must pass, as must each
    /// per-triple check.
    #[test]
    fn test_ls_memory_region_verification() {
        let layout = setup_layer_separate_layout(None, true).expect("Layout setup failed");
        assert!(
            layout.verify_memory_regions().is_ok(),
            "LayerSeparate memory region verification should pass"
        );
        assert!(
            layout.verify_storage_alignment().is_ok(),
            "LayerSeparate storage alignment verification should pass"
        );
        for block_idx in 0..NUM_BLOCKS {
            for layer_idx in 0..NUM_LAYERS {
                for outer_idx in 0..OUTER_DIM {
                    let matches = layout
                        .verify_memory_region(block_idx, layer_idx, outer_idx)
                        .expect("Memory region verification failed");
                    assert!(
                        matches,
                        "LayerSeparate memory region ({}, {}, {}) should match expected calculations",
                        block_idx, layer_idx, outer_idx
                    );
                }
            }
        }
    }

    /// LayerSeparate (block-contiguous): actual addresses must equal the
    /// expected address calculation for every triple.
    #[test]
    fn test_ls_expected_address_calculation() {
        let layout = setup_layer_separate_layout(None, false).expect("Layout setup failed");
        for block_idx in 0..NUM_BLOCKS {
            for layer_idx in 0..NUM_LAYERS {
                for outer_idx in 0..OUTER_DIM {
                    let actual_region = layout
                        .memory_region(block_idx, layer_idx, outer_idx)
                        .unwrap();
                    let expected_addr = layout
                        .expected_memory_address(block_idx, layer_idx, outer_idx)
                        .unwrap();
                    assert_eq!(
                        actual_region.addr, expected_addr,
                        "LayerSeparate address mismatch at ({}, {}, {})",
                        block_idx, layer_idx, outer_idx
                    );
                }
            }
        }
    }

    /// Verification must still pass when the config requests a large
    /// (512-byte) alignment, for both layout families.
    #[test]
    fn test_memory_region_verification_with_alignment() {
        const ALIGNMENT: usize = 512;
        let config = LayoutConfig {
            num_blocks: NUM_BLOCKS,
            num_layers: NUM_LAYERS,
            outer_dim: OUTER_DIM,
            page_size: PAGE_SIZE,
            inner_dim: INNER_DIM,
            alignment: ALIGNMENT,
            dtype_width_bytes: DTYPE_WIDTH_BYTES,
        };
        let fc_layout = FullyContiguous::allocate(config.clone(), &SystemAllocator).unwrap();
        let ls_layout = LayerSeparate::allocate(config, &NullDeviceAllocator, true).unwrap();
        assert!(
            fc_layout.verify_memory_regions().is_ok(),
            "FullyContiguous with alignment should pass verification"
        );
        assert!(
            ls_layout.verify_memory_regions().is_ok(),
            "LayerSeparate with alignment should pass verification"
        );
        assert!(
            ls_layout.verify_storage_alignment().is_ok(),
            "LayerSeparate storage alignment should pass verification"
        );
    }

    /// Two layouts built from the same config must produce identically
    /// sized regions at every (block, layer) coordinate, regardless of
    /// layout family.
    #[test]
    fn test_cross_layout_address_compatibility() {
        let config = LayoutConfig {
            num_blocks: 2,
            num_layers: 2,
            outer_dim: 1,
            page_size: 8,
            inner_dim: 16,
            alignment: 1,
            dtype_width_bytes: 2,
        };
        let fc_layout = FullyContiguous::allocate(config.clone(), &SystemAllocator).unwrap();
        let ls_layout = LayerSeparate::allocate(config, &NullDeviceAllocator, true).unwrap();
        for block_idx in 0..2 {
            for layer_idx in 0..2 {
                let fc_region = fc_layout.memory_region(block_idx, layer_idx, 0).unwrap();
                let ls_region = ls_layout.memory_region(block_idx, layer_idx, 0).unwrap();
                assert_eq!(
                    fc_region.size, ls_region.size,
                    "Memory region sizes should be compatible between layouts at ({}, {})",
                    block_idx, layer_idx
                );
            }
        }
    }

    /// `verify_memory_region` must reject indices one past each dimension
    /// bound instead of attempting to verify out-of-range regions.
    #[test]
    fn test_memory_region_bounds_checking() {
        let layout = setup_layout(None).expect("Layout setup failed");
        assert!(
            layout.verify_memory_region(NUM_BLOCKS, 0, 0).is_err(),
            "Should fail for invalid block index"
        );
        assert!(
            layout.verify_memory_region(0, NUM_LAYERS, 0).is_err(),
            "Should fail for invalid layer index"
        );
        assert!(
            layout.verify_memory_region(0, 0, OUTER_DIM).is_err(),
            "Should fail for invalid outer index"
        );
    }
}
}