1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
// SPDX-FileCopyrightText: Copyright (c) 2024-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//! Transfer module for copying blocks between layouts with different storage locations.
//!
//! This module provides functionality for transferring KV cache blocks between layouts
//! that may be backed by different storage types (GPU memory, pinned host memory, disk, etc.)
//! and potentially across NIXL-connected remote nodes.
//!
//! # Core Concepts
//!
//! - [`PhysicalLayout`]: Wraps a layout with its physical storage location and NIXL metadata
//! - [`LayoutDescriptor`]: Serializable representation for cross-node communication
//! - Transfer strategies: memcpy, CUDA, NIXL based on source/destination locations
//! - Block-wise and layer-wise transfer operations
//!
//! # Usage
//!
//! ```rust,ignore
//! use dynamo_kvbm::v2::transfer::{PhysicalLayout, transfer_blocks};
//!
//! // Create local physical layout with NIXL registration
//! let src = PhysicalLayout::new_local(src_layout, StorageKind::Device(0))
//!     .with_nixl_registration("local_agent".to_string())?;
//!
//! // Create remote physical layout
//! let dst = PhysicalLayout::new_remote(
//!     dst_layout,
//!     StorageKind::Pinned,
//!     "remote_agent".to_string()
//! );
//!
//! // Transfer blocks from local to remote
//! let src_block_ids = [0, 1, 2];
//! let dst_block_ids = [0, 1, 2];
//! let future = transfer_blocks(&src, &dst, &src_block_ids, &dst_block_ids, &ctx)?;
//! future.await?;
//! ```
// Re-export StorageKind
// NOTE(review): the re-export paths in this block appear to have been
// stripped by a faulty extraction or merge — `pub use ;` is not valid Rust,
// and `crateStorageKind` has lost its `::` path separators (presumably
// something like `crate::...::StorageKind`). None of these lines will
// compile as written; restore the original paths from version control
// rather than guessing them here.
pub use crateStorageKind;
pub use TransferCapabilities;
pub use ;
pub use ;
pub use ;
pub use ;
pub use ;
pub use ;
pub use BlockValidationError;
// Internal - TransferContext is now managed by TransportManager
pub use TransferContext;
pub use PhysicalLayout;
// Re-export manager types - TransportManager is the primary public API
pub use ;
// #[cfg(test)]
// pub use testing::{RoundTripTest, RoundTripTestResult};
// NOTE(review): `use Result;` is likewise missing its path — presumably a
// crate-local alias (e.g. `anyhow::Result` or `crate::Result`); the module
// docs show transfers resolving via `future.await?`, so a fallible Result
// type is expected here. Confirm the exact path against version control.
use Result;
/// Future representing an in-progress transfer operation.
///
/// The transfer completes when this future resolves.
pub type TransferFuture = Pin;
/// Specification for bounce buffer in multi-hop transfers.
///
/// This structure provides the layout and block IDs to use as an intermediate
/// staging area when direct transfers are not allowed.
// #[cfg(all(test, feature = "testing-cuda"))]
// mod cuda_integration_tests {
// use super::*;
// use crate::block_manager::v2::layout::{
// FullyContiguousLayout, Layout, LayoutConfig, MemoryRegion, OwnedMemoryRegion,
// };
// use cudarc::driver::CudaContext;
// use std::sync::Arc;
// // TODO: Add CUDA-specific integration tests
// // These would test:
// // - H2D transfers
// // - D2H transfers
// // - D2D transfers
// // - Async completion via event synchronization
// }
// #[cfg(all(test, feature = "testing-nixl"))]
// mod nixl_integration_tests {
// use super::*;
// // TODO: Add NIXL-specific integration tests
// // These would test:
// // - Remote memory access via NIXL Read
// // - Disk-backed transfers via NIXL Write
// // - Cross-node serialization with LayoutDescriptor
// }