// ibverbs_rs/ibverbs/memory/memory_region.rs
1use crate::ibverbs::access_config::AccessFlags;
2use crate::ibverbs::error::{IbvError, IbvResult};
3use crate::ibverbs::memory::{
4 GatherElement, RemoteMemoryRegion, ScatterElement, ScatterGatherElementError,
5};
6use crate::ibverbs::protection_domain::ProtectionDomain;
7use ibverbs_sys::*;
8use std::ffi::c_void;
9use std::io;
10
/// A handle to a registered Memory Region.
///
/// A `MemoryRegion` represents a block of memory registered with the NIC for RDMA operations.
///
/// # What is Registration?
///
/// Registration pins a memory buffer (preventing OS swapping) and provides the NIC with
/// virtual-to-physical address translation, enabling direct memory access (DMA) without CPU
/// involvement.
///
/// # Ownership Model
///
/// `MemoryRegion` does **not** own the underlying buffer. This design allows:
/// * Registering the same buffer in multiple Protection Domains.
/// * Registering memory owned by other structures.
/// * Flexible memory management strategies.
///
/// Safety is enforced at **usage time** when creating [`GatherElement`] or [`ScatterElement`]
/// instances (see the [memory module](crate::ibverbs::memory) for details).
///
/// # Registration Methods
///
/// ## Safe Registration
///
/// * [`register_local_mr`](MemoryRegion::register_local_mr) — Local write access only.
///   Safe because all operations require creating SGEs with valid Rust references.
///
/// ## Unsafe Registration
///
/// * [`register_shared_mr`](MemoryRegion::register_shared_mr) — Adds remote read/write access.
///   Unsafe because remote peers can access memory asynchronously, breaking aliasing guarantees.
/// * [`register_mr_with_access`](MemoryRegion::register_mr_with_access) — Full manual control.
///   Unsafe when remote access flags are enabled.
#[doc(alias = "ibv_mr")]
#[doc(alias = "ibv_reg_mr")]
pub struct MemoryRegion {
    // Clone of the Protection Domain this MR was registered in; held so the PD
    // outlives the registration.
    pd: ProtectionDomain,
    // Raw handle returned by ibv_reg_mr / ibv_reg_dmabuf_mr; deregistered in Drop.
    mr: *mut ibv_mr,
}
50
/// SAFETY: libibverbs components are thread safe; the raw `ibv_mr` pointer is
/// only dereferenced for reads of immutable registration metadata or passed to
/// libibverbs calls, so shared references across threads are sound.
unsafe impl Sync for MemoryRegion {}
/// SAFETY: libibverbs components are thread safe; ownership of the raw
/// `ibv_mr` handle can move between threads, and deregistration in `Drop` is
/// valid from any thread.
unsafe impl Send for MemoryRegion {}
55
56impl Drop for MemoryRegion {
57 fn drop(&mut self) {
58 log::debug!("MemoryRegion deregistered");
59 // SAFETY: self.mr is valid.
60 let errno = unsafe { ibv_dereg_mr(self.mr) };
61 if errno != 0 {
62 let error = IbvError::from_errno_with_msg(errno, "Failed to deregister memory region");
63 log::error!("{error}");
64 }
65 }
66}
67
68impl std::fmt::Debug for MemoryRegion {
69 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
70 f.debug_struct("MemoryRegion")
71 .field("address", &(unsafe { (*self.mr).addr }))
72 .field("length", &(unsafe { (*self.mr).length }))
73 .field("handle", &(unsafe { (*self.mr).handle }))
74 .field("lkey", &(unsafe { (*self.mr).lkey }))
75 .field("rkey", &(unsafe { (*self.mr).rkey }))
76 .field("pd", &self.pd)
77 .finish()
78 }
79}
80
81impl MemoryRegion {
82 /// Registers a memory region with specific access permissions.
83 ///
84 /// # Arguments
85 ///
86 /// * `pd` — The Protection Domain to register this memory region in.
87 /// * `address` — Pointer to the start of the memory buffer to register.
88 /// * `length` — The size of the buffer in bytes.
89 /// * `access_flags` — The permissions to grant to the NIC for this memory region.
90 ///
91 /// # Safety
92 ///
93 /// This function is `unsafe` because enabling remote read or write access
94 /// breaks local safety guarantees. If remote access is enabled:
95 /// 1. **Aliasing**: Remote peers can modify this memory at any time.
96 /// You must manually ensure Rust's aliasing rules are respected.
97 /// 2. **Lifetime**: You must manually ensure the memory remains allocated as long as
98 /// remote peers are actively performing RDMA operations on it.
99 pub unsafe fn register_mr_with_access(
100 pd: &ProtectionDomain,
101 address: *mut u8,
102 length: usize,
103 access_flags: AccessFlags,
104 ) -> IbvResult<MemoryRegion> {
105 // ibv_access_flags values are small bitmasks (max 31), always fit in i32
106 #[allow(clippy::cast_possible_wrap)]
107 let mr = unsafe {
108 ibv_reg_mr(
109 pd.inner.pd,
110 address as *mut c_void,
111 length,
112 access_flags.code() as i32,
113 )
114 };
115 if mr.is_null() {
116 Err(IbvError::from_errno_with_msg(
117 io::Error::last_os_error()
118 .raw_os_error()
119 .expect("ibv_reg_mr should set errno on error"),
120 "Failed to register memory region",
121 ))
122 } else {
123 log::debug!("MemoryRegion registered");
124 Ok(MemoryRegion { pd: pd.clone(), mr })
125 }
126 }
127
128 /// Registers a local memory region (Local Write access).
129 ///
130 /// This enables local write access only.
131 ///
132 /// # Arguments
133 ///
134 /// * `pd` — The Protection Domain.
135 /// * `address` — Pointer to the start of the memory buffer.
136 /// * `length` — The size of the buffer in bytes.
137 ///
138 /// # Why is this Safe?
139 ///
140 /// Even though this does not take ownership of the memory, it is safe because:
141 /// 1. It does not allow Remote access (no aliasing risk).
142 /// 2. To use this MR locally (Send/Recv/Write-Source), you must create an SGE.
143 /// The SGE creation requires a valid reference to the memory, proving it is still alive.
144 // address is passed to libibverbs for registration, not locally dereferenced; hardware enforces validity
145 #[allow(clippy::not_unsafe_ptr_arg_deref)]
146 pub fn register_local_mr(
147 pd: &ProtectionDomain,
148 address: *mut u8,
149 length: usize,
150 ) -> IbvResult<MemoryRegion> {
151 unsafe {
152 Self::register_mr_with_access(
153 pd,
154 address,
155 length,
156 AccessFlags::new().with_local_write(),
157 )
158 }
159 }
160
161 /// Registers a shared memory region with local write and remote read and write access.
162 ///
163 /// # Arguments
164 ///
165 /// * `pd` — The Protection Domain.
166 /// * `address` — Pointer to the start of the memory buffer.
167 /// * `length` — The size of the buffer in bytes.
168 ///
169 /// # Safety
170 ///
171 /// This is `unsafe` because it allows remote peers to access the memory.
172 /// * **Aliasing** — The memory effectively becomes shared mutable state. It is your
173 /// responsibility to ensure aliasing rules are respected while remote peers perform
174 /// RDMA operations on it.
175 /// * **Lifetime** — You must manually ensure the memory remains allocated as long as
176 /// remote peers are actively performing RDMA operations on it.
177 pub unsafe fn register_shared_mr(
178 pd: &ProtectionDomain,
179 address: *mut u8,
180 length: usize,
181 ) -> IbvResult<MemoryRegion> {
182 unsafe {
183 Self::register_mr_with_access(
184 pd,
185 address,
186 length,
187 AccessFlags::new()
188 .with_local_write()
189 .with_remote_read()
190 .with_remote_write(),
191 )
192 }
193 }
194
195 /// Registers a DMA-BUF with the given access flags.
196 ///
197 /// # Arguments
198 ///
199 /// * `pd` — The Protection Domain to register this memory region in.
200 /// * `fd` — The file descriptor of the DMA-BUF to be registered.
201 /// * `offset` — The start offset within the DMA-BUF file. The MR begins at this offset.
202 /// * `length` — The size of the region to register (in bytes).
203 /// * `iova` — The Input/Output Virtual Address. This is the virtual base address the NIC
204 /// will use when accessing this MR via lkey/rkey.
205 /// **Important**: `iova` must have the same page offset as `offset`.
206 /// * `access_flags` — The permissions for this memory region.
207 ///
208 /// # Safety
209 ///
210 /// Same safety rules as [`register_mr_with_access`](Self::register_mr_with_access).
211 /// If `access_flags` includes remote capabilities, the user must manage aliasing and lifetimes manually.
212 pub unsafe fn register_dmabuf_mr_with_access(
213 pd: &ProtectionDomain,
214 fd: i32,
215 offset: u64,
216 length: usize,
217 iova: u64,
218 access_flags: AccessFlags,
219 ) -> IbvResult<MemoryRegion> {
220 // ibv_access_flags values are small bitmasks (max 31), always fit in i32
221 #[allow(clippy::cast_possible_wrap)]
222 let mr = unsafe {
223 ibv_reg_dmabuf_mr(
224 pd.inner.pd,
225 offset,
226 length,
227 iova,
228 fd,
229 access_flags.code() as i32,
230 )
231 };
232
233 if mr.is_null() {
234 Err(IbvError::from_errno_with_msg(
235 io::Error::last_os_error()
236 .raw_os_error()
237 .expect("ibv_reg_dmabuf_mr should set errno on error"),
238 "Failed to register memory region",
239 ))
240 } else {
241 log::debug!("IbvMemoryRegion registered");
242 Ok(MemoryRegion { pd: pd.clone(), mr })
243 }
244 }
245
246 /// Registers a DMA-BUF for local access only.
247 ///
248 /// # Arguments
249 ///
250 /// * `pd` — The Protection Domain.
251 /// * `fd` — The file descriptor of the DMA-BUF.
252 /// * `offset` — The start offset within the DMA-BUF file.
253 /// * `length` — The size of the region to register (in bytes).
254 /// * `iova` — The virtual base address. Must have the same page offset as `offset`.
255 ///
256 /// Safe for the same reasons as [`register_local_mr`](Self::register_local_mr): usages are gated by SGE creation.
257 pub fn register_local_dmabuf_mr(
258 pd: &ProtectionDomain,
259 fd: i32,
260 offset: u64,
261 length: usize,
262 iova: u64,
263 ) -> IbvResult<MemoryRegion> {
264 unsafe {
265 Self::register_dmabuf_mr_with_access(
266 pd,
267 fd,
268 offset,
269 length,
270 iova,
271 AccessFlags::new().with_local_write(),
272 )
273 }
274 }
275
276 /// Registers a DMA-BUF for shared access.
277 ///
278 /// # Arguments
279 ///
280 /// * `pd` — The Protection Domain.
281 /// * `fd` — The file descriptor of the DMA-BUF.
282 /// * `offset` — The start offset within the DMA-BUF file.
283 /// * `length` — The size of the region to register (in bytes).
284 /// * `iova` — The virtual base address. Must have the same page offset as `offset`.
285 ///
286 /// # Safety
287 ///
288 /// Unsafe due to remote access risks. See [`register_shared_mr`](Self::register_shared_mr).
289 pub unsafe fn register_shared_dmabuf_mr(
290 pd: &ProtectionDomain,
291 fd: i32,
292 offset: u64,
293 length: usize,
294 iova: u64,
295 ) -> IbvResult<MemoryRegion> {
296 unsafe {
297 Self::register_dmabuf_mr_with_access(
298 pd,
299 fd,
300 offset,
301 length,
302 iova,
303 AccessFlags::new()
304 .with_local_write()
305 .with_remote_read()
306 .with_remote_write(),
307 )
308 }
309 }
310}
311
312impl MemoryRegion {
313 /// Returns the Remote Key (rkey) for this MR.
314 ///
315 /// This key is used by remote peers to access this memory region via RDMA operations.
316 pub fn rkey(&self) -> u32 {
317 unsafe { *self.mr }.rkey
318 }
319
320 /// Returns the starting virtual address of the registered memory buffer.
321 pub fn address(&self) -> usize {
322 unsafe { (*self.mr).addr as usize }
323 }
324
325 /// Returns the length of the registered memory region in bytes.
326 pub fn length(&self) -> usize {
327 unsafe { (*self.mr).length }
328 }
329
330 /// Returns the Local Key (lkey) for this MR.
331 ///
332 /// This key is used locally in Work Requests (within Scatter/Gather Elements) to prove
333 /// to the NIC that the application has the right to access this memory.
334 pub fn lkey(&self) -> u32 {
335 unsafe { *self.mr }.lkey
336 }
337
338 /// Returns a remote endpoint of this MR for remote peers to use in one-sided operations.
339 ///
340 /// This struct contains the triplet (Address, Length, RKey) needed by a remote node
341 /// to perform RDMA Read or Write operations on this memory.
342 ///
343 /// # Warning
344 ///
345 /// If the peer attempts an operation (e.g., RDMA Write) that was not enabled during
346 /// registration, their operation will fail with a **Remote Access Error**.
347 pub fn remote(&self) -> RemoteMemoryRegion {
348 RemoteMemoryRegion::new(self.address() as u64, self.length(), self.rkey())
349 }
350}
351
352impl MemoryRegion {
353 /// Creates a **Gather Element** (for Sending/Writing) using the "raw" constructor.
354 ///
355 /// # Debug checks
356 ///
357 /// In debug builds, this validates MR containment and the `u32` length limit and may panic if
358 /// they are violated (because it uses `debug_assert!`). In release builds, these checks are
359 /// not executed by default.
360 pub fn gather_element<'a>(&'a self, data: &'a [u8]) -> GatherElement<'a> {
361 GatherElement::new(self, data)
362 }
363
364 /// Creates a **Gather Element** (for Sending/Writing) from a shared slice.
365 ///
366 /// # Checks
367 ///
368 /// This method validates that:
369 /// 1. The slice is fully contained within this [`MemoryRegion`].
370 /// 2. The slice's length fits in a `u32` (hardware limit for a single SGE).
371 ///
372 /// If these checks fail, it returns an error immediately.
373 ///
374 /// # Safety Guarantee
375 ///
376 /// This takes a `&'a [u8]`, ensuring the memory is initialized and cannot be mutated
377 /// while the operation is pending (Rust borrowing rules).
378 pub fn gather_element_checked<'a>(
379 &'a self,
380 data: &'a [u8],
381 ) -> Result<GatherElement<'a>, ScatterGatherElementError> {
382 GatherElement::new_checked(self, data)
383 }
384
385 /// Creates a **Gather Element** without immediate bounds checking.
386 ///
387 /// # Behavior
388 ///
389 /// This bypasses the software checks for:
390 /// * Memory region containment.
391 /// * Length limits (`u32`).
392 ///
393 /// # Safety
394 ///
395 /// This method is safe to call. If the slice is not within the memory region, or if the
396 /// length is invalid, the library will create the SGE anyway.
397 ///
398 /// However, the **hardware** will catch this mismatch when the Work Request is executed.
399 /// The operation will fail with a **Local Protection Error**,
400 /// but it will not cause Undefined Behavior.
401 pub fn gather_element_unchecked<'a>(&'a self, data: &'a [u8]) -> GatherElement<'a> {
402 GatherElement::new_unchecked(self, data)
403 }
404
405 /// Creates a **Scatter Element** (for Receiving/Reading) using the "raw" constructor.
406 ///
407 /// # Debug checks
408 ///
409 /// In debug builds, this validates MR containment and the `u32` length limit and may panic if
410 /// they are violated (because it uses `debug_assert!`). In release builds, these checks are
411 /// not executed by default.
412 ///
413 pub fn scatter_element<'a>(&'a self, data: &'a mut [u8]) -> ScatterElement<'a> {
414 ScatterElement::new(self, data)
415 }
416
417 /// Creates a **Scatter Element** (for Receiving/Reading) from a mutable slice.
418 ///
419 /// # Checks
420 ///
421 /// This method validates that:
422 /// 1. The slice is fully contained within this [`MemoryRegion`].
423 /// 2. The slice's length fits in a `u32`.
424 ///
425 /// # Safety Guarantee
426 ///
427 /// This takes a `&'a mut [u8]`, ensuring you have exclusive access to the buffer
428 /// and no other part of your program is reading it while the NIC writes to it.
429 pub fn scatter_element_checked<'a>(
430 &'a self,
431 data: &'a mut [u8],
432 ) -> Result<ScatterElement<'a>, ScatterGatherElementError> {
433 ScatterElement::new_checked(self, data)
434 }
435
436 /// Creates a **Scatter Element** without immediate bounds checking.
437 ///
438 /// # Behavior
439 ///
440 /// This bypasses the software checks for:
441 /// - Memory region containment.
442 /// - Length limits (`u32`).
443 ///
444 /// # Safety
445 ///
446 /// This method is safe to call. If the slice is not within the memory region, or if the
447 /// length is invalid, the library will create the SGE anyway.
448 ///
449 /// However, the **hardware** will catch this mismatch when the Work Request is executed.
450 /// The operation will fail with a **Local Protection Error**,
451 /// but it will not cause Undefined Behavior.
452 pub fn scatter_element_unchecked<'a>(&'a self, data: &'a mut [u8]) -> ScatterElement<'a> {
453 ScatterElement::new_unchecked(self, data)
454 }
455
456 /// Checks if the given address range is fully contained within this MR.
457 pub fn encloses(&self, address: *const u8, length: usize) -> bool {
458 let mr_start = self.address();
459 let data_start = address as usize;
460 if data_start < mr_start {
461 return false;
462 }
463 let offset = data_start - mr_start;
464 let remaining = self.length().saturating_sub(offset);
465 length <= remaining
466 }
467
468 /// Checks if the given slice is fully contained within this MR.
469 pub fn encloses_slice(&self, slice: &[u8]) -> bool {
470 self.encloses(slice.as_ptr(), slice.len())
471 }
472}