dma_api/osal/mod.rs

use core::{num::NonZeroUsize, ptr::NonNull};

use crate::{Direction, DmaError, DmaHandle};

cfg_if::cfg_if! {
    if #[cfg(target_arch = "aarch64")] {
        #[path = "aarch64.rs"]
        pub mod arch;
    } else {
        #[path = "nop.rs"]
        pub mod arch;
    }
}
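
// Both architecture backends are expected to expose the same two free
// functions that the default `flush`/`invalidate` methods below call. A
// minimal sketch of what the fallback (`nop.rs`) presumably amounts to on
// targets without explicit cache maintenance (an assumption, not verified
// against that file):
//
//     pub fn flush(_addr: core::ptr::NonNull<u8>, _size: usize) {}
//     pub fn invalidate(_addr: core::ptr::NonNull<u8>, _size: usize) {}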

pub trait DmaOp: Sync + Send + 'static {
    fn page_size(&self) -> usize;

    /// Maps a virtual address to a DMA address.
    ///
    /// # Safety
    /// The region must be a single contiguous block of memory.
    unsafe fn map_single(
        &self,
        dma_mask: u64,
        addr: NonNull<u8>,
        size: NonZeroUsize,
        align: usize,
        direction: Direction,
    ) -> Result<DmaHandle, DmaError>;

    /// Removes a DMA mapping.
    ///
    /// # Safety
    /// Must be paired with a preceding `map_single` call.
    unsafe fn unmap_single(&self, handle: DmaHandle);

    /// Writes cached data back to memory (clean).
    fn flush(&self, addr: NonNull<u8>, size: usize) {
        arch::flush(addr, size)
    }

    /// Invalidates the cache for the given range.
    fn invalidate(&self, addr: NonNull<u8>, size: usize) {
        arch::invalidate(addr, size)
    }

    /// Allocates DMA-accessible memory.
    ///
    /// # Safety
    ///
    /// - The caller must ensure `layout` is valid.
    /// - The returned memory must be contiguous.
    unsafe fn alloc_coherent(
        &self,
        dma_mask: u64,
        layout: core::alloc::Layout,
    ) -> Option<DmaHandle>;

    /// Frees DMA memory.
    ///
    /// # Safety
    /// The handle must come from `alloc_coherent`, so that its pointer and
    /// layout match the original allocation.
    unsafe fn dealloc_coherent(&self, handle: DmaHandle);

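    /// Synchronizes a mapped region before the CPU reads data written by the
    /// device: invalidates the CPU cache and, if the mapping went through a
    /// bounce buffer, copies the received bytes back into the caller's
    /// original buffer.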
    fn prepare_read(&self, handle: &DmaHandle, offset: usize, size: usize, direction: Direction) {
        if matches!(direction, Direction::FromDevice | Direction::Bidirectional) {
            let ptr = unsafe { handle.dma_virt().add(offset) };

            self.invalidate(ptr, size);

            // If the mapping went through a bounce buffer, copy the device's
            // data back into the caller's original buffer.
            if let Some(virt) = handle.alloc_virt
                && virt != handle.origin_virt
            {
                unsafe {
                    let src = core::slice::from_raw_parts(ptr.as_ptr(), size);
                    let dst = core::slice::from_raw_parts_mut(
                        handle.origin_virt.as_ptr().add(offset),
                        size,
                    );

                    dst.copy_from_slice(src);
                }
            }
        }
    }

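    /// Synchronizes a mapped region after the CPU has written data for the
    /// device: if the mapping went through a bounce buffer, copies the
    /// caller's bytes into it, then cleans the CPU cache so the device sees
    /// the data.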
    fn confirm_write(&self, handle: &DmaHandle, offset: usize, size: usize, direction: Direction) {
        if matches!(direction, Direction::ToDevice | Direction::Bidirectional) {
            let ptr = unsafe { handle.dma_virt().add(offset) };

            // If the mapping went through a bounce buffer, copy the caller's
            // data into it before cleaning the cache.
            if let Some(virt) = handle.alloc_virt
                && virt != handle.origin_virt
            {
                unsafe {
                    core::ptr::copy_nonoverlapping(
                        handle.origin_virt.as_ptr().add(offset),
                        ptr.as_ptr(),
                        size,
                    );
                }
            }

            self.flush(ptr, size)
        }
    }
}
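
// Illustrative sketch (not part of the original API surface): a typical
// streaming "to device" transfer through some `DmaOp` implementation. The
// DMA mask (`u64::MAX`), the alignment of 1, and this helper itself are
// placeholder assumptions for the example; real drivers derive mask and
// alignment from their hardware and hand the handle's bus address to the
// device between `confirm_write` and `unmap_single`.
#[allow(dead_code)]
fn example_to_device(ops: &impl DmaOp, buf: &mut [u8]) -> Result<(), DmaError> {
    let addr = NonNull::new(buf.as_mut_ptr()).expect("slice pointers are non-null");
    let size = NonZeroUsize::new(buf.len()).expect("buffer must not be empty");

    // SAFETY: `buf` is a single contiguous memory block, as `map_single` requires.
    let handle = unsafe { ops.map_single(u64::MAX, addr, size, 1, Direction::ToDevice)? };

    // Copy into the (possibly bounced) DMA buffer and clean the CPU cache.
    ops.confirm_write(&handle, 0, buf.len(), Direction::ToDevice);

    // ... program the device with the handle's bus address and wait for completion ...

    // SAFETY: `handle` came from the matching `map_single` call above.
    unsafe { ops.unmap_single(handle) };
    Ok(())
}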