dma_api/osal/mod.rs

use core::{num::NonZeroUsize, ptr::NonNull};

use mbarrier::mb;

use crate::{DmaDirection, DmaError, DmaHandle, DmaMapHandle};

cfg_if::cfg_if! {
    if #[cfg(target_arch = "aarch64")] {
        #[path = "aarch64.rs"]
        pub mod arch;
    } else {
        #[path = "nop.rs"]
        pub mod arch;
    }
}

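// The selected `arch` backend provides the low-level cache maintenance
// primitives (`flush`, `invalidate`, `flush_invalidate`) that the default
// `DmaOp` methods below delegate to.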
pub trait DmaOp: Sync + Send + 'static {
    fn page_size(&self) -> usize;

    /// Maps a virtual address to a DMA address.
    ///
    /// # Safety
    /// The address range must be a single contiguous block of memory.
    unsafe fn map_single(
        &self,
        dma_mask: u64,
        addr: NonNull<u8>,
        size: NonZeroUsize,
        align: usize,
        direction: DmaDirection,
    ) -> Result<DmaMapHandle, DmaError>;

    /// Removes a DMA mapping.
    ///
    /// # Safety
    /// Must be paired with a previous call to `map_single`.
    unsafe fn unmap_single(&self, handle: DmaMapHandle);

    /// Writes the cache back to memory (clean).
    fn flush(&self, addr: NonNull<u8>, size: usize) {
        mb();
        arch::flush(addr, size)
    }

    /// Invalidates the cache.
    fn invalidate(&self, addr: NonNull<u8>, size: usize) {
        arch::invalidate(addr, size);
        mb();
    }

    /// Writes the cache back to memory and invalidates it (clean + invalidate).
    fn flush_invalidate(&self, addr: NonNull<u8>, size: usize) {
        mb();
        arch::flush_invalidate(addr, size);
        mb();
    }

    /// Allocates DMA-accessible memory.
    ///
    /// # Safety
    ///
    /// - The caller must ensure `layout` is valid.
    /// - The returned memory must be contiguous.
    unsafe fn alloc_coherent(
        &self,
        dma_mask: u64,
        layout: core::alloc::Layout,
    ) -> Option<DmaHandle>;

    /// Frees DMA memory.
    ///
    /// # Safety
    /// The caller must ensure the pointer and layout carried by `handle` match
    /// those used at allocation time.
    unsafe fn dealloc_coherent(&self, handle: DmaHandle);

    fn prepare_read(
        &self,
        handle: &DmaMapHandle,
        offset: usize,
        size: usize,
        direction: DmaDirection,
    ) {
        if matches!(
            direction,
            DmaDirection::FromDevice | DmaDirection::Bidirectional
        ) {
            let origin_ptr = unsafe { handle.cpu_addr.add(offset) };

            if let Some(virt) = handle.map_alloc_virt
                && virt != handle.cpu_addr
            {
                // A bounce buffer is in use: invalidate it so the CPU sees the
                // device-written data, then copy that data back into the
                // caller's buffer.
                let map_ptr = unsafe { virt.add(offset) };
                self.invalidate(map_ptr, size);
                unsafe {
                    origin_ptr.copy_from_nonoverlapping(map_ptr, size);
                }
            } else {
                self.invalidate(origin_ptr, size);
            }
        }
    }

    fn confirm_write(
        &self,
        handle: &DmaMapHandle,
        offset: usize,
        size: usize,
        direction: DmaDirection,
    ) {
        if matches!(
            direction,
            DmaDirection::ToDevice | DmaDirection::Bidirectional
        ) {
            let ptr = unsafe { handle.cpu_addr.add(offset) };

            if let Some(virt) = handle.map_alloc_virt
                && virt != handle.cpu_addr
            {
                // A bounce buffer is in use: copy the caller's data into it,
                // then clean the bounce buffer (not the original buffer),
                // since that is the memory the device will actually read.
                let map_ptr = unsafe { virt.add(offset) };
                unsafe {
                    map_ptr.copy_from_nonoverlapping(ptr, size);
                }
                self.flush(map_ptr, size);
            } else {
                self.flush(ptr, size);
            }
        }
    }
}
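
As a rough sketch of how these pieces fit together, the following hypothetical driver-side helper drives a `DmaOp` implementation through one device-to-memory transfer. The function name `dma_read_example`, the all-ones DMA mask, and the page-size alignment are illustrative assumptions, not part of this module.

// Hypothetical usage sketch (not part of this module): map a buffer, let the
// device write into it, make the result visible to the CPU, then unmap.
unsafe fn dma_read_example(
    dma_ops: &dyn DmaOp,
    buf: NonNull<u8>,
    len: NonZeroUsize,
) -> Result<(), DmaError> {
    // Map the CPU buffer for a device-to-CPU transfer. The all-ones mask and
    // page-size alignment are placeholder choices for this sketch.
    let handle = unsafe {
        dma_ops.map_single(
            u64::MAX,
            buf,
            len,
            dma_ops.page_size(),
            DmaDirection::FromDevice,
        )?
    };

    // ... program the device with the bus address carried by `handle` and wait
    // for the transfer to complete (device-specific, omitted) ...

    // Invalidate (and, if a bounce buffer was used, copy back) so the CPU sees
    // the device-written data, then release the mapping.
    dma_ops.prepare_read(&handle, 0, len.get(), DmaDirection::FromDevice);
    unsafe { dma_ops.unmap_single(handle) };
    Ok(())
}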