use core::{num::NonZeroUsize, ptr::NonNull};
use mbarrier::mb;
use crate::{DmaDirection, DmaError, DmaHandle, DmaMapHandle};
// Compile-time selection of the architecture-specific cache-maintenance
// backend (`arch::flush` / `arch::invalidate` / `arch::flush_invalidate`).
// aarch64 gets real DC CVAC/IVAC-style implementations; every other target
// falls back to no-op stubs (presumably for cache-coherent or host builds
// — TODO confirm against nop.rs).
cfg_if::cfg_if! {
if #[cfg(target_arch = "aarch64")] {
#[path = "aarch64.rs"]
pub mod arch;
} else{
#[path = "nop.rs"]
pub mod arch;
}
}
/// Platform backend for DMA: streaming mappings, coherent allocations, and
/// CPU cache maintenance.
///
/// Implementations must be thread-safe (`Sync + Send + 'static`) because a
/// single backend instance is shared across drivers.
pub trait DmaOp: Sync + Send + 'static {
    /// Page size used when creating DMA mappings.
    fn page_size(&self) -> usize;

    /// Creates a streaming DMA mapping for the buffer at `addr`.
    ///
    /// `dma_mask` restricts the bus addresses the device can reach; `align`
    /// is the required alignment of the mapped region.
    ///
    /// # Safety
    /// `addr` must point to a live allocation of at least `size` bytes that
    /// remains valid until the returned handle is passed to
    /// [`DmaOp::unmap_single`].
    unsafe fn map_single(
        &self,
        dma_mask: u64,
        addr: NonNull<u8>,
        size: NonZeroUsize,
        align: usize,
        direction: DmaDirection,
    ) -> Result<DmaMapHandle, DmaError>;

    /// Tears down a mapping created by [`DmaOp::map_single`].
    ///
    /// # Safety
    /// `handle` must originate from `map_single` on this same backend and
    /// must not be used after this call.
    unsafe fn unmap_single(&self, handle: DmaMapHandle);

    /// Cleans (writes back) the cache lines covering `[addr, addr + size)`
    /// so the device observes the CPU's latest stores.
    fn flush(&self, addr: NonNull<u8>, size: usize) {
        // Order all prior CPU stores before the cache-maintenance ops run.
        mb();
        arch::flush(addr, size)
    }

    /// Invalidates the cache lines covering `[addr, addr + size)` so
    /// subsequent CPU loads fetch data written by the device.
    fn invalidate(&self, addr: NonNull<u8>, size: usize) {
        arch::invalidate(addr, size);
        // Make sure the invalidation is complete before any following loads.
        mb();
    }

    /// Cleans then invalidates the cache lines covering
    /// `[addr, addr + size)`, with barriers on both sides.
    fn flush_invalidate(&self, addr: NonNull<u8>, size: usize) {
        mb();
        arch::flush_invalidate(addr, size);
        mb();
    }

    /// Allocates a cache-coherent DMA buffer described by `layout`,
    /// reachable by a device constrained to `dma_mask`.
    ///
    /// # Safety
    /// The returned handle must eventually be released via
    /// [`DmaOp::dealloc_coherent`] with the allocation otherwise untouched.
    unsafe fn alloc_coherent(
        &self,
        dma_mask: u64,
        layout: core::alloc::Layout,
    ) -> Option<DmaHandle>;

    /// Frees a coherent buffer obtained from [`DmaOp::alloc_coherent`].
    ///
    /// # Safety
    /// `handle` must originate from `alloc_coherent` on this same backend
    /// and must not be used after this call.
    unsafe fn dealloc_coherent(&self, handle: DmaHandle);

    /// Makes device-written data visible to the CPU before it reads
    /// `size` bytes at `offset` within the mapping.
    ///
    /// For device-to-CPU directions: invalidates the relevant cache lines,
    /// and — when the mapping uses a separate (bounce) virtual mapping —
    /// copies the freshly invalidated bytes back into the caller's original
    /// buffer.
    fn prepare_read(
        &self,
        handle: &DmaMapHandle,
        offset: usize,
        size: usize,
        direction: DmaDirection,
    ) {
        if matches!(
            direction,
            DmaDirection::FromDevice | DmaDirection::Bidirectional
        ) {
            let origin_ptr = unsafe { handle.cpu_addr.add(offset) };
            if let Some(virt) = handle.map_alloc_virt
                && virt != handle.cpu_addr
            {
                // Bounce buffer in use: the device wrote through `virt`, so
                // invalidate that mapping, then copy into the caller's buffer.
                let map_ptr = unsafe { virt.add(offset) };
                self.invalidate(map_ptr, size);
                unsafe {
                    origin_ptr.copy_from_nonoverlapping(map_ptr, size);
                }
            } else {
                self.invalidate(origin_ptr, size);
            }
        }
    }

    /// Pushes CPU-written data out to the device before it reads `size`
    /// bytes at `offset` within the mapping.
    ///
    /// For CPU-to-device directions: when the mapping uses a separate
    /// (bounce) virtual mapping, copies the caller's bytes into it first,
    /// then cleans the cache lines the device will read through.
    fn confirm_write(
        &self,
        handle: &DmaMapHandle,
        offset: usize,
        size: usize,
        direction: DmaDirection,
    ) {
        if matches!(
            direction,
            DmaDirection::ToDevice | DmaDirection::Bidirectional
        ) {
            let ptr = unsafe { handle.cpu_addr.add(offset) };
            if let Some(virt) = handle.map_alloc_virt
                && virt != handle.cpu_addr
            {
                let map_ptr = unsafe { virt.add(offset) };
                unsafe {
                    map_ptr.copy_from_nonoverlapping(ptr, size);
                }
                // BUGFIX: flush the bounce mapping, not the origin buffer.
                // The copy above dirties cache lines at `map_ptr` — the
                // address the device reads through — so those are the lines
                // that must be cleaned. (Mirrors `prepare_read`, which
                // invalidates `map_ptr` in the bounce case.) Previously the
                // origin `ptr` was flushed, leaving the bounce buffer's
                // dirty lines in cache and the device reading stale data.
                self.flush(map_ptr, size);
            } else {
                self.flush(ptr, size);
            }
        }
    }
}