use crate::{
Error,
element::Element,
ral::{self, Static, dma, dmamux, tcd::BandwidthControl},
};
impl<const CHANNELS: usize> super::Dma<CHANNELS> {
    /// Create a handle for DMA channel `index`.
    ///
    /// The returned [`Channel`] shares the controller and multiplexer
    /// register blocks with every other channel handle, along with this
    /// channel's waker slot.
    ///
    /// # Safety
    ///
    /// Nothing prevents calling this twice with the same `index`; doing so
    /// produces two handles driving the same hardware channel. The caller
    /// must ensure each channel index is owned by at most one `Channel`
    /// at a time.
    ///
    /// # Panics
    ///
    /// Panics if `index` is not less than `CHANNELS`.
    pub unsafe fn channel(&'static self, index: usize) -> Channel {
        assert!(index < CHANNELS);
        Channel {
            index,
            registers: self.controller,
            multiplexer: self.multiplexer,
            waker: &self.wakers[index],
        }
    }
}
/// A single DMA channel.
///
/// A `Channel` is an index into the shared DMA controller and multiplexer
/// register blocks, plus a reference to this channel's shared waker.
/// Obtain one through `Dma::channel`.
pub struct Channel {
    /// Zero-based channel number; selects this channel's TCD and its bit in
    /// the controller's per-channel status registers.
    index: usize,
    /// DMA controller registers (shared by all channels).
    registers: Static<dma::RegisterBlock>,
    /// DMA multiplexer registers (shared by all channels).
    multiplexer: Static<dmamux::RegisterBlock>,
    /// Per-channel waker slot, shared with interrupt-driven code elsewhere
    /// in the crate (see `super::SharedWaker`).
    pub(crate) waker: &'static super::SharedWaker,
}
impl Channel {
    /// Enable this channel's hardware request line (writes the channel
    /// number to the SERQ set-enable-request register).
    ///
    /// # Safety
    ///
    /// The channel must be fully configured — source/destination addresses,
    /// offsets, attributes, and loop counts — before it is enabled. Enabling
    /// a mis-configured channel lets the DMA engine read or write arbitrary
    /// memory.
    pub unsafe fn enable(&self) {
        self.registers.SERQ.write(self.index as u8);
    }
    /// The zero-based channel number this handle controls.
    pub fn channel(&self) -> usize {
        self.index
    }
    /// Set the TCD bandwidth control, throttling how aggressively the engine
    /// occupies the bus. `None` means no throttling.
    pub fn set_bandwidth_control(&mut self, bandwidth: Option<BandwidthControl>) {
        // BandwidthControl::raw maps the Option onto the BWC field encoding.
        let raw = BandwidthControl::raw(bandwidth);
        let tcd = self.tcd();
        ral::modify_reg!(crate::ral::tcd, tcd, CSR, BWC: raw);
    }
    /// Reset this channel's transfer control descriptor to a known state.
    pub fn reset(&mut self) {
        self.tcd().reset();
    }
    /// Borrow this channel's transfer control descriptor (TCD) registers.
    fn tcd(&self) -> &crate::ral::tcd::RegisterBlock {
        &self.registers.TCD[self.index]
    }
    /// Set the transfer source address (SADDR).
    ///
    /// # Safety
    ///
    /// `saddr` must remain valid for reads for as long as the hardware may
    /// use it; the pointer is truncated to 32 bits, so it must lie in the
    /// 32-bit address space the DMA engine can reach.
    pub unsafe fn set_source_address<E: Element>(&self, saddr: *const E) {
        let tcd = self.tcd();
        ral::write_reg!(crate::ral::tcd, tcd, SADDR, saddr as u32);
    }
    /// Set the signed offset (SOFF) added to the source address after each
    /// read; 0 keeps reading the same address (hardware registers).
    ///
    /// # Safety
    ///
    /// The offset must keep every generated address inside memory the
    /// transfer is allowed to read.
    pub unsafe fn set_source_offset(&self, offset: i16) {
        let tcd = self.tcd();
        ral::write_reg!(crate::ral::tcd, tcd, SOFF, offset);
    }
    /// Set the transfer destination address (DADDR).
    ///
    /// # Safety
    ///
    /// `daddr` must remain valid for writes for the lifetime of the
    /// transfer, and must fit in the 32-bit DMA address space.
    pub unsafe fn set_destination_address<E: Element>(&self, daddr: *const E) {
        let tcd = self.tcd();
        ral::write_reg!(crate::ral::tcd, tcd, DADDR, daddr as u32);
    }
    /// Set the signed offset (DOFF) added to the destination address after
    /// each write.
    ///
    /// # Safety
    ///
    /// The offset must keep every generated address inside memory the
    /// transfer is allowed to write.
    pub unsafe fn set_destination_offset(&self, offset: i16) {
        let tcd = self.tcd();
        ral::write_reg!(crate::ral::tcd, tcd, DOFF, offset);
    }
    /// Set the source address modulo (MOD) and transfer size (SIZE, from the
    /// element type) attributes. A non-zero modulo wraps source addresses
    /// for circular transfers.
    ///
    /// # Safety
    ///
    /// A wrong modulo makes the engine touch addresses outside the intended
    /// buffer.
    pub unsafe fn set_source_attributes<E: Element>(&self, modulo: u8) {
        let tcd = self.tcd();
        ral::write_reg!(
            crate::ral::tcd,
            tcd,
            SATTR,
            MOD: modulo,
            SIZE: E::DATA_TRANSFER_ID
        );
    }
    /// Set the signed adjustment (SLAST) added to the source address once
    /// the major loop completes — e.g. the negative buffer size to rewind a
    /// linear buffer.
    ///
    /// # Safety
    ///
    /// The adjusted address is used by any subsequent transfer, so it must
    /// land in valid memory.
    pub unsafe fn set_source_last_address_adjustment(&self, adjustment: i32) {
        let tcd = self.tcd();
        ral::write_reg!(crate::ral::tcd, tcd, SLAST, adjustment);
    }
    /// Set the signed adjustment (DLAST_SGA) applied to the destination
    /// address once the major loop completes.
    ///
    /// # Safety
    ///
    /// Same constraint as [`Self::set_source_last_address_adjustment`], for
    /// the destination side.
    pub unsafe fn set_destination_last_address_adjustment(&self, adjustment: i32) {
        let tcd = self.tcd();
        ral::write_reg!(crate::ral::tcd, tcd, DLAST_SGA, adjustment);
    }
    /// Set the destination address modulo and transfer size attributes; see
    /// [`Self::set_source_attributes`].
    ///
    /// # Safety
    ///
    /// A wrong modulo makes the engine touch addresses outside the intended
    /// buffer.
    pub unsafe fn set_destination_attributes<E: Element>(&self, modulo: u8) {
        let tcd = self.tcd();
        ral::write_reg!(
            crate::ral::tcd,
            tcd,
            DATTR,
            MOD: modulo,
            SIZE: E::DATA_TRANSFER_ID
        );
    }
    /// Set the number of bytes (NBYTES) moved per minor loop / service
    /// request.
    ///
    /// # Safety
    ///
    /// Must agree with the configured addresses, offsets, and buffer sizes,
    /// or the engine over- or under-runs the buffers.
    pub unsafe fn set_minor_loop_bytes(&self, nbytes: u32) {
        let tcd = self.tcd();
        ral::write_reg!(crate::ral::tcd, tcd, NBYTES, nbytes);
    }
    /// Set the major-loop iteration count: both the current (CITER) and
    /// beginning (BITER) counts, which must match when a transfer starts.
    /// Uses modify_reg so other bits in those registers are preserved.
    ///
    /// # Safety
    ///
    /// Must agree with the buffer sizes the addresses and offsets describe.
    pub unsafe fn set_transfer_iterations(&mut self, iterations: u16) {
        let tcd = self.tcd();
        ral::modify_reg!(crate::ral::tcd, tcd, CITER, CITER: iterations);
        ral::modify_reg!(crate::ral::tcd, tcd, BITER, BITER: iterations);
    }
    /// Read the beginning major-loop iteration count (BITER) configured for
    /// this channel.
    pub fn beginning_transfer_iterations(&self) -> u16 {
        let tcd = self.tcd();
        ral::read_reg!(crate::ral::tcd, tcd, BITER, BITER)
    }
    /// Program this channel's DMAMUX slot: off, routed from a hardware
    /// `source` (optionally with periodic triggering), or always on.
    ///
    /// # Panics
    ///
    /// Panics if `periodic` triggering is requested on a channel other than
    /// 0-3 (only the first four channels support the TRIG feature here).
    pub fn set_channel_configuration(&mut self, configuration: Configuration) {
        let chcfg = &self.multiplexer.chcfg[self.index];
        match configuration {
            Configuration::Off => chcfg.write(0),
            Configuration::Enable { source, periodic } => {
                let mut v = source | dmamux::RegisterBlock::ENBL;
                if periodic {
                    assert!(
                        self.channel() < 4,
                        "Requested DMA periodic triggering on an unsupported channel."
                    );
                    v |= dmamux::RegisterBlock::TRIG;
                }
                chcfg.write(v);
            }
            Configuration::AlwaysOn => {
                chcfg.write(dmamux::RegisterBlock::ENBL | dmamux::RegisterBlock::A_ON)
            }
        }
    }
    /// `true` if this channel's hardware request line is currently asserted
    /// (this channel's bit in HRS).
    pub fn is_hardware_signaling(&self) -> bool {
        self.registers.HRS.read() & (1 << self.index) != 0
    }
    /// Disable this channel's hardware request line (CERQ,
    /// clear-enable-request). Safe: stopping servicing cannot corrupt
    /// memory.
    pub fn disable(&self) {
        self.registers.CERQ.write(self.index as u8);
    }
    /// `true` if this channel's interrupt flag is set (its bit in INT).
    pub fn is_interrupt(&self) -> bool {
        self.registers.INT.read() & (1 << self.index) != 0
    }
    /// Clear this channel's interrupt flag (CINT).
    pub fn clear_interrupt(&self) {
        self.registers.CINT.write(self.index as u8);
    }
    /// When `dreq` is set, the hardware clears this channel's request enable
    /// on major-loop completion (CSR DREQ), so the transfer runs once.
    pub fn set_disable_on_completion(&mut self, dreq: bool) {
        let tcd = self.tcd();
        ral::modify_reg!(crate::ral::tcd, tcd, CSR, DREQ: dreq as u16);
    }
    /// Enable or disable the major-loop-complete interrupt (CSR INTMAJOR).
    pub fn set_interrupt_on_completion(&mut self, intr: bool) {
        let tcd = self.tcd();
        ral::modify_reg!(crate::ral::tcd, tcd, CSR, INTMAJOR: intr as u16);
    }
    /// `true` once the major loop has completed (CSR DONE).
    pub fn is_complete(&self) -> bool {
        let tcd = self.tcd();
        ral::read_reg!(crate::ral::tcd, tcd, CSR, DONE == 1)
    }
    /// Clear this channel's DONE flag (CDNE).
    pub fn clear_complete(&self) {
        self.registers.CDNE.write(self.index as u8);
    }
    /// `true` if this channel has a recorded error (its bit in ERR).
    pub fn is_error(&self) -> bool {
        self.registers.ERR.read() & (1 << self.index) != 0
    }
    /// Clear this channel's error flag (CERR).
    pub fn clear_error(&self) {
        self.registers.CERR.write(self.index as u8);
    }
    /// `true` while the engine is actively servicing this channel
    /// (CSR ACTIVE).
    pub fn is_active(&self) -> bool {
        let tcd = self.tcd();
        ral::read_reg!(crate::ral::tcd, tcd, CSR, ACTIVE == 1)
    }
    /// `true` if this channel's hardware request is enabled (its bit in
    /// ERQ).
    pub fn is_enabled(&self) -> bool {
        self.registers.ERQ.read() & (1 << self.index) != 0
    }
    /// Snapshot the controller's error status register (ES).
    ///
    /// NOTE(review): ES describes the most recent error on the controller,
    /// not necessarily an error on *this* channel — pair with
    /// [`Self::is_error`].
    pub fn error_status(&self) -> Error {
        Error::new(self.registers.ES.read())
    }
    /// Software-start this channel (SSRT), as if a hardware request
    /// arrived. Safe only in the sense that a configured channel is
    /// required before enabling/starting — see [`Self::enable`].
    pub fn start(&self) {
        self.registers.SSRT.write(self.index as u8);
    }
}
unsafe impl Send for Channel {}
/// How a DMA channel's multiplexer slot routes service requests.
///
/// Marked `#[non_exhaustive]`: more routing modes may be added later.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[non_exhaustive]
pub enum Configuration {
    /// The channel receives no requests.
    Off,
    /// Requests come from the hardware trigger identified by `source`.
    Enable {
        /// Hardware trigger / source identifier written into the mux slot.
        source: u32,
        /// When set, the request is gated by the periodic trigger
        /// (supported on a limited set of channels).
        periodic: bool,
    },
    /// The channel is always requesting service.
    AlwaysOn,
}

impl Configuration {
    /// Shorthand for a non-periodic [`Configuration::Enable`] driven by
    /// `source`.
    pub const fn enable(source: u32) -> Self {
        Self::Enable {
            source,
            periodic: false,
        }
    }
}
/// Configure `chan`'s source as a fixed hardware address.
///
/// Sets the source address to `hardware_source` with a zero offset, zero
/// modulo, and zero last-address adjustment, so every read in the transfer
/// targets the same location (e.g. a peripheral data register).
///
/// # Safety
///
/// `hardware_source` must be valid for reads for as long as the channel may
/// use it, and the channel must not be actively transferring while it is
/// reconfigured.
pub unsafe fn set_source_hardware<E: Element>(chan: &mut Channel, hardware_source: *const E) {
    unsafe {
        chan.set_source_address(hardware_source);
        chan.set_source_offset(0);
        chan.set_source_attributes::<E>(0);
        chan.set_source_last_address_adjustment(0);
    }
}
/// Configure `chan`'s destination as a fixed hardware address.
///
/// Mirror of `set_source_hardware`: zero offset, zero modulo, and zero
/// last-address adjustment, so every write targets the same location.
///
/// # Safety
///
/// `hardware_destination` must be valid for writes for as long as the
/// channel may use it, and the channel must not be actively transferring
/// while it is reconfigured.
pub unsafe fn set_destination_hardware<E: Element>(
    chan: &mut Channel,
    hardware_destination: *const E,
) {
    unsafe {
        chan.set_destination_address(hardware_destination);
        chan.set_destination_offset(0);
        chan.set_destination_attributes::<E>(0);
        chan.set_destination_last_address_adjustment(0);
    }
}
/// Configure `chan` to read linearly through `source`.
///
/// The source offset advances one element per read, and the last-address
/// adjustment rewinds by the buffer's byte size when the major loop
/// completes, leaving the channel pointed back at the buffer start.
///
/// # Safety
///
/// `source` must stay alive (and unmoved) for as long as the hardware may
/// read it — the TCD retains a raw pointer after this call returns.
pub unsafe fn set_source_linear_buffer<E: Element>(chan: &mut Channel, source: &[E]) {
    unsafe {
        chan.set_source_address(source.as_ptr());
        chan.set_source_offset(core::mem::size_of::<E>() as i16);
        chan.set_source_attributes::<E>(0);
        chan.set_source_last_address_adjustment(
            (core::mem::size_of_val(source) as i32).wrapping_neg(),
        );
    }
}
/// Configure `chan` to write linearly through `destination`.
///
/// Mirror of `set_source_linear_buffer`: the offset advances one element
/// per write, and the last-address adjustment rewinds to the buffer start
/// on major-loop completion.
///
/// # Safety
///
/// `destination` must stay alive (and unmoved) for as long as the hardware
/// may write it — the TCD retains a raw pointer after this call returns.
pub unsafe fn set_destination_linear_buffer<E: Element>(chan: &mut Channel, destination: &mut [E]) {
    unsafe {
        chan.set_destination_address(destination.as_ptr());
        chan.set_destination_offset(core::mem::size_of::<E>() as i16);
        chan.set_destination_attributes::<E>(0);
        chan.set_destination_last_address_adjustment(
            (core::mem::size_of_val(destination) as i32).wrapping_neg(),
        );
    }
}
/// Validate that `buffer` can back a hardware circular transfer.
///
/// The element count must be a power of two, and the buffer's starting
/// address must be aligned to its total byte size — both are required for
/// the TCD address-modulo wrap to stay inside the buffer.
///
/// # Panics
///
/// Panics when either requirement is violated (including an empty buffer,
/// whose length is not a power of two).
fn circular_buffer_asserts<E>(buffer: &[E]) {
    assert!(
        buffer.len().is_power_of_two(),
        "DMA circular buffer size is not power of two"
    );
    let size = core::mem::size_of_val(buffer);
    let start = buffer.as_ptr() as usize;
    let aligned = size != 0 && start % size == 0;
    assert!(aligned, "DMA circular buffer is not properly aligned");
}
/// Compute the TCD MOD field for a circular transfer over `buffer`: the
/// base-2 logarithm of the buffer's size in bytes.
///
/// Callers are expected to have validated the buffer with
/// `circular_buffer_asserts`, so the byte size is a non-zero power of two
/// and `ilog2` is exact.
///
/// # Panics
///
/// Panics if the buffer's byte size is zero (e.g. an empty slice or a
/// zero-sized element type).
fn circular_buffer_modulo<E>(buffer: &[E]) -> u32 {
    // `ilog2` is usize-width independent. The previous form,
    // `31 - size.leading_zeros()`, silently assumed a 32-bit usize and
    // underflows whenever usize is 64 bits (e.g. host-side unit tests).
    core::mem::size_of_val(buffer).ilog2()
}
/// Configure `chan` to read `source` as a circular buffer.
///
/// The source offset advances one element per read, and the address modulo
/// wraps reads back to the buffer start; no last-address adjustment is
/// needed because the modulo keeps the address in range.
///
/// # Panics
///
/// Panics if `source`'s length is not a power of two or its address is not
/// aligned to its byte size (see `circular_buffer_asserts`).
///
/// # Safety
///
/// `source` must stay alive (and unmoved) for as long as the hardware may
/// read it — the TCD retains a raw pointer after this call returns.
pub unsafe fn set_source_circular_buffer<E: Element>(chan: &mut Channel, source: &[E]) {
    circular_buffer_asserts(source);
    let modulo = circular_buffer_modulo(source);
    unsafe {
        chan.set_source_address(source.as_ptr());
        chan.set_source_offset(core::mem::size_of::<E>() as i16);
        chan.set_source_attributes::<E>(modulo as u8);
        chan.set_source_last_address_adjustment(0);
    }
}
/// Configure `chan` to write `destination` as a circular buffer.
///
/// Mirror of `set_source_circular_buffer`: one-element write offset, address
/// modulo wrapping, and no last-address adjustment.
///
/// # Panics
///
/// Panics if `destination`'s length is not a power of two or its address is
/// not aligned to its byte size (see `circular_buffer_asserts`).
///
/// # Safety
///
/// `destination` must stay alive (and unmoved) for as long as the hardware
/// may write it — the TCD retains a raw pointer after this call returns.
pub unsafe fn set_destination_circular_buffer<E: Element>(
    chan: &mut Channel,
    destination: &mut [E],
) {
    circular_buffer_asserts(destination);
    let modulo = circular_buffer_modulo(destination);
    unsafe {
        chan.set_destination_address(destination.as_ptr());
        chan.set_destination_offset(core::mem::size_of::<E>() as i16);
        chan.set_destination_attributes::<E>(modulo as u8);
        chan.set_destination_last_address_adjustment(0);
    }
}