#![allow(dead_code)]
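//! Direct memory access (DMA) support: framed transmit/receive helpers
//! (`FrameReader`/`FrameSender`), circular double-buffered reception
//! (`CircBuffer`) and one-shot transfers (`Transfer`).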
use core::fmt;
use core::marker::PhantomData;
use core::mem::MaybeUninit;
use core::ops::{Deref, DerefMut};
use core::ptr;
use core::slice;
use crate::pac::RCC;
use as_slice::AsSlice;
pub use generic_array::typenum::{self, consts};
use generic_array::{ArrayLength, GenericArray};
use stable_deref_trait::StableDeref;
use paste::paste;
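/// Errors reported by the DMA helpers in this module.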
#[non_exhaustive]
#[derive(Debug)]
pub enum Error {
    /// The readable half was completed again before it was read out.
    Overrun,
    /// The closure passed to `partial_peek` signalled an error.
    BufferError,
}
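/// Interrupt events a DMA channel can listen for.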
pub enum Event {
HalfTransfer,
TransferComplete,
}
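/// Identifies one half of a double (circular) buffer.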
#[derive(Clone, Copy, PartialEq)]
pub enum Half {
First,
Second,
}
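/// Receives data frames over a DMA channel into `DMAFrame` buffers, optionally
/// splitting the incoming stream at a matching character (see
/// `character_match_interrupt`).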
pub struct FrameReader<BUFFER, CHANNEL, N>
where
BUFFER: Sized + StableDeref<Target = DMAFrame<N>> + DerefMut + 'static,
N: ArrayLength<MaybeUninit<u8>>,
{
buffer: BUFFER,
channel: CHANNEL,
matching_character: u8,
_marker: core::marker::PhantomData<N>,
}
impl<BUFFER, CHANNEL, N> FrameReader<BUFFER, CHANNEL, N>
where
BUFFER: Sized + StableDeref<Target = DMAFrame<N>> + DerefMut + 'static,
N: ArrayLength<MaybeUninit<u8>>,
{
pub(crate) fn new(
buffer: BUFFER,
channel: CHANNEL,
matching_character: u8,
) -> FrameReader<BUFFER, CHANNEL, N> {
Self {
buffer,
channel,
matching_character,
_marker: core::marker::PhantomData,
}
}
}
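/// Sends `DMAFrame` buffers over a DMA channel, one frame at a time.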
pub struct FrameSender<BUFFER, CHANNEL, N>
where
BUFFER: Sized + StableDeref<Target = DMAFrame<N>> + DerefMut + 'static,
N: ArrayLength<MaybeUninit<u8>>,
{
buffer: Option<BUFFER>,
channel: CHANNEL,
_marker: core::marker::PhantomData<N>,
}
impl<BUFFER, CHANNEL, N> FrameSender<BUFFER, CHANNEL, N>
where
BUFFER: Sized + StableDeref<Target = DMAFrame<N>> + DerefMut + 'static,
N: ArrayLength<MaybeUninit<u8>>,
{
pub(crate) fn new(channel: CHANNEL) -> FrameSender<BUFFER, CHANNEL, N> {
Self {
buffer: None,
channel,
_marker: core::marker::PhantomData,
}
}
}
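/// A fixed-capacity byte frame backed by possibly uninitialized storage; `len`
/// tracks how many bytes are initialized. This is the buffer type used by
/// `FrameReader` and `FrameSender`.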
pub struct DMAFrame<N>
where
N: ArrayLength<MaybeUninit<u8>>,
{
len: u16,
buf: GenericArray<MaybeUninit<u8>, N>,
}
impl<N> fmt::Debug for DMAFrame<N>
where
N: ArrayLength<MaybeUninit<u8>>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self.read())
}
}
impl<N> fmt::Write for DMAFrame<N>
where
N: ArrayLength<MaybeUninit<u8>>,
{
fn write_str(&mut self, s: &str) -> fmt::Result {
let free = self.free();
if s.len() > free {
Err(fmt::Error)
} else {
self.write_slice(s.as_bytes());
Ok(())
}
}
}
impl<N> Default for DMAFrame<N>
where
N: ArrayLength<MaybeUninit<u8>>,
{
fn default() -> Self {
Self::new()
}
}
impl<N> DMAFrame<N>
where
N: ArrayLength<MaybeUninit<u8>>,
{
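    /// Creates a new, empty frame.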
#[inline]
pub fn new() -> Self {
#[allow(clippy::uninit_assumed_init)]
Self {
len: 0,
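            // NOTE(unsafe): an array of `MaybeUninit<u8>` may itself be left
            // uninitialized; `len == 0` ensures none of it is read before it
            // has been written.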
buf: unsafe { MaybeUninit::uninit().assume_init() },
}
}
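    /// Returns a mutable slice over the whole buffer and marks the frame as
    /// fully written; call `commit` afterwards with the number of bytes that
    /// were actually used. The uninitialized tail is zeroed first.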
pub fn write(&mut self) -> &mut [u8] {
for elem in &mut self.buf[self.len as usize..] {
*elem = MaybeUninit::zeroed();
}
self.len = self.max_len() as u16;
unsafe { slice::from_raw_parts_mut(self.buf.as_mut_ptr() as *mut _, self.max_len()) }
}
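    /// Shrinks the length recorded by `write` to `shrink_to` bytes (a no-op if
    /// `shrink_to` is not smaller than the current length).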
#[inline]
pub fn commit(&mut self, shrink_to: usize) {
if shrink_to < self.len as _ {
self.len = shrink_to as _;
}
}
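    /// Returns the raw, possibly uninitialized storage; pair this with
    /// `set_len` once the number of written bytes is known.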
#[inline]
pub fn write_uninit(&mut self) -> &mut [MaybeUninit<u8>] {
&mut self.buf
}
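    /// Sets the number of initialized bytes in the frame.
    ///
    /// # Safety
    ///
    /// The first `len` bytes of the buffer must have been initialized.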
#[inline]
pub unsafe fn set_len(&mut self, len: usize) {
assert!(len <= self.max_len());
self.len = len as _;
}
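    /// Appends as much of `buf` as fits in the remaining space and returns the
    /// number of bytes copied.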
pub fn write_slice(&mut self, buf: &[u8]) -> usize {
let count = buf.len().min(self.free());
unsafe {
ptr::copy_nonoverlapping(
buf.as_ptr(),
(self.buf.as_mut_ptr() as *mut u8).add(self.len.into()),
count,
);
}
self.len += count as u16;
count
}
#[inline]
pub fn clear(&mut self) {
self.len = 0;
}
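    /// Returns the initialized portion of the frame as a byte slice.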
#[inline]
pub fn read(&self) -> &[u8] {
unsafe { slice::from_raw_parts(self.buf.as_ptr() as *const _, self.len as usize) }
}
#[inline]
pub fn read_mut(&mut self) -> &mut [u8] {
unsafe { slice::from_raw_parts_mut(self.buf.as_mut_ptr() as *mut _, self.len as usize) }
}
#[inline]
pub fn len(&self) -> usize {
self.len as usize
}
#[inline]
pub fn free(&self) -> usize {
self.max_len() - self.len as usize
}
#[inline]
pub fn max_len(&self) -> usize {
N::to_usize()
}
#[inline]
pub fn is_empty(&self) -> bool {
self.len == 0
}
#[inline]
pub(crate) unsafe fn buffer_address_for_dma(&self) -> u32 {
self.buf.as_ptr() as u32
}
#[inline]
pub(crate) fn buffer_as_ptr(&self) -> *const MaybeUninit<u8> {
self.buf.as_ptr()
}
#[inline]
pub(crate) fn buffer_as_mut_ptr(&mut self) -> *mut MaybeUninit<u8> {
self.buf.as_mut_ptr()
}
}
impl<N> AsSlice for DMAFrame<N>
where
N: ArrayLength<MaybeUninit<u8>>,
{
type Element = u8;
fn as_slice(&self) -> &[Self::Element] {
self.read()
}
}
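/// A circular (double-buffered) DMA buffer: the DMA fills the two halves of a
/// `[H; 2]` buffer in turn while `peek`/`partial_peek` read the data out.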
pub struct CircBuffer<BUFFER, CHANNEL>
where
BUFFER: 'static,
{
buffer: BUFFER,
channel: CHANNEL,
readable_half: Half,
consumed_offset: usize,
}
impl<BUFFER, CHANNEL> CircBuffer<BUFFER, CHANNEL> {
pub(crate) fn new<H>(buf: BUFFER, chan: CHANNEL) -> Self
where
BUFFER: StableDeref<Target = [H; 2]> + 'static,
{
CircBuffer {
buffer: buf,
channel: chan,
readable_half: Half::Second,
consumed_offset: 0,
}
}
}
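/// Extension trait to split a DMA peripheral into its independent channels.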
pub trait DmaExt {
type Channels;
    fn split(self, rcc: &mut RCC) -> Self::Channels;
}
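/// An ongoing DMA transfer; `wait` blocks until completion and hands back the
/// buffer, channel and payload.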
pub struct Transfer<MODE, BUFFER, CHANNEL, PAYLOAD> {
_mode: PhantomData<MODE>,
buffer: BUFFER,
channel: CHANNEL,
payload: PAYLOAD,
}
impl<BUFFER, CHANNEL, PAYLOAD> Transfer<R, BUFFER, CHANNEL, PAYLOAD>
where
BUFFER: StableDeref + 'static,
{
pub(crate) fn r(buffer: BUFFER, channel: CHANNEL, payload: PAYLOAD) -> Self {
Transfer {
_mode: PhantomData,
buffer,
channel,
payload,
}
}
}
impl<BUFFER, CHANNEL, PAYLOAD> Transfer<W, BUFFER, CHANNEL, PAYLOAD>
where
BUFFER: StableDeref + 'static,
{
pub(crate) fn w(buffer: BUFFER, channel: CHANNEL, payload: PAYLOAD) -> Self {
Transfer {
_mode: PhantomData,
buffer,
channel,
payload,
}
}
}
impl<BUFFER, CHANNEL, PAYLOAD> Deref for Transfer<R, BUFFER, CHANNEL, PAYLOAD> {
type Target = BUFFER;
fn deref(&self) -> &BUFFER {
&self.buffer
}
}
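/// Transfer mode marker: the DMA only reads from the buffer
/// (e.g. memory-to-peripheral).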
pub struct R;
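/// Transfer mode marker: the DMA writes into the buffer
/// (e.g. peripheral-to-memory).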
pub struct W;
macro_rules! dma {
($($DMAX:ident: ($dma:ident, $enr:ident, $rst:ident, {
$($CX:ident: (
$ccrX:ident,
$CCRX:ident,
$cndtrX:ident,
$CNDTRX:ident,
$cparX:ident,
$CPARX:ident,
$cmarX:ident,
$CMARX:ident,
$htifX:ident,
$tcifX:ident,
$chtifX:ident,
$ctcifX:ident,
$cgifX:ident,
$teifX:ident,
$cteifX:ident
),)+
}),)+) => {
$(
pub mod $dma {
use core::sync::atomic::{self, Ordering};
use as_slice::{AsSlice};
use crate::pac::{$DMAX, dma1};
use core::mem::MaybeUninit;
use generic_array::ArrayLength;
use core::ops::DerefMut;
use core::ptr;
use stable_deref_trait::StableDeref;
use crate::dma::{CircBuffer, FrameReader, FrameSender, DMAFrame, DmaExt, Error, Event, Half, Transfer, W};
use crate::pac::RCC;
use paste::paste;
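            /// The independent channels of this DMA peripheral, obtained from
            /// `DmaExt::split`.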
#[allow(clippy::manual_non_exhaustive)]
pub struct Channels((), $(pub $CX),+);
$(
pub struct $CX;
impl $CX {
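                    /// Sets the peripheral address this channel transfers from/to;
                    /// `inc` selects whether the address is incremented after each
                    /// transfer.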
#[inline]
pub fn set_peripheral_address(&mut self, address: u32, inc: bool) {
self.cpar().write(|w|
unsafe { w.pa().bits(address) }
);
self.ccr().modify(|_, w| w.pinc().bit(inc) );
}
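                    /// Sets the memory address this channel transfers from/to; `inc`
                    /// selects whether the address is incremented after each transfer.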
#[inline]
pub fn set_memory_address(&mut self, address: u32, inc: bool) {
self.cmar().write(|w|
unsafe { w.ma().bits(address) }
);
self.ccr().modify(|_, w| w.minc().bit(inc) );
}
#[inline]
pub fn set_transfer_length(&mut self, len: u16) {
self.cndtr().write(|w| w.ndt().bits(len));
}
#[inline]
pub fn start(&mut self) {
self.ccr().modify(|_, w| w.en().set_bit() );
}
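                    /// Clears this channel's interrupt flags and disables it.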
#[inline]
pub fn stop(&mut self) {
self.ifcr().write(|w| w.$cgifX().set_bit());
self.ccr().modify(|_, w| w.en().clear_bit() );
}
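                    /// Returns `true` while this channel's transfer-complete flag is
                    /// still clear.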
#[inline]
pub fn in_progress(&self) -> bool {
self.isr().$tcifX().bit_is_clear()
}
#[inline]
pub fn listen(&mut self, event: Event) {
match event {
Event::HalfTransfer => self.ccr().modify(|_, w| w.htie().set_bit()),
Event::TransferComplete => {
self.ccr().modify(|_, w| w.tcie().set_bit())
}
}
}
#[inline]
pub fn unlisten(&mut self, event: Event) {
match event {
Event::HalfTransfer => {
self.ccr().modify(|_, w| w.htie().clear_bit())
},
Event::TransferComplete => {
self.ccr().modify(|_, w| w.tcie().clear_bit())
}
}
}
#[inline]
pub(crate) fn isr(&self) -> dma1::isr::R {
unsafe { (*$DMAX::ptr()).isr.read() }
}
#[inline]
pub(crate) fn ifcr(&self) -> &dma1::IFCR {
unsafe { &(*$DMAX::ptr()).ifcr }
}
#[inline]
pub(crate) fn ccr(&mut self) -> &dma1::$CCRX {
unsafe { &(*$DMAX::ptr()).$ccrX }
}
#[inline]
pub(crate) fn cndtr(&mut self) -> &dma1::$CNDTRX {
unsafe { &(*$DMAX::ptr()).$cndtrX }
}
#[inline]
pub(crate) fn cpar(&mut self) -> &dma1::$CPARX {
unsafe { &(*$DMAX::ptr()).$cparX }
}
#[inline]
pub(crate) fn cmar(&mut self) -> &dma1::$CMARX {
unsafe { &(*$DMAX::ptr()).$cmarX }
}
#[inline]
pub(crate) fn cselr(&mut self) -> &dma1::CSELR {
unsafe { &(*$DMAX::ptr()).cselr }
}
#[inline]
pub(crate) fn get_cndtr(&self) -> u32 {
unsafe { (*$DMAX::ptr()).$cndtrX.read().bits() }
}
}
impl<BUFFER, N> FrameSender<BUFFER, $CX, N>
where
BUFFER: Sized + StableDeref<Target = DMAFrame<N>> + DerefMut + 'static,
N: ArrayLength<MaybeUninit<u8>>,
{
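                    /// Call this from the DMA transfer-complete interrupt; returns the
                    /// sent frame once the transfer has actually finished, `None`
                    /// otherwise.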
pub fn transfer_complete_interrupt(
&mut self,
) -> Option<BUFFER> {
if !self.channel.in_progress() {
self.channel.ifcr().write(|w| w.$ctcifX().set_bit());
} else {
return None;
}
self.channel.stop();
atomic::compiler_fence(Ordering::SeqCst);
self.buffer.take()
}
#[inline]
pub fn ongoing_transfer(&self) -> bool {
self.buffer.is_some()
}
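                    /// Starts sending `frame` over this channel; returns it in `Err` if
                    /// a transfer is already in progress.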
pub fn send(
&mut self,
frame: BUFFER,
) -> Result<(), BUFFER> {
if self.ongoing_transfer() {
return Err(frame);
}
let new_buf = &*frame;
self.channel.set_memory_address(new_buf.buffer_as_ptr() as u32, true);
self.channel.set_transfer_length(new_buf.len() as u16);
if self.channel.isr().$teifX().bit_is_set() {
self.channel.ifcr().write(|w| w.$cteifX().set_bit());
}
atomic::compiler_fence(Ordering::Release);
self.channel.start();
self.buffer = Some(frame);
Ok(())
}
}
impl<BUFFER, N> FrameReader<BUFFER, $CX, N>
where
BUFFER: Sized + StableDeref<Target = DMAFrame<N>> + DerefMut + 'static,
N: ArrayLength<MaybeUninit<u8>>,
{
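                    /// Call this from the DMA transfer-complete interrupt (the current
                    /// frame is full); swaps in `next_frame` and returns the completed
                    /// frame.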
#[inline]
pub fn transfer_complete_interrupt(&mut self, next_frame: BUFFER) -> BUFFER {
self.internal_interrupt(next_frame, false)
}
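                    /// Call this from the serial character-match interrupt; any bytes
                    /// received after the last matching character are carried over into
                    /// `next_frame`, and the frame received so far is returned.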
#[inline]
pub fn character_match_interrupt(&mut self, next_frame: BUFFER) -> BUFFER {
self.internal_interrupt(next_frame, true)
}
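                    /// Call this from the serial receiver-timeout interrupt; swaps in
                    /// `next_frame` and returns whatever has been received so far.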
#[inline]
pub fn receiver_timeout_interrupt(&mut self, next_frame: BUFFER) -> BUFFER {
self.internal_interrupt(next_frame, false)
}
fn internal_interrupt(
&mut self,
mut next_frame: BUFFER,
character_match_interrupt: bool,
) -> BUFFER {
let old_buf = &mut *self.buffer;
let new_buf = &mut *next_frame;
new_buf.clear();
if !self.channel.in_progress() {
self.channel.ifcr().write(|w| w.$ctcifX().set_bit());
} else if character_match_interrupt {
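                                // The character-match interrupt can fire while the DMA is
                                // still moving the matched byte (and a few following ones)
                                // into memory, so give it a short, bounded window to drain
                                // a few more transfers before stopping the channel.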
let left_in_buffer = self.channel.get_cndtr() as usize;
for _ in 0..5 {
let now_left = self.channel.get_cndtr() as usize;
if left_in_buffer - now_left >= 4 {
break;
}
}
}
self.channel.stop();
atomic::compiler_fence(Ordering::SeqCst);
let left_in_buffer = self.channel.get_cndtr() as usize;
let got_data_len = old_buf.max_len() - left_in_buffer;
unsafe {
old_buf.set_len(got_data_len);
}
let len = if character_match_interrupt && got_data_len > 0 {
let search_buf = old_buf.read();
let ch = self.matching_character;
if let Some(pos) = search_buf.iter().rposition(|&x| x == ch) {
pos+1
} else {
0
}
} else {
old_buf.len()
};
let diff = if len < got_data_len {
let diff = got_data_len - len;
let new_buf_ptr = new_buf.buffer_as_mut_ptr();
let old_buf_ptr = old_buf.buffer_as_ptr();
unsafe {
ptr::copy_nonoverlapping(old_buf_ptr.add(len), new_buf_ptr, diff);
                                }
                                // Truncate the returned frame at the matching character so the
                                // carried-over bytes only appear in `new_buf`; without this they
                                // would be duplicated across both frames.
                                unsafe {
                                    old_buf.set_len(len);
                                }
                                diff
} else {
0
};
self.channel.set_memory_address(unsafe { new_buf.buffer_as_ptr().add(diff) } as u32, true);
self.channel.set_transfer_length((new_buf.max_len() - diff) as u16);
let received_buffer = core::mem::replace(&mut self.buffer, next_frame);
atomic::compiler_fence(Ordering::Release);
self.channel.start();
received_buffer
}
}
impl<B> CircBuffer<B, $CX> {
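                    /// Peeks into the half the DMA is currently writing. The closure
                    /// receives the not-yet-consumed data and returns how many elements
                    /// it consumed; that offset is remembered for the next call.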
pub fn partial_peek<R, F, H, T>(&mut self, f: F) -> Result<R, Error>
where
F: FnOnce(&[T], Half) -> Result<(usize, R), ()>,
B: StableDeref<Target = [H; 2]> + 'static,
H: AsSlice<Element=T>,
{
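                        // Note: this selects the *opposite* of `readable_half`, i.e. the
                        // half the DMA is currently filling, so data can be inspected
                        // before a half/transfer-complete event fires.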
let buf = match self.readable_half {
Half::First => &self.buffer[1],
Half::Second => &self.buffer[0],
};
let pending = self.channel.get_cndtr() as usize;
let slice = buf.as_slice();
let capacity = slice.len();
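                        // NDTR counts the transfers remaining over the full double
                        // buffer; reduce it to the amount still pending within the half
                        // currently being written.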
let pending = if pending > capacity {
pending - capacity
} else {
pending
};
let end = capacity - pending;
let slice = &buf.as_slice()[self.consumed_offset..end];
match f(slice, self.readable_half) {
Ok((l, r)) => { self.consumed_offset += l; Ok(r) },
Err(_) => Err(Error::BufferError),
}
}
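                    /// Runs the closure on the most recently completed (readable) half
                    /// and resets any `partial_peek` offset.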
pub fn peek<R, F, H, T>(&mut self, f: F) -> Result<R, Error>
where
F: FnOnce(&[T], Half) -> R,
B: StableDeref<Target = [H; 2]> + 'static,
H: AsSlice<Element=T>,
{
let half_being_read = self.readable_half()?;
let buf = match half_being_read {
Half::First => &self.buffer[0],
Half::Second => &self.buffer[1],
};
let slice = &buf.as_slice()[self.consumed_offset..];
self.consumed_offset = 0;
Ok(f(slice, half_being_read))
}
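                    /// Returns which half of the buffer is currently readable, clearing
                    /// the corresponding half-transfer/transfer-complete flag; returns
                    /// `Err(Error::Overrun)` if both halves completed since the last
                    /// check.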
pub fn readable_half(&mut self) -> Result<Half, Error> {
let isr = self.channel.isr();
let first_half_is_done = isr.$htifX().bit_is_set();
let second_half_is_done = isr.$tcifX().bit_is_set();
if first_half_is_done && second_half_is_done {
return Err(Error::Overrun);
}
let last_read_half = self.readable_half;
Ok(match last_read_half {
Half::First => {
if second_half_is_done {
self.channel.ifcr().write(|w| w.$ctcifX().set_bit());
self.readable_half = Half::Second;
Half::Second
} else {
last_read_half
}
}
Half::Second => {
if first_half_is_done {
self.channel.ifcr().write(|w| w.$chtifX().set_bit());
self.readable_half = Half::First;
Half::First
} else {
last_read_half
}
}
})
}
}
impl<BUFFER, PAYLOAD, MODE> Transfer<MODE, BUFFER, $CX, PAYLOAD> {
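                    /// Returns `true` once this channel's transfer-complete flag is set.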
pub fn is_done(&self) -> bool {
self.channel.isr().$tcifX().bit_is_set()
}
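                    /// Blocks until the transfer completes, then stops the channel and
                    /// returns the buffer, channel and payload.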
pub fn wait(mut self) -> (BUFFER, $CX, PAYLOAD) {
while !self.is_done() {}
self.channel.ifcr().write(|w| w.$cgifX().set_bit());
self.channel.ccr().modify(|_, w| w.en().clear_bit());
atomic::compiler_fence(Ordering::SeqCst);
(self.buffer, self.channel, self.payload)
}
}
impl<BUFFER, PAYLOAD> Transfer<W, &'static mut BUFFER, $CX, PAYLOAD> {
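                    /// Returns the part of the buffer the DMA has already written, based
                    /// on the channel's remaining transfer count (NDTR).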
pub fn peek<T>(&self) -> &[T]
where
BUFFER: AsSlice<Element=T>,
{
let pending = self.channel.get_cndtr() as usize;
let capacity = self.buffer.as_slice().len();
&self.buffer.as_slice()[..(capacity - pending)]
}
}
)+
impl DmaExt for $DMAX {
type Channels = Channels;
fn split(self, rcc: &mut RCC) -> Channels {
cfg_if::cfg_if! {
if #[cfg(feature = "f3")] {
paste! {
rcc.[<ahb1 enr>].modify(|_, w| w.[<$dma en>]().set_bit());
}
} else if #[cfg(any(feature = "l4", feature = "l5"))] {
paste! {
rcc.[<ahb1 $enr>].modify(|_, w| w.[<$dma en>]().set_bit());
}
}
}
$(
self.$ccrX.reset();
)+
Channels((), $($CX { }),+)
}
}
}
)+
}
}
#[cfg(any(feature = "l4", feature = "l5"))]
dma! {
DMA1: (dma1, enr, rstr1, {
C1: (
ccr1, CCR1,
cndtr1, CNDTR1,
cpar1, CPAR1,
cmar1, CMAR1,
htif1, tcif1,
chtif1, ctcif1, cgif1,
teif1, cteif1
),
C2: (
ccr2, CCR2,
cndtr2, CNDTR2,
cpar2, CPAR2,
cmar2, CMAR2,
htif2, tcif2,
chtif2, ctcif2, cgif2,
teif2, cteif2
),
C3: (
ccr3, CCR3,
cndtr3, CNDTR3,
cpar3, CPAR3,
cmar3, CMAR3,
htif3, tcif3,
chtif3, ctcif3, cgif3,
teif3, cteif3
),
C4: (
ccr4, CCR4,
cndtr4, CNDTR4,
cpar4, CPAR4,
cmar4, CMAR4,
htif4, tcif4,
chtif4, ctcif4, cgif4,
teif4, cteif4
),
C5: (
ccr5, CCR5,
cndtr5, CNDTR5,
cpar5, CPAR5,
cmar5, CMAR5,
htif5, tcif5,
chtif5, ctcif5, cgif5,
teif5, cteif5
),
C6: (
ccr6, CCR6,
cndtr6, CNDTR6,
cpar6, CPAR6,
cmar6, CMAR6,
htif6, tcif6,
chtif6, ctcif6, cgif6,
teif6, cteif6
),
C7: (
ccr7, CCR7,
cndtr7, CNDTR7,
cpar7, CPAR7,
cmar7, CMAR7,
htif7, tcif7,
chtif7, ctcif7, cgif7,
teif7, cteif7
),
}),
DMA2: (dma2, enr, rstr1, {
C1: (
ccr1, CCR1,
cndtr1, CNDTR1,
cpar1, CPAR1,
cmar1, CMAR1,
htif1, tcif1,
chtif1, ctcif1, cgif1,
teif1, cteif1
),
C2: (
ccr2, CCR2,
cndtr2, CNDTR2,
cpar2, CPAR2,
cmar2, CMAR2,
htif2, tcif2,
chtif2, ctcif2, cgif2,
teif2, cteif2
),
C3: (
ccr3, CCR3,
cndtr3, CNDTR3,
cpar3, CPAR3,
cmar3, CMAR3,
htif3, tcif3,
chtif3, ctcif3, cgif3,
teif3, cteif3
),
C4: (
ccr4, CCR4,
cndtr4, CNDTR4,
cpar4, CPAR4,
cmar4, CMAR4,
htif4, tcif4,
chtif4, ctcif4, cgif4,
teif4, cteif4
),
C5: (
ccr5, CCR5,
cndtr5, CNDTR5,
cpar5, CPAR5,
cmar5, CMAR5,
htif5, tcif5,
chtif5, ctcif5, cgif5,
teif5, cteif5
),
C6: (
ccr6, CCR6,
cndtr6, CNDTR6,
cpar6, CPAR6,
cmar6, CMAR6,
htif6, tcif6,
chtif6, ctcif6, cgif6,
teif6, cteif6
),
C7: (
ccr7, CCR7,
cndtr7, CNDTR7,
cpar7, CPAR7,
cmar7, CMAR7,
htif7, tcif7,
chtif7, ctcif7, cgif7,
teif7, cteif7
),
}),
}
#[cfg(any(feature = "f3"))]
dma! {
DMA1: (dma1, enr, rstr1, {
C1: (
cr1, CR1,
cndtr1, CNDTR1,
cpar1, CPAR1,
cmar1, CMAR1,
htif1, tcif1,
chtif1, ctcif1, cgif1,
teif1, cteif1
),
C2: (
cr2, CR2,
cndtr2, CNDTR2,
cpar2, CPAR2,
cmar2, CMAR2,
htif2, tcif2,
chtif2, ctcif2, cgif2,
teif2, cteif2
),
C3: (
cr3, CR3,
cndtr3, CNDTR3,
cpar3, CPAR3,
cmar3, CMAR3,
htif3, tcif3,
chtif3, ctcif3, cgif3,
teif3, cteif3
),
C4: (
cr4, CR4,
cndtr4, CNDTR4,
cpar4, CPAR4,
cmar4, CMAR4,
htif4, tcif4,
chtif4, ctcif4, cgif4,
teif4, cteif4
),
C5: (
cr5, CR5,
cndtr5, CNDTR5,
cpar5, CPAR5,
cmar5, CMAR5,
htif5, tcif5,
chtif5, ctcif5, cgif5,
teif5, cteif5
),
C6: (
cr6, CR6,
cndtr6, CNDTR6,
cpar6, CPAR6,
cmar6, CMAR6,
htif6, tcif6,
chtif6, ctcif6, cgif6,
teif6, cteif6
),
C7: (
cr7, CR7,
cndtr7, CNDTR7,
cpar7, CPAR7,
cmar7, CMAR7,
htif7, tcif7,
chtif7, ctcif7, cgif7,
teif7, cteif7
),
}),
DMA2: (dma2, enr, rstr1, {
C1: (
cr1, CR1,
cndtr1, CNDTR1,
cpar1, CPAR1,
cmar1, CMAR1,
htif1, tcif1,
chtif1, ctcif1, cgif1,
teif1, cteif1
),
C2: (
cr2, CR2,
cndtr2, CNDTR2,
cpar2, CPAR2,
cmar2, CMAR2,
htif2, tcif2,
chtif2, ctcif2, cgif2,
teif2, cteif2
),
C3: (
cr3, CR3,
cndtr3, CNDTR3,
cpar3, CPAR3,
cmar3, CMAR3,
htif3, tcif3,
chtif3, ctcif3, cgif3,
teif3, cteif3
),
C4: (
cr4, CR4,
cndtr4, CNDTR4,
cpar4, CPAR4,
cmar4, CMAR4,
htif4, tcif4,
chtif4, ctcif4, cgif4,
teif4, cteif4
),
C5: (
cr5, CR5,
cndtr5, CNDTR5,
cpar5, CPAR5,
cmar5, CMAR5,
htif5, tcif5,
chtif5, ctcif5, cgif5,
teif5, cteif5
),
C6: (
cr6, CR6,
cndtr6, CNDTR6,
cpar6, CPAR6,
cmar6, CMAR6,
htif6, tcif6,
chtif6, ctcif6, cgif6,
teif6, cteif6
),
C7: (
cr7, CR7,
cndtr7, CNDTR7,
cpar7, CPAR7,
cmar7, CMAR7,
htif7, tcif7,
chtif7, ctcif7, cgif7,
teif7, cteif7
),
}),
}