use core::sync::atomic::compiler_fence;

use cortex_ar::{
    asm::dsb,
    cache::{
        clean_and_invalidate_data_cache_line_to_poc, clean_data_cache_line_to_poc,
        invalidate_data_cache_line_to_poc,
    },
};
use zynq7000::l2_cache::{L2Cache, MmioL2Cache};

pub const CACHE_LINE_SIZE: usize = 32;

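/// Returned when an address or length passed to a range maintenance function is not aligned to
/// the 32 byte cache line size.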
#[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)]
#[error("alignment error, addresses and lengths must be aligned to 32 byte cache line length")]
pub struct AlignmentError;

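/// Clean and then invalidate a single L2 cache line by physical address.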
pub fn clean_and_invalidate_l2c_line(l2c: &mut MmioL2Cache<'static>, addr: u32) {
    l2c.write_clean_by_pa(addr);
    l2c.write_invalidate_by_pa(addr);
}

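/// Clean and invalidate the full L1 and L2 data cache hierarchy.
///
/// The L1 data cache is cleaned first, then the L2 cache is cleaned and invalidated by way,
/// and finally the L1 data cache is cleaned and invalidated again.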
pub fn clean_and_invalidate_data_cache() {
    dsb();

    // Clean the entire L1 data cache by set/way first.
    cortex_ar::cache::clean_l1_data_cache::<2, 5, 8>();
    dsb();

    // Clean and invalidate all L2 cache ways and wait for the operation to complete.
    let mut l2c = unsafe { L2Cache::new_mmio_fixed() };
    l2c.write_clean_invalidate_by_way(0xffff);
    while l2c.read_cache_sync().busy() {}
    compiler_fence(core::sync::atomic::Ordering::SeqCst);

    // Finally, clean and invalidate the L1 data cache again.
    cortex_ar::cache::clean_and_invalidate_l1_data_cache::<2, 5, 8>();
    dsb();
}

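/// Invalidate a memory range in both the L2 cache and the L1 data cache.
///
/// The L2 lines are invalidated by physical address first, followed by the L1 lines.
///
/// # Errors
///
/// Returns [AlignmentError] if the address or length is not aligned to the cache line size.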
pub fn invalidate_data_cache_range(addr: u32, len: usize) -> Result<(), AlignmentError> {
    if !addr.is_multiple_of(CACHE_LINE_SIZE as u32) || !len.is_multiple_of(CACHE_LINE_SIZE) {
        return Err(AlignmentError);
    }
    let mut current_addr = addr;
    let end_addr = addr.saturating_add(len as u32);
    let mut l2c = unsafe { L2Cache::new_mmio_fixed() };

    dsb();
    // Invalidate the L2 lines by physical address and wait for completion.
    while current_addr < end_addr {
        l2c.write_invalidate_by_pa(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    while l2c.read_cache_sync().busy() {}

    current_addr = addr;
    compiler_fence(core::sync::atomic::Ordering::SeqCst);

    // Invalidate the L1 lines to the point of coherency.
    while current_addr < end_addr {
        invalidate_data_cache_line_to_poc(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    dsb();
    Ok(())
}

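/// Clean and invalidate a memory range in both the L1 data cache and the L2 cache.
///
/// The L1 lines are cleaned first, then the L2 lines are cleaned and invalidated by physical
/// address, and finally the L1 lines are cleaned and invalidated again.
///
/// # Errors
///
/// Returns [AlignmentError] if the address or length is not aligned to the cache line size.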
pub fn clean_and_invalidate_data_cache_range(addr: u32, len: usize) -> Result<(), AlignmentError> {
    if !addr.is_multiple_of(CACHE_LINE_SIZE as u32) || !len.is_multiple_of(CACHE_LINE_SIZE) {
        return Err(AlignmentError);
    }
    let end_addr = addr.saturating_add(len as u32);
    let mut current_addr = addr;
    dsb();

    // Clean the L1 lines to the point of coherency first.
    while current_addr < end_addr {
        clean_data_cache_line_to_poc(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    dsb();

    // Clean and invalidate the L2 lines by physical address and wait for completion.
    let mut l2c = unsafe { L2Cache::new_mmio_fixed() };
    current_addr = addr;
    while current_addr < end_addr {
        l2c.write_clean_invalidate_by_pa(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    while l2c.read_cache_sync().busy() {}

    current_addr = addr;
    compiler_fence(core::sync::atomic::Ordering::SeqCst);

    // Clean and invalidate the L1 lines to the point of coherency.
    while current_addr < end_addr {
        clean_and_invalidate_data_cache_line_to_poc(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    dsb();
    Ok(())
}

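/// Clean a memory range in both the L1 data cache and the L2 cache.
///
/// The L1 lines are cleaned first, followed by the matching L2 lines by physical address.
///
/// # Errors
///
/// Returns [AlignmentError] if the address or length is not aligned to the cache line size.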
pub fn clean_data_cache_range(addr: u32, len: usize) -> Result<(), AlignmentError> {
    if !addr.is_multiple_of(CACHE_LINE_SIZE as u32) || !len.is_multiple_of(CACHE_LINE_SIZE) {
        return Err(AlignmentError);
    }

    let end_addr = addr.saturating_add(len as u32);
    let mut current_addr = addr;
    dsb();

    // Clean the L1 lines to the point of coherency first.
    while current_addr < end_addr {
        clean_data_cache_line_to_poc(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    dsb();

    // Clean the L2 lines by physical address and wait for completion.
    let mut l2c = unsafe { L2Cache::new_mmio_fixed() };
    current_addr = addr;
    while current_addr < end_addr {
        l2c.write_clean_by_pa(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    while l2c.read_cache_sync().busy() {}
    compiler_fence(core::sync::atomic::Ordering::SeqCst);
    Ok(())
}