zynq7000_hal/
cache.rs

//! # Cache management module
//!
//! Many cache maintenance operations on this SoC have to be performed on both the L1 and the
//! L2 cache in the correct order. This module provides the commonly required operations.
use core::sync::atomic::compiler_fence;

use cortex_ar::{
    asm::dsb,
    cache::{
        clean_and_invalidate_data_cache_line_to_poc, clean_data_cache_line_to_poc,
        invalidate_data_cache_line_to_poc,
    },
};
use zynq7000::l2_cache::{L2Cache, MmioL2Cache};

pub const CACHE_LINE_SIZE: usize = 32;

#[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)]
#[error("alignment error, addresses and lengths must be aligned to the 32 byte cache line size")]
pub struct AlignmentError;

/// Clean and then invalidate a single L2 cache line by physical address.
pub fn clean_and_invalidate_l2c_line(l2c: &mut MmioL2Cache<'static>, addr: u32) {
    l2c.write_clean_by_pa(addr);
    l2c.write_invalidate_by_pa(addr);
}

/// Cleans and invalidates the full L1 and L2 data cache.
pub fn clean_and_invalidate_data_cache() {
    dsb();

    // The generic parameters encode the Cortex-A9 L1 data cache geometry:
    // 4 ways, 32 byte lines and 256 sets.
    cortex_ar::cache::clean_l1_data_cache::<2, 5, 8>();
    dsb();

    // Clean and invalidate all ways in the L2 cache.
    let mut l2c = unsafe { L2Cache::new_mmio_fixed() };
    l2c.write_clean_invalidate_by_way(0xffff);
    while l2c.read_cache_sync().busy() {}
    compiler_fence(core::sync::atomic::Ordering::SeqCst);

    cortex_ar::cache::clean_and_invalidate_l1_data_cache::<2, 5, 8>();
    dsb();
}
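
// A hedged call-site sketch, not part of the HAL: the whole-cache variant is typically used
// before reconfiguring or disabling the caches, where per-range maintenance is impractical.
// The function name below is an illustrative assumption.
pub fn example_flush_before_cache_reconfiguration() {
    // Afterwards, main memory is coherent and no valid data remains in L1 or L2.
    clean_and_invalidate_data_cache();
}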

/// Invalidate an address range.
///
/// This function invalidates both the L1 and L2 cache. The L2C must be enabled and set up
/// correctly for this function to work as intended.
///
/// The provided address and the length to invalidate must both be aligned to the 32 byte cache
/// line length.
pub fn invalidate_data_cache_range(addr: u32, len: usize) -> Result<(), AlignmentError> {
    if !addr.is_multiple_of(CACHE_LINE_SIZE as u32) || !len.is_multiple_of(CACHE_LINE_SIZE) {
        return Err(AlignmentError);
    }
    let mut current_addr = addr;
    let end_addr = addr.saturating_add(len as u32);
    let mut l2c = unsafe { L2Cache::new_mmio_fixed() };

    dsb();
    // Invalidate outer cache lines first, see chapter 3.3.10 of the L2C technical reference
    // manual.
    while current_addr < end_addr {
        l2c.write_invalidate_by_pa(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    while l2c.read_cache_sync().busy() {}

    // Invalidate inner cache lines.
    current_addr = addr;
    compiler_fence(core::sync::atomic::Ordering::SeqCst);

    while current_addr < end_addr {
        invalidate_data_cache_line_to_poc(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    // Synchronize the cache maintenance.
    dsb();
    Ok(())
}
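
// A hedged usage sketch, not part of the HAL API: pairing the invalidate routine with a DMA
// receive buffer. `CacheAligned` and the example function are illustrative assumptions; only
// the 32 byte alignment requirement comes from this module.

/// Example wrapper guaranteeing 32 byte cache line alignment.
#[repr(C, align(32))]
pub struct CacheAligned<const N: usize>(pub [u8; N]);

/// Discard any cached copies of `buf` so that subsequent CPU reads observe the data a DMA
/// engine has written to main memory. `N` must be a multiple of 32.
pub fn example_invalidate_dma_rx_buffer<const N: usize>(
    buf: &mut CacheAligned<N>,
) -> Result<(), AlignmentError> {
    invalidate_data_cache_range(buf.0.as_mut_ptr() as u32, N)
}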

/// Clean and then invalidate an address range.
///
/// This operation is also commonly called a cache flush. This function cleans and invalidates
/// both the L1 and L2 cache. The L2C must be enabled and set up correctly for this function to
/// work as intended.
///
/// Both the address and the length to clean and invalidate must be a multiple of the 32 byte
/// cache line length.
pub fn clean_and_invalidate_data_cache_range(addr: u32, len: usize) -> Result<(), AlignmentError> {
    if !addr.is_multiple_of(CACHE_LINE_SIZE as u32) || !len.is_multiple_of(CACHE_LINE_SIZE) {
        return Err(AlignmentError);
    }
    let end_addr = addr.saturating_add(len as u32);
    let mut current_addr = addr;
    dsb();

    // For details on the following section, see chapter 3.3.10 of the L2C technical reference
    // manual.
    // Clean inner cache lines first.
    while current_addr < end_addr {
        clean_data_cache_line_to_poc(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    dsb();

    // Clean and invalidate outer cache lines.
    let mut l2c = unsafe { L2Cache::new_mmio_fixed() };
    current_addr = addr;
    while current_addr < end_addr {
        // ARM erratum 588369 requires clean and invalidate to be issued as separate operations,
        // but the L2C revision on the Zynq 7000 appears to be r3p2 (revision 8), and the erratum
        // was fixed in r2p0. Both the Xilinx code and zynq-rs use the combined clean and
        // invalidate operation, so it should be fine here as well. The Xilinx code additionally
        // uses the debug control register to disable linefills and write-backs, which appears to
        // be part of the workaround for the same erratum; zynq-rs does not do this, and it
        // should not be necessary on this revision.
        l2c.write_clean_invalidate_by_pa(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    while l2c.read_cache_sync().busy() {}

    // Now clean and invalidate inner cache lines.
    current_addr = addr;
    compiler_fence(core::sync::atomic::Ordering::SeqCst);

    while current_addr < end_addr {
        clean_and_invalidate_data_cache_line_to_poc(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    dsb();
    Ok(())
}
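
// A hedged sketch along the same lines, illustrative only: flushing a shared buffer that the
// CPU has written and that a peripheral will both read and overwrite. `CacheAligned` is the
// example wrapper defined above.

/// Write dirty lines covering `buf` back to main memory and drop them from both cache levels.
pub fn example_flush_shared_buffer<const N: usize>(
    buf: &CacheAligned<N>,
) -> Result<(), AlignmentError> {
    clean_and_invalidate_data_cache_range(buf.0.as_ptr() as u32, N)
}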

/// Cleans an address range.
///
/// This function cleans both the L1 and L2 cache. The L2C must be enabled and set up correctly
/// for this function to work as intended.
///
/// Both the address and the length to clean must be a multiple of the 32 byte cache line.
pub fn clean_data_cache_range(addr: u32, len: usize) -> Result<(), AlignmentError> {
    if !addr.is_multiple_of(CACHE_LINE_SIZE as u32) || !len.is_multiple_of(CACHE_LINE_SIZE) {
        return Err(AlignmentError);
    }

    let end_addr = addr.saturating_add(len as u32);
    let mut current_addr = addr;
    dsb();

    // For details on the following section, see chapter 3.3.10 of the L2C technical reference
    // manual.
    // Clean inner cache lines first.
    while current_addr < end_addr {
        clean_data_cache_line_to_poc(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    dsb();

    // Clean outer cache lines.
    let mut l2c = unsafe { L2Cache::new_mmio_fixed() };
    current_addr = addr;
    while current_addr < end_addr {
        l2c.write_clean_by_pa(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    while l2c.read_cache_sync().busy() {}
    compiler_fence(core::sync::atomic::Ordering::SeqCst);
    Ok(())
}
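
// A final hedged sketch, illustrative only: cleaning a transmit buffer before handing it to a
// DMA engine. A plain clean is sufficient here because the data only has to reach main memory;
// the lines do not need to be evicted.

/// Write dirty lines covering `buf` back to main memory so a DMA engine reading it sees the
/// data the CPU wrote. `N` must be a multiple of 32.
pub fn example_clean_dma_tx_buffer<const N: usize>(
    buf: &CacheAligned<N>,
) -> Result<(), AlignmentError> {
    clean_data_cache_range(buf.0.as_ptr() as u32, N)
}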