probe_rs/memory/mod.rs

use crate::error::Error;

use scroll::Pread;

/// {function_name} was called with a data length that is not a multiple of {alignment}
#[derive(Debug, thiserror::Error, docsplay::Display)]
pub struct InvalidDataLengthError {
    /// Name of the function that caused the error.
    pub function_name: &'static str,
    /// The alignment required on the data length.
    pub alignment: usize,
}
impl InvalidDataLengthError {
    /// Creates a new error for `function_name`, which requires the data length to be a multiple of `alignment`.
    pub fn new(function_name: &'static str, alignment: usize) -> Self {
        Self {
            function_name,
            alignment,
        }
    }
}

/// Memory access to address {address:#X?} was not aligned to {alignment} bytes.
#[derive(Debug, thiserror::Error, docsplay::Display)]
pub struct MemoryNotAlignedError {
    /// The address of the attempted access.
    pub address: u64,
    /// The required alignment in bytes (address increments).
    pub alignment: usize,
}

/// An interface to be implemented for drivers that allow target memory access.
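///
/// # Example
///
/// A minimal usage sketch (not taken from this crate's tests), assuming `MemoryInterface`
/// and `Error` are re-exported at the crate root. The `STATUS_REG` and `BUFFER_ADDR`
/// addresses are hypothetical placeholders.
///
/// ```ignore
/// use probe_rs::{Error, MemoryInterface};
///
/// fn dump_state(mem: &mut impl MemoryInterface) -> Result<(), Error> {
///     const STATUS_REG: u64 = 0x4000_0000; // hypothetical peripheral register
///     const BUFFER_ADDR: u64 = 0x2000_0000; // hypothetical RAM buffer
///
///     let status = mem.read_word_32(STATUS_REG)?;
///     println!("status register: {status:#010x}");
///
///     let mut buf = [0u8; 64];
///     mem.read(BUFFER_ADDR, &mut buf)?;
///
///     // Make sure any batched probe operations have actually been issued.
///     mem.flush()?;
///     Ok(())
/// }
/// ```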
pub trait MemoryInterface<ERR = Error>
where
    ERR: std::error::Error + From<InvalidDataLengthError> + From<MemoryNotAlignedError>,
{
    /// Does this interface support native 64-bit wide accesses?
    ///
    /// If false, all 64-bit operations may be split into 32-bit or 8-bit operations.
    /// Most callers will not need to pivot on this, but it can be useful for
    /// picking the fastest bulk data transfer method.
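    ///
    /// A hedged sketch of such a pivot (`iface` and `addr` are placeholders; the
    /// buffer length is a multiple of both 4 and 8):
    ///
    /// ```ignore
    /// // Pick the widest transfer the probe supports natively.
    /// let mut buf = vec![0u8; 1024];
    /// if iface.supports_native_64bit_access() {
    ///     iface.read_mem_64bit(addr, &mut buf)?;
    /// } else {
    ///     iface.read_mem_32bit(addr, &mut buf)?;
    /// }
    /// ```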
    fn supports_native_64bit_access(&mut self) -> bool;

    /// Read a 64bit word at `address`.
    ///
    /// The address where the read should be performed has to be a multiple of 8.
    /// Returns [`Error::MemoryNotAligned`] if this does not hold true.
    fn read_word_64(&mut self, address: u64) -> Result<u64, ERR> {
        let mut word = 0;
        self.read_64(address, std::slice::from_mut(&mut word))?;
        Ok(word)
    }

    /// Read a 32bit word at `address`.
    ///
    /// The address where the read should be performed has to be a multiple of 4.
    /// Returns [`Error::MemoryNotAligned`] if this does not hold true.
    fn read_word_32(&mut self, address: u64) -> Result<u32, ERR> {
        let mut word = 0;
        self.read_32(address, std::slice::from_mut(&mut word))?;
        Ok(word)
    }

    /// Read a 16bit word at `address`.
    ///
    /// The address where the read should be performed has to be a multiple of 2.
    /// Returns [`Error::MemoryNotAligned`] if this does not hold true.
    fn read_word_16(&mut self, address: u64) -> Result<u16, ERR> {
        let mut word = 0;
        self.read_16(address, std::slice::from_mut(&mut word))?;
        Ok(word)
    }

    /// Read an 8bit word at `address`.
    fn read_word_8(&mut self, address: u64) -> Result<u8, ERR> {
        let mut word = 0;
        self.read_8(address, std::slice::from_mut(&mut word))?;
        Ok(word)
    }

    /// Read a block of 64bit words at `address` in the target's endianness.
    ///
    /// The number of words read is `data.len()`.
    /// The address where the read should be performed has to be a multiple of 8.
    /// Returns [`Error::MemoryNotAligned`] if this does not hold true.
    fn read_64(&mut self, address: u64, data: &mut [u64]) -> Result<(), ERR>;

    /// Read a block of 32bit words at `address` in the target's endianness.
    ///
    /// The number of words read is `data.len()`.
    /// The address where the read should be performed has to be a multiple of 4.
    /// Returns [`Error::MemoryNotAligned`] if this does not hold true.
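    ///
    /// A small usage sketch (`iface` is a placeholder for any implementor):
    ///
    /// ```ignore
    /// // Read four consecutive 32-bit words from a 4-byte-aligned address.
    /// let mut words = [0u32; 4];
    /// iface.read_32(0x2000_0000, &mut words)?;
    /// ```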
    fn read_32(&mut self, address: u64, data: &mut [u32]) -> Result<(), ERR>;

    /// Read a block of 16bit words at `address` in the target's endianness.
    ///
    /// The number of words read is `data.len()`.
    /// The address where the read should be performed has to be a multiple of 2.
    /// Returns [`Error::MemoryNotAligned`] if this does not hold true.
    fn read_16(&mut self, address: u64, data: &mut [u16]) -> Result<(), ERR>;

    /// Read a block of 8bit words at `address`.
    fn read_8(&mut self, address: u64, data: &mut [u8]) -> Result<(), ERR>;

    /// Reads bytes using 64 bit memory access.
    ///
    /// The address where the read should be performed has to be a multiple of 8,
    /// and the length of `data` has to be a multiple of 8 bytes.
    /// Returns [`Error::MemoryNotAligned`] if the address is not aligned, and an
    /// [`InvalidDataLengthError`] if the length is not a multiple of 8.
    fn read_mem_64bit(&mut self, address: u64, data: &mut [u8]) -> Result<(), ERR> {
        // Default implementation uses `read_64`, then converts u64 values back
        // to bytes. Assumes target is little endian. May be overridden to
        // provide an implementation that avoids heap allocation and endian
        // conversions. Must be overridden for big endian targets.
        if !data.len().is_multiple_of(8) {
            return Err(InvalidDataLengthError::new("read_mem_64bit", 8).into());
        }
        let mut buffer = vec![0u64; data.len() / 8];
        self.read_64(address, &mut buffer)?;
        for (bytes, value) in data.chunks_exact_mut(8).zip(buffer.iter()) {
            bytes.copy_from_slice(&u64::to_le_bytes(*value));
        }
        Ok(())
    }

    /// Reads bytes using 32 bit memory access.
    ///
    /// The address where the read should be performed has to be a multiple of 4,
    /// and the length of `data` has to be a multiple of 4 bytes.
    /// Returns [`Error::MemoryNotAligned`] if the address is not aligned, and an
    /// [`InvalidDataLengthError`] if the length is not a multiple of 4.
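    ///
    /// A small usage sketch (`iface` is a placeholder for any implementor):
    ///
    /// ```ignore
    /// // Read 16 bytes via 32-bit accesses; the length must be a multiple of 4.
    /// let mut bytes = [0u8; 16];
    /// iface.read_mem_32bit(0x2000_0000, &mut bytes)?;
    /// ```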
    fn read_mem_32bit(&mut self, address: u64, data: &mut [u8]) -> Result<(), ERR> {
        // Default implementation uses `read_32`, then converts u32 values back
        // to bytes. Assumes target is little endian. May be overridden to
        // provide an implementation that avoids heap allocation and endian
        // conversions. Must be overridden for big endian targets.
        if !data.len().is_multiple_of(4) {
            return Err(InvalidDataLengthError::new("read_mem_32bit", 4).into());
        }
        let mut buffer = vec![0u32; data.len() / 4];
        self.read_32(address, &mut buffer)?;
        for (bytes, value) in data.chunks_exact_mut(4).zip(buffer.iter()) {
            bytes.copy_from_slice(&u32::to_le_bytes(*value));
        }
        Ok(())
    }

    /// Read data from `address`.
    ///
    /// This function tries to use the fastest way of reading data, so there is no
    /// guarantee which kind of memory access is used. The function might also read more
    /// data than requested, e.g. when the start address is not aligned to a 32-bit boundary.
    ///
    /// For more control, the `read_x` functions, e.g. [`MemoryInterface::read_32()`], can be
    /// used.
    ///
    /// Generally faster than [`MemoryInterface::read_8`].
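    ///
    /// A small usage sketch (`iface` is a placeholder for any implementor):
    ///
    /// ```ignore
    /// // Read 5 bytes starting at an unaligned address; the default
    /// // implementation may fetch whole 32-bit words around the range.
    /// let mut bytes = [0u8; 5];
    /// iface.read(0x2000_0003, &mut bytes)?;
    /// ```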
    fn read(&mut self, address: u64, data: &mut [u8]) -> Result<(), ERR> {
        if self.supports_native_64bit_access() {
            // Avoid heap allocation and copy if we don't need it.
            self.read_8(address, data)?;
        } else if address.is_multiple_of(4) && data.len().is_multiple_of(4) {
            // Avoid heap allocation and copy if we don't need it.
            self.read_mem_32bit(address, data)?;
        } else {
            let start_extra_count = (address % 4) as usize;
            let mut buffer = vec![0u8; (start_extra_count + data.len()).div_ceil(4) * 4];
            self.read_mem_32bit(address - start_extra_count as u64, &mut buffer)?;
            data.copy_from_slice(&buffer[start_extra_count..start_extra_count + data.len()]);
        }
        Ok(())
    }

    /// Write a 64bit word at `address`.
    ///
    /// The address where the write should be performed has to be a multiple of 8.
    /// Returns [`Error::MemoryNotAligned`] if this does not hold true.
    fn write_word_64(&mut self, address: u64, data: u64) -> Result<(), ERR> {
        self.write_64(address, std::slice::from_ref(&data))
    }

    /// Write a 32bit word at `address`.
    ///
    /// The address where the write should be performed has to be a multiple of 4.
    /// Returns [`Error::MemoryNotAligned`] if this does not hold true.
    fn write_word_32(&mut self, address: u64, data: u32) -> Result<(), ERR> {
        self.write_32(address, std::slice::from_ref(&data))
    }

    /// Write a 16bit word at `address`.
    ///
    /// The address where the write should be performed has to be a multiple of 2.
    /// Returns [`Error::MemoryNotAligned`] if this does not hold true.
    fn write_word_16(&mut self, address: u64, data: u16) -> Result<(), ERR> {
        self.write_16(address, std::slice::from_ref(&data))
    }

    /// Write an 8bit word at `address`.
    fn write_word_8(&mut self, address: u64, data: u8) -> Result<(), ERR> {
        self.write_8(address, std::slice::from_ref(&data))
    }

    /// Write a block of 64bit words at `address` in the target's endianness.
    ///
    /// The number of words written is `data.len()`.
    /// The address where the write should be performed has to be a multiple of 8.
    /// Returns [`Error::MemoryNotAligned`] if this does not hold true.
    fn write_64(&mut self, address: u64, data: &[u64]) -> Result<(), ERR>;

    /// Write a block of 32bit words at `address` in the target's endianness.
    ///
    /// The number of words written is `data.len()`.
    /// The address where the write should be performed has to be a multiple of 4.
    /// Returns [`Error::MemoryNotAligned`] if this does not hold true.
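    ///
    /// A small usage sketch (`iface` is a placeholder for any implementor):
    ///
    /// ```ignore
    /// // Write four 32-bit words starting at a 4-byte-aligned address.
    /// iface.write_32(0x2000_0000, &[0xDEAD_BEEF; 4])?;
    /// ```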
    fn write_32(&mut self, address: u64, data: &[u32]) -> Result<(), ERR>;

    /// Write a block of 16bit words at `address` in the target's endianness.
    ///
    /// The number of words written is `data.len()`.
    /// The address where the write should be performed has to be a multiple of 2.
    /// Returns [`Error::MemoryNotAligned`] if this does not hold true.
    fn write_16(&mut self, address: u64, data: &[u16]) -> Result<(), ERR>;

    /// Write a block of 8bit words at `address`.
    fn write_8(&mut self, address: u64, data: &[u8]) -> Result<(), ERR>;

    /// Writes bytes using 64 bit memory access. The address must be 64 bit aligned
    /// and the length of `data` must be a multiple of 8 bytes.
    fn write_mem_64bit(&mut self, address: u64, data: &[u8]) -> Result<(), ERR> {
        // Default implementation converts the bytes to u64 values (assuming the
        // target is little endian), then uses `write_64`. May be overridden to
        // provide an implementation that avoids heap allocation and endian
        // conversions. Must be overridden for big endian targets.
        if !data.len().is_multiple_of(8) {
            return Err(InvalidDataLengthError::new("write_mem_64bit", 8).into());
        }
        let mut buffer = vec![0u64; data.len() / 8];
        for (bytes, value) in data.chunks_exact(8).zip(buffer.iter_mut()) {
            *value = bytes
                .pread_with(0, scroll::LE)
                .expect("a u64 - this is a bug, please report it");
        }

        self.write_64(address, &buffer)?;
        Ok(())
    }

    /// Writes bytes using 32 bit memory access. The address must be 32 bit aligned
    /// and the length of `data` must be a multiple of 4 bytes.
    fn write_mem_32bit(&mut self, address: u64, data: &[u8]) -> Result<(), ERR> {
        // Default implementation converts the bytes to u32 values (assuming the
        // target is little endian), then uses `write_32`. May be overridden to
        // provide an implementation that avoids heap allocation and endian
        // conversions. Must be overridden for big endian targets.
        if !data.len().is_multiple_of(4) {
            return Err(InvalidDataLengthError::new("write_mem_32bit", 4).into());
        }
        let mut buffer = vec![0u32; data.len() / 4];
        for (bytes, value) in data.chunks_exact(4).zip(buffer.iter_mut()) {
            *value = bytes
                .pread_with(0, scroll::LE)
                .expect("a u32 - this is a bug, please report it");
        }

        self.write_32(address, &buffer)?;
        Ok(())
    }

    /// Write a block of 8bit words at `address`. May use 64 bit memory access,
    /// so it should only be used for writing to memory locations that don't have
    /// side effects. Generally faster than [`MemoryInterface::write_8`].
    ///
    /// If the target does not support 8-bit transfers and the write is not fully
    /// aligned to a 32-bit boundary (both the address and the data length), this
    /// function will return an [`Error::MemoryNotAligned`] error.
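    ///
    /// A small usage sketch (`iface` is a placeholder for any implementor):
    ///
    /// ```ignore
    /// // Aligned bulk write, followed by a small unaligned write that only
    /// // succeeds if the probe supports 8-bit transfers.
    /// iface.write(0x2000_0000, &[0u8; 16])?;
    /// if iface.supports_8bit_transfers()? {
    ///     iface.write(0x2000_0011, &[0xAA, 0xBB])?;
    /// }
    /// ```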
    fn write(&mut self, mut address: u64, mut data: &[u8]) -> Result<(), ERR> {
        let len = data.len();
        let start_extra_count = ((4 - (address % 4) as usize) % 4).min(len);
        let end_extra_count = (len - start_extra_count) % 4;
        let inbetween_count = len - start_extra_count - end_extra_count;
        assert!(start_extra_count < 4);
        assert!(end_extra_count < 4);
        assert!(inbetween_count.is_multiple_of(4));

        if start_extra_count != 0 || end_extra_count != 0 {
            // If we do not support 8 bit transfers we have to bail
            // because we have to do unaligned writes but can only do
            // 32 bit word aligned transfers.
            if !self.supports_8bit_transfers()? {
                return Err(MemoryNotAlignedError {
                    address,
                    alignment: 4,
                }
                .into());
            }
        }

        if start_extra_count != 0 {
            // We first do an 8 bit write of the first < 4 bytes up until the 4 byte aligned boundary.
            self.write_8(address, &data[..start_extra_count])?;

            address += start_extra_count as u64;
            data = &data[start_extra_count..];
        }

        // Make sure we don't try to do an empty but potentially unaligned write.
        if inbetween_count > 0 {
            // We do a 32 bit write of the remaining bytes that are 4 byte aligned.
            let mut buffer = vec![0u32; inbetween_count / 4];
            for (bytes, value) in data.chunks_exact(4).zip(buffer.iter_mut()) {
                *value = u32::from_le_bytes([bytes[0], bytes[1], bytes[2], bytes[3]]);
            }
            self.write_32(address, &buffer)?;

            address += inbetween_count as u64;
            data = &data[inbetween_count..];
        }

        // We write the remaining bytes that we did not write yet, which is always n < 4.
        if end_extra_count > 0 {
            self.write_8(address, &data[..end_extra_count])?;
        }

        Ok(())
    }

    /// Returns whether the current platform supports native 8bit transfers.
    fn supports_8bit_transfers(&self) -> Result<bool, ERR>;

    /// Flush any outstanding operations.
    ///
    /// For performance, debug probe implementations may choose to batch writes;
    /// to assure that any such batched writes have in fact been issued, `flush`
    /// can be called. Takes no arguments, but may return failure if a batched
    /// operation fails.
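    ///
    /// A small usage sketch (`iface` and the addresses are placeholders):
    ///
    /// ```ignore
    /// // Writes may be batched by the probe driver; flush guarantees they
    /// // have reached the target before we continue.
    /// iface.write_word_32(0x2000_0000, 0x1)?;
    /// iface.write_word_32(0x2000_0004, 0x2)?;
    /// iface.flush()?;
    /// ```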
    fn flush(&mut self) -> Result<(), ERR>;
}

// Helper functions to validate address space constraints

/// Validate that an input address is valid for 32-bit only systems
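///
/// A small sketch of the expected behaviour:
///
/// ```ignore
/// assert!(valid_32bit_address(0xFFFF_FFFF).is_ok());
/// assert!(valid_32bit_address(0x1_0000_0000).is_err());
/// ```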
pub(crate) fn valid_32bit_address(address: u64) -> Result<u32, Error> {
    let address: u32 = address
        .try_into()
        .map_err(|_| Error::Other(format!("Address {address:#08x} out of range")))?;

    Ok(address)
}

/// Simplifies delegating MemoryInterface implementations, with additional error type conversion.
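///
/// A hedged sketch of a delegating wrapper (`ProbeMemory` and `ProbeError` are
/// hypothetical names; `ProbeMemory` is assumed to implement
/// `MemoryInterface<ProbeError>` and `ProbeError` to satisfy the bounds on
/// `ErrorType`):
///
/// ```ignore
/// struct Wrapper {
///     inner: ProbeMemory,
/// }
///
/// impl CoreMemoryInterface for Wrapper {
///     type ErrorType = ProbeError;
///
///     fn memory(&self) -> &dyn MemoryInterface<Self::ErrorType> {
///         &self.inner
///     }
///
///     fn memory_mut(&mut self) -> &mut dyn MemoryInterface<Self::ErrorType> {
///         &mut self.inner
///     }
/// }
///
/// // The blanket impl below then provides `MemoryInterface<Error>` for
/// // `Wrapper`, provided `Error: From<ProbeError>`.
/// ```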
pub trait CoreMemoryInterface {
    /// The error type returned by the underlying memory interface.
    type ErrorType: std::error::Error + From<InvalidDataLengthError> + From<MemoryNotAlignedError>;

    /// Returns a reference to the underlying memory interface.
    fn memory(&self) -> &dyn MemoryInterface<Self::ErrorType>;

    /// Returns a mutable reference to the underlying memory interface.
    fn memory_mut(&mut self) -> &mut dyn MemoryInterface<Self::ErrorType>;
}

impl<T> MemoryInterface<Error> for T
where
    T: CoreMemoryInterface,
    Error: From<<T as CoreMemoryInterface>::ErrorType>,
{
    fn supports_native_64bit_access(&mut self) -> bool {
        self.memory_mut().supports_native_64bit_access()
    }

    fn read_word_64(&mut self, address: u64) -> Result<u64, Error> {
        self.memory_mut().read_word_64(address).map_err(Error::from)
    }

    fn read_word_32(&mut self, address: u64) -> Result<u32, Error> {
        self.memory_mut().read_word_32(address).map_err(Error::from)
    }

    fn read_word_16(&mut self, address: u64) -> Result<u16, Error> {
        self.memory_mut().read_word_16(address).map_err(Error::from)
    }

    fn read_word_8(&mut self, address: u64) -> Result<u8, Error> {
        self.memory_mut().read_word_8(address).map_err(Error::from)
    }

    fn read_64(&mut self, address: u64, data: &mut [u64]) -> Result<(), Error> {
        self.memory_mut()
            .read_64(address, data)
            .map_err(Error::from)
    }

    fn read_32(&mut self, address: u64, data: &mut [u32]) -> Result<(), Error> {
        self.memory_mut()
            .read_32(address, data)
            .map_err(Error::from)
    }

    fn read_16(&mut self, address: u64, data: &mut [u16]) -> Result<(), Error> {
        self.memory_mut()
            .read_16(address, data)
            .map_err(Error::from)
    }

    fn read_8(&mut self, address: u64, data: &mut [u8]) -> Result<(), Error> {
        self.memory_mut().read_8(address, data).map_err(Error::from)
    }

    fn read(&mut self, address: u64, data: &mut [u8]) -> Result<(), Error> {
        self.memory_mut().read(address, data).map_err(Error::from)
    }

    fn write_word_64(&mut self, address: u64, data: u64) -> Result<(), Error> {
        self.memory_mut()
            .write_word_64(address, data)
            .map_err(Error::from)
    }

    fn write_word_32(&mut self, address: u64, data: u32) -> Result<(), Error> {
        self.memory_mut()
            .write_word_32(address, data)
            .map_err(Error::from)
    }

    fn write_word_16(&mut self, address: u64, data: u16) -> Result<(), Error> {
        self.memory_mut()
            .write_word_16(address, data)
            .map_err(Error::from)
    }

    fn write_word_8(&mut self, address: u64, data: u8) -> Result<(), Error> {
        self.memory_mut()
            .write_word_8(address, data)
            .map_err(Error::from)
    }

    fn write_64(&mut self, address: u64, data: &[u64]) -> Result<(), Error> {
        self.memory_mut()
            .write_64(address, data)
            .map_err(Error::from)
    }

    fn write_32(&mut self, address: u64, data: &[u32]) -> Result<(), Error> {
        self.memory_mut()
            .write_32(address, data)
            .map_err(Error::from)
    }

    fn write_16(&mut self, address: u64, data: &[u16]) -> Result<(), Error> {
        self.memory_mut()
            .write_16(address, data)
            .map_err(Error::from)
    }

    fn write_8(&mut self, address: u64, data: &[u8]) -> Result<(), Error> {
        self.memory_mut()
            .write_8(address, data)
            .map_err(Error::from)
    }

    fn write(&mut self, address: u64, data: &[u8]) -> Result<(), Error> {
        self.memory_mut().write(address, data).map_err(Error::from)
    }

    fn supports_8bit_transfers(&self) -> Result<bool, Error> {
        self.memory().supports_8bit_transfers().map_err(Error::from)
    }

    fn flush(&mut self) -> Result<(), Error> {
        self.memory_mut().flush().map_err(Error::from)
    }
}