probe_rs/architecture/xtensa/
communication_interface.rs

1//! Xtensa Debug Module Communication
2
3use std::{
4    collections::HashMap,
5    ops::Range,
6    time::{Duration, Instant},
7};
8
9use zerocopy::IntoBytes;
10
11use crate::{
12    BreakpointCause, Error as ProbeRsError, HaltReason, MemoryInterface,
13    architecture::xtensa::{
14        arch::{CpuRegister, Register, SpecialRegister, instruction::Instruction},
15        register_cache::RegisterCache,
16        xdm::{DebugStatus, XdmState},
17    },
18    memory::{Operation, OperationKind},
19    probe::{DebugProbeError, DeferredResultIndex, JtagAccess},
20};
21
22use super::xdm::{Error as XdmError, Xdm};
23
/// Possible Xtensa errors
// NOTE: `docsplay::Display` derives the `Display` impl from the doc comments
// below, so each variant's doc comment doubles as its user-facing error
// message — edit them with care.
#[derive(thiserror::Error, Debug, docsplay::Display)]
pub enum XtensaError {
    /// An error originating from the DebugProbe occurred.
    DebugProbe(#[from] DebugProbeError),

    /// Xtensa debug module error.
    XdmError(#[from] XdmError),

    /// The core is not enabled.
    CoreDisabled,

    /// The operation has timed out.
    // TODO: maybe we could be a bit more specific
    Timeout,

    /// The connected target is not an Xtensa device.
    NoXtensaTarget,

    /// The requested register is not available.
    RegisterNotAvailable,

    /// The result index of a batched command is not available.
    BatchedResultNotAvailable,
}
49
50impl From<XtensaError> for ProbeRsError {
51    fn from(err: XtensaError) -> Self {
52        match err {
53            XtensaError::DebugProbe(e) => e.into(),
54            other => ProbeRsError::Xtensa(other),
55        }
56    }
57}
58
/// Debug interrupt level values.
// The numeric discriminants mirror the hardware debug interrupt level, which
// selects the EPCx/EPSx register pair used on a debug exception (see `pc()`
// and `ps()` below).
#[derive(Clone, Copy)]
pub enum DebugLevel {
    /// The CPU was configured to take Debug interrupts at level 2.
    L2 = 2,
    /// The CPU was configured to take Debug interrupts at level 3.
    L3 = 3,
    /// The CPU was configured to take Debug interrupts at level 4.
    L4 = 4,
    /// The CPU was configured to take Debug interrupts at level 5.
    L5 = 5,
    /// The CPU was configured to take Debug interrupts at level 6.
    L6 = 6,
    /// The CPU was configured to take Debug interrupts at level 7.
    L7 = 7,
}
75
76impl DebugLevel {
77    /// The register that contains the current program counter value.
78    pub fn pc(self) -> SpecialRegister {
79        match self {
80            DebugLevel::L2 => SpecialRegister::Epc2,
81            DebugLevel::L3 => SpecialRegister::Epc3,
82            DebugLevel::L4 => SpecialRegister::Epc4,
83            DebugLevel::L5 => SpecialRegister::Epc5,
84            DebugLevel::L6 => SpecialRegister::Epc6,
85            DebugLevel::L7 => SpecialRegister::Epc7,
86        }
87    }
88
89    /// The register that contains the current program status value.
90    pub fn ps(self) -> SpecialRegister {
91        match self {
92            DebugLevel::L2 => SpecialRegister::Eps2,
93            DebugLevel::L3 => SpecialRegister::Eps3,
94            DebugLevel::L4 => SpecialRegister::Eps4,
95            DebugLevel::L5 => SpecialRegister::Eps5,
96            DebugLevel::L6 => SpecialRegister::Eps6,
97            DebugLevel::L7 => SpecialRegister::Eps7,
98        }
99    }
100}
101
/// Xtensa interface state.
#[derive(Default)]
pub(super) struct XtensaInterfaceState {
    /// The register cache.
    pub(super) register_cache: RegisterCache,

    /// Whether the core is halted.
    // This roughly relates to Core Debug States (true = Running, false = [Stopped, Stepping])
    // NOTE(review): the mapping in the comment above looks inverted relative
    // to the field name — throughout this file `is_halted == true` is treated
    // as "stopped". Confirm and fix the original comment.
    pub(super) is_halted: bool,
}
112
/// Properties of a memory region.
// `Default` yields all-`false`, i.e. the most conservative capabilities.
#[derive(Clone, Copy, Default)]
pub struct MemoryRegionProperties {
    /// Whether the CPU supports unaligned stores. (Hardware Alignment Option)
    pub unaligned_store: bool,

    /// Whether the CPU supports unaligned loads. (Hardware Alignment Option)
    pub unaligned_load: bool,

    /// Whether the CPU supports fast memory access in this region. (LDDR32.P/SDDR32.P instructions)
    pub fast_memory_access: bool,
}
125
/// Properties of an Xtensa CPU core.
///
/// These are configuration-dependent parameters of a concrete core; chip
/// support code adjusts them via `XtensaCommunicationInterface::core_properties`.
pub struct XtensaCoreProperties {
    /// The number of hardware breakpoints the target supports. CPU-specific configuration value.
    pub hw_breakpoint_num: u32,

    /// The interrupt level at which debug exceptions are generated. CPU-specific configuration value.
    pub debug_level: DebugLevel,

    /// Known memory ranges with special properties.
    // Addresses not covered by any range are treated conservatively
    // (see `memory_range_properties`).
    pub memory_ranges: HashMap<Range<u64>, MemoryRegionProperties>,

    /// Configurable options in the Windowed Register Option
    pub window_option_properties: WindowProperties,
}
140
141impl Default for XtensaCoreProperties {
142    fn default() -> Self {
143        Self {
144            hw_breakpoint_num: 2,
145            debug_level: DebugLevel::L6,
146            memory_ranges: HashMap::new(),
147            window_option_properties: WindowProperties::lx(64),
148        }
149    }
150}
151
152impl XtensaCoreProperties {
153    /// Returns the memory range for the given address.
154    pub fn memory_properties_at(&self, address: u64) -> MemoryRegionProperties {
155        self.memory_ranges
156            .iter()
157            .find(|(range, _)| range.contains(&address))
158            .map(|(_, region)| *region)
159            .unwrap_or_default()
160    }
161
162    /// Returns the conservative memory range properties for the given address range.
163    pub fn memory_range_properties(&self, range: Range<u64>) -> MemoryRegionProperties {
164        let mut start = range.start;
165        let end = range.end;
166
167        if start == end {
168            return MemoryRegionProperties::default();
169        }
170
171        let mut properties = MemoryRegionProperties {
172            unaligned_store: true,
173            unaligned_load: true,
174            fast_memory_access: true,
175        };
176        while start < end {
177            // Find region that contains the start address.
178            let containing_region = self
179                .memory_ranges
180                .iter()
181                .find(|(range, _)| range.contains(&start));
182
183            let Some((range, region_properties)) = containing_region else {
184                // no point in continuing
185                return MemoryRegionProperties::default();
186            };
187
188            properties.unaligned_store &= region_properties.unaligned_store;
189            properties.unaligned_load &= region_properties.unaligned_load;
190            properties.fast_memory_access &= region_properties.fast_memory_access;
191
192            // Move start to the end of the region.
193            start = range.end;
194        }
195
196        properties
197    }
198}
199
/// Properties of the windowed register file.
// The counts below are only meaningful when `has_windowed_registers` is true.
#[derive(Clone, Copy, Debug)]
pub struct WindowProperties {
    /// Whether the CPU has windowed registers.
    pub has_windowed_registers: bool,

    /// The total number of AR registers in the register file.
    pub num_aregs: u8,

    /// The number of registers in a single window.
    pub window_regs: u8,

    /// The number of registers rotated by an LSB of the ROTW instruction.
    pub rotw_rotates: u8,
}
215
216impl WindowProperties {
217    /// Create a new WindowProperties instance with the given number of AR registers.
218    pub fn lx(num_aregs: u8) -> Self {
219        Self {
220            has_windowed_registers: true,
221            num_aregs,
222            window_regs: 16,
223            rotw_rotates: 4,
224        }
225    }
226
227    /// Returns the number of different valid WindowBase values.
228    pub fn windowbase_size(&self) -> u8 {
229        self.num_aregs / self.rotw_rotates
230    }
231}
232
/// Debug module and transport state.
#[derive(Default)]
pub struct XtensaDebugInterfaceState {
    // High-level interface state (register cache, halt tracking).
    interface_state: XtensaInterfaceState,
    // CPU-specific configuration values.
    core_properties: XtensaCoreProperties,
    // Low-level debug module (XDM) state.
    xdm_state: XdmState,
}
240
/// The higher level of the XDM functionality.
// TODO: this includes core state and CPU configuration that don't exactly belong
// here but one layer up.
pub struct XtensaCommunicationInterface<'probe> {
    /// The Xtensa debug module
    pub(crate) xdm: Xdm<'probe>,
    /// Borrowed interface state (register cache, halt tracking).
    pub(super) state: &'probe mut XtensaInterfaceState,
    /// Borrowed CPU-specific configuration values.
    core_properties: &'probe mut XtensaCoreProperties,
}
250
251impl<'probe> XtensaCommunicationInterface<'probe> {
252    /// Create the Xtensa communication interface using the underlying probe driver
253    pub fn new(
254        probe: &'probe mut dyn JtagAccess,
255        state: &'probe mut XtensaDebugInterfaceState,
256    ) -> Self {
257        let XtensaDebugInterfaceState {
258            interface_state,
259            core_properties,
260            xdm_state,
261        } = state;
262        let xdm = Xdm::new(probe, xdm_state);
263
264        Self {
265            xdm,
266            state: interface_state,
267            core_properties,
268        }
269    }
270
    /// Access the properties of the CPU core.
    ///
    /// Returns a mutable reference so chip-specific setup code can adjust
    /// the configuration-dependent parameters.
    pub fn core_properties(&mut self) -> &mut XtensaCoreProperties {
        self.core_properties
    }
275
    /// Read the target's IDCODE.
    pub fn read_idcode(&mut self) -> Result<u32, XtensaError> {
        self.xdm.read_idcode()
    }
280
281    /// Enter debug mode.
282    pub fn enter_debug_mode(&mut self) -> Result<(), XtensaError> {
283        self.state.register_cache = RegisterCache::new();
284        self.xdm.enter_debug_mode()?;
285
286        self.state.is_halted = self.xdm.status()?.stopped();
287
288        Ok(())
289    }
290
291    pub(crate) fn leave_debug_mode(&mut self) -> Result<(), XtensaError> {
292        if self.xdm.status()?.stopped() {
293            self.restore_registers()?;
294            self.resume_core()?;
295        }
296        self.xdm.leave_ocd_mode()?;
297
298        tracing::debug!("Left OCD mode");
299
300        Ok(())
301    }
302
    /// Returns the number of hardware breakpoints the target supports.
    ///
    /// On the Xtensa architecture this is the `NIBREAK` configuration parameter.
    /// The value is taken from [`XtensaCoreProperties::hw_breakpoint_num`].
    pub fn available_breakpoint_units(&self) -> u32 {
        self.core_properties.hw_breakpoint_num
    }
309
310    /// Returns whether the core is halted.
311    pub fn core_halted(&mut self) -> Result<bool, XtensaError> {
312        if !self.state.is_halted {
313            self.state.is_halted = self.xdm.status()?.stopped();
314        }
315
316        Ok(self.state.is_halted)
317    }
318
319    /// Waits until the core is halted.
320    ///
321    /// This function lowers the interrupt level to allow halting on debug exceptions.
322    pub fn wait_for_core_halted(&mut self, timeout: Duration) -> Result<(), XtensaError> {
323        // Wait until halted state is active again.
324        let start = Instant::now();
325
326        while !self.core_halted()? {
327            if start.elapsed() >= timeout {
328                return Err(XtensaError::Timeout);
329            }
330            // Wait a bit before polling again.
331            std::thread::sleep(Duration::from_millis(1));
332        }
333
334        Ok(())
335    }
336
337    /// Halts the core.
338    pub(crate) fn halt(&mut self, timeout: Duration) -> Result<(), XtensaError> {
339        self.xdm.schedule_halt();
340        self.wait_for_core_halted(timeout)?;
341        Ok(())
342    }
343
344    /// Halts the core and returns `true` if the core was running before the halt.
345    pub(crate) fn halt_with_previous(&mut self, timeout: Duration) -> Result<bool, XtensaError> {
346        let was_running = if self.state.is_halted {
347            // Core is already halted, we don't need to do anything.
348            false
349        } else {
350            // If we have not halted the core, it may still be halted on a breakpoint, for example.
351            // Let's check status.
352            let status_idx = self.xdm.schedule_read_nexus_register::<DebugStatus>();
353            self.halt(timeout)?;
354            let before_status = DebugStatus(self.xdm.read_deferred_result(status_idx)?.into_u32());
355
356            !before_status.stopped()
357        };
358
359        Ok(was_running)
360    }
361
    /// Executes `op` with the core halted, using an optimistic fast path.
    ///
    /// Unlike [`Self::halted_access`], the halt is only *scheduled* (not
    /// confirmed) before `op` runs, saving a status round-trip. If the halt
    /// turns out not to have taken effect in time, the whole operation is
    /// retried through the slow, confirmed path.
    fn fast_halted_access(
        &mut self,
        mut op: impl FnMut(&mut Self) -> Result<(), XtensaError>,
    ) -> Result<(), XtensaError> {
        if self.state.is_halted {
            // Core is already halted, we don't need to do anything.
            return op(self);
        }

        // If we have not halted the core, it may still be halted on a breakpoint, for example.
        // Let's check status.
        let status_idx = self.xdm.schedule_read_nexus_register::<DebugStatus>();

        // Queue up halting.
        self.xdm.schedule_halt();

        // We will need to check if we managed to halt the core.
        let is_halted_idx = self.xdm.schedule_read_nexus_register::<DebugStatus>();
        self.state.is_halted = true;

        // Execute the operation while the core is presumed halted. If it is not, we will have
        // various errors, but we will retry the operation.
        let result = op(self);

        // If we did not manage to halt the core at once, let's retry using the slow path.
        let after_status = DebugStatus(self.xdm.read_deferred_result(is_halted_idx)?.into_u32());

        if after_status.stopped() {
            // If the core was running, resume it.
            let before_status = DebugStatus(self.xdm.read_deferred_result(status_idx)?.into_u32());
            if !before_status.stopped() {
                self.resume_core()?;
            }

            return result;
        }
        // The scheduled halt did not take effect in time: discard the (likely
        // failed) fast-path result and retry with a confirmed halt.
        self.state.is_halted = false;
        self.halted_access(|this| op(this))
    }
401
402    /// Executes a closure while ensuring the core is halted.
403    pub fn halted_access<R>(
404        &mut self,
405        op: impl FnOnce(&mut Self) -> Result<R, XtensaError>,
406    ) -> Result<R, XtensaError> {
407        let was_running = self.halt_with_previous(Duration::from_millis(100))?;
408
409        let result = op(self);
410
411        if was_running {
412            self.resume_core()?;
413        }
414
415        result
416    }
417
    /// Steps the core by `by` instructions.
    ///
    /// Stepping uses the ICOUNT mechanism: ICOUNTLEVEL is set to
    /// `intlevel + 1` so instructions below that level increment ICOUNT, and
    /// ICOUNT is primed so it overflows after `by` instructions, raising a
    /// debug exception that halts the core again.
    pub fn step(&mut self, by: u32, intlevel: u32) -> Result<(), XtensaError> {
        // Instructions executed below icountlevel increment the ICOUNT register.
        self.schedule_write_register(ICountLevel(intlevel + 1))?;

        // An exception is generated at the beginning of an instruction that would overflow ICOUNT.
        self.schedule_write_register(ICount(-((1 + by) as i32) as u32))?;

        self.resume_core()?;
        // TODO: instructions like WAITI should be emulated as they are not single steppable.
        // For now it's good enough to force a halt on timeout (instead of crashing) although it can
        // stop in a long-running interrupt handler which isn't necessarily what the user wants.
        // Even then, WAITI should be detected and emulated.
        match self.wait_for_core_halted(Duration::from_millis(100)) {
            Ok(()) => {}
            Err(XtensaError::Timeout) => self.halt(Duration::from_millis(100))?,
            Err(e) => return Err(e),
        }

        // Avoid stopping again
        self.schedule_write_register(ICountLevel(0))?;

        Ok(())
    }
442
    /// Resumes program execution.
    ///
    /// Note: the order below matters — registers must be restored before the
    /// cache is cleared, and both before the core actually resumes.
    pub fn resume_core(&mut self) -> Result<(), XtensaError> {
        // Any time we resume the core, we need to restore the registers so the program
        // doesn't crash.
        self.restore_registers()?;
        // We also need to clear the register cache, as the CPU will likely change the registers.
        self.clear_register_cache();

        tracing::debug!("Resuming core");
        self.state.is_halted = false;
        self.xdm.resume()?;

        Ok(())
    }
457
    /// Schedules reading a CPU (AR) register.
    ///
    /// The register is copied into the DDR special register with WSR, then a
    /// DDR read is queued; the returned index resolves to the value.
    fn schedule_read_cpu_register(&mut self, register: CpuRegister) -> DeferredResultIndex {
        self.xdm
            .schedule_execute_instruction(Instruction::Wsr(SpecialRegister::Ddr, register));
        self.xdm.schedule_read_ddr()
    }
463
    /// Schedules reading a special register.
    ///
    /// A3 is used as a scratch register: its current value is saved to the
    /// register cache first and the entry is marked dirty so it gets restored
    /// before the core resumes.
    fn schedule_read_special_register(
        &mut self,
        register: SpecialRegister,
    ) -> Result<DeferredResultIndex, XtensaError> {
        self.ensure_register_saved(CpuRegister::A3)?;
        self.state.register_cache.mark_dirty(CpuRegister::A3.into());

        // Read special register into the scratch register
        self.xdm
            .schedule_execute_instruction(Instruction::Rsr(register, CpuRegister::A3));

        Ok(self.schedule_read_cpu_register(CpuRegister::A3))
    }
477
    /// Schedules writing a special register.
    ///
    /// The value travels DDR -> A3 -> target special register. A3's original
    /// value is saved first and its cache entry marked dirty so it is
    /// restored before resuming.
    fn schedule_write_special_register(
        &mut self,
        register: SpecialRegister,
        value: u32,
    ) -> Result<(), XtensaError> {
        tracing::debug!("Writing special register: {:?}", register);
        self.ensure_register_saved(CpuRegister::A3)?;
        self.state.register_cache.mark_dirty(CpuRegister::A3.into());

        self.xdm.schedule_write_ddr(value);

        // DDR -> scratch
        self.xdm
            .schedule_execute_instruction(Instruction::Rsr(SpecialRegister::Ddr, CpuRegister::A3));

        // scratch -> target special register
        self.xdm
            .schedule_execute_instruction(Instruction::Wsr(register, CpuRegister::A3));

        Ok(())
    }
499
    /// Schedules writing a CPU (AR) register.
    ///
    /// The value is staged in DDR, then moved into the target register by
    /// executing an RSR from DDR; no scratch register is needed.
    #[tracing::instrument(skip(self), level = "debug")]
    fn schedule_write_cpu_register(
        &mut self,
        register: CpuRegister,
        value: u32,
    ) -> Result<(), XtensaError> {
        tracing::debug!("Writing {:x} to register: {:?}", value, register);

        self.xdm.schedule_write_ddr(value);
        self.xdm
            .schedule_execute_instruction(Instruction::Rsr(SpecialRegister::Ddr, register));

        Ok(())
    }
514
515    /// Read a register.
516    pub fn read_register<R: TypedRegister>(&mut self) -> Result<R, XtensaError> {
517        let value = self.read_register_untyped(R::register())?;
518
519        Ok(R::from_u32(value))
520    }
521
522    /// Write a register.
523    pub fn write_register<R: TypedRegister>(&mut self, reg: R) -> Result<(), XtensaError> {
524        self.write_register_untyped(R::register(), reg.as_u32())?;
525
526        Ok(())
527    }
528
529    /// Schedules writing a register.
530    pub(crate) fn schedule_write_register<R: TypedRegister>(
531        &mut self,
532        reg: R,
533    ) -> Result<(), XtensaError> {
534        self.schedule_write_register_untyped(R::register(), reg.as_u32())?;
535
536        Ok(())
537    }
538
    /// Schedules reading a register.
    ///
    /// If the register is already in the cache, it will return the value from there.
    /// Otherwise the read is queued on the XDM and its deferred result is
    /// recorded in the cache — so the first read of a register also acts as
    /// the "save" used by [`Self::ensure_register_saved`].
    pub(crate) fn schedule_read_register(
        &mut self,
        register: impl Into<Register>,
    ) -> Result<MaybeDeferredResultIndex, XtensaError> {
        let register = register.into();
        if let Some(value) = self.state.register_cache.original_value_of(register) {
            // Already read, can be accessed from the cache.
            return Ok(value);
        }

        let reader = match register {
            Register::Cpu(register) => self.schedule_read_cpu_register(register),
            Register::Special(register) => self.schedule_read_special_register(register)?,
            // CurrentPc/CurrentPs resolve to the EPC/EPS registers of the
            // configured debug level.
            Register::CurrentPc => {
                self.schedule_read_special_register(self.core_properties.debug_level.pc())?
            }
            Register::CurrentPs => {
                self.schedule_read_special_register(self.core_properties.debug_level.ps())?
            }
        };
        self.state.register_cache.store_deferred(register, reader);
        Ok(MaybeDeferredResultIndex::Deferred(register))
    }
565
566    /// Read a register.
567    pub fn read_register_untyped(
568        &mut self,
569        register: impl Into<Register>,
570    ) -> Result<u32, XtensaError> {
571        let reader = self.schedule_read_register(register)?;
572        self.read_deferred_result(reader)
573    }
574
575    /// Schedules writing a register.
576    ///
577    /// This function primes the register cache with the value to be written, therefore
578    /// it is not suitable for writing scratch registers.
579    pub fn schedule_write_register_untyped(
580        &mut self,
581        register: impl Into<Register>,
582        value: u32,
583    ) -> Result<(), XtensaError> {
584        let register = register.into();
585
586        self.state.register_cache.store(register, value);
587
588        match register {
589            Register::Cpu(register) => self.schedule_write_cpu_register(register, value),
590            Register::Special(register) => self.schedule_write_special_register(register, value),
591            Register::CurrentPc => {
592                self.schedule_write_special_register(self.core_properties.debug_level.pc(), value)
593            }
594            Register::CurrentPs => {
595                self.schedule_write_special_register(self.core_properties.debug_level.ps(), value)
596            }
597        }
598    }
599
600    /// Write a register.
601    pub fn write_register_untyped(
602        &mut self,
603        register: impl Into<Register>,
604        value: u32,
605    ) -> Result<(), XtensaError> {
606        self.schedule_write_register_untyped(register, value)?;
607        self.xdm.execute()
608    }
609
    /// Ensures that a scratch register is saved in the register cache before overwriting it.
    ///
    /// Saving is implemented by scheduling a read: the deferred result is
    /// recorded as the register's original value, which
    /// [`Self::restore_registers`] writes back before the core resumes.
    #[tracing::instrument(skip(self, register), fields(register), level = "debug")]
    fn ensure_register_saved(&mut self, register: impl Into<Register>) -> Result<(), XtensaError> {
        let register = register.into();

        tracing::debug!("Saving register: {:?}", register);
        self.schedule_read_register(register)?;

        Ok(())
    }
620
    /// Writes every dirty (clobbered) register back to its saved original
    /// value so the program is unaffected once the core resumes.
    #[tracing::instrument(skip(self), level = "debug")]
    pub(super) fn restore_registers(&mut self) -> Result<(), XtensaError> {
        tracing::debug!("Restoring registers");

        // Restore happens in two passes because restoring a special register
        // itself clobbers the A3 scratch register.
        let filters = [
            // First, we restore special registers, as they may need to use scratch registers.
            |r: &Register| !r.is_cpu_register(),
            // Next, we restore CPU registers, which include scratch registers.
            |r: &Register| r.is_cpu_register(),
        ];
        for filter in filters {
            // Clone the list of saved registers so we can iterate over it, but code may still save
            // new registers. We can't take it otherwise the restore loop would unnecessarily save
            // registers.
            let dirty_regs = self
                .state
                .register_cache
                .iter()
                .filter(|(r, entry)| entry.is_dirty() && filter(r))
                .map(|(r, _)| r)
                .collect::<Vec<_>>();

            for register in dirty_regs {
                // Resolve the deferred read that captured the original value.
                let restore_value = self
                    .state
                    .register_cache
                    .resolved_original_value_of(register, &mut self.xdm)
                    .unwrap_or_else(|| panic!("Saved register {register:?} is not in the cache. This is a bug, please report it."))?;

                self.schedule_write_register_untyped(register, restore_value)?;
            }
        }

        Ok(())
    }
656
657    fn memory_access_for(&self, address: u64, len: usize) -> Box<dyn MemoryAccess> {
658        if self
659            .core_properties
660            .memory_range_properties(address..address + len as u64)
661            .fast_memory_access
662        {
663            Box::new(FastMemoryAccess::new())
664        } else {
665            Box::new(SlowMemoryAccess::new())
666        }
667    }
668
669    fn read_memory(&mut self, address: u64, dst: &mut [u8]) -> Result<(), XtensaError> {
670        tracing::debug!("Reading {} bytes from address {:08x}", dst.len(), address);
671        if dst.is_empty() {
672            return Ok(());
673        }
674
675        let mut memory_access = self.memory_access_for(address, dst.len());
676
677        memory_access.halted_access(self, &mut |this, memory_access| {
678            memory_access.save_scratch_registers(this)?;
679            this.read_memory_impl(memory_access, address, dst)
680        })
681    }
682
    /// Reads `dst.len()` bytes from `address` into `dst`.
    ///
    /// Transfers are performed as whole 32-bit words. If the start address is
    /// unaligned and the region does not support unaligned loads, the first
    /// word is read separately and only the relevant bytes are kept. All word
    /// reads are queued first and resolved afterwards, so the probe can batch
    /// the transfers.
    fn read_memory_impl(
        &mut self,
        memory_access: &mut dyn MemoryAccess,
        address: u64,
        mut dst: &mut [u8],
    ) -> Result<(), XtensaError> {
        let mut to_read = dst.len();

        // Let's assume we can just do 32b reads, so let's
        // do some pre-massaging on unaligned reads if needed.
        let first_read = if !address.is_multiple_of(4)
            && !self
                .core_properties
                .memory_range_properties(address..address + dst.len() as u64)
                .unaligned_load
        {
            memory_access.load_initial_address_for_read(self, address as u32 & !0x3)?;
            let offset = address as usize % 4;

            // Avoid executing another read if we only have to read a single word
            let first_read = if offset + to_read <= 4 {
                memory_access.read_one(self)?
            } else {
                memory_access.read_one_and_continue(self)?
            };

            let bytes_to_copy = (4 - offset).min(to_read);

            to_read -= bytes_to_copy;

            Some((first_read, offset, bytes_to_copy))
        } else {
            // The read is either aligned or the core supports unaligned loads.
            memory_access.load_initial_address_for_read(self, address as u32)?;
            None
        };

        // Queue the remaining word reads; only the last one stops the
        // auto-incrementing access.
        let mut aligned_reads = vec![];
        if to_read > 0 {
            let words = to_read.div_ceil(4);

            for _ in 0..words - 1 {
                aligned_reads.push(memory_access.read_one_and_continue(self)?);
            }
            aligned_reads.push(memory_access.read_one(self)?);
        };

        // Resolve the partial first word, skipping the bytes before `address`.
        if let Some((read, offset, bytes_to_copy)) = first_read {
            let word = self
                .xdm
                .read_deferred_result(read)?
                .into_u32()
                .to_le_bytes();

            dst[..bytes_to_copy].copy_from_slice(&word[offset..][..bytes_to_copy]);
            dst = &mut dst[bytes_to_copy..];
        }

        // Resolve the aligned words; the final word may be partial.
        for read in aligned_reads {
            let word = self
                .xdm
                .read_deferred_result(read)?
                .into_u32()
                .to_le_bytes();

            let bytes = dst.len().min(4);

            dst[..bytes].copy_from_slice(&word[..bytes]);
            dst = &mut dst[bytes..];
        }

        Ok(())
    }
756
757    pub(crate) fn write_memory(&mut self, address: u64, data: &[u8]) -> Result<(), XtensaError> {
758        tracing::debug!("Writing {} bytes to address {:08x}", data.len(), address);
759        if data.is_empty() {
760            return Ok(());
761        }
762
763        let mut memory_access = self.memory_access_for(address, data.len());
764
765        memory_access.halted_access(self, &mut |this, memory_access| {
766            memory_access.save_scratch_registers(this)?;
767            this.write_memory_impl(memory_access, address, data)
768        })
769    }
770
    /// Executes a single memory operation when the processor is halted.
    ///
    /// Every [`OperationKind`] variant is first normalized into a plain byte
    /// slice read or write, then dispatched to the read/write implementation
    /// chosen for the target region. The caller must ensure the core is
    /// already halted.
    fn execute_single_memory_operation_halted(
        &mut self,
        operation: Operation<'_>,
    ) -> Result<(), ProbeRsError> {
        // Internal byte-slice view of the operation.
        enum Op<'a> {
            Read(&'a mut [u8]),
            Write(&'a [u8]),
        }
        impl Op<'_> {
            // Length of the data to transfer, in bytes.
            fn data_len(&self) -> usize {
                match self {
                    Op::Read(bytes) => bytes.len(),
                    Op::Write(bytes) => bytes.len(),
                }
            }
        }

        // Scratch space for the single-word write variants (8 bytes covers
        // the largest word size, u64).
        let mut temp_bytes = [0; 8];
        let op = match operation.operation {
            OperationKind::Read(data) => Op::Read(data),
            OperationKind::Read8(data) => Op::Read(data),
            OperationKind::Read16(data) => Op::Read(data.as_mut_bytes()),
            OperationKind::Read32(data) => Op::Read(data.as_mut_bytes()),
            OperationKind::Read64(data) => Op::Read(data.as_mut_bytes()),
            OperationKind::Write(data) => Op::Write(data),
            OperationKind::Write8(data) => Op::Write(data),
            OperationKind::Write16(data) => Op::Write(data.as_bytes()),
            OperationKind::Write32(data) => Op::Write(data.as_bytes()),
            OperationKind::Write64(data) => Op::Write(data.as_bytes()),
            OperationKind::WriteWord8(word) => {
                let word_bytes = size_of_val(&word);
                let bytes = &mut temp_bytes[..word_bytes];
                bytes.copy_from_slice(&word.to_le_bytes());
                Op::Write(bytes)
            }
            OperationKind::WriteWord16(word) => {
                let word_bytes = size_of_val(&word);
                let bytes = &mut temp_bytes[..word_bytes];
                bytes.copy_from_slice(&word.to_le_bytes());
                Op::Write(bytes)
            }
            OperationKind::WriteWord32(word) => {
                let word_bytes = size_of_val(&word);
                let bytes = &mut temp_bytes[..word_bytes];
                bytes.copy_from_slice(&word.to_le_bytes());
                Op::Write(bytes)
            }
            OperationKind::WriteWord64(word) => {
                let word_bytes = size_of_val(&word);
                let bytes = &mut temp_bytes[..word_bytes];
                bytes.copy_from_slice(&word.to_le_bytes());
                Op::Write(bytes)
            }
        };

        // Nothing to transfer.
        if op.data_len() == 0 {
            return Ok(());
        }

        let address = operation.address;

        let mut memory_access = self.memory_access_for(address, op.data_len());
        memory_access.save_scratch_registers(self)?;

        match op {
            Op::Read(dst) => self
                .read_memory_impl(memory_access.as_mut(), address, dst)
                .map_err(ProbeRsError::Xtensa),
            Op::Write(buffer) => self
                .write_memory_impl(memory_access.as_mut(), address, buffer)
                .map_err(ProbeRsError::Xtensa),
        }
    }
845
    /// Writes `buffer` to target memory at `address`.
    ///
    /// Handles arbitrary (unaligned) start addresses and lengths. When the
    /// core does not support unaligned stores, the unaligned head and the
    /// 1-3 byte tail are handled with a read-modify-write of the containing
    /// aligned word; the middle is written as whole 32-bit words.
    ///
    /// The caller is expected to have saved the scratch registers used by
    /// `memory_access` (see `MemoryAccess::save_scratch_registers`).
    fn write_memory_impl(
        &mut self,
        memory_access: &mut dyn MemoryAccess,
        address: u64,
        mut buffer: &[u8],
    ) -> Result<(), XtensaError> {
        let mut addr = address as u32;

        // We store the unaligned head of the data separately. In case the core supports unaligned
        // load/store, we can just write the data directly.
        let mut address_loaded = false;
        if !addr.is_multiple_of(4)
            && !self
                .core_properties
                .memory_range_properties(address..address + buffer.len() as u64)
                .unaligned_store
        {
            // If the core does not support unaligned load/store, read-modify-write the first
            // few unaligned bytes. We are calculating `unaligned_bytes` so that we are not going
            // across a word boundary here.
            let unaligned_bytes = (4 - (addr % 4) as usize).min(buffer.len());
            let aligned_address = address & !0x3;
            let offset_in_word = address as usize % 4;

            // Read the aligned word
            let mut word = [0; 4];
            self.read_memory_impl(memory_access, aligned_address, &mut word)?;

            // Replace the written bytes.
            word[offset_in_word..][..unaligned_bytes].copy_from_slice(&buffer[..unaligned_bytes]);

            // Write the word back.
            memory_access.load_initial_address_for_write(self, aligned_address as u32)?;
            memory_access.write_one(self, u32::from_le_bytes(word))?;

            // Advance past the head; `addr` is now word-aligned.
            buffer = &buffer[unaligned_bytes..];
            addr += unaligned_bytes as u32;

            // The head write leaves the accessor positioned at the next word
            // (the word loop below relies on this and skips reloading the
            // address).
            address_loaded = true;
        }

        // Store whole words. If the core needs aligned accesses, the above block will have
        // already stored the first unaligned part.
        if buffer.len() >= 4 {
            if !address_loaded {
                memory_access.load_initial_address_for_write(self, addr)?;
            }
            let mut chunks = buffer.chunks_exact(4);
            for chunk in chunks.by_ref() {
                let mut word = [0; 4];
                word[..].copy_from_slice(chunk);
                let word = u32::from_le_bytes(word);

                memory_access.write_one(self, word)?;

                addr += 4;
            }

            // 0-3 leftover bytes that did not fill a whole word.
            buffer = chunks.remainder();
        }

        // We store the narrow tail of the data (1-3 bytes) separately.
        if !buffer.is_empty() {
            // We have 1-3 bytes left to write. If the core does not support unaligned load/store,
            // the above blocks took care of aligning `addr` so we don't have to worry about
            // crossing a word boundary here.

            // Read the aligned word
            let mut word = [0; 4];
            self.read_memory_impl(memory_access, addr as u64, &mut word)?;

            // Replace the written bytes.
            word[..buffer.len()].copy_from_slice(buffer);

            // Write the word back. We need to set the address because the read may have changed it.
            memory_access.load_initial_address_for_write(self, addr)?;
            memory_access.write_one(self, u32::from_le_bytes(word))?;
        }

        // TODO: implement cache flushing on CPUs that need it.

        Ok(())
    }
929
930    pub(crate) fn reset_and_halt(&mut self, timeout: Duration) -> Result<(), XtensaError> {
931        self.clear_register_cache();
932        self.xdm.reset_and_halt()?;
933        self.wait_for_core_halted(timeout)?;
934
935        // TODO: this is only necessary to run code, so this might not be the best place
936        // Make sure the CPU is in a known state and is able to run code we download.
937        self.write_register({
938            let mut ps = ProgramStatus(0);
939            ps.set_intlevel(0);
940            ps.set_user_mode(true);
941            ps.set_woe(true);
942            ps
943        })?;
944        self.state.register_cache = RegisterCache::new();
945
946        Ok(())
947    }
948
    /// Discards all cached register state by replacing the cache with a fresh,
    /// empty one.
    pub(crate) fn clear_register_cache(&mut self) {
        self.state.register_cache = RegisterCache::new();
    }
952
    /// Resolves a possibly-deferred register read into a concrete `u32` value,
    /// going through the register cache and the XDM connection.
    pub(crate) fn read_deferred_result(
        &mut self,
        result: MaybeDeferredResultIndex,
    ) -> Result<u32, XtensaError> {
        self.state.register_cache.resolve(result, &mut self.xdm)
    }
959}
960
961impl MemoryInterface for XtensaCommunicationInterface<'_> {
962    fn read(&mut self, address: u64, dst: &mut [u8]) -> Result<(), crate::Error> {
963        self.read_memory(address, dst)?;
964
965        Ok(())
966    }
967
968    fn supports_native_64bit_access(&mut self) -> bool {
969        false
970    }
971
972    fn read_word_64(&mut self, address: u64) -> Result<u64, crate::Error> {
973        let mut out = [0; 8];
974        self.read(address, &mut out)?;
975
976        Ok(u64::from_le_bytes(out))
977    }
978
979    fn read_word_32(&mut self, address: u64) -> Result<u32, crate::Error> {
980        let mut out = [0; 4];
981        self.read(address, &mut out)?;
982
983        Ok(u32::from_le_bytes(out))
984    }
985
986    fn read_word_16(&mut self, address: u64) -> Result<u16, crate::Error> {
987        let mut out = [0; 2];
988        self.read(address, &mut out)?;
989
990        Ok(u16::from_le_bytes(out))
991    }
992
993    fn read_word_8(&mut self, address: u64) -> Result<u8, crate::Error> {
994        let mut out = 0;
995        self.read(address, std::slice::from_mut(&mut out))?;
996        Ok(out)
997    }
998
999    fn read_64(&mut self, address: u64, data: &mut [u64]) -> Result<(), crate::Error> {
1000        self.read_8(address, data.as_mut_bytes())
1001    }
1002
1003    fn read_32(&mut self, address: u64, data: &mut [u32]) -> Result<(), crate::Error> {
1004        self.read_8(address, data.as_mut_bytes())
1005    }
1006
1007    fn read_16(&mut self, address: u64, data: &mut [u16]) -> Result<(), crate::Error> {
1008        self.read_8(address, data.as_mut_bytes())
1009    }
1010
1011    fn read_8(&mut self, address: u64, data: &mut [u8]) -> Result<(), crate::Error> {
1012        self.read(address, data)
1013    }
1014
1015    fn write(&mut self, address: u64, data: &[u8]) -> Result<(), crate::Error> {
1016        self.write_memory(address, data)?;
1017
1018        Ok(())
1019    }
1020
1021    fn write_word_64(&mut self, address: u64, data: u64) -> Result<(), crate::Error> {
1022        self.write(address, &data.to_le_bytes())
1023    }
1024
1025    fn write_word_32(&mut self, address: u64, data: u32) -> Result<(), crate::Error> {
1026        self.write(address, &data.to_le_bytes())
1027    }
1028
1029    fn write_word_16(&mut self, address: u64, data: u16) -> Result<(), crate::Error> {
1030        self.write(address, &data.to_le_bytes())
1031    }
1032
1033    fn write_word_8(&mut self, address: u64, data: u8) -> Result<(), crate::Error> {
1034        self.write(address, &[data])
1035    }
1036
1037    fn write_64(&mut self, address: u64, data: &[u64]) -> Result<(), crate::Error> {
1038        self.write_8(address, data.as_bytes())
1039    }
1040
1041    fn write_32(&mut self, address: u64, data: &[u32]) -> Result<(), crate::Error> {
1042        self.write_8(address, data.as_bytes())
1043    }
1044
1045    fn write_16(&mut self, address: u64, data: &[u16]) -> Result<(), crate::Error> {
1046        self.write_8(address, data.as_bytes())
1047    }
1048
1049    fn write_8(&mut self, address: u64, data: &[u8]) -> Result<(), crate::Error> {
1050        self.write(address, data)
1051    }
1052
1053    fn supports_8bit_transfers(&self) -> Result<bool, crate::Error> {
1054        Ok(true)
1055    }
1056
1057    fn flush(&mut self) -> Result<(), crate::Error> {
1058        Ok(())
1059    }
1060
1061    fn execute_memory_operations(&mut self, operations: &mut [Operation<'_>]) {
1062        if operations.is_empty() {
1063            return;
1064        }
1065        let result = self.fast_halted_access(|this| {
1066            for operation in operations.iter_mut() {
1067                let result = this.execute_single_memory_operation_halted(operation.reborrow());
1068                let success = result.is_ok();
1069                operation.result = Some(result);
1070                if !success {
1071                    break;
1072                }
1073            }
1074            Ok(())
1075        });
1076
1077        if result.is_err()
1078            && let Some(op) = operations.get_mut(0)
1079        {
1080            op.result = Some(result.map_err(ProbeRsError::Xtensa));
1081        }
1082    }
1083}
1084
/// An Xtensa core register with a typed `u32` representation.
///
/// Implemented for the register newtypes below via the `u32_register!` macro.
pub trait TypedRegister: Copy {
    /// Returns the register ID.
    fn register() -> Register;

    /// Creates a new register from the given value.
    fn from_u32(value: u32) -> Self;

    /// Returns the register value.
    fn as_u32(self) -> u32;
}
1096
// Implements `TypedRegister` for a `u32` newtype wrapper, binding it to the
// given register ID (anything convertible into `Register`).
macro_rules! u32_register {
    ($name:ident, $register:expr) => {
        impl TypedRegister for $name {
            fn register() -> Register {
                Register::from($register)
            }

            fn from_u32(value: u32) -> Self {
                Self(value)
            }

            fn as_u32(self) -> u32 {
                self.0
            }
        }
    };
}
1114
bitfield::bitfield! {
    /// The `DEBUGCAUSE` register.
    ///
    /// Each flag records a possible cause of the most recent debug exception.
    /// [`DebugCause::halt_reason`] maps these flags to a [`HaltReason`].
    #[derive(Copy, Clone)]
    pub struct DebugCause(u32);
    impl Debug;

    /// Instruction counter exception
    pub icount_exception,    set_icount_exception   : 0;

    /// Instruction breakpoint exception
    pub ibreak_exception,    set_ibreak_exception   : 1;

    /// Data breakpoint (watchpoint) exception
    pub dbreak_exception,    set_dbreak_exception   : 2;

    /// Break instruction exception
    pub break_instruction,   set_break_instruction  : 3;

    /// Narrow Break instruction exception
    pub break_n_instruction, set_break_n_instruction: 4;

    /// Debug interrupt exception
    pub debug_interrupt,     set_debug_interrupt    : 5;

    /// Data breakpoint number (bits 11:8)
    pub dbreak_num,          set_dbreak_num         : 11, 8;
}
u32_register!(DebugCause, SpecialRegister::DebugCause);
1143
1144impl DebugCause {
1145    /// Returns the reason why the core is halted.
1146    pub fn halt_reason(&self) -> HaltReason {
1147        let is_icount_exception = self.icount_exception();
1148        let is_ibreak_exception = self.ibreak_exception();
1149        let is_break_instruction = self.break_instruction();
1150        let is_break_n_instruction = self.break_n_instruction();
1151        let is_dbreak_exception = self.dbreak_exception();
1152        let is_debug_interrupt = self.debug_interrupt();
1153
1154        let is_breakpoint = is_break_instruction || is_break_n_instruction;
1155
1156        let count = is_icount_exception as u8
1157            + is_ibreak_exception as u8
1158            + is_break_instruction as u8
1159            + is_break_n_instruction as u8
1160            + is_dbreak_exception as u8
1161            + is_debug_interrupt as u8;
1162
1163        if count > 1 {
1164            tracing::debug!("DebugCause: {:?}", self);
1165
1166            // We cannot identify why the chip halted,
1167            // it could be for multiple reasons.
1168
1169            // For debuggers, it's important to know if
1170            // the core halted because of a breakpoint.
1171            // Because of this, we still return breakpoint
1172            // even if other reasons are possible as well.
1173            if is_breakpoint {
1174                HaltReason::Breakpoint(BreakpointCause::Unknown)
1175            } else {
1176                HaltReason::Multiple
1177            }
1178        } else if is_icount_exception {
1179            HaltReason::Step
1180        } else if is_ibreak_exception {
1181            HaltReason::Breakpoint(BreakpointCause::Hardware)
1182        } else if is_breakpoint {
1183            HaltReason::Breakpoint(BreakpointCause::Software)
1184        } else if is_dbreak_exception {
1185            HaltReason::Watchpoint
1186        } else if is_debug_interrupt {
1187            HaltReason::Request
1188        } else {
1189            HaltReason::Unknown
1190        }
1191    }
1192}
1193
bitfield::bitfield! {
    /// The `PS` (Program Status) register.
    ///
    /// The physical register depends on the debug level.
    #[derive(Copy, Clone)]
    pub struct ProgramStatus(u32);
    impl Debug;

    /// Interrupt level disable
    pub intlevel,  set_intlevel : 3, 0;

    /// Exception mode
    pub excm,      set_excm     : 4;

    /// User mode
    pub user_mode, set_user_mode: 5;

    /// Privilege level (when using the MMU option)
    pub ring,      set_ring     : 7, 6;

    /// Old window base
    pub owb,       set_owb      : 11, 8;

    /// Call increment
    pub callinc,   set_callinc  : 17, 16;

    /// Window overflow-detection enable
    pub woe,       set_woe      : 18;
}
u32_register!(ProgramStatus, Register::CurrentPs);
1224
// Simple `u32` register newtypes; `u32_register!` wires each one up to its
// register ID so they can be read/written through `TypedRegister`.

/// The `IBREAKEN` (Instruction Breakpoint Enable) register.
#[derive(Copy, Clone, Debug)]
pub struct IBreakEn(pub u32);
u32_register!(IBreakEn, SpecialRegister::IBreakEnable);

/// The `ICOUNT` (Instruction Counter) register.
#[derive(Copy, Clone, Debug)]
pub struct ICount(pub u32);
u32_register!(ICount, SpecialRegister::ICount);

/// The `ICOUNTLEVEL` (Instruction Count Level) register.
#[derive(Copy, Clone, Debug)]
pub struct ICountLevel(pub u32);
u32_register!(ICountLevel, SpecialRegister::ICountLevel);

/// The Program Counter register.
#[derive(Copy, Clone, Debug)]
pub struct ProgramCounter(pub u32);
u32_register!(ProgramCounter, Register::CurrentPc);
1244
/// Strategy for word-oriented access to target memory while the core is
/// halted. Implementations differ in which instructions they use
/// (see `FastMemoryAccess` and `SlowMemoryAccess`).
trait MemoryAccess {
    /// Runs `op` while the core is halted, passing `self` back to the
    /// callback as the accessor to use.
    fn halted_access(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        op: &mut dyn FnMut(
            &mut XtensaCommunicationInterface,
            &mut dyn MemoryAccess,
        ) -> Result<(), XtensaError>,
    ) -> Result<(), XtensaError>;

    /// Saves the CPU registers this access method will clobber, so they can
    /// be restored later.
    fn save_scratch_registers(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<(), XtensaError>;

    /// Sets the start address for a sequence of `read_one` calls.
    fn load_initial_address_for_read(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        address: u32,
    ) -> Result<(), XtensaError>;
    /// Sets the start address for a sequence of `write_one` calls.
    fn load_initial_address_for_write(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        address: u32,
    ) -> Result<(), XtensaError>;

    /// Schedules reading the next 32-bit word; the value is available later
    /// via the returned deferred result index.
    fn read_one(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<DeferredResultIndex, XtensaError>;

    /// Like `read_one`, but also kicks off the read of the following word
    /// where the implementation supports it.
    fn read_one_and_continue(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<DeferredResultIndex, XtensaError>;

    /// Schedules writing the next 32-bit word.
    fn write_one(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        data: u32,
    ) -> Result<(), XtensaError>;
}
1287
/// Memory access using LDDR32.P and SDDR32.P instructions.
///
/// Only the initial address has to be loaded into the scratch register;
/// subsequent `read_one`/`write_one` calls advance through memory without
/// reloading it.
struct FastMemoryAccess;
impl FastMemoryAccess {
    fn new() -> Self {
        Self
    }
}
1295impl MemoryAccess for FastMemoryAccess {
1296    fn halted_access(
1297        &mut self,
1298        interface: &mut XtensaCommunicationInterface,
1299        op: &mut dyn FnMut(
1300            &mut XtensaCommunicationInterface,
1301            &mut dyn MemoryAccess,
1302        ) -> Result<(), XtensaError>,
1303    ) -> Result<(), XtensaError> {
1304        interface.fast_halted_access(|this| op(this, self))
1305    }
1306
1307    fn save_scratch_registers(
1308        &mut self,
1309        interface: &mut XtensaCommunicationInterface,
1310    ) -> Result<(), XtensaError> {
1311        interface.ensure_register_saved(CpuRegister::A3)?;
1312        Ok(())
1313    }
1314
1315    fn load_initial_address_for_read(
1316        &mut self,
1317        interface: &mut XtensaCommunicationInterface,
1318        address: u32,
1319    ) -> Result<(), XtensaError> {
1320        // Write aligned address to the scratch register
1321        interface.schedule_write_cpu_register(CpuRegister::A3, address)?;
1322        interface
1323            .state
1324            .register_cache
1325            .mark_dirty(CpuRegister::A3.into());
1326
1327        // Read from address in the scratch register
1328        interface
1329            .xdm
1330            .schedule_execute_instruction(Instruction::Lddr32P(CpuRegister::A3));
1331
1332        Ok(())
1333    }
1334
1335    fn load_initial_address_for_write(
1336        &mut self,
1337        interface: &mut XtensaCommunicationInterface,
1338        address: u32,
1339    ) -> Result<(), XtensaError> {
1340        interface.schedule_write_cpu_register(CpuRegister::A3, address)?;
1341        interface
1342            .state
1343            .register_cache
1344            .mark_dirty(CpuRegister::A3.into());
1345
1346        interface
1347            .xdm
1348            .schedule_write_instruction(Instruction::Sddr32P(CpuRegister::A3));
1349
1350        Ok(())
1351    }
1352
1353    fn write_one(
1354        &mut self,
1355        interface: &mut XtensaCommunicationInterface,
1356        data: u32,
1357    ) -> Result<(), XtensaError> {
1358        interface.xdm.schedule_write_ddr_and_execute(data);
1359        Ok(())
1360    }
1361
1362    fn read_one(
1363        &mut self,
1364        interface: &mut XtensaCommunicationInterface,
1365    ) -> Result<DeferredResultIndex, XtensaError> {
1366        Ok(interface.xdm.schedule_read_ddr())
1367    }
1368
1369    fn read_one_and_continue(
1370        &mut self,
1371        interface: &mut XtensaCommunicationInterface,
1372    ) -> Result<DeferredResultIndex, XtensaError> {
1373        Ok(interface.xdm.schedule_read_ddr_and_execute())
1374    }
1375}
1376
/// Memory access without LDDR32.P and SDDR32.P instructions.
///
/// Reads are performed with `L32I` relative to a base address kept in A3;
/// writes use `S32I`, reloading the address registers for every word.
struct SlowMemoryAccess {
    // Base address currently (or next to be) loaded into the scratch register.
    current_address: u32,
    // Byte offset from `current_address` of the next L32I read; always a
    // multiple of 4.
    current_offset: u32,
    // Whether `current_address` has been written into scratch register A3.
    address_written: bool,
}
impl SlowMemoryAccess {
    fn new() -> Self {
        Self {
            current_address: 0,
            current_offset: 0,
            address_written: false,
        }
    }
}
1392
impl MemoryAccess for SlowMemoryAccess {
    fn halted_access(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        op: &mut dyn FnMut(
            &mut XtensaCommunicationInterface,
            &mut dyn MemoryAccess,
        ) -> Result<(), XtensaError>,
    ) -> Result<(), XtensaError> {
        // Run the callback inside the (generic, non-fast) halted-access window.
        interface.halted_access(|this| op(this, self))
    }

    fn save_scratch_registers(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<(), XtensaError> {
        // This access method uses A3 (address) and A4 (data), so both must be
        // saved.
        interface.ensure_register_saved(CpuRegister::A3)?;
        interface.ensure_register_saved(CpuRegister::A4)?;
        Ok(())
    }

    fn load_initial_address_for_read(
        &mut self,
        _interface: &mut XtensaCommunicationInterface,
        address: u32,
    ) -> Result<(), XtensaError> {
        // The address is only recorded here; it is written to the scratch
        // register lazily by `read_one`.
        self.current_address = address;

        Ok(())
    }

    fn load_initial_address_for_write(
        &mut self,
        _interface: &mut XtensaCommunicationInterface,
        address: u32,
    ) -> Result<(), XtensaError> {
        // The address is only recorded here; `write_one` reloads A3 for
        // every word.
        self.current_address = address;

        Ok(())
    }

    fn read_one(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<DeferredResultIndex, XtensaError> {
        // Lazily (re)load the base address into A3: on the first read and
        // after every 1 KiB offset rollover.
        if !self.address_written {
            interface.schedule_write_cpu_register(CpuRegister::A3, self.current_address)?;
            interface
                .state
                .register_cache
                .mark_dirty(CpuRegister::A3.into());
            self.current_offset = 0;
            self.address_written = true;
        }

        // L32I encodes its offset in words, hence the division by 4.
        interface
            .xdm
            .schedule_execute_instruction(Instruction::L32I(
                CpuRegister::A3,
                CpuRegister::A4,
                (self.current_offset / 4) as u8,
            ));
        self.current_offset += 4;

        // A4 now holds target data and must be restored later.
        interface
            .state
            .register_cache
            .mark_dirty(CpuRegister::A4.into());

        if self.current_offset == 1024 {
            // The maximum offset for L32I is 1020, so we need to
            // increment the base address and reset the offset.
            self.current_address += self.current_offset;
            self.current_offset = 0;
            self.address_written = false;
        }

        Ok(interface.schedule_read_cpu_register(CpuRegister::A4))
    }

    fn read_one_and_continue(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<DeferredResultIndex, XtensaError> {
        // No pipelined variant without LDDR32.P; fall back to a plain read.
        self.read_one(interface)
    }

    fn write_one(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        data: u32,
    ) -> Result<(), XtensaError> {
        // Store address and data
        interface.schedule_write_cpu_register(CpuRegister::A3, self.current_address)?;
        interface
            .state
            .register_cache
            .mark_dirty(CpuRegister::A3.into());

        interface.schedule_write_cpu_register(CpuRegister::A4, data)?;
        interface
            .state
            .register_cache
            .mark_dirty(CpuRegister::A4.into());

        // Increment address
        self.current_address += 4;

        // Store A4 into address A3
        interface
            .xdm
            .schedule_execute_instruction(Instruction::S32I(CpuRegister::A3, CpuRegister::A4, 0));

        Ok(())
    }
}
1509
/// A register read result that is either immediately available or must be
/// resolved later through the register cache.
pub(crate) enum MaybeDeferredResultIndex {
    /// The result is already available.
    Value(u32),

    /// The result is deferred and can be accessed via the register cache.
    Deferred(Register),
}