1use std::{
4 collections::HashMap,
5 ops::Range,
6 time::{Duration, Instant},
7};
8
9use zerocopy::IntoBytes;
10
11use crate::{
12 BreakpointCause, Error as ProbeRsError, HaltReason, MemoryInterface,
13 architecture::xtensa::{
14 arch::{CpuRegister, Register, SpecialRegister, instruction::Instruction},
15 register_cache::RegisterCache,
16 xdm::{DebugStatus, XdmState},
17 },
18 probe::{DebugProbeError, DeferredResultIndex, JtagAccess},
19};
20
21use super::xdm::{Error as XdmError, Xdm};
22
/// Xtensa-specific interface error.
#[derive(thiserror::Error, Debug, docsplay::Display)]
pub enum XtensaError {
    /// An error originating from the debug probe.
    DebugProbe(#[from] DebugProbeError),

    /// An error originating from the Xtensa Debug Module.
    XdmError(#[from] XdmError),

    /// The core is disabled.
    CoreDisabled,

    /// The operation has timed out.
    Timeout,

    /// The connected target is not an Xtensa device.
    NoXtensaTarget,

    /// The requested register is not available.
    RegisterNotAvailable,

    /// The result of a batched command is not available.
    BatchedResultNotAvailable,
}
48
49impl From<XtensaError> for ProbeRsError {
50 fn from(err: XtensaError) -> Self {
51 match err {
52 XtensaError::DebugProbe(e) => e.into(),
53 other => ProbeRsError::Xtensa(other),
54 }
55 }
56}
57
/// The debug level of the core: the interrupt level at which the debug
/// exception/interrupt is taken.
#[derive(Clone, Copy)]
pub enum DebugLevel {
    /// Debug level 2.
    L2 = 2,
    /// Debug level 3.
    L3 = 3,
    /// Debug level 4.
    L4 = 4,
    /// Debug level 5.
    L5 = 5,
    /// Debug level 6.
    L6 = 6,
    /// Debug level 7.
    L7 = 7,
}
74
impl DebugLevel {
    /// Returns the EPCx special register that holds the program counter at
    /// this debug level.
    pub fn pc(self) -> SpecialRegister {
        match self {
            DebugLevel::L2 => SpecialRegister::Epc2,
            DebugLevel::L3 => SpecialRegister::Epc3,
            DebugLevel::L4 => SpecialRegister::Epc4,
            DebugLevel::L5 => SpecialRegister::Epc5,
            DebugLevel::L6 => SpecialRegister::Epc6,
            DebugLevel::L7 => SpecialRegister::Epc7,
        }
    }

    /// Returns the EPSx special register that holds the program status at
    /// this debug level.
    pub fn ps(self) -> SpecialRegister {
        match self {
            DebugLevel::L2 => SpecialRegister::Eps2,
            DebugLevel::L3 => SpecialRegister::Eps3,
            DebugLevel::L4 => SpecialRegister::Eps4,
            DebugLevel::L5 => SpecialRegister::Eps5,
            DebugLevel::L6 => SpecialRegister::Eps6,
            DebugLevel::L7 => SpecialRegister::Eps7,
        }
    }
}
100
/// Mutable state of the Xtensa interface, persisted across operations.
#[derive(Default)]
pub(super) struct XtensaInterfaceState {
    /// Cache of register values read or written while the core is halted.
    pub(super) register_cache: RegisterCache,

    /// Whether the core is halted, as far as this interface knows.
    pub(super) is_halted: bool,
}
111
/// Access capabilities of a memory region.
#[derive(Clone, Copy, Default)]
pub struct MemoryRegionProperties {
    /// Whether unaligned stores are supported in this region.
    pub unaligned_store: bool,

    /// Whether unaligned loads are supported in this region.
    pub unaligned_load: bool,

    /// Whether the fast memory access scheme may be used in this region.
    pub fast_memory_access: bool,
}
124
/// Configuration and capabilities of an Xtensa core.
pub struct XtensaCoreProperties {
    /// The number of hardware breakpoint units.
    pub hw_breakpoint_num: u32,

    /// The debug level of the core.
    pub debug_level: DebugLevel,

    /// Access properties keyed by the address range they apply to.
    pub memory_ranges: HashMap<Range<u64>, MemoryRegionProperties>,

    /// Register-window configuration of the core.
    pub window_option_properties: WindowProperties,
}
139
140impl Default for XtensaCoreProperties {
141 fn default() -> Self {
142 Self {
143 hw_breakpoint_num: 2,
144 debug_level: DebugLevel::L6,
145 memory_ranges: HashMap::new(),
146 window_option_properties: WindowProperties::lx(64),
147 }
148 }
149}
150
151impl XtensaCoreProperties {
152 pub fn memory_properties_at(&self, address: u64) -> MemoryRegionProperties {
154 self.memory_ranges
155 .iter()
156 .find(|(range, _)| range.contains(&address))
157 .map(|(_, region)| *region)
158 .unwrap_or_default()
159 }
160
161 pub fn memory_range_properties(&self, range: Range<u64>) -> MemoryRegionProperties {
163 let mut start = range.start;
164 let end = range.end;
165
166 if start == end {
167 return MemoryRegionProperties::default();
168 }
169
170 let mut properties = MemoryRegionProperties {
171 unaligned_store: true,
172 unaligned_load: true,
173 fast_memory_access: true,
174 };
175 while start < end {
176 let containing_region = self
178 .memory_ranges
179 .iter()
180 .find(|(range, _)| range.contains(&start));
181
182 let Some((range, region_properties)) = containing_region else {
183 return MemoryRegionProperties::default();
185 };
186
187 properties.unaligned_store &= region_properties.unaligned_store;
188 properties.unaligned_load &= region_properties.unaligned_load;
189 properties.fast_memory_access &= region_properties.fast_memory_access;
190
191 start = range.end;
193 }
194
195 properties
196 }
197}
198
/// Register-window configuration of the core.
#[derive(Clone, Copy, Debug)]
pub struct WindowProperties {
    /// Whether the core implements windowed registers.
    pub has_windowed_registers: bool,

    /// The total number of physical address registers (ARs).
    pub num_aregs: u8,

    /// The number of address registers visible at a time (window size).
    pub window_regs: u8,

    /// The number of registers the window rotates by in one step.
    pub rotw_rotates: u8,
}
214
215impl WindowProperties {
216 pub fn lx(num_aregs: u8) -> Self {
218 Self {
219 has_windowed_registers: true,
220 num_aregs,
221 window_regs: 16,
222 rotw_rotates: 4,
223 }
224 }
225
226 pub fn windowbase_size(&self) -> u8 {
228 self.num_aregs / self.rotw_rotates
229 }
230}
231
/// All mutable state of the Xtensa debug interface, bundled so it can be kept
/// between sessions.
#[derive(Default)]
pub struct XtensaDebugInterfaceState {
    // High-level interface state (register cache, halt flag).
    interface_state: XtensaInterfaceState,
    // Core configuration for the connected target.
    core_properties: XtensaCoreProperties,
    // State of the Xtensa Debug Module driver.
    xdm_state: XdmState,
}
239
/// The high-level Xtensa debug interface, built on top of the XDM driver.
pub struct XtensaCommunicationInterface<'probe> {
    /// The Xtensa Debug Module driver.
    pub(crate) xdm: Xdm<'probe>,
    /// Interface state (register cache, halt status).
    pub(super) state: &'probe mut XtensaInterfaceState,
    // Core configuration for the connected target.
    core_properties: &'probe mut XtensaCoreProperties,
}
249
250impl<'probe> XtensaCommunicationInterface<'probe> {
    /// Creates an interface from a JTAG probe driver and the persistent
    /// per-target interface state.
    pub fn new(
        probe: &'probe mut dyn JtagAccess,
        state: &'probe mut XtensaDebugInterfaceState,
    ) -> Self {
        let XtensaDebugInterfaceState {
            interface_state,
            core_properties,
            xdm_state,
        } = state;
        let xdm = Xdm::new(probe, xdm_state);

        Self {
            xdm,
            state: interface_state,
            core_properties,
        }
    }
269
    /// Returns a mutable view of the core's configuration properties.
    pub fn core_properties(&mut self) -> &mut XtensaCoreProperties {
        self.core_properties
    }
274
    /// Reads the target's IDCODE through the debug module.
    pub fn read_idcode(&mut self) -> Result<u32, XtensaError> {
        self.xdm.read_idcode()
    }
279
    /// Enters debug (OCD) mode and records whether the core is already halted.
    pub fn enter_debug_mode(&mut self) -> Result<(), XtensaError> {
        self.xdm.enter_debug_mode()?;

        self.state.is_halted = self.xdm.status()?.stopped();

        Ok(())
    }
288
    /// Leaves OCD mode. If the core is halted, its registers are restored and
    /// it is resumed first.
    pub(crate) fn leave_debug_mode(&mut self) -> Result<(), XtensaError> {
        if self.xdm.status()?.stopped() {
            self.restore_registers()?;
            self.resume_core()?;
        }
        self.xdm.leave_ocd_mode()?;

        tracing::debug!("Left OCD mode");

        Ok(())
    }
300
    /// Returns the number of hardware breakpoint units the core provides.
    pub fn available_breakpoint_units(&self) -> u32 {
        self.core_properties.hw_breakpoint_num
    }
307
    /// Returns whether the core is halted, querying the hardware only when the
    /// cached state says it is running.
    pub fn core_halted(&mut self) -> Result<bool, XtensaError> {
        if !self.state.is_halted {
            self.state.is_halted = self.xdm.status()?.stopped();
        }

        Ok(self.state.is_halted)
    }
316
317 pub fn wait_for_core_halted(&mut self, timeout: Duration) -> Result<(), XtensaError> {
321 let start = Instant::now();
323
324 while !self.core_halted()? {
325 if start.elapsed() >= timeout {
326 return Err(XtensaError::Timeout);
327 }
328 std::thread::sleep(Duration::from_millis(1));
330 }
331
332 Ok(())
333 }
334
    /// Requests a halt and waits until the core actually stops.
    pub(crate) fn halt(&mut self, timeout: Duration) -> Result<(), XtensaError> {
        self.xdm.schedule_halt();
        self.wait_for_core_halted(timeout)?;
        Ok(())
    }
341
    /// Halts the core and returns whether it was running beforehand.
    ///
    /// The pre-halt status read is scheduled before the halt request so that
    /// it reflects the state the core was in just before halting.
    pub(crate) fn halt_with_previous(&mut self, timeout: Duration) -> Result<bool, XtensaError> {
        let was_running = if self.state.is_halted {
            // We already knew the core to be halted.
            false
        } else {
            // Queue the status read ahead of the halt in the same batch.
            let status_idx = self.xdm.schedule_read_nexus_register::<DebugStatus>();

            self.halt(timeout)?;
            let before_status = DebugStatus(self.xdm.read_deferred_result(status_idx)?.into_u32());

            !before_status.stopped()
        };

        Ok(was_running)
    }
359
    /// Executes `op` with the core halted, optimistically assuming the halt
    /// takes effect so everything can go out in a single command batch.
    ///
    /// If the halt turns out not to have taken effect while `op` ran, the
    /// operation is retried through the safe [`Self::halted_access`] path.
    fn fast_halted_access(
        &mut self,
        mut op: impl FnMut(&mut Self) -> Result<(), XtensaError>,
    ) -> Result<(), XtensaError> {
        if self.state.is_halted {
            // Already halted, nothing to arrange.
            return op(self);
        }

        // Capture the pre-halt status in the same batch as the halt request.
        let status_idx = self.xdm.schedule_read_nexus_register::<DebugStatus>();

        self.xdm.schedule_halt();

        // Status right after the halt request, used to verify it worked.
        let is_halted_idx = self.xdm.schedule_read_nexus_register::<DebugStatus>();
        // Optimistically assume the halt will take effect.
        self.state.is_halted = true;

        let result = op(self);

        // Resume if the core was running before we requested the halt.
        let before_status = DebugStatus(self.xdm.read_deferred_result(status_idx)?.into_u32());
        if !before_status.stopped() {
            self.resume_core()?;
        }

        let after_status = DebugStatus(self.xdm.read_deferred_result(is_halted_idx)?.into_u32());

        if after_status.stopped() {
            // The optimistic halt worked, so `op`'s result is valid.
            return result;
        }
        // The core was not actually halted while `op` ran; redo it safely.
        self.state.is_halted = false;
        self.halted_access(|this| op(this))
    }
399
    /// Executes `op` with the core halted, then resumes the core if it was
    /// running before the call.
    pub fn halted_access<R>(
        &mut self,
        op: impl FnOnce(&mut Self) -> Result<R, XtensaError>,
    ) -> Result<R, XtensaError> {
        let was_running = self.halt_with_previous(Duration::from_millis(100))?;

        let result = op(self);

        if was_running {
            self.resume_core()?;
        }

        result
    }
415
    /// Steps the core by `by` instructions, counting instructions executed
    /// below the given interrupt level.
    pub fn step(&mut self, by: u32, intlevel: u32) -> Result<(), XtensaError> {
        // Count instructions executed below this level.
        self.schedule_write_register(ICountLevel(intlevel + 1))?;

        // Set ICOUNT so that it overflows (raising the debug exception) after
        // the requested number of instructions.
        self.schedule_write_register(ICount(-((1 + by) as i32) as u32))?;

        self.resume_core()?;
        match self.wait_for_core_halted(Duration::from_millis(100)) {
            Ok(()) => {}
            // The core did not halt on its own in time - force a halt.
            Err(XtensaError::Timeout) => self.halt(Duration::from_millis(100))?,
            Err(e) => return Err(e),
        }

        // Set ICOUNTLEVEL to 0 so no further instructions are counted.
        self.schedule_write_register(ICountLevel(0))?;

        Ok(())
    }
440
    /// Resumes program execution, restoring any registers the debugger has
    /// clobbered.
    pub fn resume_core(&mut self) -> Result<(), XtensaError> {
        self.restore_registers()?;
        // Once the core runs, cached register values become stale.
        self.clear_register_cache();

        tracing::debug!("Resuming core");
        self.state.is_halted = false;
        self.xdm.resume()?;

        Ok(())
    }
455
    /// Schedules reading a CPU (address) register by moving it through the DDR
    /// special register.
    fn schedule_read_cpu_register(&mut self, register: CpuRegister) -> DeferredResultIndex {
        self.xdm
            .schedule_execute_instruction(Instruction::Wsr(SpecialRegister::Ddr, register));
        self.xdm.schedule_read_ddr()
    }
461
    /// Schedules reading a special register, using A3 as a scratch register.
    fn schedule_read_special_register(
        &mut self,
        register: SpecialRegister,
    ) -> Result<DeferredResultIndex, XtensaError> {
        // Save A3 so it can be restored; it gets clobbered below.
        self.ensure_register_saved(CpuRegister::A3)?;
        self.state.register_cache.mark_dirty(CpuRegister::A3.into());

        // Move the special register into the scratch register...
        self.xdm
            .schedule_execute_instruction(Instruction::Rsr(register, CpuRegister::A3));

        // ...then read the scratch register back through DDR.
        Ok(self.schedule_read_cpu_register(CpuRegister::A3))
    }
475
    /// Schedules writing a special register, using A3 as a scratch register.
    fn schedule_write_special_register(
        &mut self,
        register: SpecialRegister,
        value: u32,
    ) -> Result<(), XtensaError> {
        tracing::debug!("Writing special register: {:?}", register);
        // Save A3 so it can be restored; it gets clobbered below.
        self.ensure_register_saved(CpuRegister::A3)?;
        self.state.register_cache.mark_dirty(CpuRegister::A3.into());

        self.xdm.schedule_write_ddr(value);

        // DDR -> scratch register...
        self.xdm
            .schedule_execute_instruction(Instruction::Rsr(SpecialRegister::Ddr, CpuRegister::A3));

        // ...scratch register -> target special register.
        self.xdm
            .schedule_execute_instruction(Instruction::Wsr(register, CpuRegister::A3));

        Ok(())
    }
497
    /// Schedules writing a CPU (address) register through DDR.
    #[tracing::instrument(skip(self))]
    fn schedule_write_cpu_register(
        &mut self,
        register: CpuRegister,
        value: u32,
    ) -> Result<(), XtensaError> {
        tracing::debug!("Writing {:x} to register: {:?}", value, register);

        self.xdm.schedule_write_ddr(value);
        self.xdm
            .schedule_execute_instruction(Instruction::Rsr(SpecialRegister::Ddr, register));

        Ok(())
    }
512
513 pub fn read_register<R: TypedRegister>(&mut self) -> Result<R, XtensaError> {
515 let value = self.read_register_untyped(R::register())?;
516
517 Ok(R::from_u32(value))
518 }
519
520 pub fn write_register<R: TypedRegister>(&mut self, reg: R) -> Result<(), XtensaError> {
522 self.write_register_untyped(R::register(), reg.as_u32())?;
523
524 Ok(())
525 }
526
527 pub(crate) fn schedule_write_register<R: TypedRegister>(
529 &mut self,
530 reg: R,
531 ) -> Result<(), XtensaError> {
532 self.schedule_write_register_untyped(R::register(), reg.as_u32())?;
533
534 Ok(())
535 }
536
    /// Schedules reading a register, returning either the cached value or a
    /// deferred index for the scheduled hardware read.
    pub(crate) fn schedule_read_register(
        &mut self,
        register: impl Into<Register>,
    ) -> Result<MaybeDeferredResultIndex, XtensaError> {
        let register = register.into();
        // Serve the read from the cache when possible.
        if let Some(entry) = self.state.register_cache.get_mut(register) {
            return Ok(MaybeDeferredResultIndex::Value(entry.current_value()));
        }

        let reader = match register {
            Register::Cpu(register) => self.schedule_read_cpu_register(register),
            Register::Special(register) => self.schedule_read_special_register(register)?,
            // The "current" PC/PS live in the EPCx/EPSx registers of the
            // configured debug level.
            Register::CurrentPc => {
                self.schedule_read_special_register(self.core_properties.debug_level.pc())?
            }
            Register::CurrentPs => {
                self.schedule_read_special_register(self.core_properties.debug_level.ps())?
            }
        };
        Ok(MaybeDeferredResultIndex::Deferred(reader))
    }
561
    /// Reads a register and returns its raw value, resolving any scheduled
    /// commands immediately.
    pub fn read_register_untyped(
        &mut self,
        register: impl Into<Register>,
    ) -> Result<u32, XtensaError> {
        let register = register.into();
        // Serve the read from the cache when possible.
        if let Some(entry) = self.state.register_cache.get_mut(register) {
            return Ok(entry.current_value());
        }

        match self.schedule_read_register(register)? {
            MaybeDeferredResultIndex::Value(value) => Ok(value),
            MaybeDeferredResultIndex::Deferred(reader) => {
                let value = self.xdm.read_deferred_result(reader)?.into_u32();
                // Cache the value so later reads don't touch the hardware.
                self.state.register_cache.store(register, value);
                Ok(value)
            }
        }
    }
582
    /// Schedules writing a raw register value.
    ///
    /// The new value is recorded in the register cache so subsequent reads
    /// observe it without a hardware access.
    pub fn schedule_write_register_untyped(
        &mut self,
        register: impl Into<Register>,
        value: u32,
    ) -> Result<(), XtensaError> {
        let register = register.into();

        self.state.register_cache.store(register, value);

        match register {
            Register::Cpu(register) => self.schedule_write_cpu_register(register, value),
            Register::Special(register) => self.schedule_write_special_register(register, value),
            // The "current" PC/PS live in the EPCx/EPSx registers of the
            // configured debug level.
            Register::CurrentPc => {
                self.schedule_write_special_register(self.core_properties.debug_level.pc(), value)
            }
            Register::CurrentPs => {
                self.schedule_write_special_register(self.core_properties.debug_level.ps(), value)
            }
        }
    }
607
    /// Writes a raw register value and executes the scheduled commands
    /// immediately.
    pub fn write_register_untyped(
        &mut self,
        register: impl Into<Register>,
        value: u32,
    ) -> Result<(), XtensaError> {
        self.schedule_write_register_untyped(register, value)?;
        self.xdm.execute()
    }
617
    /// Ensures the register's original value is present in the cache so it
    /// can be restored before the core resumes.
    #[tracing::instrument(skip(self, register), fields(register))]
    fn ensure_register_saved(&mut self, register: impl Into<Register>) -> Result<(), XtensaError> {
        let register = register.into();

        tracing::debug!("Saving register: {:?}", register);
        // Reading the register populates the cache with its original value.
        self.read_register_untyped(register)?;

        Ok(())
    }
628
    /// Writes back the original values of all registers the debugger dirtied.
    ///
    /// Special registers are restored first: writing a special register goes
    /// through the A3 scratch register (and marks it dirty), so CPU registers
    /// must be restored in a second pass.
    #[tracing::instrument(skip(self))]
    pub(super) fn restore_registers(&mut self) -> Result<(), XtensaError> {
        tracing::debug!("Restoring registers");

        let filters = [
            // First pass: non-CPU (special) registers.
            |r: &Register| !r.is_cpu_register(),
            // Second pass: CPU registers, including the A3 scratch register.
            |r: &Register| r.is_cpu_register(),
        ];
        for filter in filters {
            // Collect first; writing registers below needs `&mut self`.
            let dirty_regs = self
                .state
                .register_cache
                .iter_mut()
                .filter(|(r, entry)| entry.is_dirty() && filter(r))
                .map(|(r, _)| r)
                .collect::<Vec<_>>();

            for register in dirty_regs {
                let entry = self
                    .state
                    .register_cache
                    .get_mut(register)
                    .unwrap_or_else(|| panic!("Register {register:?} is not in the cache"));

                let value = entry.original_value();
                self.schedule_write_register_untyped(register, value)?;
            }
        }

        Ok(())
    }
666
    /// Picks the memory access strategy for the given range based on the
    /// configured memory region properties.
    fn memory_access_for(&self, address: u64, len: usize) -> Box<dyn MemoryAccess> {
        if self
            .core_properties
            .memory_range_properties(address..address + len as u64)
            .fast_memory_access
        {
            Box::new(FastMemoryAccess::new())
        } else {
            Box::new(SlowMemoryAccess::new())
        }
    }
678
    /// Reads `dst.len()` bytes from `address` into `dst`, halting the core
    /// for the duration of the access if necessary.
    fn read_memory(&mut self, address: u64, dst: &mut [u8]) -> Result<(), XtensaError> {
        tracing::debug!("Reading {} bytes from address {:08x}", dst.len(), address);
        if dst.is_empty() {
            return Ok(());
        }

        let mut memory_access = self.memory_access_for(address, dst.len());

        memory_access.halted_access(self, &mut |this, memory_access| {
            memory_access.save_scratch_registers(this)?;
            this.read_memory_impl(memory_access, address, dst)
        })
    }
692
    /// Implements a memory read using the given access strategy.
    ///
    /// Data is transferred in 32-bit words. If the start address is unaligned
    /// and the region does not support unaligned loads, the first word is read
    /// from the aligned-down address and only the requested bytes are copied.
    fn read_memory_impl(
        &mut self,
        memory_access: &mut dyn MemoryAccess,
        address: u64,
        mut dst: &mut [u8],
    ) -> Result<(), XtensaError> {
        let mut to_read = dst.len();

        // Handle an unaligned start when the region can't do unaligned loads.
        let first_read = if !address.is_multiple_of(4)
            && !self
                .core_properties
                .memory_range_properties(address..address + dst.len() as u64)
                .unaligned_load
        {
            memory_access.load_initial_address_for_read(self, address as u32 & !0x3)?;
            let offset = address as usize % 4;

            // Only continue the access if more than this word is needed.
            let first_read = if offset + to_read <= 4 {
                memory_access.read_one(self)?
            } else {
                memory_access.read_one_and_continue(self)?
            };

            let bytes_to_copy = (4 - offset).min(to_read);

            to_read -= bytes_to_copy;

            Some((first_read, offset, bytes_to_copy))
        } else {
            memory_access.load_initial_address_for_read(self, address as u32)?;
            None
        };

        // Schedule the remaining word reads; all but the last one continue
        // the access.
        let mut aligned_reads = vec![];
        if to_read > 0 {
            let words = to_read.div_ceil(4);

            for _ in 0..words - 1 {
                aligned_reads.push(memory_access.read_one_and_continue(self)?);
            }
            aligned_reads.push(memory_access.read_one(self)?);
        };

        // Resolve the deferred word results and scatter bytes into `dst`.
        if let Some((read, offset, bytes_to_copy)) = first_read {
            let word = self
                .xdm
                .read_deferred_result(read)?
                .into_u32()
                .to_le_bytes();

            dst[..bytes_to_copy].copy_from_slice(&word[offset..][..bytes_to_copy]);
            dst = &mut dst[bytes_to_copy..];
        }

        for read in aligned_reads {
            let word = self
                .xdm
                .read_deferred_result(read)?
                .into_u32()
                .to_le_bytes();

            // The final word may be partial.
            let bytes = dst.len().min(4);

            dst[..bytes].copy_from_slice(&word[..bytes]);
            dst = &mut dst[bytes..];
        }

        Ok(())
    }
766
    /// Writes `data` to `address`, halting the core for the duration of the
    /// access if necessary.
    pub(crate) fn write_memory(&mut self, address: u64, data: &[u8]) -> Result<(), XtensaError> {
        tracing::debug!("Writing {} bytes to address {:08x}", data.len(), address);
        if data.is_empty() {
            return Ok(());
        }

        let mut memory_access = self.memory_access_for(address, data.len());

        memory_access.halted_access(self, &mut |this, memory_access| {
            memory_access.save_scratch_registers(this)?;
            this.write_memory_impl(memory_access, address, data)
        })
    }
780
    /// Implements a memory write using the given access strategy.
    ///
    /// Partial words at either end of the buffer are handled by reading the
    /// containing word, merging in the new bytes and writing it back.
    fn write_memory_impl(
        &mut self,
        memory_access: &mut dyn MemoryAccess,
        address: u64,
        mut buffer: &[u8],
    ) -> Result<(), XtensaError> {
        let mut addr = address as u32;

        let mut address_loaded = false;
        // Handle an unaligned start when the region can't do unaligned
        // stores: read-modify-write the first word.
        if !addr.is_multiple_of(4)
            && !self
                .core_properties
                .memory_range_properties(address..address + buffer.len() as u64)
                .unaligned_store
        {
            let unaligned_bytes = (4 - (addr % 4) as usize).min(buffer.len());
            let aligned_address = address & !0x3;
            let offset_in_word = address as usize % 4;

            // Read the containing word, then splice in the new bytes.
            let mut word = [0; 4];
            self.read_memory_impl(memory_access, aligned_address, &mut word)?;

            word[offset_in_word..][..unaligned_bytes].copy_from_slice(&buffer[..unaligned_bytes]);

            memory_access.load_initial_address_for_write(self, aligned_address as u32)?;
            memory_access.write_one(self, u32::from_le_bytes(word))?;

            buffer = &buffer[unaligned_bytes..];
            addr += unaligned_bytes as u32;

            // The write address is already set up for the following words.
            address_loaded = true;
        }

        // Write as many whole words as possible.
        if buffer.len() >= 4 {
            if !address_loaded {
                memory_access.load_initial_address_for_write(self, addr)?;
            }
            let mut chunks = buffer.chunks_exact(4);
            for chunk in chunks.by_ref() {
                let mut word = [0; 4];
                word[..].copy_from_slice(chunk);
                let word = u32::from_le_bytes(word);

                memory_access.write_one(self, word)?;

                addr += 4;
            }

            buffer = chunks.remainder();
        }

        // Handle a trailing partial word with another read-modify-write.
        if !buffer.is_empty() {
            let mut word = [0; 4];
            self.read_memory_impl(memory_access, addr as u64, &mut word)?;

            word[..buffer.len()].copy_from_slice(buffer);

            memory_access.load_initial_address_for_write(self, addr)?;
            memory_access.write_one(self, u32::from_le_bytes(word))?;
        }

        Ok(())
    }
864
    /// Resets the core, halts it, and prepares the PS register for debugging.
    pub(crate) fn reset_and_halt(&mut self, timeout: Duration) -> Result<(), XtensaError> {
        // Cached register values are invalid after a reset.
        self.clear_register_cache();
        self.xdm.reset_and_halt()?;
        self.wait_for_core_halted(timeout)?;

        // Set up PS: interrupt level 0, user vector mode, window overflow
        // detection enabled.
        self.write_register({
            let mut ps = ProgramStatus(0);
            ps.set_intlevel(0);
            ps.set_user_mode(true);
            ps.set_woe(true);
            ps
        })?;
        self.restore_registers()?;

        Ok(())
    }
883
    /// Drops all cached register values.
    pub(crate) fn clear_register_cache(&mut self) {
        self.state.register_cache = RegisterCache::new();
    }
887
    /// Resolves a possibly-deferred register read into its raw value.
    pub(crate) fn read_deferred_result(
        &mut self,
        result: MaybeDeferredResultIndex,
    ) -> Result<u32, XtensaError> {
        match result {
            MaybeDeferredResultIndex::Value(value) => Ok(value),
            MaybeDeferredResultIndex::Deferred(deferred_result_index) => self
                .xdm
                .read_deferred_result(deferred_result_index)
                .map(|r| r.into_u32()),
        }
    }
900}
901
impl MemoryInterface for XtensaCommunicationInterface<'_> {
    fn read(&mut self, address: u64, dst: &mut [u8]) -> Result<(), crate::Error> {
        self.read_memory(address, dst)?;

        Ok(())
    }

    fn supports_native_64bit_access(&mut self) -> bool {
        // All accesses are implemented on top of byte-oriented reads/writes.
        false
    }

    fn read_word_64(&mut self, address: u64) -> Result<u64, crate::Error> {
        let mut out = [0; 8];
        self.read(address, &mut out)?;

        Ok(u64::from_le_bytes(out))
    }

    fn read_word_32(&mut self, address: u64) -> Result<u32, crate::Error> {
        let mut out = [0; 4];
        self.read(address, &mut out)?;

        Ok(u32::from_le_bytes(out))
    }

    fn read_word_16(&mut self, address: u64) -> Result<u16, crate::Error> {
        let mut out = [0; 2];
        self.read(address, &mut out)?;

        Ok(u16::from_le_bytes(out))
    }

    fn read_word_8(&mut self, address: u64) -> Result<u8, crate::Error> {
        let mut out = 0;
        self.read(address, std::slice::from_mut(&mut out))?;
        Ok(out)
    }

    // Multi-word reads reinterpret the destination as a byte buffer and defer
    // to the byte-level implementation.
    fn read_64(&mut self, address: u64, data: &mut [u64]) -> Result<(), crate::Error> {
        self.read_8(address, data.as_mut_bytes())
    }

    fn read_32(&mut self, address: u64, data: &mut [u32]) -> Result<(), crate::Error> {
        self.read_8(address, data.as_mut_bytes())
    }

    fn read_16(&mut self, address: u64, data: &mut [u16]) -> Result<(), crate::Error> {
        self.read_8(address, data.as_mut_bytes())
    }

    fn read_8(&mut self, address: u64, data: &mut [u8]) -> Result<(), crate::Error> {
        self.read(address, data)
    }

    fn write(&mut self, address: u64, data: &[u8]) -> Result<(), crate::Error> {
        self.write_memory(address, data)?;

        Ok(())
    }

    fn write_word_64(&mut self, address: u64, data: u64) -> Result<(), crate::Error> {
        self.write(address, &data.to_le_bytes())
    }

    fn write_word_32(&mut self, address: u64, data: u32) -> Result<(), crate::Error> {
        self.write(address, &data.to_le_bytes())
    }

    fn write_word_16(&mut self, address: u64, data: u16) -> Result<(), crate::Error> {
        self.write(address, &data.to_le_bytes())
    }

    fn write_word_8(&mut self, address: u64, data: u8) -> Result<(), crate::Error> {
        self.write(address, &[data])
    }

    // Multi-word writes reinterpret the source as a byte buffer and defer to
    // the byte-level implementation.
    fn write_64(&mut self, address: u64, data: &[u64]) -> Result<(), crate::Error> {
        self.write_8(address, data.as_bytes())
    }

    fn write_32(&mut self, address: u64, data: &[u32]) -> Result<(), crate::Error> {
        self.write_8(address, data.as_bytes())
    }

    fn write_16(&mut self, address: u64, data: &[u16]) -> Result<(), crate::Error> {
        self.write_8(address, data.as_bytes())
    }

    fn write_8(&mut self, address: u64, data: &[u8]) -> Result<(), crate::Error> {
        self.write(address, data)
    }

    fn supports_8bit_transfers(&self) -> Result<bool, crate::Error> {
        Ok(true)
    }

    fn flush(&mut self) -> Result<(), crate::Error> {
        Ok(())
    }
}
1002
/// A typed view of a hardware register with a 32-bit raw representation.
pub trait TypedRegister: Copy {
    /// Returns the hardware register this type maps to.
    fn register() -> Register;

    /// Constructs the typed value from the raw register contents.
    fn from_u32(value: u32) -> Self;

    /// Returns the raw register contents.
    fn as_u32(self) -> u32;
}
1014
/// Implements [`TypedRegister`] for a newtype wrapper around a raw `u32`
/// register value.
macro_rules! u32_register {
    ($name:ident, $register:expr) => {
        impl TypedRegister for $name {
            fn register() -> Register {
                Register::from($register)
            }

            fn from_u32(value: u32) -> Self {
                Self(value)
            }

            fn as_u32(self) -> u32 {
                self.0
            }
        }
    };
}
1032
bitfield::bitfield! {
    /// The DEBUGCAUSE register: why the core entered debug mode.
    #[derive(Copy, Clone)]
    pub struct DebugCause(u32);
    impl Debug;

    /// ICOUNT overflowed (single step completed).
    pub icount_exception, set_icount_exception : 0;

    /// A hardware instruction breakpoint (IBREAK) matched.
    pub ibreak_exception, set_ibreak_exception : 1;

    /// A data breakpoint (DBREAK) matched.
    pub dbreak_exception, set_dbreak_exception : 2;

    /// A BREAK instruction was executed.
    pub break_instruction, set_break_instruction : 3;

    /// A narrow BREAK.N instruction was executed.
    pub break_n_instruction, set_break_n_instruction: 4;

    /// A debug interrupt (external halt request) was taken.
    pub debug_interrupt, set_debug_interrupt : 5;

    /// The number of the DBREAK register that matched.
    pub dbreak_num, set_dbreak_num : 11, 8;
}
u32_register!(DebugCause, SpecialRegister::DebugCause);
1061
impl DebugCause {
    /// Maps the debug cause bits to a [`HaltReason`].
    pub fn halt_reason(&self) -> HaltReason {
        let is_icount_exception = self.icount_exception();
        let is_ibreak_exception = self.ibreak_exception();
        let is_break_instruction = self.break_instruction();
        let is_break_n_instruction = self.break_n_instruction();
        let is_dbreak_exception = self.dbreak_exception();
        let is_debug_interrupt = self.debug_interrupt();

        let is_breakpoint = is_break_instruction || is_break_n_instruction;

        // Count how many distinct causes are flagged at once.
        let count = is_icount_exception as u8
            + is_ibreak_exception as u8
            + is_break_instruction as u8
            + is_break_n_instruction as u8
            + is_dbreak_exception as u8
            + is_debug_interrupt as u8;

        if count > 1 {
            tracing::debug!("DebugCause: {:?}", self);

            // A breakpoint that coincides with other causes is still reported
            // as a breakpoint; any other combination becomes Multiple.
            if is_breakpoint {
                HaltReason::Breakpoint(BreakpointCause::Unknown)
            } else {
                HaltReason::Multiple
            }
        } else if is_icount_exception {
            HaltReason::Step
        } else if is_ibreak_exception {
            HaltReason::Breakpoint(BreakpointCause::Hardware)
        } else if is_breakpoint {
            HaltReason::Breakpoint(BreakpointCause::Software)
        } else if is_dbreak_exception {
            HaltReason::Watchpoint
        } else if is_debug_interrupt {
            HaltReason::Request
        } else {
            HaltReason::Unknown
        }
    }
}
1111
bitfield::bitfield! {
    /// The PS (program status) special register.
    #[derive(Copy, Clone)]
    pub struct ProgramStatus(u32);
    impl Debug;

    /// Current interrupt level.
    pub intlevel, set_intlevel : 3, 0;

    /// Exception mode flag.
    pub excm, set_excm : 4;

    /// User vector mode flag.
    pub user_mode, set_user_mode: 5;

    /// Privilege ring.
    pub ring, set_ring : 7, 6;

    /// Old window base.
    pub owb, set_owb : 11, 8;

    /// Call increment of the last windowed call.
    pub callinc, set_callinc : 17, 16;

    /// Window overflow-detection enable.
    pub woe, set_woe : 18;
}
u32_register!(ProgramStatus, Register::CurrentPs);
1142
/// The IBREAKENABLE register: enables individual hardware breakpoints.
#[derive(Copy, Clone, Debug)]
pub struct IBreakEn(pub u32);
u32_register!(IBreakEn, SpecialRegister::IBreakEnable);

/// The ICOUNT register, used for instruction counting / single-stepping.
#[derive(Copy, Clone, Debug)]
pub struct ICount(pub u32);
u32_register!(ICount, SpecialRegister::ICount);

/// The ICOUNTLEVEL register: controls which interrupt levels are counted.
#[derive(Copy, Clone, Debug)]
pub struct ICountLevel(pub u32);
u32_register!(ICountLevel, SpecialRegister::ICountLevel);

/// The program counter of the current debug level.
#[derive(Copy, Clone, Debug)]
pub struct ProgramCounter(pub u32);
u32_register!(ProgramCounter, Register::CurrentPc);
1162
/// A strategy for reading and writing target memory in 32-bit words.
trait MemoryAccess {
    /// Runs `op` while the core is halted, using a strategy-appropriate
    /// halting scheme.
    fn halted_access(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        op: &mut dyn FnMut(
            &mut XtensaCommunicationInterface,
            &mut dyn MemoryAccess,
        ) -> Result<(), XtensaError>,
    ) -> Result<(), XtensaError>;

    /// Saves the scratch registers this strategy clobbers so they can be
    /// restored before resuming.
    fn save_scratch_registers(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<(), XtensaError>;

    /// Prepares the strategy to read starting at `address`.
    fn load_initial_address_for_read(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        address: u32,
    ) -> Result<(), XtensaError>;
    /// Prepares the strategy to write starting at `address`.
    fn load_initial_address_for_write(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        address: u32,
    ) -> Result<(), XtensaError>;

    /// Schedules reading the next word, ending the access.
    fn read_one(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<DeferredResultIndex, XtensaError>;

    /// Schedules reading the next word, keeping the access going.
    fn read_one_and_continue(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<DeferredResultIndex, XtensaError>;

    /// Schedules writing the next word.
    fn write_one(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        data: u32,
    ) -> Result<(), XtensaError>;
}
1205
/// Memory access strategy built on the auto-incrementing LDDR32.P/SDDR32.P
/// instructions.
struct FastMemoryAccess;
impl FastMemoryAccess {
    fn new() -> Self {
        Self
    }
}
impl MemoryAccess for FastMemoryAccess {
    fn halted_access(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        op: &mut dyn FnMut(
            &mut XtensaCommunicationInterface,
            &mut dyn MemoryAccess,
        ) -> Result<(), XtensaError>,
    ) -> Result<(), XtensaError> {
        // The fast path may batch the halt together with the access.
        interface.fast_halted_access(|this| op(this, self))
    }

    fn save_scratch_registers(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<(), XtensaError> {
        // A3 is used as the address register below.
        interface.ensure_register_saved(CpuRegister::A3)?;
        Ok(())
    }

    fn load_initial_address_for_read(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        address: u32,
    ) -> Result<(), XtensaError> {
        // Place the start address into A3 and mark it for restoration.
        interface.schedule_write_cpu_register(CpuRegister::A3, address)?;
        interface
            .state
            .register_cache
            .mark_dirty(CpuRegister::A3.into());

        // LDDR32.P reads through A3, auto-incrementing it.
        interface
            .xdm
            .schedule_execute_instruction(Instruction::Lddr32P(CpuRegister::A3));

        Ok(())
    }

    fn load_initial_address_for_write(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        address: u32,
    ) -> Result<(), XtensaError> {
        // Place the start address into A3 and mark it for restoration.
        interface.schedule_write_cpu_register(CpuRegister::A3, address)?;
        interface
            .state
            .register_cache
            .mark_dirty(CpuRegister::A3.into());

        // SDDR32.P writes through A3, auto-incrementing it.
        interface
            .xdm
            .schedule_write_instruction(Instruction::Sddr32P(CpuRegister::A3));

        Ok(())
    }

    fn write_one(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        data: u32,
    ) -> Result<(), XtensaError> {
        // Writing DDR and re-executing the staged store moves the word out.
        interface.xdm.schedule_write_ddr_and_execute(data);
        Ok(())
    }

    fn read_one(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<DeferredResultIndex, XtensaError> {
        Ok(interface.xdm.schedule_read_ddr())
    }

    fn read_one_and_continue(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<DeferredResultIndex, XtensaError> {
        // Reading DDR and re-executing the staged load fetches the next word.
        Ok(interface.xdm.schedule_read_ddr_and_execute())
    }
}
1294
/// Fallback memory access strategy using plain L32I/S32I instructions with an
/// explicitly managed base address.
struct SlowMemoryAccess {
    // The base address for the current run of accesses.
    current_address: u32,
    // Byte offset from `current_address` used by the next read.
    current_offset: u32,
    // Whether `current_address` has been loaded into the scratch register.
    address_written: bool,
}
impl SlowMemoryAccess {
    fn new() -> Self {
        Self {
            current_address: 0,
            current_offset: 0,
            address_written: false,
        }
    }
}
1310
impl MemoryAccess for SlowMemoryAccess {
    fn halted_access(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        op: &mut dyn FnMut(
            &mut XtensaCommunicationInterface,
            &mut dyn MemoryAccess,
        ) -> Result<(), XtensaError>,
    ) -> Result<(), XtensaError> {
        // The slow path always uses the safe halt-verify-resume scheme.
        interface.halted_access(|this| op(this, self))
    }

    fn save_scratch_registers(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<(), XtensaError> {
        // A3 holds the base address, A4 the transferred data.
        interface.ensure_register_saved(CpuRegister::A3)?;
        interface.ensure_register_saved(CpuRegister::A4)?;
        Ok(())
    }

    fn load_initial_address_for_read(
        &mut self,
        _interface: &mut XtensaCommunicationInterface,
        address: u32,
    ) -> Result<(), XtensaError> {
        // Only record the address; A3 is loaded lazily in `read_one`.
        self.current_address = address;

        Ok(())
    }

    fn load_initial_address_for_write(
        &mut self,
        _interface: &mut XtensaCommunicationInterface,
        address: u32,
    ) -> Result<(), XtensaError> {
        // Only record the address; `write_one` loads it each time.
        self.current_address = address;

        Ok(())
    }

    fn read_one(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<DeferredResultIndex, XtensaError> {
        // Load the base address into A3 if it isn't there yet.
        if !self.address_written {
            interface
                .xdm
                .schedule_execute_instruction(Instruction::Esync);
            interface.schedule_write_cpu_register(CpuRegister::A3, self.current_address)?;
            interface
                .state
                .register_cache
                .mark_dirty(CpuRegister::A3.into());
            self.current_offset = 0;
            self.address_written = true;
        }

        // L32I takes a word-granular immediate offset from the base in A3.
        interface
            .xdm
            .schedule_execute_instruction(Instruction::L32I(
                CpuRegister::A3,
                CpuRegister::A4,
                (self.current_offset / 4) as u8,
            ));
        self.current_offset += 4;

        interface
            .state
            .register_cache
            .mark_dirty(CpuRegister::A4.into());

        // The 8-bit word offset can't reach past 1 KiB; re-base afterwards.
        if self.current_offset == 1024 {
            self.current_address += self.current_offset;
            self.current_offset = 0;
            self.address_written = false;
        }

        Ok(interface.schedule_read_cpu_register(CpuRegister::A4))
    }

    fn read_one_and_continue(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<DeferredResultIndex, XtensaError> {
        // There is no streaming mode here; every read is the same.
        self.read_one(interface)
    }

    fn write_one(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        data: u32,
    ) -> Result<(), XtensaError> {
        // Load the target address and the data into the scratch registers.
        interface.schedule_write_cpu_register(CpuRegister::A3, self.current_address)?;
        interface
            .state
            .register_cache
            .mark_dirty(CpuRegister::A3.into());

        interface.schedule_write_cpu_register(CpuRegister::A4, data)?;
        interface
            .state
            .register_cache
            .mark_dirty(CpuRegister::A4.into());

        self.current_address += 4;

        interface
            .xdm
            .schedule_execute_instruction(Instruction::S32I(CpuRegister::A3, CpuRegister::A4, 0));

        Ok(())
    }
}
1430
/// A register read result that is either immediately available (from the
/// register cache) or scheduled for later retrieval from a command batch.
pub(crate) enum MaybeDeferredResultIndex {
    /// The value was available immediately.
    Value(u32),

    /// The value must be resolved from the batched command results.
    Deferred(DeferredResultIndex),
}