1use std::{
4 collections::HashMap,
5 ops::Range,
6 time::{Duration, Instant},
7};
8
9use zerocopy::IntoBytes;
10
11use crate::{
12 BreakpointCause, Error as ProbeRsError, HaltReason, MemoryInterface,
13 architecture::xtensa::{
14 arch::{CpuRegister, Register, SpecialRegister, instruction::Instruction},
15 register_cache::RegisterCache,
16 xdm::{DebugStatus, XdmState},
17 },
18 memory::{Operation, OperationKind},
19 probe::{DebugProbeError, DeferredResultIndex, JtagAccess},
20};
21
22use super::xdm::{Error as XdmError, Xdm};
23
// Error type for Xtensa debug operations.
//
// NOTE(review): `docsplay::Display` derives the `Display` impl from `///`
// doc comments, so variant documentation here is deliberately written as
// plain `//` comments to avoid changing the user-visible error messages.
#[derive(thiserror::Error, Debug, docsplay::Display)]
pub enum XtensaError {
    // An error originating from the underlying debug probe.
    DebugProbe(#[from] DebugProbeError),

    // An error originating from the XDM (Xtensa Debug Module) driver.
    XdmError(#[from] XdmError),

    // The core is disabled.
    CoreDisabled,

    // An operation did not complete within its allotted time.
    Timeout,

    // The connected target is not an Xtensa device.
    NoXtensaTarget,

    // The requested register is not available on this core.
    RegisterNotAvailable,

    // The result of a batched (deferred) command is not available.
    BatchedResultNotAvailable,
}
49
50impl From<XtensaError> for ProbeRsError {
51 fn from(err: XtensaError) -> Self {
52 match err {
53 XtensaError::DebugProbe(e) => e.into(),
54 other => ProbeRsError::Xtensa(other),
55 }
56 }
57}
58
/// The debug level of the core: the interrupt level at which the debug
/// exception operates. Determines which EPC/EPS registers hold the debug
/// return address and status (see [`DebugLevel::pc`] and [`DebugLevel::ps`]).
#[derive(Clone, Copy)]
pub enum DebugLevel {
    /// Debug level 2.
    L2 = 2,
    /// Debug level 3.
    L3 = 3,
    /// Debug level 4.
    L4 = 4,
    /// Debug level 5.
    L5 = 5,
    /// Debug level 6.
    L6 = 6,
    /// Debug level 7.
    L7 = 7,
}
75
impl DebugLevel {
    /// The EPC (exception program counter) special register that holds the
    /// program counter for this debug level.
    pub fn pc(self) -> SpecialRegister {
        match self {
            DebugLevel::L2 => SpecialRegister::Epc2,
            DebugLevel::L3 => SpecialRegister::Epc3,
            DebugLevel::L4 => SpecialRegister::Epc4,
            DebugLevel::L5 => SpecialRegister::Epc5,
            DebugLevel::L6 => SpecialRegister::Epc6,
            DebugLevel::L7 => SpecialRegister::Epc7,
        }
    }

    /// The EPS (exception program status) special register that holds the
    /// program status for this debug level.
    pub fn ps(self) -> SpecialRegister {
        match self {
            DebugLevel::L2 => SpecialRegister::Eps2,
            DebugLevel::L3 => SpecialRegister::Eps3,
            DebugLevel::L4 => SpecialRegister::Eps4,
            DebugLevel::L5 => SpecialRegister::Eps5,
            DebugLevel::L6 => SpecialRegister::Eps6,
            DebugLevel::L7 => SpecialRegister::Eps7,
        }
    }
}
101
/// Mutable per-session state of the Xtensa debug interface.
#[derive(Default)]
pub(super) struct XtensaInterfaceState {
    /// Cache of register values read from, and pending writes to, the core.
    pub(super) register_cache: RegisterCache,

    /// Whether the core is halted, as last observed by this interface.
    /// Refreshed lazily from the hardware (see `core_halted`).
    pub(super) is_halted: bool,
}
112
/// Debug-relevant properties of a memory region. The `Default` value is the
/// most conservative setting (everything `false`).
#[derive(Clone, Copy, Default)]
pub struct MemoryRegionProperties {
    /// Whether unaligned store accesses are supported in this region.
    pub unaligned_store: bool,

    /// Whether unaligned load accesses are supported in this region.
    pub unaligned_load: bool,

    /// Whether the fast memory access method (LDDR32.P/SDDR32.P based, see
    /// `FastMemoryAccess`) may be used for this region.
    pub fast_memory_access: bool,
}
125
/// Configuration of the connected Xtensa core.
pub struct XtensaCoreProperties {
    /// The number of hardware breakpoints the core supports.
    pub hw_breakpoint_num: u32,

    /// The debug level of the core (selects the EPC/EPS register pair used
    /// for the halted program counter and status).
    pub debug_level: DebugLevel,

    /// Debug-relevant properties of the individual memory regions.
    pub memory_ranges: HashMap<Range<u64>, MemoryRegionProperties>,

    /// Register-window configuration of the core.
    pub window_option_properties: WindowProperties,
}
140
141impl Default for XtensaCoreProperties {
142 fn default() -> Self {
143 Self {
144 hw_breakpoint_num: 2,
145 debug_level: DebugLevel::L6,
146 memory_ranges: HashMap::new(),
147 window_option_properties: WindowProperties::lx(64),
148 }
149 }
150}
151
152impl XtensaCoreProperties {
153 pub fn memory_properties_at(&self, address: u64) -> MemoryRegionProperties {
155 self.memory_ranges
156 .iter()
157 .find(|(range, _)| range.contains(&address))
158 .map(|(_, region)| *region)
159 .unwrap_or_default()
160 }
161
162 pub fn memory_range_properties(&self, range: Range<u64>) -> MemoryRegionProperties {
164 let mut start = range.start;
165 let end = range.end;
166
167 if start == end {
168 return MemoryRegionProperties::default();
169 }
170
171 let mut properties = MemoryRegionProperties {
172 unaligned_store: true,
173 unaligned_load: true,
174 fast_memory_access: true,
175 };
176 while start < end {
177 let containing_region = self
179 .memory_ranges
180 .iter()
181 .find(|(range, _)| range.contains(&start));
182
183 let Some((range, region_properties)) = containing_region else {
184 return MemoryRegionProperties::default();
186 };
187
188 properties.unaligned_store &= region_properties.unaligned_store;
189 properties.unaligned_load &= region_properties.unaligned_load;
190 properties.fast_memory_access &= region_properties.fast_memory_access;
191
192 start = range.end;
194 }
195
196 properties
197 }
198}
199
/// Properties of the core's windowed-register option.
#[derive(Clone, Copy, Debug)]
pub struct WindowProperties {
    /// Whether the core has windowed registers.
    pub has_windowed_registers: bool,

    /// Total number of physical address registers (ARs) on the core.
    pub num_aregs: u8,

    /// Number of address registers visible in a register window.
    pub window_regs: u8,

    /// Number of registers the window rotates by per unit of WindowBase.
    pub rotw_rotates: u8,
}
215
impl WindowProperties {
    /// Creates the window configuration of an Xtensa LX core with the given
    /// total number of physical address registers.
    pub fn lx(num_aregs: u8) -> Self {
        Self {
            has_windowed_registers: true,
            num_aregs,
            window_regs: 16,
            rotw_rotates: 4,
        }
    }

    /// The number of distinct WindowBase values (the size of the WindowBase
    /// register's value range).
    pub fn windowbase_size(&self) -> u8 {
        self.num_aregs / self.rotw_rotates
    }
}
232
/// All debug state owned for the lifetime of a debug session; borrowed by
/// [`XtensaCommunicationInterface`] while it exists.
#[derive(Default)]
pub struct XtensaDebugInterfaceState {
    /// Register cache and halt-state bookkeeping.
    interface_state: XtensaInterfaceState,
    /// Configuration of the connected core.
    core_properties: XtensaCoreProperties,
    /// State of the XDM (Xtensa Debug Module) driver.
    xdm_state: XdmState,
}
240
/// The high-level debug interface to an Xtensa core, layered on top of the
/// XDM driver. Provides halting, register access (with caching/restoring)
/// and memory access.
pub struct XtensaCommunicationInterface<'probe> {
    /// The Xtensa Debug Module driver.
    pub(crate) xdm: Xdm<'probe>,
    /// Mutable session state (register cache, cached halt flag).
    pub(super) state: &'probe mut XtensaInterfaceState,
    /// Configuration of the connected core.
    core_properties: &'probe mut XtensaCoreProperties,
}
250
impl<'probe> XtensaCommunicationInterface<'probe> {
    /// Creates a new communication interface from a JTAG probe and the
    /// per-session debug state.
    pub fn new(
        probe: &'probe mut dyn JtagAccess,
        state: &'probe mut XtensaDebugInterfaceState,
    ) -> Self {
        // Split the borrow of `state` so the XDM driver and this interface
        // can each hold on to their own piece of it.
        let XtensaDebugInterfaceState {
            interface_state,
            core_properties,
            xdm_state,
        } = state;
        let xdm = Xdm::new(probe, xdm_state);

        Self {
            xdm,
            state: interface_state,
            core_properties,
        }
    }

    /// Mutable access to the core configuration (breakpoint count, debug
    /// level, memory region properties, window options).
    pub fn core_properties(&mut self) -> &mut XtensaCoreProperties {
        self.core_properties
    }

    /// Reads the JTAG IDCODE of the target.
    pub fn read_idcode(&mut self) -> Result<u32, XtensaError> {
        self.xdm.read_idcode()
    }

    /// Enters OCD (debug) mode, resetting the register cache and refreshing
    /// the cached halt state from the hardware.
    pub fn enter_debug_mode(&mut self) -> Result<(), XtensaError> {
        self.state.register_cache = RegisterCache::new();
        self.xdm.enter_debug_mode()?;

        self.state.is_halted = self.xdm.status()?.stopped();

        Ok(())
    }

    /// Leaves OCD mode. If the core is currently halted, its registers are
    /// restored and it is resumed first.
    pub(crate) fn leave_debug_mode(&mut self) -> Result<(), XtensaError> {
        if self.xdm.status()?.stopped() {
            self.restore_registers()?;
            self.resume_core()?;
        }
        self.xdm.leave_ocd_mode()?;

        tracing::debug!("Left OCD mode");

        Ok(())
    }

    /// Returns the number of hardware breakpoints the core supports.
    pub fn available_breakpoint_units(&self) -> u32 {
        self.core_properties.hw_breakpoint_num
    }

    /// Returns whether the core is halted. Queries the hardware only while
    /// the cached state says "running" (a halted core stays halted until we
    /// resume it ourselves).
    pub fn core_halted(&mut self) -> Result<bool, XtensaError> {
        if !self.state.is_halted {
            self.state.is_halted = self.xdm.status()?.stopped();
        }

        Ok(self.state.is_halted)
    }

    /// Polls the halt state until the core halts, returning
    /// [`XtensaError::Timeout`] if `timeout` elapses first.
    pub fn wait_for_core_halted(&mut self, timeout: Duration) -> Result<(), XtensaError> {
        let start = Instant::now();

        while !self.core_halted()? {
            if start.elapsed() >= timeout {
                return Err(XtensaError::Timeout);
            }
            std::thread::sleep(Duration::from_millis(1));
        }

        Ok(())
    }

    /// Requests a halt and waits until the core is halted (or `timeout`
    /// elapses).
    pub(crate) fn halt(&mut self, timeout: Duration) -> Result<(), XtensaError> {
        self.xdm.schedule_halt();
        self.wait_for_core_halted(timeout)?;
        Ok(())
    }

    /// Halts the core and returns whether it was running beforehand.
    pub(crate) fn halt_with_previous(&mut self, timeout: Duration) -> Result<bool, XtensaError> {
        let was_running = if self.state.is_halted {
            // Already halted according to our cached state; nothing to do.
            false
        } else {
            // Capture the debug status from just before the halt request so
            // we can tell whether we actually interrupted a running core.
            let status_idx = self.xdm.schedule_read_nexus_register::<DebugStatus>();
            self.halt(timeout)?;
            let before_status = DebugStatus(self.xdm.read_deferred_result(status_idx)?.into_u32());

            !before_status.stopped()
        };

        Ok(was_running)
    }

    /// Runs `op` with the core halted, optimistically assuming the scheduled
    /// halt takes effect before `op`'s scheduled commands execute.
    ///
    /// If the status read scheduled after the halt shows the core was not
    /// actually stopped, the whole operation is retried via the safe
    /// [`Self::halted_access`] path — so `op` must be safe to run twice.
    fn fast_halted_access(
        &mut self,
        mut op: impl FnMut(&mut Self) -> Result<(), XtensaError>,
    ) -> Result<(), XtensaError> {
        if self.state.is_halted {
            return op(self);
        }

        // Status before the halt request: tells us whether to resume after.
        let status_idx = self.xdm.schedule_read_nexus_register::<DebugStatus>();

        self.xdm.schedule_halt();

        // Status after the halt request: tells us whether the optimistic
        // assumption held while `op`'s commands ran.
        let is_halted_idx = self.xdm.schedule_read_nexus_register::<DebugStatus>();
        self.state.is_halted = true;

        let result = op(self);

        let after_status = DebugStatus(self.xdm.read_deferred_result(is_halted_idx)?.into_u32());

        if after_status.stopped() {
            let before_status = DebugStatus(self.xdm.read_deferred_result(status_idx)?.into_u32());
            if !before_status.stopped() {
                // We halted a previously running core; put it back.
                self.resume_core()?;
            }

            return result;
        }
        // The optimistic halt did not take effect in time; fall back to the
        // safe path and run `op` again.
        self.state.is_halted = false;
        self.halted_access(|this| op(this))
    }

    /// Runs `op` with the core reliably halted, resuming it afterwards if it
    /// was running before.
    pub fn halted_access<R>(
        &mut self,
        op: impl FnOnce(&mut Self) -> Result<R, XtensaError>,
    ) -> Result<R, XtensaError> {
        let was_running = self.halt_with_previous(Duration::from_millis(100))?;

        let result = op(self);

        if was_running {
            self.resume_core()?;
        }

        result
    }

    /// Steps the core by `by` instructions, counting only instructions at
    /// interrupt levels at or below `intlevel`.
    pub fn step(&mut self, by: u32, intlevel: u32) -> Result<(), XtensaError> {
        // Count instructions executed below this interrupt level.
        self.schedule_write_register(ICountLevel(intlevel + 1))?;

        // NOTE(review): ICOUNT counts upwards; starting it at -(by + 1) is
        // intended to raise the debug exception after `by` instructions —
        // confirm against the Xtensa ISA debug option description.
        self.schedule_write_register(ICount(-((1 + by) as i32) as u32))?;

        self.resume_core()?;
        match self.wait_for_core_halted(Duration::from_millis(100)) {
            Ok(()) => {}
            // The core did not halt on its own (e.g. it is blocked); halt it
            // manually so we return with a halted core.
            Err(XtensaError::Timeout) => self.halt(Duration::from_millis(100))?,
            Err(e) => return Err(e),
        }

        // Disable the instruction counter again so normal execution does not
        // trip the debug exception.
        self.schedule_write_register(ICountLevel(0))?;

        Ok(())
    }

    /// Resumes program execution, restoring all debugger-clobbered registers
    /// first.
    pub fn resume_core(&mut self) -> Result<(), XtensaError> {
        self.restore_registers()?;
        // Once the core runs, all cached register values are stale.
        self.clear_register_cache();

        tracing::debug!("Resuming core");
        self.state.is_halted = false;
        self.xdm.resume()?;

        Ok(())
    }

    /// Schedules reading a CPU (address) register by moving it through DDR
    /// (the Debug Data Register).
    fn schedule_read_cpu_register(&mut self, register: CpuRegister) -> DeferredResultIndex {
        self.xdm
            .schedule_execute_instruction(Instruction::Wsr(SpecialRegister::Ddr, register));
        self.xdm.schedule_read_ddr()
    }

    /// Schedules reading a special register, using A3 as scratch.
    fn schedule_read_special_register(
        &mut self,
        register: SpecialRegister,
    ) -> Result<DeferredResultIndex, XtensaError> {
        // A3 is clobbered below, so make sure its original value is saved.
        self.ensure_register_saved(CpuRegister::A3)?;
        self.state.register_cache.mark_dirty(CpuRegister::A3.into());

        // Copy the special register into A3, then read A3 through DDR.
        self.xdm
            .schedule_execute_instruction(Instruction::Rsr(register, CpuRegister::A3));

        Ok(self.schedule_read_cpu_register(CpuRegister::A3))
    }

    /// Schedules writing a special register, using A3 as scratch.
    fn schedule_write_special_register(
        &mut self,
        register: SpecialRegister,
        value: u32,
    ) -> Result<(), XtensaError> {
        tracing::debug!("Writing special register: {:?}", register);
        self.ensure_register_saved(CpuRegister::A3)?;
        self.state.register_cache.mark_dirty(CpuRegister::A3.into());

        self.xdm.schedule_write_ddr(value);

        // Move the value DDR -> A3 -> target special register.
        self.xdm
            .schedule_execute_instruction(Instruction::Rsr(SpecialRegister::Ddr, CpuRegister::A3));

        self.xdm
            .schedule_execute_instruction(Instruction::Wsr(register, CpuRegister::A3));

        Ok(())
    }

    /// Schedules writing a CPU (address) register through DDR.
    #[tracing::instrument(skip(self), level = "debug")]
    fn schedule_write_cpu_register(
        &mut self,
        register: CpuRegister,
        value: u32,
    ) -> Result<(), XtensaError> {
        tracing::debug!("Writing {:x} to register: {:?}", value, register);

        self.xdm.schedule_write_ddr(value);
        self.xdm
            .schedule_execute_instruction(Instruction::Rsr(SpecialRegister::Ddr, register));

        Ok(())
    }

    /// Reads a typed register.
    pub fn read_register<R: TypedRegister>(&mut self) -> Result<R, XtensaError> {
        let value = self.read_register_untyped(R::register())?;

        Ok(R::from_u32(value))
    }

    /// Writes a typed register.
    pub fn write_register<R: TypedRegister>(&mut self, reg: R) -> Result<(), XtensaError> {
        self.write_register_untyped(R::register(), reg.as_u32())?;

        Ok(())
    }

    /// Schedules writing a typed register.
    pub(crate) fn schedule_write_register<R: TypedRegister>(
        &mut self,
        reg: R,
    ) -> Result<(), XtensaError> {
        self.schedule_write_register_untyped(R::register(), reg.as_u32())?;

        Ok(())
    }

    /// Schedules reading `register`. Returns the cached value if its
    /// original value is already known; otherwise schedules the hardware
    /// read and records it in the cache.
    pub(crate) fn schedule_read_register(
        &mut self,
        register: impl Into<Register>,
    ) -> Result<MaybeDeferredResultIndex, XtensaError> {
        let register = register.into();
        if let Some(value) = self.state.register_cache.original_value_of(register) {
            return Ok(value);
        }

        // CurrentPc/CurrentPs are aliases for the EPC/EPS pair selected by
        // the configured debug level.
        let reader = match register {
            Register::Cpu(register) => self.schedule_read_cpu_register(register),
            Register::Special(register) => self.schedule_read_special_register(register)?,
            Register::CurrentPc => {
                self.schedule_read_special_register(self.core_properties.debug_level.pc())?
            }
            Register::CurrentPs => {
                self.schedule_read_special_register(self.core_properties.debug_level.ps())?
            }
        };
        self.state.register_cache.store_deferred(register, reader);
        Ok(MaybeDeferredResultIndex::Deferred(register))
    }

    /// Reads a register, resolving the scheduled command immediately.
    pub fn read_register_untyped(
        &mut self,
        register: impl Into<Register>,
    ) -> Result<u32, XtensaError> {
        let reader = self.schedule_read_register(register)?;
        self.read_deferred_result(reader)
    }

    /// Schedules writing a register and records the new value in the cache.
    pub fn schedule_write_register_untyped(
        &mut self,
        register: impl Into<Register>,
        value: u32,
    ) -> Result<(), XtensaError> {
        let register = register.into();

        self.state.register_cache.store(register, value);

        match register {
            Register::Cpu(register) => self.schedule_write_cpu_register(register, value),
            Register::Special(register) => self.schedule_write_special_register(register, value),
            Register::CurrentPc => {
                self.schedule_write_special_register(self.core_properties.debug_level.pc(), value)
            }
            Register::CurrentPs => {
                self.schedule_write_special_register(self.core_properties.debug_level.ps(), value)
            }
        }
    }

    /// Writes a register, executing the scheduled commands immediately.
    pub fn write_register_untyped(
        &mut self,
        register: impl Into<Register>,
        value: u32,
    ) -> Result<(), XtensaError> {
        self.schedule_write_register_untyped(register, value)?;
        self.xdm.execute()
    }

    /// Ensures the original value of `register` is (or will be) in the
    /// cache, so it can be restored before the core resumes.
    #[tracing::instrument(skip(self, register), fields(register), level = "debug")]
    fn ensure_register_saved(&mut self, register: impl Into<Register>) -> Result<(), XtensaError> {
        let register = register.into();

        tracing::debug!("Saving register: {:?}", register);
        // Scheduling a read stores the original value in the register cache.
        self.schedule_read_register(register)?;

        Ok(())
    }

    /// Writes back the original values of all registers that the debugger
    /// marked dirty.
    ///
    /// Panics if a dirty register has no saved value — that would be a bug
    /// in the save/restore bookkeeping.
    #[tracing::instrument(skip(self), level = "debug")]
    pub(super) fn restore_registers(&mut self) -> Result<(), XtensaError> {
        tracing::debug!("Restoring registers");

        // Restore special registers first: writing them clobbers A3 (the
        // scratch register), which the second pass then restores along with
        // the other CPU registers.
        let filters = [
            |r: &Register| !r.is_cpu_register(),
            |r: &Register| r.is_cpu_register(),
        ];
        for filter in filters {
            // Collect first so the cache isn't borrowed while we write.
            let dirty_regs = self
                .state
                .register_cache
                .iter()
                .filter(|(r, entry)| entry.is_dirty() && filter(r))
                .map(|(r, _)| r)
                .collect::<Vec<_>>();

            for register in dirty_regs {
                let restore_value = self
                    .state
                    .register_cache
                    .resolved_original_value_of(register, &mut self.xdm)
                    .unwrap_or_else(|| panic!("Saved register {register:?} is not in the cache. This is a bug, please report it."))?;

                self.schedule_write_register_untyped(register, restore_value)?;
            }
        }

        Ok(())
    }

    /// Picks the fast or slow memory access method based on the properties
    /// of the affected address range.
    fn memory_access_for(&self, address: u64, len: usize) -> Box<dyn MemoryAccess> {
        if self
            .core_properties
            .memory_range_properties(address..address + len as u64)
            .fast_memory_access
        {
            Box::new(FastMemoryAccess::new())
        } else {
            Box::new(SlowMemoryAccess::new())
        }
    }

    /// Reads `dst.len()` bytes from `address`, halting the core for the
    /// duration of the access.
    fn read_memory(&mut self, address: u64, dst: &mut [u8]) -> Result<(), XtensaError> {
        tracing::debug!("Reading {} bytes from address {:08x}", dst.len(), address);
        if dst.is_empty() {
            return Ok(());
        }

        let mut memory_access = self.memory_access_for(address, dst.len());

        memory_access.halted_access(self, &mut |this, memory_access| {
            memory_access.save_scratch_registers(this)?;
            this.read_memory_impl(memory_access, address, dst)
        })
    }

    /// Word-based read loop shared by all access methods. The core must
    /// already be halted and scratch registers saved.
    fn read_memory_impl(
        &mut self,
        memory_access: &mut dyn MemoryAccess,
        address: u64,
        mut dst: &mut [u8],
    ) -> Result<(), XtensaError> {
        let mut to_read = dst.len();

        // If the start address is unaligned and the region does not support
        // unaligned loads, fetch the first (partial) word separately from
        // the word-aligned address below it.
        let first_read = if !address.is_multiple_of(4)
            && !self
                .core_properties
                .memory_range_properties(address..address + dst.len() as u64)
                .unaligned_load
        {
            memory_access.load_initial_address_for_read(self, address as u32 & !0x3)?;
            let offset = address as usize % 4;

            // Only advance the read address if more than one word is needed.
            let first_read = if offset + to_read <= 4 {
                memory_access.read_one(self)?
            } else {
                memory_access.read_one_and_continue(self)?
            };

            let bytes_to_copy = (4 - offset).min(to_read);

            to_read -= bytes_to_copy;

            Some((first_read, offset, bytes_to_copy))
        } else {
            memory_access.load_initial_address_for_read(self, address as u32)?;
            None
        };

        // Schedule the remaining word reads; only the last one must not
        // advance the read address.
        let mut aligned_reads = vec![];
        if to_read > 0 {
            let words = to_read.div_ceil(4);

            for _ in 0..words - 1 {
                aligned_reads.push(memory_access.read_one_and_continue(self)?);
            }
            aligned_reads.push(memory_access.read_one(self)?);
        };

        // Resolve the deferred results and copy bytes into place.
        if let Some((read, offset, bytes_to_copy)) = first_read {
            let word = self
                .xdm
                .read_deferred_result(read)?
                .into_u32()
                .to_le_bytes();

            dst[..bytes_to_copy].copy_from_slice(&word[offset..][..bytes_to_copy]);
            dst = &mut dst[bytes_to_copy..];
        }

        for read in aligned_reads {
            let word = self
                .xdm
                .read_deferred_result(read)?
                .into_u32()
                .to_le_bytes();

            // The final word may be partial.
            let bytes = dst.len().min(4);

            dst[..bytes].copy_from_slice(&word[..bytes]);
            dst = &mut dst[bytes..];
        }

        Ok(())
    }

    /// Writes `data` to `address`, halting the core for the duration of the
    /// access.
    pub(crate) fn write_memory(&mut self, address: u64, data: &[u8]) -> Result<(), XtensaError> {
        tracing::debug!("Writing {} bytes to address {:08x}", data.len(), address);
        if data.is_empty() {
            return Ok(());
        }

        let mut memory_access = self.memory_access_for(address, data.len());

        memory_access.halted_access(self, &mut |this, memory_access| {
            memory_access.save_scratch_registers(this)?;
            this.write_memory_impl(memory_access, address, data)
        })
    }

    /// Executes one batched memory operation. The caller is responsible for
    /// having halted the core (see `execute_memory_operations`).
    fn execute_single_memory_operation_halted(
        &mut self,
        operation: Operation<'_>,
    ) -> Result<(), ProbeRsError> {
        // Unify all operation kinds into plain byte-slice reads and writes.
        enum Op<'a> {
            Read(&'a mut [u8]),
            Write(&'a [u8]),
        }
        impl Op<'_> {
            fn data_len(&self) -> usize {
                match self {
                    Op::Read(bytes) => bytes.len(),
                    Op::Write(bytes) => bytes.len(),
                }
            }
        }

        // Scratch buffer for the single-word write variants; 8 bytes covers
        // the widest word (u64).
        let mut temp_bytes = [0; 8];
        let op = match operation.operation {
            OperationKind::Read(data) => Op::Read(data),
            OperationKind::Read8(data) => Op::Read(data),
            OperationKind::Read16(data) => Op::Read(data.as_mut_bytes()),
            OperationKind::Read32(data) => Op::Read(data.as_mut_bytes()),
            OperationKind::Read64(data) => Op::Read(data.as_mut_bytes()),
            OperationKind::Write(data) => Op::Write(data),
            OperationKind::Write8(data) => Op::Write(data),
            OperationKind::Write16(data) => Op::Write(data.as_bytes()),
            OperationKind::Write32(data) => Op::Write(data.as_bytes()),
            OperationKind::Write64(data) => Op::Write(data.as_bytes()),
            OperationKind::WriteWord8(word) => {
                let word_bytes = size_of_val(&word);
                let bytes = &mut temp_bytes[..word_bytes];
                bytes.copy_from_slice(&word.to_le_bytes());
                Op::Write(bytes)
            }
            OperationKind::WriteWord16(word) => {
                let word_bytes = size_of_val(&word);
                let bytes = &mut temp_bytes[..word_bytes];
                bytes.copy_from_slice(&word.to_le_bytes());
                Op::Write(bytes)
            }
            OperationKind::WriteWord32(word) => {
                let word_bytes = size_of_val(&word);
                let bytes = &mut temp_bytes[..word_bytes];
                bytes.copy_from_slice(&word.to_le_bytes());
                Op::Write(bytes)
            }
            OperationKind::WriteWord64(word) => {
                let word_bytes = size_of_val(&word);
                let bytes = &mut temp_bytes[..word_bytes];
                bytes.copy_from_slice(&word.to_le_bytes());
                Op::Write(bytes)
            }
        };

        if op.data_len() == 0 {
            return Ok(());
        }

        let address = operation.address;

        let mut memory_access = self.memory_access_for(address, op.data_len());
        memory_access.save_scratch_registers(self)?;

        match op {
            Op::Read(dst) => self
                .read_memory_impl(memory_access.as_mut(), address, dst)
                .map_err(ProbeRsError::Xtensa),
            Op::Write(buffer) => self
                .write_memory_impl(memory_access.as_mut(), address, buffer)
                .map_err(ProbeRsError::Xtensa),
        }
    }

    /// Word-based write loop shared by all access methods. Partial words at
    /// either end are handled via read-modify-write.
    fn write_memory_impl(
        &mut self,
        memory_access: &mut dyn MemoryAccess,
        address: u64,
        mut buffer: &[u8],
    ) -> Result<(), XtensaError> {
        let mut addr = address as u32;

        let mut address_loaded = false;
        // Unaligned start in a region without unaligned stores: patch the
        // containing word via read-modify-write.
        if !addr.is_multiple_of(4)
            && !self
                .core_properties
                .memory_range_properties(address..address + buffer.len() as u64)
                .unaligned_store
        {
            let unaligned_bytes = (4 - (addr % 4) as usize).min(buffer.len());
            let aligned_address = address & !0x3;
            let offset_in_word = address as usize % 4;

            // Fetch the existing word and splice the new bytes into it.
            let mut word = [0; 4];
            self.read_memory_impl(memory_access, aligned_address, &mut word)?;

            word[offset_in_word..][..unaligned_bytes].copy_from_slice(&buffer[..unaligned_bytes]);

            memory_access.load_initial_address_for_write(self, aligned_address as u32)?;
            memory_access.write_one(self, u32::from_le_bytes(word))?;

            buffer = &buffer[unaligned_bytes..];
            addr += unaligned_bytes as u32;

            // The write position now already points at the next word.
            address_loaded = true;
        }

        // Write as many whole words as possible.
        if buffer.len() >= 4 {
            if !address_loaded {
                memory_access.load_initial_address_for_write(self, addr)?;
            }
            let mut chunks = buffer.chunks_exact(4);
            for chunk in chunks.by_ref() {
                let mut word = [0; 4];
                word[..].copy_from_slice(chunk);
                let word = u32::from_le_bytes(word);

                memory_access.write_one(self, word)?;

                addr += 4;
            }

            buffer = chunks.remainder();
        }

        // Trailing partial word: read-modify-write again.
        if !buffer.is_empty() {
            let mut word = [0; 4];
            self.read_memory_impl(memory_access, addr as u64, &mut word)?;

            word[..buffer.len()].copy_from_slice(buffer);

            // The read above moved the access position, so reload the write
            // address before storing.
            memory_access.load_initial_address_for_write(self, addr)?;
            memory_access.write_one(self, u32::from_le_bytes(word))?;
        }

        Ok(())
    }

    /// Resets the core, halts it, and configures PS for debugging.
    pub(crate) fn reset_and_halt(&mut self, timeout: Duration) -> Result<(), XtensaError> {
        self.clear_register_cache();
        self.xdm.reset_and_halt()?;
        self.wait_for_core_halted(timeout)?;

        // Set up a known-good program status: interrupts enabled at level 0,
        // user vector mode, window overflow exceptions enabled.
        self.write_register({
            let mut ps = ProgramStatus(0);
            ps.set_intlevel(0);
            ps.set_user_mode(true);
            ps.set_woe(true);
            ps
        })?;
        // Start from a fresh cache so the PS write above is not "restored"
        // away on resume.
        self.state.register_cache = RegisterCache::new();

        Ok(())
    }

    /// Discards all cached register values.
    pub(crate) fn clear_register_cache(&mut self) {
        self.state.register_cache = RegisterCache::new();
    }

    /// Resolves a (possibly deferred) register read result into its value.
    pub(crate) fn read_deferred_result(
        &mut self,
        result: MaybeDeferredResultIndex,
    ) -> Result<u32, XtensaError> {
        self.state.register_cache.resolve(result, &mut self.xdm)
    }
}
960
impl MemoryInterface for XtensaCommunicationInterface<'_> {
    fn read(&mut self, address: u64, dst: &mut [u8]) -> Result<(), crate::Error> {
        self.read_memory(address, dst)?;

        Ok(())
    }

    fn supports_native_64bit_access(&mut self) -> bool {
        // All transfers are performed as 32-bit words.
        false
    }

    fn read_word_64(&mut self, address: u64) -> Result<u64, crate::Error> {
        let mut out = [0; 8];
        self.read(address, &mut out)?;

        Ok(u64::from_le_bytes(out))
    }

    fn read_word_32(&mut self, address: u64) -> Result<u32, crate::Error> {
        let mut out = [0; 4];
        self.read(address, &mut out)?;

        Ok(u32::from_le_bytes(out))
    }

    fn read_word_16(&mut self, address: u64) -> Result<u16, crate::Error> {
        let mut out = [0; 2];
        self.read(address, &mut out)?;

        Ok(u16::from_le_bytes(out))
    }

    fn read_word_8(&mut self, address: u64) -> Result<u8, crate::Error> {
        let mut out = 0;
        self.read(address, std::slice::from_mut(&mut out))?;
        Ok(out)
    }

    // The multi-word reads reinterpret the destination as a byte buffer;
    // word values are assembled in target (little-endian) byte order.
    fn read_64(&mut self, address: u64, data: &mut [u64]) -> Result<(), crate::Error> {
        self.read_8(address, data.as_mut_bytes())
    }

    fn read_32(&mut self, address: u64, data: &mut [u32]) -> Result<(), crate::Error> {
        self.read_8(address, data.as_mut_bytes())
    }

    fn read_16(&mut self, address: u64, data: &mut [u16]) -> Result<(), crate::Error> {
        self.read_8(address, data.as_mut_bytes())
    }

    fn read_8(&mut self, address: u64, data: &mut [u8]) -> Result<(), crate::Error> {
        self.read(address, data)
    }

    fn write(&mut self, address: u64, data: &[u8]) -> Result<(), crate::Error> {
        self.write_memory(address, data)?;

        Ok(())
    }

    fn write_word_64(&mut self, address: u64, data: u64) -> Result<(), crate::Error> {
        self.write(address, &data.to_le_bytes())
    }

    fn write_word_32(&mut self, address: u64, data: u32) -> Result<(), crate::Error> {
        self.write(address, &data.to_le_bytes())
    }

    fn write_word_16(&mut self, address: u64, data: u16) -> Result<(), crate::Error> {
        self.write(address, &data.to_le_bytes())
    }

    fn write_word_8(&mut self, address: u64, data: u8) -> Result<(), crate::Error> {
        self.write(address, &[data])
    }

    fn write_64(&mut self, address: u64, data: &[u64]) -> Result<(), crate::Error> {
        self.write_8(address, data.as_bytes())
    }

    fn write_32(&mut self, address: u64, data: &[u32]) -> Result<(), crate::Error> {
        self.write_8(address, data.as_bytes())
    }

    fn write_16(&mut self, address: u64, data: &[u16]) -> Result<(), crate::Error> {
        self.write_8(address, data.as_bytes())
    }

    fn write_8(&mut self, address: u64, data: &[u8]) -> Result<(), crate::Error> {
        self.write(address, data)
    }

    fn supports_8bit_transfers(&self) -> Result<bool, crate::Error> {
        Ok(true)
    }

    fn flush(&mut self) -> Result<(), crate::Error> {
        Ok(())
    }

    fn execute_memory_operations(&mut self, operations: &mut [Operation<'_>]) {
        if operations.is_empty() {
            return;
        }
        // Halt once for the whole batch. Each operation records its own
        // result; the batch stops at the first failing operation.
        let result = self.fast_halted_access(|this| {
            for operation in operations.iter_mut() {
                let result = this.execute_single_memory_operation_halted(operation.reborrow());
                let success = result.is_ok();
                operation.result = Some(result);
                if !success {
                    break;
                }
            }
            Ok(())
        });

        // If halting itself failed, surface the error on the first
        // operation so the caller sees it.
        if result.is_err()
            && let Some(op) = operations.get_mut(0)
        {
            op.result = Some(result.map_err(ProbeRsError::Xtensa));
        }
    }
}
1084
/// A register value type that is bound to a fixed [`Register`] and converts
/// to/from its raw 32-bit representation.
pub trait TypedRegister: Copy {
    /// The register this type represents.
    fn register() -> Register;

    /// Creates an instance from the raw register value.
    fn from_u32(value: u32) -> Self;

    /// Returns the raw register value.
    fn as_u32(self) -> u32;
}
1096
/// Implements [`TypedRegister`] for a newtype wrapper around a raw `u32`
/// register value, binding it to the given [`Register`].
macro_rules! u32_register {
    ($name:ident, $register:expr) => {
        impl TypedRegister for $name {
            fn register() -> Register {
                Register::from($register)
            }

            fn from_u32(value: u32) -> Self {
                Self(value)
            }

            fn as_u32(self) -> u32 {
                self.0
            }
        }
    };
}
1114
bitfield::bitfield! {
    // The DEBUGCAUSE special register: why the most recent debug exception
    // or interrupt was taken. See `DebugCause::halt_reason` for how the
    // flags map to probe-rs halt reasons.
    #[derive(Copy, Clone)]
    pub struct DebugCause(u32);
    impl Debug;

    // ICOUNT matched (used for single-stepping).
    pub icount_exception, set_icount_exception : 0;

    // A hardware instruction breakpoint (IBREAK) matched.
    pub ibreak_exception, set_ibreak_exception : 1;

    // A data breakpoint/watchpoint (DBREAK) matched.
    pub dbreak_exception, set_dbreak_exception : 2;

    // A BREAK instruction was executed.
    pub break_instruction, set_break_instruction : 3;

    // A BREAK.N (narrow) instruction was executed.
    pub break_n_instruction, set_break_n_instruction: 4;

    // A debug interrupt was requested (e.g. the debugger halted the core).
    pub debug_interrupt, set_debug_interrupt : 5;

    // The number of the DBREAK register that matched.
    pub dbreak_num, set_dbreak_num : 11, 8;
}
u32_register!(DebugCause, SpecialRegister::DebugCause);
1143
1144impl DebugCause {
1145 pub fn halt_reason(&self) -> HaltReason {
1147 let is_icount_exception = self.icount_exception();
1148 let is_ibreak_exception = self.ibreak_exception();
1149 let is_break_instruction = self.break_instruction();
1150 let is_break_n_instruction = self.break_n_instruction();
1151 let is_dbreak_exception = self.dbreak_exception();
1152 let is_debug_interrupt = self.debug_interrupt();
1153
1154 let is_breakpoint = is_break_instruction || is_break_n_instruction;
1155
1156 let count = is_icount_exception as u8
1157 + is_ibreak_exception as u8
1158 + is_break_instruction as u8
1159 + is_break_n_instruction as u8
1160 + is_dbreak_exception as u8
1161 + is_debug_interrupt as u8;
1162
1163 if count > 1 {
1164 tracing::debug!("DebugCause: {:?}", self);
1165
1166 if is_breakpoint {
1174 HaltReason::Breakpoint(BreakpointCause::Unknown)
1175 } else {
1176 HaltReason::Multiple
1177 }
1178 } else if is_icount_exception {
1179 HaltReason::Step
1180 } else if is_ibreak_exception {
1181 HaltReason::Breakpoint(BreakpointCause::Hardware)
1182 } else if is_breakpoint {
1183 HaltReason::Breakpoint(BreakpointCause::Software)
1184 } else if is_dbreak_exception {
1185 HaltReason::Watchpoint
1186 } else if is_debug_interrupt {
1187 HaltReason::Request
1188 } else {
1189 HaltReason::Unknown
1190 }
1191 }
1192}
1193
bitfield::bitfield! {
    // The PS (program status) special register.
    //
    // NOTE(review): field meanings follow conventional Xtensa PS layout;
    // only `intlevel`, `user_mode` and `woe` are exercised in this file
    // (see `reset_and_halt`).
    #[derive(Copy, Clone)]
    pub struct ProgramStatus(u32);
    impl Debug;

    // Current interrupt level.
    pub intlevel, set_intlevel : 3, 0;

    // Exception mode flag.
    pub excm, set_excm : 4;

    // User vector mode flag.
    pub user_mode, set_user_mode: 5;

    // Privilege ring.
    pub ring, set_ring : 7, 6;

    // Old window base.
    pub owb, set_owb : 11, 8;

    // Call increment of the last windowed call.
    pub callinc, set_callinc : 17, 16;

    // Window-overflow-exception enable.
    pub woe, set_woe : 18;
}
u32_register!(ProgramStatus, Register::CurrentPs);
1224
/// The IBREAKENABLE special register: enable bits for the hardware
/// instruction breakpoints.
#[derive(Copy, Clone, Debug)]
pub struct IBreakEn(pub u32);
u32_register!(IBreakEn, SpecialRegister::IBreakEnable);

/// The ICOUNT special register, used to implement single-stepping (see
/// `XtensaCommunicationInterface::step`).
#[derive(Copy, Clone, Debug)]
pub struct ICount(pub u32);
u32_register!(ICount, SpecialRegister::ICount);

/// The ICOUNTLEVEL special register: the interrupt level below which ICOUNT
/// counts instructions.
#[derive(Copy, Clone, Debug)]
pub struct ICountLevel(pub u32);
u32_register!(ICountLevel, SpecialRegister::ICountLevel);

/// The program counter of the current debug level.
#[derive(Copy, Clone, Debug)]
pub struct ProgramCounter(pub u32);
u32_register!(ProgramCounter, Register::CurrentPc);
1244
/// Strategy for reading and writing target memory while the core is halted.
/// Implementations are word (32-bit) oriented; byte-level handling is done
/// by the callers in `XtensaCommunicationInterface`.
trait MemoryAccess {
    /// Runs `op` with the core halted, using the halting scheme appropriate
    /// for this access method.
    fn halted_access(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        op: &mut dyn FnMut(
            &mut XtensaCommunicationInterface,
            &mut dyn MemoryAccess,
        ) -> Result<(), XtensaError>,
    ) -> Result<(), XtensaError>;

    /// Saves the CPU registers this access method will clobber, so they can
    /// be restored before the core resumes.
    fn save_scratch_registers(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<(), XtensaError>;

    /// Sets up the start address for a sequence of word reads.
    fn load_initial_address_for_read(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        address: u32,
    ) -> Result<(), XtensaError>;
    /// Sets up the start address for a sequence of word writes.
    fn load_initial_address_for_write(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        address: u32,
    ) -> Result<(), XtensaError>;

    /// Schedules reading the current word without advancing the access
    /// position (used for the final word of a transfer).
    fn read_one(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<DeferredResultIndex, XtensaError>;

    /// Schedules reading the current word and advancing to the next one.
    fn read_one_and_continue(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<DeferredResultIndex, XtensaError>;

    /// Writes one word at the current position and advances to the next.
    fn write_one(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        data: u32,
    ) -> Result<(), XtensaError>;
}
1287
/// Memory access via the LDDR32.P/SDDR32.P debug instructions, which keep
/// the access address in A3. Usable only in regions where
/// `MemoryRegionProperties::fast_memory_access` is set.
struct FastMemoryAccess;
impl FastMemoryAccess {
    /// Creates the (stateless) fast accessor.
    fn new() -> Self {
        Self
    }
}
impl MemoryAccess for FastMemoryAccess {
    /// Fast accesses may use the optimistic halting scheme.
    fn halted_access(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        op: &mut dyn FnMut(
            &mut XtensaCommunicationInterface,
            &mut dyn MemoryAccess,
        ) -> Result<(), XtensaError>,
    ) -> Result<(), XtensaError> {
        interface.fast_halted_access(|this| op(this, self))
    }

    /// Only A3 is clobbered: it holds the access address.
    fn save_scratch_registers(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<(), XtensaError> {
        interface.ensure_register_saved(CpuRegister::A3)?;
        Ok(())
    }

    fn load_initial_address_for_read(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        address: u32,
    ) -> Result<(), XtensaError> {
        // Put the start address into A3...
        interface.schedule_write_cpu_register(CpuRegister::A3, address)?;
        interface
            .state
            .register_cache
            .mark_dirty(CpuRegister::A3.into());

        // ...and execute LDDR32.P once, which loads the first word into DDR
        // and advances A3 to the next word.
        interface
            .xdm
            .schedule_execute_instruction(Instruction::Lddr32P(CpuRegister::A3));

        Ok(())
    }

    fn load_initial_address_for_write(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        address: u32,
    ) -> Result<(), XtensaError> {
        interface.schedule_write_cpu_register(CpuRegister::A3, address)?;
        interface
            .state
            .register_cache
            .mark_dirty(CpuRegister::A3.into());

        // Stage SDDR32.P without executing it; it runs once per DDR write
        // (see `write_one`).
        interface
            .xdm
            .schedule_write_instruction(Instruction::Sddr32P(CpuRegister::A3));

        Ok(())
    }

    fn write_one(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
        data: u32,
    ) -> Result<(), XtensaError> {
        // Writing DDR and executing the staged SDDR32.P stores the word and
        // advances A3.
        interface.xdm.schedule_write_ddr_and_execute(data);
        Ok(())
    }

    fn read_one(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<DeferredResultIndex, XtensaError> {
        // Read DDR without re-executing LDDR32.P: the access position does
        // not advance.
        Ok(interface.xdm.schedule_read_ddr())
    }

    fn read_one_and_continue(
        &mut self,
        interface: &mut XtensaCommunicationInterface,
    ) -> Result<DeferredResultIndex, XtensaError> {
        // Read DDR and re-execute LDDR32.P to fetch the next word.
        Ok(interface.xdm.schedule_read_ddr_and_execute())
    }
}
1376
/// Fallback memory access strategy for cores where the auto-incrementing
/// DDR streaming instructions can't be used; the host tracks the transfer
/// position itself.
#[derive(Default)]
struct SlowMemoryAccess {
    // Base address of the current transfer window.
    current_address: u32,
    // Byte offset from `current_address` used by the read sequence.
    current_offset: u32,
    // Whether `current_address` has already been loaded into the scratch register.
    address_written: bool,
}

impl SlowMemoryAccess {
    /// Creates a strategy object with all tracking state zeroed.
    fn new() -> Self {
        Self::default()
    }
}
1392
1393impl MemoryAccess for SlowMemoryAccess {
1394 fn halted_access(
1395 &mut self,
1396 interface: &mut XtensaCommunicationInterface,
1397 op: &mut dyn FnMut(
1398 &mut XtensaCommunicationInterface,
1399 &mut dyn MemoryAccess,
1400 ) -> Result<(), XtensaError>,
1401 ) -> Result<(), XtensaError> {
1402 interface.halted_access(|this| op(this, self))
1403 }
1404
1405 fn save_scratch_registers(
1406 &mut self,
1407 interface: &mut XtensaCommunicationInterface,
1408 ) -> Result<(), XtensaError> {
1409 interface.ensure_register_saved(CpuRegister::A3)?;
1410 interface.ensure_register_saved(CpuRegister::A4)?;
1411 Ok(())
1412 }
1413
1414 fn load_initial_address_for_read(
1415 &mut self,
1416 _interface: &mut XtensaCommunicationInterface,
1417 address: u32,
1418 ) -> Result<(), XtensaError> {
1419 self.current_address = address;
1420
1421 Ok(())
1422 }
1423
1424 fn load_initial_address_for_write(
1425 &mut self,
1426 _interface: &mut XtensaCommunicationInterface,
1427 address: u32,
1428 ) -> Result<(), XtensaError> {
1429 self.current_address = address;
1430
1431 Ok(())
1432 }
1433
1434 fn read_one(
1435 &mut self,
1436 interface: &mut XtensaCommunicationInterface,
1437 ) -> Result<DeferredResultIndex, XtensaError> {
1438 if !self.address_written {
1439 interface.schedule_write_cpu_register(CpuRegister::A3, self.current_address)?;
1440 interface
1441 .state
1442 .register_cache
1443 .mark_dirty(CpuRegister::A3.into());
1444 self.current_offset = 0;
1445 self.address_written = true;
1446 }
1447
1448 interface
1449 .xdm
1450 .schedule_execute_instruction(Instruction::L32I(
1451 CpuRegister::A3,
1452 CpuRegister::A4,
1453 (self.current_offset / 4) as u8,
1454 ));
1455 self.current_offset += 4;
1456
1457 interface
1458 .state
1459 .register_cache
1460 .mark_dirty(CpuRegister::A4.into());
1461
1462 if self.current_offset == 1024 {
1463 self.current_address += self.current_offset;
1466 self.current_offset = 0;
1467 self.address_written = false;
1468 }
1469
1470 Ok(interface.schedule_read_cpu_register(CpuRegister::A4))
1471 }
1472
1473 fn read_one_and_continue(
1474 &mut self,
1475 interface: &mut XtensaCommunicationInterface,
1476 ) -> Result<DeferredResultIndex, XtensaError> {
1477 self.read_one(interface)
1478 }
1479
1480 fn write_one(
1481 &mut self,
1482 interface: &mut XtensaCommunicationInterface,
1483 data: u32,
1484 ) -> Result<(), XtensaError> {
1485 interface.schedule_write_cpu_register(CpuRegister::A3, self.current_address)?;
1487 interface
1488 .state
1489 .register_cache
1490 .mark_dirty(CpuRegister::A3.into());
1491
1492 interface.schedule_write_cpu_register(CpuRegister::A4, data)?;
1493 interface
1494 .state
1495 .register_cache
1496 .mark_dirty(CpuRegister::A4.into());
1497
1498 self.current_address += 4;
1500
1501 interface
1503 .xdm
1504 .schedule_execute_instruction(Instruction::S32I(CpuRegister::A3, CpuRegister::A4, 0));
1505
1506 Ok(())
1507 }
1508}
1509
/// A result that is either already known or must be produced later by
/// reading a register.
pub(crate) enum MaybeDeferredResultIndex {
    /// The value is known up front.
    Value(u32),

    /// The value is obtained by reading this register — presumably once the
    /// batched probe commands have completed (TODO: confirm against callers).
    Deferred(Register),
}