1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
use super::{
configuration::{self, CoreConfig, SessionConfig},
core_data::{CoreData, CoreHandle},
};
use crate::{
FormatKind,
cmd::dap_server::{
DebuggerError,
debug_adapter::{
dap::{adapter::DebugAdapter, dap_types::Source},
protocol::ProtocolAdapter,
},
},
util::{common_options::OperationError, rtt},
};
use anyhow::{Result, anyhow};
use probe_rs::{
CoreStatus, Session, VectorCatchCondition,
config::{Registry, TargetSelector},
probe::list::Lister,
rtt::ScanRegion,
};
use probe_rs_debug::{
DebugRegisters, SourceLocation, debug_info::DebugInfo, exception_handler_for_core,
};
use std::{env::set_current_dir, time::Duration};
use time::UtcOffset;
/// The supported breakpoint types
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum BreakpointType {
    /// A breakpoint was requested using an instruction address, and usually a result of a user requesting a
    /// breakpoint while in a 'disassembly' view.
    InstructionBreakpoint,
    /// A breakpoint that has a Source, and usually a result of a user requesting a breakpoint while in a 'source' view.
    SourceBreakpoint {
        /// The DAP `Source` the breakpoint was requested against (boxed to keep the enum variant small).
        source: Box<Source>,
        /// Whether the request named one specific source location, or applies to all locations in `source`.
        location: SourceLocationScope,
    },
}
/// Breakpoint requests will either refer to a specific `SourceLocation`, or be unspecified, in which case they refer to
/// all breakpoints for the Source.
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SourceLocationScope {
    /// Applies to every breakpoint location in the associated Source.
    All,
    /// Applies only to the given [`SourceLocation`].
    Specific(SourceLocation),
}
/// Provide the storage and methods to handle various [`BreakpointType`]
#[derive(Clone, Debug)]
pub struct ActiveBreakpoint {
    /// The kind of request (instruction vs. source view) that created this breakpoint.
    pub(crate) breakpoint_type: BreakpointType,
    /// The instruction address on the target where this breakpoint is set.
    pub(crate) address: u64,
}
/// SessionData is designed to be similar to [probe_rs::Session], in as much that it provides handles to the [CoreHandle] instances for each of the available [probe_rs::Core] involved in the debug session.
/// To get access to the [CoreHandle] for a specific [probe_rs::Core], use [`SessionData::attach_core`].
/// TODO: Adjust [SessionConfig] to allow multiple cores (and if appropriate, their binaries) to be specified.
pub(crate) struct SessionData {
    pub(crate) session: Session,
    /// [SessionData] will manage one [CoreData] per target core, that is also present in [SessionConfig::core_configs]
    pub(crate) core_data: Vec<CoreData>,
    /// Offset used for RTT timestamps (passed to `attach_to_rtt` during polling).
    ///
    /// Getting the offset can fail, so it's better to store it.
    timestamp_offset: UtcOffset,
}
impl SessionData {
pub(crate) fn new(
registry: &mut Registry,
lister: &Lister,
config: &mut configuration::SessionConfig,
timestamp_offset: UtcOffset,
) -> Result<Self, DebuggerError> {
let target_selector = TargetSelector::from(config.chip.as_deref());
let options = config.probe_options().load(registry)?;
let target_probe = options.attach_probe(lister)?;
let mut target_session = options
.attach_session(target_probe, target_selector)
.map_err(|operation_error| {
match operation_error {
OperationError::AttachingFailed {
source,
connect_under_reset,
} => match source {
probe_rs::Error::Timeout => {
let shared_cause = "This can happen if the target is in a state where it can not be attached to. A hard reset during attach usually helps. For probes that support this option, please try using the `connect_under_reset` option.";
if !connect_under_reset {
DebuggerError::UserMessage(format!("{source} {shared_cause}"))
} else {
DebuggerError::UserMessage(format!("{source} {shared_cause} It is possible that your probe does not support this behaviour, or something else is preventing the attach. Please try again without `connect_under_reset`."))
}
}
other_attach_error => other_attach_error.into(),
},
// Return the orginal error.
other => other.into(),
}
})?;
// Change the current working directory if `config.cwd` is `Some(T)`.
if let Some(new_cwd) = config.cwd.clone() {
set_current_dir(new_cwd.as_path()).map_err(|err| {
anyhow!(
"Failed to set current working directory to: {:?}, {:?}",
new_cwd,
err
)
})?;
};
// `FlashingConfig` probe level initialization.
// `CoreConfig` probe level initialization.
if config.core_configs.len() != 1 {
// TODO: For multi-core, allow > 1.
return Err(DebuggerError::Other(anyhow!(
"probe-rs-debugger requires that one, and only one, core be configured for debugging."
)));
}
// Filter `CoreConfig` entries based on those that match an actual core on the target probe.
let valid_core_configs = config
.core_configs
.iter()
.filter(|&core_config| {
target_session
.list_cores()
.iter()
.any(|(target_core_index, _)| *target_core_index == core_config.core_index)
})
.collect::<Vec<_>>();
let mut core_data_vec = vec![];
for core_configuration in valid_core_configs {
if core_configuration.catch_hardfault || core_configuration.catch_reset {
let mut core = target_session.core(core_configuration.core_index)?;
let was_halted = core.core_halted()?;
if !was_halted {
core.halt(Duration::from_millis(100))?;
}
if core_configuration.catch_hardfault {
match core.enable_vector_catch(VectorCatchCondition::HardFault) {
Ok(_) | Err(probe_rs::Error::NotImplemented(_)) => {} // Don't output an error if vector_catch hasn't been implemented
Err(e) => tracing::error!("Failed to enable_vector_catch: {:?}", e),
}
}
if core_configuration.catch_reset {
match core.enable_vector_catch(VectorCatchCondition::CoreReset) {
Ok(_) | Err(probe_rs::Error::NotImplemented(_)) => {} // Don't output an error if vector_catch hasn't been implemented
Err(e) => tracing::error!("Failed to enable_vector_catch: {:?}", e),
}
}
if was_halted {
core.run()?;
}
}
core_data_vec.push(CoreData {
core_index: core_configuration.core_index,
last_known_status: CoreStatus::Unknown,
target_name: format!(
"{}-{}",
core_configuration.core_index,
target_session.target().name
),
debug_info: debug_info_from_binary(core_configuration)?,
static_variables: None,
core_peripherals: None,
stack_frames: vec![],
breakpoints: vec![],
rtt_scan_ranges: ScanRegion::Ranges(vec![]),
rtt_connection: None,
rtt_client: None,
clear_rtt_header: false,
rtt_header_cleared: false,
})
}
let mut this = SessionData {
session: target_session,
core_data: core_data_vec,
timestamp_offset,
};
this.load_rtt_location(config)?;
Ok(this)
}
pub(crate) fn load_rtt_location(
&mut self,
config: &configuration::SessionConfig,
) -> Result<(), DebuggerError> {
// Filter `CoreConfig` entries based on those that match an actual core on the target probe.
let valid_core_configs = config.core_configs.iter().filter(|&core_config| {
self.session
.list_cores()
.iter()
.any(|(target_core_index, _)| *target_core_index == core_config.core_index)
});
let image_format = config
.flashing_config
.format_options
.to_format_kind(self.session.target());
for core_configuration in valid_core_configs {
let Some(core_data) = self
.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_configuration.core_index)
else {
continue;
};
core_data.rtt_scan_ranges = match core_configuration.program_binary.as_ref() {
Some(program_binary)
if matches!(image_format, FormatKind::Elf | FormatKind::Idf) =>
{
let elf = std::fs::read(program_binary)
.map_err(|error| anyhow!("Error attempting to attach to RTT: {error}"))?;
match rtt::get_rtt_symbol_from_bytes(&elf) {
Ok(address) => ScanRegion::Exact(address),
// Do not scan the memory for the control block.
_ => ScanRegion::Ranges(vec![]),
}
}
_ => ScanRegion::Ranges(vec![]),
};
}
Ok(())
}
/// Reload the a specific core's debug info from the binary file.
pub(crate) fn load_debug_info_for_core(
&mut self,
core_configuration: &CoreConfig,
) -> Result<(), DebuggerError> {
if let Some(core_data) = self
.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_configuration.core_index)
{
core_data.debug_info = debug_info_from_binary(core_configuration)?;
Ok(())
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// Do a 'light weight'(just get references to existing data structures) attach to the core and return relevant debug data.
pub(crate) fn attach_core(&mut self, core_index: usize) -> Result<CoreHandle, DebuggerError> {
if let (Ok(target_core), Some(core_data)) = (
self.session.core(core_index),
self.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_index),
) {
Ok(CoreHandle {
core: target_core,
core_data,
})
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
    /// The target has no way of notifying the debug adapter when things changes, so we have to constantly poll it to determine:
    /// - Whether the target cores are running, and what their actual status is.
    /// - Whether the target cores have data in their RTT buffers that we need to read and pass to the client.
    ///
    /// To optimize this polling process while also optimizing the reading of RTT data, we apply a couple of principles:
    /// 1. Sleep (nap for a short duration) between polling each target core, but:
    ///    - Only sleep IF the core's status hasn't changed AND there was no RTT data in the last poll.
    ///    - Otherwise move on without delay, to keep things flowing as fast as possible.
    ///    - The justification is that any client side CPU used to keep polling is a small price to pay for maximum throughput of debug requests and RTT from the probe.
    /// 2. Check all target cores to ensure they have a configured and initialized RTT connections and if they do, process the RTT data.
    ///    - To keep things efficient, the polling of RTT data is done only when we expect there to be data available.
    ///    - We check for RTT only when the core has an RTT connection configured, and one of the following is true:
    ///      - While the core is NOT halted, because core processing can generate new data at any time.
    ///      - The first time we have entered halted status, to ensure the buffers are drained. After that, for as long as we remain in halted state, we don't need to check RTT again.
    ///
    /// Return a Vec of [`CoreStatus`] (one entry per core) after this process has completed, as well as a boolean indicating whether we should consider a short delay before the next poll.
    #[tracing::instrument(level = "trace", skip_all)]
    pub(crate) async fn poll_cores<P: ProtocolAdapter>(
        &mut self,
        session_config: &SessionConfig,
        debug_adapter: &mut DebugAdapter<P>,
    ) -> Result<(Vec<CoreStatus>, bool), DebuggerError> {
        // By default, we will have a small delay between polls, and will disable it if we know the last poll returned data, on the assumption that there might be at least one more batch of data.
        let mut suggest_delay_required = true;
        let mut status_of_cores: Vec<CoreStatus> = vec![];
        // Copied out of `self` up front, because `attach_core` below needs `&mut self`.
        let timestamp_offset = self.timestamp_offset;
        let cores_halted_previously = debug_adapter.all_cores_halted;
        // Always set `all_cores_halted` to true, until one core is found to be running.
        debug_adapter.all_cores_halted = true;
        for core_config in session_config.core_configs.iter() {
            let Ok(mut target_core) = self.attach_core(core_config.core_index) else {
                tracing::debug!(
                    "Failed to attach to target core #{}. Cannot poll for RTT data.",
                    core_config.core_index
                );
                continue;
            };
            // We need to poll the core to determine its status.
            // Errors are surfaced to the client before being propagated to the caller.
            let current_core_status =
                target_core.poll_core(debug_adapter).inspect_err(|error| {
                    let _ = debug_adapter.show_error_message(error);
                })?;
            // If appropriate, check for RTT data.
            if core_config.rtt_config.enabled {
                if let Some(core_rtt) = &mut target_core.core_data.rtt_connection {
                    // We should poll the target for rtt data, and if any RTT data was processed, we clear the flag.
                    if core_rtt
                        .process_rtt_data(debug_adapter, &mut target_core.core)
                        .await
                    {
                        suggest_delay_required = false;
                    }
                } else {
                    // No RTT connection yet: attempt a (re-)attach on every poll until one succeeds.
                    // An attach failure is reported to the client but does not abort polling.
                    #[allow(clippy::unwrap_used)]
                    if let Err(error) = target_core.attach_to_rtt(
                        debug_adapter,
                        core_config.program_binary.as_ref().unwrap(),
                        &core_config.rtt_config,
                        timestamp_offset,
                    ) {
                        debug_adapter
                            .show_error_message(&DebuggerError::Other(error))
                            .ok();
                    }
                }
            }
            // If the core is running, we set the flag to indicate that at least one core is not halted.
            // By setting it here, we ensure that RTT will be checked at least once after the core has halted.
            if !current_core_status.is_halted() {
                debug_adapter.all_cores_halted = false;
                // If currently halted, and was previously running
                // update the stack frames
            } else if !cores_halted_previously {
                let _stackframe_span = tracing::debug_span!("Update Stack Frames").entered();
                tracing::debug!(
                    "Updating the stack frame data for core #{}",
                    target_core.core.id()
                );
                // Rebuild the static variable cache and unwind the stack once per halt
                // transition, so the client sees fresh frames without re-unwinding on
                // every subsequent poll while halted.
                let initial_registers = DebugRegisters::from_core(&mut target_core.core);
                let exception_interface = exception_handler_for_core(target_core.core.core_type());
                let instruction_set = target_core.core.instruction_set().ok();
                target_core.core_data.static_variables =
                    Some(target_core.core_data.debug_info.create_static_scope_cache());
                target_core.core_data.stack_frames = target_core.core_data.debug_info.unwind(
                    &mut target_core.core,
                    initial_registers,
                    exception_interface.as_ref(),
                    instruction_set,
                )?;
            }
            status_of_cores.push(current_core_status);
        }
        Ok((status_of_cores, suggest_delay_required))
    }
pub(crate) fn clean_up(&mut self, session_config: &SessionConfig) -> Result<(), DebuggerError> {
for core_config in session_config.core_configs.iter() {
if core_config.rtt_config.enabled {
let Ok(mut target_core) = self.attach_core(core_config.core_index) else {
tracing::debug!(
"Failed to attach to target core #{}. Cannot clean up.",
core_config.core_index
);
continue;
};
if let Some(core_rtt) = &mut target_core.core_data.rtt_connection {
core_rtt.clean_up(&mut target_core.core)?;
}
}
}
Ok(())
}
}
/// Load [`DebugInfo`] from the `program_binary` configured for a core.
///
/// Returns an error when no `program_binary` is configured for the core, or when the
/// file cannot be loaded/parsed as debug info.
fn debug_info_from_binary(core_configuration: &CoreConfig) -> anyhow::Result<DebugInfo> {
    match core_configuration.program_binary {
        Some(ref binary_path) => {
            DebugInfo::from_file(binary_path).map_err(|error| anyhow!(error))
        }
        None => Err(anyhow!(
            "Please provide a valid `program_binary` for debug core: {}",
            core_configuration.core_index
        )),
    }
}