1use crate::dwarf_analyzer::FunctionInfo;
7use crate::error::Result;
8use crate::register_reader::{ArgumentValue, CapturedArgument, RegisterContext};
9use dashmap::DashMap;
10use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
11use std::sync::{Arc, Mutex, RwLock};
12use std::time::{SystemTime, UNIX_EPOCH};
13
/// Identifier for a single call node; 32 bits keeps per-node bookkeeping small
/// (ids are handed out from `CallTreeManager::next_node_id`, starting at 1).
pub type NodeId = u32;

/// OS-level thread identifier (kernel tid on Linux; a hashed `std::thread::ThreadId`
/// on other platforms — see `current_thread_id`).
pub type ThreadId = u64;

/// Fully detailed record of one function invocation: identity, timing,
/// captured arguments/registers, and links into the per-thread call tree.
#[derive(Debug)]
pub struct CallNode {
    /// Unique id assigned at entry time.
    pub id: NodeId,
    /// Interned function name; falls back to a formatted address when no
    /// DWARF info is available (see `CallTreeManager::function_enter`).
    pub function_name: String,
    /// Address of the called function.
    pub function_address: u64,
    /// Address of the call instruction that invoked this function.
    pub call_site: u64,

    /// Entry timestamp, microseconds since the Unix epoch.
    pub start_time: u64,
    /// Exit timestamp; `None` while the call is still in flight.
    pub end_time: Option<u64>,
    /// `end_time - start_time` (saturating); set on exit.
    pub duration_us: Option<u64>,

    /// Thread this call executed on.
    pub thread_id: ThreadId,
    /// Nesting depth: length of the thread's call stack at entry.
    pub call_depth: usize,

    /// DWARF-derived metadata for the function, when available.
    pub function_info: Option<FunctionInfo>,
    /// Argument values captured at entry.
    pub arguments: Vec<CapturedArgument>,
    /// Register snapshot captured at entry, when available.
    pub register_context: Option<RegisterContext>,
    /// Return value captured at exit, when available.
    pub return_value: Option<ArgumentValue>,

    /// Caller node, or `None` for a thread's root call.
    pub parent: Option<NodeId>,
    /// Direct callees, in call order.
    pub children: Vec<NodeId>,

    /// True when this node was flagged as a `pthread_create` call site.
    pub is_pthread_create: bool,
    /// Tid of the thread spawned by this call, once attributed.
    pub created_thread_id: Option<ThreadId>,

    /// True once the matching function exit has been recorded.
    pub is_completed: bool,
}

/// Lightweight call record used by the fast entry/exit path: timing and tree
/// topology only — no name, DWARF info, arguments, registers, or duration
/// field (duration is derived on demand in `get_any_node`).
#[derive(Debug)]
pub struct FastCallNode {
    /// Unique id assigned at entry time.
    pub id: NodeId,
    /// Address of the called function.
    pub function_address: u64,
    /// Address of the call instruction.
    pub call_site: u64,
    /// Entry timestamp, microseconds since the Unix epoch.
    pub start_time: u64,
    /// Exit timestamp; `None` while in flight.
    pub end_time: Option<u64>,
    /// Thread this call executed on.
    pub thread_id: ThreadId,
    /// Nesting depth at entry.
    pub call_depth: usize,
    /// Caller node (may live in either node map).
    pub parent: Option<NodeId>,
    /// Direct callees, in call order.
    pub children: Vec<NodeId>,
    /// True once the matching exit has been recorded.
    pub is_completed: bool,
}

/// Per-thread call tree state: the stack of in-flight calls plus summary
/// counters. One instance exists per observed thread.
#[derive(Debug)]
pub struct ThreadCallTree {
    /// Thread this tree belongs to.
    pub thread_id: ThreadId,
    /// Display name, formatted as "Thread-<tid>" at creation.
    pub thread_name: String,
    /// Timestamp when the thread was first seen, microseconds since epoch.
    pub start_time: u64,
    /// End timestamp; initialized to `None` (not updated in this file —
    /// presumably set elsewhere when the thread exits; confirm at call sites).
    pub end_time: Option<u64>,
    /// Initialized to `true`; not cleared in this file.
    pub is_active: bool,

    /// Tid of the creating thread; initialized to `None` here (relationship
    /// data lives in `CallTreeManager::thread_relationships`).
    pub parent_thread_id: Option<ThreadId>,
    /// Initialized to `false`; not set in this file — verify who flags main.
    pub is_main_thread: bool,
    /// Name of the function that created this thread; initialized to `None`.
    pub creation_function: Option<String>,

    /// First call recorded on this thread (a node with no parent).
    pub root_node: Option<NodeId>,
    /// Innermost in-flight call, i.e. the top of `call_stack`.
    pub current_node: Option<NodeId>,
    /// Node ids of all in-flight calls, outermost first.
    pub call_stack: Vec<NodeId>,
    /// High-water mark of `call_stack.len()`.
    pub max_depth: usize,

    /// Total function entries recorded on this thread.
    pub total_calls: u64,
    /// Error counter; never incremented in this file — confirm its writer.
    pub total_errors: u64,
}

/// Parent/child link recorded when a newly started thread is attributed to a
/// pending `pthread_create` call (see `CallTreeManager::thread_started`).
#[derive(Debug, Clone)]
pub struct ThreadRelationship {
    /// Thread that issued the create call.
    pub parent_thread_id: ThreadId,
    /// Thread that was created.
    pub child_thread_id: ThreadId,
    /// Node of the `pthread_create` call in the parent's tree.
    pub creation_node_id: NodeId,
    /// Name of the creating function (e.g. the pthread_create constant).
    pub creation_function: String,
    /// Timestamp of the create call, microseconds since epoch.
    pub creation_time: u64,
}

/// Central, thread-safe store for all call trees in a tracing session.
/// Concurrent maps (`DashMap`) hold nodes and per-thread trees; atomics hold
/// session-wide counters.
pub struct CallTreeManager {
    /// Full-detail nodes, keyed by id.
    nodes: DashMap<NodeId, Arc<RwLock<CallNode>>>,
    /// Fast-path nodes, keyed by id (disjoint id space shared with `nodes`).
    fast_nodes: DashMap<NodeId, Arc<RwLock<FastCallNode>>>,
    /// One call tree per observed thread.
    thread_trees: DashMap<ThreadId, Arc<RwLock<ThreadCallTree>>>,
    /// Parent/child links, keyed by the *child* thread id.
    thread_relationships: DashMap<ThreadId, ThreadRelationship>,

    /// Next node id to hand out; starts at 1.
    next_node_id: AtomicU32,

    /// Total nodes ever created (both maps).
    total_nodes: AtomicU64,
    /// Total threads ever seen.
    total_threads: AtomicU64,
    /// Session start, microseconds since epoch.
    session_start_time: u64,

    /// FIFO queue of `pthread_create` calls awaiting a thread to attribute
    /// them to (see `mark_pthread_create` / `thread_started`).
    pending_thread_creations: Arc<Mutex<Vec<PendingThreadCreation>>>,
}

/// Queued record of a `pthread_create` call whose spawned thread has not yet
/// been observed; matched FIFO in `thread_started`.
#[derive(Debug)]
struct PendingThreadCreation {
    /// Thread that issued the create call.
    parent_thread_id: ThreadId,
    /// Node of the create call in the parent's tree.
    creation_node_id: NodeId,
    /// Timestamp of the create call, microseconds since epoch.
    creation_time: u64,
}

133impl Default for CallTreeManager {
134 fn default() -> Self {
135 Self::new()
136 }
137}
138
139impl CallTreeManager {
140 pub fn new() -> Self {
142 Self {
143 nodes: DashMap::new(),
144 fast_nodes: DashMap::new(),
145 thread_trees: DashMap::new(),
146 thread_relationships: DashMap::new(),
147 next_node_id: AtomicU32::new(1),
148 total_nodes: AtomicU64::new(0),
149 total_threads: AtomicU64::new(0),
150 session_start_time: current_timestamp_us(),
151 pending_thread_creations: Arc::new(Mutex::new(Vec::new())),
152 }
153 }
154
155 pub fn function_enter(
157 &self,
158 function_address: u64,
159 call_site: u64,
160 function_info: Option<FunctionInfo>,
161 arguments: Vec<CapturedArgument>,
162 register_context: Option<RegisterContext>,
163 ) -> Result<NodeId> {
164 let thread_id = current_thread_id();
165
166 self.ensure_thread_tree(thread_id);
168
169 let node_id = self.next_node_id.fetch_add(1, Ordering::Relaxed);
171
172 let function_name = function_info
173 .as_ref()
174 .map(|info| crate::intern_string(&info.name))
175 .unwrap_or_else(|| crate::intern_string(&crate::format_address(function_address)));
176
177 let node = CallNode {
178 id: node_id,
179 function_name,
180 function_address,
181 call_site,
182 start_time: current_timestamp_us(),
183 end_time: None,
184 duration_us: None,
185 thread_id,
186 call_depth: 0, function_info,
188 arguments,
189 register_context,
190 return_value: None,
191 parent: None,
192 children: Vec::with_capacity(4), is_pthread_create: false,
194 created_thread_id: None,
195 is_completed: false,
196 };
197
198 self.nodes.insert(node_id, Arc::new(RwLock::new(node)));
200 self.total_nodes.fetch_add(1, Ordering::Relaxed);
201
202 if let Some(tree_ref) = self.thread_trees.get(&thread_id) {
204 let mut tree = tree_ref.write().unwrap();
205
206 let call_depth = tree.call_stack.len();
208 if let Some(node_ref) = self.nodes.get(&node_id) {
209 node_ref.write().unwrap().call_depth = call_depth;
210 }
211
212 if let Some(parent_id) = tree.current_node {
214 if let Some(node_ref) = self.nodes.get(&node_id) {
215 node_ref.write().unwrap().parent = Some(parent_id);
216 }
217
218 if let Some(parent_ref) = self.nodes.get(&parent_id) {
220 parent_ref.write().unwrap().children.push(node_id);
221 }
222 } else {
223 tree.root_node = Some(node_id);
225 }
226
227 tree.current_node = Some(node_id);
229 tree.call_stack.push(node_id);
230 tree.total_calls += 1;
231
232 if tree.call_stack.len() > tree.max_depth {
233 tree.max_depth = tree.call_stack.len();
234 }
235 }
236
237 Ok(node_id)
238 }
239
240 #[inline]
243 pub fn function_enter_fast_path(
244 &self,
245 function_address: u64,
246 call_site: u64,
247 ) -> Result<NodeId> {
248 let thread_id = current_thread_id();
249
250 self.ensure_thread_tree(thread_id);
252
253 let node_id = self.next_node_id.fetch_add(1, Ordering::Relaxed);
255
256 let node = FastCallNode {
257 id: node_id,
258 function_address,
259 call_site,
260 start_time: current_timestamp_us(),
261 end_time: None,
262 thread_id,
263 call_depth: 0, parent: None,
265 children: Vec::with_capacity(4), is_completed: false,
267 };
268
269 self.fast_nodes.insert(node_id, Arc::new(RwLock::new(node)));
271 self.total_nodes.fetch_add(1, Ordering::Relaxed);
272
273 if let Some(tree_ref) = self.thread_trees.get(&thread_id) {
275 let mut tree = tree_ref.write().unwrap();
276
277 let call_depth = tree.call_stack.len();
279 if let Some(node_ref) = self.fast_nodes.get(&node_id) {
280 node_ref.write().unwrap().call_depth = call_depth;
281 }
282
283 if let Some(parent_id) = tree.current_node {
285 if let Some(node_ref) = self.fast_nodes.get(&node_id) {
286 node_ref.write().unwrap().parent = Some(parent_id);
287 }
288
289 if let Some(parent_ref) = self.fast_nodes.get(&parent_id) {
291 parent_ref.write().unwrap().children.push(node_id);
292 } else if let Some(parent_ref) = self.nodes.get(&parent_id) {
293 parent_ref.write().unwrap().children.push(node_id);
294 }
295 } else {
296 tree.root_node = Some(node_id);
298 }
299
300 tree.current_node = Some(node_id);
302 tree.call_stack.push(node_id);
303 tree.total_calls += 1;
304
305 if tree.call_stack.len() > tree.max_depth {
306 tree.max_depth = tree.call_stack.len();
307 }
308 }
309
310 Ok(node_id)
311 }
312
313 pub fn function_exit(&self, function_address: u64) -> Result<()> {
315 self.function_exit_with_return_value(function_address, None)
316 }
317
318 pub fn function_exit_with_return_value(
320 &self,
321 _function_address: u64,
322 return_value: Option<ArgumentValue>,
323 ) -> Result<()> {
324 let thread_id = current_thread_id();
325
326 if let Some(tree_ref) = self.thread_trees.get(&thread_id) {
327 let mut tree = tree_ref.write().unwrap();
328
329 if let Some(current_id) = tree.current_node {
330 if let Some(node_ref) = self.nodes.get(¤t_id) {
332 let mut node = node_ref.write().unwrap();
333 let now = current_timestamp_us();
334 node.end_time = Some(now);
335 node.duration_us = Some(now.saturating_sub(node.start_time));
336 node.return_value = return_value;
337 node.is_completed = true;
338 }
339
340 tree.call_stack.pop();
342
343 if let Some(parent_id) = tree.call_stack.last() {
345 tree.current_node = Some(*parent_id);
346 } else {
347 tree.current_node = None;
348 }
349 }
350 }
351
352 Ok(())
353 }
354
355 #[inline]
358 pub fn function_exit_fast_path(&self, _function_address: u64) -> Result<()> {
359 let thread_id = current_thread_id();
360
361 if let Some(tree_ref) = self.thread_trees.get(&thread_id) {
362 let mut tree = tree_ref.write().unwrap();
363
364 if let Some(current_id) = tree.current_node {
365 if let Some(node_ref) = self.fast_nodes.get(¤t_id) {
367 let mut node = node_ref.write().unwrap();
368 let now = current_timestamp_us();
369 node.end_time = Some(now);
370 node.is_completed = true;
371 } else if let Some(node_ref) = self.nodes.get(¤t_id) {
372 let mut node = node_ref.write().unwrap();
373 let now = current_timestamp_us();
374 node.end_time = Some(now);
375 node.duration_us = Some(now.saturating_sub(node.start_time));
376 node.is_completed = true;
377 }
378
379 tree.call_stack.pop();
381
382 if let Some(parent_id) = tree.call_stack.last() {
384 tree.current_node = Some(*parent_id);
385 } else {
386 tree.current_node = None;
387 }
388 }
389 }
390
391 Ok(())
392 }
393
394 pub fn mark_pthread_create(&self, node_id: NodeId) -> Result<()> {
396 if let Some(node_ref) = self.nodes.get(&node_id) {
397 let mut node = node_ref.write().unwrap();
398 node.is_pthread_create = true;
399
400 let pending = PendingThreadCreation {
402 parent_thread_id: node.thread_id,
403 creation_node_id: node_id,
404 creation_time: current_timestamp_us(),
405 };
406
407 self.pending_thread_creations.lock().unwrap().push(pending);
408 }
409
410 Ok(())
411 }
412
413 pub fn thread_started(&self, new_thread_id: ThreadId) -> Result<()> {
415 let mut pending = self.pending_thread_creations.lock().unwrap();
416
417 if let Some(pos) = pending.iter().position(|_| true) {
418 let creation = pending.remove(pos);
419
420 let relationship = ThreadRelationship {
422 parent_thread_id: creation.parent_thread_id,
423 child_thread_id: new_thread_id,
424 creation_node_id: creation.creation_node_id,
425 creation_function: crate::string_constants::PTHREAD_CREATE.to_string(),
426 creation_time: creation.creation_time,
427 };
428
429 self.thread_relationships
430 .insert(new_thread_id, relationship);
431
432 if let Some(node_ref) = self.nodes.get(&creation.creation_node_id) {
434 node_ref.write().unwrap().created_thread_id = Some(new_thread_id);
435 }
436 }
437
438 Ok(())
439 }
440
441 fn ensure_thread_tree(&self, thread_id: ThreadId) {
443 if !self.thread_trees.contains_key(&thread_id) {
444 let tree = ThreadCallTree {
445 thread_id,
446 thread_name: {
447 if let Ok(result) = std::panic::catch_unwind(|| {
449 crate::FORMAT_BUFFER.with(|buffer| {
450 let mut buf = buffer.borrow_mut();
451 buf.clear();
452 use std::fmt::Write;
453 write!(buf, "Thread-{}", thread_id).unwrap();
454 buf.clone()
455 })
456 }) {
457 result
458 } else {
459 format!("Thread-{}", thread_id)
460 }
461 },
462 start_time: current_timestamp_us(),
463 end_time: None,
464 is_active: true,
465 parent_thread_id: None,
466 is_main_thread: false, creation_function: None,
468 root_node: None,
469 current_node: None,
470 call_stack: Vec::with_capacity(32), max_depth: 0,
472 total_calls: 0,
473 total_errors: 0,
474 };
475
476 self.thread_trees
477 .insert(thread_id, Arc::new(RwLock::new(tree)));
478 self.total_threads.fetch_add(1, Ordering::Relaxed);
479
480 let _ = self.thread_started(thread_id);
482 }
483 }
484
485 pub fn get_statistics(&self) -> CallTreeStatistics {
487 CallTreeStatistics {
488 total_nodes: self.total_nodes.load(Ordering::Relaxed),
489 total_threads: self.total_threads.load(Ordering::Relaxed),
490 active_threads: self.thread_trees.len() as u64,
491 session_duration_us: current_timestamp_us().saturating_sub(self.session_start_time),
492 }
493 }
494
495 pub fn get_thread_ids(&self) -> Vec<ThreadId> {
497 self.thread_trees.iter().map(|entry| *entry.key()).collect()
498 }
499
500 pub fn get_thread_tree(&self, thread_id: ThreadId) -> Option<Arc<RwLock<ThreadCallTree>>> {
502 self.thread_trees
503 .get(&thread_id)
504 .map(|entry| entry.value().clone())
505 }
506
507 pub fn get_node(&self, node_id: NodeId) -> Option<Arc<RwLock<CallNode>>> {
509 self.nodes.get(&node_id).map(|entry| entry.value().clone())
510 }
511
512 pub fn get_any_node(&self, node_id: NodeId) -> Option<Arc<RwLock<CallNode>>> {
515 if let Some(node) = self.nodes.get(&node_id) {
517 return Some(node.value().clone());
518 }
519
520 if let Some(fast_node_ref) = self.fast_nodes.get(&node_id) {
522 let fast_node = fast_node_ref.read().unwrap();
523
524 let full_node = CallNode {
526 id: fast_node.id,
527 function_name: crate::format_address(fast_node.function_address), function_address: fast_node.function_address,
529 call_site: fast_node.call_site,
530 start_time: fast_node.start_time,
531 end_time: fast_node.end_time,
532 duration_us: fast_node
533 .end_time
534 .map(|end| end.saturating_sub(fast_node.start_time)),
535 thread_id: fast_node.thread_id,
536 call_depth: fast_node.call_depth,
537 function_info: None, arguments: Vec::new(), register_context: None, return_value: None, parent: fast_node.parent,
542 children: fast_node.children.clone(),
543 is_pthread_create: false, created_thread_id: None,
545 is_completed: fast_node.is_completed,
546 };
547
548 return Some(Arc::new(RwLock::new(full_node)));
549 }
550
551 None
552 }
553}
554
/// Snapshot of session-wide counters, produced by
/// `CallTreeManager::get_statistics`.
#[derive(Debug, Clone)]
pub struct CallTreeStatistics {
    /// Total nodes ever created (full + fast).
    pub total_nodes: u64,
    /// Total threads ever observed.
    pub total_threads: u64,
    /// Number of thread trees currently held.
    pub active_threads: u64,
    /// Time since the manager was constructed, in microseconds.
    pub session_duration_us: u64,
}

/// Microseconds since the Unix epoch; returns 0 if the system clock reads
/// earlier than the epoch (truncated to 64 bits far in the future).
fn current_timestamp_us() -> u64 {
    let since_epoch = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default();
    since_epoch.as_micros() as u64
}

572fn current_thread_id() -> ThreadId {
574 #[cfg(target_os = "linux")]
576 {
577 unsafe { libc::syscall(libc::SYS_gettid) as u64 }
578 }
579
580 #[cfg(not(target_os = "linux"))]
581 {
582 use std::collections::hash_map::DefaultHasher;
584 use std::hash::{Hash, Hasher};
585
586 let mut hasher = DefaultHasher::new();
587 thread::current().id().hash(&mut hasher);
588 hasher.finish()
589 }
590}