1use candid::CandidType;
6use canic_cdk::utils::time::now_millis;
7use serde::{Deserialize, Serialize};
8use std::{cell::RefCell, cmp::Ordering, collections::BTreeMap};
9
/// Snapshot of all event metrics collected since the current window opened.
#[derive(CandidType, Clone, Debug, Deserialize, Serialize)]
pub struct EventState {
    /// Global (non-entity-scoped) operation counters.
    pub(crate) ops: EventOps,
    /// Instruction-count performance aggregates (totals and per-call maxima).
    pub(crate) perf: EventPerf,
    /// Per-entity counters, keyed by entity path.
    pub(crate) entities: BTreeMap<String, EntityCounters>,
    /// Start of the current metrics window, in milliseconds (from `now_millis`).
    pub(crate) window_start_ms: u64,
}
21
impl EventState {
    /// Builds a state from pre-populated counters and a window start timestamp.
    #[must_use]
    pub const fn new(
        ops: EventOps,
        perf: EventPerf,
        entities: BTreeMap<String, EntityCounters>,
        window_start_ms: u64,
    ) -> Self {
        Self {
            ops,
            perf,
            entities,
            window_start_ms,
        }
    }

    /// Global operation counters for this window.
    #[must_use]
    pub const fn ops(&self) -> &EventOps {
        &self.ops
    }

    /// Instruction-count performance aggregates for this window.
    #[must_use]
    pub const fn perf(&self) -> &EventPerf {
        &self.perf
    }

    /// Per-entity counters, keyed by entity path.
    #[must_use]
    pub const fn entities(&self) -> &BTreeMap<String, EntityCounters> {
        &self.entities
    }

    /// Start of the current metrics window, in milliseconds.
    #[must_use]
    pub const fn window_start_ms(&self) -> u64 {
        self.window_start_ms
    }
}
58
59impl Default for EventState {
60 fn default() -> Self {
61 Self {
62 ops: EventOps::default(),
63 perf: EventPerf::default(),
64 entities: BTreeMap::new(),
65 window_start_ms: now_millis(),
66 }
67 }
68}
69
/// Global (non-entity-scoped) operation counters for the current window.
#[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize)]
pub struct EventOps {
    // Top-level call counters.
    pub(crate) load_calls: u64,
    pub(crate) save_calls: u64,
    pub(crate) delete_calls: u64,

    // Query-plan selection counters, one per plan variant.
    // NOTE(review): presumably incremented when the corresponding plan is
    // chosen by the query engine — confirm at the call sites.
    pub(crate) plan_index: u64,
    pub(crate) plan_keys: u64,
    pub(crate) plan_range: u64,
    pub(crate) plan_full_scan: u64,
    pub(crate) plan_grouped_hash_materialized: u64,
    pub(crate) plan_grouped_ordered_materialized: u64,

    // Row-volume counters.
    pub(crate) rows_loaded: u64,
    pub(crate) rows_scanned: u64,
    pub(crate) rows_deleted: u64,

    // Index / relation maintenance and anomaly counters.
    pub(crate) index_inserts: u64,
    pub(crate) index_removes: u64,
    pub(crate) reverse_index_inserts: u64,
    pub(crate) reverse_index_removes: u64,
    pub(crate) relation_reverse_lookups: u64,
    pub(crate) relation_delete_blocks: u64,
    pub(crate) unique_violations: u64,
    pub(crate) non_atomic_partial_commits: u64,
    pub(crate) non_atomic_partial_rows_committed: u64,
}
106
impl EventOps {
    /// Total `load` calls in the window.
    #[must_use]
    pub const fn load_calls(&self) -> u64 {
        self.load_calls
    }

    /// Total `save` calls in the window.
    #[must_use]
    pub const fn save_calls(&self) -> u64 {
        self.save_calls
    }

    /// Total `delete` calls in the window.
    #[must_use]
    pub const fn delete_calls(&self) -> u64 {
        self.delete_calls
    }

    /// Counter for the `index` plan variant.
    #[must_use]
    pub const fn plan_index(&self) -> u64 {
        self.plan_index
    }

    /// Counter for the `keys` plan variant.
    #[must_use]
    pub const fn plan_keys(&self) -> u64 {
        self.plan_keys
    }

    /// Counter for the `range` plan variant.
    #[must_use]
    pub const fn plan_range(&self) -> u64 {
        self.plan_range
    }

    /// Counter for the `full_scan` plan variant.
    #[must_use]
    pub const fn plan_full_scan(&self) -> u64 {
        self.plan_full_scan
    }

    /// Counter for the `grouped_hash_materialized` plan variant.
    #[must_use]
    pub const fn plan_grouped_hash_materialized(&self) -> u64 {
        self.plan_grouped_hash_materialized
    }

    /// Counter for the `grouped_ordered_materialized` plan variant.
    #[must_use]
    pub const fn plan_grouped_ordered_materialized(&self) -> u64 {
        self.plan_grouped_ordered_materialized
    }

    /// Total rows loaded in the window.
    #[must_use]
    pub const fn rows_loaded(&self) -> u64 {
        self.rows_loaded
    }

    /// Total rows scanned in the window.
    #[must_use]
    pub const fn rows_scanned(&self) -> u64 {
        self.rows_scanned
    }

    /// Total rows deleted in the window.
    #[must_use]
    pub const fn rows_deleted(&self) -> u64 {
        self.rows_deleted
    }

    /// Total index insert operations.
    #[must_use]
    pub const fn index_inserts(&self) -> u64 {
        self.index_inserts
    }

    /// Total index remove operations.
    #[must_use]
    pub const fn index_removes(&self) -> u64 {
        self.index_removes
    }

    /// Total reverse-index insert operations.
    #[must_use]
    pub const fn reverse_index_inserts(&self) -> u64 {
        self.reverse_index_inserts
    }

    /// Total reverse-index remove operations.
    #[must_use]
    pub const fn reverse_index_removes(&self) -> u64 {
        self.reverse_index_removes
    }

    /// Total relation reverse lookups.
    #[must_use]
    pub const fn relation_reverse_lookups(&self) -> u64 {
        self.relation_reverse_lookups
    }

    /// Times a delete was blocked by a relation constraint.
    #[must_use]
    pub const fn relation_delete_blocks(&self) -> u64 {
        self.relation_delete_blocks
    }

    /// Unique-constraint violations observed.
    #[must_use]
    pub const fn unique_violations(&self) -> u64 {
        self.unique_violations
    }

    /// Non-atomic operations that committed only partially.
    #[must_use]
    pub const fn non_atomic_partial_commits(&self) -> u64 {
        self.non_atomic_partial_commits
    }

    /// Rows committed by those partial non-atomic operations.
    #[must_use]
    pub const fn non_atomic_partial_rows_committed(&self) -> u64 {
        self.non_atomic_partial_rows_committed
    }
}
213
/// Per-entity operation counters; mirrors the non-plan fields of [`EventOps`].
#[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize)]
pub struct EntityCounters {
    // Call counters scoped to a single entity path.
    pub(crate) load_calls: u64,
    pub(crate) save_calls: u64,
    pub(crate) delete_calls: u64,
    // Row-volume counters.
    pub(crate) rows_loaded: u64,
    pub(crate) rows_scanned: u64,
    pub(crate) rows_deleted: u64,
    // Index / relation maintenance and anomaly counters.
    pub(crate) index_inserts: u64,
    pub(crate) index_removes: u64,
    pub(crate) reverse_index_inserts: u64,
    pub(crate) reverse_index_removes: u64,
    pub(crate) relation_reverse_lookups: u64,
    pub(crate) relation_delete_blocks: u64,
    pub(crate) unique_violations: u64,
    pub(crate) non_atomic_partial_commits: u64,
    pub(crate) non_atomic_partial_rows_committed: u64,
}
236
impl EntityCounters {
    /// `load` calls for this entity.
    #[must_use]
    pub const fn load_calls(&self) -> u64 {
        self.load_calls
    }

    /// `save` calls for this entity.
    #[must_use]
    pub const fn save_calls(&self) -> u64 {
        self.save_calls
    }

    /// `delete` calls for this entity.
    #[must_use]
    pub const fn delete_calls(&self) -> u64 {
        self.delete_calls
    }

    /// Rows loaded for this entity.
    #[must_use]
    pub const fn rows_loaded(&self) -> u64 {
        self.rows_loaded
    }

    /// Rows scanned for this entity.
    #[must_use]
    pub const fn rows_scanned(&self) -> u64 {
        self.rows_scanned
    }

    /// Rows deleted for this entity.
    #[must_use]
    pub const fn rows_deleted(&self) -> u64 {
        self.rows_deleted
    }

    /// Index insert operations for this entity.
    #[must_use]
    pub const fn index_inserts(&self) -> u64 {
        self.index_inserts
    }

    /// Index remove operations for this entity.
    #[must_use]
    pub const fn index_removes(&self) -> u64 {
        self.index_removes
    }

    /// Reverse-index insert operations for this entity.
    #[must_use]
    pub const fn reverse_index_inserts(&self) -> u64 {
        self.reverse_index_inserts
    }

    /// Reverse-index remove operations for this entity.
    #[must_use]
    pub const fn reverse_index_removes(&self) -> u64 {
        self.reverse_index_removes
    }

    /// Relation reverse lookups for this entity.
    #[must_use]
    pub const fn relation_reverse_lookups(&self) -> u64 {
        self.relation_reverse_lookups
    }

    /// Deletes blocked by relation constraints for this entity.
    #[must_use]
    pub const fn relation_delete_blocks(&self) -> u64 {
        self.relation_delete_blocks
    }

    /// Unique-constraint violations for this entity.
    #[must_use]
    pub const fn unique_violations(&self) -> u64 {
        self.unique_violations
    }

    /// Partial non-atomic commits for this entity.
    #[must_use]
    pub const fn non_atomic_partial_commits(&self) -> u64 {
        self.non_atomic_partial_commits
    }

    /// Rows committed by partial non-atomic operations for this entity.
    #[must_use]
    pub const fn non_atomic_partial_rows_committed(&self) -> u64 {
        self.non_atomic_partial_rows_committed
    }
}
313
/// Instruction-count performance aggregates for the current window.
#[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize)]
pub struct EventPerf {
    // Running totals per operation kind (saturating sums; see
    // `add_instructions`). u128 so long windows cannot overflow.
    pub(crate) load_inst_total: u128,
    pub(crate) save_inst_total: u128,
    pub(crate) delete_inst_total: u128,

    // Largest single-call instruction count observed per operation kind.
    pub(crate) load_inst_max: u64,
    pub(crate) save_inst_max: u64,
    pub(crate) delete_inst_max: u64,
}
331
impl EventPerf {
    /// Builds perf aggregates from pre-computed totals and maxima.
    #[must_use]
    pub const fn new(
        load_inst_total: u128,
        save_inst_total: u128,
        delete_inst_total: u128,
        load_inst_max: u64,
        save_inst_max: u64,
        delete_inst_max: u64,
    ) -> Self {
        Self {
            load_inst_total,
            save_inst_total,
            delete_inst_total,
            load_inst_max,
            save_inst_max,
            delete_inst_max,
        }
    }

    /// Total instructions spent in `load` operations.
    #[must_use]
    pub const fn load_inst_total(&self) -> u128 {
        self.load_inst_total
    }

    /// Total instructions spent in `save` operations.
    #[must_use]
    pub const fn save_inst_total(&self) -> u128 {
        self.save_inst_total
    }

    /// Total instructions spent in `delete` operations.
    #[must_use]
    pub const fn delete_inst_total(&self) -> u128 {
        self.delete_inst_total
    }

    /// Largest single `load` instruction count observed.
    #[must_use]
    pub const fn load_inst_max(&self) -> u64 {
        self.load_inst_max
    }

    /// Largest single `save` instruction count observed.
    #[must_use]
    pub const fn save_inst_max(&self) -> u64 {
        self.save_inst_max
    }

    /// Largest single `delete` instruction count observed.
    #[must_use]
    pub const fn delete_inst_max(&self) -> u64 {
        self.delete_inst_max
    }
}
382
thread_local! {
    // Thread-local metrics store, lazily initialized to a fresh window.
    // NOTE(review): presumably a single-threaded canister environment, so
    // this is effectively the module's global state — confirm.
    static EVENT_STATE: RefCell<EventState> = RefCell::new(EventState::default());
}
386
387pub(crate) fn with_state<R>(f: impl FnOnce(&EventState) -> R) -> R {
389 EVENT_STATE.with(|m| f(&m.borrow()))
390}
391
392pub(crate) fn with_state_mut<R>(f: impl FnOnce(&mut EventState) -> R) -> R {
394 EVENT_STATE.with(|m| f(&mut m.borrow_mut()))
395}
396
397pub(super) fn reset() {
399 with_state_mut(|m| *m = EventState::default());
400}
401
/// Clears every metrics store owned by this module.
///
/// Currently this just delegates to [`reset`]; kept as a separate crate-level
/// entry point so additional stores can be cleared here later.
pub(crate) fn reset_all() {
    reset();
}
406
407pub(super) fn add_instructions(total: &mut u128, max: &mut u64, delta_inst: u64) {
409 *total = total.saturating_add(u128::from(delta_inst));
410 if delta_inst > *max {
411 *max = delta_inst;
412 }
413}
414
/// Serializable report produced from a metrics snapshot.
#[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize)]
pub struct EventReport {
    /// Full counters snapshot; `None` when the requested window has no data.
    counters: Option<EventState>,
    /// Per-entity summaries, sorted hottest-first (see `report_window_start`).
    entity_counters: Vec<EntitySummary>,
}
426
impl EventReport {
    /// Assembles a report from an optional snapshot and per-entity summaries.
    #[must_use]
    pub(crate) const fn new(
        counters: Option<EventState>,
        entity_counters: Vec<EntitySummary>,
    ) -> Self {
        Self {
            counters,
            entity_counters,
        }
    }

    /// Borrows the full counters snapshot, if one was captured.
    #[must_use]
    pub const fn counters(&self) -> Option<&EventState> {
        self.counters.as_ref()
    }

    /// Borrows the per-entity summaries.
    #[must_use]
    pub fn entity_counters(&self) -> &[EntitySummary] {
        &self.entity_counters
    }

    /// Consumes the report, yielding the counters snapshot.
    #[must_use]
    pub fn into_counters(self) -> Option<EventState> {
        self.counters
    }

    /// Consumes the report, yielding the per-entity summaries.
    #[must_use]
    pub fn into_entity_counters(self) -> Vec<EntitySummary> {
        self.entity_counters
    }
}
459
/// Flattened per-entity report row: raw counters plus derived averages.
// NOTE(review): unlike `EntityCounters` there is no `save_calls` here —
// confirm the omission is intentional (the report appears load/delete-focused).
#[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize)]
pub struct EntitySummary {
    /// Entity path this row describes.
    path: String,
    // Raw counters copied from the entity's `EntityCounters`.
    load_calls: u64,
    delete_calls: u64,
    rows_loaded: u64,
    rows_scanned: u64,
    rows_deleted: u64,
    // Derived averages; 0.0 when the corresponding call count is zero.
    avg_rows_per_load: f64,
    avg_rows_scanned_per_load: f64,
    avg_rows_per_delete: f64,
    index_inserts: u64,
    index_removes: u64,
    reverse_index_inserts: u64,
    reverse_index_removes: u64,
    relation_reverse_lookups: u64,
    relation_delete_blocks: u64,
    unique_violations: u64,
    non_atomic_partial_commits: u64,
    non_atomic_partial_rows_committed: u64,
}
485
impl EntitySummary {
    /// Entity path this row describes.
    #[must_use]
    pub const fn path(&self) -> &str {
        self.path.as_str()
    }

    /// `load` calls for this entity.
    #[must_use]
    pub const fn load_calls(&self) -> u64 {
        self.load_calls
    }

    /// `delete` calls for this entity.
    #[must_use]
    pub const fn delete_calls(&self) -> u64 {
        self.delete_calls
    }

    /// Rows loaded for this entity.
    #[must_use]
    pub const fn rows_loaded(&self) -> u64 {
        self.rows_loaded
    }

    /// Rows scanned for this entity.
    #[must_use]
    pub const fn rows_scanned(&self) -> u64 {
        self.rows_scanned
    }

    /// Rows deleted for this entity.
    #[must_use]
    pub const fn rows_deleted(&self) -> u64 {
        self.rows_deleted
    }

    /// Average rows loaded per `load` call (0.0 when no loads).
    #[must_use]
    pub const fn avg_rows_per_load(&self) -> f64 {
        self.avg_rows_per_load
    }

    /// Average rows scanned per `load` call (0.0 when no loads).
    #[must_use]
    pub const fn avg_rows_scanned_per_load(&self) -> f64 {
        self.avg_rows_scanned_per_load
    }

    /// Average rows deleted per `delete` call (0.0 when no deletes).
    #[must_use]
    pub const fn avg_rows_per_delete(&self) -> f64 {
        self.avg_rows_per_delete
    }

    /// Index insert operations for this entity.
    #[must_use]
    pub const fn index_inserts(&self) -> u64 {
        self.index_inserts
    }

    /// Index remove operations for this entity.
    #[must_use]
    pub const fn index_removes(&self) -> u64 {
        self.index_removes
    }

    /// Reverse-index insert operations for this entity.
    #[must_use]
    pub const fn reverse_index_inserts(&self) -> u64 {
        self.reverse_index_inserts
    }

    /// Reverse-index remove operations for this entity.
    #[must_use]
    pub const fn reverse_index_removes(&self) -> u64 {
        self.reverse_index_removes
    }

    /// Relation reverse lookups for this entity.
    #[must_use]
    pub const fn relation_reverse_lookups(&self) -> u64 {
        self.relation_reverse_lookups
    }

    /// Deletes blocked by relation constraints for this entity.
    #[must_use]
    pub const fn relation_delete_blocks(&self) -> u64 {
        self.relation_delete_blocks
    }

    /// Unique-constraint violations for this entity.
    #[must_use]
    pub const fn unique_violations(&self) -> u64 {
        self.unique_violations
    }

    /// Partial non-atomic commits for this entity.
    #[must_use]
    pub const fn non_atomic_partial_commits(&self) -> u64 {
        self.non_atomic_partial_commits
    }

    /// Rows committed by partial non-atomic operations for this entity.
    #[must_use]
    pub const fn non_atomic_partial_rows_committed(&self) -> u64 {
        self.non_atomic_partial_rows_committed
    }
}
577
578#[must_use]
588#[expect(clippy::cast_precision_loss)]
589pub(super) fn report_window_start(window_start_ms: Option<u64>) -> EventReport {
590 let snap = with_state(Clone::clone);
591 if let Some(requested_window_start_ms) = window_start_ms
592 && requested_window_start_ms > snap.window_start_ms
593 {
594 return EventReport::default();
595 }
596
597 let mut entity_counters: Vec<EntitySummary> = Vec::new();
598 for (path, ops) in &snap.entities {
599 let avg_load = if ops.load_calls > 0 {
600 ops.rows_loaded as f64 / ops.load_calls as f64
601 } else {
602 0.0
603 };
604 let avg_scanned = if ops.load_calls > 0 {
605 ops.rows_scanned as f64 / ops.load_calls as f64
606 } else {
607 0.0
608 };
609 let avg_delete = if ops.delete_calls > 0 {
610 ops.rows_deleted as f64 / ops.delete_calls as f64
611 } else {
612 0.0
613 };
614
615 entity_counters.push(EntitySummary {
616 path: path.clone(),
617 load_calls: ops.load_calls,
618 delete_calls: ops.delete_calls,
619 rows_loaded: ops.rows_loaded,
620 rows_scanned: ops.rows_scanned,
621 rows_deleted: ops.rows_deleted,
622 avg_rows_per_load: avg_load,
623 avg_rows_scanned_per_load: avg_scanned,
624 avg_rows_per_delete: avg_delete,
625 index_inserts: ops.index_inserts,
626 index_removes: ops.index_removes,
627 reverse_index_inserts: ops.reverse_index_inserts,
628 reverse_index_removes: ops.reverse_index_removes,
629 relation_reverse_lookups: ops.relation_reverse_lookups,
630 relation_delete_blocks: ops.relation_delete_blocks,
631 unique_violations: ops.unique_violations,
632 non_atomic_partial_commits: ops.non_atomic_partial_commits,
633 non_atomic_partial_rows_committed: ops.non_atomic_partial_rows_committed,
634 });
635 }
636
637 entity_counters.sort_by(|a, b| {
638 match b
639 .avg_rows_per_load
640 .partial_cmp(&a.avg_rows_per_load)
641 .unwrap_or(Ordering::Equal)
642 {
643 Ordering::Equal => match b.rows_loaded.cmp(&a.rows_loaded) {
644 Ordering::Equal => a.path.cmp(&b.path),
645 other => other,
646 },
647 other => other,
648 }
649 });
650
651 EventReport::new(Some(snap), entity_counters)
652}
653
// Unit tests: state reset, report ordering, and CBOR serialization-shape
// stability (field names are part of the wire contract and must not change).
#[cfg(test)]
#[expect(clippy::float_cmp)]
mod tests {
    use crate::obs::metrics::{
        EntityCounters, EntitySummary, EventOps, EventPerf, EventReport, EventState,
        report_window_start, reset_all, with_state, with_state_mut,
    };
    use serde::Serialize;
    use serde_cbor::Value as CborValue;
    use std::collections::BTreeMap;

    // Serializes `value` to CBOR bytes and parses them back into a generic
    // CBOR value tree so tests can inspect the serialized shape.
    fn to_cbor_value<T: Serialize>(value: &T) -> CborValue {
        let bytes =
            serde_cbor::to_vec(value).expect("test fixtures must serialize into CBOR payloads");
        serde_cbor::from_slice::<CborValue>(&bytes)
            .expect("test fixtures must deserialize into CBOR value trees")
    }

    // Asserts the value is a CBOR map and returns its entries.
    fn expect_cbor_map(value: &CborValue) -> &BTreeMap<CborValue, CborValue> {
        match value {
            CborValue::Map(map) => map,
            other => panic!("expected CBOR map, got {other:?}"),
        }
    }

    // Looks up a text key in a CBOR map.
    fn map_field<'a>(map: &'a BTreeMap<CborValue, CborValue>, key: &str) -> Option<&'a CborValue> {
        map.get(&CborValue::Text(key.to_string()))
    }

    // `reset_all` must zero global ops/perf counters and drop entity entries.
    #[test]
    fn reset_all_clears_state() {
        with_state_mut(|m| {
            m.ops.load_calls = 3;
            m.ops.index_inserts = 2;
            m.perf.save_inst_max = 9;
            m.entities.insert(
                "alpha".to_string(),
                EntityCounters {
                    load_calls: 1,
                    ..Default::default()
                },
            );
        });

        reset_all();

        with_state(|m| {
            assert_eq!(m.ops.load_calls, 0);
            assert_eq!(m.ops.index_inserts, 0);
            assert_eq!(m.perf.save_inst_max, 0);
            assert!(m.entities.is_empty());
        });
    }

    // Report order: avg rows/load desc, then rows_loaded desc, then path asc
    // (alpha and gamma tie on both numeric keys, so path breaks the tie).
    #[test]
    fn report_sorts_entities_by_average_rows() {
        reset_all();
        with_state_mut(|m| {
            m.entities.insert(
                "alpha".to_string(),
                EntityCounters {
                    load_calls: 2,
                    rows_loaded: 6,
                    ..Default::default()
                },
            );
            m.entities.insert(
                "beta".to_string(),
                EntityCounters {
                    load_calls: 1,
                    rows_loaded: 5,
                    ..Default::default()
                },
            );
            m.entities.insert(
                "gamma".to_string(),
                EntityCounters {
                    load_calls: 2,
                    rows_loaded: 6,
                    ..Default::default()
                },
            );
        });

        let report = report_window_start(None);
        let paths: Vec<_> = report
            .entity_counters
            .iter()
            .map(|e| e.path.as_str())
            .collect();

        assert_eq!(paths, ["beta", "alpha", "gamma"]);
        assert_eq!(report.entity_counters[0].avg_rows_per_load, 5.0);
        assert_eq!(report.entity_counters[1].avg_rows_per_load, 3.0);
        assert_eq!(report.entity_counters[2].avg_rows_per_load, 3.0);
    }

    // Top-level report and nested EventState field names must stay stable.
    #[test]
    fn event_report_serialization_shape_is_stable() {
        let report = EventReport {
            counters: Some(EventState {
                ops: EventOps {
                    load_calls: 1,
                    rows_loaded: 2,
                    rows_scanned: 3,
                    non_atomic_partial_rows_committed: 4,
                    ..Default::default()
                },
                perf: EventPerf {
                    load_inst_total: 11,
                    load_inst_max: 12,
                    ..Default::default()
                },
                entities: BTreeMap::from([(
                    "alpha".to_string(),
                    EntityCounters {
                        load_calls: 5,
                        rows_loaded: 8,
                        ..Default::default()
                    },
                )]),
                window_start_ms: 99,
            }),
            entity_counters: vec![EntitySummary {
                path: "alpha".to_string(),
                load_calls: 5,
                rows_loaded: 8,
                avg_rows_per_load: 1.6,
                ..Default::default()
            }],
        };

        let encoded = to_cbor_value(&report);
        let root = expect_cbor_map(&encoded);
        assert!(
            map_field(root, "counters").is_some(),
            "EventReport must keep `counters` as serialized field key",
        );
        assert!(
            map_field(root, "entity_counters").is_some(),
            "EventReport must keep `entity_counters` as serialized field key",
        );

        let counters = map_field(root, "counters").expect("counters payload should exist");
        let counters_map = expect_cbor_map(counters);
        assert!(
            map_field(counters_map, "ops").is_some(),
            "EventState must keep `ops` as serialized field key",
        );
        assert!(
            map_field(counters_map, "perf").is_some(),
            "EventState must keep `perf` as serialized field key",
        );
        assert!(
            map_field(counters_map, "entities").is_some(),
            "EventState must keep `entities` as serialized field key",
        );
        assert!(
            map_field(counters_map, "window_start_ms").is_some(),
            "EventState must keep `window_start_ms` as serialized field key",
        );
    }

    // Spot-checks representative EntitySummary field names on the wire.
    #[test]
    fn entity_summary_serialization_shape_is_stable() {
        let encoded = to_cbor_value(&EntitySummary {
            path: "alpha".to_string(),
            load_calls: 5,
            delete_calls: 6,
            rows_loaded: 8,
            rows_scanned: 9,
            rows_deleted: 10,
            avg_rows_per_load: 1.6,
            avg_rows_scanned_per_load: 1.8,
            avg_rows_per_delete: 2.0,
            index_inserts: 11,
            index_removes: 12,
            reverse_index_inserts: 13,
            reverse_index_removes: 14,
            relation_reverse_lookups: 15,
            relation_delete_blocks: 16,
            unique_violations: 17,
            non_atomic_partial_commits: 18,
            non_atomic_partial_rows_committed: 19,
        });
        let root = expect_cbor_map(&encoded);
        assert!(
            map_field(root, "path").is_some(),
            "EntitySummary must keep `path` as serialized field key",
        );
        assert!(
            map_field(root, "avg_rows_per_load").is_some(),
            "EntitySummary must keep `avg_rows_per_load` as serialized field key",
        );
        assert!(
            map_field(root, "relation_delete_blocks").is_some(),
            "EntitySummary must keep `relation_delete_blocks` as serialized field key",
        );
        assert!(
            map_field(root, "non_atomic_partial_rows_committed").is_some(),
            "EntitySummary must keep `non_atomic_partial_rows_committed` as serialized field key",
        );
    }
}