1use crate::heuristics::{HeuristicEntry, StaticPrior, StaticPriorSet, DEFAULT_ENTRIES};
9use crate::CRATE_VERSION;
10use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
11use base64::Engine;
12use ed25519_dalek::{Signer, SigningKey};
13use serde_json::{json, Value};
14use sha2::{Digest, Sha256};
15use std::collections::{BTreeMap, BTreeSet};
16use std::env;
17use std::fs;
18use std::io;
19use std::path::{Path, PathBuf};
20use time::format_description::well_known::Rfc3339;
21use time::OffsetDateTime;
22
// Cap on evidence lines retained per matched heuristic in the report.
const MAX_EVIDENCE_PER_HEURISTIC: usize = 6;
// Cap on evidence lines retained per individual pattern signal.
const MAX_EVIDENCE_PER_SIGNAL: usize = 4;
// DSSE envelope payload type for the in-toto attestation statement.
const DSSE_PAYLOAD_TYPE: &str = "application/vnd.in-toto+json";
// Predicate type URI identifying the DSFB crate-scan attestation schema.
const DSFB_PREDICATE_TYPE: &str =
    "https://github.com/infinityabundance/dsfb-gray/attestations/crate-scan/v1";
// Identifier for the scoring method embedded in generated scorecards.
pub(crate) const AUDIT_SCORE_METHOD: &str = "dsfb-assurance-score-v1";
// Default directory root under which scan artifacts are written.
pub const DEFAULT_SCAN_OUTPUT_ROOT: &str = "output-dsfb-gray";
31
/// A single source-line hit recorded while scanning for a pattern.
#[derive(Debug, Clone)]
pub struct ScanEvidence {
    /// Path of the file containing the hit, relative to the scan root.
    pub path: PathBuf,
    /// 1-based line number of the hit.
    pub line_number: usize,
    /// The static pattern string that matched.
    pub pattern: &'static str,
    /// Excerpt of the matching source line.
    pub snippet: String,
}

/// Aggregated match data for one DSFB heuristic across the scanned tree.
#[derive(Debug, Clone)]
pub struct HeuristicSourceMatch {
    /// The heuristic definition that matched.
    pub heuristic: HeuristicEntry,
    /// Distinct patterns from the heuristic's spec that produced hits.
    pub matched_patterns: Vec<&'static str>,
    /// Capped evidence lines supporting the match.
    pub evidence: Vec<ScanEvidence>,
    /// Total hit count across all patterns (uncapped).
    pub total_hits: usize,
}

/// Top-level result of a static crate-source scan.
#[derive(Debug, Clone)]
pub struct CrateSourceScanReport {
    /// Interpretation profile the scan was run under.
    pub profile: ScanProfile,
    /// Crate name from the manifest, or a directory-name fallback.
    pub crate_name: String,
    /// Crate version from the manifest, when declared.
    pub crate_version: Option<String>,
    /// RFC 3339 UTC timestamp of report generation.
    pub generated_at_utc: String,
    /// Canonicalized scan root directory.
    pub root: PathBuf,
    /// SHA-256 digest computed over the scanned file tree.
    pub source_sha256: String,
    /// Git commit recorded by the packaged crate, when available.
    pub vcs_commit: Option<String>,
    /// Path of the crate within its VCS repository, when declared.
    pub path_in_vcs: Option<String>,
    /// Number of source files that were pattern-scanned.
    pub files_scanned: usize,
    /// Heuristics with at least one hit, sorted by hit count.
    pub matched_heuristics: Vec<HeuristicSourceMatch>,
    /// Fixed disclaimer about the static-proxy nature of the report.
    pub caveat: &'static str,
    // Private: full certification breakdown used by the renderers.
    certification: CertificationProfile,
}
88
/// Named interpretation profiles that bias how scan findings are framed.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ScanProfile {
    General,
    CloudNative,
    DistributedSystems,
    IndustrialSafety,
    SupplyChain,
}

impl ScanProfile {
    /// Parses a user-supplied profile name (case-insensitive, trimmed),
    /// accepting common hyphen/underscore alias spellings.
    pub fn parse(value: &str) -> Option<Self> {
        let normalized = value.trim().to_ascii_lowercase();
        let profile = match normalized.as_str() {
            "general" => Self::General,
            "cloud" | "cloud-native" | "cloud_native" => Self::CloudNative,
            "distributed" | "distributed-systems" | "distributed_systems" => {
                Self::DistributedSystems
            }
            "industrial" | "industrial-safety" | "industrial_safety" | "safety" => {
                Self::IndustrialSafety
            }
            "supply-chain" | "supply_chain" | "supplychain" => Self::SupplyChain,
            _ => return None,
        };
        Some(profile)
    }

    // Single source of truth for the per-profile (slug, title, focus) strings.
    fn descriptor(self) -> (&'static str, &'static str, &'static str) {
        match self {
            Self::General => (
                "general",
                "General Rust Crate Review",
                "Balanced interpretation across safety, verification, lifecycle, and structural findings.",
            ),
            Self::CloudNative => (
                "cloud-native",
                "Cloud-Native Service Review",
                "Emphasizes async behavior, backpressure, cancellation, detached tasks, and operational noise under service load.",
            ),
            Self::DistributedSystems => (
                "distributed-systems",
                "Distributed Systems Review",
                "Emphasizes heartbeat timing, clock integrity, queue growth, retry behavior, quorum-sensitive logic, and partial-write/networking hazards.",
            ),
            Self::IndustrialSafety => (
                "industrial-safety",
                "Industrial / Safety Review",
                "Emphasizes bounded behavior, fail-safe state handling, resource determinism, Power-of-Ten proxies, and reviewability.",
            ),
            Self::SupplyChain => (
                "supply-chain",
                "Supply-Chain / Provenance Review",
                "Emphasizes provenance, dependency drift, dynamic loading, FFI surface, lifecycle artifacts, and attestation portability.",
            ),
        }
    }

    /// Canonical slug for CLI flags and report metadata.
    pub fn as_str(self) -> &'static str {
        self.descriptor().0
    }

    /// Human-readable title used in report headings.
    pub fn title(self) -> &'static str {
        self.descriptor().1
    }

    /// One-sentence description of what the profile emphasizes.
    pub fn focus(self) -> &'static str {
        self.descriptor().2
    }
}
169
/// Ed25519 signing identity used to produce DSSE signatures over scan artifacts.
#[derive(Debug, Clone)]
pub struct ScanSigningKey {
    // Identifier embedded in the DSSE envelope; user-supplied or derived.
    key_id: String,
    // The Ed25519 private key.
    signing_key: SigningKey,
}

impl ScanSigningKey {
    /// Builds a signing key from `DSFB_SCAN_SIGNING_KEY` (and optional
    /// `DSFB_SCAN_KEY_ID`) environment variables.
    ///
    /// Returns `Ok(None)` when no key is configured, and an error when the
    /// configured secret is not valid UTF-8 or cannot be parsed.
    pub fn from_environment() -> io::Result<Option<Self>> {
        let Some(secret) = env::var_os("DSFB_SCAN_SIGNING_KEY") else {
            return Ok(None);
        };
        let secret = secret.into_string().map_err(|_| {
            io::Error::new(io::ErrorKind::InvalidInput, "signing key must be UTF-8")
        })?;
        // Treat an empty/whitespace-only key id the same as an absent one.
        let key_id = env::var("DSFB_SCAN_KEY_ID")
            .ok()
            .filter(|value| !value.trim().is_empty());
        Self::from_secret_text(&secret, key_id.as_deref()).map(Some)
    }

    /// Parses the secret key text and derives a key id when none is given.
    ///
    /// The derived id is `ed25519:` plus the hex of the first 8 bytes of the
    /// public key, which uniquely tags the key without exposing the secret.
    pub fn from_secret_text(secret: &str, key_id: Option<&str>) -> io::Result<Self> {
        let secret_bytes = parse_secret_key(secret)?;
        let signing_key = SigningKey::from_bytes(&secret_bytes);
        let derived_key_id = match key_id {
            Some(value) if !value.trim().is_empty() => value.trim().to_string(),
            _fallback => {
                let public_key = signing_key.verifying_key().to_bytes();
                format!("ed25519:{}", hex_encode(&public_key[..8]))
            }
        };

        Ok(Self {
            key_id: derived_key_id,
            signing_key,
        })
    }

    // Accessor for the envelope key id.
    fn key_id(&self) -> &str {
        &self.key_id
    }

    /// Signs `payload` under the DSSE pre-authentication encoding (PAE) for
    /// `payload_type` and returns the base64-encoded signature.
    fn sign(&self, payload_type: &str, payload: &[u8]) -> String {
        let pae = dsse_pae(payload_type, payload);
        let signature = self.signing_key.sign(&pae);
        BASE64_STANDARD.encode(signature.to_bytes())
    }
}
223
/// Filesystem locations of the artifacts emitted by one scan run.
#[derive(Debug, Clone)]
pub struct ScanArtifactPaths {
    /// Directory holding all emitted artifacts.
    pub output_dir: PathBuf,
    /// Human-readable text report.
    pub report_path: PathBuf,
    /// SARIF output for code-scanning integrations.
    pub sarif_path: PathBuf,
    /// in-toto statement JSON.
    pub statement_path: PathBuf,
    /// DSSE envelope wrapping the statement.
    pub dsse_path: PathBuf,
    /// Whether the DSSE envelope carries a real signature.
    pub signed: bool,
}

/// Output-root and per-run directory layout for a scan invocation.
#[derive(Debug, Clone)]
pub struct ScanRunPaths {
    /// Root under which all run directories are created.
    pub base_output_root: PathBuf,
    /// Directory dedicated to this run.
    pub run_dir: PathBuf,
    /// Timestamp component used in the run directory name.
    pub timestamp_utc: String,
}

/// Version-control provenance recovered from the packaged crate, if any.
#[derive(Debug, Clone, Default)]
struct VcsInfo {
    // Git commit hash recorded at packaging time.
    git_commit: Option<String>,
    // Crate path within the repository.
    path_in_vcs: Option<String>,
}
257
/// Full audit breakdown assembled from the individual scan profiles.
#[derive(Debug, Clone)]
struct CertificationProfile {
    runtime: RuntimeProfile,
    safety: SafetyProfile,
    verification: VerificationProfile,
    build: BuildProfile,
    lifecycle: LifecycleProfile,
    power_of_ten: PowerOfTenProfile,
    advanced: AdvancedStructuralProfile,
    audit_score: AuditScoreCard,
    manifest: ManifestMetadata,
    // Total files inspected (all artifacts, not just source files).
    artifacts_inspected: usize,
}

/// Weighted overall score plus its per-section breakdown.
#[derive(Debug, Clone)]
struct AuditScoreCard {
    overall_percent: f64,
    earned_weighted_points: f64,
    possible_weighted_points: f64,
    // Qualitative band label derived from the overall percentage.
    band: &'static str,
    sections: Vec<AuditScoreSection>,
}

/// One weighted section contributing to the audit scorecard.
#[derive(Debug, Clone)]
struct AuditScoreSection {
    id: &'static str,
    title: &'static str,
    // Section weight as a percentage of the overall score.
    weight_percent: f64,
    checkpoint_count: usize,
    // Fractional credit is allowed, hence f64.
    earned_checkpoints: f64,
    section_percent: f64,
    weighted_points: f64,
}

/// Per-rule audit results for the Power-of-Ten proxy checks.
#[derive(Debug, Clone)]
struct PowerOfTenProfile {
    rules: Vec<PowerOfTenRuleAudit>,
}

/// Structural checks plus complexity hotspots beyond the base profiles.
#[derive(Debug, Clone)]
struct AdvancedStructuralProfile {
    checks: Vec<AdvancedStructuralCheck>,
    hotspots: Vec<CriticalityHotspot>,
}

/// Audit outcome for a single Power-of-Ten rule.
#[derive(Debug, Clone)]
struct PowerOfTenRuleAudit {
    // Rule number 1..=10.
    number: u8,
    title: &'static str,
    status: PowerOfTenStatus,
    detail: String,
    evidence: Vec<ScanEvidence>,
}

/// Tri-state verdict for a Power-of-Ten rule.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum PowerOfTenStatus {
    Applied,
    NotApplied,
    // Source evidence was insufficient to decide either way.
    Indeterminate,
}

/// Outcome of one advanced structural check.
#[derive(Debug, Clone)]
struct AdvancedStructuralCheck {
    id: &'static str,
    title: &'static str,
    status: StructuralCheckStatus,
    detail: String,
    evidence: Vec<ScanEvidence>,
}

/// Tri-state verdict for an advanced structural check.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum StructuralCheckStatus {
    // Risk signal present at a level worth reviewer attention.
    Elevated,
    Clear,
    Indeterminate,
}

/// Normalized finding record used by the report renderers, unifying
/// heuristic matches and structural checks into one shape.
#[derive(Debug, Clone)]
struct CanonicalFinding {
    id: String,
    title: String,
    category: &'static str,
    status_label: &'static str,
    // Lower rank = more severe; used for ordering in the report.
    severity_rank: usize,
    classification: &'static str,
    confidence: &'static str,
    impact_kind: &'static str,
    // Why the finding matters specifically in Rust.
    rust_why: &'static str,
    // Why the finding matters for review readiness.
    readiness_why: &'static str,
    detail: String,
    remediation: &'static str,
    verification: &'static str,
    evidence: Vec<ScanEvidence>,
}

/// Advisory (non-binding) sub-score surfaced alongside the main scorecard.
#[derive(Debug, Clone)]
struct AdvisorySubscore {
    id: &'static str,
    title: &'static str,
    percent: f64,
    // Short description of what the percentage is computed from.
    basis: &'static str,
}

/// A function flagged as a review hotspot by complexity/risk estimation.
#[derive(Debug, Clone)]
struct CriticalityHotspot {
    path: PathBuf,
    function_name: String,
    start_line: usize,
    // Heuristic cyclomatic-style complexity estimate, not an exact metric.
    estimated_complexity: usize,
    risk_score: usize,
    // Names of the risk signals that contributed to the score.
    signals: Vec<&'static str>,
}
370
/// Constrained-runtime signals: `no_std` declarations and allocation usage.
#[derive(Debug, Clone)]
struct RuntimeProfile {
    no_std_declared: bool,
    no_std_evidence: Vec<ScanEvidence>,
    alloc_crate_hits: usize,
    alloc_evidence: Vec<ScanEvidence>,
    heap_allocation_hits: usize,
    heap_allocation_evidence: Vec<ScanEvidence>,
    // Same counters restricted to runtime-core (non-test) code paths.
    runtime_core_alloc_hits: usize,
    runtime_core_heap_allocation_hits: usize,
}

/// Strength of the crate's declared stance on `unsafe` code.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum UnsafeCodePolicy {
    Forbid,
    Deny,
    NotDeclared,
}

/// Safety-related signals: unsafe sites, panics, unwraps, FFI, SAFETY comments.
#[derive(Debug, Clone)]
struct SafetyProfile {
    unsafe_policy: UnsafeCodePolicy,
    unsafe_policy_evidence: Vec<ScanEvidence>,
    unsafe_sites: usize,
    unsafe_evidence: Vec<ScanEvidence>,
    panic_sites: usize,
    panic_evidence: Vec<ScanEvidence>,
    unwrap_sites: usize,
    unwrap_evidence: Vec<ScanEvidence>,
    ffi_sites: usize,
    ffi_evidence: Vec<ScanEvidence>,
    safety_comment_sites: usize,
    safety_comment_evidence: Vec<ScanEvidence>,
}

/// Verification signals: tests, property testing, loom/shuttle, fuzzing,
/// and formal-methods tooling.
#[derive(Debug, Clone)]
struct VerificationProfile {
    tests_dir_present: bool,
    test_marker_hits: usize,
    test_marker_evidence: Vec<ScanEvidence>,
    property_testing_hits: usize,
    property_testing_evidence: Vec<ScanEvidence>,
    concurrency_exploration_hits: usize,
    concurrency_exploration_evidence: Vec<ScanEvidence>,
    fuzzing_hits: usize,
    fuzzing_evidence: Vec<ScanEvidence>,
    formal_methods_hits: usize,
    formal_methods_evidence: Vec<ScanEvidence>,
}

/// Build-surface signals: dependency counts, build scripts, codegen.
#[derive(Debug, Clone)]
struct BuildProfile {
    direct_dependencies: usize,
    build_dependencies: usize,
    dev_dependencies: usize,
    has_build_script: bool,
    proc_macro_crate: bool,
    codegen_hits: usize,
    codegen_evidence: Vec<ScanEvidence>,
}

/// Presence of lifecycle/documentation artifacts in the crate tree.
#[derive(Debug, Clone)]
struct LifecycleProfile {
    readme_present: bool,
    changelog_present: bool,
    security_md_present: bool,
    safety_md_present: bool,
    architecture_doc_present: bool,
    docs_dir_present: bool,
    license_files: Vec<PathBuf>,
}

/// Metadata extracted from `Cargo.toml`; all fields optional because the
/// manifest may be missing or only partially declared.
#[derive(Debug, Clone, Default)]
struct ManifestMetadata {
    crate_name: Option<String>,
    crate_version: Option<String>,
    edition: Option<String>,
    license: Option<String>,
    rust_version: Option<String>,
    repository: Option<String>,
    homepage: Option<String>,
    documentation: Option<String>,
    readme: Option<String>,
    build_script: Option<String>,
    proc_macro: bool,
    direct_dependencies: usize,
    build_dependencies: usize,
    dev_dependencies: usize,
}

/// One loaded file plus pre-lowered variants used for pattern matching.
#[derive(Debug, Clone)]
struct SourceDocument {
    relative_path: PathBuf,
    // Raw file contents.
    contents: String,
    // Normalized text used for general pattern analysis.
    analysis_contents: String,
    // Normalized text used for risk-signal matching.
    risk_contents: String,
}

/// Lightweight per-function summary used by hotspot and rule audits.
#[derive(Debug, Clone)]
struct FunctionSummary {
    path: PathBuf,
    name: String,
    // Lowercased variants allow case-insensitive pattern checks.
    lowered_name: String,
    lowered_signature: String,
    lowered_attributes: String,
    start_line: usize,
    line_count: usize,
    body: String,
    lowered_body: String,
    assertion_count: usize,
    // Heuristic complexity estimate, not an exact metric.
    estimated_complexity: usize,
}

/// Result of scanning a document set for one group of patterns.
#[derive(Debug, Clone)]
struct PatternScan {
    total_hits: usize,
    matched_patterns: Vec<&'static str>,
    evidence: Vec<ScanEvidence>,
}

/// Static association between a heuristic id and its trigger patterns.
struct PatternSpec {
    heuristic_id: &'static str,
    patterns: &'static [&'static str],
}

/// Tracks which `Cargo.toml` table is currently being parsed.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ManifestSection {
    None,
    Package,
    Lib,
    Dependencies,
    BuildDependencies,
    DevDependencies,
}
505
// Trigger patterns for each DSFB heuristic id. Patterns are lowercase
// because documents are matched against pre-lowered text; ids must match
// entries in `DEFAULT_ENTRIES` or the spec is ignored.
const PATTERN_SPECS: &[PatternSpec] = &[
    // Allocation sizing / capacity reservation motifs.
    PatternSpec {
        heuristic_id: "H-ALLOC-01",
        patterns: &[
            "vec::with_capacity",
            ".reserve(",
            " reserve(",
            "reserve_exact(",
        ],
    },
    // Reader-writer lock usage across std/tokio/parking_lot.
    PatternSpec {
        heuristic_id: "H-LOCK-01",
        patterns: &[
            "rwlock",
            "tokio::sync::rwlock",
            "std::sync::rwlock",
            "parking_lot::rwlock",
        ],
    },
    // Raft / leader-election timing motifs.
    PatternSpec {
        heuristic_id: "H-RAFT-01",
        patterns: &["openraft", "election_timeout", "heartbeat", "leader lease"],
    },
    // Blocking calls inside async contexts.
    PatternSpec {
        heuristic_id: "H-ASYNC-01",
        patterns: &[
            "spawn_blocking",
            "block_in_place",
            "thread::sleep(",
            "std::thread::sleep",
        ],
    },
    // Raw TCP / socket handling.
    PatternSpec {
        heuristic_id: "H-TCP-01",
        patterns: &["tcpstream", "tcplistener", "socket", "connect(", "accept("],
    },
    // Channel construction (bounded and unbounded).
    PatternSpec {
        heuristic_id: "H-CHAN-01",
        patterns: &[
            "sync::mpsc",
            "mpsc::channel(",
            "mpsc::unbounded_channel",
            "bounded channel",
        ],
    },
    // Clock and timestamp usage.
    PatternSpec {
        heuristic_id: "H-CLOCK-01",
        patterns: &[
            "instant::now()",
            "systemtime::now()",
            "monotonic",
            "timestamp",
        ],
    },
    // Throughput accounting vocabulary.
    PatternSpec {
        heuristic_id: "H-THRU-01",
        patterns: &["throughput", "ops/sec", "bytes/sec", "qps"],
    },
    // Serialization frameworks.
    PatternSpec {
        heuristic_id: "H-SERDE-01",
        patterns: &[
            "serde",
            "serialize",
            "deserialize",
            "serde_json",
            "bincode",
            "prost",
        ],
    },
    // gRPC / HTTP2 flow-control motifs.
    PatternSpec {
        heuristic_id: "H-GRPC-01",
        patterns: &[
            "tonic::",
            "\"tonic\"",
            "flow control",
            "window_size",
            "http/2",
            "http2",
        ],
    },
    // DNS resolution stacks.
    PatternSpec {
        heuristic_id: "H-DNS-01",
        patterns: &["dns", "resolver", "trust-dns", "hickory"],
    },
    // Error-path / retry / backoff vocabulary.
    PatternSpec {
        heuristic_id: "H-ERR-01",
        patterns: &[
            "timeout",
            "pool exhaustion",
            "retry",
            "backoff",
            "trysenderror",
        ],
    },
];
601
// ── Runtime profile patterns ─────────────────────────────────────────
// All tables below are matched against pre-lowered document text.

// Declarations of `no_std` (direct or via cfg_attr).
const NO_STD_PATTERNS: &[&str] = &[
    "#![no_std]",
    "cfg_attr(not(feature = \"std\"), no_std)",
    "cfg_attr(not(any(feature = \"std\")), no_std)",
    " no_std)]",
];

// Usage of the `alloc` crate in no_std-capable code.
const ALLOC_PATTERNS: &[&str] = &[
    "extern crate alloc",
    "use alloc::",
    "alloc::vec::vec",
    "alloc::string::string",
    "alloc::boxed::box",
];

// Common heap-allocating constructors and conversions.
const HEAP_PATTERNS: &[&str] = &[
    "vec::new(",
    "vec::with_capacity(",
    "string::new(",
    "string::with_capacity(",
    "box::new(",
    "arc::new(",
    "rc::new(",
    "hashmap<",
    "hashset<",
    "btreemap<",
    "btreeset<",
    "vecdeque<",
    "binaryheap<",
    "format!(",
    ".to_string()",
    ".to_owned()",
];

// ── Safety profile patterns ──────────────────────────────────────────

// Crate-level forbids on unsafe code.
const FORBID_UNSAFE_PATTERNS: &[&str] = &[
    "#![forbid(unsafe_code)]",
    "#![cfg_attr(not(test), forbid(unsafe_code))]",
];
// Crate-level denies on unsafe code (weaker than forbid).
const DENY_UNSAFE_PATTERNS: &[&str] = &[
    "#![deny(unsafe_code)]",
    "#![cfg_attr(not(test), deny(unsafe_code))]",
];
// Sites that introduce unsafe code.
const UNSAFE_PATTERNS: &[&str] = &[
    "unsafe {",
    "unsafe{",
    "unsafe fn",
    "unsafe impl",
    "unsafe trait",
    "unsafe extern",
];
// Explicit panic sites (including stub macros).
const PANIC_PATTERNS: &[&str] = &[
    "panic!(",
    "todo!(",
    "unimplemented!(",
    "unreachable!(",
    "panic_any(",
];
// Potentially-panicking Option/Result extraction.
const UNWRAP_PATTERNS: &[&str] = &[".unwrap(", ".expect(", ".unwrap_err(", ".expect_err("];
// FFI surface indicators (code and manifest forms).
const FFI_PATTERNS: &[&str] = &[
    "extern \"c\"",
    "#[repr(c)]",
    "#[no_mangle]",
    "cxx::bridge",
    "bindgen::",
    "[dependencies.bindgen]",
    "[build-dependencies.bindgen]",
    ".dependencies.bindgen]",
    ".build-dependencies.bindgen]",
    "::ffi",
];
// `// SAFETY:` justification comments (lowered).
const SAFETY_COMMENT_PATTERNS: &[&str] = &["safety:"];

// ── Verification profile patterns ────────────────────────────────────

const TEST_PATTERNS: &[&str] = &["#[test]", "#[tokio::test]", "#[cfg(test)]", "mod tests"];
const PROPERTY_TEST_PATTERNS: &[&str] = &["proptest!", "quickcheck", "bolero", "arbtest"];
// Concurrency model-exploration frameworks.
const CONCURRENCY_EXPLORATION_PATTERNS: &[&str] = &["loom::", "shuttle::"];
const FUZZING_PATTERNS: &[&str] = &[
    "libfuzzer_sys",
    "cargo fuzz",
    "honggfuzz",
    "afl::",
    "arbitrary::",
];
// Formal verification / proof tooling.
const FORMAL_METHOD_PATTERNS: &[&str] = &["kani", "creusot", "prusti", "flux::"];

// ── Build profile patterns ───────────────────────────────────────────

// Build-time code generation indicators.
const CODEGEN_PATTERNS: &[&str] = &[
    "bindgen::",
    "[dependencies.bindgen]",
    "[build-dependencies.bindgen]",
    ".dependencies.bindgen]",
    ".build-dependencies.bindgen]",
    "cc::build",
    "cmake::config",
    "cxx_build",
    "prost_build",
    "tonic_build",
    "lalrpop",
    "autocfg",
    "vergen::",
    "include!(concat!(env!(\"out_dir\"",
];

// ── Advanced structural-check patterns ───────────────────────────────

// Interior mutability / atomics.
const INTERIOR_MUTABILITY_PATTERNS: &[&str] = &[
    "cell<",
    "cell::",
    "refcell<",
    "refcell::",
    "unsafecell<",
    "unsafecell::",
    "atomicbool",
    "atomicu",
    "atomici",
    "atomicptr",
];

// Async-aware locks and guard types (hold-across-await risk).
const ASYNC_LOCK_PATTERNS: &[&str] = &[
    ".lock().await",
    ".read().await",
    ".write().await",
    "tokio::sync::mutex",
    "tokio::sync::rwlock",
    "mutexguard",
    "rwlockreadguard",
    "rwlockwriteguard",
];

// Catch-all match arms that can mask new enum variants.
const CATCH_ALL_MATCH_PATTERNS: &[&str] = &["_ =>"];

// Hard-coded sleeps/timeouts instead of configurable waits.
const HARD_CODED_WAIT_PATTERNS: &[&str] = &[
    "duration::from_millis(",
    "duration::from_secs(",
    "tokio::time::sleep(",
    "std::thread::sleep(",
    "thread::sleep(",
    "sleep_until(",
];

// Runtime dynamic library loading.
const DYNAMIC_LOADING_PATTERNS: &[&str] =
    &["libloading", "dlopen", "loadlibrary", "getprocaddress"];

// Manual resource lifecycle management that bypasses RAII.
const RESOURCE_LIFECYCLE_PATTERNS: &[&str] = &[
    "mem::forget(",
    "manuallydrop<",
    "into_raw_fd(",
    "from_raw_fd(",
    "into_raw_handle(",
    "from_raw_handle(",
    "memmap",
    "mmap",
];

// Command/message buffering channels.
const COMMAND_BUFFER_PATTERNS: &[&str] = &[
    "mpsc::channel",
    "mpsc::unbounded_channel",
    "tokio::sync::mpsc",
    "crossbeam_channel",
    "crossbeam::channel",
];

// Staleness/ordering guard vocabulary (TTLs, deadlines, sequence counters).
const TTL_GUARD_PATTERNS: &[&str] = &[
    "ttl",
    "deadline",
    "expires",
    "stale",
    "sequence",
    "nonce",
    "generation",
];

// Interrupt-handler attributes (embedded targets).
const INTERRUPT_ATTRIBUTE_PATTERNS: &[&str] =
    &["#[interrupt]", "#[interrupt(", "#[cortex_m_rt::interrupt]"];

// Operations that are unsafe inside interrupt service routines
// (allocation and blocking locks).
const ISR_FORBIDDEN_PATTERNS: &[&str] = &[
    "vec::new(",
    "vec::with_capacity(",
    "string::new(",
    "string::with_capacity(",
    "box::new(",
    ".lock(",
    ".lock().await",
    ".read().await",
    ".write().await",
    "std::sync::mutex",
    "tokio::sync::mutex",
    "parking_lot::mutex",
];

// Iterator terminals that consume to completion.
const ITERATOR_TERMINAL_PATTERNS: &[&str] = &[
    ".collect(",
    ".collect::<",
    ".fold(",
    ".count(",
    ".last(",
    ".sum(",
];
// Adaptors that bound consumption.
const ITERATOR_BOUND_PATTERNS: &[&str] = &[".take(", ".nth(", ".next()"];
// Sources whose length is not statically bounded.
const OPEN_ENDED_ITERATOR_PATTERNS: &[&str] = &[
    "impl iterator",
    "iterator<",
    "read_dir(",
    "args_os(",
    "args(",
    "receiver",
    "stream",
];

// Hand-written Future polling that returns Pending.
const MANUAL_POLL_PENDING_PATTERNS: &[&str] = &["poll::pending", "return poll::pending"];
// Waker registration (expected near Pending returns).
const WAKE_PATTERNS: &[&str] = &["wake_by_ref(", ".wake()", "cx.waker()", "context.waker()"];

// Task spawns whose JoinHandle may be discarded (detached tasks).
const JOIN_HANDLE_DISCARD_SPAWN_PATTERNS: &[&str] = &[
    "tokio::spawn(",
    "tokio::task::spawn(",
    "tokio::spawn_blocking(",
    "tokio::task::spawn_blocking(",
];
// Surrounding context that indicates the handle is intentionally dropped.
const JOIN_HANDLE_DISCARD_CONTEXT_PATTERNS: &[&str] =
    &["let _ =", "_ =", "drop(", "std::mem::drop("];

// Relaxed atomic ordering near consensus/critical state vocabulary.
const RELAXED_ORDERING_PATTERNS: &[&str] = &["ordering::relaxed"];
const CRITICAL_STATE_PATTERNS: &[&str] = &[
    "quorum",
    "leader",
    "election",
    "lease",
    "term",
    "epoch",
    "heartbeat",
    "commit",
    "state",
];

// Raw `.write(` calls and the motifs that indicate short-write handling.
const WRITE_CALL_PATTERNS: &[&str] = &[".write("];
const WRITE_HANDLING_PATTERNS: &[&str] = &[
    "write_all(",
    "errorkind::interrupted",
    "wouldblock",
    "shortwrite",
];

// Async recursion attributes and depth-bounding vocabulary.
const ASYNC_RECURSION_PATTERNS: &[&str] = &["#[async_recursion", "async_recursion]"];
const DEPTH_BOUND_PATTERNS: &[&str] = &["depth", "limit", "max_depth", "remaining"];

// Channels with no capacity bound.
const UNBOUNDED_CHANNEL_PATTERNS: &[&str] = &["mpsc::unbounded_channel"];

// Signatures suggesting buffer/packet handling, and copy-on-read motifs.
const READ_BUFFER_SIGNATURE_PATTERNS: &[&str] = &[
    "&[u8]",
    "&mut [u8]",
    "bytes",
    "bytesmut",
    "packet",
    "frame",
    "buffer",
];
const COPY_ON_READ_PATTERNS: &[&str] = &[".to_vec()", ".clone()"];

// Assertion macros counted per function for Power-of-Ten rule proxies.
const ASSERT_PATTERNS: &[&str] = &[
    "assert!(",
    "assert_eq!(",
    "assert_ne!(",
    "debug_assert!(",
    "debug_assert_eq!(",
    "debug_assert_ne!(",
];

// ── Power-of-Ten rule proxy patterns ─────────────────────────────────

// Rule 1: control-flow constructs forbidden by the rule set.
const P10_RULE1_PATTERNS: &[&str] = &["goto ", "setjmp", "longjmp", "#[async_recursion"];
// Rule 7: explicit acknowledgment of ignored return values.
const P10_RULE7_EXPLICIT_IGNORE_PATTERNS: &[&str] = &["let _ =", ".ok();", ".err();"];
// Rule 8: macro definitions (preprocessor-use analogue).
const P10_RULE8_MACRO_PATTERNS: &[&str] = &[
    "macro_rules!",
    "#[proc_macro]",
    "#[proc_macro_derive]",
    "#[proc_macro_attribute]",
];
// Rule 10: warnings-as-errors configuration.
const P10_RULE10_WARNING_PATTERNS: &[&str] = &[
    "-d warnings",
    "#![deny(warnings)]",
    "deny(warnings)",
    "warnings = \"deny\"",
];
// Rule 10: static-analyzer usage evidence.
const P10_RULE10_ANALYZER_PATTERNS: &[&str] = &[
    "cargo clippy",
    "clippy::",
    "cargo audit",
    "cargo deny",
    "miri",
    "kani",
    "prusti",
    "creusot",
];
889
/// Scans a crate source tree under the default [`ScanProfile::General`]
/// interpretation profile. Convenience wrapper around
/// [`scan_crate_source_with_profile`].
pub fn scan_crate_source(root: &Path) -> io::Result<CrateSourceScanReport> {
    scan_crate_source_with_profile(root, ScanProfile::General)
}
894
/// Scans the crate tree at `root` and assembles the full scan report.
///
/// Pipeline: canonicalize the root, enumerate files, hash the tree, recover
/// VCS provenance, load documents (all artifacts, then the source-only
/// subset), parse the manifest, match heuristics, and build the
/// certification profile.
///
/// # Errors
/// Returns an I/O error if the root cannot be canonicalized or the tree
/// cannot be read/hashed.
pub fn scan_crate_source_with_profile(
    root: &Path,
    profile: ScanProfile,
) -> io::Result<CrateSourceScanReport> {
    let root = root.canonicalize()?;
    let all_files = collect_files(&root)?;
    let generated_at_utc = generated_scan_timestamp();
    // Digest covers every collected file, not just the source subset.
    let source_sha256 = compute_tree_sha256(&root, &all_files)?;
    let vcs_info = scan_vcs_info(&root);
    // Two document sets: all artifacts (docs, CI config, …) for lifecycle
    // checks, and the source-file subset for pattern heuristics.
    let artifact_documents = load_documents(&root, &all_files);
    let source_files = collect_source_scan_files(&all_files);
    let documents = load_documents(&root, &source_files);
    let manifest = scan_manifest(&root.join("Cargo.toml"));
    let (crate_name, crate_version) = crate_identity_from_manifest(&root, &manifest);
    let matched_heuristics = scan_matched_heuristics(&documents);
    let certification = build_certification_profile(
        &root,
        &all_files,
        &documents,
        &artifact_documents,
        &manifest,
    );

    Ok(build_crate_scan_report(ScanReportInputs {
        profile,
        generated_at_utc,
        root,
        source_sha256,
        vcs_info,
        files_scanned: source_files.len(),
        crate_name,
        crate_version,
        matched_heuristics,
        certification,
    }))
}
936
937fn generated_scan_timestamp() -> String {
938 OffsetDateTime::now_utc()
939 .format(&Rfc3339)
940 .unwrap_or_else(|_| "1970-01-01T00:00:00Z".to_string())
941}
942
943fn collect_source_scan_files(all_files: &[PathBuf]) -> Vec<PathBuf> {
944 all_files
945 .iter()
946 .filter(|path| is_source_scan_file(path))
947 .cloned()
948 .collect()
949}
950
951fn crate_identity_from_manifest(
952 root: &Path,
953 manifest: &ManifestMetadata,
954) -> (String, Option<String>) {
955 let crate_name = manifest.crate_name.clone().unwrap_or_else(|| {
956 root.file_name()
957 .and_then(|name| name.to_str())
958 .unwrap_or("unknown-crate")
959 .to_string()
960 });
961 (crate_name, manifest.crate_version.clone())
962}
963
/// Matches every registered DSFB heuristic against the loaded source
/// documents and returns those with at least one hit, sorted by descending
/// hit count (ties broken by heuristic id for deterministic output).
fn scan_matched_heuristics(documents: &[SourceDocument]) -> Vec<HeuristicSourceMatch> {
    let mut matched_heuristics = Vec::new();

    for entry in DEFAULT_ENTRIES {
        // A heuristic without a pattern spec cannot be source-matched; skip it.
        let Some(spec) = PATTERN_SPECS
            .iter()
            .find(|spec| spec.heuristic_id == entry.id.0)
        else {
            continue;
        };

        let scan = scan_patterns(documents, spec.patterns, MAX_EVIDENCE_PER_HEURISTIC);
        if scan.total_hits > 0 {
            matched_heuristics.push(HeuristicSourceMatch {
                heuristic: *entry,
                matched_patterns: scan.matched_patterns,
                evidence: scan.evidence,
                total_hits: scan.total_hits,
            });
        }
    }

    // Highest-signal heuristics first; id tiebreak keeps ordering stable.
    matched_heuristics.sort_by(|a, b| {
        b.total_hits
            .cmp(&a.total_hits)
            .then_with(|| a.heuristic.id.0.cmp(b.heuristic.id.0))
    });
    matched_heuristics
}
993
/// Assembles the full certification profile by running every sub-scan and
/// feeding their results into the scorecard.
///
/// Order matters: function summaries are extracted once and shared by the
/// Power-of-Ten and advanced structural audits; the scorecard is computed
/// last from the already-built profiles.
fn build_certification_profile(
    root: &Path,
    all_files: &[PathBuf],
    documents: &[SourceDocument],
    artifact_documents: &[SourceDocument],
    manifest: &ManifestMetadata,
) -> CertificationProfile {
    let runtime = scan_runtime_profile(documents);
    let safety = scan_safety_profile(documents);
    let verification = scan_verification_profile(all_files, documents);
    let build = scan_build_profile(root, documents, manifest);
    let lifecycle = scan_lifecycle_profile(all_files);
    let functions = extract_function_summaries(documents);
    let power_of_ten = scan_power_of_ten_profile(
        documents,
        artifact_documents,
        &functions,
        &runtime,
        &safety,
        &build,
    );
    let advanced =
        scan_advanced_structural_profile(documents, artifact_documents, &functions, &safety);
    let audit_score = build_audit_scorecard(
        &safety,
        &verification,
        &build,
        &lifecycle,
        manifest,
        &power_of_ten,
        &advanced,
    );

    CertificationProfile {
        runtime,
        safety,
        verification,
        build,
        lifecycle,
        power_of_ten,
        advanced,
        audit_score,
        manifest: manifest.clone(),
        artifacts_inspected: all_files.len(),
    }
}
1040
/// Bundles the pieces of a finished scan so `build_crate_scan_report` takes
/// one argument instead of ten.
struct ScanReportInputs {
    profile: ScanProfile,
    generated_at_utc: String,
    root: PathBuf,
    source_sha256: String,
    vcs_info: VcsInfo,
    files_scanned: usize,
    crate_name: String,
    crate_version: Option<String>,
    matched_heuristics: Vec<HeuristicSourceMatch>,
    certification: CertificationProfile,
}

/// Final assembly step: moves the scan inputs into the public report shape
/// and attaches the fixed static-proxy caveat.
fn build_crate_scan_report(inputs: ScanReportInputs) -> CrateSourceScanReport {
    CrateSourceScanReport {
        profile: inputs.profile,
        crate_name: inputs.crate_name,
        crate_version: inputs.crate_version,
        generated_at_utc: inputs.generated_at_utc,
        root: inputs.root,
        source_sha256: inputs.source_sha256,
        vcs_commit: inputs.vcs_info.git_commit,
        path_in_vcs: inputs.vcs_info.path_in_vcs,
        files_scanned: inputs.files_scanned,
        matched_heuristics: inputs.matched_heuristics,
        caveat: "Static source-visible proxy only: this report highlights structural motifs, constrained-runtime signals, verification evidence, and lifecycle artifacts. It does not certify the crate or infer live gray failures without runtime telemetry.",
        certification: inputs.certification,
    }
}
1070
1071pub fn derive_static_priors_from_scan(report: &CrateSourceScanReport) -> StaticPriorSet {
1077 report
1078 .matched_heuristics
1079 .iter()
1080 .fold(StaticPriorSet::new(), |priors, matched| {
1081 let confidence = ((matched.total_hits as f64).ln_1p() / 4.0).clamp(0.15, 0.95);
1082 let drift_scale = (1.0 - 0.25 * confidence).clamp(0.75, 1.0);
1083 let slew_scale = if matched.heuristic.slew_threshold > 0.0 {
1084 (1.0 - 0.30 * confidence).clamp(0.70, 1.0)
1085 } else {
1086 1.0
1087 };
1088 priors.with_prior(StaticPrior::new(
1089 matched.heuristic.id,
1090 confidence,
1091 drift_scale,
1092 slew_scale,
1093 ))
1094 })
1095}
1096
/// Renders the full human-readable text report for a scan.
///
/// Sections are emitted in a fixed order: header, summary, badge, score,
/// findings, hotspots, themes, remediation, verification, evidence ledger,
/// detailed audit surface, derived priors, heuristic motifs, and the
/// closing conclusion lenses.
pub fn render_scan_report(report: &CrateSourceScanReport) -> String {
    let findings = collect_canonical_findings(report);
    let advisory_subscores = advisory_subscores(report);
    let derived_priors = derive_static_priors_from_scan(report);
    // Pre-size generously; reports are multiple KiB of text.
    let mut out = String::with_capacity(8192);
    render_scan_report_header(&mut out, report);

    render_audit_summary(&mut out, report, &findings);
    render_report_badge_section(&mut out, report);
    render_audit_score_section(&mut out, report, &advisory_subscores);
    render_top_findings(&mut out, &findings);
    render_hotspots_section(&mut out, &report.certification.advanced.hotspots);
    render_code_quality_themes(&mut out, &findings);
    render_remediation_guide(&mut out, &findings);
    render_verification_suggestions(&mut out, &findings);
    render_evidence_ledger(&mut out, &findings);
    render_detailed_audit_surface(&mut out, report);
    render_derived_priors_section(&mut out, report, &derived_priors);
    render_heuristic_motif_section(&mut out, report);
    render_conclusion_lenses(&mut out, report, &findings);
    out
}
1120
/// Writes the report banner and the identity/provenance key-value block
/// (crate, version, timestamp, hashes, VCS info, scan counters, caveat).
fn render_scan_report_header(out: &mut String, report: &CrateSourceScanReport) {
    out.push_str("╔══════════════════════════════════════════════════════════════╗\n");
    out.push_str("║ DSFB Gray Static Crate Scan Report ║\n");
    out.push_str("║ Canonical Broad Audit for Code Quality + Review Readiness║\n");
    out.push_str("╚══════════════════════════════════════════════════════════════╝\n\n");

    out.push_str(&format!("Crate: {}\n", report.crate_name));
    // Version line is omitted entirely when the manifest declares none.
    if let Some(version) = &report.crate_version {
        out.push_str(&format!("Version: {}\n", version));
    }
    out.push_str(&format!(
        "Generated At (UTC): {}\n",
        report.generated_at_utc
    ));
    out.push_str(&format!("Root: {}\n", report.root.display()));
    out.push_str(&format!(
        "Scanned Crate: https://crates.io/crates/{}\n",
        report.crate_name
    ));
    out.push_str(&format!(
        "Scanned Crate Docs: https://docs.rs/{}\n",
        report.crate_name
    ));
    out.push_str("Scanner Crate: https://crates.io/crates/dsfb-gray\n");
    out.push_str("Scanner Docs: https://docs.rs/dsfb-gray\n");
    out.push_str(&format!("Source SHA-256: {}\n", report.source_sha256));
    out.push_str(&format!(
        "VCS Commit: {}\n",
        report.vcs_commit.as_deref().unwrap_or("not declared")
    ));
    out.push_str(&format!(
        "Path In VCS: {}\n",
        report.path_in_vcs.as_deref().unwrap_or("not declared")
    ));
    out.push_str(&format!("Source Files Scanned: {}\n", report.files_scanned));
    out.push_str(&format!(
        "Artifact Files Inspected: {}\n",
        report.certification.artifacts_inspected
    ));
    out.push_str(&format!(
        "Matched Heuristics: {}\n",
        report.matched_heuristics.len()
    ));
    out.push_str(&format!("Caveat: {}\n\n", report.caveat));
}
1166
/// Writes the "Detailed Audit Surface" umbrella section: the full runtime,
/// safety, verification, build, lifecycle, Power-of-Ten, and advanced
/// structural evidence views, in that fixed order.
fn render_detailed_audit_surface(out: &mut String, report: &CrateSourceScanReport) {
    out.push_str("Detailed Audit Surface\n");
    out.push_str("──────────────────────────────────────────────────────────────\n");
    out.push_str(
        "The sections below preserve the full DSFB audit breadth. They are detailed evidence views, not separate scan modes.\n\n",
    );
    render_runtime_section(out, &report.certification.runtime);
    render_safety_section(out, &report.certification.safety);
    render_verification_section(out, &report.certification.verification);
    render_build_section(out, &report.certification.build);
    render_lifecycle_section(
        out,
        &report.certification.lifecycle,
        &report.certification.manifest,
    );
    render_power_of_ten_section(out, &report.certification.power_of_ten);
    render_advanced_structural_section(out, &report.certification.advanced);
}
1185
/// Writes the derived-priors section: one line per matched heuristic with
/// its bounded confidence, drift scale, and slew scale, or a placeholder
/// when no motifs matched.
fn render_derived_priors_section(
    out: &mut String,
    report: &CrateSourceScanReport,
    derived_priors: &StaticPriorSet,
) {
    out.push_str("Derived Runtime Structural Priors\n");
    out.push_str("──────────────────────────────────────────────────────────────\n");
    out.push_str(
        "These bounded priors are derived from static source motifs. They are meant to bias runtime review toward structurally plausible motifs, not to override runtime evidence.\n",
    );
    if report.matched_heuristics.is_empty() {
        out.push_str("No static priors derived because no DSFB source motifs matched.\n\n");
        return;
    }

    // Iterate the matched list (already sorted by hit count) so the output
    // ordering matches the motif section.
    for matched in &report.matched_heuristics {
        if let Some(prior) = derived_priors.get(matched.heuristic.id) {
            out.push_str(&format!(
                "{} confidence={:.2} drift_scale={:.2} slew_scale={:.2}\n",
                matched.heuristic.id.0, prior.confidence, prior.drift_scale, prior.slew_scale,
            ));
        }
    }
    out.push('\n');
}
1211
1212fn render_heuristic_motif_section(out: &mut String, report: &CrateSourceScanReport) {
1213 out.push_str("DSFB Heuristic Motifs\n");
1214 out.push_str("──────────────────────────────────────────────────────────────\n");
1215 if report.matched_heuristics.is_empty() {
1216 out.push_str("No DSFB source motifs matched.\n\n");
1217 return;
1218 }
1219
1220 for matched in &report.matched_heuristics {
1221 render_heuristic_motif(out, matched);
1222 }
1223}
1224
1225fn render_heuristic_motif(out: &mut String, matched: &HeuristicSourceMatch) {
1226 out.push_str(&format!(
1227 "{} → {:?}\n",
1228 matched.heuristic.id.0, matched.heuristic.reason_code
1229 ));
1230 out.push_str(&format!(
1231 " Description: {}\n",
1232 matched.heuristic.description
1233 ));
1234 out.push_str(&format!(
1235 " Provenance: {}\n",
1236 matched.heuristic.provenance
1237 ));
1238 out.push_str(&format!(" Total Hits: {}\n", matched.total_hits));
1239 out.push_str(&format!(
1240 " Patterns: {}\n",
1241 matched.matched_patterns.join(", ")
1242 ));
1243 out.push_str(&format!(
1244 " Remediation: {}\n",
1245 heuristic_remediation(matched.heuristic.id.0)
1246 ));
1247 render_named_evidence_block(out, " Evidence", matched.heuristic.id.0, &matched.evidence);
1248 out.push_str(&format!(
1249 " Classification: {}\n",
1250 heuristic_classification(matched.heuristic.id.0)
1251 ));
1252 out.push_str(&format!(
1253 " Confidence: {}\n",
1254 heuristic_confidence(matched.total_hits)
1255 ));
1256 out.push_str(&format!(
1257 " Impact Kind: {}\n",
1258 heuristic_impact_kind(matched.heuristic.id.0)
1259 ));
1260 out.push_str(&format!(
1261 " Why This Matters In Rust: {}\n",
1262 heuristic_rust_why(matched.heuristic.id.0)
1263 ));
1264 out.push_str(&format!(
1265 " Review / Readiness Note: {}\n",
1266 heuristic_readiness_why(matched.heuristic.id.0)
1267 ));
1268 out.push_str(&format!(
1269 " Verification Suggestion: {}\n",
1270 heuristic_verification_suggestion(matched.heuristic.id.0)
1271 ));
1272 out.push('\n');
1273}
1274
1275pub fn render_scan_sarif(report: &CrateSourceScanReport) -> String {
1277 serde_json::to_string_pretty(&build_sarif_value(report))
1278 .unwrap_or_else(|err| format!("{{\"error\":\"failed to render SARIF: {err}\"}}"))
1279}
1280
1281pub fn render_scan_attestation_statement(report: &CrateSourceScanReport) -> String {
1283 let sarif_json = render_scan_sarif(report);
1284 serde_json::to_string_pretty(&build_attestation_statement_value(report, &sarif_json))
1285 .unwrap_or_else(|err| {
1286 format!("{{\"error\":\"failed to render in-toto statement: {err}\"}}")
1287 })
1288}
1289
1290pub fn render_scan_dsse_envelope(
1294 report: &CrateSourceScanReport,
1295 signer: Option<&ScanSigningKey>,
1296) -> String {
1297 let statement_json = render_scan_attestation_statement(report);
1298 let payload = statement_json.as_bytes();
1299 let signatures = signer
1300 .map(|signer| {
1301 vec![json!({
1302 "keyid": signer.key_id(),
1303 "sig": signer.sign(DSSE_PAYLOAD_TYPE, payload),
1304 })]
1305 })
1306 .unwrap_or_default();
1307
1308 serde_json::to_string_pretty(&json!({
1309 "payloadType": DSSE_PAYLOAD_TYPE,
1310 "payload": BASE64_STANDARD.encode(payload),
1311 "signatures": signatures,
1312 }))
1313 .unwrap_or_else(|err| format!("{{\"error\":\"failed to render DSSE envelope: {err}\"}}"))
1314}
1315
1316pub fn prepare_scan_output_run(base_output_root: &Path) -> io::Result<ScanRunPaths> {
1318 let timestamp_utc = scan_run_timestamp(OffsetDateTime::now_utc());
1319 prepare_scan_output_run_at(base_output_root, ×tamp_utc)
1320}
1321
1322pub fn migrate_legacy_scan_artifacts(
1324 legacy_root: &Path,
1325 base_output_root: &Path,
1326) -> io::Result<Option<PathBuf>> {
1327 let legacy_files = collect_legacy_scan_artifacts(legacy_root)?;
1328 if legacy_files.is_empty() {
1329 return Ok(None);
1330 }
1331
1332 fs::create_dir_all(base_output_root)?;
1333 let migration_dir = create_unique_run_dir(
1334 base_output_root,
1335 &format!(
1336 "dsfb-gray-{}-migration",
1337 scan_run_timestamp(OffsetDateTime::now_utc())
1338 ),
1339 )?;
1340
1341 for legacy_path in legacy_files {
1342 let Some(file_name) = legacy_path.file_name() else {
1343 continue;
1344 };
1345 fs::rename(&legacy_path, migration_dir.join(file_name))?;
1346 }
1347
1348 Ok(Some(migration_dir))
1349}
1350
1351pub fn export_scan_artifacts(
1353 report: &CrateSourceScanReport,
1354 out_dir: &Path,
1355 signer: Option<&ScanSigningKey>,
1356) -> io::Result<ScanArtifactPaths> {
1357 fs::create_dir_all(out_dir)?;
1358 let stem = scan_artifact_stem(report);
1359 let report_path = out_dir.join(format!("{stem}.txt"));
1360 let sarif_path = out_dir.join(format!("{stem}.sarif.json"));
1361 let statement_path = out_dir.join(format!("{stem}.intoto.json"));
1362 let dsse_path = out_dir.join(format!("{stem}.dsse.json"));
1363
1364 fs::write(&report_path, render_scan_report(report))?;
1365 fs::write(&sarif_path, render_scan_sarif(report))?;
1366 fs::write(&statement_path, render_scan_attestation_statement(report))?;
1367 fs::write(&dsse_path, render_scan_dsse_envelope(report, signer))?;
1368
1369 Ok(ScanArtifactPaths {
1370 output_dir: out_dir.to_path_buf(),
1371 report_path,
1372 sarif_path,
1373 statement_path,
1374 dsse_path,
1375 signed: signer.is_some(),
1376 })
1377}
1378
/// Builds the full SARIF 2.1.0 document for a scan report.
///
/// Produces a single run whose rules/results come from the heuristic,
/// Power-of-Ten, and advanced-structural audits, plus a `properties` bag
/// carrying advisory subscores and the canonical finding list.
fn build_sarif_value(report: &CrateSourceScanReport) -> Value {
    let findings = collect_canonical_findings(report);
    let advisory_subscores = advisory_subscores(report);
    let (rules, results) = build_sarif_rules_and_results(report);

    json!({
        "$schema": "https://json.schemastore.org/sarif-2.1.0.json",
        "version": "2.1.0",
        "runs": [{
            "tool": {
                "driver": {
                    "name": "DSFB Gray Scanner",
                    "version": CRATE_VERSION,
                    "informationUri": "https://github.com/infinityabundance/dsfb-gray",
                    "rules": rules,
                }
            },
            "automationDetails": {
                "id": "dsfb-gray/crate-scan",
            },
            "invocations": [{
                "executionSuccessful": true,
                // The report's generation time doubles as the invocation end time.
                "endTimeUtc": report.generated_at_utc,
            }],
            "results": results,
            "properties": build_sarif_properties(report, &advisory_subscores, &findings),
        }]
    })
}
1408
/// Builds the in-toto v1 Statement wrapping the scan predicate.
///
/// The rendered SARIF string is hashed (SHA-256) so the statement binds to
/// the exact SARIF artifact emitted alongside it.
fn build_attestation_statement_value(report: &CrateSourceScanReport, sarif_json: &str) -> Value {
    let sarif_sha256 = sha256_hex(sarif_json.as_bytes());
    let structural_priors = derive_static_priors_from_scan(report);
    let findings = collect_canonical_findings(report);
    let advisory_subscores = advisory_subscores(report);

    json!({
        "_type": "https://in-toto.io/Statement/v1",
        "subject": [build_attestation_subject(report)],
        "predicateType": DSFB_PREDICATE_TYPE,
        "predicate": build_attestation_predicate(
            report,
            sarif_sha256,
            &structural_priors,
            &findings,
            &advisory_subscores,
        ),
    })
}
1428
1429fn build_sarif_rules_and_results(report: &CrateSourceScanReport) -> (Vec<Value>, Vec<Value>) {
1430 let mut rules = Vec::new();
1431 let mut results = Vec::new();
1432 append_heuristic_sarif_entries(&mut rules, &mut results, report);
1433 append_power_of_ten_sarif_entries(&mut rules, &mut results, report);
1434 append_advanced_sarif_entries(&mut rules, &mut results, report);
1435 (rules, results)
1436}
1437
1438fn append_heuristic_sarif_entries(
1439 rules: &mut Vec<Value>,
1440 results: &mut Vec<Value>,
1441 report: &CrateSourceScanReport,
1442) {
1443 for matched in &report.matched_heuristics {
1444 rules.push(sarif_rule_for_heuristic(matched));
1445 results.push(sarif_result_for_heuristic(matched));
1446 }
1447}
1448
/// Builds the SARIF `reportingDescriptor` (rule) for one matched heuristic.
/// The heuristic id serves as both rule id and name; guidance lookups are
/// keyed on that id, while confidence derives from the hit count.
fn sarif_rule_for_heuristic(matched: &HeuristicSourceMatch) -> Value {
    json!({
        "id": matched.heuristic.id.0,
        "name": matched.heuristic.id.0,
        "shortDescription": { "text": matched.heuristic.description },
        "fullDescription": { "text": matched.heuristic.provenance },
        "help": { "text": heuristic_remediation(matched.heuristic.id.0) },
        "properties": {
            "dsfbCategory": "heuristic",
            "reasonCode": format!("{:?}", matched.heuristic.reason_code),
            "classification": heuristic_classification(matched.heuristic.id.0),
            "confidence": heuristic_confidence(matched.total_hits),
            "impactKind": heuristic_impact_kind(matched.heuristic.id.0),
            // Flags that this rule is advisory, not a certification verdict.
            "guidanceOnly": true,
        }
    })
}
1466
/// Builds the SARIF result for one matched heuristic. Results are emitted at
/// `warning` level with `kind: review`, carrying evidence locations plus the
/// full guidance properties mirrored from the rule.
fn sarif_result_for_heuristic(matched: &HeuristicSourceMatch) -> Value {
    json!({
        "ruleId": matched.heuristic.id.0,
        "level": "warning",
        "kind": "review",
        "message": {
            "text": format!(
                "{} matched {} source motif hit(s) with reason code {:?}.",
                matched.heuristic.id.0,
                matched.total_hits,
                matched.heuristic.reason_code
            )
        },
        "locations": sarif_locations(&matched.evidence),
        "properties": {
            "dsfbCategory": "heuristic",
            "totalHits": matched.total_hits,
            "matchedPatterns": matched.matched_patterns,
            "provenance": matched.heuristic.provenance,
            "classification": heuristic_classification(matched.heuristic.id.0),
            "confidence": heuristic_confidence(matched.total_hits),
            "impactKind": heuristic_impact_kind(matched.heuristic.id.0),
            "verificationSuggestion": heuristic_verification_suggestion(matched.heuristic.id.0),
            "remediation": heuristic_remediation(matched.heuristic.id.0),
            // Stable per-evidence identifiers for cross-referencing artifacts.
            "evidenceIds": evidence_ids(matched.heuristic.id.0, &matched.evidence),
        }
    })
}
1495
1496fn append_power_of_ten_sarif_entries(
1497 rules: &mut Vec<Value>,
1498 results: &mut Vec<Value>,
1499 report: &CrateSourceScanReport,
1500) {
1501 for rule in &report.certification.power_of_ten.rules {
1502 if rule.status == PowerOfTenStatus::Applied {
1503 continue;
1504 }
1505 rules.push(sarif_rule_for_power_of_ten(rule));
1506 results.push(sarif_result_for_power_of_ten(rule));
1507 }
1508}
1509
/// Builds the SARIF rule for one Power-of-Ten audit entry, using the
/// synthetic id `P10-<number>`.
fn sarif_rule_for_power_of_ten(rule: &PowerOfTenRuleAudit) -> Value {
    json!({
        "id": format!("P10-{}", rule.number),
        "name": format!("P10-{}", rule.number),
        "shortDescription": { "text": rule.title },
        "fullDescription": { "text": rule.detail },
        "help": { "text": power_of_ten_remediation(rule.number) },
        "properties": {
            "dsfbCategory": "nasa-power-of-ten",
            "status": power_of_ten_status_label(rule.status),
            "classification": power_of_ten_classification(rule.number),
            // Confidence accounts for both audit status and evidence volume.
            "confidence": power_of_ten_confidence(rule.status, rule.evidence.len()),
            "impactKind": power_of_ten_impact_kind(rule.number),
            "guidanceOnly": true,
        }
    })
}
1527
/// Builds the SARIF result for one Power-of-Ten audit entry.
/// `NotApplied` rules surface as warnings; everything else is a note.
fn sarif_result_for_power_of_ten(rule: &PowerOfTenRuleAudit) -> Value {
    let rule_id = format!("P10-{}", rule.number);
    json!({
        "ruleId": rule_id,
        "level": if rule.status == PowerOfTenStatus::NotApplied { "warning" } else { "note" },
        "kind": "review",
        "message": { "text": format!("{}: {}", rule.title, rule.detail) },
        "locations": sarif_locations(&rule.evidence),
        "properties": {
            "dsfbCategory": "nasa-power-of-ten",
            "status": power_of_ten_status_label(rule.status),
            "classification": power_of_ten_classification(rule.number),
            "confidence": power_of_ten_confidence(rule.status, rule.evidence.len()),
            "impactKind": power_of_ten_impact_kind(rule.number),
            "verificationSuggestion": power_of_ten_verification_suggestion(rule.number),
            "remediation": power_of_ten_remediation(rule.number),
            "evidenceIds": evidence_ids(&rule_id, &rule.evidence),
        }
    })
}
1548
1549fn append_advanced_sarif_entries(
1550 rules: &mut Vec<Value>,
1551 results: &mut Vec<Value>,
1552 report: &CrateSourceScanReport,
1553) {
1554 for check in &report.certification.advanced.checks {
1555 if check.status == StructuralCheckStatus::Clear {
1556 continue;
1557 }
1558 rules.push(sarif_rule_for_advanced_check(check));
1559 results.push(sarif_result_for_advanced_check(check));
1560 }
1561}
1562
/// Builds the SARIF rule for one advanced structural check; the check's own
/// id doubles as the rule id/name.
fn sarif_rule_for_advanced_check(check: &AdvancedStructuralCheck) -> Value {
    json!({
        "id": check.id,
        "name": check.id,
        "shortDescription": { "text": check.title },
        "fullDescription": { "text": check.detail },
        "help": { "text": advanced_check_remediation(check.id) },
        "properties": {
            "dsfbCategory": "advanced-structural",
            "status": structural_check_status_label(check.status),
            "classification": advanced_check_classification(check.id),
            // Confidence accounts for both check status and evidence volume.
            "confidence": advanced_check_confidence(check.status, check.evidence.len()),
            "impactKind": advanced_check_impact_kind(check.id),
            "guidanceOnly": true,
        }
    })
}
1580
/// Builds the SARIF result for one advanced structural check.
/// `Elevated` checks surface as warnings; everything else is a note.
fn sarif_result_for_advanced_check(check: &AdvancedStructuralCheck) -> Value {
    json!({
        "ruleId": check.id,
        "level": if check.status == StructuralCheckStatus::Elevated { "warning" } else { "note" },
        "kind": "review",
        "message": { "text": format!("{}: {}", check.title, check.detail) },
        "locations": sarif_locations(&check.evidence),
        "properties": {
            "dsfbCategory": "advanced-structural",
            "status": structural_check_status_label(check.status),
            "classification": advanced_check_classification(check.id),
            "confidence": advanced_check_confidence(check.status, check.evidence.len()),
            "impactKind": advanced_check_impact_kind(check.id),
            "verificationSuggestion": advanced_check_verification_suggestion(check.id),
            "remediation": advanced_check_remediation(check.id),
            "evidenceIds": evidence_ids(check.id, &check.evidence),
        }
    })
}
1600
/// Builds the run-level SARIF `properties` bag: crate identity/provenance,
/// audit score, advisory subscores, non-certification guidance semantics,
/// and the conclusion lenses derived from the canonical findings.
fn build_sarif_properties(
    report: &CrateSourceScanReport,
    advisory_subscores: &[AdvisorySubscore],
    findings: &[CanonicalFinding],
) -> Value {
    json!({
        "crateName": report.crate_name,
        "crateVersion": report.crate_version,
        "auditMode": "canonical-broad-audit",
        "sourceRoot": report.root.display().to_string(),
        "sourceSha256": report.source_sha256,
        "vcsCommit": report.vcs_commit,
        "pathInVcs": report.path_in_vcs,
        "filesScanned": report.files_scanned,
        "artifactsInspected": report.certification.artifacts_inspected,
        "auditScore": audit_score_json(&report.certification.audit_score),
        "advisorySubscores": advisory_subscores_json(advisory_subscores),
        "guidanceSemantics": {
            "codeQualityGoal": true,
            "reviewReadinessGoal": true,
            "nonCertificationStatement": "DSFB does not certify compliance with IEC, ISO, RTCA, MIL, NIST, or other standards. Use this audit as a guideline for improvement and review readiness."
        },
        "conclusionLenses": conclusion_lenses_json(report, findings),
    })
}
1626
1627fn advisory_subscores_json(advisory_subscores: &[AdvisorySubscore]) -> Vec<Value> {
1628 advisory_subscores
1629 .iter()
1630 .map(|subscore| {
1631 json!({
1632 "id": subscore.id,
1633 "title": subscore.title,
1634 "percent": round_percent(subscore.percent),
1635 "basis": subscore.basis,
1636 })
1637 })
1638 .collect()
1639}
1640
1641fn build_attestation_subject(report: &CrateSourceScanReport) -> Value {
1642 let subject_name = match &report.crate_version {
1643 Some(version) => format!("pkg:cargo/{}@{}", report.crate_name, version),
1644 None => format!("pkg:cargo/{}", report.crate_name),
1645 };
1646 json!({
1647 "name": subject_name,
1648 "digest": { "sha256": report.source_sha256 }
1649 })
1650}
1651
/// Builds the DSFB attestation predicate: scanner identity, guidance
/// semantics, crate provenance, the SARIF binding (media type + digest +
/// result count), and the summary section.
fn build_attestation_predicate(
    report: &CrateSourceScanReport,
    sarif_sha256: String,
    structural_priors: &StaticPriorSet,
    findings: &[CanonicalFinding],
    advisory_subscores: &[AdvisorySubscore],
) -> Value {
    json!({
        "generatedAtUtc": report.generated_at_utc,
        "scanner": attestation_scanner_json(),
        "guidanceSemantics": {
            "codeQualityGoal": true,
            "reviewReadinessGoal": true,
            "nonCertificationStatement": "DSFB findings may support internal review against standards-oriented expectations, but DSFB does not certify compliance with IEC, ISO, RTCA, MIL, NIST, or other standards.",
        },
        "crate": attestation_crate_json(report),
        "sarif": {
            "mediaType": "application/sarif+json",
            // Digest of the co-emitted SARIF artifact; binds predicate to SARIF.
            "sha256": sarif_sha256,
            "resultCount": build_sarif_result_count(report),
        },
        "summary": build_attestation_summary(report, structural_priors, findings, advisory_subscores),
    })
}
1676
/// Describes the scanner itself inside the predicate: name, crate version,
/// and the fixed list of audit stages ("recipe") that produced the results.
fn attestation_scanner_json() -> Value {
    json!({
        "name": "dsfb-gray",
        "version": CRATE_VERSION,
        // Bumped when the recipe list below changes shape or ordering.
        "recipeVersion": 1,
        "auditMode": "canonical-broad-audit",
        "recipe": [
            "heuristics-bank-default",
            "constrained-runtime-audit",
            "unsafe-panic-ffi-audit",
            "verification-audit",
            "lifecycle-audit",
            "nasa-power-of-ten-audit",
            "advanced-structural-audit"
        ]
    })
}
1694
/// Describes the scanned crate inside the predicate: identity, source root,
/// digest, VCS provenance, and scan coverage counts.
fn attestation_crate_json(report: &CrateSourceScanReport) -> Value {
    json!({
        "name": report.crate_name,
        "version": report.crate_version,
        "root": report.root.display().to_string(),
        "sourceSha256": report.source_sha256,
        "vcsCommit": report.vcs_commit,
        "pathInVcs": report.path_in_vcs,
        "filesScanned": report.files_scanned,
        "artifactsInspected": report.certification.artifacts_inspected,
    })
}
1707
/// Builds the predicate's summary section: audit score, advisory subscores,
/// matched heuristics (with any derived priors), status tallies for the
/// Power-of-Ten and advanced-structural audits, hotspots, findings, and
/// conclusion lenses.
fn build_attestation_summary(
    report: &CrateSourceScanReport,
    structural_priors: &StaticPriorSet,
    findings: &[CanonicalFinding],
    advisory_subscores: &[AdvisorySubscore],
) -> Value {
    let (power_applied, power_not_applied, power_indeterminate) =
        power_of_ten_status_counts(report);
    let (advanced_elevated, advanced_clear, advanced_indeterminate) =
        advanced_status_counts(report);
    json!({
        "auditScore": audit_score_json(&report.certification.audit_score),
        "advisorySubscores": advisory_subscores_json(advisory_subscores),
        "matchedHeuristics": attestation_heuristics_json(report, structural_priors),
        "powerOfTen": {
            "applied": power_applied,
            "notApplied": power_not_applied,
            "indeterminate": power_indeterminate,
        },
        "advancedStructural": {
            "elevated": advanced_elevated,
            "clear": advanced_clear,
            "indeterminate": advanced_indeterminate,
        },
        "criticalityHotspots": attestation_hotspots_json(report),
        "findings": attestation_findings_json(findings),
        "conclusionLenses": conclusion_lenses_json(report, findings),
    })
}
1737
1738fn power_of_ten_status_counts(report: &CrateSourceScanReport) -> (usize, usize, usize) {
1739 let applied = report
1740 .certification
1741 .power_of_ten
1742 .rules
1743 .iter()
1744 .filter(|rule| rule.status == PowerOfTenStatus::Applied)
1745 .count();
1746 let not_applied = report
1747 .certification
1748 .power_of_ten
1749 .rules
1750 .iter()
1751 .filter(|rule| rule.status == PowerOfTenStatus::NotApplied)
1752 .count();
1753 let indeterminate = report
1754 .certification
1755 .power_of_ten
1756 .rules
1757 .iter()
1758 .filter(|rule| rule.status == PowerOfTenStatus::Indeterminate)
1759 .count();
1760 (applied, not_applied, indeterminate)
1761}
1762
1763fn advanced_status_counts(report: &CrateSourceScanReport) -> (usize, usize, usize) {
1764 let elevated = report
1765 .certification
1766 .advanced
1767 .checks
1768 .iter()
1769 .filter(|check| check.status == StructuralCheckStatus::Elevated)
1770 .count();
1771 let clear = report
1772 .certification
1773 .advanced
1774 .checks
1775 .iter()
1776 .filter(|check| check.status == StructuralCheckStatus::Clear)
1777 .count();
1778 let indeterminate = report
1779 .certification
1780 .advanced
1781 .checks
1782 .iter()
1783 .filter(|check| check.status == StructuralCheckStatus::Indeterminate)
1784 .count();
1785 (elevated, clear, indeterminate)
1786}
1787
1788fn attestation_heuristics_json(
1789 report: &CrateSourceScanReport,
1790 structural_priors: &StaticPriorSet,
1791) -> Vec<Value> {
1792 report
1793 .matched_heuristics
1794 .iter()
1795 .map(|matched| {
1796 let prior = structural_priors.get(matched.heuristic.id);
1797 json!({
1798 "id": matched.heuristic.id.0,
1799 "reasonCode": format!("{:?}", matched.heuristic.reason_code),
1800 "totalHits": matched.total_hits,
1801 "matchedPatterns": matched.matched_patterns,
1802 "structuralPrior": prior.map(|prior| json!({
1803 "confidence": prior.confidence,
1804 "driftScale": prior.drift_scale,
1805 "slewScale": prior.slew_scale,
1806 })),
1807 })
1808 })
1809 .collect()
1810}
1811
1812fn attestation_hotspots_json(report: &CrateSourceScanReport) -> Vec<Value> {
1813 report
1814 .certification
1815 .advanced
1816 .hotspots
1817 .iter()
1818 .map(|hotspot| {
1819 json!({
1820 "path": hotspot.path.display().to_string(),
1821 "line": hotspot.start_line,
1822 "function": hotspot.function_name,
1823 "riskScore": hotspot.risk_score,
1824 "estimatedComplexity": hotspot.estimated_complexity,
1825 "signals": hotspot.signals,
1826 })
1827 })
1828 .collect()
1829}
1830
1831fn attestation_findings_json(findings: &[CanonicalFinding]) -> Vec<Value> {
1832 findings
1833 .iter()
1834 .map(|finding| {
1835 json!({
1836 "id": finding.id,
1837 "title": finding.title,
1838 "category": finding.category,
1839 "status": finding.status_label,
1840 "classification": finding.classification,
1841 "confidence": finding.confidence,
1842 "impactKind": finding.impact_kind,
1843 "remediation": finding.remediation,
1844 "verificationSuggestion": finding.verification,
1845 "evidenceIds": evidence_ids(&finding.id, &finding.evidence),
1846 })
1847 })
1848 .collect()
1849}
1850
1851fn build_sarif_result_count(report: &CrateSourceScanReport) -> usize {
1852 let p10_results = report
1853 .certification
1854 .power_of_ten
1855 .rules
1856 .iter()
1857 .filter(|rule| rule.status != PowerOfTenStatus::Applied)
1858 .count();
1859 let advanced_results = report
1860 .certification
1861 .advanced
1862 .checks
1863 .iter()
1864 .filter(|check| check.status != StructuralCheckStatus::Clear)
1865 .count();
1866
1867 report.matched_heuristics.len() + p10_results + advanced_results
1868}
1869
1870fn collect_canonical_findings(report: &CrateSourceScanReport) -> Vec<CanonicalFinding> {
1871 let mut findings = collect_heuristic_findings(report);
1872 append_power_of_ten_findings(&mut findings, report);
1873 append_advanced_findings(&mut findings, report);
1874 sort_canonical_findings(&mut findings);
1875 findings
1876}
1877
1878fn collect_heuristic_findings(report: &CrateSourceScanReport) -> Vec<CanonicalFinding> {
1879 report
1880 .matched_heuristics
1881 .iter()
1882 .map(build_heuristic_finding)
1883 .collect()
1884}
1885
1886fn build_heuristic_finding(matched: &HeuristicSourceMatch) -> CanonicalFinding {
1887 CanonicalFinding {
1888 id: matched.heuristic.id.0.to_string(),
1889 title: matched.heuristic.description.to_string(),
1890 category: "heuristic",
1891 status_label: "matched",
1892 severity_rank: 40 + matched.total_hits.min(20),
1893 classification: heuristic_classification(matched.heuristic.id.0),
1894 confidence: heuristic_confidence(matched.total_hits),
1895 impact_kind: heuristic_impact_kind(matched.heuristic.id.0),
1896 rust_why: heuristic_rust_why(matched.heuristic.id.0),
1897 readiness_why: heuristic_readiness_why(matched.heuristic.id.0),
1898 detail: matched.heuristic.provenance.to_string(),
1899 remediation: heuristic_remediation(matched.heuristic.id.0),
1900 verification: heuristic_verification_suggestion(matched.heuristic.id.0),
1901 evidence: matched.evidence.clone(),
1902 }
1903}
1904
1905fn append_power_of_ten_findings(
1906 findings: &mut Vec<CanonicalFinding>,
1907 report: &CrateSourceScanReport,
1908) {
1909 findings.extend(
1910 report
1911 .certification
1912 .power_of_ten
1913 .rules
1914 .iter()
1915 .filter(|rule| rule.status != PowerOfTenStatus::Applied)
1916 .map(build_power_of_ten_finding),
1917 );
1918}
1919
1920fn build_power_of_ten_finding(rule: &PowerOfTenRuleAudit) -> CanonicalFinding {
1921 CanonicalFinding {
1922 id: format!("P10-{}", rule.number),
1923 title: rule.title.to_string(),
1924 category: "nasa-power-of-ten",
1925 status_label: power_of_ten_status_label(rule.status),
1926 severity_rank: match rule.status {
1927 PowerOfTenStatus::NotApplied => 85,
1928 PowerOfTenStatus::Indeterminate => 60,
1929 PowerOfTenStatus::Applied => 0,
1930 },
1931 classification: power_of_ten_classification(rule.number),
1932 confidence: power_of_ten_confidence(rule.status, rule.evidence.len()),
1933 impact_kind: power_of_ten_impact_kind(rule.number),
1934 rust_why: power_of_ten_rust_why(rule.number),
1935 readiness_why: power_of_ten_readiness_why(rule.number),
1936 detail: rule.detail.clone(),
1937 remediation: power_of_ten_remediation(rule.number),
1938 verification: power_of_ten_verification_suggestion(rule.number),
1939 evidence: rule.evidence.clone(),
1940 }
1941}
1942
1943fn append_advanced_findings(findings: &mut Vec<CanonicalFinding>, report: &CrateSourceScanReport) {
1944 findings.extend(
1945 report
1946 .certification
1947 .advanced
1948 .checks
1949 .iter()
1950 .filter(|check| check.status != StructuralCheckStatus::Clear)
1951 .map(build_advanced_finding),
1952 );
1953}
1954
1955fn build_advanced_finding(check: &AdvancedStructuralCheck) -> CanonicalFinding {
1956 CanonicalFinding {
1957 id: check.id.to_string(),
1958 title: check.title.to_string(),
1959 category: "advanced-structural",
1960 status_label: structural_check_status_label(check.status),
1961 severity_rank: match check.status {
1962 StructuralCheckStatus::Elevated => 90,
1963 StructuralCheckStatus::Indeterminate => 65,
1964 StructuralCheckStatus::Clear => 0,
1965 },
1966 classification: advanced_check_classification(check.id),
1967 confidence: advanced_check_confidence(check.status, check.evidence.len()),
1968 impact_kind: advanced_check_impact_kind(check.id),
1969 rust_why: advanced_check_rust_why(check.id),
1970 readiness_why: advanced_check_readiness_why(check.id),
1971 detail: check.detail.clone(),
1972 remediation: advanced_check_remediation(check.id),
1973 verification: advanced_check_verification_suggestion(check.id),
1974 evidence: check.evidence.clone(),
1975 }
1976}
1977
1978fn sort_canonical_findings(findings: &mut [CanonicalFinding]) {
1979 findings.sort_by(|a, b| {
1980 b.severity_rank
1981 .cmp(&a.severity_rank)
1982 .then_with(|| b.evidence.len().cmp(&a.evidence.len()))
1983 .then_with(|| a.id.cmp(&b.id))
1984 });
1985}
1986
1987fn advisory_subscores(report: &CrateSourceScanReport) -> Vec<AdvisorySubscore> {
1988 let safety = score_section_percent(&report.certification.audit_score, "safety");
1989 let verification = score_section_percent(&report.certification.audit_score, "verification");
1990 let build = score_section_percent(&report.certification.audit_score, "build");
1991 let lifecycle = score_section_percent(&report.certification.audit_score, "lifecycle");
1992 let power = score_section_percent(&report.certification.audit_score, "nasa_power_of_ten");
1993 let advanced = score_section_percent(&report.certification.audit_score, "advanced_structural");
1994
1995 vec![
1996 advisory_correctness_subscore(report, safety),
1997 advisory_maintainability_subscore(report, lifecycle),
1998 advisory_concurrency_subscore(report),
1999 advisory_resource_subscore(report),
2000 advisory_verification_subscore(report, verification, build),
2001 advisory_assurance_subscore(safety, verification, build, lifecycle, power, advanced),
2002 ]
2003}
2004
/// Advisory "Correctness" subscore: the mean of the safety-section percent,
/// correctness-critical Power-of-Ten rules (5, 7, 9), and the listed
/// correctness-oriented advanced structural checks.
fn advisory_correctness_subscore(report: &CrateSourceScanReport, safety: f64) -> AdvisorySubscore {
    AdvisorySubscore {
        id: "correctness",
        title: "Correctness",
        percent: mean_percent(&[
            safety,
            selected_power_of_ten_percent(&report.certification.power_of_ten, &[5, 7, 9]),
            selected_advanced_percent(
                &report.certification.advanced,
                &[
                    "SAFE-STATE",
                    "FUTURE-WAKE",
                    "DROP-PANIC",
                    "CLOCK-MIX",
                    "SHORT-WRITE",
                    "ATOMIC-RELAXED",
                ],
            ),
        ]),
        basis:
            "Derived from safety surface, correctness-critical Power-of-Ten rules, and correctness-oriented structural checks.",
    }
}
2028
/// Advisory "Maintainability" subscore: the mean of the lifecycle-section
/// percent, reviewability-oriented Power-of-Ten rules (4, 6, 8), and the
/// listed maintainability-heavy structural checks.
fn advisory_maintainability_subscore(
    report: &CrateSourceScanReport,
    lifecycle: f64,
) -> AdvisorySubscore {
    AdvisorySubscore {
        id: "maintainability",
        title: "Maintainability",
        percent: mean_percent(&[
            lifecycle,
            selected_power_of_ten_percent(&report.certification.power_of_ten, &[4, 6, 8]),
            selected_advanced_percent(
                &report.certification.advanced,
                &["ITER-UNB", "CARGO-VERS", "PART-SPACE"],
            ),
        ]),
        basis:
            "Derived from lifecycle/governance evidence, reviewability-oriented Power-of-Ten rules, and maintainability-heavy structural checks.",
    }
}
2048
/// Advisory "Concurrency / Async" subscore: the mean of the listed
/// async/concurrency structural checks and bounded-control-flow
/// Power-of-Ten rules (2, 5).
fn advisory_concurrency_subscore(report: &CrateSourceScanReport) -> AdvisorySubscore {
    AdvisorySubscore {
        id: "concurrency_async",
        title: "Concurrency / Async",
        percent: mean_percent(&[
            selected_advanced_percent(
                &report.certification.advanced,
                &[
                    "ASYNC-LOCK",
                    "FUTURE-WAKE",
                    "TASK-LEAK",
                    "ASYNC-RECUR",
                    "CHAN-UNB",
                ],
            ),
            selected_power_of_ten_percent(&report.certification.power_of_ten, &[2, 5]),
        ]),
        basis:
            "Derived from async/concurrency structural checks and bounded-control-flow review signals.",
    }
}
2070
/// Advisory "Resource Discipline" subscore: the mean of runtime allocation
/// proxies, the listed resource-lifecycle structural checks, and
/// bounded-allocation / bounded-loop Power-of-Ten rules (2, 3).
fn advisory_resource_subscore(report: &CrateSourceScanReport) -> AdvisorySubscore {
    AdvisorySubscore {
        id: "resource_discipline",
        title: "Resource Discipline",
        percent: mean_percent(&[
            runtime_resource_percent(&report.certification.runtime),
            selected_advanced_percent(
                &report.certification.advanced,
                &[
                    "ALLOC-HOT",
                    "CWE-404",
                    "CMD-BUF",
                    "ITER-UNB",
                    "ZERO-COPY",
                    "SHORT-WRITE",
                ],
            ),
            selected_power_of_ten_percent(&report.certification.power_of_ten, &[2, 3]),
        ]),
        basis:
            "Derived from runtime-allocation proxies, resource-lifecycle checks, and bounded-allocation / bounded-loop review rules.",
    }
}
2094
/// Advisory "Verification / Reviewability" subscore: the mean of the
/// verification and build section percents plus analyzability-oriented
/// Power-of-Ten rules (4, 8, 10).
fn advisory_verification_subscore(
    report: &CrateSourceScanReport,
    verification: f64,
    build: f64,
) -> AdvisorySubscore {
    AdvisorySubscore {
        id: "verification_reviewability",
        title: "Verification / Reviewability",
        percent: mean_percent(&[
            verification,
            build,
            selected_power_of_ten_percent(&report.certification.power_of_ten, &[4, 8, 10]),
        ]),
        basis:
            "Derived from verification signals, build/tooling complexity, and analyzability-oriented Power-of-Ten rules.",
    }
}
2112
/// Advisory "Assurance / Provenance" subscore: the plain mean of all six
/// section percents — a broad synthesis of the locked rubric.
fn advisory_assurance_subscore(
    safety: f64,
    verification: f64,
    build: f64,
    lifecycle: f64,
    power: f64,
    advanced: f64,
) -> AdvisorySubscore {
    AdvisorySubscore {
        id: "assurance_provenance",
        title: "Assurance / Provenance",
        percent: mean_percent(&[safety, verification, build, lifecycle, power, advanced]),
        basis:
            "Derived from the full locked rubric as a broad readiness-oriented advisory synthesis.",
    }
}
2129
2130fn score_section_percent(score: &AuditScoreCard, id: &str) -> f64 {
2131 score
2132 .sections
2133 .iter()
2134 .find(|section| section.id == id)
2135 .map(|section| section.section_percent)
2136 .unwrap_or(0.0)
2137}
2138
2139fn selected_power_of_ten_percent(profile: &PowerOfTenProfile, rules: &[u8]) -> f64 {
2140 let values = profile
2141 .rules
2142 .iter()
2143 .filter(|rule| rules.contains(&rule.number))
2144 .map(|rule| match rule.status {
2145 PowerOfTenStatus::Applied => 100.0,
2146 PowerOfTenStatus::Indeterminate => 50.0,
2147 PowerOfTenStatus::NotApplied => 0.0,
2148 })
2149 .collect::<Vec<_>>();
2150 mean_percent(&values)
2151}
2152
2153fn selected_advanced_percent(profile: &AdvancedStructuralProfile, ids: &[&str]) -> f64 {
2154 let values = profile
2155 .checks
2156 .iter()
2157 .filter(|check| ids.contains(&check.id))
2158 .map(|check| match check.status {
2159 StructuralCheckStatus::Clear => 100.0,
2160 StructuralCheckStatus::Indeterminate => 50.0,
2161 StructuralCheckStatus::Elevated => 0.0,
2162 })
2163 .collect::<Vec<_>>();
2164 mean_percent(&values)
2165}
2166
2167fn runtime_resource_percent(profile: &RuntimeProfile) -> f64 {
2168 mean_percent(&[
2169 score_threshold(profile.alloc_crate_hits, 0, 2) * 100.0,
2170 score_threshold(profile.heap_allocation_hits, 0, 6) * 100.0,
2171 ])
2172}
2173
/// Arithmetic mean of `values`; an empty slice yields 0.0 rather than NaN.
fn mean_percent(values: &[f64]) -> f64 {
    match values.len() {
        0 => 0.0,
        count => values.iter().sum::<f64>() / count as f64,
    }
}
2181
/// Maps a heuristic id to its triage classification bucket; ids outside
/// the known sets default to "context-needed".
fn heuristic_classification(id: &str) -> &'static str {
    const DESIGN_REVIEW_IDS: [&str; 5] =
        ["H-ASYNC-01", "H-CHAN-01", "H-LOCK-01", "H-ALLOC-01", "H-SERDE-01"];
    if DESIGN_REVIEW_IDS.contains(&id) {
        "design-review"
    } else if id == "H-ERR-01" {
        "defect-candidate"
    } else {
        "context-needed"
    }
}
2189
/// Converts a raw motif hit count into a coarse confidence label:
/// 0-1 hits = low, 2-4 = medium, 5+ = high.
fn heuristic_confidence(total_hits: usize) -> &'static str {
    if total_hits <= 1 {
        "low"
    } else if total_hits <= 4 {
        "medium"
    } else {
        "high"
    }
}
2197
/// Maps a heuristic id to the impact family used for theme clustering;
/// unknown ids fall back to "reviewability".
fn heuristic_impact_kind(id: &str) -> &'static str {
    const CONCURRENCY_IDS: [&str; 4] = ["H-ASYNC-01", "H-LOCK-01", "H-CHAN-01", "H-GRPC-01"];
    const RESOURCE_IDS: [&str; 3] = ["H-ALLOC-01", "H-THRU-01", "H-SERDE-01"];
    const CORRECTNESS_IDS: [&str; 5] =
        ["H-RAFT-01", "H-TCP-01", "H-CLOCK-01", "H-ERR-01", "H-DNS-01"];
    if CONCURRENCY_IDS.contains(&id) {
        "concurrency/async"
    } else if RESOURCE_IDS.contains(&id) {
        "resource discipline"
    } else if CORRECTNESS_IDS.contains(&id) {
        "correctness"
    } else {
        "reviewability"
    }
}
2206
/// Explains, per heuristic id, why the matched motif matters specifically in
/// Rust code. Ids without a dedicated explanation get a generic caveat that
/// the motif still needs local reasoning. Strings feed report output directly.
fn heuristic_rust_why(id: &str) -> &'static str {
    match id {
        "H-ASYNC-01" => "Blocking motifs inside async-heavy code often translate into executor unfairness, starvation, and misleading service-level symptoms.",
        "H-CHAN-01" => "Channel motifs are often where Rust services hide backpressure, queue growth, and detached ownership assumptions.",
        "H-ALLOC-01" => "Allocation-heavy source motifs often correlate with hot-path latency variance and avoidable memory churn.",
        "H-LOCK-01" => "Shared-lock motifs concentrate contention and can turn otherwise-local latency into system-wide tail behavior.",
        "H-CLOCK-01" => "Clock-related motifs are where monotonic and wall-clock assumptions can quietly diverge.",
        "H-ERR-01" => "Retry and timeout motifs are where error handling frequently shifts from resilience into amplification.",
        // Fallback for heuristics without a tailored explanation.
        _other => "The matched motif is source-visible and reviewable in Rust code, but it still needs local reasoning before it should drive design changes.",
    }
}
2218
/// Explains, per heuristic id, why the motif is relevant to review-readiness
/// or assurance-oriented internal review. Only a subset of ids get the
/// stronger assurance phrasing; the rest get the structural-proxy caveat.
fn heuristic_readiness_why(id: &str) -> &'static str {
    match id {
        "H-ASYNC-01" | "H-CHAN-01" | "H-ERR-01" | "H-CLOCK-01" => {
            "This motif frequently appears in assurance-oriented reviews because it changes how operators and reviewers reason about timing, boundedness, and fault handling."
        }
        _other => {
            "This motif can support internal review against standards-oriented expectations, but it is still only a structural proxy rather than compliance evidence by itself."
        }
    }
}
2229
/// Suggests a concrete verification step (test, replay, or measurement) for
/// each heuristic id; unknown ids get a generic evidence-driven suggestion.
/// These strings are emitted verbatim in the "Verification Suggestions" section.
fn heuristic_verification_suggestion(id: &str) -> &'static str {
    match id {
        "H-ASYNC-01" => "Replay a representative async workload and confirm the path yields or offloads before poll duration spikes appear.",
        "H-CHAN-01" => "Exercise a producer-faster-than-consumer test and confirm queue depth remains bounded and observable.",
        "H-ALLOC-01" => "Benchmark the flagged path under steady load and inspect allocation counts before and after preallocation changes.",
        "H-LOCK-01" => "Measure lock hold time or add tracing around the shared path to confirm contention boundaries are explicit.",
        "H-CLOCK-01" => "Add a regression test that isolates monotonic timing logic from wall-clock presentation or protocol boundaries.",
        "H-ERR-01" => "Run a failure-path test and confirm retry pacing, cancellation, and escalation remain bounded.",
        // Fallback for heuristics without a tailored suggestion.
        _other => "Review the emitted evidence and add a targeted regression or replay check on the affected path.",
    }
}
2241
/// Maps a Power-of-Ten rule number to its triage classification bucket;
/// rules outside the special sets default to "design-review".
fn power_of_ten_classification(rule_number: u8) -> &'static str {
    if matches!(rule_number, 5 | 7 | 9) {
        "defect-candidate"
    } else if matches!(rule_number, 8 | 10) {
        "review-readiness"
    } else {
        "design-review"
    }
}
2249
2250fn power_of_ten_confidence(status: PowerOfTenStatus, evidence_len: usize) -> &'static str {
2251 match status {
2252 PowerOfTenStatus::Applied => "high",
2253 PowerOfTenStatus::NotApplied if evidence_len >= 2 => "high",
2254 PowerOfTenStatus::NotApplied => "medium",
2255 PowerOfTenStatus::Indeterminate => "medium",
2256 }
2257}
2258
/// Maps a Power-of-Ten rule number to the impact family used for theme
/// clustering; unrecognized numbers fall back to "assurance/provenance".
fn power_of_ten_impact_kind(rule_number: u8) -> &'static str {
    if matches!(rule_number, 1 | 2 | 3 | 7 | 9) {
        "correctness"
    } else if matches!(rule_number, 4 | 6) {
        "maintainability"
    } else if rule_number == 5 {
        "concurrency/async"
    } else if matches!(rule_number, 8 | 10) {
        "verification/reviewability"
    } else {
        "assurance/provenance"
    }
}
2268
/// Explains, per Power-of-Ten rule number (1-10), why the rule still matters
/// in Rust despite ownership and type-system guarantees. Out-of-range
/// numbers get a generic adaptation note. Strings feed report output directly.
fn power_of_ten_rust_why(rule_number: u8) -> &'static str {
    match rule_number {
        1 => "Unbounded recursion still threatens reviewability and stack reasoning in Rust, even when ownership is otherwise strong.",
        2 => "Explicit bounds are one of the clearest ways to make Rust control flow auditable under failure pressure.",
        3 => "Steady-state allocation surfaces are often where long-lived Rust services accumulate jitter and memory debt.",
        4 => "Large functions make invariants harder to see, test, and review, even in otherwise safe Rust.",
        5 => "Catch-all state handling often hides missing invariants or incomplete transitions in otherwise exhaustive-looking Rust code.",
        6 => "Global shared state spreads coupling and makes local reasoning harder across modules and tasks.",
        7 => "Unchecked extraction pushes invariant proof onto the reader instead of the code.",
        8 => "Macros and cfg forks can hide large semantic deltas behind small source surfaces.",
        9 => "Raw-pointer and FFI boundaries are where Rust's usual guarantees weaken and local contracts matter most.",
        10 => "Analyzer and warning gates are part of keeping a Rust codebase reviewable over time.",
        // Fallback for unexpected rule numbers.
        _other => "This rule is a Rust-adapted analyzability guideline rather than a language-lawyer restriction.",
    }
}
2284
/// Explains the review-readiness relevance of a Power-of-Ten rule. Rules
/// 8-10 (macro/cfg surface, unsafe boundaries, tool gates) get a stronger
/// readiness framing; all others get a general bounded-structure note.
fn power_of_ten_readiness_why(rule_number: u8) -> &'static str {
    match rule_number {
        8..=10 => {
            "This rule is directly relevant to review readiness because it affects whether a reviewer can trust what paths are present and what tools continue to check."
        }
        _other => {
            "This rule supports bounded, reviewable structure that often matters in compliance- or certification-oriented internal reviews."
        }
    }
}
2295
/// Suggests a concrete verification step for each Power-of-Ten rule number
/// (1-10); unexpected numbers get a generic smallest-regression suggestion.
/// These strings are emitted verbatim in the report.
fn power_of_ten_verification_suggestion(rule_number: u8) -> &'static str {
    match rule_number {
        1 => "Add a focused test or review note that proves the remaining recursion is bounded, or refactor it into an explicit loop/work queue.",
        2 => "Add a regression test that demonstrates a visible loop bound, timeout, or cancellation path on the flagged logic.",
        3 => "Profile the flagged path under steady-state load and confirm no avoidable heap growth remains after initialization.",
        4 => "Split the function and add narrower tests that name the local invariants introduced by the refactor.",
        5 => "Add state-transition tests that cover the previously catch-all path explicitly.",
        6 => "Document ownership/synchronization boundaries or add module-level tests that prove shared state cannot drift silently.",
        7 => "Replace unwrap/expect with explicit handling or add an invariant test that proves the extraction precondition.",
        8 => "Review feature/macro-expanded paths and add CI coverage for the meaningful forks.",
        9 => "Document the local pointer/FFI contract and add the narrowest possible regression around the unsafe edge.",
        10 => "Keep analyzer and warnings-as-errors gates in CI and record the expected toolchain surface in the repo docs.",
        // Fallback for unexpected rule numbers.
        _other => "Review the evidence and add the smallest regression that proves the intended invariant.",
    }
}
2311
/// Maps an advanced structural check id to its triage classification
/// bucket; ids outside the known sets default to "design-review".
fn advanced_check_classification(check_id: &str) -> &'static str {
    const DEFECT_IDS: [&str; 7] = [
        "SAFE-STATE",
        "ISR-SAFE",
        "FUTURE-WAKE",
        "DROP-PANIC",
        "CLOCK-MIX",
        "SHORT-WRITE",
        "CWE-404",
    ];
    const READINESS_IDS: [&str; 2] = ["PLUGIN-LOAD", "CARGO-VERS"];
    const CONTEXT_IDS: [&str; 3] = ["ATOMIC-RELAXED", "CMD-BUF", "ITER-UNB"];
    if DEFECT_IDS.contains(&check_id) {
        "defect-candidate"
    } else if READINESS_IDS.contains(&check_id) {
        "review-readiness"
    } else if CONTEXT_IDS.contains(&check_id) {
        "context-needed"
    } else {
        "design-review"
    }
}
2321
2322fn advanced_check_confidence(status: StructuralCheckStatus, evidence_len: usize) -> &'static str {
2323 match status {
2324 StructuralCheckStatus::Clear => "high",
2325 StructuralCheckStatus::Elevated if evidence_len >= 2 => "high",
2326 StructuralCheckStatus::Elevated => "medium",
2327 StructuralCheckStatus::Indeterminate => "medium",
2328 }
2329}
2330
/// Maps an advanced structural check id to the impact family used for
/// theme clustering; unknown ids fall back to "maintainability".
fn advanced_check_impact_kind(check_id: &str) -> &'static str {
    const CONCURRENCY_IDS: [&str; 5] =
        ["ASYNC-LOCK", "FUTURE-WAKE", "TASK-LEAK", "ASYNC-RECUR", "CHAN-UNB"];
    const RESOURCE_IDS: [&str; 5] = ["ALLOC-HOT", "CWE-404", "CMD-BUF", "ITER-UNB", "ZERO-COPY"];
    const CORRECTNESS_IDS: [&str; 7] = [
        "SAFE-STATE",
        "CLOCK-INTEG",
        "CLOCK-MIX",
        "SHORT-WRITE",
        "DROP-PANIC",
        "ATOMIC-RELAXED",
        "ISR-SAFE",
    ];
    const ASSURANCE_IDS: [&str; 3] = ["PLUGIN-LOAD", "CARGO-VERS", "PART-SPACE"];
    if CONCURRENCY_IDS.contains(&check_id) {
        "concurrency/async"
    } else if RESOURCE_IDS.contains(&check_id) {
        "resource discipline"
    } else if CORRECTNESS_IDS.contains(&check_id) {
        "correctness"
    } else if ASSURANCE_IDS.contains(&check_id) {
        "assurance/provenance"
    } else {
        "maintainability"
    }
}
2343
/// Explains, per advanced structural check id, why the pattern matters in
/// Rust; ids without a tailored explanation share a generic invariant note.
/// Strings feed report output directly.
fn advanced_check_rust_why(check_id: &str) -> &'static str {
    match check_id {
        "ASYNC-LOCK" => "Async lock misuse is a common way for otherwise-correct Rust to become operationally brittle under load.",
        "ALLOC-HOT" => "Allocation inside hot loops is often the difference between stable throughput and jitter-heavy Rust services.",
        "CLOCK-MIX" => "Mixing monotonic and wall-clock time is a classic correctness trap in timeout, lease, and control logic.",
        "FUTURE-WAKE" => "Manual futures live on a strict wake contract; getting it wrong produces futures that appear correct but never make progress.",
        "TASK-LEAK" => "Detached tasks are easy to create in Rust async code and hard to reason about during shutdown, overload, or retries.",
        "ZERO-COPY" => "Avoidable copies on hot paths often show up later as bandwidth and tail-latency debt.",
        // Fallback for checks without a tailored explanation.
        _other => "This structural check points to a reviewable Rust pattern that often deserves explicit local invariants or tests.",
    }
}
2355
/// Explains the review-readiness relevance of an advanced structural check.
/// Provenance-oriented checks (plugin loading, version pinning, shared
/// partitioning) get a stronger framing; all others get a general note.
fn advanced_check_readiness_why(check_id: &str) -> &'static str {
    match check_id {
        "PLUGIN-LOAD" | "CARGO-VERS" | "PART-SPACE" => {
            "This finding is especially relevant to review readiness because it affects reproducibility, isolation, or operator trust in what was shipped."
        }
        _other => {
            "This finding can support standards-oriented internal review because it highlights boundedness, determinism, ownership, or resource-discipline questions."
        }
    }
}
2366
/// Suggests a concrete verification step for each advanced structural check
/// id; unknown ids get a generic evidence-driven suggestion. Strings are
/// emitted verbatim in the "Verification Suggestions" section.
fn advanced_check_verification_suggestion(check_id: &str) -> &'static str {
    match check_id {
        "SAFE-STATE" => "Add tests that drive the fallback path explicitly and confirm the intended safe-state behavior is named, not implied.",
        "ASYNC-LOCK" => "Add a focused async regression that proves the lock is dropped before await and that cancellation does not strand shared state.",
        "ALLOC-HOT" => "Benchmark or instrument the flagged loop and confirm allocation count drops after preallocation or refactoring.",
        "CLOCK-INTEG" | "CLOCK-MIX" => "Add tests that isolate monotonic timing behavior from wall-clock use and verify deadline math at the boundary.",
        "RETRY-DAMP" => "Exercise a repeated-failure path and confirm backoff is capped, jittered, and observable.",
        "HARD-WAIT" => "Replace the fixed wait with a state/deadline condition and add a regression that proves the path is now bounded by state, not sleep time.",
        "PART-SPACE" => "Document the shared-resource boundary and add a test or review note that proves ownership/partitioning is intentional.",
        "PLUGIN-LOAD" => "Add review notes or CI checks that prove the dynamic-loading boundary is verified, sandboxed, or intentionally excluded from trusted paths.",
        "CWE-404" => "Exercise an error path and confirm ownership cleanup happens without raw-handle leakage.",
        "CMD-BUF" => "Add queue tests that demonstrate staleness, TTL, cancellation, or sequence handling under backlog.",
        "ITER-UNB" => "Add a bound, trusted finite-source proof, or regression test that demonstrates the iterator cannot grow without limit.",
        "ISR-SAFE" => "Review interrupt-path code and add a targeted test or note proving it stays allocation-free and lock-free where required.",
        "FUTURE-WAKE" => "Add a manual-future regression that proves each Pending path registers a wake before returning.",
        "TASK-LEAK" => "Track JoinHandle ownership and add shutdown tests that prove tasks do not outlive their supervisor unintentionally.",
        "DROP-PANIC" => "Move failure reporting out of Drop and add a regression proving teardown stays infallible under unwind pressure.",
        "ATOMIC-RELAXED" => "Review the state-transition path and add the narrowest concurrency test that proves the ordering is sufficient.",
        "SHORT-WRITE" => "Add IO-path tests that inject Interrupted or partial writes and prove the caller handles them correctly.",
        "ASYNC-RECUR" => "Add a visible depth bound or refactor to a loop/work queue and prove the new path terminates under stress.",
        "CHAN-UNB" => "Add load tests that demonstrate bounded backlog or justify why unbounded growth cannot accumulate invisibly.",
        "ZERO-COPY" => "Benchmark the read path before and after borrowing/reference-counting changes and confirm copy count drops.",
        "CARGO-VERS" => "Pin or narrow version requirements and verify the attested build stays reproducible across fresh environments.",
        // Fallback for checks without a tailored suggestion.
        _other => "Use the evidence block to write the smallest targeted regression or review note that proves the intended invariant.",
    }
}
2393
2394fn build_audit_scorecard(
2395 safety: &SafetyProfile,
2396 verification: &VerificationProfile,
2397 build: &BuildProfile,
2398 lifecycle: &LifecycleProfile,
2399 manifest: &ManifestMetadata,
2400 power_of_ten: &PowerOfTenProfile,
2401 advanced: &AdvancedStructuralProfile,
2402) -> AuditScoreCard {
2403 let sections = vec![
2404 build_safety_score_section(safety),
2405 build_verification_score_section(verification),
2406 build_build_score_section(build),
2407 build_lifecycle_score_section(lifecycle, manifest),
2408 build_power_of_ten_score_section(power_of_ten),
2409 build_advanced_score_section(advanced),
2410 ];
2411 finalize_audit_scorecard(sections)
2412}
2413
2414fn build_safety_score_section(safety: &SafetyProfile) -> AuditScoreSection {
2415 let checkpoints = [
2416 score_unsafe_policy(safety.unsafe_policy),
2417 score_binary(safety.unsafe_sites == 0),
2418 score_binary(safety.panic_sites == 0),
2419 score_binary(safety.unwrap_sites == 0),
2420 safety_ffi_checkpoint(safety),
2421 ];
2422 build_score_section("safety", "Safety Surface", 15.0, &checkpoints)
2423}
2424
2425fn safety_ffi_checkpoint(safety: &SafetyProfile) -> f64 {
2426 if safety.ffi_sites == 0 && safety.unsafe_sites == 0 {
2427 1.0
2428 } else if safety.safety_comment_sites > 0 {
2429 0.5
2430 } else {
2431 0.0
2432 }
2433}
2434
2435fn build_verification_score_section(verification: &VerificationProfile) -> AuditScoreSection {
2436 let checkpoints = [
2437 score_binary(verification.tests_dir_present || verification.test_marker_hits > 0),
2438 score_binary(verification.property_testing_hits > 0),
2439 score_binary(verification.concurrency_exploration_hits > 0),
2440 score_binary(verification.fuzzing_hits > 0),
2441 score_binary(verification.formal_methods_hits > 0),
2442 ];
2443 build_score_section("verification", "Verification Evidence", 15.0, &checkpoints)
2444}
2445
2446fn build_build_score_section(build: &BuildProfile) -> AuditScoreSection {
2447 let checkpoints = [
2448 score_threshold(build.direct_dependencies, 10, 25),
2449 score_threshold(build.build_dependencies, 3, 8),
2450 score_threshold(build.dev_dependencies, 15, 30),
2451 score_binary(!build.has_build_script),
2452 score_binary(!build.proc_macro_crate),
2453 score_binary(build.codegen_hits == 0),
2454 ];
2455 build_score_section("build", "Build / Tooling Complexity", 10.0, &checkpoints)
2456}
2457
2458fn build_lifecycle_score_section(
2459 lifecycle: &LifecycleProfile,
2460 manifest: &ManifestMetadata,
2461) -> AuditScoreSection {
2462 let checkpoints = [
2463 score_binary(lifecycle.readme_present),
2464 score_binary(lifecycle.changelog_present),
2465 score_binary(lifecycle.security_md_present),
2466 score_binary(lifecycle.safety_md_present),
2467 score_binary(lifecycle.architecture_doc_present),
2468 score_binary(lifecycle.docs_dir_present),
2469 score_binary(!lifecycle.license_files.is_empty() || manifest.license.is_some()),
2470 score_binary(manifest.rust_version.is_some()),
2471 score_binary(manifest.edition.is_some()),
2472 score_binary(manifest.repository.is_some()),
2473 score_binary(manifest.documentation.is_some()),
2474 score_binary(manifest.homepage.is_some()),
2475 score_binary(manifest.readme.is_some() || lifecycle.readme_present),
2476 ];
2477 build_score_section("lifecycle", "Lifecycle / Governance", 10.0, &checkpoints)
2478}
2479
2480fn build_power_of_ten_score_section(power_of_ten: &PowerOfTenProfile) -> AuditScoreSection {
2481 let checkpoints = power_of_ten
2482 .rules
2483 .iter()
2484 .map(power_of_ten_checkpoint_score)
2485 .collect::<Vec<_>>();
2486 build_score_section(
2487 "nasa_power_of_ten",
2488 "NASA/JPL Power of Ten",
2489 25.0,
2490 &checkpoints,
2491 )
2492}
2493
2494fn power_of_ten_checkpoint_score(rule: &PowerOfTenRuleAudit) -> f64 {
2495 match rule.status {
2496 PowerOfTenStatus::Applied => 1.0,
2497 PowerOfTenStatus::Indeterminate => 0.5,
2498 PowerOfTenStatus::NotApplied => 0.0,
2499 }
2500}
2501
2502fn build_advanced_score_section(advanced: &AdvancedStructuralProfile) -> AuditScoreSection {
2503 let checkpoints = advanced
2504 .checks
2505 .iter()
2506 .map(advanced_checkpoint_score)
2507 .collect::<Vec<_>>();
2508 build_score_section(
2509 "advanced_structural",
2510 "Advanced Structural Checks",
2511 25.0,
2512 &checkpoints,
2513 )
2514}
2515
2516fn advanced_checkpoint_score(check: &AdvancedStructuralCheck) -> f64 {
2517 match check.status {
2518 StructuralCheckStatus::Clear => 1.0,
2519 StructuralCheckStatus::Indeterminate => 0.5,
2520 StructuralCheckStatus::Elevated => 0.0,
2521 }
2522}
2523
2524fn finalize_audit_scorecard(sections: Vec<AuditScoreSection>) -> AuditScoreCard {
2525 let earned_weighted_points = sections.iter().map(|section| section.weighted_points).sum();
2526 let possible_weighted_points = sections.iter().map(|section| section.weight_percent).sum();
2527 let overall_percent = if possible_weighted_points == 0.0 {
2528 0.0
2529 } else {
2530 earned_weighted_points * 100.0 / possible_weighted_points
2531 };
2532
2533 AuditScoreCard {
2534 overall_percent,
2535 earned_weighted_points,
2536 possible_weighted_points,
2537 band: audit_score_band(overall_percent),
2538 sections,
2539 }
2540}
2541
2542fn build_score_section(
2543 id: &'static str,
2544 title: &'static str,
2545 weight_percent: f64,
2546 checkpoints: &[f64],
2547) -> AuditScoreSection {
2548 let checkpoint_count = checkpoints.len();
2549 let earned_checkpoints = checkpoints.iter().sum::<f64>();
2550 let section_ratio = if checkpoint_count == 0 {
2551 0.0
2552 } else {
2553 earned_checkpoints / checkpoint_count as f64
2554 };
2555 let section_percent = section_ratio * 100.0;
2556 let weighted_points = section_ratio * weight_percent;
2557
2558 AuditScoreSection {
2559 id,
2560 title,
2561 weight_percent,
2562 checkpoint_count,
2563 earned_checkpoints,
2564 section_percent,
2565 weighted_points,
2566 }
2567}
2568
/// Full checkpoint credit (1.0) for a passing boolean condition, none (0.0)
/// otherwise.
fn score_binary(value: bool) -> f64 {
    match value {
        true => 1.0,
        false => 0.0,
    }
}
2576
/// Threshold-based checkpoint credit: full credit up to `full_threshold`,
/// half credit up to `partial_threshold`, none beyond that.
fn score_threshold(value: usize, full_threshold: usize, partial_threshold: usize) -> f64 {
    match value {
        v if v <= full_threshold => 1.0,
        v if v <= partial_threshold => 0.5,
        _ => 0.0,
    }
}
2586
2587fn score_unsafe_policy(policy: UnsafeCodePolicy) -> f64 {
2588 match policy {
2589 UnsafeCodePolicy::Forbid => 1.0,
2590 UnsafeCodePolicy::Deny => 0.5,
2591 UnsafeCodePolicy::NotDeclared => 0.0,
2592 }
2593}
2594
/// Maps the overall percentage to a qualitative assurance band. Guards use
/// `>=` comparisons top-down, so any non-finite input falls through to the
/// lowest band, matching the original if/else chain.
fn audit_score_band(overall_percent: f64) -> &'static str {
    match overall_percent {
        p if p >= 85.0 => "strong assurance posture",
        p if p >= 70.0 => "developing but substantial assurance posture",
        p if p >= 55.0 => "mixed assurance posture",
        p if p >= 40.0 => "limited assurance evidence",
        _ => "low assurance readiness",
    }
}
2608
/// The five fixed guideline lines explaining the scoring method, checkpoint
/// credit scheme, fairness rule, excluded signals, and interpretation.
/// Rendered in both the text report and the JSON export, so the wording is
/// part of the report contract and must stay stable.
fn audit_score_guideline_lines() -> [&'static str; 5] {
    [
        "Method: weighted checkpoint scoring across Safety (15%), Verification (15%), Build/Tooling (10%), Lifecycle/Governance (10%), NASA/JPL Power of Ten (25%), and Advanced Structural Checks (25%).",
        "Checkpoint credit: pass/clear/applied = 1.0, indeterminate/partial = 0.5, elevated/not applied = 0.0.",
        "Fairness rule: raw motif counts do not linearly reduce the score; each checkpoint contributes once so large crates are not punished simply for having more code.",
        "Informational-only signals such as DSFB heuristic motif matches, hotspot counts, and capability flags like no_std/no_alloc are reported but excluded from the score denominator.",
        "Interpretation: this is a broad improvement and review-readiness score for source-visible controls and evidence, not a certification and not a measure of runtime correctness.",
    ]
}
2618
/// Rounds a percentage to one decimal place for stable display and JSON
/// output (ties round away from zero, per `f64::round`).
fn round_percent(value: f64) -> f64 {
    let tenths = (value * 10.0).round();
    tenths / 10.0
}
2622
/// Serializes the scorecard for the JSON export: overall totals, band,
/// the fixed guideline lines, and one object per rubric section. All
/// floating-point fields are rounded to one decimal for stable output.
fn audit_score_json(score: &AuditScoreCard) -> Value {
    json!({
        "method": AUDIT_SCORE_METHOD,
        "overallPercent": round_percent(score.overall_percent),
        "earnedWeightedPoints": round_percent(score.earned_weighted_points),
        "possibleWeightedPoints": round_percent(score.possible_weighted_points),
        "band": score.band,
        "guideline": audit_score_guideline_lines(),
        "sections": score.sections.iter().map(|section| {
            json!({
                "id": section.id,
                "title": section.title,
                "weightPercent": round_percent(section.weight_percent),
                "checkpointCount": section.checkpoint_count,
                "earnedCheckpoints": round_percent(section.earned_checkpoints),
                "sectionPercent": round_percent(section.section_percent),
                "weightedPoints": round_percent(section.weighted_points),
            })
        }).collect::<Vec<_>>(),
    })
}
2644
2645fn sarif_locations(evidence: &[ScanEvidence]) -> Vec<Value> {
2646 evidence
2647 .iter()
2648 .map(|item| {
2649 json!({
2650 "physicalLocation": {
2651 "artifactLocation": {
2652 "uri": item.path.display().to_string(),
2653 },
2654 "region": {
2655 "startLine": item.line_number,
2656 "snippet": {
2657 "text": item.snippet,
2658 }
2659 }
2660 }
2661 })
2662 })
2663 .collect()
2664}
2665
/// Appends the "Audit Summary" section to `out`: the report's purpose,
/// non-certification statement, canonical audit shape, a one-line tally of
/// finding classifications, and the artifact-scope note.
///
/// Only the `classification` field of each finding is consulted here; the
/// findings themselves are rendered by later sections.
fn render_audit_summary(
    out: &mut String,
    report: &CrateSourceScanReport,
    findings: &[CanonicalFinding],
) {
    // Tally each classification bucket for the "Finding mix" line below.
    let defect_candidates = findings
        .iter()
        .filter(|finding| finding.classification == "defect-candidate")
        .count();
    let design_review = findings
        .iter()
        .filter(|finding| finding.classification == "design-review")
        .count();
    let review_readiness = findings
        .iter()
        .filter(|finding| finding.classification == "review-readiness")
        .count();
    let context_needed = findings
        .iter()
        .filter(|finding| finding.classification == "context-needed")
        .count();

    out.push_str("Audit Summary\n");
    out.push_str("──────────────────────────────────────────────────────────────\n");
    out.push_str("Purpose:\n");
    out.push_str("  - improve Rust code quality across the full crate surface\n");
    out.push_str("  - support compliance- and certification-oriented internal review\n");
    out.push_str("  - preserve all current DSFB audit breadth in one canonical report\n");
    out.push_str("Non-certification statement:\n");
    out.push_str("  - DSFB does not certify compliance with IEC, ISO, RTCA, MIL, NIST, or other standards.\n");
    out.push_str(
        "  - Treat this report as a structured guideline for improvement and review readiness.\n",
    );
    out.push_str("Canonical audit shape:\n");
    out.push_str("  - one full audit\n");
    out.push_str("  - one overall score plus visible subscores\n");
    out.push_str("  - one shared evidence set reused by the concluding interpretation lenses\n");
    out.push_str(&format!(
        "Finding mix: {} defect-candidate | {} design-review | {} review-readiness | {} context-needed\n",
        defect_candidates, design_review, review_readiness, context_needed
    ));
    out.push_str(
        "Audit families preserved: runtime, safety, verification, build, lifecycle, Power of Ten, advanced structural, heuristic motifs, runtime priors, and attestation exports.\n\n",
    );
    out.push_str(&format!(
        "Report scope note: DSFB findings may support internal review against standards-oriented expectations, but the report remains a source-visible structural audit of {} artifact(s), not a certificate.\n\n",
        report.certification.artifacts_inspected
    ));
}
2715
/// Appends the README-badge instructions section to `out`: crate link,
/// usage guidance, the root-relative report link target, and a fenced
/// markdown snippet produced by `render_report_badge_markdown`.
fn render_report_badge_section(out: &mut String, report: &CrateSourceScanReport) {
    // The report file is expected at the repository root as `<stem>.txt`.
    let report_file_name = format!("{}.txt", scan_artifact_stem(report));
    let badge_snippet = render_report_badge_markdown(report, &report_file_name);
    out.push_str("Add dsfb-gray report badge to your GitHub repo README\n");
    out.push_str("──────────────────────────────────────────────────────────────\n");
    out.push_str("DSFB-gray crate: https://crates.io/crates/dsfb-gray\n");
    out.push_str(
        "Use this when you place the audit report in the repository root as a code-quality and review-readiness document.\n",
    );
    out.push_str(&format!(
        "Root-level report link target used below: ./{}\n",
        report_file_name
    ));
    out.push_str("Markdown snippet:\n");
    out.push_str("```md\n");
    out.push_str(&badge_snippet);
    out.push_str("\n```\n");
    out.push_str(
        "Badge semantics: this links to the DSFB audit report for the crate; it is not a compliance or certification badge.\n\n",
    );
}
2737
/// Appends the "Overall Score and Subscores" section to `out`: scoring
/// version, overall percentage and band, weighted-point totals, the
/// advisory and locked-rubric tables, a per-section breakdown, and the
/// fixed scoring guideline lines.
fn render_audit_score_section(
    out: &mut String,
    report: &CrateSourceScanReport,
    advisory_subscores: &[AdvisorySubscore],
) {
    let score = &report.certification.audit_score;
    out.push_str("Overall Score and Subscores\n");
    out.push_str("──────────────────────────────────────────────────────────────\n");
    out.push_str(&format!("Scoring Version: {}\n", AUDIT_SCORE_METHOD));
    out.push_str(&format!(
        "Overall: {:.1}% ({})\n",
        score.overall_percent, score.band
    ));
    out.push_str(&format!(
        "Weighted points earned: {:.1}/{:.1}\n",
        score.earned_weighted_points, score.possible_weighted_points
    ));
    out.push_str(
        "Score use: this score is a broad improvement target derived from the locked DSFB audit rubric. It is not a compliance certification.\n",
    );
    render_advisory_subscore_table(out, advisory_subscores);
    render_audit_score_table(out, score);
    out.push_str("Locked rubric section breakdown:\n");
    for section in &score.sections {
        out.push_str(&format!(
            "  - {}: {:.1}% of section, {:.1}/{:.1} weighted points across {} checkpoint(s)\n",
            section.title,
            section.section_percent,
            section.weighted_points,
            section.weight_percent,
            section.checkpoint_count
        ));
    }
    out.push_str("Scoring guideline:\n");
    for line in audit_score_guideline_lines().into_iter() {
        out.push_str(&format!("  - {line}\n"));
    }
    out.push('\n');
}
2777
2778fn render_report_badge_markdown(report: &CrateSourceScanReport, report_file_name: &str) -> String {
2779 let score = round_percent(report.certification.audit_score.overall_percent);
2780 let band = report.certification.audit_score.band;
2781 let color = badge_color_for_score(score);
2782 format!(
2783 "[](./{})",
2784 score, band, score, color, report_file_name
2785 )
2786}
2787
/// shields.io badge color for a rounded overall score, using the same
/// thresholds as `audit_score_band`. Guards use `>=` top-down, so any
/// non-finite input falls through to "red", matching the original chain.
fn badge_color_for_score(score: f64) -> &'static str {
    match score {
        s if s >= 85.0 => "brightgreen",
        s if s >= 70.0 => "green",
        s if s >= 55.0 => "yellowgreen",
        s if s >= 40.0 => "orange",
        _ => "red",
    }
}
2801
/// Appends the "Top Findings" section to `out`: up to the first eight
/// findings, each with its classification/confidence/impact header, title,
/// detail, the two "why" notes, and (when present) its first evidence item
/// tagged with a stable evidence id.
fn render_top_findings(out: &mut String, findings: &[CanonicalFinding]) {
    out.push_str("Top Findings\n");
    out.push_str("──────────────────────────────────────────────────────────────\n");
    if findings.is_empty() {
        out.push_str("No review-worthy findings were emitted from the current evidence set.\n\n");
        return;
    }

    // Findings arrive pre-ordered; only the first eight are shown here.
    for finding in findings.iter().take(8) {
        out.push_str(&format!(
            "{} {} [{} | confidence={} | impact={}]\n",
            finding.id,
            finding.status_label,
            finding.classification,
            finding.confidence,
            finding.impact_kind
        ));
        out.push_str(&format!("  Title: {}\n", finding.title));
        out.push_str(&format!("  Detail: {}\n", finding.detail));
        out.push_str(&format!(
            "  Why This Matters In Rust: {}\n",
            finding.rust_why
        ));
        out.push_str(&format!(
            "  Review / Readiness Note: {}\n",
            finding.readiness_why
        ));
        if let Some(first_evidence) = finding.evidence.first() {
            out.push_str(&format!(
                "  First Evidence: {} {}:{} [{}] {}\n",
                evidence_id(&finding.id, first_evidence, 0),
                first_evidence.path.display(),
                first_evidence.line_number,
                first_evidence.pattern,
                first_evidence.snippet
            ));
        }
        out.push('\n');
    }
}
2842
2843fn render_hotspots_section(out: &mut String, hotspots: &[CriticalityHotspot]) {
2844 out.push_str("Hotspots\n");
2845 out.push_str("──────────────────────────────────────────────────────────────\n");
2846 out.push_str("Guide:\n");
2847 out.push_str(" [##--------] observed score 12-19\n");
2848 out.push_str(" [####------] guarded score 20-29\n");
2849 out.push_str(" [######----] elevated score 30-39\n");
2850 out.push_str(" [########--] high score 40-49\n");
2851 out.push_str(" [##########] severe score 50+\n");
2852 out.push_str(" row format: path:line `function` [bar] band score=<n> complexity~<n>\n");
2853 out.push_str(" signals: comma-separated structural risk contributors\n");
2854 if hotspots.is_empty() {
2855 out.push_str("No function hotspots extracted.\n\n");
2856 return;
2857 }
2858
2859 for hotspot in hotspots {
2860 out.push_str(&format!(
2861 "{}:{} `{}` {} {:<8} score={} complexity~={}\n",
2862 hotspot.path.display(),
2863 hotspot.start_line,
2864 hotspot.function_name,
2865 heatmap_bar(hotspot.risk_score),
2866 heatmap_band_label(hotspot.risk_score),
2867 hotspot.risk_score,
2868 hotspot.estimated_complexity,
2869 ));
2870 out.push_str(&format!(" signals: {}\n", hotspot.signals.join(", ")));
2871 }
2872 out.push('\n');
2873}
2874
/// Appends the "Code Quality Themes" section to `out`: findings grouped by
/// impact kind (sorted via BTreeMap), each theme showing its finding count
/// and up to five finding ids as a preview.
fn render_code_quality_themes(out: &mut String, findings: &[CanonicalFinding]) {
    // BTreeMap keeps the themes in a stable, sorted order for the report.
    let mut themes: BTreeMap<&str, Vec<&str>> = BTreeMap::new();
    for finding in findings.iter() {
        themes
            .entry(finding.impact_kind)
            .or_default()
            .push(finding.id.as_str());
    }

    out.push_str("Code Quality Themes\n");
    out.push_str("──────────────────────────────────────────────────────────────\n");
    if themes.is_empty() {
        out.push_str(
            "No broad code-quality themes were synthesized from the current findings.\n\n",
        );
        return;
    }

    for (impact_kind, ids) in themes.into_iter() {
        // Preview at most five finding ids per theme to keep rows short.
        let preview = ids.iter().take(5).copied().collect::<Vec<_>>().join(", ");
        out.push_str(&format!(
            "{}: {} finding(s) [{}]\n",
            impact_kind,
            ids.len(),
            preview
        ));
    }
    out.push_str(
        "\nInterpret these themes as review clusters: they tell you where multiple findings are reinforcing the same kind of engineering debt or risk surface.\n\n",
    );
}
2906
2907fn render_remediation_guide(out: &mut String, findings: &[CanonicalFinding]) {
2908 out.push_str("Remediation Guide\n");
2909 out.push_str("──────────────────────────────────────────────────────────────\n");
2910 if findings.is_empty() {
2911 out.push_str("No remediation items were emitted.\n\n");
2912 return;
2913 }
2914
2915 for finding in findings.iter().take(12) {
2916 out.push_str(&format!(
2917 "{} [{}]: {}\n",
2918 finding.id, finding.classification, finding.remediation
2919 ));
2920 }
2921 out.push('\n');
2922}
2923
2924fn render_verification_suggestions(out: &mut String, findings: &[CanonicalFinding]) {
2925 out.push_str("Verification Suggestions\n");
2926 out.push_str("──────────────────────────────────────────────────────────────\n");
2927 if findings.is_empty() {
2928 out.push_str("No targeted verification suggestions were emitted.\n\n");
2929 return;
2930 }
2931
2932 for finding in findings.iter().take(12) {
2933 out.push_str(&format!(
2934 "{} [{}]: {}\n",
2935 finding.id, finding.impact_kind, finding.verification
2936 ));
2937 }
2938 out.push('\n');
2939}
2940
2941fn render_evidence_ledger(out: &mut String, findings: &[CanonicalFinding]) {
2942 out.push_str("Evidence Ledger\n");
2943 out.push_str("──────────────────────────────────────────────────────────────\n");
2944 if findings.is_empty() {
2945 out.push_str("No finding evidence was emitted.\n\n");
2946 return;
2947 }
2948
2949 for finding in findings.iter() {
2950 let ids = evidence_ids(&finding.id, &finding.evidence);
2951 if ids.is_empty() {
2952 out.push_str(&format!("{}: no source evidence captured\n", finding.id));
2953 } else {
2954 out.push_str(&format!(
2955 "{}: {} evidence item(s) [{}]\n",
2956 finding.id,
2957 ids.len(),
2958 ids.join(", ")
2959 ));
2960 }
2961 }
2962 out.push('\n');
2963}
2964
2965fn render_conclusion_lenses(
2966 out: &mut String,
2967 report: &CrateSourceScanReport,
2968 findings: &[CanonicalFinding],
2969) {
2970 out.push_str("Conclusion Lenses\n");
2971 out.push_str("──────────────────────────────────────────────────────────────\n");
2972 out.push_str(&format!(
2973 "Rust Maintainer Lens: {}\n",
2974 rust_maintainer_lens(report, findings)
2975 ));
2976 out.push_str(&format!(
2977 "Compliance Readiness Lens: {}\n",
2978 compliance_readiness_lens(report, findings)
2979 ));
2980 out.push_str(&format!(
2981 "Certification Preparation Lens: {}\n",
2982 certification_preparation_lens(report, findings)
2983 ));
2984 out.push_str(&format!(
2985 "Distributed / Operational Lens: {}\n\n",
2986 distributed_operational_lens(report, findings)
2987 ));
2988}
2989
2990fn conclusion_lenses_json(report: &CrateSourceScanReport, findings: &[CanonicalFinding]) -> Value {
2991 json!({
2992 "rustMaintainer": rust_maintainer_lens(report, findings),
2993 "complianceReadiness": compliance_readiness_lens(report, findings),
2994 "certificationPreparation": certification_preparation_lens(report, findings),
2995 "distributedOperational": distributed_operational_lens(report, findings),
2996 })
2997}
2998
2999fn rust_maintainer_lens(report: &CrateSourceScanReport, findings: &[CanonicalFinding]) -> String {
3000 let immediate = finding_id_preview(findings, |finding| {
3001 finding.classification == "defect-candidate" || finding.classification == "design-review"
3002 });
3003 format!(
3004 "Use the {:.1}% overall score as a broad code-improvement target, not a compliance or certification badge. The highest-value maintainer work is concentrated in {}.",
3005 report.certification.audit_score.overall_percent,
3006 immediate
3007 )
3008}
3009
3010fn compliance_readiness_lens(
3011 _report: &CrateSourceScanReport,
3012 findings: &[CanonicalFinding],
3013) -> String {
3014 let readiness_count = findings
3015 .iter()
3016 .filter(|finding| {
3017 finding.classification == "review-readiness"
3018 || finding.classification == "context-needed"
3019 })
3020 .count();
3021 format!(
3022 "{} finding(s) directly affect analyzability, reproducibility, or review traceability. DSFB may support internal review against standards-oriented expectations, but it does not certify compliance.",
3023 readiness_count
3024 )
3025}
3026
3027fn certification_preparation_lens(
3028 _report: &CrateSourceScanReport,
3029 findings: &[CanonicalFinding],
3030) -> String {
3031 let prep = finding_id_preview(findings, |finding| {
3032 finding.category == "nasa-power-of-ten" || finding.category == "advanced-structural"
3033 });
3034 format!(
3035 "For certification-oriented preparation, treat {} as pre-review cleanup targets and evidence-organizing prompts rather than certification outcomes.",
3036 prep
3037 )
3038}
3039
3040fn distributed_operational_lens(
3041 _report: &CrateSourceScanReport,
3042 findings: &[CanonicalFinding],
3043) -> String {
3044 let operational = finding_id_preview(findings, |finding| {
3045 finding.impact_kind == "concurrency/async" || finding.impact_kind == "resource discipline"
3046 });
3047 format!(
3048 "Operational pressure is most visible in {}. These findings are the most likely to matter later in runtime replay, backpressure review, or production-style load investigation.",
3049 operational
3050 )
3051}
3052
3053fn finding_id_preview<F>(findings: &[CanonicalFinding], predicate: F) -> String
3054where
3055 F: Fn(&CanonicalFinding) -> bool,
3056{
3057 let preview = findings
3058 .iter()
3059 .filter(|finding| predicate(finding))
3060 .take(5)
3061 .map(|finding| finding.id.as_str())
3062 .collect::<Vec<_>>();
3063 if preview.is_empty() {
3064 "no dominant finding cluster in the current report".to_string()
3065 } else {
3066 preview.join(", ")
3067 }
3068}
3069
3070fn render_audit_score_table(out: &mut String, score: &AuditScoreCard) {
3071 let total_checkpoints = score
3072 .sections
3073 .iter()
3074 .map(|section| section.checkpoint_count)
3075 .sum::<usize>();
3076
3077 out.push_str("Score Summary Table\n");
3078 out.push_str("+------------------------------+--------+--------+--------+--------+\n");
3079 out.push_str("| Section | Score% | Weight | Points | Checks |\n");
3080 out.push_str("+------------------------------+--------+--------+--------+--------+\n");
3081 for section in &score.sections {
3082 out.push_str(&format!(
3083 "| {:<28} | {:>6.1} | {:>6.1} | {:>6.1} | {:>6} |\n",
3084 truncate_table_label(section.title, 28),
3085 section.section_percent,
3086 section.weight_percent,
3087 section.weighted_points,
3088 section.checkpoint_count
3089 ));
3090 }
3091 out.push_str("+------------------------------+--------+--------+--------+--------+\n");
3092 out.push_str(&format!(
3093 "| {:<28} | {:>6.1} | {:>6.1} | {:>6.1} | {:>6} |\n",
3094 "Overall",
3095 score.overall_percent,
3096 score.possible_weighted_points,
3097 score.earned_weighted_points,
3098 total_checkpoints
3099 ));
3100 out.push_str("+------------------------------+--------+--------+--------+--------+\n");
3101}
3102
3103fn render_advisory_subscore_table(out: &mut String, subscores: &[AdvisorySubscore]) {
3104 out.push_str("Advisory Broad Subscores\n");
3105 out.push_str("+------------------------------+--------+\n");
3106 out.push_str("| Subscore | Score% |\n");
3107 out.push_str("+------------------------------+--------+\n");
3108 for subscore in subscores.iter() {
3109 out.push_str(&format!(
3110 "| {:<28} | {:>6.1} |\n",
3111 truncate_table_label(subscore.title, 28),
3112 subscore.percent
3113 ));
3114 }
3115 out.push_str("+------------------------------+--------+\n");
3116 for subscore in subscores.iter() {
3117 out.push_str(&format!(" - {}: {}\n", subscore.title, subscore.basis));
3118 }
3119}
3120
/// Truncate `label` to at most `max_width` characters (not bytes), so a
/// multibyte label cannot be split mid-character inside a fixed-width cell.
///
/// Returns an owned `String` containing the first `max_width` chars of
/// `label`, or the whole label when it is already short enough.
fn truncate_table_label(label: &str, max_width: usize) -> String {
    // Idiomatic replacement for the previous manual push loop.
    label.chars().take(max_width).collect()
}
3128
3129fn render_runtime_section(out: &mut String, profile: &RuntimeProfile) {
3130 out.push_str("Constrained Runtime Profile\n");
3131 out.push_str("──────────────────────────────────────────────────────────────\n");
3132 out.push_str(&format!(
3133 "no_std declared: {}\n",
3134 yes_no(profile.no_std_declared)
3135 ));
3136 out.push_str(&format!(
3137 "no_alloc candidate: {}\n",
3138 yes_no(
3139 profile.no_std_declared
3140 && profile.alloc_crate_hits == 0
3141 && profile.heap_allocation_hits == 0
3142 )
3143 ));
3144 out.push_str(&format!(
3145 "alloc crate references: {}\n",
3146 profile.alloc_crate_hits
3147 ));
3148 out.push_str(&format!(
3149 "heap allocation motifs: {}\n",
3150 profile.heap_allocation_hits
3151 ));
3152 render_evidence_block(out, "no_std evidence", &profile.no_std_evidence);
3153 render_evidence_block(out, "alloc evidence", &profile.alloc_evidence);
3154 render_evidence_block(
3155 out,
3156 "heap-allocation evidence",
3157 &profile.heap_allocation_evidence,
3158 );
3159 out.push('\n');
3160}
3161
3162fn render_safety_section(out: &mut String, profile: &SafetyProfile) {
3163 out.push_str("Unsafe / Panic Surface\n");
3164 out.push_str("──────────────────────────────────────────────────────────────\n");
3165 out.push_str(&format!(
3166 "unsafe policy: {}\n",
3167 match profile.unsafe_policy {
3168 UnsafeCodePolicy::Forbid => "forbid(unsafe_code)",
3169 UnsafeCodePolicy::Deny => "deny(unsafe_code)",
3170 UnsafeCodePolicy::NotDeclared => "not declared",
3171 }
3172 ));
3173 out.push_str(&format!(
3174 "no_unsafe candidate: {}\n",
3175 yes_no(profile.unsafe_sites == 0)
3176 ));
3177 out.push_str(&format!(
3178 "explicit unsafe sites: {}\n",
3179 profile.unsafe_sites
3180 ));
3181 out.push_str(&format!("panic-like sites: {}\n", profile.panic_sites));
3182 out.push_str(&format!(
3183 "unwrap/expect-like sites: {}\n",
3184 profile.unwrap_sites
3185 ));
3186 out.push_str(&format!("FFI boundary sites: {}\n", profile.ffi_sites));
3187 out.push_str(&format!(
3188 "SAFETY: justification comments: {}\n",
3189 profile.safety_comment_sites
3190 ));
3191 render_evidence_block(
3192 out,
3193 "unsafe policy evidence",
3194 &profile.unsafe_policy_evidence,
3195 );
3196 render_evidence_block(out, "unsafe evidence", &profile.unsafe_evidence);
3197 render_evidence_block(out, "panic evidence", &profile.panic_evidence);
3198 render_evidence_block(out, "unwrap evidence", &profile.unwrap_evidence);
3199 render_evidence_block(out, "FFI evidence", &profile.ffi_evidence);
3200 render_evidence_block(
3201 out,
3202 "SAFETY: comment evidence",
3203 &profile.safety_comment_evidence,
3204 );
3205 out.push('\n');
3206}
3207
3208fn render_verification_section(out: &mut String, profile: &VerificationProfile) {
3209 out.push_str("Verification Evidence Signals\n");
3210 out.push_str("──────────────────────────────────────────────────────────────\n");
3211 out.push_str(&format!(
3212 "tests/ directory present: {}\n",
3213 yes_no(profile.tests_dir_present)
3214 ));
3215 out.push_str(&format!("test markers: {}\n", profile.test_marker_hits));
3216 out.push_str(&format!(
3217 "property-testing signals: {}\n",
3218 profile.property_testing_hits
3219 ));
3220 out.push_str(&format!(
3221 "concurrency exploration signals: {}\n",
3222 profile.concurrency_exploration_hits
3223 ));
3224 out.push_str(&format!("fuzzing signals: {}\n", profile.fuzzing_hits));
3225 out.push_str(&format!(
3226 "formal-method signals: {}\n",
3227 profile.formal_methods_hits
3228 ));
3229 render_evidence_block(out, "test evidence", &profile.test_marker_evidence);
3230 render_evidence_block(
3231 out,
3232 "property-testing evidence",
3233 &profile.property_testing_evidence,
3234 );
3235 render_evidence_block(
3236 out,
3237 "concurrency exploration evidence",
3238 &profile.concurrency_exploration_evidence,
3239 );
3240 render_evidence_block(out, "fuzzing evidence", &profile.fuzzing_evidence);
3241 render_evidence_block(
3242 out,
3243 "formal-method evidence",
3244 &profile.formal_methods_evidence,
3245 );
3246 out.push('\n');
3247}
3248
3249fn render_build_section(out: &mut String, profile: &BuildProfile) {
3250 out.push_str("Build / Tooling Complexity\n");
3251 out.push_str("──────────────────────────────────────────────────────────────\n");
3252 out.push_str(&format!(
3253 "direct dependencies: {}\n",
3254 profile.direct_dependencies
3255 ));
3256 out.push_str(&format!(
3257 "build dependencies: {}\n",
3258 profile.build_dependencies
3259 ));
3260 out.push_str(&format!("dev dependencies: {}\n", profile.dev_dependencies));
3261 out.push_str(&format!(
3262 "build.rs present: {}\n",
3263 yes_no(profile.has_build_script)
3264 ));
3265 out.push_str(&format!(
3266 "proc-macro crate: {}\n",
3267 yes_no(profile.proc_macro_crate)
3268 ));
3269 out.push_str(&format!(
3270 "codegen / native-build signals: {}\n",
3271 profile.codegen_hits
3272 ));
3273 render_evidence_block(out, "codegen evidence", &profile.codegen_evidence);
3274 out.push('\n');
3275}
3276
3277fn render_lifecycle_section(
3278 out: &mut String,
3279 profile: &LifecycleProfile,
3280 manifest: &ManifestMetadata,
3281) {
3282 out.push_str("Lifecycle / Governance Artifacts\n");
3283 out.push_str("──────────────────────────────────────────────────────────────\n");
3284 render_lifecycle_presence_lines(out, profile);
3285 if profile.license_files.is_empty() {
3286 out.push_str("license files: none observed\n");
3287 } else {
3288 let names = profile
3289 .license_files
3290 .iter()
3291 .map(|path| path.display().to_string())
3292 .collect::<Vec<_>>()
3293 .join(", ");
3294 out.push_str(&format!("license files: {}\n", names));
3295 }
3296 render_manifest_metadata_lines(out, manifest);
3297 out.push('\n');
3298}
3299
3300fn render_lifecycle_presence_lines(out: &mut String, profile: &LifecycleProfile) {
3301 out.push_str(&format!(
3302 "README present: {}\n",
3303 yes_no(profile.readme_present)
3304 ));
3305 out.push_str(&format!(
3306 "CHANGELOG present: {}\n",
3307 yes_no(profile.changelog_present)
3308 ));
3309 out.push_str(&format!(
3310 "SECURITY.md present: {}\n",
3311 yes_no(profile.security_md_present)
3312 ));
3313 out.push_str(&format!(
3314 "SAFETY.md present: {}\n",
3315 yes_no(profile.safety_md_present)
3316 ));
3317 out.push_str(&format!(
3318 "architecture/design doc present: {}\n",
3319 yes_no(profile.architecture_doc_present)
3320 ));
3321 out.push_str(&format!(
3322 "docs/ content present: {}\n",
3323 yes_no(profile.docs_dir_present)
3324 ));
3325}
3326
3327fn render_manifest_metadata_lines(out: &mut String, manifest: &ManifestMetadata) {
3328 out.push_str(&format!(
3329 "manifest license: {}\n",
3330 manifest.license.as_deref().unwrap_or("not declared")
3331 ));
3332 out.push_str(&format!(
3333 "manifest rust-version: {}\n",
3334 manifest.rust_version.as_deref().unwrap_or("not declared")
3335 ));
3336 out.push_str(&format!(
3337 "manifest edition: {}\n",
3338 manifest.edition.as_deref().unwrap_or("not declared")
3339 ));
3340 out.push_str(&format!(
3341 "repository URL: {}\n",
3342 manifest.repository.as_deref().unwrap_or("not declared")
3343 ));
3344 out.push_str(&format!(
3345 "documentation URL: {}\n",
3346 manifest.documentation.as_deref().unwrap_or("not declared")
3347 ));
3348 out.push_str(&format!(
3349 "homepage URL: {}\n",
3350 manifest.homepage.as_deref().unwrap_or("not declared")
3351 ));
3352 out.push_str(&format!(
3353 "manifest readme: {}\n",
3354 manifest.readme.as_deref().unwrap_or("not declared")
3355 ));
3356}
3357
3358fn render_power_of_ten_section(out: &mut String, profile: &PowerOfTenProfile) {
3359 let applied = profile
3360 .rules
3361 .iter()
3362 .filter(|rule| rule.status == PowerOfTenStatus::Applied)
3363 .count();
3364 let not_applied = profile
3365 .rules
3366 .iter()
3367 .filter(|rule| rule.status == PowerOfTenStatus::NotApplied)
3368 .count();
3369 let indeterminate = profile
3370 .rules
3371 .iter()
3372 .filter(|rule| rule.status == PowerOfTenStatus::Indeterminate)
3373 .count();
3374
3375 out.push_str("NASA/JPL Power of Ten Audit\n");
3376 out.push_str("──────────────────────────────────────────────────────────────\n");
3377 render_power_of_ten_summary(out, applied, not_applied, indeterminate);
3378 for rule in &profile.rules {
3379 render_power_of_ten_rule(out, rule);
3380 }
3381 out.push('\n');
3382}
3383
/// Emit the Power of Ten preamble caveat followed by the
/// Applied / Not Applied / Indeterminate tally line.
fn render_power_of_ten_summary(
    out: &mut String,
    applied: usize,
    not_applied: usize,
    indeterminate: usize,
) {
    out.push_str(
        "Rust adaptation of Holzmann's Power of Ten rules. C-specific rules are approximated with source-visible Rust proxies. This is guidance for review and improvement, not a certification result.\n",
    );
    *out += &format!(
        "Applied: {} | Not Applied: {} | Indeterminate: {}\n",
        applied, not_applied, indeterminate
    );
}
3398
3399fn render_power_of_ten_rule(out: &mut String, rule: &PowerOfTenRuleAudit) {
3400 out.push_str(&format!(
3401 "P10-{} {}: {}\n",
3402 rule.number,
3403 power_of_ten_status_label(rule.status),
3404 rule.title
3405 ));
3406 out.push_str(&format!(" Detail: {}\n", rule.detail));
3407 out.push_str(&format!(
3408 " Classification: {}\n",
3409 power_of_ten_classification(rule.number)
3410 ));
3411 out.push_str(&format!(
3412 " Confidence: {}\n",
3413 power_of_ten_confidence(rule.status, rule.evidence.len())
3414 ));
3415 out.push_str(&format!(
3416 " Impact Kind: {}\n",
3417 power_of_ten_impact_kind(rule.number)
3418 ));
3419 out.push_str(&format!(
3420 " Why This Matters In Rust: {}\n",
3421 power_of_ten_rust_why(rule.number)
3422 ));
3423 out.push_str(&format!(
3424 " Review / Readiness Note: {}\n",
3425 power_of_ten_readiness_why(rule.number)
3426 ));
3427 out.push_str(&format!(
3428 " Remediation: {}\n",
3429 power_of_ten_remediation(rule.number)
3430 ));
3431 out.push_str(&format!(
3432 " Verification Suggestion: {}\n",
3433 power_of_ten_verification_suggestion(rule.number)
3434 ));
3435 render_named_evidence_block(
3436 out,
3437 " Evidence",
3438 &format!("P10-{}", rule.number),
3439 &rule.evidence,
3440 );
3441}
3442
3443fn render_advanced_structural_section(out: &mut String, profile: &AdvancedStructuralProfile) {
3444 let elevated = profile
3445 .checks
3446 .iter()
3447 .filter(|check| check.status == StructuralCheckStatus::Elevated)
3448 .count();
3449 let clear = profile
3450 .checks
3451 .iter()
3452 .filter(|check| check.status == StructuralCheckStatus::Clear)
3453 .count();
3454 let indeterminate = profile
3455 .checks
3456 .iter()
3457 .filter(|check| check.status == StructuralCheckStatus::Indeterminate)
3458 .count();
3459
3460 out.push_str("Advanced Structural Risk Checks\n");
3461 out.push_str("──────────────────────────────────────────────────────────────\n");
3462 render_advanced_structural_summary(out, elevated, clear, indeterminate);
3463 for check in &profile.checks {
3464 render_advanced_structural_check(out, check);
3465 }
3466 render_criticality_heatmap(out, &profile.hotspots);
3467 out.push('\n');
3468}
3469
/// Emit the structural-check preamble caveat followed by the
/// Elevated / Clear / Indeterminate tally line.
fn render_advanced_structural_summary(
    out: &mut String,
    elevated: usize,
    clear: usize,
    indeterminate: usize,
) {
    out.push_str(
        "These checks are source-visible structural proxies for mission, safety, security, and code-quality review. Elevated means review-worthy, not automatically unsafe and not a certification decision.\n",
    );
    *out += &format!(
        "Elevated: {} | Clear: {} | Indeterminate: {}\n",
        elevated, clear, indeterminate
    );
}
3484
3485fn render_advanced_structural_check(out: &mut String, check: &AdvancedStructuralCheck) {
3486 out.push_str(&format!(
3487 "{} {}: {}\n",
3488 check.id,
3489 structural_check_status_label(check.status),
3490 check.title
3491 ));
3492 out.push_str(&format!(" Detail: {}\n", check.detail));
3493 out.push_str(&format!(
3494 " Classification: {}\n",
3495 advanced_check_classification(check.id)
3496 ));
3497 out.push_str(&format!(
3498 " Confidence: {}\n",
3499 advanced_check_confidence(check.status, check.evidence.len())
3500 ));
3501 out.push_str(&format!(
3502 " Impact Kind: {}\n",
3503 advanced_check_impact_kind(check.id)
3504 ));
3505 out.push_str(&format!(
3506 " Why This Matters In Rust: {}\n",
3507 advanced_check_rust_why(check.id)
3508 ));
3509 out.push_str(&format!(
3510 " Review / Readiness Note: {}\n",
3511 advanced_check_readiness_why(check.id)
3512 ));
3513 out.push_str(&format!(
3514 " Remediation: {}\n",
3515 advanced_check_remediation(check.id)
3516 ));
3517 out.push_str(&format!(
3518 " Verification Suggestion: {}\n",
3519 advanced_check_verification_suggestion(check.id)
3520 ));
3521 render_named_evidence_block(out, " Evidence", check.id, &check.evidence);
3522}
3523
3524fn render_criticality_heatmap(out: &mut String, hotspots: &[CriticalityHotspot]) {
3525 out.push_str("Criticality Heatmap\n");
3526 out.push_str("──────────────────────────────────────────────────────────────\n");
3527 out.push_str("Guide:\n");
3528 out.push_str(" [##--------] observed score 12-19\n");
3529 out.push_str(" [####------] guarded score 20-29\n");
3530 out.push_str(" [######----] elevated score 30-39\n");
3531 out.push_str(" [########--] high score 40-49\n");
3532 out.push_str(" [##########] severe score 50+\n");
3533 out.push_str(" row format: path:line `function` [bar] band score=<n> complexity~<n>\n");
3534 out.push_str(" signals: comma-separated structural risk contributors\n");
3535 if hotspots.is_empty() {
3536 out.push_str("No function hotspots extracted.\n");
3537 return;
3538 }
3539
3540 for hotspot in hotspots {
3541 out.push_str(&format!(
3542 "{}:{} `{}` {} {:<8} score={} complexity~={}\n",
3543 hotspot.path.display(),
3544 hotspot.start_line,
3545 hotspot.function_name,
3546 heatmap_bar(hotspot.risk_score),
3547 heatmap_band_label(hotspot.risk_score),
3548 hotspot.risk_score,
3549 hotspot.estimated_complexity,
3550 ));
3551 out.push_str(&format!(" signals: {}\n", hotspot.signals.join(", ")));
3552 }
3553}
3554
3555fn render_evidence_block(out: &mut String, label: &str, evidence: &[ScanEvidence]) {
3556 if evidence.is_empty() {
3557 return;
3558 }
3559 out.push_str(&format!("{label}:\n"));
3560 for item in evidence.iter() {
3561 out.push_str(&format!(
3562 " - {}:{} [{}] {}\n",
3563 item.path.display(),
3564 item.line_number,
3565 item.pattern,
3566 item.snippet
3567 ));
3568 }
3569}
3570
3571fn render_named_evidence_block(
3572 out: &mut String,
3573 label: &str,
3574 finding_id: &str,
3575 evidence: &[ScanEvidence],
3576) {
3577 if evidence.is_empty() {
3578 return;
3579 }
3580 out.push_str(&format!("{label}:\n"));
3581 for (idx, item) in evidence.iter().enumerate() {
3582 out.push_str(&format!(
3583 " - {} {}:{} [{}] {}\n",
3584 evidence_id(finding_id, item, idx),
3585 item.path.display(),
3586 item.line_number,
3587 item.pattern,
3588 item.snippet
3589 ));
3590 }
3591}
3592
/// Map a hotspot risk score onto its severity-band label.
/// Bands: <20 observed, <30 guarded, <40 elevated, <50 high, else severe.
fn heatmap_band_label(score: usize) -> &'static str {
    if score < 20 {
        "observed"
    } else if score < 30 {
        "guarded"
    } else if score < 40 {
        "elevated"
    } else if score < 50 {
        "high"
    } else {
        "severe"
    }
}
3602
/// Build a fixed 10-slot bar such as `[####------]` from a risk score.
/// Each severity band fills two more slots, capped at ten for scores of 50+.
fn heatmap_bar(score: usize) -> String {
    let filled = match score {
        0..=19 => 2,
        20..=29 => 4,
        30..=39 => 6,
        40..=49 => 8,
        _ => 10,
    };
    format!("[{}{}]", "#".repeat(filled), "-".repeat(10 - filled))
}
3620
3621fn power_of_ten_status_label(status: PowerOfTenStatus) -> &'static str {
3622 match status {
3623 PowerOfTenStatus::Applied => "applied",
3624 PowerOfTenStatus::NotApplied => "not applied",
3625 PowerOfTenStatus::Indeterminate => "indeterminate",
3626 }
3627}
3628
3629fn structural_check_status_label(status: StructuralCheckStatus) -> &'static str {
3630 match status {
3631 StructuralCheckStatus::Elevated => "elevated",
3632 StructuralCheckStatus::Clear => "clear",
3633 StructuralCheckStatus::Indeterminate => "indeterminate",
3634 }
3635}
3636
/// Canonical remediation guidance for Power of Ten rules 1-10; any other
/// rule number falls back to generic audit guidance.
fn power_of_ten_remediation(rule_number: u8) -> &'static str {
    // Index i holds the guidance for rule i + 1.
    const GUIDANCE: [&str; 10] = [
        "Remove recursion where possible, or isolate the pattern behind a bounded proof and explicit review note.",
        "Add explicit upper bounds, timeout guards, or fixed-step limits so loop behavior is reviewable.",
        "Move dynamic allocation to initialization paths or document and bound the steady-state allocation sites.",
        "Split large functions into reviewable units with clearer local invariants and narrower responsibilities.",
        "Replace catch-all control flow with explicit state handling or document the fallback state as intentional.",
        "Reduce dependence on global mutable state or document synchronization and ownership boundaries.",
        "Propagate errors explicitly rather than unwrapping, or document the invariant that justifies the unwrap/expect.",
        "Reduce conditional-compilation forks or document why each feature/macro path remains auditable.",
        "Tighten raw-pointer / FFI surfaces and document the local safety contract for each remaining site.",
        "Keep warnings and analyzer gates active in CI so the audit surface stays reviewable over time.",
    ];
    match usize::from(rule_number) {
        n @ 1..=10 => GUIDANCE[n - 1],
        _ => "Review the flagged rule against the locked DSFB audit guidance and simplify the local structure where practical.",
    }
}
3652
/// Remediation guidance keyed by advanced structural check id; unknown ids
/// receive generic evidence-review guidance.
fn advanced_check_remediation(check_id: &str) -> &'static str {
    // Linear lookup is fine: the table is tiny and called per rendered check.
    const TABLE: [(&str, &str); 22] = [
        ("SAFE-STATE", "Make fallback states explicit and document what the safe-state behavior is for the affected control path."),
        ("ASYNC-LOCK", "Avoid holding locks across `.await`, or split the critical section so async suspension happens after the guard is dropped."),
        ("ALLOC-HOT", "Pre-allocate, bound growth, or move allocation-heavy work out of high-frequency loops."),
        ("CLOCK-INTEG", "Prefer monotonic time for durations and timeout logic; isolate wall-clock usage to presentation or signed timestamps."),
        ("RETRY-DAMP", "Add capped exponential backoff with jitter and make retry behavior explicit in failure-path tests."),
        ("HARD-WAIT", "Replace fixed sleeps with state checks, deadlines, or explicit readiness conditions when possible."),
        ("PART-SPACE", "Reduce shared global state, or document the partitioning/ownership rationale for any remaining shared resource."),
        ("PLUGIN-LOAD", "Constrain dynamic loading behind verification, sandboxing, or explicit operator review."),
        ("CWE-404", "Tighten ownership so resources close on all error paths and avoid raw-handle escape hatches unless documented."),
        ("CMD-BUF", "Add TTL, sequence, staleness, or cancellation guards so queued control messages cannot accumulate invisibly."),
        ("ITER-UNB", "Add `.take(...)`, explicit bounds, or documented finite-source guarantees on terminal iterator consumption."),
        ("ISR-SAFE", "Keep interrupt handlers allocation-free and lock-free where possible, or document the ISR contract explicitly."),
        ("FUTURE-WAKE", "Ensure every manual `Poll::Pending` path arranges a wakeup before returning pending."),
        ("TASK-LEAK", "Retain JoinHandles, cancellation paths, or supervision ownership for spawned tasks that affect shutdown and backpressure."),
        ("DROP-PANIC", "Keep `Drop` implementations infallible; move failure reporting out of destructor paths."),
        ("ATOMIC-RELAXED", "Review whether the flagged atomic needs stronger ordering semantics on the observed state-transition path."),
        ("CLOCK-MIX", "Avoid mixing `Instant` and `SystemTime` in one duration/control path unless the conversion boundary is explicit."),
        ("SHORT-WRITE", "Use `write_all`, retry `Interrupted`, or document why partial writes are already handled by the caller."),
        ("ASYNC-RECUR", "Add a visible base-case/depth bound or replace async recursion with an explicit work queue or loop."),
        ("CHAN-UNB", "Prefer bounded channels or document why unbounded growth is safe under the expected ingress rate."),
        ("ZERO-COPY", "Keep ingress data borrowed or reference-counted longer, and avoid eager `.to_vec()` / `.clone()` on hot read paths."),
        ("CARGO-VERS", "Pin or narrow dependency version requirements so builds and attestations remain reproducible."),
    ];
    for &(id, guidance) in TABLE.iter() {
        if id == check_id {
            return guidance;
        }
    }
    "Review the finding against the emitted evidence and either tighten the local structure or document the local invariant."
}
3680
/// Remediation guidance keyed by heuristic id; unknown ids receive generic
/// motif-review guidance.
fn heuristic_remediation(heuristic_id: &str) -> &'static str {
    // Linear lookup is fine: the table is tiny and called per rendered motif.
    const TABLE: [(&str, &str); 12] = [
        ("H-ALLOC-01", "Audit hot-loop allocation sites and prefer bounded or reserved growth on steady-state paths."),
        ("H-LOCK-01", "Review lock hold time, await-under-lock risk, and whether the shared state can be partitioned or copied out earlier."),
        ("H-RAFT-01", "Check heartbeat timeout logic, monotonic time usage, and whether election-sensitive paths have explicit headroom."),
        ("H-ASYNC-01", "Move blocking work out of async tasks or add explicit offload/yield boundaries."),
        ("H-TCP-01", "Review partial-write handling, retry damping, timeout paths, and whether network assumptions are made explicit."),
        ("H-CHAN-01", "Check boundedness, receiver saturation handling, and whether producers can observe downstream backpressure."),
        ("H-CLOCK-01", "Prefer monotonic clocks for control logic and isolate wall-clock use to presentation or external protocol boundaries."),
        ("H-THRU-01", "Inspect hot paths for hidden copies, queue growth, or retry behavior that can erode throughput before alarms fire."),
        ("H-SERDE-01", "Review payload growth, eager allocation, and schema-boundary handling on the serialization path."),
        ("H-GRPC-01", "Inspect flow-control behavior, buffering, and async fairness on the affected RPC path."),
        ("H-DNS-01", "Review cache invalidation, timeout handling, and fallback behavior around name resolution."),
        ("H-ERR-01", "Audit retry policy, escalation path, and whether repeated failure surfaces are bounded and jittered."),
    ];
    for &(id, guidance) in TABLE.iter() {
        if id == heuristic_id {
            return guidance;
        }
    }
    "Review the matched motif against its evidence and either tighten the local structure or record the intended invariant."
}
3698
3699fn evidence_ids(finding_id: &str, evidence: &[ScanEvidence]) -> Vec<String> {
3700 evidence
3701 .iter()
3702 .enumerate()
3703 .map(|(idx, item)| evidence_id(finding_id, item, idx))
3704 .collect()
3705}
3706
3707fn evidence_id(finding_id: &str, evidence: &ScanEvidence, index: usize) -> String {
3708 format!(
3709 "{}-{:02}-{}-{}",
3710 finding_id,
3711 index + 1,
3712 sanitize_evidence_component(
3713 evidence
3714 .path
3715 .file_stem()
3716 .and_then(|name| name.to_str())
3717 .unwrap_or("file")
3718 ),
3719 evidence.line_number
3720 )
3721}
3722
/// Normalize an id component: ASCII alphanumerics are lowercased and every
/// other character is replaced with `-`.
fn sanitize_evidence_component(value: &str) -> String {
    let mut cleaned = String::with_capacity(value.len());
    for ch in value.chars() {
        if ch.is_ascii_alphanumeric() {
            cleaned.push(ch.to_ascii_lowercase());
        } else {
            cleaned.push('-');
        }
    }
    cleaned
}
3735
3736fn scan_runtime_profile(documents: &[SourceDocument]) -> RuntimeProfile {
3737 let no_std = scan_patterns(documents, NO_STD_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
3738 let alloc = scan_patterns(documents, ALLOC_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
3739 let heap = scan_patterns(documents, HEAP_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
3740 let runtime_core_alloc = scan_patterns_filtered(
3741 documents,
3742 ALLOC_PATTERNS,
3743 MAX_EVIDENCE_PER_SIGNAL,
3744 ScanContentMode::Risk,
3745 |path| !is_tooling_support_path(path),
3746 );
3747 let runtime_core_heap = scan_patterns_filtered(
3748 documents,
3749 HEAP_PATTERNS,
3750 MAX_EVIDENCE_PER_SIGNAL,
3751 ScanContentMode::Risk,
3752 |path| !is_tooling_support_path(path),
3753 );
3754
3755 RuntimeProfile {
3756 no_std_declared: no_std.total_hits > 0,
3757 no_std_evidence: no_std.evidence,
3758 alloc_crate_hits: alloc.total_hits,
3759 alloc_evidence: alloc.evidence,
3760 heap_allocation_hits: heap.total_hits,
3761 heap_allocation_evidence: heap.evidence,
3762 runtime_core_alloc_hits: runtime_core_alloc.total_hits,
3763 runtime_core_heap_allocation_hits: runtime_core_heap.total_hits,
3764 }
3765}
3766
3767fn scan_safety_profile(documents: &[SourceDocument]) -> SafetyProfile {
3768 let forbid = scan_patterns(documents, FORBID_UNSAFE_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
3769 let deny = scan_patterns(documents, DENY_UNSAFE_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
3770 let unsafe_scan = scan_patterns(documents, UNSAFE_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
3771 let panic_scan = scan_patterns(documents, PANIC_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
3772 let unwrap_scan = scan_patterns(documents, UNWRAP_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
3773 let ffi_scan = scan_patterns(documents, FFI_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
3774 let safety_comment_scan =
3775 scan_patterns(documents, SAFETY_COMMENT_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
3776
3777 let (unsafe_policy, unsafe_policy_evidence) = if forbid.total_hits > 0 {
3778 (UnsafeCodePolicy::Forbid, forbid.evidence)
3779 } else if deny.total_hits > 0 {
3780 (UnsafeCodePolicy::Deny, deny.evidence)
3781 } else {
3782 (UnsafeCodePolicy::NotDeclared, Vec::new())
3783 };
3784
3785 SafetyProfile {
3786 unsafe_policy,
3787 unsafe_policy_evidence,
3788 unsafe_sites: unsafe_scan.total_hits,
3789 unsafe_evidence: unsafe_scan.evidence,
3790 panic_sites: panic_scan.total_hits,
3791 panic_evidence: panic_scan.evidence,
3792 unwrap_sites: unwrap_scan.total_hits,
3793 unwrap_evidence: unwrap_scan.evidence,
3794 ffi_sites: ffi_scan.total_hits,
3795 ffi_evidence: ffi_scan.evidence,
3796 safety_comment_sites: safety_comment_scan.total_hits,
3797 safety_comment_evidence: safety_comment_scan.evidence,
3798 }
3799}
3800
3801fn scan_verification_profile(
3802 all_files: &[PathBuf],
3803 documents: &[SourceDocument],
3804) -> VerificationProfile {
3805 let tests_dir_present = all_files
3806 .iter()
3807 .any(|path| has_path_component(path, "tests"));
3808 let test_scan = scan_patterns_analysis(documents, TEST_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
3809 let property_scan =
3810 scan_patterns_analysis(documents, PROPERTY_TEST_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
3811 let concurrency_scan = scan_patterns_analysis(
3812 documents,
3813 CONCURRENCY_EXPLORATION_PATTERNS,
3814 MAX_EVIDENCE_PER_SIGNAL,
3815 );
3816 let fuzzing_scan = scan_patterns_analysis(documents, FUZZING_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
3817 let formal_scan =
3818 scan_patterns_analysis(documents, FORMAL_METHOD_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
3819
3820 VerificationProfile {
3821 tests_dir_present,
3822 test_marker_hits: test_scan.total_hits,
3823 test_marker_evidence: test_scan.evidence,
3824 property_testing_hits: property_scan.total_hits,
3825 property_testing_evidence: property_scan.evidence,
3826 concurrency_exploration_hits: concurrency_scan.total_hits,
3827 concurrency_exploration_evidence: concurrency_scan.evidence,
3828 fuzzing_hits: fuzzing_scan.total_hits,
3829 fuzzing_evidence: fuzzing_scan.evidence,
3830 formal_methods_hits: formal_scan.total_hits,
3831 formal_methods_evidence: formal_scan.evidence,
3832 }
3833}
3834
3835fn scan_build_profile(
3836 root: &Path,
3837 documents: &[SourceDocument],
3838 manifest: &ManifestMetadata,
3839) -> BuildProfile {
3840 let codegen_scan = scan_patterns(documents, CODEGEN_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
3841 let has_build_script = manifest.build_script.is_some() || root.join("build.rs").is_file();
3842
3843 BuildProfile {
3844 direct_dependencies: manifest.direct_dependencies,
3845 build_dependencies: manifest.build_dependencies,
3846 dev_dependencies: manifest.dev_dependencies,
3847 has_build_script,
3848 proc_macro_crate: manifest.proc_macro,
3849 codegen_hits: codegen_scan.total_hits,
3850 codegen_evidence: codegen_scan.evidence,
3851 }
3852}
3853
3854fn scan_lifecycle_profile(all_files: &[PathBuf]) -> LifecycleProfile {
3855 let mut license_files = Vec::new();
3856
3857 for path in all_files {
3858 let Some(name) = path.file_name().and_then(|name| name.to_str()) else {
3859 continue;
3860 };
3861 let lowered = name.to_ascii_lowercase();
3862 if lowered.starts_with("license")
3863 || lowered.starts_with("copying")
3864 || lowered.starts_with("notice")
3865 {
3866 license_files.push(path.clone());
3867 }
3868 }
3869
3870 license_files.sort();
3871
3872 LifecycleProfile {
3873 readme_present: has_file_with_prefix(all_files, "readme"),
3874 changelog_present: has_file_with_prefix(all_files, "changelog")
3875 || has_file_with_prefix(all_files, "changes"),
3876 security_md_present: has_exact_file_name(all_files, "security.md"),
3877 safety_md_present: has_exact_file_name(all_files, "safety.md"),
3878 architecture_doc_present: has_exact_file_name(all_files, "architecture.md")
3879 || has_exact_file_name(all_files, "design.md"),
3880 docs_dir_present: all_files
3881 .iter()
3882 .any(|path| has_path_component(path, "docs")),
3883 license_files,
3884 }
3885}
3886
/// Evaluate all ten Power-of-Ten rule audits over the scanned sources.
///
/// Rules 1-9 read the in-crate source documents; rule 10 reads the packaged
/// artifact documents instead, since warning/analyzer signals live outside
/// the source tree. The vec order is fixed and matches the rule numbering.
fn scan_power_of_ten_profile(
    documents: &[SourceDocument],
    artifact_documents: &[SourceDocument],
    functions: &[FunctionSummary],
    runtime: &RuntimeProfile,
    safety: &SafetyProfile,
    build: &BuildProfile,
) -> PowerOfTenProfile {
    PowerOfTenProfile {
        rules: vec![
            build_power_of_ten_rule1(documents, functions),
            build_power_of_ten_rule2(documents),
            build_power_of_ten_rule3(runtime),
            build_power_of_ten_rule4(functions),
            build_power_of_ten_rule5(functions),
            build_power_of_ten_rule6(documents),
            build_power_of_ten_rule7(documents, safety),
            build_power_of_ten_rule8(documents, build),
            build_power_of_ten_rule9(documents),
            build_power_of_ten_rule10(artifact_documents),
        ],
    }
}
3910
/// Run every advanced structural check and collect criticality hotspots.
///
/// Most checks read the in-crate source documents or extracted function
/// summaries; the plugin-load check reads the packaged artifact documents.
/// The vec order determines report ordering.
fn scan_advanced_structural_profile(
    documents: &[SourceDocument],
    artifact_documents: &[SourceDocument],
    functions: &[FunctionSummary],
    safety: &SafetyProfile,
) -> AdvancedStructuralProfile {
    AdvancedStructuralProfile {
        checks: vec![
            build_recursion_check(functions),
            build_interior_mutability_check(documents),
            build_unwrap_safety_check(safety),
            build_complexity_check(functions),
            build_async_lock_check(documents),
            build_safe_state_check(documents),
            build_time_wait_check(documents),
            build_partition_space_check(documents),
            build_plugin_load_check(artifact_documents),
            build_resource_lifecycle_check(documents),
            build_command_buffer_check(documents),
            build_iterator_bound_check(functions),
            build_isr_safety_check(functions),
            build_future_wake_check(functions),
            build_task_leak_check(documents),
            build_drop_panic_check(documents),
            build_relaxed_atomic_check(functions),
            build_clock_mix_check(functions),
            build_short_write_check(functions),
            build_async_recursion_check(functions),
            build_unbounded_channel_check(documents),
            build_zero_copy_check(functions),
            build_version_drift_check(documents),
        ],
        hotspots: build_criticality_hotspots(functions),
    }
}
3946
3947fn build_power_of_ten_rule1(
3948 documents: &[SourceDocument],
3949 functions: &[FunctionSummary],
3950) -> PowerOfTenRuleAudit {
3951 let rule1_scan = scan_patterns(documents, P10_RULE1_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
3952 let (rule1_recursion_hits, rule1_recursion_evidence) =
3953 collect_direct_recursion_evidence(functions, MAX_EVIDENCE_PER_SIGNAL);
3954 let total_hits = rule1_scan.total_hits + rule1_recursion_hits;
3955 let mut evidence = rule1_scan.evidence;
3956 append_evidence(
3957 &mut evidence,
3958 rule1_recursion_evidence,
3959 MAX_EVIDENCE_PER_SIGNAL,
3960 );
3961
3962 PowerOfTenRuleAudit {
3963 number: 1,
3964 title: "Simple control flow; no recursion or equivalent escapes",
3965 status: if total_hits == 0 {
3966 PowerOfTenStatus::Applied
3967 } else {
3968 PowerOfTenStatus::NotApplied
3969 },
3970 detail: if total_hits == 0 {
3971 "No direct recursion or obvious control-flow escape motifs observed. Indirect recursion is not proven absent by this lightweight scan.".to_string()
3972 } else {
3973 format!(
3974 "{total_hits} direct-recursion site(s) or control-flow escape motif(s) observed."
3975 )
3976 },
3977 evidence,
3978 }
3979}
3980
3981fn build_power_of_ten_rule2(documents: &[SourceDocument]) -> PowerOfTenRuleAudit {
3982 let rule2_unbounded = collect_unbounded_loop_evidence(documents, MAX_EVIDENCE_PER_SIGNAL);
3983 let (ambiguous_for_hits, ambiguous_for_evidence) =
3984 collect_ambiguous_for_loop_evidence(documents, MAX_EVIDENCE_PER_SIGNAL);
3985 let (status, detail, evidence) = if rule2_unbounded.total_hits > 0 {
3986 (
3987 PowerOfTenStatus::NotApplied,
3988 format!(
3989 "{} potentially unbounded `loop`/`while` construct(s) observed.",
3990 rule2_unbounded.total_hits
3991 ),
3992 rule2_unbounded.evidence,
3993 )
3994 } else if ambiguous_for_hits > 0 {
3995 (
3996 PowerOfTenStatus::Indeterminate,
3997 format!(
3998 "{} `for` loop site(s) remain ambiguous after bounded-iterator screening.",
3999 ambiguous_for_hits
4000 ),
4001 ambiguous_for_evidence,
4002 )
4003 } else {
4004 (
4005 PowerOfTenStatus::Applied,
4006 "No unbounded loops or ambiguous iterator-driven `for` loops were observed."
4007 .to_string(),
4008 Vec::new(),
4009 )
4010 };
4011
4012 PowerOfTenRuleAudit {
4013 number: 2,
4014 title: "All loops have a fixed upper bound",
4015 status,
4016 detail,
4017 evidence,
4018 }
4019}
4020
4021fn build_power_of_ten_rule3(runtime: &RuntimeProfile) -> PowerOfTenRuleAudit {
4022 let total_hits = runtime.alloc_crate_hits + runtime.heap_allocation_hits;
4023 let runtime_hits = runtime.runtime_core_alloc_hits + runtime.runtime_core_heap_allocation_hits;
4024 let (status, detail) = if total_hits == 0 {
4025 (
4026 PowerOfTenStatus::Applied,
4027 "No heap-allocation motifs observed.".to_string(),
4028 )
4029 } else if runtime_hits == 0 {
4030 (
4031 PowerOfTenStatus::Applied,
4032 format!(
4033 "No runtime-core heap-allocation motifs were observed. {total_hits} allocation motif(s) remain in tooling, reporting, evaluation, or other non-runtime-support paths."
4034 ),
4035 )
4036 } else {
4037 (
4038 PowerOfTenStatus::NotApplied,
4039 format!(
4040 "{total_hits} heap-allocation motif(s) observed, including {runtime_hits} runtime-core signal(s). This crate-level scan cannot distinguish initialization-only allocation from steady-state allocation."
4041 ),
4042 )
4043 };
4044
4045 PowerOfTenRuleAudit {
4046 number: 3,
4047 title: "No dynamic allocation after initialization",
4048 status,
4049 detail,
4050 evidence: runtime.heap_allocation_evidence.clone(),
4051 }
4052}
4053
4054fn build_power_of_ten_rule4(functions: &[FunctionSummary]) -> PowerOfTenRuleAudit {
4055 let evidence = collect_long_function_evidence(functions, MAX_EVIDENCE_PER_SIGNAL);
4056 let long_function_count = functions
4057 .iter()
4058 .filter(|function| function.line_count > 60)
4059 .count();
4060
4061 PowerOfTenRuleAudit {
4062 number: 4,
4063 title: "Functions stay within a single-sheet size budget (~60 LOC)",
4064 status: if long_function_count == 0 {
4065 PowerOfTenStatus::Applied
4066 } else {
4067 PowerOfTenStatus::NotApplied
4068 },
4069 detail: if long_function_count == 0 {
4070 "No function over 60 lines was observed by the scanner.".to_string()
4071 } else {
4072 format!("{long_function_count} function(s) exceed the 60-line threshold.")
4073 },
4074 evidence,
4075 }
4076}
4077
4078fn build_power_of_ten_rule5(functions: &[FunctionSummary]) -> PowerOfTenRuleAudit {
4079 let total_assertions = functions
4080 .iter()
4081 .map(|function| function.assertion_count)
4082 .sum::<usize>();
4083 let avg_assertions = if functions.is_empty() {
4084 0.0
4085 } else {
4086 total_assertions as f64 / functions.len() as f64
4087 };
4088
4089 PowerOfTenRuleAudit {
4090 number: 5,
4091 title: "Assertion density averages at least two per function",
4092 status: if functions.is_empty() {
4093 PowerOfTenStatus::Indeterminate
4094 } else if avg_assertions >= 2.0 {
4095 PowerOfTenStatus::Applied
4096 } else {
4097 PowerOfTenStatus::NotApplied
4098 },
4099 detail: if functions.is_empty() {
4100 "No function bodies were extracted, so assertion density could not be estimated."
4101 .to_string()
4102 } else {
4103 format!(
4104 "Estimated assertion density is {:.2} per function across {} extracted function(s).",
4105 avg_assertions,
4106 functions.len()
4107 )
4108 },
4109 evidence: collect_low_assertion_evidence(functions, MAX_EVIDENCE_PER_SIGNAL),
4110 }
4111}
4112
4113fn build_power_of_ten_rule6(documents: &[SourceDocument]) -> PowerOfTenRuleAudit {
4114 let scan = scan_global_shared_resource_patterns(documents, MAX_EVIDENCE_PER_SIGNAL);
4115 PowerOfTenRuleAudit {
4116 number: 6,
4117 title: "Data objects remain at the smallest practical scope",
4118 status: if scan.total_hits == 0 {
4119 PowerOfTenStatus::Applied
4120 } else {
4121 PowerOfTenStatus::NotApplied
4122 },
4123 detail: if scan.total_hits == 0 {
4124 "No obvious crate-global mutable/shared state motifs were observed. This is only a proxy for scope minimization.".to_string()
4125 } else {
4126 format!(
4127 "{} crate-global mutable/shared state motif(s) observed.",
4128 scan.total_hits
4129 )
4130 },
4131 evidence: scan.evidence,
4132 }
4133}
4134
4135fn build_power_of_ten_rule7(
4136 documents: &[SourceDocument],
4137 safety: &SafetyProfile,
4138) -> PowerOfTenRuleAudit {
4139 let scan = scan_patterns(
4140 documents,
4141 P10_RULE7_EXPLICIT_IGNORE_PATTERNS,
4142 MAX_EVIDENCE_PER_SIGNAL,
4143 );
4144 let (status, detail) = if scan.total_hits > 0 || safety.unwrap_sites > 0 {
4145 (
4146 PowerOfTenStatus::NotApplied,
4147 format!(
4148 "{} explicit discard site(s) and {} unwrap/expect site(s) observed. Parameter validation cannot be proven by this scan.",
4149 scan.total_hits, safety.unwrap_sites
4150 ),
4151 )
4152 } else {
4153 (
4154 PowerOfTenStatus::Indeterminate,
4155 "No obvious unchecked-return motifs were observed, but parameter validation and full return-value propagation are not mechanically proven by this scanner."
4156 .to_string(),
4157 )
4158 };
4159 let mut evidence = scan.evidence;
4160 append_evidence(
4161 &mut evidence,
4162 safety.unwrap_evidence.clone(),
4163 MAX_EVIDENCE_PER_SIGNAL,
4164 );
4165
4166 PowerOfTenRuleAudit {
4167 number: 7,
4168 title: "Return values are checked and parameters are validated",
4169 status,
4170 detail,
4171 evidence,
4172 }
4173}
4174
4175fn build_power_of_ten_rule8(
4176 documents: &[SourceDocument],
4177 build: &BuildProfile,
4178) -> PowerOfTenRuleAudit {
4179 let rule8_cfg = scan_cfg_surface(documents, MAX_EVIDENCE_PER_SIGNAL);
4180 let rule8_macro = scan_patterns(documents, P10_RULE8_MACRO_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
4181 let mut evidence = rule8_cfg.evidence;
4182 append_evidence(&mut evidence, rule8_macro.evidence, MAX_EVIDENCE_PER_SIGNAL);
4183
4184 PowerOfTenRuleAudit {
4185 number: 8,
4186 title: "Conditional compilation and metaprogramming stay minimal",
4187 status: if build.proc_macro_crate || rule8_macro.total_hits > 0 || rule8_cfg.total_hits > 12
4188 {
4189 PowerOfTenStatus::NotApplied
4190 } else if rule8_cfg.total_hits > 4 {
4191 PowerOfTenStatus::Indeterminate
4192 } else {
4193 PowerOfTenStatus::Applied
4194 },
4195 detail: format!(
4196 "{} review-relevant conditional-compilation site(s), {} macro-definition/proc-macro site(s) observed. This is a Rust adaptation of the C preprocessor rule.",
4197 rule8_cfg.total_hits,
4198 rule8_macro.total_hits
4199 ),
4200 evidence,
4201 }
4202}
4203
4204fn build_power_of_ten_rule9(documents: &[SourceDocument]) -> PowerOfTenRuleAudit {
4205 let scan = scan_restricted_pointer_use(documents, MAX_EVIDENCE_PER_SIGNAL);
4206 PowerOfTenRuleAudit {
4207 number: 9,
4208 title: "Pointer use remains restricted",
4209 status: if scan.total_hits == 0 {
4210 PowerOfTenStatus::Applied
4211 } else {
4212 PowerOfTenStatus::NotApplied
4213 },
4214 detail: if scan.total_hits == 0 {
4215 "No raw-pointer or function-pointer motifs were observed.".to_string()
4216 } else {
4217 format!(
4218 "{} raw-pointer/function-pointer motif(s) observed.",
4219 scan.total_hits
4220 )
4221 },
4222 evidence: scan.evidence,
4223 }
4224}
4225
4226fn build_power_of_ten_rule10(artifact_documents: &[SourceDocument]) -> PowerOfTenRuleAudit {
4227 let rule10_warnings = scan_patterns(
4228 artifact_documents,
4229 P10_RULE10_WARNING_PATTERNS,
4230 MAX_EVIDENCE_PER_SIGNAL,
4231 );
4232 let rule10_analyzers = scan_patterns(
4233 artifact_documents,
4234 P10_RULE10_ANALYZER_PATTERNS,
4235 MAX_EVIDENCE_PER_SIGNAL,
4236 );
4237 let mut evidence = rule10_warnings.evidence;
4238 append_evidence(
4239 &mut evidence,
4240 rule10_analyzers.evidence,
4241 MAX_EVIDENCE_PER_SIGNAL,
4242 );
4243 let (status, detail) = if rule10_warnings.total_hits > 0 && rule10_analyzers.total_hits > 0 {
4244 (
4245 PowerOfTenStatus::Applied,
4246 format!(
4247 "Observed warning-strictness signal(s) ({}) and static-analysis signal(s) ({}). Daily cadence and zero-warning status are not provable from packaged crate sources.",
4248 rule10_warnings.total_hits, rule10_analyzers.total_hits
4249 ),
4250 )
4251 } else if rule10_warnings.total_hits > 0 || rule10_analyzers.total_hits > 0 {
4252 (
4253 PowerOfTenStatus::Indeterminate,
4254 format!(
4255 "Observed warning/analyzer signal(s), but the full Power-of-Ten requirement for pedantic warnings plus regular analyzer use is not established. Warning signals: {}, analyzer signals: {}.",
4256 rule10_warnings.total_hits, rule10_analyzers.total_hits
4257 ),
4258 )
4259 } else {
4260 (
4261 PowerOfTenStatus::NotApplied,
4262 "No warning-strictness or static-analyzer signals were observed in the packaged crate artifacts. This may under-report projects whose CI metadata is not published with the crate.".to_string(),
4263 )
4264 };
4265
4266 PowerOfTenRuleAudit {
4267 number: 10,
4268 title: "Pedantic warnings and static analyzers are enforced",
4269 status,
4270 detail,
4271 evidence,
4272 }
4273}
4274
4275fn build_recursion_check(functions: &[FunctionSummary]) -> AdvancedStructuralCheck {
4276 let (direct_recursion_hits, direct_recursion_evidence) =
4277 collect_direct_recursion_evidence(functions, MAX_EVIDENCE_PER_SIGNAL);
4278 let (indirect_cycle_hits, indirect_cycle_evidence) =
4279 collect_indirect_recursion_evidence(functions, MAX_EVIDENCE_PER_SIGNAL);
4280 let total_hits = direct_recursion_hits + indirect_cycle_hits;
4281 let mut evidence = direct_recursion_evidence;
4282 append_evidence(
4283 &mut evidence,
4284 indirect_cycle_evidence,
4285 MAX_EVIDENCE_PER_SIGNAL,
4286 );
4287
4288 AdvancedStructuralCheck {
4289 id: "JPL-R0",
4290 title: "Recursion and cyclic call graph audit",
4291 status: if total_hits == 0 {
4292 StructuralCheckStatus::Clear
4293 } else {
4294 StructuralCheckStatus::Elevated
4295 },
4296 detail: if total_hits == 0 {
4297 "No direct recursion or local call-cycle motifs were observed.".to_string()
4298 } else {
4299 format!(
4300 "{} direct-recursion hit(s) and {} local indirect cycle(s) observed.",
4301 direct_recursion_hits, indirect_cycle_hits
4302 )
4303 },
4304 evidence,
4305 }
4306}
4307
4308fn build_interior_mutability_check(documents: &[SourceDocument]) -> AdvancedStructuralCheck {
4309 let scan = scan_patterns(
4310 documents,
4311 INTERIOR_MUTABILITY_PATTERNS,
4312 MAX_EVIDENCE_PER_SIGNAL,
4313 );
4314 AdvancedStructuralCheck {
4315 id: "JPL-R4",
4316 title: "Data-flow traceability / interior mutability audit",
4317 status: if scan.total_hits == 0 {
4318 StructuralCheckStatus::Clear
4319 } else {
4320 StructuralCheckStatus::Elevated
4321 },
4322 detail: if scan.total_hits == 0 {
4323 "No interior-mutability motifs were observed.".to_string()
4324 } else {
4325 format!(
4326 "{} interior-mutability motif(s) observed (Cell/RefCell/UnsafeCell/atomic types).",
4327 scan.total_hits
4328 )
4329 },
4330 evidence: scan.evidence,
4331 }
4332}
4333
4334fn build_unwrap_safety_check(safety: &SafetyProfile) -> AdvancedStructuralCheck {
4335 AdvancedStructuralCheck {
4336 id: "JPL-R9",
4337 title: "Unchecked extraction / dereference safety audit",
4338 status: if safety.unwrap_sites == 0 {
4339 StructuralCheckStatus::Clear
4340 } else {
4341 StructuralCheckStatus::Elevated
4342 },
4343 detail: if safety.unwrap_sites == 0 {
4344 "No unwrap/expect extraction sites were observed.".to_string()
4345 } else {
4346 format!(
4347 "{} unwrap/expect-like site(s) observed; these deserve explicit invariant review in high-assurance code.",
4348 safety.unwrap_sites
4349 )
4350 },
4351 evidence: safety.unwrap_evidence.clone(),
4352 }
4353}
4354
4355fn build_complexity_check(functions: &[FunctionSummary]) -> AdvancedStructuralCheck {
4356 let complexity_hotspots = collect_complexity_hotspots(functions, MAX_EVIDENCE_PER_SIGNAL);
4357 let exceedances = complexity_hotspots
4358 .iter()
4359 .filter(|entry| entry.estimated_complexity > 15)
4360 .count();
4361 let evidence = complexity_hotspots
4362 .iter()
4363 .take(MAX_EVIDENCE_PER_SIGNAL)
4364 .map(|entry| ScanEvidence {
4365 path: entry.path.clone(),
4366 line_number: entry.start_line,
4367 pattern: "estimated cyclomatic complexity",
4368 snippet: format!(
4369 "function `{}` has estimated complexity {}",
4370 entry.function_name, entry.estimated_complexity
4371 ),
4372 })
4373 .collect();
4374
4375 AdvancedStructuralCheck {
4376 id: "NASA-CC",
4377 title: "Cyclomatic complexity hotspot audit (NASA SWE-220 proxy)",
4378 status: if exceedances > 0 {
4379 StructuralCheckStatus::Elevated
4380 } else if complexity_hotspots.is_empty() {
4381 StructuralCheckStatus::Indeterminate
4382 } else {
4383 StructuralCheckStatus::Clear
4384 },
4385 detail: if complexity_hotspots.is_empty() {
4386 "No function summaries were extracted, so complexity could not be estimated."
4387 .to_string()
4388 } else {
4389 format!(
4390 "{} extracted hotspot(s); {} exceed the NASA safety-critical threshold of 15 by this lightweight estimate.",
4391 complexity_hotspots.len(),
4392 exceedances
4393 )
4394 },
4395 evidence,
4396 }
4397}
4398
4399fn build_async_lock_check(documents: &[SourceDocument]) -> AdvancedStructuralCheck {
4400 let scan = scan_patterns(documents, ASYNC_LOCK_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
4401 AdvancedStructuralCheck {
4402 id: "H-ASYNC-LOCK",
4403 title: "Async lock contention / priority inversion proxy",
4404 status: if scan.total_hits == 0 {
4405 StructuralCheckStatus::Clear
4406 } else {
4407 StructuralCheckStatus::Elevated
4408 },
4409 detail: if scan.total_hits == 0 {
4410 "No async lock contention motifs were observed.".to_string()
4411 } else {
4412 format!(
4413 "{} async lock motif(s) observed. This is a priority-inversion proxy, not proof of scheduler-level priority inversion.",
4414 scan.total_hits
4415 )
4416 },
4417 evidence: scan.evidence,
4418 }
4419}
4420
4421fn build_safe_state_check(documents: &[SourceDocument]) -> AdvancedStructuralCheck {
4422 let scan = scan_patterns(documents, CATCH_ALL_MATCH_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
4423 AdvancedStructuralCheck {
4424 id: "SAFE-STATE",
4425 title: "Catch-all state handling / safe-state fallback audit",
4426 status: if scan.total_hits == 0 {
4427 StructuralCheckStatus::Clear
4428 } else {
4429 StructuralCheckStatus::Elevated
4430 },
4431 detail: if scan.total_hits == 0 {
4432 "No `_ =>` catch-all state transitions were observed.".to_string()
4433 } else {
4434 format!(
4435 "{} catch-all match arm(s) observed; explicit state enumeration is preferable for safety review.",
4436 scan.total_hits
4437 )
4438 },
4439 evidence: scan.evidence,
4440 }
4441}
4442
4443fn build_time_wait_check(documents: &[SourceDocument]) -> AdvancedStructuralCheck {
4444 let scan = scan_patterns(documents, HARD_CODED_WAIT_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
4445 AdvancedStructuralCheck {
4446 id: "TIME-WAIT",
4447 title: "Hard-coded timing assumption audit",
4448 status: if scan.total_hits == 0 {
4449 StructuralCheckStatus::Clear
4450 } else {
4451 StructuralCheckStatus::Elevated
4452 },
4453 detail: if scan.total_hits == 0 {
4454 "No hard-coded sleep/timing-wait motifs were observed.".to_string()
4455 } else {
4456 format!(
4457 "{} hard-coded wait motif(s) observed. Review whether these are deterministic control waits or deadline-free timing assumptions.",
4458 scan.total_hits
4459 )
4460 },
4461 evidence: scan.evidence,
4462 }
4463}
4464
4465fn build_partition_space_check(documents: &[SourceDocument]) -> AdvancedStructuralCheck {
4466 let scan = scan_global_shared_resource_patterns(documents, MAX_EVIDENCE_PER_SIGNAL);
4467 AdvancedStructuralCheck {
4468 id: "PART-SPACE",
4469 title: "Global shared-resource / partitioning-risk audit",
4470 status: if scan.total_hits == 0 {
4471 StructuralCheckStatus::Clear
4472 } else {
4473 StructuralCheckStatus::Elevated
4474 },
4475 detail: if scan.total_hits == 0 {
4476 "No obvious global shared-resource motifs were observed.".to_string()
4477 } else {
4478 format!(
4479 "{} global shared-resource motif(s) observed.",
4480 scan.total_hits
4481 )
4482 },
4483 evidence: scan.evidence,
4484 }
4485}
4486
4487fn build_plugin_load_check(artifact_documents: &[SourceDocument]) -> AdvancedStructuralCheck {
4488 let scan = scan_patterns(
4489 artifact_documents,
4490 DYNAMIC_LOADING_PATTERNS,
4491 MAX_EVIDENCE_PER_SIGNAL,
4492 );
4493 AdvancedStructuralCheck {
4494 id: "PLUGIN-LOAD",
4495 title: "Dynamic loading / plugin sandbox audit",
4496 status: if scan.total_hits == 0 {
4497 StructuralCheckStatus::Clear
4498 } else {
4499 StructuralCheckStatus::Elevated
4500 },
4501 detail: if scan.total_hits == 0 {
4502 "No dynamic loading motifs were observed.".to_string()
4503 } else {
4504 format!("{} dynamic loading motif(s) observed.", scan.total_hits)
4505 },
4506 evidence: scan.evidence,
4507 }
4508}
4509
4510fn build_resource_lifecycle_check(documents: &[SourceDocument]) -> AdvancedStructuralCheck {
4511 let scan = scan_patterns(
4512 documents,
4513 RESOURCE_LIFECYCLE_PATTERNS,
4514 MAX_EVIDENCE_PER_SIGNAL,
4515 );
4516 AdvancedStructuralCheck {
4517 id: "CWE-404",
4518 title: "Manual resource-lifecycle / shutdown audit",
4519 status: if scan.total_hits == 0 {
4520 StructuralCheckStatus::Clear
4521 } else {
4522 StructuralCheckStatus::Elevated
4523 },
4524 detail: if scan.total_hits == 0 {
4525 "No manual resource-lifecycle motifs were observed.".to_string()
4526 } else {
4527 format!(
4528 "{} manual lifecycle motif(s) observed (raw handles, forget, ManuallyDrop, mmap).",
4529 scan.total_hits
4530 )
4531 },
4532 evidence: scan.evidence,
4533 }
4534}
4535
4536fn build_command_buffer_check(documents: &[SourceDocument]) -> AdvancedStructuralCheck {
4537 let command_scan = scan_patterns(documents, COMMAND_BUFFER_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
4538 let ttl_scan = scan_patterns(documents, TTL_GUARD_PATTERNS, MAX_EVIDENCE_PER_SIGNAL);
4539 let mut evidence = command_scan.evidence;
4540 append_evidence(&mut evidence, ttl_scan.evidence, MAX_EVIDENCE_PER_SIGNAL);
4541 let (status, detail) = if command_scan.total_hits == 0 {
4542 (
4543 StructuralCheckStatus::Clear,
4544 "No command/control queue motifs were observed.".to_string(),
4545 )
4546 } else if ttl_scan.total_hits == 0 {
4547 (
4548 StructuralCheckStatus::Elevated,
4549 format!(
4550 "{} command/control queue motif(s) observed without TTL/staleness/sequence guard signals.",
4551 command_scan.total_hits
4552 ),
4553 )
4554 } else {
4555 (
4556 StructuralCheckStatus::Indeterminate,
4557 format!(
4558 "{} command/control queue motif(s) and {} freshness-guard motif(s) observed.",
4559 command_scan.total_hits, ttl_scan.total_hits
4560 ),
4561 )
4562 };
4563
4564 AdvancedStructuralCheck {
4565 id: "CMD-BUF",
4566 title: "Hazardous command buffering audit",
4567 status,
4568 detail,
4569 evidence,
4570 }
4571}
4572
4573fn build_iterator_bound_check(functions: &[FunctionSummary]) -> AdvancedStructuralCheck {
4574 let (hits, evidence) = collect_unbounded_iterator_evidence(functions, MAX_EVIDENCE_PER_SIGNAL);
4575 AdvancedStructuralCheck {
4576 id: "ITER-UNB",
4577 title: "Unbounded iterator terminal-consumption audit",
4578 status: if hits == 0 {
4579 StructuralCheckStatus::Clear
4580 } else {
4581 StructuralCheckStatus::Elevated
4582 },
4583 detail: if hits == 0 {
4584 "No iterator terminal-consumption sites lacking an obvious `.take()` bound were observed."
4585 .to_string()
4586 } else {
4587 format!(
4588 "{hits} iterator terminal site(s) use collect/fold/count/last/sum without an obvious `.take()` or single-step bound."
4589 )
4590 },
4591 evidence,
4592 }
4593}
4594
4595fn build_isr_safety_check(functions: &[FunctionSummary]) -> AdvancedStructuralCheck {
4596 let (hits, evidence) = collect_interrupt_safety_evidence(functions, MAX_EVIDENCE_PER_SIGNAL);
4597 AdvancedStructuralCheck {
4598 id: "ISR-SAFE",
4599 title: "Interrupt-context allocation / lock audit",
4600 status: if hits == 0 {
4601 StructuralCheckStatus::Clear
4602 } else {
4603 StructuralCheckStatus::Elevated
4604 },
4605 detail: if hits == 0 {
4606 "No interrupt handlers with allocation or mutex/lock motifs were observed.".to_string()
4607 } else {
4608 format!(
4609 "{hits} interrupt-context site(s) contain allocation or lock motifs that deserve ISR safety review."
4610 )
4611 },
4612 evidence,
4613 }
4614}
4615
4616fn build_future_wake_check(functions: &[FunctionSummary]) -> AdvancedStructuralCheck {
4617 let (hits, evidence) =
4618 collect_pending_without_waker_evidence(functions, MAX_EVIDENCE_PER_SIGNAL);
4619 AdvancedStructuralCheck {
4620 id: "FUTURE-WAKE",
4621 title: "Manual Future pending-without-waker audit",
4622 status: if hits == 0 {
4623 StructuralCheckStatus::Clear
4624 } else {
4625 StructuralCheckStatus::Elevated
4626 },
4627 detail: if hits == 0 {
4628 "No manual `Poll::Pending` sites without local wake registration were observed."
4629 .to_string()
4630 } else {
4631 format!(
4632 "{hits} manual poll function(s) return `Poll::Pending` without an obvious waker registration motif."
4633 )
4634 },
4635 evidence,
4636 }
4637}
4638
4639fn build_task_leak_check(documents: &[SourceDocument]) -> AdvancedStructuralCheck {
4640 let scan = scan_join_handle_discard(documents, MAX_EVIDENCE_PER_SIGNAL);
4641 AdvancedStructuralCheck {
4642 id: "TASK-LEAK",
4643 title: "Detached-task / discarded JoinHandle audit",
4644 status: if scan.total_hits == 0 {
4645 StructuralCheckStatus::Clear
4646 } else {
4647 StructuralCheckStatus::Elevated
4648 },
4649 detail: if scan.total_hits == 0 {
4650 "No explicit discarded Tokio JoinHandle sites were observed.".to_string()
4651 } else {
4652 format!(
4653 "{} Tokio spawn/spawn_blocking site(s) appear to discard the JoinHandle explicitly.",
4654 scan.total_hits
4655 )
4656 },
4657 evidence: scan.evidence,
4658 }
4659}
4660
4661fn build_drop_panic_check(documents: &[SourceDocument]) -> AdvancedStructuralCheck {
4662 let (hits, evidence) = collect_panic_in_drop_evidence(documents, MAX_EVIDENCE_PER_SIGNAL);
4663 AdvancedStructuralCheck {
4664 id: "DROP-PANIC",
4665 title: "Panic-in-Drop audit",
4666 status: if hits == 0 {
4667 StructuralCheckStatus::Clear
4668 } else {
4669 StructuralCheckStatus::Elevated
4670 },
4671 detail: if hits == 0 {
4672 "No panic-like sites were observed inside `impl Drop` bodies.".to_string()
4673 } else {
4674 format!("{hits} panic-like site(s) were observed inside `impl Drop` bodies.")
4675 },
4676 evidence,
4677 }
4678}
4679
4680fn build_relaxed_atomic_check(functions: &[FunctionSummary]) -> AdvancedStructuralCheck {
4681 let (hits, evidence) =
4682 collect_relaxed_atomic_state_evidence(functions, MAX_EVIDENCE_PER_SIGNAL);
4683 AdvancedStructuralCheck {
4684 id: "ATOMIC-RELAXED",
4685 title: "Relaxed atomic ordering on critical-state paths",
4686 status: if hits == 0 {
4687 StructuralCheckStatus::Clear
4688 } else {
4689 StructuralCheckStatus::Elevated
4690 },
4691 detail: if hits == 0 {
4692 "No `Ordering::Relaxed` sites were observed on functions that also look like critical state-transition logic."
4693 .to_string()
4694 } else {
4695 format!(
4696 "{hits} function(s) combine `Ordering::Relaxed` with consensus/state-transition motifs."
4697 )
4698 },
4699 evidence,
4700 }
4701}
4702
4703fn build_clock_mix_check(functions: &[FunctionSummary]) -> AdvancedStructuralCheck {
4704 let (hits, evidence) = collect_mixed_clock_source_evidence(functions, MAX_EVIDENCE_PER_SIGNAL);
4705 AdvancedStructuralCheck {
4706 id: "CLOCK-MIX",
4707 title: "Mixed monotonic/wall-clock duration audit",
4708 status: if hits == 0 {
4709 StructuralCheckStatus::Clear
4710 } else {
4711 StructuralCheckStatus::Elevated
4712 },
4713 detail: if hits == 0 {
4714 "No functions were observed mixing `Instant::now()` and `SystemTime::now()`."
4715 .to_string()
4716 } else {
4717 format!(
4718 "{hits} function(s) mix monotonic and wall-clock sources and deserve temporal-integrity review."
4719 )
4720 },
4721 evidence,
4722 }
4723}
4724
4725fn build_short_write_check(functions: &[FunctionSummary]) -> AdvancedStructuralCheck {
4726 let (hits, evidence) = collect_short_write_evidence(functions, MAX_EVIDENCE_PER_SIGNAL);
4727 AdvancedStructuralCheck {
4728 id: "SHORT-WRITE",
4729 title: "Partial-write / Interrupted handling audit",
4730 status: if hits == 0 {
4731 StructuralCheckStatus::Clear
4732 } else {
4733 StructuralCheckStatus::Elevated
4734 },
4735 detail: if hits == 0 {
4736 "No single-call `.write(...)` sites lacking an obvious retry or `write_all` handling path were observed."
4737 .to_string()
4738 } else {
4739 format!(
4740 "{hits} function(s) call `.write(...)` without an obvious `write_all` or `Interrupted` handling path."
4741 )
4742 },
4743 evidence,
4744 }
4745}
4746
4747fn build_async_recursion_check(functions: &[FunctionSummary]) -> AdvancedStructuralCheck {
4748 let (unbounded_hits, bounded_hits, evidence) =
4749 collect_async_recursion_evidence(functions, MAX_EVIDENCE_PER_SIGNAL);
4750 let (status, detail) = if unbounded_hits > 0 {
4751 (
4752 StructuralCheckStatus::Elevated,
4753 format!(
4754 "{unbounded_hits} async-recursive function(s) were observed without an obvious depth-limit signal."
4755 ),
4756 )
4757 } else if bounded_hits > 0 {
4758 (
4759 StructuralCheckStatus::Indeterminate,
4760 format!(
4761 "{bounded_hits} async-recursive function(s) were observed with a possible depth/base-case signal, but the limit is not mechanically proven."
4762 ),
4763 )
4764 } else {
4765 (
4766 StructuralCheckStatus::Clear,
4767 "No async-recursive function attributes were observed.".to_string(),
4768 )
4769 };
4770
4771 AdvancedStructuralCheck {
4772 id: "ASYNC-RECUR",
4773 title: "Async recursion depth-bound audit",
4774 status,
4775 detail,
4776 evidence,
4777 }
4778}
4779
4780fn build_unbounded_channel_check(documents: &[SourceDocument]) -> AdvancedStructuralCheck {
4781 let scan = scan_patterns(
4782 documents,
4783 UNBOUNDED_CHANNEL_PATTERNS,
4784 MAX_EVIDENCE_PER_SIGNAL,
4785 );
4786 AdvancedStructuralCheck {
4787 id: "CHAN-UNB",
4788 title: "Unbounded async command-queue audit",
4789 status: if scan.total_hits == 0 {
4790 StructuralCheckStatus::Clear
4791 } else {
4792 StructuralCheckStatus::Elevated
4793 },
4794 detail: if scan.total_hits == 0 {
4795 "No `mpsc::unbounded_channel` sites were observed.".to_string()
4796 } else {
4797 format!(
4798 "{} `mpsc::unbounded_channel` site(s) were observed in the scanned crate.",
4799 scan.total_hits
4800 )
4801 },
4802 evidence: scan.evidence,
4803 }
4804}
4805
4806fn build_zero_copy_check(functions: &[FunctionSummary]) -> AdvancedStructuralCheck {
4807 let (hits, evidence) = collect_zero_copy_violation_evidence(functions, MAX_EVIDENCE_PER_SIGNAL);
4808 AdvancedStructuralCheck {
4809 id: "ZERO-COPY",
4810 title: "Copy-on-read / zero-copy provenance audit",
4811 status: if hits == 0 {
4812 StructuralCheckStatus::Clear
4813 } else {
4814 StructuralCheckStatus::Elevated
4815 },
4816 detail: if hits == 0 {
4817 "No read-buffer copy-on-read motifs were observed.".to_string()
4818 } else {
4819 format!(
4820 "{hits} function(s) copy buffers with `.to_vec()` or `.clone()` on apparent read paths."
4821 )
4822 },
4823 evidence,
4824 }
4825}
4826
4827fn build_version_drift_check(documents: &[SourceDocument]) -> AdvancedStructuralCheck {
4828 let scan = scan_dependency_version_drift(documents, MAX_EVIDENCE_PER_SIGNAL);
4829 AdvancedStructuralCheck {
4830 id: "CARGO-VERS",
4831 title: "Dependency version drift / reproducibility audit",
4832 status: if scan.total_hits == 0 {
4833 StructuralCheckStatus::Clear
4834 } else {
4835 StructuralCheckStatus::Elevated
4836 },
4837 detail: if scan.total_hits == 0 {
4838 "No wildcard or open-ended dependency version requirements were observed.".to_string()
4839 } else {
4840 format!(
4841 "{} dependency requirement(s) look wildcard or open-ended and deserve reproducibility review.",
4842 scan.total_hits
4843 )
4844 },
4845 evidence: scan.evidence,
4846 }
4847}
4848
4849fn append_evidence(target: &mut Vec<ScanEvidence>, source: Vec<ScanEvidence>, limit: usize) {
4850 for item in source.into_iter() {
4851 if target.len() >= limit {
4852 break;
4853 }
4854 let already_present = target.iter().any(|existing| {
4855 existing.path == item.path
4856 && existing.line_number == item.line_number
4857 && existing.pattern == item.pattern
4858 && existing.snippet == item.snippet
4859 });
4860 if !already_present {
4861 target.push(item);
4862 }
4863 }
4864}
4865
4866fn collect_direct_recursion_evidence(
4867 functions: &[FunctionSummary],
4868 limit: usize,
4869) -> (usize, Vec<ScanEvidence>) {
4870 let mut total_hits = 0usize;
4871 let mut evidence = Vec::new();
4872
4873 for function in functions {
4874 let signature_needle = format!("fn {}", function.lowered_name);
4875
4876 for (offset, line) in function.body.lines().enumerate() {
4877 let trimmed = line.trim();
4878 if trimmed.starts_with("//") || trimmed.starts_with("///") || trimmed.starts_with('*') {
4879 continue;
4880 }
4881 let lowered = trimmed.to_ascii_lowercase();
4882 if function_calls_name(&lowered, &function.lowered_name)
4883 && !lowered.contains(&signature_needle)
4884 {
4885 total_hits += 1;
4886 if evidence.len() < limit {
4887 evidence.push(ScanEvidence {
4888 path: function.path.clone(),
4889 line_number: function.start_line + offset,
4890 pattern: "direct recursion",
4891 snippet: trimmed.to_string(),
4892 });
4893 }
4894 }
4895 }
4896 }
4897
4898 (total_hits, evidence)
4899}
4900
4901fn collect_indirect_recursion_evidence(
4902 functions: &[FunctionSummary],
4903 limit: usize,
4904) -> (usize, Vec<ScanEvidence>) {
4905 let mut functions_by_path: BTreeMap<PathBuf, Vec<&FunctionSummary>> = BTreeMap::new();
4906 for function in functions {
4907 functions_by_path
4908 .entry(function.path.clone())
4909 .or_default()
4910 .push(function);
4911 }
4912
4913 let mut hits = 0usize;
4914 let mut evidence = Vec::new();
4915
4916 for local_functions in functions_by_path.values() {
4917 for (idx, function) in local_functions.iter().enumerate() {
4918 for target in local_functions.iter().skip(idx + 1) {
4919 if function_calls_name(&function.lowered_body, &target.lowered_name)
4920 && function_calls_name(&target.lowered_body, &function.lowered_name)
4921 {
4922 hits += 1;
4923 if evidence.len() < limit {
4924 evidence.push(ScanEvidence {
4925 path: function.path.clone(),
4926 line_number: function.start_line,
4927 pattern: "local indirect recursion cycle",
4928 snippet: format!(
4929 "function `{}` appears mutually recursive with `{}`",
4930 function.name, target.name
4931 ),
4932 });
4933 }
4934 }
4935 }
4936 }
4937 }
4938
4939 (hits, evidence)
4940}
4941
4942fn collect_complexity_hotspots(
4943 functions: &[FunctionSummary],
4944 limit: usize,
4945) -> Vec<CriticalityHotspot> {
4946 let mut hotspots = build_criticality_hotspots(functions);
4947 hotspots.sort_by(|a, b| {
4948 b.estimated_complexity
4949 .cmp(&a.estimated_complexity)
4950 .then_with(|| b.risk_score.cmp(&a.risk_score))
4951 });
4952 hotspots.into_iter().take(limit).collect()
4953}
4954
4955fn collect_long_function_evidence(
4956 functions: &[FunctionSummary],
4957 limit: usize,
4958) -> Vec<ScanEvidence> {
4959 let mut long_functions = functions
4960 .iter()
4961 .filter(|function| function.line_count > 60)
4962 .collect::<Vec<_>>();
4963 long_functions.sort_by(|a, b| b.line_count.cmp(&a.line_count));
4964
4965 long_functions
4966 .into_iter()
4967 .take(limit)
4968 .map(|function| ScanEvidence {
4969 path: function.path.clone(),
4970 line_number: function.start_line,
4971 pattern: "function length > 60 lines",
4972 snippet: format!(
4973 "function `{}` spans {} lines",
4974 function.name, function.line_count
4975 ),
4976 })
4977 .collect()
4978}
4979
4980fn collect_unbounded_iterator_evidence(
4981 functions: &[FunctionSummary],
4982 limit: usize,
4983) -> (usize, Vec<ScanEvidence>) {
4984 let mut hits = 0usize;
4985 let mut evidence = Vec::new();
4986
4987 for function in functions {
4988 if !function_contains_code_pattern(function, ITERATOR_TERMINAL_PATTERNS)
4989 || function_contains_code_pattern(function, ITERATOR_BOUND_PATTERNS)
4990 || (!function_contains_code_pattern(function, OPEN_ENDED_ITERATOR_PATTERNS)
4991 && !body_contains_any(&function.lowered_signature, OPEN_ENDED_ITERATOR_PATTERNS))
4992 {
4993 continue;
4994 }
4995 if let Some(item) = first_matching_line(function, ITERATOR_TERMINAL_PATTERNS) {
4996 hits += 1;
4997 if evidence.len() < limit {
4998 evidence.push(item);
4999 }
5000 }
5001 }
5002
5003 (hits, evidence)
5004}
5005
5006fn collect_interrupt_safety_evidence(
5007 functions: &[FunctionSummary],
5008 limit: usize,
5009) -> (usize, Vec<ScanEvidence>) {
5010 let mut hits = 0usize;
5011 let mut evidence = Vec::new();
5012
5013 for function in functions {
5014 if !body_contains_any(&function.lowered_attributes, INTERRUPT_ATTRIBUTE_PATTERNS) {
5015 continue;
5016 }
5017 if let Some(item) = first_matching_line(function, ISR_FORBIDDEN_PATTERNS) {
5018 hits += 1;
5019 if evidence.len() < limit {
5020 evidence.push(item);
5021 }
5022 }
5023 }
5024
5025 (hits, evidence)
5026}
5027
5028fn collect_pending_without_waker_evidence(
5029 functions: &[FunctionSummary],
5030 limit: usize,
5031) -> (usize, Vec<ScanEvidence>) {
5032 let mut hits = 0usize;
5033 let mut evidence = Vec::new();
5034
5035 for function in functions {
5036 let looks_like_manual_poll = function.name == "poll"
5037 && (function.lowered_signature.contains("-> poll<")
5038 || function.lowered_signature.contains("-> core::task::poll<")
5039 || function.lowered_signature.contains("-> std::task::poll<"));
5040 if !looks_like_manual_poll
5041 || !function_contains_code_pattern(function, MANUAL_POLL_PENDING_PATTERNS)
5042 || function_contains_code_pattern(function, WAKE_PATTERNS)
5043 {
5044 continue;
5045 }
5046
5047 if let Some(item) = first_matching_line(function, MANUAL_POLL_PENDING_PATTERNS) {
5048 hits += 1;
5049 if evidence.len() < limit {
5050 evidence.push(item);
5051 }
5052 }
5053 }
5054
5055 (hits, evidence)
5056}
5057
5058fn collect_panic_in_drop_evidence(
5059 documents: &[SourceDocument],
5060 limit: usize,
5061) -> (usize, Vec<ScanEvidence>) {
5062 let mut hits = 0usize;
5063 let mut evidence = Vec::new();
5064
5065 for document in documents.iter().filter(|document| {
5066 document
5067 .relative_path
5068 .extension()
5069 .and_then(|ext| ext.to_str())
5070 == Some("rs")
5071 }) {
5072 let lines = document.risk_contents.lines().collect::<Vec<_>>();
5073 let mut next_idx = 0usize;
5074 for idx in 0..lines.len() {
5075 if idx < next_idx {
5076 continue;
5077 }
5078 let lowered = lines[idx].trim().to_ascii_lowercase();
5079 if !lowered.contains("impl drop for ") {
5080 continue;
5081 }
5082
5083 let Some((block_start, block_end)) = extract_braced_block(&lines, idx) else {
5084 continue;
5085 };
5086
5087 for (line_idx, line) in lines
5088 .iter()
5089 .enumerate()
5090 .skip(block_start)
5091 .take(block_end + 1)
5092 {
5093 let lowered_line = line.trim().to_ascii_lowercase();
5094 for &pattern in PANIC_PATTERNS {
5095 if lowered_line.contains(pattern) {
5096 hits += 1;
5097 if evidence.len() < limit {
5098 evidence.push(ScanEvidence {
5099 path: document.relative_path.clone(),
5100 line_number: line_idx + 1,
5101 pattern,
5102 snippet: line.trim().to_string(),
5103 });
5104 }
5105 }
5106 }
5107 }
5108
5109 next_idx = block_end + 1;
5110 }
5111 }
5112
5113 (hits, evidence)
5114}
5115
5116fn collect_relaxed_atomic_state_evidence(
5117 functions: &[FunctionSummary],
5118 limit: usize,
5119) -> (usize, Vec<ScanEvidence>) {
5120 let mut hits = 0usize;
5121 let mut evidence = Vec::new();
5122
5123 for function in functions {
5124 if !function_contains_code_pattern(function, RELAXED_ORDERING_PATTERNS)
5125 || !function_contains_code_pattern(function, CRITICAL_STATE_PATTERNS)
5126 {
5127 continue;
5128 }
5129 if let Some(item) = first_matching_line(function, RELAXED_ORDERING_PATTERNS) {
5130 hits += 1;
5131 if evidence.len() < limit {
5132 evidence.push(item);
5133 }
5134 }
5135 }
5136
5137 (hits, evidence)
5138}
5139
5140fn collect_mixed_clock_source_evidence(
5141 functions: &[FunctionSummary],
5142 limit: usize,
5143) -> (usize, Vec<ScanEvidence>) {
5144 let mut hits = 0usize;
5145 let mut evidence = Vec::new();
5146
5147 for function in functions {
5148 if !(function_contains_code_pattern(function, &["instant::now("])
5149 && function_contains_code_pattern(function, &["systemtime::now("]))
5150 {
5151 continue;
5152 }
5153 hits += 1;
5154 if evidence.len() < limit {
5155 evidence.push(ScanEvidence {
5156 path: function.path.clone(),
5157 line_number: function.start_line,
5158 pattern: "instant::now() + systemtime::now()",
5159 snippet: format!(
5160 "function `{}` mixes monotonic and wall-clock time sources",
5161 function.name
5162 ),
5163 });
5164 }
5165 }
5166
5167 (hits, evidence)
5168}
5169
5170fn collect_short_write_evidence(
5171 functions: &[FunctionSummary],
5172 limit: usize,
5173) -> (usize, Vec<ScanEvidence>) {
5174 let mut hits = 0usize;
5175 let mut evidence = Vec::new();
5176
5177 for function in functions {
5178 if !function_contains_code_pattern(function, WRITE_CALL_PATTERNS)
5179 || function_contains_code_pattern(function, &["write_all("])
5180 || function_contains_code_pattern(function, WRITE_HANDLING_PATTERNS)
5181 {
5182 continue;
5183 }
5184 if let Some(item) = first_matching_line(function, WRITE_CALL_PATTERNS) {
5185 hits += 1;
5186 if evidence.len() < limit {
5187 evidence.push(item);
5188 }
5189 }
5190 }
5191
5192 (hits, evidence)
5193}
5194
5195fn collect_async_recursion_evidence(
5196 functions: &[FunctionSummary],
5197 limit: usize,
5198) -> (usize, usize, Vec<ScanEvidence>) {
5199 let mut unbounded_hits = 0usize;
5200 let mut bounded_hits = 0usize;
5201 let mut evidence = Vec::new();
5202
5203 for function in functions {
5204 if !body_contains_any(&function.lowered_attributes, ASYNC_RECURSION_PATTERNS) {
5205 continue;
5206 }
5207
5208 let has_depth_signal = function_contains_code_pattern(function, DEPTH_BOUND_PATTERNS)
5209 || body_contains_any(&function.lowered_signature, DEPTH_BOUND_PATTERNS)
5210 || body_contains_any(&function.lowered_attributes, DEPTH_BOUND_PATTERNS);
5211
5212 if has_depth_signal {
5213 bounded_hits += 1;
5214 } else {
5215 unbounded_hits += 1;
5216 }
5217
5218 if evidence.len() < limit {
5219 evidence.push(ScanEvidence {
5220 path: function.path.clone(),
5221 line_number: function.start_line,
5222 pattern: "#[async_recursion]",
5223 snippet: if has_depth_signal {
5224 format!(
5225 "function `{}` uses async recursion with a possible depth/base-case signal",
5226 function.name
5227 )
5228 } else {
5229 format!(
5230 "function `{}` uses async recursion without an obvious depth-limit signal",
5231 function.name
5232 )
5233 },
5234 });
5235 }
5236 }
5237
5238 (unbounded_hits, bounded_hits, evidence)
5239}
5240
5241fn collect_zero_copy_violation_evidence(
5242 functions: &[FunctionSummary],
5243 limit: usize,
5244) -> (usize, Vec<ScanEvidence>) {
5245 let mut hits = 0usize;
5246 let mut evidence = Vec::new();
5247
5248 for function in functions {
5249 let has_copy_input_context =
5250 body_contains_any(&function.lowered_signature, READ_BUFFER_SIGNATURE_PATTERNS);
5251 if !has_copy_input_context
5252 || !function_contains_code_pattern(function, COPY_ON_READ_PATTERNS)
5253 {
5254 continue;
5255 }
5256 if let Some(item) = first_matching_line(function, COPY_ON_READ_PATTERNS) {
5257 hits += 1;
5258 if evidence.len() < limit {
5259 evidence.push(item);
5260 }
5261 }
5262 }
5263
5264 (hits, evidence)
5265}
5266
5267fn build_criticality_hotspots(functions: &[FunctionSummary]) -> Vec<CriticalityHotspot> {
5268 let mut hotspots = Vec::new();
5269
5270 for function in functions {
5271 let signals = collect_hotspot_signals(function);
5272 let risk_score = compute_hotspot_risk_score(function, &signals);
5273
5274 if risk_score >= 12 {
5275 hotspots.push(CriticalityHotspot {
5276 path: function.path.clone(),
5277 function_name: function.name.clone(),
5278 start_line: function.start_line,
5279 estimated_complexity: function.estimated_complexity,
5280 risk_score,
5281 signals,
5282 });
5283 }
5284 }
5285
5286 hotspots.sort_by(|a, b| {
5287 b.risk_score
5288 .cmp(&a.risk_score)
5289 .then_with(|| b.estimated_complexity.cmp(&a.estimated_complexity))
5290 });
5291 hotspots.truncate(8);
5292 hotspots
5293}
5294
5295fn collect_hotspot_signals(function: &FunctionSummary) -> Vec<&'static str> {
5296 let mut signals = Vec::new();
5297
5298 push_basic_hotspot_signals(function, &mut signals);
5299 push_behavioral_hotspot_signals(function, &mut signals);
5300 if has_hotspot_unbounded_iterator(function) {
5301 signals.push("iter-unbounded");
5302 }
5303 if has_hotspot_relaxed_atomic_signal(function) {
5304 signals.push("relaxed-atomic");
5305 }
5306 if has_hotspot_mixed_clock_signal(function) {
5307 signals.push("mixed-clocks");
5308 }
5309 if function_contains_code_pattern(function, UNBOUNDED_CHANNEL_PATTERNS) {
5310 signals.push("unbounded-channel");
5311 }
5312 if has_hotspot_pending_without_wake_signal(function) {
5313 signals.push("pending-no-wake");
5314 }
5315 if has_hotspot_copy_on_read_signal(function) {
5316 signals.push("copy-on-read");
5317 }
5318
5319 signals
5320}
5321
5322fn push_basic_hotspot_signals(function: &FunctionSummary, signals: &mut Vec<&'static str>) {
5323 if function.estimated_complexity > 15 {
5324 signals.push("complexity>15");
5325 }
5326 if function.line_count > 60 {
5327 signals.push("long-function");
5328 }
5329 if function_contains_code_pattern(function, ASSERT_PATTERNS) && function.assertion_count < 2 {
5330 signals.push("low-assert-density");
5331 }
5332 if function_contains_code_pattern(function, UNWRAP_PATTERNS) {
5333 signals.push("unwrap");
5334 }
5335 if function_contains_code_pattern(function, UNSAFE_PATTERNS) {
5336 signals.push("unsafe");
5337 }
5338}
5339
5340fn push_behavioral_hotspot_signals(function: &FunctionSummary, signals: &mut Vec<&'static str>) {
5341 if function_contains_code_pattern(function, HARD_CODED_WAIT_PATTERNS) {
5342 signals.push("hard-coded-wait");
5343 }
5344 if function_contains_code_pattern(function, ASYNC_LOCK_PATTERNS) {
5345 signals.push("async-lock");
5346 }
5347 if function_contains_code_pattern(function, INTERIOR_MUTABILITY_PATTERNS) {
5348 signals.push("interior-mutability");
5349 }
5350 if function_contains_code_pattern(function, COMMAND_BUFFER_PATTERNS) {
5351 signals.push("command-buffer");
5352 }
5353}
5354
5355fn has_hotspot_unbounded_iterator(function: &FunctionSummary) -> bool {
5356 function_contains_code_pattern(function, ITERATOR_TERMINAL_PATTERNS)
5357 && !function_contains_code_pattern(function, ITERATOR_BOUND_PATTERNS)
5358 && (function_contains_code_pattern(function, OPEN_ENDED_ITERATOR_PATTERNS)
5359 || body_contains_any(&function.lowered_signature, OPEN_ENDED_ITERATOR_PATTERNS))
5360}
5361
5362fn has_hotspot_relaxed_atomic_signal(function: &FunctionSummary) -> bool {
5363 function_contains_code_pattern(function, RELAXED_ORDERING_PATTERNS)
5364 && function_contains_code_pattern(function, CRITICAL_STATE_PATTERNS)
5365}
5366
5367fn has_hotspot_mixed_clock_signal(function: &FunctionSummary) -> bool {
5368 function_contains_code_pattern(function, &["instant::now("])
5369 && function_contains_code_pattern(function, &["systemtime::now("])
5370}
5371
5372fn has_hotspot_pending_without_wake_signal(function: &FunctionSummary) -> bool {
5373 function.name == "poll"
5374 && (function.lowered_signature.contains("-> poll<")
5375 || function.lowered_signature.contains("-> core::task::poll<")
5376 || function.lowered_signature.contains("-> std::task::poll<"))
5377 && function_contains_code_pattern(function, MANUAL_POLL_PENDING_PATTERNS)
5378 && !function_contains_code_pattern(function, WAKE_PATTERNS)
5379}
5380
5381fn has_hotspot_copy_on_read_signal(function: &FunctionSummary) -> bool {
5382 function_contains_code_pattern(function, COPY_ON_READ_PATTERNS)
5383 && body_contains_any(&function.lowered_signature, READ_BUFFER_SIGNATURE_PATTERNS)
5384}
5385
5386fn compute_hotspot_risk_score(function: &FunctionSummary, signals: &[&'static str]) -> usize {
5387 function.estimated_complexity
5388 + signals.len() * 3
5389 + usize::from(function.line_count > 60) * 5
5390 + usize::from(function_contains_code_pattern(function, UNSAFE_PATTERNS)) * 6
5391 + usize::from(function_contains_code_pattern(function, UNWRAP_PATTERNS)) * 3
5392}
5393
5394fn collect_low_assertion_evidence(
5395 functions: &[FunctionSummary],
5396 limit: usize,
5397) -> Vec<ScanEvidence> {
5398 let mut sparse_functions = functions
5399 .iter()
5400 .map(|function| (function.assertion_count, function))
5401 .filter(|(assertions, _)| *assertions < 2)
5402 .collect::<Vec<_>>();
5403 sparse_functions.sort_by(|(left_asserts, left_fn), (right_asserts, right_fn)| {
5404 left_asserts
5405 .cmp(right_asserts)
5406 .then_with(|| right_fn.line_count.cmp(&left_fn.line_count))
5407 });
5408
5409 sparse_functions
5410 .into_iter()
5411 .take(limit)
5412 .map(|(assertions, function)| ScanEvidence {
5413 path: function.path.clone(),
5414 line_number: function.start_line,
5415 pattern: "assertion density < 2 per function",
5416 snippet: format!(
5417 "function `{}` has {} assertion site(s) across {} lines",
5418 function.name, assertions, function.line_count
5419 ),
5420 })
5421 .collect()
5422}
5423
/// Estimates cyclomatic complexity from already-lowercased text: 1 (for the
/// single entry path) plus one per occurrence of each decision token.
fn estimate_cyclomatic_complexity_lowered(lowered: &str) -> usize {
    const DECISION_TOKENS: [&str; 9] = [
        "if ",
        "else if ",
        "match ",
        "while ",
        "while let ",
        "for ",
        "loop ",
        "&&",
        "||",
    ];

    1 + DECISION_TOKENS
        .iter()
        .map(|&token| lowered.match_indices(token).count())
        .sum::<usize>()
}
5444
/// True when `lowered_body` contains at least one of `patterns`.
fn body_contains_any(lowered_body: &str, patterns: &[&'static str]) -> bool {
    for pattern in patterns {
        if lowered_body.contains(*pattern) {
            return true;
        }
    }
    false
}
5450
/// True when a pre-trimmed line looks like executable code rather than an
/// empty line, a comment, or a leading string/byte/raw-string literal.
fn is_code_like_scan_line(trimmed: &str) -> bool {
    if trimmed.is_empty() {
        return false;
    }
    const NON_CODE_PREFIXES: [&str; 7] = ["//", "///", "*", "\"", "b\"", "r\"", "r#\""];
    !NON_CODE_PREFIXES
        .iter()
        .any(|&prefix| trimmed.starts_with(prefix))
}
5461
5462fn function_contains_code_pattern(function: &FunctionSummary, patterns: &[&'static str]) -> bool {
5463 function.body.lines().any(|line| {
5464 let trimmed = line.trim();
5465 if !is_code_like_scan_line(trimmed) {
5466 return false;
5467 }
5468 let lowered = strip_rust_comments_and_strings(trimmed).to_ascii_lowercase();
5469 patterns.iter().any(|pattern| lowered.contains(pattern))
5470 })
5471}
5472
/// True when `lowered_body` appears to contain a call to
/// `target_name_lowered` — i.e. `name(` preceded by either a qualifying path
/// (`self.`, `self::`, `super::`, `crate::`) or a non-identifier boundary,
/// and not by `fn ` (which would be the definition itself).
fn function_calls_name(lowered_body: &str, target_name_lowered: &str) -> bool {
    let needle = format!("{target_name_lowered}(");

    lowered_body.match_indices(&needle).any(|(absolute, _)| {
        let prefix = &lowered_body[..absolute];
        if prefix.ends_with("fn ") {
            // `fn name(` is the definition, never a call site.
            return false;
        }
        let qualified_receiver = prefix.ends_with("self.")
            || prefix.ends_with("self::")
            || prefix.ends_with("super::")
            || prefix.ends_with("crate::");
        // Start-of-body counts as a boundary; so does any char that cannot
        // continue an identifier or path (rules out `barname(`).
        let at_identifier_boundary = prefix
            .chars()
            .last()
            .is_none_or(|ch| !ch.is_ascii_alphanumeric() && ch != '_' && ch != '.' && ch != ':');
        qualified_receiver || at_identifier_boundary
    })
}
5493
5494fn count_assertions_in_text(text: &str) -> usize {
5495 text.lines()
5496 .filter(|line| {
5497 let trimmed = line.trim();
5498 !trimmed.starts_with("//") && !trimmed.starts_with("///") && !trimmed.starts_with('*')
5499 })
5500 .map(|line| {
5501 let lowered = line.to_ascii_lowercase();
5502 ASSERT_PATTERNS
5503 .iter()
5504 .map(|pattern| lowered.match_indices(pattern).count())
5505 .sum::<usize>()
5506 })
5507 .sum()
5508}
5509
5510fn first_matching_line(
5511 function: &FunctionSummary,
5512 patterns: &[&'static str],
5513) -> Option<ScanEvidence> {
5514 for (offset, line) in function.body.lines().enumerate() {
5515 let trimmed = line.trim();
5516 if !is_code_like_scan_line(trimmed) {
5517 continue;
5518 }
5519 let lowered = strip_rust_comments_and_strings(trimmed).to_ascii_lowercase();
5520 for &pattern in patterns {
5521 if lowered.contains(pattern) {
5522 return Some(ScanEvidence {
5523 path: function.path.clone(),
5524 line_number: function.start_line + offset,
5525 pattern,
5526 snippet: trimmed.to_string(),
5527 });
5528 }
5529 }
5530 }
5531
5532 None
5533}
5534
/// Finds the line span of the first balanced `{ ... }` block at or after
/// `start_idx`, returning `(first_brace_line, closing_brace_line)`.
///
/// Braces are counted character-by-character with no comment/string
/// stripping, so string contents can skew the balance.
fn extract_braced_block(lines: &[&str], start_idx: usize) -> Option<(usize, usize)> {
    let mut depth = 0isize;
    let mut block_start: Option<usize> = None;

    for (idx, line) in lines.iter().enumerate().skip(start_idx) {
        for ch in line.chars() {
            if ch == '{' {
                depth += 1;
                // Remember the line holding the very first opening brace.
                if block_start.is_none() {
                    block_start = Some(idx);
                }
            } else if ch == '}' {
                depth -= 1;
            }
        }

        // Balanced again after having opened at least once: block complete.
        if block_start.is_some() && depth == 0 {
            return block_start.map(|start| (start, idx));
        }
    }

    None
}
5564
5565fn extract_function_summaries(documents: &[SourceDocument]) -> Vec<FunctionSummary> {
5566 let mut functions = Vec::new();
5567
5568 for document in documents.iter().filter(|document| {
5569 document
5570 .relative_path
5571 .extension()
5572 .and_then(|ext| ext.to_str())
5573 == Some("rs")
5574 }) {
5575 let lines = document.risk_contents.lines().collect::<Vec<_>>();
5576 let test_section_start = lines
5577 .iter()
5578 .position(|line| line.trim_start().starts_with("#[cfg(test)]"));
5579 let mut next_idx = 0usize;
5580 for idx in 0..lines.len() {
5581 if idx < next_idx {
5582 continue;
5583 }
5584 if test_section_start.is_some_and(|start| idx >= start) {
5585 break;
5586 }
5587 if let Some((function, end_idx)) =
5588 try_extract_function(&document.relative_path, &lines, idx)
5589 {
5590 functions.push(function);
5591 next_idx = end_idx + 1;
5592 }
5593 }
5594 }
5595
5596 functions
5597}
5598
5599fn try_extract_function(
5600 path: &Path,
5601 lines: &[&str],
5602 start_idx: usize,
5603) -> Option<(FunctionSummary, usize)> {
5604 let attribute_start_idx = rewind_attribute_start(lines, start_idx);
5605 let lowered_attributes = lowered_attribute_block(lines, attribute_start_idx, start_idx);
5606 let (signature, signature_end_idx) = collect_function_signature(lines, start_idx)?;
5607 let name = extract_function_name_from_signature(&signature)?;
5608 let end_idx = repaired_function_end_idx(
5609 lines,
5610 start_idx,
5611 find_function_end_idx(lines, signature_end_idx)?,
5612 );
5613 let summary = build_function_summary(
5614 path,
5615 name,
5616 signature,
5617 lowered_attributes,
5618 start_idx,
5619 end_idx,
5620 lines,
5621 );
5622
5623 Some((summary, end_idx))
5624}
5625
/// Repair pass for a function end index that brace counting may have
/// overshot: if a new un-indented function signature begins inside the span
/// right after a lone `}` line, that `}` is taken as the real end.
/// Otherwise `end_idx` is returned unchanged.
fn repaired_function_end_idx(lines: &[&str], start_idx: usize, end_idx: usize) -> usize {
    // Index of the most recent non-empty line seen so far.
    let mut previous_nonempty_idx = start_idx;

    for idx in (start_idx + 1)..=end_idx {
        let trimmed = lines[idx].trim();
        if trimmed.is_empty() {
            continue;
        }
        // `lines[idx].starts_with(trimmed)` holds only when the line has no
        // leading indentation, i.e. it is a top-level item, not a nested one.
        if appears_to_start_function_signature(trimmed) && lines[idx].starts_with(trimmed) {
            let previous = lines[previous_nonempty_idx].trim();
            if previous == "}" {
                // The lone `}` preceding the new signature closes this function.
                return previous_nonempty_idx;
            }
        }
        previous_nonempty_idx = idx;
    }

    end_idx
}
5645
/// Heuristic: does a (pre-trimmed) line look like the start of a function
/// signature? Attributes, comments, `macro_rules!`, and control-flow
/// keywords containing `fn ` are excluded.
fn appears_to_start_function_signature(line: &str) -> bool {
    if line.is_empty() {
        return false;
    }
    let disqualifying_prefixes = ["#", "//", "///", "macro_rules!"];
    if disqualifying_prefixes
        .iter()
        .any(|&prefix| line.starts_with(prefix))
    {
        return false;
    }
    let control_flow_prefixes = ["if ", "while ", "for ", "match "];
    line.contains("fn ")
        && !control_flow_prefixes
            .iter()
            .any(|&prefix| line.starts_with(prefix))
}
5662
/// Pulls the identifier following `fn ` out of a signature string; returns
/// `None` when there is no `fn ` or no identifier characters follow it.
fn extract_function_name_from_signature(signature: &str) -> Option<String> {
    let rest = &signature[signature.find("fn ")? + 3..];
    let name: String = rest
        .chars()
        .take_while(|ch| ch.is_ascii_alphanumeric() || *ch == '_')
        .collect();
    (!name.is_empty()).then_some(name)
}
5676
5677fn update_brace_balance_from_code_line(
5678 line: &str,
5679 brace_balance: &mut isize,
5680 seen_open_brace: &mut bool,
5681) {
5682 let sanitized = strip_rust_comments_and_strings(line)
5683 .replace("'{'", " ")
5684 .replace("'}'", " ");
5685 for ch in sanitized.chars() {
5686 match ch {
5687 '{' => {
5688 *brace_balance += 1;
5689 *seen_open_brace = true;
5690 }
5691 '}' => *brace_balance -= 1,
5692 _other => {}
5693 }
5694 }
5695}
5696
/// Walks backwards from `start_idx` over the contiguous run of `#`-prefixed
/// attribute lines and returns the index where that run begins.
fn rewind_attribute_start(lines: &[&str], start_idx: usize) -> usize {
    let attribute_count = lines[..start_idx]
        .iter()
        .rev()
        .take_while(|line| line.trim().starts_with('#'))
        .count();
    start_idx - attribute_count
}
5709
/// Joins the trimmed attribute lines in `[attribute_start_idx, start_idx)`
/// with newlines and lowercases the result.
fn lowered_attribute_block(lines: &[&str], attribute_start_idx: usize, start_idx: usize) -> String {
    let mut block = String::new();
    for line in &lines[attribute_start_idx..start_idx] {
        if !block.is_empty() {
            block.push('\n');
        }
        block.push_str(line.trim());
    }
    block.to_ascii_lowercase()
}
5718
5719fn collect_function_signature(lines: &[&str], start_idx: usize) -> Option<(String, usize)> {
5720 let mut signature = String::new();
5721
5722 for (idx, line) in lines.iter().enumerate().skip(start_idx) {
5723 let trimmed = line.trim();
5724 if idx == start_idx && !appears_to_start_function_signature(trimmed) {
5725 return None;
5726 }
5727 if trimmed.starts_with("//") || trimmed.starts_with("///") || trimmed.starts_with('*') {
5728 return None;
5729 }
5730
5731 if !signature.is_empty() {
5732 signature.push(' ');
5733 }
5734 signature.push_str(trimmed);
5735
5736 if signature.contains(';') && !signature.contains('{') {
5737 return None;
5738 }
5739 if signature.contains('{') {
5740 return Some((signature, idx));
5741 }
5742 }
5743
5744 None
5745}
5746
/// Finds the line index where the function whose signature ends at
/// `signature_end_idx` closes: the line on which the brace balance returns
/// to zero. If a new un-indented function signature appears while braces are
/// still unbalanced (counting got confused), the last standalone `}` line is
/// returned as a fallback. Returns `None` if no end is found.
fn find_function_end_idx(lines: &[&str], signature_end_idx: usize) -> Option<usize> {
    let mut brace_balance = 0isize;
    let mut seen_open_brace = false;
    // Most recent line consisting solely of `}` — used by the fallback path.
    let mut last_standalone_closing_brace_idx = None;

    for (idx, line) in lines.iter().enumerate().skip(signature_end_idx) {
        update_brace_balance_from_code_line(line, &mut brace_balance, &mut seen_open_brace);
        if line.trim() == "}" {
            last_standalone_closing_brace_idx = Some(idx);
        }
        // Normal exit: balance back to zero after at least one `{` was seen.
        if seen_open_brace && brace_balance == 0 {
            return Some(idx);
        }
        let trimmed = line.trim();
        // Fallback: `line.starts_with(trimmed)` means no leading indentation,
        // i.e. a new top-level signature while still "inside" this body.
        if seen_open_brace
            && idx > signature_end_idx
            && line.starts_with(trimmed)
            && appears_to_start_function_signature(trimmed)
            && last_standalone_closing_brace_idx.is_some()
        {
            return last_standalone_closing_brace_idx;
        }
    }

    None
}
5773
5774fn build_function_summary(
5775 path: &Path,
5776 name: String,
5777 signature: String,
5778 lowered_attributes: String,
5779 start_idx: usize,
5780 end_idx: usize,
5781 lines: &[&str],
5782) -> FunctionSummary {
5783 let body = lines[start_idx..=end_idx].join("\n");
5784 let lowered_body = body.to_ascii_lowercase();
5785
5786 FunctionSummary {
5787 path: path.to_path_buf(),
5788 lowered_name: name.to_ascii_lowercase(),
5789 name,
5790 lowered_signature: signature.to_ascii_lowercase(),
5791 lowered_attributes,
5792 start_line: start_idx + 1,
5793 line_count: end_idx - start_idx + 1,
5794 estimated_complexity: estimate_cyclomatic_complexity_lowered(&lowered_body),
5795 assertion_count: count_assertions_in_text(&body),
5796 body,
5797 lowered_body,
5798 }
5799}
5800
5801fn scan_manifest(path: &Path) -> ManifestMetadata {
5802 let Ok(contents) = fs::read_to_string(path) else {
5803 return ManifestMetadata::default();
5804 };
5805
5806 let mut metadata = ManifestMetadata::default();
5807 let mut section = ManifestSection::None;
5808 let mut direct_dependencies = BTreeSet::new();
5809 let mut build_dependencies = BTreeSet::new();
5810 let mut dev_dependencies = BTreeSet::new();
5811
5812 for raw_line in contents.lines() {
5813 let line = raw_line.trim();
5814 if line.is_empty() || line.starts_with('#') {
5815 continue;
5816 }
5817 if line.starts_with('[') && line.ends_with(']') {
5818 section = update_manifest_section(
5819 line,
5820 &mut direct_dependencies,
5821 &mut build_dependencies,
5822 &mut dev_dependencies,
5823 );
5824 continue;
5825 }
5826
5827 apply_manifest_value_line(
5828 &mut metadata,
5829 section,
5830 raw_line,
5831 line,
5832 &mut direct_dependencies,
5833 &mut build_dependencies,
5834 &mut dev_dependencies,
5835 );
5836 }
5837
5838 set_manifest_dependency_counts(
5839 &mut metadata,
5840 &direct_dependencies,
5841 &build_dependencies,
5842 &dev_dependencies,
5843 );
5844 metadata
5845}
5846
5847fn update_manifest_section(
5848 line: &str,
5849 direct_dependencies: &mut BTreeSet<String>,
5850 build_dependencies: &mut BTreeSet<String>,
5851 dev_dependencies: &mut BTreeSet<String>,
5852) -> ManifestSection {
5853 let section = classify_manifest_section(line);
5854 insert_dependency_from_section_header(
5855 direct_dependencies,
5856 line,
5857 ManifestSection::Dependencies,
5858 "dependencies",
5859 section,
5860 );
5861 insert_dependency_from_section_header(
5862 build_dependencies,
5863 line,
5864 ManifestSection::BuildDependencies,
5865 "build-dependencies",
5866 section,
5867 );
5868 insert_dependency_from_section_header(
5869 dev_dependencies,
5870 line,
5871 ManifestSection::DevDependencies,
5872 "dev-dependencies",
5873 section,
5874 );
5875 section
5876}
5877
5878fn apply_manifest_value_line(
5879 metadata: &mut ManifestMetadata,
5880 section: ManifestSection,
5881 raw_line: &str,
5882 line: &str,
5883 direct_dependencies: &mut BTreeSet<String>,
5884 build_dependencies: &mut BTreeSet<String>,
5885 dev_dependencies: &mut BTreeSet<String>,
5886) {
5887 match section {
5888 ManifestSection::Package => apply_package_manifest_line(metadata, line),
5889 ManifestSection::Lib => apply_lib_manifest_line(metadata, line),
5890 ManifestSection::Dependencies => {
5891 insert_dependency_from_key(direct_dependencies, raw_line, line)
5892 }
5893 ManifestSection::BuildDependencies => {
5894 insert_dependency_from_key(build_dependencies, raw_line, line)
5895 }
5896 ManifestSection::DevDependencies => {
5897 insert_dependency_from_key(dev_dependencies, raw_line, line)
5898 }
5899 ManifestSection::None => {}
5900 }
5901}
5902
5903fn set_manifest_dependency_counts(
5904 metadata: &mut ManifestMetadata,
5905 direct_dependencies: &BTreeSet<String>,
5906 build_dependencies: &BTreeSet<String>,
5907 dev_dependencies: &BTreeSet<String>,
5908) {
5909 metadata.direct_dependencies = direct_dependencies.len();
5910 metadata.build_dependencies = build_dependencies.len();
5911 metadata.dev_dependencies = dev_dependencies.len();
5912}
5913
5914fn insert_dependency_from_section_header(
5915 target: &mut BTreeSet<String>,
5916 line: &str,
5917 expected_section: ManifestSection,
5918 section_name: &str,
5919 section: ManifestSection,
5920) {
5921 if section == expected_section {
5922 if let Some(name) = dependency_name_from_section(line, section_name) {
5923 target.insert(name);
5924 }
5925 }
5926}
5927
5928fn apply_package_manifest_line(metadata: &mut ManifestMetadata, line: &str) {
5929 if apply_package_scalar_field(metadata, line) {
5930 return;
5931 }
5932 apply_build_manifest_field(metadata, line);
5933}
5934
5935fn apply_package_scalar_field(metadata: &mut ManifestMetadata, line: &str) -> bool {
5936 let Some((key, value)) = parse_package_scalar_field(line) else {
5937 return false;
5938 };
5939 assign_package_scalar(metadata, key, value);
5940 true
5941}
5942
5943fn assign_package_scalar(metadata: &mut ManifestMetadata, key: &'static str, value: String) {
5944 match key {
5945 "name" => metadata.crate_name = Some(value),
5946 "version" => metadata.crate_version = Some(value),
5947 "edition" => metadata.edition = Some(value),
5948 "license" => metadata.license = Some(value),
5949 "rust-version" => metadata.rust_version = Some(value),
5950 "repository" => metadata.repository = Some(value),
5951 "homepage" => metadata.homepage = Some(value),
5952 "documentation" => metadata.documentation = Some(value),
5953 "readme" => metadata.readme = Some(value),
5954 _other => {}
5955 }
5956}
5957
5958fn apply_build_manifest_field(metadata: &mut ManifestMetadata, line: &str) {
5959 match parse_build_manifest_value(line) {
5960 Some(BuildManifestValue::Disabled) => metadata.build_script = None,
5961 Some(BuildManifestValue::DefaultScript) => {
5962 metadata.build_script = Some("build.rs".to_string());
5963 }
5964 Some(BuildManifestValue::Path(path)) => metadata.build_script = Some(path),
5965 None => {}
5966 }
5967}
5968
5969fn parse_build_manifest_value(line: &str) -> Option<BuildManifestValue> {
5970 match parse_manifest_bool(line, "build") {
5971 Some(false) => Some(BuildManifestValue::Disabled),
5972 Some(true) => Some(BuildManifestValue::DefaultScript),
5973 None => parse_manifest_value(line, "build").map(BuildManifestValue::Path),
5974 }
5975}
5976
/// Parsed value of a `[package] build = ...` manifest entry.
enum BuildManifestValue {
    /// `build = false`: the crate explicitly has no build script.
    Disabled,
    /// `build = true`: the conventional `build.rs` is used.
    DefaultScript,
    /// `build = "<path>"`: an explicit build-script path.
    Path(String),
}
5982
5983fn parse_package_scalar_field(line: &str) -> Option<(&'static str, String)> {
5984 for key in [
5985 "name",
5986 "version",
5987 "edition",
5988 "license",
5989 "rust-version",
5990 "repository",
5991 "homepage",
5992 "documentation",
5993 "readme",
5994 ] {
5995 if let Some(value) = parse_manifest_value(line, key) {
5996 return Some((key, value));
5997 }
5998 }
5999
6000 None
6001}
6002
6003fn apply_lib_manifest_line(metadata: &mut ManifestMetadata, line: &str) {
6004 if parse_manifest_bool(line, "proc-macro") == Some(true) {
6005 metadata.proc_macro = true;
6006 }
6007}
6008
6009fn insert_dependency_from_key(target: &mut BTreeSet<String>, raw_line: &str, line: &str) {
6010 if let Some(key) = parse_dependency_key(raw_line, line) {
6011 target.insert(key);
6012 }
6013}
6014
/// Scans the risk view of every document (test code and string/comment text
/// blanked) for the given lowercase patterns, capping stored evidence.
fn scan_patterns(
    documents: &[SourceDocument],
    patterns: &[&'static str],
    max_evidence: usize,
) -> PatternScan {
    scan_patterns_with_selector(documents, patterns, max_evidence, ScanContentMode::Risk)
}
6022
/// Scans the analysis view of every document (string/comment text blanked,
/// test code retained) for the given lowercase patterns.
fn scan_patterns_analysis(
    documents: &[SourceDocument],
    patterns: &[&'static str],
    max_evidence: usize,
) -> PatternScan {
    scan_patterns_with_selector(documents, patterns, max_evidence, ScanContentMode::Analysis)
}
6030
6031fn scan_patterns_filtered<F>(
6032 documents: &[SourceDocument],
6033 patterns: &[&'static str],
6034 max_evidence: usize,
6035 mode: ScanContentMode,
6036 include_path: F,
6037) -> PatternScan
6038where
6039 F: Fn(&Path) -> bool,
6040{
6041 let mut total_hits = 0usize;
6042 let mut evidence = Vec::new();
6043 let mut matched_patterns = BTreeSet::new();
6044
6045 for document in documents {
6046 if !include_path(&document.relative_path) {
6047 continue;
6048 }
6049
6050 let source_lines = document.contents.lines();
6051 let scan_lines = match mode {
6052 ScanContentMode::Analysis => document.analysis_contents.lines(),
6053 ScanContentMode::Risk => document.risk_contents.lines(),
6054 };
6055
6056 for (idx, (source_line, scan_line)) in source_lines.zip(scan_lines).enumerate() {
6057 let lowered = scan_line.to_ascii_lowercase();
6058 for &pattern in patterns {
6059 if lowered.contains(pattern) {
6060 total_hits += 1;
6061 matched_patterns.insert(pattern);
6062 if evidence.len() < max_evidence {
6063 evidence.push(ScanEvidence {
6064 path: document.relative_path.clone(),
6065 line_number: idx + 1,
6066 pattern,
6067 snippet: source_line.trim().to_string(),
6068 });
6069 }
6070 }
6071 }
6072 }
6073 }
6074
6075 PatternScan {
6076 total_hits,
6077 matched_patterns: matched_patterns.into_iter().collect(),
6078 evidence,
6079 }
6080}
6081
/// Which preprocessed view of a document a pattern scan should read.
#[derive(Clone, Copy)]
enum ScanContentMode {
    /// The comment/string-stripped contents, test code included.
    Analysis,
    /// The comment/string-stripped contents with test-only code blanked out.
    Risk,
}
6087
6088fn scan_patterns_with_selector(
6089 documents: &[SourceDocument],
6090 patterns: &[&'static str],
6091 max_evidence: usize,
6092 mode: ScanContentMode,
6093) -> PatternScan {
6094 let mut total_hits = 0usize;
6095 let mut evidence = Vec::new();
6096 let mut matched_patterns = BTreeSet::new();
6097
6098 for document in documents {
6099 let source_lines = document.contents.lines();
6100 let scan_lines = match mode {
6101 ScanContentMode::Analysis => document.analysis_contents.lines(),
6102 ScanContentMode::Risk => document.risk_contents.lines(),
6103 };
6104
6105 for (idx, (source_line, scan_line)) in source_lines.zip(scan_lines).enumerate() {
6106 let lowered = scan_line.to_ascii_lowercase();
6107 for &pattern in patterns {
6108 if lowered.contains(pattern) {
6109 total_hits += 1;
6110 matched_patterns.insert(pattern);
6111 if evidence.len() < max_evidence {
6112 evidence.push(ScanEvidence {
6113 path: document.relative_path.clone(),
6114 line_number: idx + 1,
6115 pattern,
6116 snippet: source_line.trim().to_string(),
6117 });
6118 }
6119 }
6120 }
6121 }
6122 }
6123
6124 PatternScan {
6125 total_hits,
6126 matched_patterns: matched_patterns.into_iter().collect(),
6127 evidence,
6128 }
6129}
6130
6131fn line_contains_bounded_while_condition(line: &str) -> bool {
6132 let lowered = line.trim().to_ascii_lowercase();
6133 is_bounded_iterator_while(&lowered) || is_bounded_numeric_while(&lowered)
6134}
6135
6136fn is_bounded_iterator_while(lowered: &str) -> bool {
6137 body_contains_any(
6138 lowered,
6139 &[
6140 "while let some(",
6141 "while let some (",
6142 "while let ok(",
6143 "while let ok (",
6144 ],
6145 ) && body_contains_any(lowered, &[".next()", ".pop()", ".find(", ".peek()"])
6146}
6147
6148fn is_bounded_numeric_while(lowered: &str) -> bool {
6149 lowered.contains("while ")
6150 && body_contains_any(lowered, &[".len()", " > 0", " != 0", " < ", " <= "])
6151}
6152
6153fn collect_unbounded_loop_evidence(
6154 documents: &[SourceDocument],
6155 max_evidence: usize,
6156) -> PatternScan {
6157 let mut total_hits = 0usize;
6158 let mut evidence = Vec::new();
6159 let mut matched_patterns = BTreeSet::new();
6160
6161 for document in documents {
6162 for (idx, (source_line, risk_line)) in document
6163 .contents
6164 .lines()
6165 .zip(document.risk_contents.lines())
6166 .enumerate()
6167 {
6168 let lowered = risk_line.trim().to_ascii_lowercase();
6169
6170 if lowered.contains("loop {") || lowered.contains("loop{") {
6171 total_hits += 1;
6172 matched_patterns.insert("loop");
6173 if evidence.len() < max_evidence {
6174 evidence.push(ScanEvidence {
6175 path: document.relative_path.clone(),
6176 line_number: idx + 1,
6177 pattern: "loop",
6178 snippet: source_line.trim().to_string(),
6179 });
6180 }
6181 continue;
6182 }
6183
6184 if (lowered.contains("while let ") || lowered.contains("while "))
6185 && !line_contains_bounded_while_condition(&lowered)
6186 {
6187 total_hits += 1;
6188 matched_patterns.insert("while");
6189 if evidence.len() < max_evidence {
6190 evidence.push(ScanEvidence {
6191 path: document.relative_path.clone(),
6192 line_number: idx + 1,
6193 pattern: "while",
6194 snippet: source_line.trim().to_string(),
6195 });
6196 }
6197 }
6198 }
6199 }
6200
6201 PatternScan {
6202 total_hits,
6203 matched_patterns: matched_patterns.into_iter().collect(),
6204 evidence,
6205 }
6206}
6207
6208fn collect_ambiguous_for_loop_evidence(
6209 documents: &[SourceDocument],
6210 max_evidence: usize,
6211) -> (usize, Vec<ScanEvidence>) {
6212 let mut hits = 0usize;
6213 let mut evidence = Vec::new();
6214
6215 for document in documents.iter().filter(|document| {
6216 document
6217 .relative_path
6218 .extension()
6219 .and_then(|ext| ext.to_str())
6220 == Some("rs")
6221 }) {
6222 for (idx, source_line) in document.contents.lines().enumerate() {
6223 let trimmed = source_line.trim();
6224 if !trimmed.starts_with("for ") {
6225 continue;
6226 }
6227 let loop_signature = collect_for_loop_signature(&document.contents, idx);
6228 if line_contains_bounded_for_loop(&loop_signature) {
6229 continue;
6230 }
6231
6232 hits += 1;
6233 if evidence.len() < max_evidence {
6234 evidence.push(ScanEvidence {
6235 path: document.relative_path.clone(),
6236 line_number: idx + 1,
6237 pattern: "for loop with non-obvious bound",
6238 snippet: trimmed.to_string(),
6239 });
6240 }
6241 }
6242 }
6243
6244 (hits, evidence)
6245}
6246
/// Joins up to six lines starting at `start_idx` into a single
/// space-separated loop header, stopping once the opening `{` is reached.
///
/// Iterates the lines lazily instead of collecting every line of the file
/// into a `Vec` first, which the previous version did on each call.
fn collect_for_loop_signature(contents: &str, start_idx: usize) -> String {
    let mut signature = String::new();

    for line in contents.lines().skip(start_idx).take(6) {
        if !signature.is_empty() {
            signature.push(' ');
        }
        signature.push_str(line.trim());
        if line.contains('{') {
            break;
        }
    }

    signature
}
6263
6264fn line_contains_bounded_for_loop(line: &str) -> bool {
6265 let lowered = line.trim().to_ascii_lowercase();
6266 if !lowered.starts_with("for ") {
6267 return false;
6268 }
6269
6270 contains_bounded_for_iter_pattern(&lowered) || contains_known_bounded_for_target(&lowered)
6271}
6272
6273fn scan_cfg_surface(documents: &[SourceDocument], max_evidence: usize) -> PatternScan {
6274 let mut total_hits = 0usize;
6275 let mut evidence = Vec::new();
6276 let mut matched_patterns = BTreeSet::new();
6277
6278 for document in documents
6279 .iter()
6280 .filter(|document| !has_path_component(&document.relative_path, "proofs"))
6281 {
6282 for (idx, (source_line, scan_line)) in document
6283 .contents
6284 .lines()
6285 .zip(document.analysis_contents.lines())
6286 .enumerate()
6287 {
6288 if !is_review_relevant_cfg_line(source_line.trim(), scan_line.trim()) {
6289 continue;
6290 }
6291
6292 total_hits += 1;
6293 matched_patterns.insert("cfg");
6294 if evidence.len() < max_evidence {
6295 evidence.push(ScanEvidence {
6296 path: document.relative_path.clone(),
6297 line_number: idx + 1,
6298 pattern: "review-relevant cfg",
6299 snippet: source_line.trim().to_string(),
6300 });
6301 }
6302 }
6303 }
6304
6305 PatternScan {
6306 total_hits,
6307 matched_patterns: matched_patterns.into_iter().collect(),
6308 evidence,
6309 }
6310}
6311
6312fn is_review_relevant_cfg_line(raw_trimmed: &str, scan_trimmed: &str) -> bool {
6313 if !(scan_trimmed.contains("#[cfg")
6314 || scan_trimmed.contains("cfg!(")
6315 || scan_trimmed.contains("cfg_attr("))
6316 {
6317 return false;
6318 }
6319 !is_ignored_cfg_line(raw_trimmed)
6320}
6321
/// Recognizes `cfg` lines conventional enough to skip in review: test gating,
/// plain std-feature gates, and `no_std` / `unsafe_code` attribute forms.
fn is_ignored_cfg_line(raw_trimmed: &str) -> bool {
    const IGNORED_EXACT: [&str; 2] = [
        "#[cfg(feature = \"std\")]",
        "#[cfg(not(feature = \"std\"))]",
    ];
    const IGNORED_SUBSTRINGS: [&str; 6] = [
        "cfg(test)",
        "cfg_attr(test",
        "cfg_attr(not(feature = \"std\"), no_std)",
        "cfg_attr(not(any(feature = \"std\")), no_std)",
        "cfg_attr(not(test), forbid(unsafe_code))",
        "cfg_attr(not(test), deny(unsafe_code))",
    ];

    IGNORED_EXACT.contains(&raw_trimmed)
        || IGNORED_SUBSTRINGS
            .iter()
            .any(|needle| raw_trimmed.contains(needle))
}
6332
6333fn contains_bounded_for_iter_pattern(lowered: &str) -> bool {
6334 body_contains_any(
6335 lowered,
6336 &[
6337 " in 0..",
6338 " in 1..",
6339 " in ..",
6340 ".iter(",
6341 ".iter()",
6342 ".iter_mut(",
6343 ".iter_mut()",
6344 ".enumerate(",
6345 ".enumerate()",
6346 ".chunks(",
6347 ".windows(",
6348 ".split(",
6349 ".split_whitespace(",
6350 ".lines(",
6351 ".bytes(",
6352 ".char_indices(",
6353 ".chars(",
6354 ".keys(",
6355 ".match_indices(",
6356 ".values(",
6357 ".values_mut(",
6358 ".drain(",
6359 ".into_iter()",
6360 ".into_iter(",
6361 "fs::read_dir(",
6362 " in &",
6363 " in [",
6364 " in (",
6365 " in vec![",
6366 ],
6367 )
6368}
6369
/// True when a `for ... in ...` header targets one of the crate's well-known
/// finite collection variable names.
fn contains_known_bounded_for_target(lowered: &str) -> bool {
    const KNOWN_COLLECTIONS: [&str; 13] = [
        "documents",
        "functions",
        "sections",
        "checks",
        "rows",
        "patterns",
        "hotspots",
        "samples",
        "files",
        "entries",
        "bundle",
        "sigma_values",
        "p_values",
    ];

    lowered.contains(" in ")
        && KNOWN_COLLECTIONS
            .iter()
            .any(|name| lowered.contains(name))
}
6390
/// Walks `root` breadth-first and returns sorted root-relative paths of all
/// regular files, skipping the paths rejected by `should_skip_scan_path`.
///
/// # Errors
/// Propagates any `read_dir` / `file_type` I/O error hit mid-walk.
fn collect_files(root: &Path) -> io::Result<Vec<PathBuf>> {
    let mut files = Vec::new();
    // `pending` acts as a FIFO queue via an advancing index; visited entries
    // are kept in place but never revisited.
    let mut pending = vec![PathBuf::from(".")];
    let mut pending_idx = 0usize;

    while pending_idx < pending.len() {
        let relative_dir = pending[pending_idx].clone();
        pending_idx += 1;
        // "." marks the root itself; every other entry is joined onto root.
        let current = if relative_dir == Path::new(".") {
            root.to_path_buf()
        } else {
            root.join(&relative_dir)
        };

        for entry in fs::read_dir(&current)? {
            let entry = entry?;
            let path = entry.path();
            let relative_path = path.strip_prefix(root).unwrap_or(&path).to_path_buf();
            let file_type = entry.file_type()?;

            if should_skip_scan_path(&relative_path) {
                continue;
            }

            if file_type.is_dir() {
                pending.push(relative_path);
            } else if file_type.is_file() {
                files.push(relative_path);
            }
        }
    }

    // Sort for deterministic output regardless of directory-iteration order.
    files.sort();
    Ok(files)
}
6426
6427fn should_skip_scan_path(path: &Path) -> bool {
6428 if has_path_component(path, ".git")
6429 || has_path_component(path, "target")
6430 || has_path_component(path, DEFAULT_SCAN_OUTPUT_ROOT)
6431 {
6432 return true;
6433 }
6434
6435 let normalized = path.to_string_lossy().replace('\\', "/");
6436 normalized.starts_with("docs/generated/")
6437 || normalized.starts_with("paper/generated/")
6438 || normalized.ends_with("_scan.txt")
6439 || normalized.ends_with("_scan.sarif.json")
6440 || normalized.ends_with("_scan.intoto.json")
6441 || normalized.ends_with("_scan.dsse.json")
6442}
6443
/// True for paths that belong to the crate's own tooling/support surface
/// (binaries, examples, proofs, fuzz targets, tests, and scanner modules)
/// rather than its core library code.
fn is_tooling_support_path(path: &Path) -> bool {
    const EXACT_FILES: [&str; 4] = [
        "src/evaluation.rs",
        "src/inject.rs",
        "src/report.rs",
        "src/scan.rs",
    ];
    const TREE_PREFIXES: [&str; 5] = ["src/bin/", "examples/", "proofs/", "fuzz/", "tests/"];

    let normalized = path.to_string_lossy().replace('\\', "/");
    EXACT_FILES.contains(&normalized.as_str())
        || TREE_PREFIXES
            .iter()
            .any(|prefix| normalized.starts_with(prefix))
}
6456
6457fn load_documents(root: &Path, files: &[PathBuf]) -> Vec<SourceDocument> {
6458 let mut documents = Vec::new();
6459
6460 for relative_path in files {
6461 let absolute_path = root.join(relative_path);
6462 let Ok(contents) = fs::read_to_string(&absolute_path) else {
6463 continue;
6464 };
6465 let analysis_contents = build_analysis_contents(relative_path, &contents);
6466 documents.push(SourceDocument {
6467 relative_path: relative_path.clone(),
6468 risk_contents: build_risk_contents(relative_path, &analysis_contents),
6469 analysis_contents,
6470 contents,
6471 });
6472 }
6473
6474 documents
6475}
6476
6477fn build_analysis_contents(path: &Path, contents: &str) -> String {
6478 if path.extension().and_then(|ext| ext.to_str()) != Some("rs") {
6479 return contents.to_string();
6480 }
6481
6482 strip_rust_comments_and_strings(contents)
6483}
6484
6485fn build_risk_contents(path: &Path, analysis_contents: &str) -> String {
6486 if path.extension().and_then(|ext| ext.to_str()) != Some("rs") {
6487 return analysis_contents.to_string();
6488 }
6489
6490 if has_path_component(path, "tests")
6491 || has_path_component(path, "fuzz")
6492 || has_path_component(path, "benches")
6493 {
6494 return blank_contents_preserving_lines(analysis_contents);
6495 }
6496
6497 strip_cfg_test_modules(analysis_contents)
6498}
6499
/// Replaces every line with an empty one, keeping the line count (and thus
/// downstream line numbers) intact.
fn blank_contents_preserving_lines(contents: &str) -> String {
    // N lines joined by "\n" is exactly N-1 newline characters.
    let line_count = contents.lines().count();
    "\n".repeat(line_count.saturating_sub(1))
}
6503
/// Blanks `#[cfg(test)]`-gated items (preserving line numbers) so test-only
/// code does not contribute risk findings.
///
/// The previous version never left a test section once entered, so all code
/// after the first `#[cfg(test)]` — including production items that follow a
/// test module — was blanked to end of file. This version tracks brace
/// balance and resumes copying once the gated item closes. Callers pass
/// comment/string-stripped contents (see `build_risk_contents`), so counting
/// raw brace characters is safe here.
fn strip_cfg_test_modules(contents: &str) -> String {
    let mut output = Vec::new();
    let mut in_test_section = false;
    let mut brace_depth = 0isize;
    let mut seen_open_brace = false;

    for line in contents.lines() {
        if !in_test_section && line.trim_start().starts_with("#[cfg(test)]") {
            in_test_section = true;
            brace_depth = 0;
            seen_open_brace = false;
        }

        if in_test_section {
            output.push(String::new());
            for ch in line.chars() {
                match ch {
                    '{' => {
                        brace_depth += 1;
                        seen_open_brace = true;
                    }
                    '}' => brace_depth -= 1,
                    _ => {}
                }
            }
            // The gated item is finished once its braces balance out.
            if seen_open_brace && brace_depth <= 0 {
                in_test_section = false;
            }
        } else {
            output.push(line.to_string());
        }
    }

    output.join("\n")
}
6522
/// Replaces comment text and string/char-literal contents with spaces while
/// preserving every newline, so downstream line numbers still match the
/// original source.
fn strip_rust_comments_and_strings(contents: &str) -> String {
    let mut cleaned = String::with_capacity(contents.len());
    let mut chars = contents.chars().peekable();
    let mut state = StripState::Code;

    while let Some(ch) = chars.next() {
        let next = chars.peek().copied();
        // Look ahead on a cheap clone of the iterator to distinguish a char
        // literal (`'x'`, `'\n'`) from a lifetime tick before committing to
        // the Char state.
        let starts_char_literal = matches!(state, StripState::Code)
            && ch == '\''
            && starts_rust_char_literal(chars.clone());

        // Each handler consumes `ch` (and possibly the peeked character),
        // appends the blanked/kept output, and returns the next state.
        match state {
            StripState::Code => {
                state = handle_code_strip_state(
                    &mut cleaned,
                    &mut chars,
                    ch,
                    next,
                    starts_char_literal,
                );
            }
            StripState::LineComment => {
                state = handle_line_comment_strip_state(&mut cleaned, ch);
            }
            StripState::BlockComment => {
                state = handle_block_comment_strip_state(&mut cleaned, &mut chars, ch, next);
            }
            StripState::String { escaped } => {
                state = handle_string_strip_state(&mut cleaned, ch, escaped);
            }
            StripState::Char { escaped } => {
                state = handle_char_strip_state(&mut cleaned, ch, escaped);
            }
        }
    }

    cleaned
}
6561
6562fn handle_code_strip_state(
6563 cleaned: &mut String,
6564 chars: &mut std::iter::Peekable<std::str::Chars<'_>>,
6565 ch: char,
6566 next: Option<char>,
6567 starts_char_literal: bool,
6568) -> StripState {
6569 if ch == '/' && next == Some('*') {
6570 cleaned.push(' ');
6571 cleaned.push(' ');
6572 chars.next();
6573 StripState::BlockComment
6574 } else if ch == '/' && next == Some('/') {
6575 cleaned.push(' ');
6576 cleaned.push(' ');
6577 chars.next();
6578 StripState::LineComment
6579 } else if ch == '"' {
6580 cleaned.push(' ');
6581 StripState::String { escaped: false }
6582 } else if starts_char_literal {
6583 cleaned.push(' ');
6584 StripState::Char { escaped: false }
6585 } else {
6586 cleaned.push(ch);
6587 StripState::Code
6588 }
6589}
6590
6591fn handle_line_comment_strip_state(cleaned: &mut String, ch: char) -> StripState {
6592 if ch == '\n' {
6593 cleaned.push('\n');
6594 StripState::Code
6595 } else {
6596 cleaned.push(' ');
6597 StripState::LineComment
6598 }
6599}
6600
6601fn handle_block_comment_strip_state(
6602 cleaned: &mut String,
6603 chars: &mut std::iter::Peekable<std::str::Chars<'_>>,
6604 ch: char,
6605 next: Option<char>,
6606) -> StripState {
6607 if ch == '*' && next == Some('/') {
6608 cleaned.push(' ');
6609 cleaned.push(' ');
6610 chars.next();
6611 StripState::Code
6612 } else {
6613 cleaned.push(if ch == '\n' { '\n' } else { ' ' });
6614 StripState::BlockComment
6615 }
6616}
6617
6618fn handle_string_strip_state(cleaned: &mut String, ch: char, escaped: bool) -> StripState {
6619 if ch == '\n' {
6620 cleaned.push('\n');
6621 StripState::String { escaped: false }
6622 } else if escaped {
6623 cleaned.push(' ');
6624 StripState::String { escaped: false }
6625 } else if ch == '\\' {
6626 cleaned.push(' ');
6627 StripState::String { escaped: true }
6628 } else if ch == '"' {
6629 cleaned.push(' ');
6630 StripState::Code
6631 } else {
6632 cleaned.push(' ');
6633 StripState::String { escaped: false }
6634 }
6635}
6636
6637fn handle_char_strip_state(cleaned: &mut String, ch: char, escaped: bool) -> StripState {
6638 if ch == '\n' {
6639 cleaned.push('\n');
6640 StripState::Code
6641 } else if escaped {
6642 cleaned.push(' ');
6643 StripState::Char { escaped: false }
6644 } else if ch == '\\' {
6645 cleaned.push(' ');
6646 StripState::Char { escaped: true }
6647 } else if ch == '\'' {
6648 cleaned.push(' ');
6649 StripState::Code
6650 } else {
6651 cleaned.push(' ');
6652 StripState::Char { escaped: false }
6653 }
6654}
6655
/// Looks ahead after a `'` to decide whether it opens a char literal
/// (`'x'` or `'\x'`) rather than a lifetime tick like `'a`.
fn starts_rust_char_literal(mut remaining: std::iter::Peekable<std::str::Chars<'_>>) -> bool {
    let first = remaining.next();
    let second = remaining.next();
    let third = remaining.next();

    // Escaped form: `\x'` …
    matches!((first, second, third), (Some('\\'), Some(_), Some('\'')))
        // … or plain form: `x'`.
        || matches!((first, second), (Some(_), Some('\'')))
}
6663
/// Lexer state for `strip_rust_comments_and_strings`.
#[derive(Clone, Copy)]
enum StripState {
    /// Plain code: characters are copied through unchanged.
    Code,
    /// Inside `// ...`: blanked until the next newline.
    LineComment,
    /// Inside `/* ... */`: blanked until the closing delimiter.
    BlockComment,
    /// Inside a string literal; `escaped` is true right after a backslash.
    String { escaped: bool },
    /// Inside a char literal; `escaped` is true right after a backslash.
    Char { escaped: bool },
}
6672
/// True for files the source scan reads: `.rs` sources and `Cargo.toml`.
fn is_source_scan_file(path: &Path) -> bool {
    if path.extension().and_then(|ext| ext.to_str()) == Some("rs") {
        return true;
    }
    path.file_name().and_then(|name| name.to_str()) == Some("Cargo.toml")
}
6677
6678fn classify_manifest_section(line: &str) -> ManifestSection {
6679 let normalized = line.trim_start_matches('[').trim_end_matches(']');
6680 direct_manifest_section(normalized)
6681 .or_else(|| dependency_manifest_section(normalized))
6682 .unwrap_or(ManifestSection::None)
6683}
6684
6685fn direct_manifest_section(normalized: &str) -> Option<ManifestSection> {
6686 if normalized == "package" {
6687 Some(ManifestSection::Package)
6688 } else if normalized == "lib" {
6689 Some(ManifestSection::Lib)
6690 } else {
6691 None
6692 }
6693}
6694
6695fn dependency_manifest_section(normalized: &str) -> Option<ManifestSection> {
6696 [
6697 ("build-dependencies", ManifestSection::BuildDependencies),
6698 ("dev-dependencies", ManifestSection::DevDependencies),
6699 ("dependencies", ManifestSection::Dependencies),
6700 ]
6701 .into_iter()
6702 .find_map(|(section_name, section)| {
6703 manifest_section_matches(normalized, section_name).then_some(section)
6704 })
6705}
6706
/// True when `normalized` is the section itself or a dotted path containing
/// it (e.g. `dependencies.serde`, `target.x.dependencies`).
fn manifest_section_matches(normalized: &str, section_name: &str) -> bool {
    if normalized == section_name {
        return true;
    }
    let dotted_prefix = format!("{section_name}.");
    let dotted_suffix = format!(".{section_name}");
    let dotted_infix = format!(".{section_name}.");
    normalized.starts_with(&dotted_prefix)
        || normalized.ends_with(&dotted_suffix)
        || normalized.contains(&dotted_infix)
}
6713
/// Extracts the crate name from a header like `[dependencies.serde]` or
/// `[target.<t>.dependencies.libc]`; returns `None` for bare section headers.
fn dependency_name_from_section(line: &str, section_name: &str) -> Option<String> {
    let normalized = line.trim_start_matches('[').trim_end_matches(']');

    let direct_prefix = format!("{section_name}.");
    if let Some(name) = normalized.strip_prefix(&direct_prefix) {
        return Some(name.to_string());
    }

    // Nested form: take whatever follows the last ".<section_name>." marker.
    let nested_marker = format!(".{section_name}.");
    normalized
        .rsplit_once(&nested_marker)
        .map(|(_, name)| name.to_string())
}
6728
/// Returns the (quote-stripped) right-hand side of `key = value` when the
/// left-hand side is exactly `key`.
fn parse_manifest_value(line: &str, key: &str) -> Option<String> {
    let (lhs, rhs) = line.split_once('=')?;
    if lhs.trim() != key {
        return None;
    }
    Some(rhs.trim().trim_matches('"').to_string())
}
6738
/// Returns the boolean value of `key = true|false`; any other right-hand
/// side (including quoted strings) yields `None`.
fn parse_manifest_bool(line: &str, key: &str) -> Option<bool> {
    let (lhs, rhs) = line.split_once('=')?;
    // `bool::from_str` accepts exactly "true" / "false", matching the
    // original explicit match.
    (lhs.trim() == key)
        .then(|| rhs.trim().parse::<bool>().ok())
        .flatten()
}
6751
/// Extracts the dependency name from a top-level `key = ...` line; indented
/// lines, comments, and section headers are rejected.
fn parse_dependency_key(raw_line: &str, trimmed_line: &str) -> Option<String> {
    let indented = raw_line.starts_with(' ') || raw_line.starts_with('\t');
    let non_entry = trimmed_line.starts_with('#') || trimmed_line.starts_with('[');
    if indented || non_entry {
        return None;
    }
    trimmed_line
        .split_once('=')
        .map(|(lhs, _)| lhs.trim().to_string())
}
6762
6763fn scan_join_handle_discard(documents: &[SourceDocument], max_evidence: usize) -> PatternScan {
6764 let mut total_hits = 0usize;
6765 let mut evidence = Vec::new();
6766 let mut matched_patterns = BTreeSet::new();
6767
6768 for document in documents {
6769 for (idx, (source_line, scan_line)) in document
6770 .contents
6771 .lines()
6772 .zip(document.risk_contents.lines())
6773 .enumerate()
6774 {
6775 let lowered = scan_line.to_ascii_lowercase();
6776 if JOIN_HANDLE_DISCARD_SPAWN_PATTERNS
6777 .iter()
6778 .any(|pattern| lowered.contains(pattern))
6779 && JOIN_HANDLE_DISCARD_CONTEXT_PATTERNS
6780 .iter()
6781 .any(|pattern| lowered.contains(pattern))
6782 {
6783 total_hits += 1;
6784 matched_patterns.insert("discarded JoinHandle");
6785 if evidence.len() < max_evidence {
6786 evidence.push(ScanEvidence {
6787 path: document.relative_path.clone(),
6788 line_number: idx + 1,
6789 pattern: "discarded JoinHandle",
6790 snippet: source_line.trim().to_string(),
6791 });
6792 }
6793 }
6794 }
6795 }
6796
6797 PatternScan {
6798 total_hits,
6799 matched_patterns: matched_patterns.into_iter().collect(),
6800 evidence,
6801 }
6802}
6803
6804fn scan_dependency_version_drift(documents: &[SourceDocument], max_evidence: usize) -> PatternScan {
6805 let mut total_hits = 0usize;
6806 let mut evidence = Vec::new();
6807 let mut matched_patterns = BTreeSet::new();
6808
6809 for document in documents.iter().filter(|document| {
6810 document
6811 .relative_path
6812 .file_name()
6813 .and_then(|name| name.to_str())
6814 == Some("Cargo.toml")
6815 }) {
6816 for (idx, line) in document.contents.lines().enumerate() {
6817 let trimmed = line.trim();
6818 if trimmed.is_empty() || trimmed.starts_with('#') {
6819 continue;
6820 }
6821
6822 let lowered = trimmed.to_ascii_lowercase();
6823 let Some((_, rhs)) = trimmed.split_once('=') else {
6824 continue;
6825 };
6826 let version_text = rhs.trim().trim_matches('"').to_ascii_lowercase();
6827 let is_manifest_inline_version = lowered.contains("version =");
6828 let is_plain_dependency = !trimmed.starts_with('[')
6829 && !trimmed.starts_with(' ')
6830 && !trimmed.starts_with('\t');
6831
6832 if !(is_manifest_inline_version || is_plain_dependency) {
6833 continue;
6834 }
6835
6836 let pattern = if version_text == "*" {
6837 "wildcard version"
6838 } else if version_text.contains(">=") && !version_text.contains('<') {
6839 "open-ended >= version"
6840 } else {
6841 continue;
6842 };
6843
6844 total_hits += 1;
6845 matched_patterns.insert(pattern);
6846 if evidence.len() < max_evidence {
6847 evidence.push(ScanEvidence {
6848 path: document.relative_path.clone(),
6849 line_number: idx + 1,
6850 pattern,
6851 snippet: trimmed.to_string(),
6852 });
6853 }
6854 }
6855 }
6856
6857 PatternScan {
6858 total_hits,
6859 matched_patterns: matched_patterns.into_iter().collect(),
6860 evidence,
6861 }
6862}
6863
6864fn scan_global_shared_resource_patterns(
6865 documents: &[SourceDocument],
6866 max_evidence: usize,
6867) -> PatternScan {
6868 let mut total_hits = 0usize;
6869 let mut evidence = Vec::new();
6870 let mut matched_patterns = BTreeSet::new();
6871
6872 for document in documents {
6873 for (idx, (source_line, scan_line)) in document
6874 .contents
6875 .lines()
6876 .zip(document.risk_contents.lines())
6877 .enumerate()
6878 {
6879 let trimmed = scan_line.trim_start();
6880 let Some(pattern) = global_shared_pattern_for_line(trimmed) else {
6881 continue;
6882 };
6883
6884 total_hits += 1;
6885 matched_patterns.insert(pattern);
6886 if evidence.len() < max_evidence {
6887 evidence.push(ScanEvidence {
6888 path: document.relative_path.clone(),
6889 line_number: idx + 1,
6890 pattern,
6891 snippet: source_line.trim().to_string(),
6892 });
6893 }
6894 }
6895 }
6896
6897 PatternScan {
6898 total_hits,
6899 matched_patterns: matched_patterns.into_iter().collect(),
6900 evidence,
6901 }
6902}
6903
/// Classifies a line that declares global shared state, returning a label for
/// the most specific matching pattern, or `None`.
///
/// `static mut` is checked before the generic static-declaration prefixes:
/// previously a line like `static mut X: ...` matched the generic branch
/// first, making the more severe `static mut` label unreachable for it.
fn global_shared_pattern_for_line(trimmed: &str) -> Option<&'static str> {
    let lowered = trimmed.to_ascii_lowercase();

    if lowered.contains("static mut") {
        Some("static mut")
    } else if trimmed.starts_with("static ")
        || trimmed.starts_with("pub static ")
        || trimmed.starts_with("pub(crate) static ")
    {
        Some("static declaration")
    } else if lowered.contains("lazy_static!") {
        Some("lazy_static!")
    } else if lowered.contains("oncecell::sync::lazy") {
        Some("oncecell::sync::lazy")
    } else if lowered.contains("oncelock<") {
        Some("oncelock<")
    } else if lowered.contains("lazylock<") {
        Some("lazylock<")
    } else {
        None
    }
}
6926
6927fn scan_restricted_pointer_use(documents: &[SourceDocument], max_evidence: usize) -> PatternScan {
6928 let mut total_hits = 0usize;
6929 let mut evidence = Vec::new();
6930 let mut matched_patterns = BTreeSet::new();
6931
6932 for document in documents {
6933 for (idx, (source_line, scan_line)) in document
6934 .contents
6935 .lines()
6936 .zip(document.risk_contents.lines())
6937 .enumerate()
6938 {
6939 let trimmed = scan_line.trim();
6940 if !is_code_like_scan_line(trimmed) {
6941 continue;
6942 }
6943
6944 let lowered = trimmed.to_ascii_lowercase();
6945 let pattern = if lowered.contains("*const ") {
6946 "*const"
6947 } else if lowered.contains("*mut ") {
6948 "*mut"
6949 } else if lowered.contains("nonnull<") {
6950 "nonnull<"
6951 } else if lowered.contains("addr_of!(") {
6952 "addr_of!"
6953 } else if lowered.contains("extern \"c\" fn") {
6954 "extern fn"
6955 } else {
6956 continue;
6957 };
6958
6959 total_hits += 1;
6960 matched_patterns.insert(pattern);
6961 if evidence.len() < max_evidence {
6962 evidence.push(ScanEvidence {
6963 path: document.relative_path.clone(),
6964 line_number: idx + 1,
6965 pattern,
6966 snippet: source_line.trim().to_string(),
6967 });
6968 }
6969 }
6970 }
6971
6972 PatternScan {
6973 total_hits,
6974 matched_patterns: matched_patterns.into_iter().collect(),
6975 evidence,
6976 }
6977}
6978
fn has_file_with_prefix(files: &[PathBuf], prefix: &str) -> bool {
    // True when any path's final component, lowercased, begins with `prefix`.
    // Non-UTF-8 file names never match.
    for path in files {
        if let Some(name) = path.file_name().and_then(|name| name.to_str()) {
            if name.to_ascii_lowercase().starts_with(prefix) {
                return true;
            }
        }
    }
    false
}
6987
fn has_exact_file_name(files: &[PathBuf], name: &str) -> bool {
    // Case-insensitive exact match against each path's final component;
    // non-UTF-8 file names never match.
    files
        .iter()
        .filter_map(|path| path.file_name())
        .filter_map(|file_name| file_name.to_str())
        .any(|file_name| file_name.eq_ignore_ascii_case(name))
}
6996
fn has_path_component(path: &Path, component: &str) -> bool {
    // True when any single component of `path` equals `component`,
    // ASCII-case-insensitively. Non-UTF-8 components never match.
    for part in path.components() {
        let Some(value) = part.as_os_str().to_str() else {
            continue;
        };
        if value.eq_ignore_ascii_case(component) {
            return true;
        }
    }
    false
}
7005
7006fn compute_tree_sha256(root: &Path, files: &[PathBuf]) -> io::Result<String> {
7007 let mut hasher = Sha256::new();
7008
7009 for relative_path in files {
7010 let path_text = relative_path.display().to_string();
7011 hasher.update(path_text.len().to_string().as_bytes());
7012 hasher.update(b":");
7013 hasher.update(path_text.as_bytes());
7014 hasher.update(b":");
7015
7016 let contents = fs::read(root.join(relative_path))?;
7017 hasher.update(contents.len().to_string().as_bytes());
7018 hasher.update(b":");
7019 hasher.update(&contents);
7020 hasher.update(b"\n");
7021 }
7022
7023 Ok(hex_encode(&hasher.finalize()))
7024}
7025
7026fn scan_vcs_info(root: &Path) -> VcsInfo {
7027 let path = root.join(".cargo_vcs_info.json");
7028 let Ok(contents) = fs::read_to_string(path) else {
7029 return VcsInfo::default();
7030 };
7031 let Ok(value) = serde_json::from_str::<Value>(&contents) else {
7032 return VcsInfo::default();
7033 };
7034
7035 VcsInfo {
7036 git_commit: value
7037 .get("git")
7038 .and_then(|git| git.get("sha1"))
7039 .and_then(Value::as_str)
7040 .map(ToOwned::to_owned),
7041 path_in_vcs: value
7042 .get("path_in_vcs")
7043 .and_then(Value::as_str)
7044 .map(ToOwned::to_owned),
7045 }
7046}
7047
7048fn scan_artifact_stem(report: &CrateSourceScanReport) -> String {
7049 format!("{}_scan", sanitize_filename_component(&report.crate_name))
7050}
7051
7052fn prepare_scan_output_run_at(
7053 base_output_root: &Path,
7054 timestamp_utc: &str,
7055) -> io::Result<ScanRunPaths> {
7056 fs::create_dir_all(base_output_root)?;
7057 let run_dir = create_unique_run_dir(base_output_root, &format!("dsfb-gray-{timestamp_utc}"))?;
7058 Ok(ScanRunPaths {
7059 base_output_root: base_output_root.to_path_buf(),
7060 run_dir,
7061 timestamp_utc: timestamp_utc.to_string(),
7062 })
7063}
7064
fn create_unique_run_dir(base_output_root: &Path, base_name: &str) -> io::Result<PathBuf> {
    // Creates a collision-free run directory under `base_output_root`,
    // trying `base_name` first and then `base_name-01` .. `base_name-999`.
    //
    // Fix: the previous `exists()` check followed by `create_dir_all` (which
    // succeeds on an already-existing directory) was racy — two concurrent
    // runs could both "win" the same directory. `fs::create_dir` fails with
    // `AlreadyExists` on collision, making check-and-create atomic.
    fs::create_dir_all(base_output_root)?;

    for suffix in 0..=999u32 {
        let candidate = if suffix == 0 {
            base_output_root.join(base_name)
        } else {
            base_output_root.join(format!("{base_name}-{suffix:02}"))
        };
        match fs::create_dir(&candidate) {
            Ok(()) => return Ok(candidate),
            // Taken by another run (or an existing file): try the next suffix.
            Err(err) if err.kind() == io::ErrorKind::AlreadyExists => continue,
            Err(err) => return Err(err),
        }
    }

    Err(io::Error::new(
        io::ErrorKind::AlreadyExists,
        format!("unable to create unique scan output directory for {base_name}"),
    ))
}
7085
7086fn scan_run_timestamp(now: OffsetDateTime) -> String {
7087 format!(
7088 "{:04}-{:02}-{:02}T{:02}-{:02}-{:02}Z",
7089 now.year(),
7090 u8::from(now.month()),
7091 now.day(),
7092 now.hour(),
7093 now.minute(),
7094 now.second()
7095 )
7096}
7097
7098fn collect_legacy_scan_artifacts(legacy_root: &Path) -> io::Result<Vec<PathBuf>> {
7099 let mut paths = Vec::new();
7100 for entry in fs::read_dir(legacy_root)? {
7101 let entry = entry?;
7102 let path = entry.path();
7103 if !path.is_file() {
7104 continue;
7105 }
7106 let Some(file_name) = path.file_name().and_then(|value| value.to_str()) else {
7107 continue;
7108 };
7109 if is_legacy_scan_artifact(file_name) {
7110 paths.push(path);
7111 }
7112 }
7113 paths.sort();
7114 Ok(paths)
7115}
7116
fn is_legacy_scan_artifact(file_name: &str) -> bool {
    // Pre-migration artifact names all share the `_scan` stem with one of
    // four known extensions.
    const LEGACY_SUFFIXES: [&str; 4] = [
        "_scan.txt",
        "_scan.sarif.json",
        "_scan.intoto.json",
        "_scan.dsse.json",
    ];
    LEGACY_SUFFIXES
        .iter()
        .any(|suffix| file_name.ends_with(suffix))
}
7123
fn sanitize_filename_component(value: &str) -> String {
    // ASCII alphanumerics are lowercased, everything else becomes '_', and
    // leading/trailing underscores are then stripped.
    let mapped: String = value
        .chars()
        .map(|ch| {
            if ch.is_ascii_alphanumeric() {
                ch.to_ascii_lowercase()
            } else {
                '_'
            }
        })
        .collect();
    mapped.trim_matches('_').to_string()
}
7135
7136fn parse_secret_key(secret: &str) -> io::Result<[u8; 32]> {
7137 let trimmed = secret.trim();
7138 let bytes = if trimmed.len() == 64 && trimmed.chars().all(|ch| ch.is_ascii_hexdigit()) {
7139 hex_decode(trimmed)?
7140 } else {
7141 BASE64_STANDARD.decode(trimmed).map_err(|err| {
7142 io::Error::new(
7143 io::ErrorKind::InvalidInput,
7144 format!("invalid base64 signing key: {err}"),
7145 )
7146 })?
7147 };
7148
7149 <[u8; 32]>::try_from(bytes.as_slice()).map_err(|_| {
7150 io::Error::new(
7151 io::ErrorKind::InvalidInput,
7152 "signing key must decode to exactly 32 bytes",
7153 )
7154 })
7155}
7156
fn hex_encode(bytes: &[u8]) -> String {
    // Lowercase hex rendering, two characters per byte.
    bytes.iter().map(|byte| format!("{byte:02x}")).collect()
}
7166
7167fn hex_decode(text: &str) -> io::Result<Vec<u8>> {
7168 let mut bytes = Vec::with_capacity(text.len() / 2);
7169 let mut pairs = text.as_bytes().chunks_exact(2);
7170 for pair in &mut pairs {
7171 let high = decode_hex_nibble(pair[0])?;
7172 let low = decode_hex_nibble(pair[1])?;
7173 bytes.push((high << 4) | low);
7174 }
7175 Ok(bytes)
7176}
7177
fn decode_hex_nibble(ch: u8) -> io::Result<u8> {
    // Maps one ASCII hex digit (either case) to its value in 0..=15.
    if ch.is_ascii_digit() {
        Ok(ch - b'0')
    } else if (b'a'..=b'f').contains(&ch) {
        Ok(ch - b'a' + 10)
    } else if (b'A'..=b'F').contains(&ch) {
        Ok(ch - b'A' + 10)
    } else {
        Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            "invalid hex digit in signing key",
        ))
    }
}
7189
7190fn sha256_hex(bytes: &[u8]) -> String {
7191 let mut hasher = Sha256::new();
7192 hasher.update(bytes);
7193 hex_encode(&hasher.finalize())
7194}
7195
fn dsse_pae(payload_type: &str, payload: &[u8]) -> Vec<u8> {
    // DSSE v1 Pre-Authentication Encoding:
    // "DSSEv1 <len(type)> <type> <len(payload)> <payload>".
    let header = format!(
        "DSSEv1 {} {} {} ",
        payload_type.len(),
        payload_type,
        payload.len()
    );
    let mut encoded = header.into_bytes();
    encoded.extend_from_slice(payload);
    encoded
}
7208
fn yes_no(value: bool) -> &'static str {
    // Renders a boolean as the report-friendly strings "yes"/"no".
    match value {
        true => "yes",
        false => "no",
    }
}
7216
#[cfg(test)]
mod tests {
    //! Fixture-driven tests: each test materializes a tiny crate under a
    //! process/time-unique temp directory, runs the scanner against it, and
    //! removes the fixture afterwards (best-effort cleanup via `let _ =`).
    use super::*;
    use std::time::{SystemTime, UNIX_EPOCH};

    // A crate using tokio mpsc channels and spawn_blocking should trip the
    // async (H-ASYNC-01) and channel (H-CHAN-01) heuristics.
    #[test]
    fn scan_finds_async_and_channel_motifs() {
        let fixture_root = unique_fixture_root();
        let src_dir = fixture_root.join("src");
        fs::create_dir_all(&src_dir).expect("create fixture dir");
        fs::write(
            fixture_root.join("Cargo.toml"),
            "[package]\nname = \"fixture-scan\"\nversion = \"0.1.0\"\n",
        )
        .expect("write manifest");
        fs::write(
            src_dir.join("lib.rs"),
            "use tokio::sync::mpsc;\nuse tokio::task::spawn_blocking;\nfn f(){ let _ = mpsc::channel::<u8>(16); let _ = spawn_blocking(|| 1); }\n",
        )
        .expect("write source");

        let report = scan_crate_source(&fixture_root).expect("scan succeeds");
        let matched_ids: BTreeSet<&str> = report
            .matched_heuristics
            .iter()
            .map(|matched| matched.heuristic.id.0)
            .collect();

        assert!(matched_ids.contains("H-ASYNC-01"));
        assert!(matched_ids.contains("H-CHAN-01"));

        let _ = fs::remove_dir_all(&fixture_root);
    }

    // A well-documented no_std crate with forbid(unsafe_code), property tests,
    // and full policy files should surface the corresponding certification
    // signals, and the rendered report should contain every major section.
    #[test]
    fn scan_reports_certification_signals() {
        let fixture_root = unique_fixture_root();
        let src_dir = fixture_root.join("src");
        let tests_dir = fixture_root.join("tests");
        let docs_dir = fixture_root.join("docs");
        fs::create_dir_all(&src_dir).expect("create src dir");
        fs::create_dir_all(&tests_dir).expect("create tests dir");
        fs::create_dir_all(&docs_dir).expect("create docs dir");

        fs::write(
            fixture_root.join("Cargo.toml"),
            "[package]\nname = \"fixture-cert\"\nversion = \"0.1.0\"\nedition = \"2021\"\nlicense = \"MIT\"\nrust-version = \"1.75\"\nrepository = \"https://example.invalid/repo\"\ndocumentation = \"https://docs.example.invalid\"\nhomepage = \"https://example.invalid\"\nreadme = \"README.md\"\n\n[dependencies]\nembedded-hal = \"1\"\n\n[dev-dependencies]\nproptest = \"1\"\n",
        )
        .expect("write manifest");
        fs::write(
            src_dir.join("lib.rs"),
            "#![no_std]\n#![forbid(unsafe_code)]\n#[cfg(test)]\nmod tests {\n    #[test]\n    fn smoke() { assert_eq!(2 + 2, 4); }\n}\n",
        )
        .expect("write source");
        fs::write(
            tests_dir.join("prop.rs"),
            "use proptest::prelude::*;\nproptest! {\n    #[test]\n    fn roundtrip(x in 0u8..) { prop_assert_eq!(x, x); }\n}\n",
        )
        .expect("write property test");
        fs::write(
            src_dir.join("risky.rs"),
            "use core::cell::RefCell;\nfn control_path(state: Option<u8>) -> u8 {\n    let cell = RefCell::new(0u8);\n    match state { Some(v) => v, _ => *cell.borrow() }\n}\nfn wait_for_hw() { let _ = std::time::Duration::from_millis(5); }\n",
        )
        .expect("write risky source");
        fs::write(fixture_root.join("README.md"), "# Fixture\n").expect("write readme");
        fs::write(fixture_root.join("SAFETY.md"), "No unsafe code.\n").expect("write safety");
        fs::write(fixture_root.join("SECURITY.md"), "Security policy.\n").expect("write security");
        fs::write(fixture_root.join("LICENSE"), "MIT\n").expect("write license");
        fs::write(docs_dir.join("design.md"), "Design notes.\n").expect("write doc");

        let report = scan_crate_source(&fixture_root).expect("scan succeeds");

        // Structured certification fields derived from the fixture layout.
        assert!(report.certification.runtime.no_std_declared);
        assert_eq!(report.certification.runtime.alloc_crate_hits, 0);
        assert_eq!(report.certification.runtime.heap_allocation_hits, 0);
        assert_eq!(
            report.certification.safety.unsafe_policy,
            UnsafeCodePolicy::Forbid
        );
        assert_eq!(report.certification.safety.unsafe_sites, 0);
        assert!(report.certification.verification.tests_dir_present);
        assert!(report.certification.verification.property_testing_hits > 0);
        assert!(report.certification.lifecycle.safety_md_present);
        assert_eq!(report.certification.build.direct_dependencies, 1);
        assert_eq!(report.certification.build.dev_dependencies, 1);

        // Rendered text report must include all major sections and badges.
        let rendered = render_scan_report(&report);
        assert!(rendered.contains("Audit Summary"));
        assert!(rendered.contains("Add dsfb-gray report badge to your GitHub repo README"));
        assert!(rendered.contains("Overall Score and Subscores"));
        assert!(rendered.contains("Scanner Crate: https://crates.io/crates/dsfb-gray"));
        assert!(rendered.contains("DSFB-gray crate: https://crates.io/crates/dsfb-gray"));
        assert!(rendered.contains("https://img.shields.io/badge/DSFB%20Gray%20Audit-"));
        assert!(rendered.contains("[![DSFB Gray Audit:"));
        assert!(rendered.contains("(./fixture_cert_scan.txt)"));
        assert!(rendered.contains("Score Summary Table"));
        assert!(rendered.contains("Advisory Broad Subscores"));
        assert!(rendered
            .contains("| Section | Score% | Weight | Points | Checks |"));
        assert!(rendered.contains("Scoring guideline:"));
        assert!(rendered.contains("not a compliance certification"));
        assert!(rendered.contains("no_std declared: yes"));
        assert!(rendered.contains("no_alloc candidate: yes"));
        assert!(rendered.contains("no_unsafe candidate: yes"));
        assert!(rendered.contains("NASA/JPL Power of Ten Audit"));
        assert!(rendered.contains("Advanced Structural Risk Checks"));
        assert!(rendered.contains("Top Findings"));
        assert!(rendered.contains("Code Quality Themes"));
        assert!(rendered.contains("Verification Suggestions"));
        assert!(rendered.contains("Evidence Ledger"));
        assert!(rendered.contains("Conclusion Lenses"));
        assert!(rendered.contains("Criticality Heatmap"));
        assert!(rendered.contains("row format: path:line `function` [bar]"));
        assert!(rendered.contains("JPL-R4 elevated"));
        assert!(rendered.contains("TIME-WAIT elevated"));
        assert!(rendered.contains("Remediation:"));
        assert!(rendered.contains("TIME-WAIT-01"));

        let _ = fs::remove_dir_all(&fixture_root);
    }

    // Exercises every structured output format (text, SARIF, in-toto
    // statement, DSSE envelope — unsigned and ed25519-signed) plus artifact
    // export, validating key fields of each rendering.
    #[test]
    fn scan_renders_structured_attestations() {
        let fixture_root = unique_fixture_root();
        let src_dir = fixture_root.join("src");
        fs::create_dir_all(&src_dir).expect("create src dir");

        fs::write(
            fixture_root.join("Cargo.toml"),
            "[package]\nname = \"fixture-attest\"\nversion = \"0.1.0\"\n",
        )
        .expect("write manifest");
        fs::write(
            fixture_root.join(".cargo_vcs_info.json"),
            "{ \"git\": { \"sha1\": \"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef\" }, \"path_in_vcs\": \"fixture-attest\" }\n",
        )
        .expect("write vcs info");
        fs::write(
            src_dir.join("lib.rs"),
            "use tokio::sync::mpsc;\nfn f(){ let _ = mpsc::channel::<u8>(4); }\n",
        )
        .expect("write source");

        let report = scan_crate_source(&fixture_root).expect("scan succeeds");
        let rendered = render_scan_report(&report);
        assert!(rendered.contains("Generated At (UTC):"));
        assert!(rendered.contains("Source SHA-256:"));
        assert!(rendered.contains("Scoring Version: dsfb-assurance-score-v1"));
        assert!(rendered.contains("Markdown snippet:"));
        assert!(rendered.contains(
            "Treat this report as a structured guideline for improvement and review readiness."
        ));
        assert!(rendered.contains("Conclusion Lenses"));

        // SARIF rendering: version, provenance properties, and rule help text.
        let sarif_json = render_scan_sarif(&report);
        let sarif_value: Value = serde_json::from_str(&sarif_json).expect("parse sarif");
        assert_eq!(sarif_value["version"], "2.1.0");
        assert_eq!(
            sarif_value["runs"][0]["properties"]["sourceSha256"],
            report.source_sha256
        );
        assert_eq!(
            sarif_value["runs"][0]["properties"]["auditScore"]["method"],
            AUDIT_SCORE_METHOD
        );
        assert_eq!(
            sarif_value["runs"][0]["properties"]["auditMode"],
            "canonical-broad-audit"
        );
        assert!(sarif_value["runs"][0]["properties"]["guidanceSemantics"]
            ["nonCertificationStatement"]
            .as_str()
            .expect("non-cert statement")
            .contains("does not certify compliance"));
        assert_eq!(
            sarif_value["runs"][0]["tool"]["driver"]["rules"][0]["help"]["text"]
                .as_str()
                .expect("rule help"),
            heuristic_remediation("H-CHAN-01")
        );

        // in-toto statement rendering: subject digest and predicate summary.
        let statement_json = render_scan_attestation_statement(&report);
        let statement_value: Value =
            serde_json::from_str(&statement_json).expect("parse statement");
        assert_eq!(statement_value["_type"], "https://in-toto.io/Statement/v1");
        assert_eq!(
            statement_value["subject"][0]["digest"]["sha256"],
            report.source_sha256
        );
        assert_eq!(
            statement_value["predicate"]["summary"]["auditScore"]["method"],
            AUDIT_SCORE_METHOD
        );
        assert_eq!(
            statement_value["predicate"]["scanner"]["auditMode"],
            "canonical-broad-audit"
        );
        assert!(
            statement_value["predicate"]["guidanceSemantics"]["nonCertificationStatement"]
                .as_str()
                .expect("predicate non-cert statement")
                .contains("does not certify compliance")
        );
        assert!(
            statement_value["predicate"]["summary"]["matchedHeuristics"][0]["structuralPrior"]
                .is_object()
        );

        // DSSE envelope without a signer: empty signatures array.
        let unsigned_dsse = render_scan_dsse_envelope(&report, None);
        let unsigned_value: Value =
            serde_json::from_str(&unsigned_dsse).expect("parse unsigned dsse");
        assert_eq!(unsigned_value["payloadType"], DSSE_PAYLOAD_TYPE);
        assert_eq!(
            unsigned_value["signatures"]
                .as_array()
                .expect("signatures array")
                .len(),
            0
        );

        // Signed envelope: a fixed 32-byte hex seed yields one keyed signature.
        let signer = ScanSigningKey::from_secret_text(
            "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
            Some("test-ed25519"),
        )
        .expect("create signer");
        let signed_dsse = render_scan_dsse_envelope(&report, Some(&signer));
        let signed_value: Value = serde_json::from_str(&signed_dsse).expect("parse signed dsse");
        assert_eq!(
            signed_value["signatures"]
                .as_array()
                .expect("signatures array")
                .len(),
            1
        );
        assert_eq!(signed_value["signatures"][0]["keyid"], "test-ed25519");

        // Export: all four artifacts land in the target dir with sanitized names.
        let export_dir = fixture_root.join("exports");
        let paths = export_scan_artifacts(&report, &export_dir, Some(&signer))
            .expect("export artifacts succeeds");
        assert!(paths.signed);
        assert_eq!(paths.output_dir, export_dir);
        assert!(paths.report_path.exists());
        assert!(paths.sarif_path.exists());
        assert!(paths.statement_path.exists());
        assert!(paths.dsse_path.exists());
        assert_eq!(
            paths
                .report_path
                .file_name()
                .and_then(|value| value.to_str()),
            Some("fixture_attest_scan.txt")
        );

        let _ = fs::remove_dir_all(&fixture_root);
    }

    // Priors derived from a scan must stay within the documented bounds
    // (confidence in [0.15, 0.95], drift scale in [0.75, 1.0]).
    #[test]
    fn derived_static_priors_are_bounded() {
        let fixture_root = unique_fixture_root();
        let src_dir = fixture_root.join("src");
        fs::create_dir_all(&src_dir).expect("create src dir");
        fs::write(
            fixture_root.join("Cargo.toml"),
            "[package]\nname = \"fixture-prior\"\nversion = \"0.1.0\"\n",
        )
        .expect("write manifest");
        fs::write(
            src_dir.join("lib.rs"),
            "use tokio::sync::mpsc;\nuse tokio::task::spawn_blocking;\nfn f(){ let _ = mpsc::channel::<u8>(4); let _ = mpsc::channel::<u8>(8); let _ = spawn_blocking(|| 1); }\n",
        )
        .expect("write source");

        let report = scan_crate_source(&fixture_root).expect("scan succeeds");
        let priors = derive_static_priors_from_scan(&report);

        assert!(!priors.is_empty());
        let chan_prior = priors
            .get(crate::HeuristicId("H-CHAN-01"))
            .expect("channel prior");
        assert!(chan_prior.confidence >= 0.15);
        assert!(chan_prior.confidence <= 0.95);
        assert!(chan_prior.drift_scale >= 0.75);
        assert!(chan_prior.drift_scale <= 1.0);

        let _ = fs::remove_dir_all(&fixture_root);
    }

    // Two runs with the same timestamp must get distinct directories: the
    // second gets a "-01" suffix appended.
    #[test]
    fn scan_output_run_paths_are_timestamped_and_unique() {
        let fixture_root = unique_fixture_root();
        fs::create_dir_all(&fixture_root).expect("create fixture root");

        let first = prepare_scan_output_run_at(&fixture_root, "2026-04-14T01-23-45Z")
            .expect("create first run dir");
        let second = prepare_scan_output_run_at(&fixture_root, "2026-04-14T01-23-45Z")
            .expect("create second run dir");

        assert_eq!(first.base_output_root, fixture_root);
        assert_eq!(first.timestamp_utc, "2026-04-14T01-23-45Z");
        assert!(
            first.run_dir.ends_with("dsfb-gray-2026-04-14T01-23-45Z"),
            "unexpected first run dir: {}",
            first.run_dir.display()
        );
        assert!(
            second
                .run_dir
                .ends_with("dsfb-gray-2026-04-14T01-23-45Z-01"),
            "unexpected second run dir: {}",
            second.run_dir.display()
        );

        let _ = fs::remove_dir_all(&fixture_root);
    }

    // Migration moves each legacy artifact exactly once; a second invocation
    // is a no-op returning None.
    #[test]
    fn legacy_scan_artifacts_migrate_once() {
        let fixture_root = unique_fixture_root();
        fs::create_dir_all(&fixture_root).expect("create fixture root");
        let output_root = fixture_root.join(DEFAULT_SCAN_OUTPUT_ROOT);

        for name in [
            "tokio_scan.txt",
            "tokio_scan.sarif.json",
            "tokio_scan.intoto.json",
            "tokio_scan.dsse.json",
        ] {
            fs::write(fixture_root.join(name), "fixture").expect("write legacy scan artifact");
        }

        let migration_dir = migrate_legacy_scan_artifacts(&fixture_root, &output_root)
            .expect("migrate legacy scan artifacts")
            .expect("migration dir");
        assert!(migration_dir.exists());
        for name in [
            "tokio_scan.txt",
            "tokio_scan.sarif.json",
            "tokio_scan.intoto.json",
            "tokio_scan.dsse.json",
        ] {
            assert!(
                migration_dir.join(name).exists(),
                "missing migrated file {name}"
            );
            assert!(
                !fixture_root.join(name).exists(),
                "legacy file still present {name}"
            );
        }

        assert!(migrate_legacy_scan_artifacts(&fixture_root, &output_root)
            .expect("rerun migration")
            .is_none());

        let _ = fs::remove_dir_all(&fixture_root);
    }

    // A deliberately risky fixture (ISR allocation, unbounded collect,
    // pending future, detached tasks, panicking Drop, relaxed atomics, mixed
    // clocks, short writes, async recursion, unbounded channels, copies, and
    // wildcard dependency versions) should elevate every extended structural
    // check listed below.
    #[test]
    fn scan_reports_extended_structural_audits() {
        let fixture_root = unique_fixture_root();
        let src_dir = fixture_root.join("src");
        fs::create_dir_all(&src_dir).expect("create src dir");

        fs::write(
            fixture_root.join("Cargo.toml"),
            "[package]\nname = \"fixture-extended\"\nversion = \"0.1.0\"\n\n[dependencies]\ntokio = \"*\"\nbytes = { version = \">=1\" }\n",
        )
        .expect("write manifest");
        fs::write(
            src_dir.join("lib.rs"),
            "use core::future::Future;\nuse core::pin::Pin;\nuse core::sync::atomic::{AtomicUsize, Ordering};\nuse core::task::{Context, Poll};\nuse std::io::Write;\n\n#[interrupt]\nfn irq() {\n    let _ = Box::new(1u8);\n}\n\nfn iter_unbounded(items: impl Iterator<Item = u8>) -> usize {\n    items.collect::<Vec<_>>().len()\n}\n\nstruct PendingFuture;\nimpl Future for PendingFuture {\n    type Output = ();\n    fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {\n        Poll::Pending\n    }\n}\n\nasync fn launch() {\n    let _ = tokio::spawn(async {});\n    let _ = tokio::task::spawn_blocking(|| 1usize);\n}\n\nstruct Boom;\nimpl Drop for Boom {\n    fn drop(&mut self) {\n        panic!(\"boom\");\n    }\n}\n\nfn leader_state(counter: &AtomicUsize) -> usize {\n    let state = counter.load(Ordering::Relaxed);\n    if state > 0 { state } else { 0 }\n}\n\nfn mixed_clock() {\n    let _ = std::time::Instant::now();\n    let _ = std::time::SystemTime::now();\n}\n\nfn short_write(w: &mut dyn Write, buf: &[u8]) {\n    let _ = w.write(buf);\n}\n\n#[async_recursion]\nasync fn recurse() {\n    recurse().await;\n}\n\nfn queue() {\n    let _ = tokio::sync::mpsc::unbounded_channel::<u8>();\n}\n\nasync fn read_packet(packet: &[u8]) {\n    let payload = packet.to_vec();\n    drop(payload);\n}\n",
        )
        .expect("write source");

        let report = scan_crate_source(&fixture_root).expect("scan succeeds");
        let rendered = render_scan_report(&report);

        for expected in [
            "ITER-UNB elevated",
            "ISR-SAFE elevated",
            "FUTURE-WAKE elevated",
            "TASK-LEAK elevated",
            "DROP-PANIC elevated",
            "ATOMIC-RELAXED elevated",
            "CLOCK-MIX elevated",
            "SHORT-WRITE elevated",
            "ASYNC-RECUR elevated",
            "CHAN-UNB elevated",
            "ZERO-COPY elevated",
            "CARGO-VERS elevated",
        ] {
            assert!(
                rendered.contains(expected),
                "missing expected check {expected}"
            );
        }

        let _ = fs::remove_dir_all(&fixture_root);
    }

    // Temp-dir path made unique by combining the process id with a
    // nanosecond-resolution timestamp, so parallel test runs don't collide.
    fn unique_fixture_root() -> PathBuf {
        std::env::temp_dir().join(format!(
            "dsfb-gray-scan-{}-{}",
            std::process::id(),
            SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .expect("system clock before epoch")
                .as_nanos()
        ))
    }
}