use crate::{NodeKind, Workflow};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

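/// Severity of a security finding, ordered from least severe (`Info`) to most severe (`Critical`).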
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum RiskLevel {
    Info,
    Low,
    Medium,
    High,
    Critical,
}

impl std::fmt::Display for RiskLevel {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            RiskLevel::Info => write!(f, "INFO"),
            RiskLevel::Low => write!(f, "LOW"),
            RiskLevel::Medium => write!(f, "MEDIUM"),
            RiskLevel::High => write!(f, "HIGH"),
            RiskLevel::Critical => write!(f, "CRITICAL"),
        }
    }
}

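/// Broad category of threat a finding belongs to, loosely mirroring the OWASP Top 10 areas.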
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ThreatCategory {
    Injection,
    Xss,
    DataExposure,
    AuthN,
    AccessControl,
    Misconfiguration,
    Deserialization,
    KnownVulnerabilities,
    InsufficientLogging,
    DataPrivacy,
}

impl std::fmt::Display for ThreatCategory {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ThreatCategory::Injection => write!(f, "Injection"),
            ThreatCategory::Xss => write!(f, "XSS"),
            ThreatCategory::DataExposure => write!(f, "Data Exposure"),
            ThreatCategory::AuthN => write!(f, "Authentication"),
            ThreatCategory::AccessControl => write!(f, "Access Control"),
            ThreatCategory::Misconfiguration => write!(f, "Misconfiguration"),
            ThreatCategory::Deserialization => write!(f, "Deserialization"),
            ThreatCategory::KnownVulnerabilities => write!(f, "Known Vulnerabilities"),
            ThreatCategory::InsufficientLogging => write!(f, "Insufficient Logging"),
            ThreatCategory::DataPrivacy => write!(f, "Data Privacy"),
        }
    }
}

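/// Compliance standard that findings can be checked against.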
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum ComplianceStandard {
    Gdpr,
    Hipaa,
    PciDss,
    Sox,
    OwaspTop10,
}

impl std::fmt::Display for ComplianceStandard {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ComplianceStandard::Gdpr => write!(f, "GDPR"),
            ComplianceStandard::Hipaa => write!(f, "HIPAA"),
            ComplianceStandard::PciDss => write!(f, "PCI-DSS"),
            ComplianceStandard::Sox => write!(f, "SOX"),
            ComplianceStandard::OwaspTop10 => write!(f, "OWASP Top 10"),
        }
    }
}

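/// A single issue discovered during a scan; optional context is attached via the `with_*` builder methods.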
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SecurityFinding {
    pub id: String,
    pub risk_level: RiskLevel,
    pub category: ThreatCategory,
    pub title: String,
    pub description: String,
    pub node_id: Option<String>,
    pub node_name: Option<String>,
    pub affected_components: Vec<String>,
    pub remediation: String,
    pub owasp_category: Option<String>,
    pub cwe_id: Option<u32>,
    pub compliance_violations: Vec<ComplianceStandard>,
}

impl SecurityFinding {
    pub fn new(
        id: impl Into<String>,
        risk_level: RiskLevel,
        category: ThreatCategory,
        title: impl Into<String>,
        description: impl Into<String>,
    ) -> Self {
        Self {
            id: id.into(),
            risk_level,
            category,
            title: title.into(),
            description: description.into(),
            node_id: None,
            node_name: None,
            affected_components: Vec::new(),
            remediation: String::new(),
            owasp_category: None,
            cwe_id: None,
            compliance_violations: Vec::new(),
        }
    }

    pub fn with_node(mut self, node_id: String, node_name: String) -> Self {
        self.node_id = Some(node_id);
        self.node_name = Some(node_name);
        self
    }

    pub fn with_remediation(mut self, remediation: impl Into<String>) -> Self {
        self.remediation = remediation.into();
        self
    }

    pub fn with_owasp(mut self, category: impl Into<String>) -> Self {
        self.owasp_category = Some(category.into());
        self
    }

    pub fn with_cwe(mut self, cwe_id: u32) -> Self {
        self.cwe_id = Some(cwe_id);
        self
    }

    pub fn with_compliance(mut self, standard: ComplianceStandard) -> Self {
        self.compliance_violations.push(standard);
        self
    }

    pub fn with_component(mut self, component: impl Into<String>) -> Self {
        self.affected_components.push(component.into());
        self
    }
}

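/// Complete result of scanning a workflow: the individual findings, an overall score,
/// per-standard compliance status, a risk summary, and generated recommendations.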
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SecurityAuditReport {
    pub workflow_id: String,
    pub workflow_name: String,
    pub findings: Vec<SecurityFinding>,
    pub security_score: f64,
    pub compliance_status: HashMap<ComplianceStandard, bool>,
    pub risk_summary: RiskSummary,
    pub scanned_at: String,
    pub recommendations: Vec<String>,
}

impl SecurityAuditReport {
    pub fn findings_by_severity(&self, level: RiskLevel) -> Vec<&SecurityFinding> {
        self.findings
            .iter()
            .filter(|f| f.risk_level == level)
            .collect()
    }

    pub fn findings_by_category(&self, category: ThreatCategory) -> Vec<&SecurityFinding> {
        self.findings
            .iter()
            .filter(|f| f.category == category)
            .collect()
    }

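    /// Returns `true` if the report contains no critical or high severity findings.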
    pub fn passed(&self) -> bool {
        self.findings_by_severity(RiskLevel::Critical).is_empty()
            && self.findings_by_severity(RiskLevel::High).is_empty()
    }

    pub fn total_findings(&self) -> usize {
        self.findings.len()
    }
}

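/// Number of findings at each risk level.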
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct RiskSummary {
    pub critical: usize,
    pub high: usize,
    pub medium: usize,
    pub low: usize,
    pub info: usize,
}

impl RiskSummary {
    pub fn from_findings(findings: &[SecurityFinding]) -> Self {
        let mut summary = Self::default();
        for finding in findings {
            match finding.risk_level {
                RiskLevel::Critical => summary.critical += 1,
                RiskLevel::High => summary.high += 1,
                RiskLevel::Medium => summary.medium += 1,
                RiskLevel::Low => summary.low += 1,
                RiskLevel::Info => summary.info += 1,
            }
        }
        summary
    }

    pub fn total(&self) -> usize {
        self.critical + self.high + self.medium + self.low + self.info
    }

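    /// Weighted risk score: each critical finding adds 10.0, high 5.0, medium 2.0,
    /// and low 0.5 (info findings add nothing), capped at 100.0.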
    pub fn risk_score(&self) -> f64 {
        let weighted = (self.critical as f64 * 10.0)
            + (self.high as f64 * 5.0)
            + (self.medium as f64 * 2.0)
            + (self.low as f64 * 0.5);

        weighted.min(100.0)
    }
}

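/// Controls which checks the scanner runs, which compliance standards to evaluate,
/// and any additional secret patterns to search for.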
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SecurityConfig {
    pub check_prompt_injection: bool,
    pub check_sql_injection: bool,
    pub check_command_injection: bool,
    pub check_xss: bool,
    pub check_secrets: bool,
    pub check_data_privacy: bool,
    pub check_compliance: bool,
    pub compliance_standards: Vec<ComplianceStandard>,
    pub custom_secret_patterns: Vec<String>,
}

impl Default for SecurityConfig {
    fn default() -> Self {
        Self {
            check_prompt_injection: true,
            check_sql_injection: true,
            check_command_injection: true,
            check_xss: true,
            check_secrets: true,
            check_data_privacy: true,
            check_compliance: true,
            compliance_standards: vec![ComplianceStandard::OwaspTop10],
            custom_secret_patterns: Vec::new(),
        }
    }
}

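/// Scans a [`Workflow`] for common issues such as injection risks, hardcoded
/// secrets, and PII handling, producing a [`SecurityAuditReport`].
///
/// Usage sketch (mirrors the tests at the bottom of this module):
///
/// ```ignore
/// let workflow = WorkflowBuilder::new("example").start("Start").end("End").build();
/// let report = SecurityScanner::default().scan(&workflow);
/// assert!(report.passed());
/// ```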
pub struct SecurityScanner {
    config: SecurityConfig,
}

impl SecurityScanner {
    pub fn new(config: SecurityConfig) -> Self {
        Self { config }
    }

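    /// Runs every enabled check against the workflow and assembles the full audit report.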
    pub fn scan(&self, workflow: &Workflow) -> SecurityAuditReport {
        let mut findings = Vec::new();

        if self.config.check_prompt_injection {
            findings.extend(self.check_prompt_injection(workflow));
        }

        if self.config.check_sql_injection {
            findings.extend(self.check_sql_injection(workflow));
        }

        if self.config.check_command_injection {
            findings.extend(self.check_command_injection(workflow));
        }

        if self.config.check_xss {
            findings.extend(self.check_xss(workflow));
        }

        if self.config.check_secrets {
            findings.extend(self.check_secrets(workflow));
        }

        if self.config.check_data_privacy {
            findings.extend(self.check_data_privacy(workflow));
        }

        let risk_summary = RiskSummary::from_findings(&findings);
        let security_score = 100.0 - risk_summary.risk_score();

        let compliance_status = self.check_compliance(workflow, &findings);

        let recommendations = self.generate_recommendations(&findings, &risk_summary);

        SecurityAuditReport {
            workflow_id: workflow.metadata.id.to_string(),
            workflow_name: workflow.metadata.name.clone(),
            findings,
            security_score,
            compliance_status,
            risk_summary,
            scanned_at: chrono::Utc::now().to_rfc3339(),
            recommendations,
        }
    }

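    /// Flags LLM prompts that interpolate raw user input and prompts containing
    /// phrases commonly associated with prompt injection attempts.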
    fn check_prompt_injection(&self, workflow: &Workflow) -> Vec<SecurityFinding> {
        let mut findings = Vec::new();

        let injection_patterns = [
            "ignore previous instructions",
            "ignore all previous",
            "disregard all prior",
            "forget previous",
            "new instructions:",
            "system:",
            "assistant:",
            "{{",
            "}}",
        ];

        for node in &workflow.nodes {
            if let NodeKind::LLM(llm_config) = &node.kind {
                let prompt = &llm_config.prompt_template;

                if prompt.contains("{{user_input}}") || prompt.contains("${user_input}") {
                    findings.push(
                        SecurityFinding::new(
                            "PROMPT_INJECTION_001",
                            RiskLevel::High,
                            ThreatCategory::Injection,
                            "Potential Prompt Injection Vulnerability",
                            "LLM prompt contains unsanitized user input that could be exploited for prompt injection attacks",
                        )
                        .with_node(node.id.to_string(), node.name.clone())
                        .with_remediation(
                            "Implement input validation and sanitization. Use prompt templates that clearly separate instructions from user input. Consider using a dedicated prompt injection prevention library."
                        )
                        .with_owasp("A03:2021 - Injection")
                        .with_cwe(94)
                        .with_compliance(ComplianceStandard::OwaspTop10)
                    );
                }

                for pattern in &injection_patterns {
                    if prompt.to_lowercase().contains(&pattern.to_lowercase()) {
                        findings.push(
                            SecurityFinding::new(
                                "PROMPT_INJECTION_002",
                                RiskLevel::Medium,
                                ThreatCategory::Injection,
                                "Suspicious Prompt Content Detected",
                                format!("Prompt contains pattern '{}' that may indicate prompt injection", pattern),
                            )
                            .with_node(node.id.to_string(), node.name.clone())
                            .with_remediation("Review prompt content and ensure it's not vulnerable to manipulation")
                            .with_cwe(94)
                        );
                    }
                }
            }
        }

        findings
    }

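    /// Flags code nodes that mix SQL keywords with template interpolation, and LLM
    /// prompts that appear to generate SQL from user input.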
    fn check_sql_injection(&self, workflow: &Workflow) -> Vec<SecurityFinding> {
        let mut findings = Vec::new();

        let sql_patterns = [
            "SELECT", "INSERT", "UPDATE", "DELETE", "DROP", "CREATE", "ALTER",
        ];

        for node in &workflow.nodes {
            match &node.kind {
                NodeKind::Code(script_config) => {
                    let code = &script_config.code;

                    for pattern in &sql_patterns {
                        if code.contains(pattern) && (code.contains("{{") || code.contains("${")) {
                            findings.push(
                                SecurityFinding::new(
                                    "SQL_INJECTION_001",
                                    RiskLevel::Critical,
                                    ThreatCategory::Injection,
                                    "Potential SQL Injection Vulnerability",
                                    format!("Code contains SQL statement '{}' with dynamic user input", pattern),
                                )
                                .with_node(node.id.to_string(), node.name.clone())
                                .with_remediation(
                                    "Use parameterized queries or prepared statements. Never concatenate user input directly into SQL queries."
                                )
                                .with_owasp("A03:2021 - Injection")
                                .with_cwe(89)
                                .with_compliance(ComplianceStandard::OwaspTop10)
                                .with_compliance(ComplianceStandard::PciDss)
                            );
                            break;
                        }
                    }
                }
                NodeKind::LLM(llm_config) => {
                    let prompt = &llm_config.prompt_template.to_lowercase();
                    if (prompt.contains("sql")
                        || prompt.contains("database")
                        || prompt.contains("query"))
                        && (prompt.contains("{{") || prompt.contains("${"))
                    {
                        findings.push(
                            SecurityFinding::new(
                                "SQL_INJECTION_002",
                                RiskLevel::High,
                                ThreatCategory::Injection,
                                "LLM Generating SQL with User Input",
                                "LLM is being used to generate SQL queries with user input, which may be vulnerable to injection",
                            )
                            .with_node(node.id.to_string(), node.name.clone())
                            .with_remediation(
                                "Validate and sanitize all user inputs. Use an allowlist for table/column names. Consider using an ORM instead of raw SQL."
                            )
                            .with_cwe(89)
                        );
                    }
                }
                _ => {}
            }
        }

        findings
    }

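    /// Flags code nodes that combine shell-execution primitives (e.g. `exec`, `system`)
    /// with template interpolation.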
    fn check_command_injection(&self, workflow: &Workflow) -> Vec<SecurityFinding> {
        let mut findings = Vec::new();

        let shell_patterns = [
            "exec",
            "eval",
            "system",
            "popen",
            "subprocess",
            "sh",
            "bash",
            "cmd",
        ];

        for node in &workflow.nodes {
            if let NodeKind::Code(script_config) = &node.kind {
                let code = &script_config.code;

                for pattern in &shell_patterns {
                    if code.contains(pattern) && (code.contains("{{") || code.contains("${")) {
                        findings.push(
                            SecurityFinding::new(
                                "CMD_INJECTION_001",
                                RiskLevel::Critical,
                                ThreatCategory::Injection,
                                "Potential Command Injection Vulnerability",
                                format!("Code uses '{}' with dynamic user input", pattern),
                            )
                            .with_node(node.id.to_string(), node.name.clone())
                            .with_remediation(
                                "Avoid executing shell commands with user input. Use safe APIs instead. If necessary, use strict input validation and allowlisting."
                            )
                            .with_owasp("A03:2021 - Injection")
                            .with_cwe(78)
                            .with_compliance(ComplianceStandard::OwaspTop10)
                        );
                        break;
                    }
                }
            }
        }

        findings
    }

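    /// Flags code nodes that pass interpolated content to HTML sinks such as
    /// `innerHTML` or `document.write`.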
    fn check_xss(&self, workflow: &Workflow) -> Vec<SecurityFinding> {
        let mut findings = Vec::new();

        let html_patterns = ["<script>", "innerHTML", "document.write", "eval("];

        for node in &workflow.nodes {
            if let NodeKind::Code(script_config) = &node.kind {
                let code = &script_config.code;

                for pattern in &html_patterns {
                    if code.contains(pattern) && (code.contains("{{") || code.contains("${")) {
                        findings.push(
                            SecurityFinding::new(
                                "XSS_001",
                                RiskLevel::High,
                                ThreatCategory::Xss,
                                "Potential Cross-Site Scripting (XSS) Vulnerability",
                                format!("Code uses '{}' with dynamic content", pattern),
                            )
                            .with_node(node.id.to_string(), node.name.clone())
                            .with_remediation(
                                "Always sanitize and escape user input before rendering in HTML. Use Content Security Policy (CSP) headers."
                            )
                            .with_owasp("A03:2021 - Injection")
                            .with_cwe(79)
                            .with_compliance(ComplianceStandard::OwaspTop10)
                        );
                        break;
                    }
                }
            }
        }

        findings
    }

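    /// Flags prompts and code that appear to assign values to secret-like identifiers
    /// (API keys, passwords, tokens), plus any configured custom secret patterns.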
    fn check_secrets(&self, workflow: &Workflow) -> Vec<SecurityFinding> {
        let mut findings = Vec::new();

        let secret_patterns = vec![
            ("api_key", "API Key"),
            ("apikey", "API Key"),
            ("password", "Password"),
            ("passwd", "Password"),
            ("secret", "Secret"),
            ("token", "Token"),
            ("bearer", "Bearer Token"),
            ("aws_access_key", "AWS Access Key"),
            ("private_key", "Private Key"),
            ("credentials", "Credentials"),
        ];

        for node in &workflow.nodes {
            let search_text = match &node.kind {
                NodeKind::LLM(cfg) => &cfg.prompt_template,
                NodeKind::Code(cfg) => &cfg.code,
                _ => continue,
            };

            for (pattern, secret_type) in &secret_patterns {
                if search_text.to_lowercase().contains(pattern) {
                    if search_text.contains(&format!("{} =", pattern))
                        || search_text.contains(&format!("{}=", pattern))
                        || search_text.contains(&format!("{}: ", pattern))
                    {
                        findings.push(
                            SecurityFinding::new(
                                "SECRET_001",
                                RiskLevel::Critical,
                                ThreatCategory::DataExposure,
                                format!("Potential Hardcoded {}", secret_type),
                                format!("Node may contain hardcoded {} which should be stored securely", secret_type),
                            )
                            .with_node(node.id.to_string(), node.name.clone())
                            .with_remediation(
                                "Use environment variables or a secure secret management system (e.g., AWS Secrets Manager, HashiCorp Vault). Never hardcode secrets in workflows."
                            )
                            .with_owasp("A02:2021 - Cryptographic Failures")
                            .with_cwe(798)
                            .with_compliance(ComplianceStandard::OwaspTop10)
                            .with_compliance(ComplianceStandard::PciDss)
                        );
                    }
                }
            }

            for pattern in &self.config.custom_secret_patterns {
                if search_text.contains(pattern) {
                    findings.push(
                        SecurityFinding::new(
                            "SECRET_002",
                            RiskLevel::High,
                            ThreatCategory::DataExposure,
                            "Custom Secret Pattern Detected",
                            format!("Node contains custom secret pattern: {}", pattern),
                        )
                        .with_node(node.id.to_string(), node.name.clone())
                        .with_remediation("Review and remove any hardcoded secrets"),
                    );
                }
            }
        }

        findings
    }

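    /// Flags LLM prompts that mention personally identifiable information such as
    /// SSNs, card numbers, or health data.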
    fn check_data_privacy(&self, workflow: &Workflow) -> Vec<SecurityFinding> {
        let mut findings = Vec::new();

        let pii_patterns = [
            "ssn",
            "social security",
            "credit card",
            "card number",
            "email",
            "phone",
            "address",
            "date of birth",
            "dob",
            "passport",
            "driver license",
            "medical",
            "health",
        ];

        for node in &workflow.nodes {
            if let NodeKind::LLM(llm_config) = &node.kind {
                let prompt = &llm_config.prompt_template.to_lowercase();

                for pattern in &pii_patterns {
                    if prompt.contains(pattern) {
                        findings.push(
                            SecurityFinding::new(
                                "PRIVACY_001",
                                RiskLevel::High,
                                ThreatCategory::DataPrivacy,
                                "Potential PII Processing Detected",
                                format!("Workflow may process sensitive personal data: {}", pattern),
                            )
                            .with_node(node.id.to_string(), node.name.clone())
                            .with_remediation(
                                "Ensure PII is processed in compliance with GDPR, HIPAA, or other applicable regulations. Implement data minimization and anonymization where possible."
                            )
                            .with_compliance(ComplianceStandard::Gdpr)
                            .with_compliance(ComplianceStandard::Hipaa)
                        );
                        break;
                    }
                }
            }
        }

        findings
    }

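    /// Marks each configured standard as compliant only if no finding lists it as a violation.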
    fn check_compliance(
        &self,
        _workflow: &Workflow,
        findings: &[SecurityFinding],
    ) -> HashMap<ComplianceStandard, bool> {
        let mut status = HashMap::new();

        for standard in &self.config.compliance_standards {
            let violations = findings
                .iter()
                .any(|f| f.compliance_violations.contains(standard));
            status.insert(*standard, !violations);
        }

        status
    }

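    /// Builds human-readable recommendations from the findings and aggregate risk summary.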
    fn generate_recommendations(
        &self,
        findings: &[SecurityFinding],
        risk_summary: &RiskSummary,
    ) -> Vec<String> {
        let mut recommendations = Vec::new();

        if risk_summary.critical > 0 {
            recommendations.push(
                "URGENT: Address all critical security findings immediately before deploying this workflow".to_string()
            );
        }

        if risk_summary.high > 0 {
            recommendations.push(
                "Address all high-severity security findings as soon as possible".to_string(),
            );
        }

        let has_injection = findings
            .iter()
            .any(|f| f.category == ThreatCategory::Injection);
        if has_injection {
            recommendations.push(
                "Implement input validation and sanitization for all user inputs".to_string(),
            );
        }

        let has_secrets = findings
            .iter()
            .any(|f| f.category == ThreatCategory::DataExposure);
        if has_secrets {
            recommendations.push(
                "Use a secure secret management system (e.g., AWS Secrets Manager, HashiCorp Vault)".to_string()
            );
        }

        let has_privacy = findings
            .iter()
            .any(|f| f.category == ThreatCategory::DataPrivacy);
        if has_privacy {
            recommendations.push(
                "Ensure data privacy compliance (GDPR, HIPAA) for all PII processing".to_string(),
            );
        }

        if recommendations.is_empty() {
            recommendations.push(
                "No major security issues found. Continue following security best practices."
                    .to_string(),
            );
        }

        recommendations
    }
}

impl Default for SecurityScanner {
    fn default() -> Self {
        Self::new(SecurityConfig::default())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{LlmConfig, ScriptConfig, WorkflowBuilder};

    #[test]
    fn test_risk_level_ordering() {
        assert!(RiskLevel::Critical > RiskLevel::High);
        assert!(RiskLevel::High > RiskLevel::Medium);
        assert!(RiskLevel::Medium > RiskLevel::Low);
        assert!(RiskLevel::Low > RiskLevel::Info);
    }

    #[test]
    fn test_security_finding_builder() {
        let finding = SecurityFinding::new(
            "TEST_001",
            RiskLevel::High,
            ThreatCategory::Injection,
            "Test Finding",
            "Test description",
        )
        .with_node("node1".to_string(), "Test Node".to_string())
        .with_remediation("Fix this issue")
        .with_owasp("A03:2021")
        .with_cwe(89)
        .with_compliance(ComplianceStandard::OwaspTop10);

        assert_eq!(finding.id, "TEST_001");
        assert_eq!(finding.risk_level, RiskLevel::High);
        assert_eq!(finding.node_id, Some("node1".to_string()));
        assert_eq!(finding.cwe_id, Some(89));
        assert!(finding
            .compliance_violations
            .contains(&ComplianceStandard::OwaspTop10));
    }

    #[test]
    fn test_risk_summary_calculation() {
        let findings = vec![
            SecurityFinding::new(
                "1",
                RiskLevel::Critical,
                ThreatCategory::Injection,
                "T1",
                "D1",
            ),
            SecurityFinding::new("2", RiskLevel::High, ThreatCategory::Injection, "T2", "D2"),
            SecurityFinding::new("3", RiskLevel::Medium, ThreatCategory::Xss, "T3", "D3"),
            SecurityFinding::new(
                "4",
                RiskLevel::Low,
                ThreatCategory::DataExposure,
                "T4",
                "D4",
            ),
        ];

        let summary = RiskSummary::from_findings(&findings);
        assert_eq!(summary.critical, 1);
        assert_eq!(summary.high, 1);
        assert_eq!(summary.medium, 1);
        assert_eq!(summary.low, 1);
        assert_eq!(summary.total(), 4);
    }

    #[test]
    fn test_risk_score_calculation() {
        let summary = RiskSummary {
            critical: 1,
            high: 2,
            medium: 3,
            low: 0,
            info: 0,
        };

        let score = summary.risk_score();
        assert_eq!(score, 26.0);
    }

    #[test]
    fn test_prompt_injection_detection() {
        let workflow = WorkflowBuilder::new("test")
            .start("Start")
            .llm(
                "LLM",
                LlmConfig {
                    provider: "openai".to_string(),
                    model: "gpt-4".to_string(),
                    prompt_template: "Process this: {{user_input}}".to_string(),
                    temperature: Some(0.7),
                    max_tokens: None,
                    system_prompt: None,
                    tools: Vec::new(),
                    images: Vec::new(),
                    extra_params: serde_json::Value::Null,
                },
            )
            .end("End")
            .build();

        let scanner = SecurityScanner::default();
        let report = scanner.scan(&workflow);

        let injection_findings: Vec<_> = report
            .findings
            .iter()
            .filter(|f| f.category == ThreatCategory::Injection)
            .collect();

        assert!(!injection_findings.is_empty());
        assert_eq!(injection_findings[0].risk_level, RiskLevel::High);
    }

    #[test]
    fn test_sql_injection_detection() {
        let workflow = WorkflowBuilder::new("test")
            .start("Start")
            .code(
                "SQL",
                ScriptConfig {
                    runtime: "python".to_string(),
                    code: "SELECT * FROM users WHERE id = {{user_id}}".to_string(),
                    inputs: Vec::new(),
                    output: "result".to_string(),
                },
            )
            .end("End")
            .build();

        let scanner = SecurityScanner::default();
        let report = scanner.scan(&workflow);

        let sql_findings: Vec<_> = report
            .findings
            .iter()
            .filter(|f| f.id.starts_with("SQL_INJECTION"))
            .collect();

        assert!(!sql_findings.is_empty());
        assert_eq!(sql_findings[0].risk_level, RiskLevel::Critical);
    }

    #[test]
    fn test_command_injection_detection() {
        let workflow = WorkflowBuilder::new("test")
            .start("Start")
            .code(
                "Shell",
                ScriptConfig {
                    runtime: "bash".to_string(),
                    code: "exec {{command}}".to_string(),
                    inputs: Vec::new(),
                    output: "result".to_string(),
                },
            )
            .end("End")
            .build();

        let scanner = SecurityScanner::default();
        let report = scanner.scan(&workflow);

        let cmd_findings: Vec<_> = report
            .findings
            .iter()
            .filter(|f| f.id.starts_with("CMD_INJECTION"))
            .collect();

        assert!(!cmd_findings.is_empty());
        assert_eq!(cmd_findings[0].risk_level, RiskLevel::Critical);
    }

    #[test]
    fn test_secret_detection() {
        let workflow = WorkflowBuilder::new("test")
            .start("Start")
            .llm(
                "LLM",
                LlmConfig {
                    provider: "openai".to_string(),
                    model: "gpt-4".to_string(),
                    prompt_template: "Use api_key = sk-1234567890".to_string(),
                    temperature: Some(0.7),
                    max_tokens: None,
                    system_prompt: None,
                    tools: Vec::new(),
                    images: Vec::new(),
                    extra_params: serde_json::Value::Null,
                },
            )
            .end("End")
            .build();

        let scanner = SecurityScanner::default();
        let report = scanner.scan(&workflow);

        let secret_findings: Vec<_> = report
            .findings
            .iter()
            .filter(|f| f.id.starts_with("SECRET"))
            .collect();

        assert!(!secret_findings.is_empty());
        assert_eq!(secret_findings[0].risk_level, RiskLevel::Critical);
    }

    #[test]
    fn test_pii_detection() {
        let workflow = WorkflowBuilder::new("test")
            .start("Start")
            .llm(
                "LLM",
                LlmConfig {
                    provider: "openai".to_string(),
                    model: "gpt-4".to_string(),
                    prompt_template: "Process the user's email and social security number"
                        .to_string(),
                    temperature: Some(0.7),
                    max_tokens: None,
                    system_prompt: None,
                    tools: Vec::new(),
                    images: Vec::new(),
                    extra_params: serde_json::Value::Null,
                },
            )
            .end("End")
            .build();

        let scanner = SecurityScanner::default();
        let report = scanner.scan(&workflow);

        let privacy_findings: Vec<_> = report
            .findings
            .iter()
            .filter(|f| f.category == ThreatCategory::DataPrivacy)
            .collect();

        assert!(!privacy_findings.is_empty());
    }

    #[test]
    fn test_security_score_calculation() {
        let workflow = WorkflowBuilder::new("test")
            .start("Start")
            .end("End")
            .build();

        let scanner = SecurityScanner::default();
        let report = scanner.scan(&workflow);

        assert!(report.security_score >= 90.0);
        assert!(report.passed());
    }

1099
1100 #[test]
1101 fn test_audit_report_filtering() {
1102 let findings = vec![
1103 SecurityFinding::new(
1104 "1",
1105 RiskLevel::Critical,
1106 ThreatCategory::Injection,
1107 "T1",
1108 "D1",
1109 ),
1110 SecurityFinding::new("2", RiskLevel::High, ThreatCategory::Injection, "T2", "D2"),
1111 SecurityFinding::new("3", RiskLevel::Medium, ThreatCategory::Xss, "T3", "D3"),
1112 ];
1113
1114 let report = SecurityAuditReport {
1115 workflow_id: "test".to_string(),
1116 workflow_name: "Test".to_string(),
1117 findings,
1118 security_score: 75.0,
1119 compliance_status: HashMap::new(),
1120 risk_summary: RiskSummary::default(),
1121 scanned_at: "2026-01-31T00:00:00Z".to_string(),
1122 recommendations: Vec::new(),
1123 };
1124
1125 assert_eq!(report.findings_by_severity(RiskLevel::Critical).len(), 1);
1126 assert_eq!(report.findings_by_severity(RiskLevel::High).len(), 1);
1127 assert_eq!(
1128 report.findings_by_category(ThreatCategory::Injection).len(),
1129 2
1130 );
1131 assert_eq!(report.findings_by_category(ThreatCategory::Xss).len(), 1);
1132 }
1133
1134 #[test]
1135 fn test_compliance_checking() {
1136 let config = SecurityConfig {
1137 compliance_standards: vec![ComplianceStandard::OwaspTop10, ComplianceStandard::Gdpr],
1138 ..Default::default()
1139 };
1140
1141 let workflow = WorkflowBuilder::new("test")
1142 .start("Start")
1143 .end("End")
1144 .build();
1145
1146 let scanner = SecurityScanner::new(config);
1147 let report = scanner.scan(&workflow);
1148
1149 assert_eq!(
1151 report
1152 .compliance_status
1153 .get(&ComplianceStandard::OwaspTop10),
1154 Some(&true)
1155 );
1156 assert_eq!(
1157 report.compliance_status.get(&ComplianceStandard::Gdpr),
1158 Some(&true)
1159 );
1160 }
1161
1162 #[test]
1163 fn test_recommendations_generation() {
1164 let workflow = WorkflowBuilder::new("test")
1165 .start("Start")
1166 .code(
1167 "SQL",
1168 ScriptConfig {
1169 runtime: "python".to_string(),
1170 code: "SELECT * FROM users WHERE id = {{user_id}}".to_string(),
1171 inputs: Vec::new(),
1172 output: "result".to_string(),
1173 },
1174 )
1175 .end("End")
1176 .build();
1177
1178 let scanner = SecurityScanner::default();
1179 let report = scanner.scan(&workflow);
1180
1181 assert!(!report.recommendations.is_empty());
1182 assert!(report
1183 .recommendations
1184 .iter()
1185 .any(|r| r.contains("URGENT") || r.contains("critical")));
1186 }
1187
    #[test]
    fn test_security_config_customization() {
        let config = SecurityConfig {
            check_prompt_injection: false,
            custom_secret_patterns: vec!["CUSTOM_SECRET".to_string()],
            ..Default::default()
        };

        let workflow = WorkflowBuilder::new("test")
            .start("Start")
            .llm(
                "LLM",
                LlmConfig {
                    provider: "openai".to_string(),
                    model: "gpt-4".to_string(),
                    prompt_template: "{{user_input}} CUSTOM_SECRET=abc123".to_string(),
                    temperature: Some(0.7),
                    max_tokens: None,
                    system_prompt: None,
                    tools: Vec::new(),
                    images: Vec::new(),
                    extra_params: serde_json::Value::Null,
                },
            )
            .end("End")
            .build();

        let scanner = SecurityScanner::new(config);
        let report = scanner.scan(&workflow);

        let injection_findings: Vec<_> = report
            .findings
            .iter()
            .filter(|f| f.id.starts_with("PROMPT_INJECTION"))
            .collect();
        assert!(injection_findings.is_empty());

        let custom_findings: Vec<_> = report
            .findings
            .iter()
            .filter(|f| f.id == "SECRET_002")
            .collect();
        assert!(!custom_findings.is_empty());
    }

    #[test]
    fn test_xss_detection() {
        let workflow = WorkflowBuilder::new("test")
            .start("Start")
            .code(
                "JS",
                ScriptConfig {
                    runtime: "javascript".to_string(),
                    code: "document.write({{user_content}})".to_string(),
                    inputs: Vec::new(),
                    output: "result".to_string(),
                },
            )
            .end("End")
            .build();

        let scanner = SecurityScanner::default();
        let report = scanner.scan(&workflow);

        let xss_findings: Vec<_> = report
            .findings
            .iter()
            .filter(|f| f.category == ThreatCategory::Xss)
            .collect();

        assert!(!xss_findings.is_empty());
        assert_eq!(xss_findings[0].risk_level, RiskLevel::High);
    }
}