mod execution_policy;
mod filters;
mod tenant_policy;
mod tool_policy;
mod input_processor;
mod long_running;
mod pii_input;
pub use execution_policy::{ExecutionLimits, ExecutionPolicy};
pub use filters::{ContentFilter, FilterAction, FilterResult};
pub use long_running::{
CheckpointPolicy, ContextStrategy, LongRunningExecutionPolicy, WorkingMemoryPolicy,
};
pub use tenant_policy::{FeatureFlags, TenantLimits, TenantPolicy};
pub use tool_policy::{ToolPermissions, ToolPolicy, ToolTrustLevel};
pub use input_processor::{InputProcessor, InputProcessorPipeline, InputProcessorResult};
pub use pii_input::{PiiInputMode, PiiInputProcessor};
/// Outcome of evaluating a policy: the action is allowed, denied, or
/// allowed with a warning to surface to the caller.
///
/// `PartialEq`/`Eq` are derived so decisions can be compared directly
/// (e.g. in tests or when de-duplicating evaluator results).
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PolicyDecision {
    /// The action may proceed unconditionally.
    Allow,
    /// The action is blocked; `reason` explains why.
    Deny { reason: String },
    /// The action may proceed, but `message` should be surfaced.
    Warn { message: String },
}

impl PolicyDecision {
    /// Returns `true` when the action may proceed.
    ///
    /// Note that `Warn` counts as allowed: a warning does not block.
    pub fn is_allowed(&self) -> bool {
        matches!(self, PolicyDecision::Allow | PolicyDecision::Warn { .. })
    }

    /// Returns `true` only for `Deny`.
    pub fn is_denied(&self) -> bool {
        matches!(self, PolicyDecision::Deny { .. })
    }
}
/// A policy check that inspects a [`PolicyContext`] and yields a
/// [`PolicyDecision`]. The `Send + Sync` bound allows implementations to be
/// shared across threads (e.g. behind an `Arc`).
pub trait PolicyEvaluator: Send + Sync {
    /// Evaluate `context` and decide whether its action is allowed,
    /// denied, or allowed with a warning.
    fn evaluate(&self, context: &PolicyContext) -> PolicyDecision;
}
/// Context handed to a [`PolicyEvaluator`]: who is acting, what action is
/// being attempted, and any extra evaluator-specific data.
#[derive(Debug, Clone)]
pub struct PolicyContext {
    // Tenant on whose behalf the action runs; None when tenancy does not apply.
    pub tenant_id: Option<String>,
    // Initiating user, if known.
    pub user_id: Option<String>,
    // The concrete action under evaluation.
    pub action: PolicyAction,
    // Free-form key/value pairs for evaluator-specific inputs.
    pub metadata: std::collections::HashMap<String, String>,
}
/// The kinds of actions a policy can gate.
///
/// `PartialEq`/`Eq` are derived (all fields are `String`/`Option<String>`)
/// so actions can be compared directly instead of only via `matches!`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PolicyAction {
    /// Starting an execution; `graph_id` identifies the graph, if any.
    StartExecution { graph_id: Option<String> },
    /// Invoking the named tool.
    InvokeTool { tool_name: String },
    /// Calling the named LLM model.
    LlmCall { model: String },
    /// Accessing an external resource (e.g. a URL).
    ExternalAccess { resource: String },
    /// Emitting output of the given content type.
    OutputContent { content_type: String },
}
#[cfg(test)]
mod tests {
    use super::*;

    // `Allow` permits and never denies.
    #[test]
    fn test_policy_decision_allow() {
        let d = PolicyDecision::Allow;
        assert!(d.is_allowed());
        assert!(!d.is_denied());
    }

    // `Deny` blocks the action.
    #[test]
    fn test_policy_decision_deny() {
        let d = PolicyDecision::Deny {
            reason: String::from("Not authorized"),
        };
        assert!(d.is_denied());
        assert!(!d.is_allowed());
    }

    // `Warn` still counts as allowed.
    #[test]
    fn test_policy_decision_warn() {
        let d = PolicyDecision::Warn {
            message: String::from("Proceed with caution"),
        };
        assert!(d.is_allowed());
        assert!(!d.is_denied());
    }

    // A fully-populated context exposes its fields unchanged.
    #[test]
    fn test_policy_context_creation() {
        let metadata: std::collections::HashMap<String, String> =
            [(String::from("key"), String::from("value"))]
                .into_iter()
                .collect();
        let ctx = PolicyContext {
            tenant_id: Some(String::from("tenant-123")),
            user_id: Some(String::from("user-456")),
            action: PolicyAction::StartExecution {
                graph_id: Some(String::from("graph-789")),
            },
            metadata,
        };
        assert_eq!(ctx.tenant_id.as_deref(), Some("tenant-123"));
        assert_eq!(ctx.user_id.as_deref(), Some("user-456"));
        assert!(matches!(
            ctx.action,
            PolicyAction::StartExecution { .. }
        ));
    }

    // Each variant constructs and matches its own pattern.
    #[test]
    fn test_policy_action_variants() {
        assert!(matches!(
            PolicyAction::StartExecution { graph_id: None },
            PolicyAction::StartExecution { .. }
        ));
        assert!(matches!(
            PolicyAction::InvokeTool {
                tool_name: String::from("web_search"),
            },
            PolicyAction::InvokeTool { .. }
        ));
        assert!(matches!(
            PolicyAction::LlmCall {
                model: String::from("gpt-4"),
            },
            PolicyAction::LlmCall { .. }
        ));
        assert!(matches!(
            PolicyAction::ExternalAccess {
                resource: String::from("https://api.example.com"),
            },
            PolicyAction::ExternalAccess { .. }
        ));
        assert!(matches!(
            PolicyAction::OutputContent {
                content_type: String::from("text/plain"),
            },
            PolicyAction::OutputContent { .. }
        ));
    }
}