use crate::core::Result;
use serde::{Deserialize, Serialize};
use super::solver::{SMTResult, SMTSolver};
use super::constraints::SMTConstraint;
/// Request payload sent to the ContextLite `/api/smt/solve` endpoint.
#[derive(Debug, Serialize, Deserialize)]
struct ContextLiteSMTRequest {
    /// Raw SMT constraint expressions to be solved together.
    constraints: Vec<String>,
    /// Server-side solve timeout, in milliseconds.
    timeout_ms: u32,
    /// Optional key for the server-side result cache; `None` disables caching
    /// for this request.
    cache_key: Option<String>,
}
/// Response payload returned by the ContextLite `/api/smt/solve` endpoint.
#[derive(Debug, Serialize, Deserialize)]
struct ContextLiteSMTResponse {
    /// Whether the constraint set was found satisfiable.
    satisfiable: bool,
    /// Model string produced by the solver, if any.
    model: Option<String>,
    /// Solve time reported by the server, in milliseconds.
    execution_time_ms: u64,
    /// True when the server answered from its cache rather than solving anew.
    cached: bool,
}
/// Operations a ContextLite-backed solver exposes beyond the base
/// [`SMTSolver`] trait: raw Z3 queries plus result caching.
pub trait ContextLiteIntegration {
    /// Send a raw query string to the remote Z3 solver and return its result.
    fn query_z3_solver(&self, query: &str) -> Result<SMTResult>;
    /// Store `result` under `key` in the remote cache (best-effort).
    fn cache_result(&self, key: &str, result: &SMTResult) -> Result<()>;
    /// Look up a previously cached result; `Ok(None)` on miss or cache failure.
    fn get_cached_result(&self, key: &str) -> Result<Option<SMTResult>>;
}
/// SMT solver that delegates to a remote ContextLite service over HTTP and
/// falls back to a local heuristic when the service is unavailable.
///
/// NOTE(review): the underscore-prefixed fields are all read by the impl
/// blocks below; the `_` prefix (which conventionally signals "unused") is
/// misleading and worth renaming in a dedicated refactor.
pub struct ContextLiteSMTSolver {
    /// Base URL of the ContextLite service.
    _endpoint: String,
    /// Shared HTTP client used for all requests.
    _client: reqwest::Client,
    /// When true, cache-related behavior (store/retrieve, remote escalation
    /// in `check_satisfiability`) is enabled.
    _cache_enabled: bool,
    /// Constraints registered via `add_constraint`, checked as a batch by
    /// `check_satisfiability`.
    accumulated_constraints: Vec<SMTConstraint>,
}
impl ContextLiteSMTSolver {
pub fn new(endpoint: String, cache_enabled: bool) -> Self {
Self {
_endpoint: endpoint,
_client: reqwest::Client::new(),
_cache_enabled: cache_enabled,
accumulated_constraints: Vec::new(),
}
}
fn local_satisfiability_heuristic(&self, constraints: &[SMTConstraint]) -> Result<bool> {
let mut safety_constraints = 0;
let mut performance_constraints = 0;
for constraint in constraints {
match constraint.constraint_type {
super::constraints::ConstraintType::Safety => {
safety_constraints += 1;
if constraint.expression.contains("dangerous_commands")
|| constraint.expression.contains("system_modification") {
return Ok(false); }
},
super::constraints::ConstraintType::Performance => {
performance_constraints += 1;
if constraint.expression.contains("timeout") && performance_constraints > 10 {
return Ok(false); }
},
_ => {}
}
}
if safety_constraints > 20 {
return Ok(false);
}
Ok(true)
}
}
impl SMTSolver for ContextLiteSMTSolver {
fn solve(&self, constraints: &[SMTConstraint]) -> Result<SMTResult> {
let smt_constraints: Vec<String> = constraints
.iter()
.map(|c| c.expression.clone())
.collect();
let request = ContextLiteSMTRequest {
constraints: smt_constraints,
timeout_ms: 5000,
cache_key: Some(format!("smt_solve_{}", chrono::Utc::now().timestamp_millis())),
};
let rt = tokio::runtime::Runtime::new()?;
rt.block_on(async {
let start_time = std::time::Instant::now();
let response = self._client
.post(&format!("{}/api/smt/solve", self._endpoint))
.json(&request)
.send()
.await
.map_err(|e| crate::core::error::RustChainError::Execution(
crate::core::error::ExecutionError::step_failed(
"contextlite_smt",
"smt_bridge",
format!("Failed to send SMT request: {}", e)
)
))?;
let execution_time_ms = start_time.elapsed().as_millis() as u64;
if response.status().is_success() {
let smt_response: ContextLiteSMTResponse = response
.json()
.await
.map_err(|e| crate::core::error::RustChainError::Execution(
crate::core::error::ExecutionError::step_failed(
"contextlite_parse",
"smt_bridge",
format!("Failed to parse SMT response: {}", e)
)
))?;
Ok(SMTResult {
satisfiable: smt_response.satisfiable,
model: smt_response.model,
execution_time_ms: if smt_response.cached {
smt_response.execution_time_ms
} else {
execution_time_ms
},
})
} else {
tracing::warn!("ContextLite SMT API unavailable, using local heuristics");
let is_satisfiable = self.local_satisfiability_heuristic(constraints)?;
Ok(SMTResult {
satisfiable: is_satisfiable,
model: if is_satisfiable {
Some("(local-heuristic-model)".to_string())
} else {
None
},
execution_time_ms,
})
}
})
}
fn add_constraint(&mut self, constraint: SMTConstraint) -> Result<()> {
self.accumulated_constraints.push(constraint);
tracing::debug!("Added SMT constraint, total constraints: {}", self.accumulated_constraints.len());
Ok(())
}
fn check_satisfiability(&self) -> Result<bool> {
if self.accumulated_constraints.is_empty() {
return Ok(true); }
let satisfiable = self.local_satisfiability_heuristic(&self.accumulated_constraints)?;
if !satisfiable && self._cache_enabled {
let critical_constraints: Vec<&SMTConstraint> = self.accumulated_constraints
.iter()
.filter(|c| matches!(c.severity, super::constraints::ConstraintSeverity::Critical))
.collect();
if !critical_constraints.is_empty() {
let critical_constraints_owned: Vec<SMTConstraint> = critical_constraints
.into_iter()
.cloned()
.collect();
let result = self.solve(&critical_constraints_owned)?;
return Ok(result.satisfiable);
}
}
Ok(satisfiable)
}
}
impl ContextLiteIntegration for ContextLiteSMTSolver {
    /// Send a raw query to the ContextLite Z3 endpoint (`/api/z3/query`).
    ///
    /// On a non-success HTTP status this falls back to a crude text heuristic:
    /// the query is deemed satisfiable unless it contains the substrings
    /// "false" or "contradiction".
    ///
    /// NOTE(review): creates a fresh tokio runtime per call and uses
    /// `block_on`; it will panic if invoked from inside an async context.
    fn query_z3_solver(&self, query: &str) -> Result<SMTResult> {
        let rt = tokio::runtime::Runtime::new()?;
        rt.block_on(async {
            let start_time = std::time::Instant::now();
            let query_request = serde_json::json!({
                "query": query,
                "solver": "z3",
                "timeout_ms": 5000
            });
            let response = self._client
                .post(&format!("{}/api/z3/query", self._endpoint))
                .json(&query_request)
                .send()
                .await
                .map_err(|e| crate::core::error::RustChainError::Execution(
                    crate::core::error::ExecutionError::step_failed(
                        "z3_query",
                        "contextlite_bridge",
                        format!("Failed to send Z3 query: {}", e)
                    )
                ))?;
            let execution_time_ms = start_time.elapsed().as_millis() as u64;
            if response.status().is_success() {
                // Parse leniently into a JSON value; missing/mis-typed fields
                // degrade to `satisfiable = false` / `model = None` below.
                let z3_response: serde_json::Value = response
                    .json()
                    .await
                    .map_err(|e| crate::core::error::RustChainError::Execution(
                        crate::core::error::ExecutionError::step_failed(
                            "z3_parse",
                            "contextlite_bridge",
                            format!("Failed to parse Z3 response: {}", e)
                        )
                    ))?;
                Ok(SMTResult {
                    satisfiable: z3_response.get("satisfiable").and_then(|v| v.as_bool()).unwrap_or(false),
                    model: z3_response.get("model").and_then(|v| v.as_str()).map(String::from),
                    execution_time_ms,
                })
            } else {
                tracing::warn!("ContextLite Z3 API unavailable for query: {}", query);
                // Heuristic fallback: substring scan of the query text.
                Ok(SMTResult {
                    satisfiable: !query.contains("false") && !query.contains("contradiction"),
                    model: Some(format!("(heuristic-result-for {})", query)),
                    execution_time_ms,
                })
            }
        })
    }

    /// Best-effort store of `result` under `key` in the remote cache with a
    /// fixed 1-hour TTL. Never fails the caller: HTTP and transport errors
    /// are logged at `warn` and swallowed. No-op when caching is disabled.
    fn cache_result(&self, key: &str, result: &SMTResult) -> Result<()> {
        if !self._cache_enabled {
            return Ok(()); }
        let rt = tokio::runtime::Runtime::new()?;
        rt.block_on(async {
            let cache_request = serde_json::json!({
                "key": key,
                "result": result,
                "ttl_seconds": 3600 });
            let response = self._client
                .post(&format!("{}/api/cache/smt/store", self._endpoint))
                .json(&cache_request)
                .send()
                .await;
            match response {
                Ok(resp) if resp.status().is_success() => {
                    tracing::debug!("Cached SMT result for key: {}", key);
                    Ok(())
                },
                Ok(resp) => {
                    // Server rejected the store; treat as non-fatal.
                    tracing::warn!("Failed to cache SMT result: HTTP {}", resp.status());
                    Ok(()) },
                Err(e) => {
                    // Transport failure; treat as non-fatal.
                    tracing::warn!("Failed to cache SMT result: {}", e);
                    Ok(()) }
            }
        })
    }

    /// Retrieve a cached result for `key`. Returns `Ok(None)` on cache miss,
    /// deserialization failure, transport failure, or when caching is
    /// disabled — the caller cannot distinguish these cases.
    fn get_cached_result(&self, key: &str) -> Result<Option<SMTResult>> {
        if !self._cache_enabled {
            return Ok(None); }
        let rt = tokio::runtime::Runtime::new()?;
        rt.block_on(async {
            let response = self._client
                .get(&format!("{}/api/cache/smt/retrieve", self._endpoint))
                .query(&[("key", key)])
                .send()
                .await;
            match response {
                Ok(resp) if resp.status().is_success() => {
                    match resp.json::<SMTResult>().await {
                        Ok(result) => {
                            tracing::debug!("Retrieved cached SMT result for key: {}", key);
                            Ok(Some(result))
                        },
                        Err(e) => {
                            // Corrupt or schema-mismatched cache entry: miss.
                            tracing::debug!("Failed to deserialize cached result: {}", e);
                            Ok(None)
                        }
                    }
                },
                // Non-success status (e.g. 404 miss) or transport error: miss.
                Ok(_resp) => Ok(None), Err(e) => {
                    tracing::debug!("Cache retrieval failed: {}", e);
                    Ok(None) }
            }
        })
    }
}