use laminae::ollama::OllamaClient;
use laminae::psyche::{EgoBackend, PsycheConfig, PsycheEngine};
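
/// A stand-in Ego backend that echoes its inputs instead of calling a model,
/// making it easy to inspect exactly what the Psyche layer hands to the Ego.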
struct EchoEgo;
impl EgoBackend for EchoEgo {
    fn complete(
        &self,
        system_prompt: &str,
        user_message: &str,
        psyche_context: &str,
    ) -> impl std::future::Future<Output = anyhow::Result<String>> + Send {
        // Build the reply synchronously; the returned future only yields it.
        let response = format!(
            "=== Ego Response ===\n\
             System prompt: {}\n\
             User message: {}\n\
             Psyche context length: {} chars\n\
             ---\n\
             Psyche context preview:\n{}",
            if system_prompt.is_empty() {
                "(none)"
            } else {
                system_prompt
            },
            user_message,
            // Count characters, not bytes, to match the "chars" label above.
            psyche_context.chars().count(),
            if psyche_context.is_empty() {
                "(no context — Psyche was skipped)".to_string()
            } else {
                // Cap the preview at 500 characters to keep output readable.
                psyche_context.chars().take(500).collect::<String>()
            },
        );
        async move { Ok(response) }
    }
}
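
// Exercises the engine with three prompts of increasing complexity, one per
// routing tier: skipped Psyche, COP mode, and the full Id/Superego pipeline.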
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Surface laminae's debug logs so routing decisions are visible.
    tracing_subscriber::fmt()
        .with_env_filter("laminae=debug")
        .init();

    // The example still runs without Ollama: the Psyche stages are skipped
    // and the Ego receives an empty psyche_context.
    let ollama = OllamaClient::new();
    if !ollama.is_available().await {
        println!("⚠ Ollama is not running — Psyche will skip Id/Superego processing.");
        println!(" Start it with: ollama serve");
        println!(" Pull a model: ollama pull qwen2.5:7b\n");
    }
    // Start from the defaults and override only the Ego's system prompt.
    let mut config = PsycheConfig::default();
    config.ego_system_prompt = "You are a helpful AI assistant.".to_string();

    let engine = PsycheEngine::with_config(ollama, EchoEgo, config);
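
    // Each reply() routes the message through the Psyche before handing it,
    // along with any gathered context, to the Ego backend for the final response.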
println!("━━━ Test 1: Simple greeting (should skip Psyche) ━━━\n");
let response = engine.reply("hello").await?;
println!("{response}\n");
println!("━━━ Test 2: Medium question (should use COP mode) ━━━\n");
let response = engine
.reply("How do I implement a binary search tree in Rust?")
.await?;
println!("{response}\n");
println!("━━━ Test 3: Complex request (should use full pipeline) ━━━\n");
let response = engine
.reply(
"Can you analyze the trade-offs between microservices and monolith architecture \
for a payment processing system that needs to handle 10,000 transactions per second \
with strict consistency requirements?",
)
.await?;
println!("{response}\n");
Ok(())
}