use std::sync::Arc;
use oris_runtime::{
agent::{
create_deep_agent, create_deep_agent_from_llm, detect_and_create_llm, DeepAgentConfig,
LoggingMiddleware,
},
chain::Chain,
prompt_args,
schemas::messages::Message,
tools::SimpleContext,
};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();

    // Per-invocation context shared with the agent's tools: a fixed session
    // id plus a custom "workspace_root" entry pointing at the OS temp dir.
    let shared_ctx = Arc::new(
        SimpleContext::new()
            .with_session_id("custom-session-123".to_string())
            .with_custom(
                "workspace_root".to_string(),
                std::env::temp_dir().display().to_string(),
            ),
    );

    // Inline skill / memory snippets surfaced to the agent via the config.
    let skill_text = "When the user asks for a summary, always respond in three bullet points.";
    let memory_text = "Project convention: prefer short, actionable responses.";

    // Assemble the configuration in two labelled stages: core toggles first,
    // then the prompt/skill/memory customizations.
    let base = DeepAgentConfig::new()
        .with_planning(true)
        .with_filesystem(false)
        .with_context(shared_ctx)
        .with_middleware(vec![Arc::new(LoggingMiddleware::new())]);
    let config = base
        .with_planning_system_prompt(Some(
            "Use the write_todos tool to break work into steps before answering.".to_string(),
        ))
        .with_custom_tool_description(
            "write_todos",
            "Update the to-do list for this session. Call when the user mentions tasks or a plan.",
        )
        .with_skill_content("summary_rules", skill_text)
        .with_memory_content("conventions", memory_text);

    // Variant 1: build the agent from a model-name string; the system prompt
    // points the model at the Skills/Memory sections the config contributes.
    let agent = create_deep_agent(
        "gpt-4o-mini",
        &[],
        Some("You are a helpful deep agent. Follow the Skills and Memory sections."),
        config,
    )?;

    println!("=== Deep Agent customization ===\n");

    let question = Message::new_human_message(
        "Summarize what you know about project conventions in one sentence.",
    );
    let result = agent
        .invoke(prompt_args! { "messages" => vec![question] })
        .await?;
    println!("Response: {}\n", result);

    // Variant 2: hand a pre-built LLM instance to the factory instead of a
    // model-name string, with a minimal configuration.
    let llm = detect_and_create_llm("gpt-4o-mini")?;
    let minimal_config = DeepAgentConfig::new()
        .with_planning(true)
        .with_filesystem(false)
        .with_skill_content("tip", "Always be concise.");
    let agent_from_llm = create_deep_agent_from_llm(llm, &[], None, minimal_config)?;

    let greeting = Message::new_human_message("Say hello in one word.");
    let result2 = agent_from_llm.invoke_messages(vec![greeting]).await?;
    println!("From-LLM response: {}", result2);

    Ok(())
}