agent_line/lib.rs
//! A batteries-included Rust library for building agent workflows.
//!
//! Define agents, wire them into workflows, and let the runner execute them.
//! Agents communicate through a shared context ([`Ctx`]) and drive control
//! flow by returning an outcome: [`Outcome::Continue`], [`Outcome::Next`],
//! [`Outcome::Retry`], [`Outcome::Wait`], [`Outcome::Done`], or
//! [`Outcome::Fail`].
//!
//! # Quick start
//!
//! ```rust
//! use agent_line::{Agent, Ctx, Outcome, Runner, StepResult, Workflow};
//!
//! #[derive(Clone)]
//! struct State { n: i32 }
//!
//! struct AddOne;
//! impl Agent<State> for AddOne {
//!     fn name(&self) -> &'static str { "add_one" }
//!     fn run(&mut self, state: State, _ctx: &mut Ctx) -> StepResult<State> {
//!         Ok((State { n: state.n + 1 }, Outcome::Done))
//!     }
//! }
//!
//! let mut ctx = Ctx::new();
//! let wf = Workflow::builder("demo")
//!     .register(AddOne)
//!     .build()
//!     .unwrap();
//!
//! let result = Runner::new(wf).run(State { n: 0 }, &mut ctx).unwrap();
//! assert_eq!(result.n, 1);
//! ```
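//!
//! [`Outcome::Done`] above ends the run after a single step. In a multi-step
//! workflow, [`Outcome::Continue`] instead hands the updated state to the
//! next agent in the declared order; the remaining variants cover explicit
//! routing, retries, waiting, and failure (see [`Outcome`] for their
//! payloads). A minimal sketch of a two-step chain, using only the builder
//! calls shown in the examples below:
//!
//! ```rust
//! use agent_line::{Agent, Ctx, Outcome, Runner, StepResult, Workflow};
//!
//! #[derive(Clone)]
//! struct State { n: i32 }
//!
//! struct Double;
//! impl Agent<State> for Double {
//!     fn name(&self) -> &'static str { "double" }
//!     fn run(&mut self, state: State, _ctx: &mut Ctx) -> StepResult<State> {
//!         // Not the last step: hand the state to the next agent.
//!         Ok((State { n: state.n * 2 }, Outcome::Continue))
//!     }
//! }
//!
//! struct AddTen;
//! impl Agent<State> for AddTen {
//!     fn name(&self) -> &'static str { "add_ten" }
//!     fn run(&mut self, state: State, _ctx: &mut Ctx) -> StepResult<State> {
//!         // Last step: finish the run.
//!         Ok((State { n: state.n + 10 }, Outcome::Done))
//!     }
//! }
//!
//! let mut ctx = Ctx::new();
//! let wf = Workflow::builder("chain")
//!     .register(Double)
//!     .register(AddTen)
//!     .start_at("double")
//!     .then("add_ten")
//!     .build()
//!     .unwrap();
//!
//! let result = Runner::new(wf).run(State { n: 2 }, &mut ctx).unwrap();
//! assert_eq!(result.n, 14); // (2 * 2) + 10
//! ```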
//!
//! # LLM access
//!
//! Each agent that needs an LLM holds its own [`LlmConfig`] and calls
//! [`LlmConfig::request`] to build and send a chat request.
//!
//! For the simplest case, build an [`LlmConfig`] from environment variables
//! and inject it into the agent that needs it:
//!
//! ```rust,no_run
//! # use agent_line::{Agent, Ctx, LlmConfig, Outcome, StepResult};
//! # #[derive(Clone)]
//! # struct Draft { body: String, summary: String }
//! struct Summarize {
//!     llm: LlmConfig,
//! }
//!
//! impl Summarize {
//!     fn new(llm: LlmConfig) -> Self { Self { llm } }
//! }
//!
//! impl Agent<Draft> for Summarize {
//!     fn name(&self) -> &'static str { "summarize" }
//!     fn run(&mut self, mut draft: Draft, _ctx: &mut Ctx) -> StepResult<Draft> {
//!         draft.summary = self.llm.request()
//!             .system("Summarize the draft in one sentence.")
//!             .user(&draft.body)
//!             .send()?;
//!         Ok((draft, Outcome::Done))
//!     }
//! }
//!
//! // In main():
//! // let llm = LlmConfig::from_env(); // reads AGENT_LINE_PROVIDER, etc.
//! // register(Summarize::new(llm))
//! ```
//!
//! [`LlmConfig::from_env`] reads `AGENT_LINE_PROVIDER`, `AGENT_LINE_LLM_URL`,
//! `AGENT_LINE_MODEL`, `AGENT_LINE_API_KEY`, `AGENT_LINE_NUM_CTX`, and
//! `AGENT_LINE_MAX_TOKENS`. It defaults to a local Ollama configuration when
//! nothing is set.
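//!
//! A minimal sketch of that setup: with no variables set it falls back to
//! the Ollama defaults, and exporting the variables switches providers
//! without touching the code (the values below are illustrative):
//!
//! ```rust,no_run
//! # use agent_line::LlmConfig;
//! // e.g. AGENT_LINE_PROVIDER=anthropic
//! //      AGENT_LINE_LLM_URL=https://api.anthropic.com
//! //      AGENT_LINE_MODEL=claude-sonnet-4-20250514
//! //      AGENT_LINE_API_KEY=...   (from the deployment environment)
//! let llm = LlmConfig::from_env();
//! ```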
//!
//! For multi-model pipelines, give each agent its own [`LlmConfig`]. A cheap
//! local model handles routine extraction; a stronger remote model handles
//! the harder reasoning step:
//!
//! ```rust,no_run
//! use agent_line::{
//!     Agent, Ctx, LlmConfig, Outcome, Provider, Runner, StepResult, Workflow,
//! };
//!
//! #[derive(Clone)]
//! struct Draft { body: String, notes: String, review: String }
//!
//! struct Researcher { llm: LlmConfig }
//!
//! impl Researcher {
//!     fn new(llm: LlmConfig) -> Self { Self { llm } }
//! }
//!
//! impl Agent<Draft> for Researcher {
//!     fn name(&self) -> &'static str { "researcher" }
//!     fn run(&mut self, mut draft: Draft, _ctx: &mut Ctx) -> StepResult<Draft> {
//!         draft.notes = self.llm.request()
//!             .system("Extract the three key claims from the draft, one per line.")
//!             .user(&draft.body)
//!             .send()?;
//!         Ok((draft, Outcome::Continue))
//!     }
//! }
//!
//! struct Reviewer { llm: LlmConfig }
//!
//! impl Reviewer {
//!     fn new(llm: LlmConfig) -> Self { Self { llm } }
//! }
//!
//! impl Agent<Draft> for Reviewer {
//!     fn name(&self) -> &'static str { "reviewer" }
//!     fn run(&mut self, mut draft: Draft, _ctx: &mut Ctx) -> StepResult<Draft> {
//!         draft.review = self.llm.request()
//!             .system("Critique the draft against its own claims. Be specific.")
//!             .user(format!("Claims:\n{}\n\nDraft:\n{}", draft.notes, draft.body))
//!             .send()?;
//!         Ok((draft, Outcome::Done))
//!     }
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let cheap = LlmConfig::builder()
//!     .provider(Provider::Ollama)
//!     .base_url("http://localhost:11434")
//!     .model("qwen3:8b")
//!     .build()?;
//!
//! let strong = LlmConfig::builder()
//!     .provider(Provider::Anthropic)
//!     .base_url("https://api.anthropic.com")
//!     .model("claude-sonnet-4-20250514")
//!     .api_key(std::env::var("ANTHROPIC_API_KEY")?)
//!     .max_tokens(1200)
//!     .build()?;
//!
//! let mut ctx = Ctx::new();
//! let wf = Workflow::builder("review")
//!     .register(Researcher::new(cheap))
//!     .register(Reviewer::new(strong))
//!     .start_at("researcher")
//!     .then("reviewer")
//!     .build()?;
//!
//! Runner::new(wf).run(
//!     Draft {
//!         body: "Rust ownership...".into(),
//!         notes: String::new(),
//!         review: String::new(),
//!     },
//!     &mut ctx,
//! )?;
//! # Ok(()) }
//! ```

mod agent;
mod ctx;
mod llm;
mod runner;
pub mod tools;
mod workflow;

pub use agent::{Agent, Outcome, RetryHint, StepError, StepResult};
pub use ctx::Ctx;
pub use llm::{LlmConfig, LlmConfigBuilder, LlmConfigError, LlmRequestBuilder, Provider};
pub use runner::{ErrorEvent, Runner, StepEvent};
pub use workflow::{Workflow, WorkflowBuilder, WorkflowError};