//! A batteries-included Rust library for building agent workflows.
//!
//! Define agents, wire them into workflows, and let the runner execute them.
//! Agents communicate through a shared context ([`Ctx`]) and steer control
//! flow by returning an [`Outcome`]: [`Outcome::Continue`], [`Outcome::Next`],
//! [`Outcome::Retry`], [`Outcome::Wait`], [`Outcome::Done`], or
//! [`Outcome::Fail`].
//!
//! # Quick start
//!
//! ```rust
//! use agent_line::{Agent, Ctx, Outcome, Runner, StepResult, Workflow};
//!
//! #[derive(Clone)]
//! struct State { n: i32 }
//!
//! struct AddOne;
//! impl Agent<State> for AddOne {
//!     fn name(&self) -> &'static str { "add_one" }
//!     fn run(&mut self, state: State, _ctx: &mut Ctx) -> StepResult<State> {
//!         Ok((State { n: state.n + 1 }, Outcome::Done))
//!     }
//! }
//!
//! let mut ctx = Ctx::new();
//! let wf = Workflow::builder("demo")
//!     .register(AddOne)
//!     .build()
//!     .unwrap();
//!
//! let result = Runner::new(wf).run(State { n: 0 }, &mut ctx).unwrap();
//! assert_eq!(result.n, 1);
//! ```
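//!
//! An agent is not locked into a single outcome: because each step returns
//! one, an agent can decide at runtime whether to hand off to the next step
//! or end the run early. A minimal sketch using only the [`Outcome::Done`]
//! and [`Outcome::Continue`] variants shown above (the `LengthGate` agent
//! and its `max_len` field are illustrative, not part of the library):
//!
//! ```rust,no_run
//! # use agent_line::{Agent, Ctx, Outcome, StepResult};
//! # #[derive(Clone)]
//! # struct Draft { body: String }
//! struct LengthGate { max_len: usize }
//!
//! impl Agent<Draft> for LengthGate {
//!     fn name(&self) -> &'static str { "length_gate" }
//!     fn run(&mut self, draft: Draft, _ctx: &mut Ctx) -> StepResult<Draft> {
//!         // Hand off to the next agent only when the draft still needs
//!         // work; otherwise finish the workflow here.
//!         let outcome = if draft.body.len() <= self.max_len {
//!             Outcome::Done
//!         } else {
//!             Outcome::Continue
//!         };
//!         Ok((draft, outcome))
//!     }
//! }
//! ```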
//!
//! # LLM access
//!
//! Each agent that needs an LLM holds its own [`LlmConfig`] and calls
//! [`LlmConfig::request`] to start building a chat request, finishing with
//! `send()`.
//!
//! For the simplest case, build an [`LlmConfig`] from environment variables
//! and inject it into the agent that needs it:
//!
//! ```rust,no_run
//! # use agent_line::{Agent, Ctx, LlmConfig, Outcome, StepResult};
//! # #[derive(Clone)]
//! # struct Draft { body: String, summary: String }
//! struct Summarize {
//!     llm: LlmConfig,
//! }
//!
//! impl Summarize {
//!     fn new(llm: LlmConfig) -> Self { Self { llm } }
//! }
//!
//! impl Agent<Draft> for Summarize {
//!     fn name(&self) -> &'static str { "summarize" }
//!     fn run(&mut self, mut draft: Draft, _ctx: &mut Ctx) -> StepResult<Draft> {
//!         draft.summary = self.llm.request()
//!             .system("Summarize the draft in one sentence.")
//!             .user(&draft.body)
//!             .send()?;
//!         Ok((draft, Outcome::Done))
//!     }
//! }
//!
//! // In main():
//! // let llm = LlmConfig::from_env(); // reads AGENT_LINE_PROVIDER, etc.
//! // register(Summarize::new(llm))
//! ```
//!
//! [`LlmConfig::from_env`] reads `AGENT_LINE_PROVIDER`, `AGENT_LINE_LLM_URL`,
//! `AGENT_LINE_MODEL`, `AGENT_LINE_API_KEY`, `AGENT_LINE_NUM_CTX`, and
//! `AGENT_LINE_MAX_TOKENS`. Defaults to a local Ollama configuration when
//! nothing is set.
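//!
//! With a config from the environment, the single-model workflow above wires
//! up in a few lines. A sketch reusing the `Summarize` agent (the workflow
//! name "summarize" and the starting `draft` value are illustrative):
//!
//! ```rust,ignore
//! let llm = LlmConfig::from_env();
//! let mut ctx = Ctx::new();
//! let wf = Workflow::builder("summarize")
//!     .register(Summarize::new(llm))
//!     .build()?;
//!
//! let draft = Draft { body: "...".into(), summary: String::new() };
//! let summarized = Runner::new(wf).run(draft, &mut ctx)?;
//! ```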
//!
//! For multi-model pipelines, give each agent its own [`LlmConfig`]. A cheap
//! local model handles routine extraction; a stronger remote model handles
//! the harder reasoning step:
//!
//! ```rust,no_run
//! use agent_line::{
//!     Agent, Ctx, LlmConfig, Outcome, Provider, Runner, StepResult, Workflow,
//! };
//!
//! #[derive(Clone)]
//! struct Draft { body: String, notes: String, review: String }
//!
//! struct Researcher { llm: LlmConfig }
//!
//! impl Researcher {
//!     fn new(llm: LlmConfig) -> Self { Self { llm } }
//! }
//!
//! impl Agent<Draft> for Researcher {
//!     fn name(&self) -> &'static str { "researcher" }
//!     fn run(&mut self, mut draft: Draft, _ctx: &mut Ctx) -> StepResult<Draft> {
//!         draft.notes = self.llm.request()
//!             .system("Extract the three key claims from the draft, one per line.")
//!             .user(&draft.body)
//!             .send()?;
//!         Ok((draft, Outcome::Continue))
//!     }
//! }
//!
//! struct Reviewer { llm: LlmConfig }
//!
//! impl Reviewer {
//!     fn new(llm: LlmConfig) -> Self { Self { llm } }
//! }
//!
//! impl Agent<Draft> for Reviewer {
//!     fn name(&self) -> &'static str { "reviewer" }
//!     fn run(&mut self, mut draft: Draft, _ctx: &mut Ctx) -> StepResult<Draft> {
//!         draft.review = self.llm.request()
//!             .system("Critique the draft against its own claims. Be specific.")
//!             .user(format!("Claims:\n{}\n\nDraft:\n{}", draft.notes, draft.body))
//!             .send()?;
//!         Ok((draft, Outcome::Done))
//!     }
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let cheap = LlmConfig::builder()
//!     .provider(Provider::Ollama)
//!     .base_url("http://localhost:11434")
//!     .model("qwen3:8b")
//!     .build()?;
//!
//! let strong = LlmConfig::builder()
//!     .provider(Provider::Anthropic)
//!     .base_url("https://api.anthropic.com")
//!     .model("claude-sonnet-4-20250514")
//!     .api_key(std::env::var("ANTHROPIC_API_KEY")?)
//!     .max_tokens(1200)
//!     .build()?;
//!
//! let mut ctx = Ctx::new();
//! let wf = Workflow::builder("review")
//!     .register(Researcher::new(cheap))
//!     .register(Reviewer::new(strong))
//!     .start_at("researcher")
//!     .then("reviewer")
//!     .build()?;
//!
//! Runner::new(wf).run(
//!     Draft {
//!         body: "Rust ownership...".into(),
//!         notes: String::new(),
//!         review: String::new(),
//!     },
//!     &mut ctx,
//! )?;
//! # Ok(()) }
//! ```
// Crate-root re-exports; these are the names the doc examples import.
// NOTE: the source module paths below are assumed; only the re-exported
// names are attested by the documentation above.
pub use crate::agent::{Agent, StepResult};
pub use crate::ctx::Ctx;
pub use crate::llm::{LlmConfig, Provider};
pub use crate::outcome::Outcome;
pub use crate::runner::Runner;
pub use crate::workflow::Workflow;