pub struct LM {
pub config: LMConfig,
pub cache_handler: Option<Arc<Mutex<ResponseCache>>>,
/* private fields */
}

Fields

config: LMConfig
cache_handler: Option<Arc<Mutex<ResponseCache>>>

Implementations

impl LM
pub async fn new(config: LMConfig) -> Self
Creates a new LM with the given configuration. Provider selection uses enum dispatch rather than trait objects, so calls avoid dynamic-dispatch overhead at runtime.
This is an async function because it initializes the cache handler when
config.cache is true. For synchronous contexts where cache initialization
is not needed, use new_sync instead.
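
A minimal sketch contrasting the two constructors. The cache field name on LMConfig is inferred from the description above, and the exact signature of new_sync is assumed; treat both as illustrative rather than definitive.

async fn build_lms() {
    // `new` is async so it can initialize the cache handler when the
    // cache flag is set (field name `cache` assumed from the docs above).
    let cached = LM::new(LMConfig {
        model: "openai:gpt-4o-mini".to_string(),
        cache: true,
        ..LMConfig::default()
    })
    .await;

    // `new_sync` (signature assumed) skips cache initialization and can
    // be called from synchronous contexts.
    let uncached = LM::new_sync(LMConfig {
        model: "openai:gpt-4o-mini".to_string(),
        ..LMConfig::default()
    });

    let _ = (cached, uncached);
}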
Examples found in repository
examples/01-simple.rs (lines 73-76)
async fn main() -> Result<()> {
    configure(
        LM::new(LMConfig {
            model: "openai:gpt-4o-mini".to_string(),
            ..LMConfig::default()
        })
        .await,
        ChatAdapter,
    );

    let example = example! {
        "question": "input" => "What is the capital of France?",
    };

    let qa_rater = QARater::builder().build();
    let prediction = qa_rater.forward(example).await.unwrap();
    println!("{prediction:?}");

    Ok(())
}

More examples

examples/06-other-providers-batch.rs (lines 80-83)
async fn main() {
    // Anthropic
    configure(
        LM::new(LMConfig {
            model: "anthropic:claude-sonnet-4-5-20250929".to_string(),
            ..LMConfig::default()
        })
        .await,
        ChatAdapter,
    );

    let example = vec![
        example! {
            "question": "input" => "What is the capital of France?",
        },
        example! {
            "question": "input" => "What is the capital of Germany?",
        },
        example! {
            "question": "input" => "What is the capital of Italy?",
        },
    ];

    let qa_rater = QARater::builder().build();
    let prediction = qa_rater.batch(example.clone(), 2, true).await.unwrap();
    println!("Anthropic: {prediction:?}");

    // Gemini
    configure(
        LM::new(LMConfig {
            model: "gemini:gemini-2.0-flash".to_string(),
            ..LMConfig::default()
        })
        .await,
        ChatAdapter,
    );

    let prediction = qa_rater.batch(example, 2, true).await.unwrap();
    println!("Gemini: {prediction:?}");
}

examples/09-gepa-sentiment.rs (lines 120-123)
async fn main() -> Result<()> {
    println!("GEPA Sentiment Analysis Optimization Example\n");

    // Setup LM
    let lm = LM::new(LMConfig {
        temperature: 0.7,
        ..LMConfig::default()
    })
    .await;

    configure(lm.clone(), ChatAdapter);

    // Create training examples with diverse sentiments
    let trainset = vec![
        example! {
            "text": "input" => "This movie was absolutely fantastic! I loved every minute of it.",
            "expected_sentiment": "input" => "positive"
        },
        example! {
            "text": "input" => "Terrible service, will never come back again.",
            "expected_sentiment": "input" => "negative"
        },
        example! {
            "text": "input" => "The weather is okay, nothing special.",
            "expected_sentiment": "input" => "neutral"
        },
        example! {
            "text": "input" => "Despite some minor issues, I'm quite happy with the purchase.",
            "expected_sentiment": "input" => "positive"
        },
        example! {
            "text": "input" => "I have mixed feelings about this product.",
            "expected_sentiment": "input" => "neutral"
        },
        example! {
            "text": "input" => "This is the worst experience I've ever had!",
            "expected_sentiment": "input" => "negative"
        },
        example! {
            "text": "input" => "It's fine. Does what it's supposed to do.",
            "expected_sentiment": "input" => "neutral"
        },
        example! {
            "text": "input" => "Exceeded all my expectations! Highly recommend!",
            "expected_sentiment": "input" => "positive"
        },
        example! {
            "text": "input" => "Disappointed and frustrated with the outcome.",
            "expected_sentiment": "input" => "negative"
        },
        example! {
            "text": "input" => "Standard quality, nothing remarkable.",
            "expected_sentiment": "input" => "neutral"
        },
    ];

    // Create module
    let mut module = SentimentAnalyzer::builder()
        .predictor(Predict::new(SentimentSignature::new()))
        .build();

    // Evaluate baseline performance
    println!("Baseline Performance:");
    let baseline_score = module.evaluate(trainset.clone()).await;
    println!("  Average score: {:.3}\n", baseline_score);

    // Configure GEPA optimizer
    let gepa = GEPA::builder()
        .num_iterations(5)
        .minibatch_size(5)
        .num_trials(3)
        .temperature(0.9)
        .track_stats(true)
        .build();

    // Run optimization
    println!("Starting GEPA optimization...\n");
    let result = gepa
        .compile_with_feedback(&mut module, trainset.clone())
        .await?;

    // Display results
    println!("\nOptimization Results:");
    println!(
        "  Best average score: {:.3}",
        result.best_candidate.average_score()
    );
    println!("  Total rollouts: {}", result.total_rollouts);
    println!("  Total LM calls: {}", result.total_lm_calls);
    println!("  Generations: {}", result.evolution_history.len());

    println!("\nBest Instruction:");
    println!("  {}", result.best_candidate.instruction);

    if !result.evolution_history.is_empty() {
        println!("\nEvolution History:");
        for entry in &result.evolution_history {
            println!("  Generation {}: {:.3}", entry.0, entry.1);
        }
    }

    // Test optimized module on a new example
    println!("\nTesting Optimized Module:");
    let test_example = example! {
        "text": "input" => "This product changed my life! Absolutely amazing!",
        "expected_sentiment": "input" => "positive"
    };

    let test_prediction = module.forward(test_example.clone()).await?;
    let test_feedback = module
        .feedback_metric(&test_example, &test_prediction)
        .await;

    println!(
        "  Test prediction: {}",
        test_prediction.get("sentiment", None)
    );
    println!("  Test score: {:.3}", test_feedback.score);
    println!("  Feedback:\n{}", test_feedback.feedback);

    Ok(())
}

examples/10-gepa-llm-judge.rs (lines 226-229)
async fn main() -> Result<()> {
    println!("GEPA with LLM-as-a-Judge Example\n");
    println!("This example shows how to use an LLM judge to automatically");
    println!("generate rich feedback for optimizing a math solver.\n");

    // Setup: Configure the LLM
    // Main LM for the task
    let task_lm = LM::new(LMConfig {
        temperature: 0.7,
        ..LMConfig::default()
    })
    .await;

    // Judge LM (could use a different/cheaper model)
    let judge_lm = LM::new(LMConfig {
        temperature: 0.3,
        ..LMConfig::default()
    })
    .await;

    configure(task_lm, ChatAdapter);

    // Create training examples
    let trainset = vec![
        example! {
            "problem": "input" => "Sarah has 12 apples. She gives 3 to her friend and buys 5 more. How many apples does she have now?",
            "expected_answer": "input" => "14"
        },
        example! {
            "problem": "input" => "A train travels 60 miles in 1 hour. How far will it travel in 3.5 hours at the same speed?",
            "expected_answer": "input" => "210"
        },
        example! {
            "problem": "input" => "There are 24 students in a class. If 1/3 of them are absent, how many students are present?",
            "expected_answer": "input" => "16"
        },
        example! {
            "problem": "input" => "A rectangle has length 8 cm and width 5 cm. What is its area?",
            "expected_answer": "input" => "40"
        },
        example! {
            "problem": "input" => "John has $50. He spends $12 on lunch and $8 on a book. How much money does he have left?",
            "expected_answer": "input" => "30"
        },
    ];

    // Create the module
    let mut module = MathSolver::builder()
        .solver(Predict::new(MathWordProblem::new()))
        .judge(Predict::new(MathJudge::new()))
        .judge_lm(Arc::new(judge_lm))
        .build();

    // Evaluate baseline performance
    println!("Step 1: Baseline Performance");
    println!("Testing the solver before optimization...\n");
    let baseline_score = module.evaluate(trainset.clone()).await;
    println!("  Baseline average score: {:.3}\n", baseline_score);

    // Configure GEPA optimizer
    println!("Step 2: Configure GEPA");
    println!("Setting up the optimizer with budget controls...\n");

    let gepa = GEPA::builder()
        .num_iterations(3) // Fewer iterations for demo
        .minibatch_size(3) // Smaller batches
        .temperature(0.9)
        .track_stats(true)
        .maybe_max_lm_calls(Some(100)) // Important: we're using 2x LM calls (task + judge)
        .build();

    // Run GEPA optimization
    println!("Step 3: Run GEPA Optimization");
    println!("The judge will analyze reasoning quality and provide feedback...\n");

    let result = gepa
        .compile_with_feedback(&mut module, trainset.clone())
        .await?;

    // Display results
    println!("\nStep 4: Results");
    println!("===============\n");
    println!("Optimization complete!");
    println!(
        "  Best average score: {:.3}",
        result.best_candidate.average_score()
    );
    println!(
        "  Improvement: {:.3}",
        result.best_candidate.average_score() - baseline_score
    );
    println!("  Total rollouts: {}", result.total_rollouts);
    println!(
        "  Total LM calls: {} (includes judge evaluations)",
        result.total_lm_calls
    );

    println!("\nEvolution over time:");
    for (generation, score) in &result.evolution_history {
        println!("  Generation {}: {:.3}", generation, score);
    }

    println!("\nOptimized instruction:");
    println!("  {}", result.best_candidate.instruction);

    // Test the optimized solver
    println!("\nStep 5: Test Optimized Solver");
    println!("==============================\n");

    let test_problem = example! {
        "problem": "input" => "A store sells pencils for $0.25 each. If you buy 8 pencils, how much will you pay?",
        "expected_answer": "input" => "2"
    };

    let test_prediction = module.forward(test_problem.clone()).await?;
    let test_feedback = module
        .feedback_metric(&test_problem, &test_prediction)
        .await;

    println!(
        "Test problem: A store sells pencils for $0.25 each. If you buy 8 pencils, how much will you pay?"
    );
    println!("\nAnswer: {}", test_prediction.get("answer", None));
    println!("Score: {:.3}\n", test_feedback.score);
    println!("Detailed Feedback from Judge:");
    println!("{}", test_feedback.feedback);

    Ok(())
}

pub async fn call(&self, messages: Chat) -> Result<LMResponse>
Executes a chat completion against the configured provider.
messages must already be formatted as OpenAI-compatible chat turns.
The call returns an LMResponse containing the assistant output,
token usage, and chat history including the new response.
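
A minimal sketch of a direct call, assuming an anyhow-style Result alias like the repository examples use. How the Chat argument is constructed depends on the Chat API, so it is left as a parameter here; only lm.call and the LMResponse description come from the docs above.

// Sketch: driving the LM directly once you have an OpenAI-compatible Chat.
async fn ask(lm: &LM, messages: Chat) -> Result<LMResponse> {
    let response = lm.call(messages).await?;
    // LMResponse carries the assistant output, token usage, and the chat
    // history including the new response.
    println!("{response:?}");
    Ok(response)
}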
pub async fn inspect_history(&self, n: usize) -> Vec<CallResult>
Returns the n most recent cached calls.
Panics if caching is disabled for this LM.
Examples found in repository
examples/07-inspect-history.rs (line 42)
async fn main() {
    let lm = LM::default();
    configure(lm, ChatAdapter);

    let example = example! {
        "question": "input" => "What is the capital of France?",
    };

    let qa_rater = QARater::builder().build();
    let prediction = qa_rater.forward(example.clone()).await.unwrap();
    println!("Prediction: {prediction:?}");

    let history = get_lm().inspect_history(1).await;
    println!("History: {history:?}");
}

Auto Trait Implementations
impl Freeze for LM
impl !RefUnwindSafe for LM
impl Send for LM
impl Sync for LM
impl Unpin for LM
impl !UnwindSafe for LM
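
Because LM is Send and Sync (and the repository examples clone it), a single instance can serve concurrent tasks. A sketch assuming the tokio runtime the examples run on; sharing through Arc is one option among several.

use std::sync::Arc;

// Sketch: fanning a shared LM out to several tasks. This relies only on
// the Send + Sync auto impls listed above.
async fn fan_out(lm: LM) {
    let lm = Arc::new(lm);
    let mut handles = Vec::new();
    for _ in 0..4 {
        let lm = Arc::clone(&lm);
        handles.push(tokio::spawn(async move {
            // Each task may read the shared LM concurrently; in real code,
            // build a Chat here and call `lm.call(...)`.
            let _ = &lm.config;
        }));
    }
    for h in handles {
        let _ = h.await;
    }
}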