11_custom_client/
11-custom-client.rs

/*
Example demonstrating how to convert a custom OpenAI completion model into an
LMClient and use it in a simple pipeline, similar to 01-simple.rs.

This shows how to create a completion model directly and use it with LM.

Run with (a valid OPENAI_API_KEY is needed for the request to succeed):
```
cargo run --example 11-custom-client
```
*/

use anyhow::Result;
use dspy_rs::{ChatAdapter, LM, LMClient, Predict, Predictor, Signature, configure, example};
use rig::providers::*;
use std::env;

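// `cot` asks the Signature macro for a chain-of-thought variant: the model is
// prompted for an intermediate reasoning trace before producing `answer`.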
#[Signature(cot)]
struct QASignature {
    #[input]
    pub question: String,

    #[output]
    pub answer: String,
}

#[tokio::main]
async fn main() -> Result<()> {
    // Create a custom OpenAI completion model directly
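    // Falls back to a placeholder key so the example builds and starts up; the
    // actual request below needs a real OPENAI_API_KEY to succeed.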
    let api_key = env::var("OPENAI_API_KEY").unwrap_or_else(|_| "dummy-key".to_string());

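    // The "custom client" path: construct the rig OpenAI client and completion
    // model by hand instead of relying on LM's builder defaults.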
    let openai_client: openai::Client<reqwest::Client> =
        openai::ClientBuilder::new(&api_key).build();
    let openai_model = openai::completion::CompletionModel::new(openai_client, "gpt-4o-mini");

    // Convert to LMClient using the Into trait (enum_dispatch generates From implementations)
    let custom_lm_client: LMClient = openai_model.into();

    // Create LM with the custom client
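    // `builder().build()` constructs an LM with default settings; `with_client`
    // then swaps in the hand-built client in place of the default one.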
    let lm = LM::builder()
        .build()
        .await?
        .with_client(custom_lm_client)
        .await?;

    // Configure the global settings with our custom LM
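    // `configure` installs this LM/adapter pair in the global settings that
    // predictors read at call time.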
    configure(lm, ChatAdapter);

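    // `example!` builds an Example; the "input" marker tags `question` as an
    // input field, matching the #[input] declaration on QASignature.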
    let example = example! {
        "question": "input" => "What is the capital of France?",
    };

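    // `forward` formats the example into a prompt via the configured adapter,
    // calls the LM, and parses the typed outputs back into a Prediction.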
    let qa_predictor = Predict::new(QASignature::new());
    let prediction = qa_predictor.forward(example).await?;
    println!("{prediction:?}");

    Ok(())
}