// artificial_core/client.rs

1//! Generic, lightweight client that executes a [`PromptTemplate`] against a
2//! single concrete [`Backend`].
3//!
4//! The client is **generic over the backend type `B`**, so the compiler
5//! guarantees that:
6//! * The prompt’s `Message` type matches what the backend expects.
7//! * No dynamic dispatch or object-safety hurdles appear in user code.
8//!
9//! ```rust
10//! use artificial_core::{ArtificialClient, generic::{GenericMessage, GenericRole},
11//!                      template::*, model::*};
12//!
13//! struct Hello;
14//!
15//! impl PromptTemplate for Hello {
16//!     type Output         = serde_json::Value;
17//!     const MODEL: Model  = Model::OpenAi(OpenAiModel::Gpt4o);
18//! }
19//!
20//! impl IntoPrompt for Hello {
21//!     type Message = GenericMessage;
22//!     fn into_prompt(self) -> Vec<Self::Message> {
23//!         vec![GenericMessage::new("Say hello!".into(), GenericRole::User)]
24//!     }
25//! }
26//!
27//! # fn main() {}
28//! ```
29//!
30//! Any backend crate (e.g. `artificial-openai`, `artificial-ollama`) just
31//! implements [`Backend`] and the same client works out of the box.
32
33use crate::{
34    backend::Backend,
35    error::Result,
36    template::{IntoPrompt, PromptTemplate},
37};
38
/// Client front-end tied to one concrete provider backend `B`.
///
/// Cloning is delegated to `B`'s own `Clone` implementation: a backend that
/// wraps an `Arc` makes clones cheap, while a plain-value backend is
/// deep-copied. Clone the client whenever you need to share it across tasks.
#[derive(Debug, Clone)]
pub struct ArtificialClient<B> {
    /// The provider implementation every call is forwarded to.
    backend: B,
}
47
48impl<B> ArtificialClient<B>
49where
50    B: Backend,
51{
52    /// Create a new client that delegates all calls to `backend`.
53    pub fn new(backend: B) -> Self {
54        Self { backend }
55    }
56
57    /// Run a prompt on the backend and return the deserialised output.
58    ///
59    /// # Errors
60    ///
61    /// Any provider-specific failure is converted into
62    /// [`crate::error::ArtificialError`] and bubbled up transparently.
63    pub async fn chat_complete<P>(&self, prompt: P) -> Result<P::Output>
64    where
65        P: PromptTemplate + Send + 'static,
66        <P as IntoPrompt>::Message: Into<B::Message>,
67    {
68        self.backend.chat_complete(prompt).await
69    }
70
71    /// Access the underlying backend (e.g. to tweak provider-specific settings).
72    pub fn backend(&self) -> &B {
73        &self.backend
74    }
75
76    /// Consume the client and return the inner backend.
77    pub fn into_backend(self) -> B {
78        self.backend
79    }
80}