artificial_core/backend.rs
use std::{future::Future, pin::Pin};

use crate::{
    error::Result,
    template::{IntoPrompt, PromptTemplate},
};

/// A **backend** turns a prompt into a network call to a concrete provider
/// (OpenAI, Ollama, Anthropic, …) and parses the structured response.
///
/// The trait is intentionally minimal:
///
/// * **One associated type** – the in-memory `Message` representation this
///   provider accepts.
/// * **One async-ish method** – `chat_complete`, which performs a *single*
///   non-streaming round-trip and returns a value whose type is dictated by
///   the `PromptTemplate`.
///
/// The method returns a [`Pin<Box<dyn Future>>`] so the trait can expose an
/// async operation without pulling in `async_trait`.
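///
/// # Example
///
/// A minimal sketch of an implementation. `EchoBackend` and `EchoMessage`
/// are hypothetical placeholder types, and the crate name `artificial_core`
/// is assumed from the file path; a real backend would issue an HTTP request
/// to its provider and deserialize the reply.
///
/// ```ignore
/// use std::{future::Future, pin::Pin};
///
/// use artificial_core::{
///     backend::Backend,
///     error::Result,
///     template::{IntoPrompt, PromptTemplate},
/// };
///
/// struct EchoBackend;
/// struct EchoMessage(String);
///
/// impl Backend for EchoBackend {
///     type Message = EchoMessage;
///
///     fn chat_complete<P>(
///         &self,
///         prompt: P,
///     ) -> Pin<Box<dyn Future<Output = Result<P::Output>> + Send>>
///     where
///         P: PromptTemplate + Send + 'static,
///         <P as IntoPrompt>::Message: Into<Self::Message>,
///     {
///         Box::pin(async move {
///             // A real implementation would convert the prompt's messages
///             // into `EchoMessage`s via `Into`, call the provider, and
///             // parse the reply into `P::Output`.
///             let _ = prompt;
///             todo!()
///         })
///     }
/// }
/// ```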
pub trait Backend: Send + Sync {
    /// Chat message type consumed by this backend.
    ///
    /// A simple setup can re-use `crate::generic::GenericMessage`.
    /// Providers with richer wire formats (function calls, images, …) can
    /// supply their own struct.
    type Message: Send + Sync + 'static;

    /// Execute the prompt and deserialize the provider’s reply into
    /// `P::Output`.
    ///
    /// The bound `<P as IntoPrompt>::Message: Into<Self::Message>`
    /// guarantees at **compile time** that callers only feed the backend
    /// messages it understands.
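    ///
    /// # Example
    ///
    /// A hedged sketch of a call site; `EchoBackend` (from the trait-level
    /// example) and `GreetTemplate` are hypothetical:
    ///
    /// ```ignore
    /// let backend = EchoBackend;
    /// // The template's `Output` associated type decides the type of `reply`.
    /// let reply = backend.chat_complete(GreetTemplate::new("world")).await?;
    /// ```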
    fn chat_complete<P>(
        &self,
        prompt: P,
    ) -> Pin<Box<dyn Future<Output = Result<P::Output>> + Send>>
    where
        P: PromptTemplate + Send + 'static,
        <P as IntoPrompt>::Message: Into<Self::Message>;
}