artificial_core/template.rs
//! Abstractions that tie a **prompt** to a concrete **model** and a **typed
//! response**.
//!
//! The *artificial* framework purposely keeps the public surface small. A
//! developer usually needs only two traits to go from “some string fragments”
//! to “ready-to-send payload”:
//!
//! 1. [`IntoPrompt`] – turns *any* value into a list of chat messages.
//! 2. [`PromptTemplate`] – adds metadata such as the target model and the
//!    expected JSON response schema.
//!
//! Provider back-ends (e.g. `artificial-openai`) accept *any* `P` that
//! implements **both** traits. Rust’s type system then guarantees at
//! compile time that
//!
//! * the message type produced by the prompt matches what the back-end expects,
//! * the JSON returned by the provider can be deserialised into `P::Output`.
//!
//! ```rust
//! use artificial_core::template::{IntoPrompt, PromptTemplate};
//! use artificial_core::generic::{GenericMessage, GenericRole};
//! use artificial_core::model::{Model, OpenAiModel};
//! use schemars::JsonSchema;
//! use serde::{Deserialize, Serialize};
//!
//! #[derive(Serialize, Deserialize, JsonSchema)]
//! #[serde(deny_unknown_fields)]
//! struct Hello { greeting: String }
//!
//! struct HelloPrompt;
//!
//! impl IntoPrompt for HelloPrompt {
//!     type Message = GenericMessage;
//!     fn into_prompt(self) -> Vec<Self::Message> {
//!         vec![GenericMessage::new("Say hello!".into(), GenericRole::User)]
//!     }
//! }
//!
//! impl PromptTemplate for HelloPrompt {
//!     type Output = Hello;
//!     const MODEL: Model = Model::OpenAi(OpenAiModel::Gpt4oMini);
//! }
//! ```
//!
//! See `examples/openai_hello_world.rs` for a fully working program.
use std::any::Any;

use schemars::JsonSchema;
use serde::Deserialize;

use crate::model::Model;

/// High-level description of a prompt.
///
/// Implement this trait **in addition** to [`IntoPrompt`] to specify:
///
/// * `Output` – the strongly-typed Rust struct you expect from the LLM.
/// * `MODEL` – the identifier of the model that should handle the request.
///
/// The bounds on `Output` (`JsonSchema + Deserialize + Any`) enable the
/// OpenAI adapter to derive a JSON Schema automatically and to down-cast
/// the erased type when necessary.
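///
/// Because every template also implements [`IntoPrompt`], a back-end can stay
/// fully generic over the template. A minimal sketch (`render` is a
/// hypothetical helper, not part of this crate):
///
/// ```rust
/// use artificial_core::model::Model;
/// use artificial_core::template::{IntoPrompt, PromptTemplate};
///
/// // Hypothetical helper: gather everything a back-end needs from a template.
/// fn render<P: PromptTemplate>(prompt: P) -> (Model, Vec<P::Message>) {
///     (P::MODEL, prompt.into_prompt())
/// }
/// ```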
pub trait PromptTemplate: IntoPrompt {
    /// Type produced by the LLM and returned to the caller.
    type Output: JsonSchema + for<'de> Deserialize<'de> + Any;

    /// Logical model identifier. The back-end will map this to its own naming
    /// scheme (`"gpt-4o-mini"`, `"claude-3-haiku"`, …).
    const MODEL: Model;
}

/// Converts a value into a series of chat messages.
///
/// Provider crates typically use [`crate::generic::GenericMessage`], but a
/// back-end can require its own richer struct. By making the `Message` type
/// an **associated type** we keep the trait flexible without resorting to
/// dynamic dispatch.
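///
/// A prompt can also carry runtime data and interpolate it into the messages
/// it emits. A minimal sketch (the `Summarize` struct is purely illustrative):
///
/// ```rust
/// use artificial_core::generic::{GenericMessage, GenericRole};
/// use artificial_core::template::IntoPrompt;
///
/// struct Summarize { text: String }
///
/// impl IntoPrompt for Summarize {
///     type Message = GenericMessage;
///
///     fn into_prompt(self) -> Vec<Self::Message> {
///         // Interpolate the runtime data into a single user message.
///         vec![GenericMessage::new(
///             format!("Summarize in one sentence: {}", self.text),
///             GenericRole::User,
///         )]
///     }
/// }
/// ```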
pub trait IntoPrompt {
    /// Chat message representation emitted by the prompt.
    type Message: Send + Sync + 'static;

    /// Consume `self` and return **all** messages in the desired order.
    fn into_prompt(self) -> Vec<Self::Message>;
}

/// Convenience implementation so a single [`crate::generic::GenericMessage`]
/// can be passed directly to the client without wrapping it in a struct.
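///
/// A short usage sketch:
///
/// ```rust
/// use artificial_core::generic::{GenericMessage, GenericRole};
/// use artificial_core::template::IntoPrompt;
///
/// let msg = GenericMessage::new("Ping".into(), GenericRole::User);
/// // The message is simply wrapped in a one-element `Vec`.
/// assert_eq!(msg.into_prompt().len(), 1);
/// ```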
impl IntoPrompt for crate::generic::GenericMessage {
    type Message = crate::generic::GenericMessage;

    fn into_prompt(self) -> Vec<Self::Message> {
        vec![self]
    }
}