tdln_brain/lib.rs
//! `tdln-brain` — Deterministic Cognitive Layer for LogLine OS
//!
//! Render a narrative frame → call an LLM provider → extract **only** JSON →
//! validate into `tdln_ast::SemanticUnit`.
//!
//! # Why
//!
//! - Prevent tool-call hallucinations
//! - Enforce JSON-only outputs
//! - Keep reasoning optional and separated
//! - Make failures machine-legible
//!
//! # Example
//!
//! ```rust,no_run
//! use tdln_brain::{Brain, CognitiveContext, GenerationConfig};
//! use tdln_brain::providers::local::LocalEcho;
//!
//! # async fn example() -> Result<(), tdln_brain::BrainError> {
//! let brain = Brain::new(LocalEcho);
//! let ctx = CognitiveContext {
//!     system_directive: "You are a deterministic planner.".into(),
//!     ..Default::default()
//! };
//! let decision = brain.reason(&ctx, &GenerationConfig::default()).await?;
//! println!("{:?}", decision.intent);
//! # Ok(())
//! # }
//! ```

#![forbid(unsafe_code)]
#![cfg_attr(docsrs, feature(doc_cfg))]

pub mod parser;
pub mod prompt;
pub mod providers;
pub mod util;

use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use thiserror::Error;

// Re-export core AST type
pub use tdln_ast::SemanticUnit;

// ══════════════════════════════════════════════════════════════════════════════
// Errors
// ══════════════════════════════════════════════════════════════════════════════

/// Errors from the cognitive layer.
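///
/// # Example
///
/// Matching on the variants keeps failures machine-legible; the
/// description strings below are illustrative, not part of the crate:
///
/// ```rust
/// use tdln_brain::BrainError;
///
/// fn describe(err: &BrainError) -> &'static str {
///     match err {
///         BrainError::Provider(_) => "provider transport/API failure",
///         BrainError::Hallucination(_) => "model emitted invalid TDLN JSON",
///         BrainError::ContextOverflow => "context window exceeded",
///         BrainError::Parsing(_) => "output could not be parsed",
///         BrainError::Render(_) => "prompt rendering failed",
///     }
/// }
///
/// assert_eq!(describe(&BrainError::ContextOverflow), "context window exceeded");
/// ```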
#[derive(Debug, Error)]
pub enum BrainError {
    /// Transport or API error from the provider.
    #[error("provider error: {0}")]
    Provider(String),
    /// Model output was not valid TDLN JSON.
    #[error("hallucination: invalid TDLN JSON: {0}")]
    Hallucination(String),
    /// Context window exceeded.
    #[error("context window exceeded")]
    ContextOverflow,
    /// JSON parsing error.
    #[error("parsing error: {0}")]
    Parsing(String),
    /// Prompt rendering error.
    #[error("render error: {0}")]
    Render(String),
}

// ══════════════════════════════════════════════════════════════════════════════
// Types
// ══════════════════════════════════════════════════════════════════════════════

/// A chat message with role and content.
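///
/// # Example
///
/// Building messages with the role-specific constructors:
///
/// ```rust
/// use tdln_brain::Message;
///
/// let msg = Message::user("hello");
/// assert_eq!(msg.role, "user");
/// assert_eq!(msg.content, "hello");
/// ```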
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct Message {
    /// Role: "system", "user", or "assistant".
    pub role: String,
    /// Message content.
    pub content: String,
}

impl Message {
    /// Create a system message.
    #[must_use]
    pub fn system(s: impl Into<String>) -> Self {
        Self {
            role: "system".into(),
            content: s.into(),
        }
    }

    /// Create a user message.
    #[must_use]
    pub fn user(s: impl Into<String>) -> Self {
        Self {
            role: "user".into(),
            content: s.into(),
        }
    }

    /// Create an assistant message.
    #[must_use]
    pub fn assistant(s: impl Into<String>) -> Self {
        Self {
            role: "assistant".into(),
            content: s.into(),
        }
    }
}

/// Cognitive context for prompt rendering.
///
/// Contains the system directive, recall (memory), conversation history,
/// and active constraints (policies) the model must respect.
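///
/// # Example
///
/// A minimal sketch of a hand-built context; the directive, recall, and
/// constraint strings are illustrative:
///
/// ```rust
/// use tdln_brain::{CognitiveContext, Message};
///
/// let ctx = CognitiveContext {
///     system_directive: "You are a deterministic planner.".into(),
///     recall: vec!["user prefers terse output".into()],
///     history: vec![Message::user("plan the deploy")],
///     constraints: vec!["no destructive actions".into()],
/// };
/// assert_eq!(ctx.history.len(), 1);
/// ```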
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct CognitiveContext {
    /// The system directive (identity + constitution + role).
    pub system_directive: String,
    /// Relevant memories for the current context (long-term recall).
    pub recall: Vec<String>,
    /// Recent conversation history.
    pub history: Vec<Message>,
    /// Active kernel constraints the model must respect.
    pub constraints: Vec<String>,
}

/// Configuration for generation requests.
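///
/// # Example
///
/// Overriding one field while keeping the defaults:
///
/// ```rust
/// use tdln_brain::GenerationConfig;
///
/// let cfg = GenerationConfig {
///     require_reasoning: true,
///     ..Default::default()
/// };
/// assert!(cfg.require_reasoning);
/// assert_eq!(cfg.max_tokens, Some(1024));
/// ```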
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct GenerationConfig {
    /// Temperature (0.0 = deterministic).
    pub temperature: f32,
    /// Maximum tokens to generate.
    pub max_tokens: Option<u32>,
    /// Whether to require a reasoning block before the JSON output.
    pub require_reasoning: bool,
}

impl Default for GenerationConfig {
    fn default() -> Self {
        Self {
            temperature: 0.0,
            max_tokens: Some(1024),
            require_reasoning: false,
        }
    }
}
148
149/// Usage metadata from a generation request.
150#[derive(Clone, Debug, Default, Serialize, Deserialize)]
151pub struct UsageMeta {
152 /// Input token count (if available).
153 pub input_tokens: u32,
154 /// Output token count (if available).
155 pub output_tokens: u32,
156 /// Model identifier used.
157 pub model_id: String,
158}
159
160/// Raw output from a neural backend.
161#[derive(Clone, Debug)]
162pub struct RawOutput {
163 /// The raw content returned by the model.
164 pub content: String,
165 /// Usage metadata.
166 pub meta: UsageMeta,
167}
168
169/// A parsed decision containing reasoning and a strict intent.
170#[derive(Debug)]
171pub struct Decision {
172 /// Optional reasoning text extracted from the response.
173 pub reasoning: Option<String>,
174 /// The strictly-parsed TDLN intent.
175 pub intent: SemanticUnit,
176 /// Usage metadata from generation.
177 pub meta: UsageMeta,
178}
179
180// ══════════════════════════════════════════════════════════════════════════════
181// Provider Interface
182// ══════════════════════════════════════════════════════════════════════════════
183
184/// Trait for model providers (LLM backends).
185///
186/// Implement this trait to plug in any LLM (cloud or local).
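///
/// # Example
///
/// A minimal sketch of a custom backend; the `FixedReply` type and its
/// canned JSON string are illustrative (the string is not guaranteed to
/// be a valid `SemanticUnit`):
///
/// ```rust
/// use async_trait::async_trait;
/// use tdln_brain::{
///     BrainError, GenerationConfig, Message, NeuralBackend, RawOutput, UsageMeta,
/// };
///
/// struct FixedReply;
///
/// #[async_trait]
/// impl NeuralBackend for FixedReply {
///     fn model_id(&self) -> &str {
///         "fixed-reply-v0"
///     }
///
///     async fn generate(
///         &self,
///         _messages: &[Message],
///         _config: &GenerationConfig,
///     ) -> Result<RawOutput, BrainError> {
///         // Always return the same canned output, ignoring the prompt.
///         Ok(RawOutput {
///             content: r#"{"kind":"noop"}"#.into(),
///             meta: UsageMeta {
///                 model_id: self.model_id().into(),
///                 ..Default::default()
///             },
///         })
///     }
/// }
/// ```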
#[async_trait]
pub trait NeuralBackend: Send + Sync {
    /// Returns the model identifier.
    fn model_id(&self) -> &str;

    /// Generate a response from the given messages.
    async fn generate(
        &self,
        messages: &[Message],
        config: &GenerationConfig,
    ) -> Result<RawOutput, BrainError>;
}

// ══════════════════════════════════════════════════════════════════════════════
// Brain: End-to-End Reasoning
// ══════════════════════════════════════════════════════════════════════════════

/// The deterministic cognitive engine.
///
/// Wraps a [`NeuralBackend`] and provides the full pipeline:
/// render → generate → strict-parse → `SemanticUnit`.
pub struct Brain<B: NeuralBackend> {
    backend: B,
}

impl<B: NeuralBackend> Brain<B> {
    /// Create a new Brain with the given backend.
    #[must_use]
    pub fn new(backend: B) -> Self {
        Self { backend }
    }

    /// Get a reference to the backend.
    #[must_use]
    pub fn backend(&self) -> &B {
        &self.backend
    }

    /// Render → Generate → Strict-parse → `SemanticUnit`.
    ///
    /// # Errors
    ///
    /// Returns an error if rendering fails, the provider fails,
    /// or the output cannot be parsed into a valid `SemanticUnit`.
    pub async fn reason(
        &self,
        ctx: &CognitiveContext,
        cfg: &GenerationConfig,
    ) -> Result<Decision, BrainError> {
        // 1) Render narrative
        let msgs = prompt::render(ctx).map_err(BrainError::Render)?;

        // 2) Call provider
        let raw = self.backend.generate(&msgs, cfg).await?;

        // 3) Parse & validate
        parser::parse_decision(&raw.content, raw.meta)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn message_builders() {
        let sys = Message::system("hello");
        assert_eq!(sys.role, "system");
        assert_eq!(sys.content, "hello");

        let usr = Message::user("hi");
        assert_eq!(usr.role, "user");

        let ast = Message::assistant("ok");
        assert_eq!(ast.role, "assistant");
    }

    #[test]
    fn default_config() {
        let cfg = GenerationConfig::default();
        assert_eq!(cfg.temperature, 0.0);
        assert_eq!(cfg.max_tokens, Some(1024));
        assert!(!cfg.require_reasoning);
    }

    #[test]
    fn cognitive_context_default() {
        let ctx = CognitiveContext::default();
        assert!(ctx.system_directive.is_empty());
        assert!(ctx.recall.is_empty());
        assert!(ctx.history.is_empty());
        assert!(ctx.constraints.is_empty());
    }
}