// openrustclaw_core/traits.rs
//! Core traits for OpenRustClaw.
//!
//! All major subsystems are defined as traits here, allowing for
//! pluggable implementations and easy testing via mocks.
use std::pin::Pin;

use async_trait::async_trait;
use futures::Stream;
use serde_json::Value;

use crate::error::{Error, MemoryError, Result};
use crate::types::{
    CompletionRequest, CompletionResponse, CoreEntry, IncomingMessage, MemoryEntry, MemoryQuery,
    OutgoingMessage, Platform, ScoredMemory, SkillCapability, StreamChunk, ToolFormat, ToolOutput,
};
18// ---------------------------------------------------------------------------
19// LLM Provider
20// ---------------------------------------------------------------------------
21
22/// Trait for LLM completion providers (Anthropic, OpenAI, OpenRouter, Ollama).
23///
24/// Each provider uses its native SDK and implements this unified interface.
25/// The `native_tool_format()` method indicates which tool schema format the
26/// provider expects, enabling automatic translation via `crates/providers/tool_formats.rs`.
27#[async_trait]
28pub trait LlmProvider: Send + Sync {
29    /// Send a completion request and return the full response.
30    async fn complete(&self, request: CompletionRequest) -> Result<CompletionResponse>;
31
32    /// Send a completion request and return a stream of chunks.
33    async fn stream(
34        &self,
35        request: CompletionRequest,
36    ) -> Result<Pin<Box<dyn Stream<Item = Result<StreamChunk>> + Send>>>;
37
38    /// The model identifier (e.g. "claude-sonnet-4-20250514", "gpt-4o").
39    fn model_id(&self) -> &str;
40
41    /// Maximum context window size in tokens.
42    fn max_tokens(&self) -> usize;
43
44    /// Provider name for logging and fallback chain ("anthropic", "openai", "openrouter", "ollama").
45    fn provider_name(&self) -> &str;
46
47    /// Whether this provider supports strict tool mode (guaranteed schema compliance).
48    fn supports_strict_tools(&self) -> bool;
49
50    /// Whether this provider supports streaming tool call deltas
51    /// (e.g. Anthropic fine-grained tool streaming).
52    fn supports_streaming_tool_deltas(&self) -> bool;
53
54    /// The native tool definition format this provider expects.
55    fn native_tool_format(&self) -> ToolFormat;
56}
57
58// ---------------------------------------------------------------------------
59// Batch Provider
60// ---------------------------------------------------------------------------
61
62/// For providers that support batch processing (e.g. Anthropic Batch API — 50% cheaper).
63#[async_trait]
64pub trait BatchProvider: Send + Sync {
65    /// Submit a batch of completion requests for async processing.
66    async fn batch(&self, requests: Vec<CompletionRequest>) -> Result<Vec<CompletionResponse>>;
67
68    /// Maximum number of requests in a single batch.
69    fn max_batch_size(&self) -> usize;
70}
71
72// ---------------------------------------------------------------------------
73// Embedding Provider
74// ---------------------------------------------------------------------------
75
76/// Trait for text embedding providers used by the memory/RAG system.
77#[async_trait]
78pub trait EmbeddingProvider: Send + Sync {
79    /// Embed one or more texts into vectors.
80    async fn embed(&self, texts: &[&str]) -> Result<Vec<Vec<f32>>>;
81
82    /// The dimensionality of the embedding vectors.
83    fn dimensions(&self) -> usize;
84
85    /// The embedding model identifier.
86    fn model_id(&self) -> &str;
87}
88
89// ---------------------------------------------------------------------------
90// Tool
91// ---------------------------------------------------------------------------
92
93/// Trait for executable tools that the agent can invoke.
94///
95/// Tools are registered in a `ToolRegistry` and matched to LLM tool calls.
96/// Each tool declares its capabilities so the security layer can enforce
97/// permission boundaries.
98#[async_trait]
99pub trait Tool: Send + Sync {
100    /// Unique tool name (e.g. "memory_search", "file_read").
101    fn name(&self) -> &str;
102
103    /// Human-readable description for the LLM.
104    fn description(&self) -> &str;
105
106    /// JSON Schema for the tool's input parameters.
107    fn schema(&self) -> Value;
108
109    /// Capabilities this tool requires (used for WASM sandbox enforcement).
110    fn capabilities_required(&self) -> Vec<SkillCapability>;
111
112    /// Execute the tool with the given input and context.
113    async fn execute(&self, input: Value, ctx: &ToolContext) -> Result<ToolOutput>;
114}
115
/// Context provided to tools during execution.
///
/// Cheap to clone; carries only identifiers and an optional workspace path.
#[derive(Debug, Clone)]
pub struct ToolContext {
    /// Current session ID.
    pub session_id: String,
    /// Current user ID.
    pub user_id: String,
    /// Workspace root path (if applicable).
    pub workspace_path: Option<String>,
}

127// ---------------------------------------------------------------------------
128// Memory Store
129// ---------------------------------------------------------------------------
130
131/// Trait for the recall memory storage backend.
132///
133/// Implementations use SQLite (sqlx for general, libSQL for vectors).
134/// All writes go through memory policies (dedupe, importance, TTL).
135#[async_trait]
136pub trait MemoryStore: Send + Sync {
137    /// Store a new memory entry (after policy enforcement).
138    async fn store(&self, entry: MemoryEntry) -> Result<()>;
139
140    /// Search memories using hybrid BM25 + vector + MMR + temporal decay.
141    async fn search(&self, query: &MemoryQuery) -> Result<Vec<ScoredMemory>>;
142
143    /// Search memories using a caller-supplied query embedding when available.
144    async fn search_with_embedding(
145        &self,
146        query: &MemoryQuery,
147        _query_embedding: &[f32],
148    ) -> Result<Vec<ScoredMemory>> {
149        let _ = query;
150        Err(Error::Memory(MemoryError::Search(
151            "vector-aware search is not available for this memory store".to_string(),
152        )))
153    }
154
155    /// Get a memory entry by ID.
156    async fn get(&self, id: &str) -> Result<Option<MemoryEntry>>;
157
158    /// Delete a memory entry by ID.
159    async fn delete(&self, id: &str) -> Result<()>;
160
161    /// Check if content with this hash already exists. Returns the existing entry ID if found.
162    async fn dedupe_check(&self, content_hash: &str) -> Result<Option<String>>;
163
164    /// Expire entries past their TTL. Returns count of expired entries.
165    async fn expire_stale(&self) -> Result<u64>;
166}
167
168// ---------------------------------------------------------------------------
169// Core Memory Store
170// ---------------------------------------------------------------------------
171
172/// Trait for the core memory tier (tiny, always in prompt, ~500 tokens).
173#[async_trait]
174pub trait CoreMemoryStore: Send + Sync {
175    /// Get all core memory entries for a user.
176    async fn get_all(&self, user_id: &str) -> Result<Vec<CoreEntry>>;
177
178    /// Set a core memory entry. Overwrites if key exists.
179    async fn set(&self, user_id: &str, entry: CoreEntry) -> Result<()>;
180
181    /// Remove a core memory entry by key.
182    async fn remove(&self, user_id: &str, key: &str) -> Result<()>;
183
184    /// Render all core memory entries as a formatted string for system prompt injection.
185    async fn render(&self, user_id: &str) -> Result<String>;
186
187    /// Get total token count of all core memory entries for a user.
188    async fn total_tokens(&self, user_id: &str) -> Result<usize>;
189}
190
191// ---------------------------------------------------------------------------
192// Channel
193// ---------------------------------------------------------------------------
194
195/// Trait for messaging platform integrations (WebChat, Telegram, Discord, etc.).
196#[async_trait]
197pub trait Channel: Send + Sync {
198    /// Which platform this channel connects to.
199    fn platform(&self) -> Platform;
200
201    /// Send a message through this channel.
202    async fn send(&self, msg: OutgoingMessage) -> Result<()>;
203
204    /// Receive the next incoming message (blocks until available).
205    async fn receive(&self) -> Result<IncomingMessage>;
206
207    /// Establish the connection to the platform.
208    async fn connect(&mut self) -> Result<()>;
209
210    /// Gracefully disconnect.
211    async fn disconnect(&mut self) -> Result<()>;
212}