// algocline_engine/llm_bridge.rs
//! Channel-based bridge between Lua coroutines and async MCP Sampling.
//!
//! Lua calls `alc.llm(prompt, opts)` or `alc.llm_batch(items)` →
//! coroutine yields (non-blocking) → request is sent through a tokio mpsc channel →
//! async handler processes the queries → responses flow back through
//! tokio oneshot channels (one per query).

use algocline_core::QueryId;

/// A batch of LLM queries from a single Lua call.
///
/// For `alc.llm()`: contains exactly one [`QueryRequest`].
/// For `alc.llm_batch()`: contains N [`QueryRequest`]s.
pub struct LlmRequest {
    /// The queries in this batch; each one carries its own oneshot
    /// response channel (see [`QueryRequest::resp_tx`]).
    pub queries: Vec<QueryRequest>,
}
17
/// A single query within an LlmRequest batch.
pub struct QueryRequest {
    /// Identifier for this query within the batch.
    pub id: QueryId,
    /// Prompt text to send to the LLM.
    pub prompt: String,
    /// Optional system prompt. NOTE(review): presumably `None` means the
    /// async handler applies its own default — confirm against the handler.
    pub max_tokens: u32,
    /// Channel to send the response back to the yielded Lua coroutine.
    /// The payload is `Ok(completion_text)` or `Err(error_message)`.
    pub resp_tx: tokio::sync::oneshot::Sender<Result<String, String>>,
}