// batuta/agent/driver/apr_serve.rs
1//! AprServeDriver — first-class inference via `apr serve` subprocess.
2//!
3//! Spawns `apr serve run <model>` as a child process with CUDA/GPU support,
4//! then connects via OpenAI-compatible HTTP API. This is the **preferred**
5//! inference path for `batuta code` / `apr code`:
6//!
7//! - Full CUDA/GPU acceleration (apr-cli has all features)
8//! - APR and GGUF format support (prefers APR)
9//! - No feature flag issues (batuta doesn't need `cuda` feature)
10//! - Sovereign: localhost only, no data egress
11//!
12//! PMAT-160: Replaces embedded RealizarDriver as primary inference.
13//! RealizarDriver remains as fallback when `apr` binary is not on PATH.
14
15use async_trait::async_trait;
16use std::path::PathBuf;
17use std::process::{Child, Command, Stdio};
18
19use super::{CompletionRequest, CompletionResponse, LlmDriver, Message, ToolCall};
20use crate::agent::result::{AgentError, DriverError, StopReason, TokenUsage};
21use crate::serve::backends::PrivacyTier;
22
/// Driver that uses `apr serve` subprocess for inference.
///
/// Owns the child process handle; the `Drop` impl terminates the
/// subprocess so no orphaned server outlives the driver.
pub struct AprServeDriver {
    /// Base URL for the local server (e.g., `http://127.0.0.1:19384`)
    base_url: String,
    /// Model name for OpenAI API requests (derived from the model
    /// file's stem at launch, or "local" if the path has none).
    model_name: String,
    /// Child process handle (killed on drop — see the `Drop` impl)
    _child: Child,
    /// Context window size in tokens (defaults to 4096 at launch)
    context_window_size: usize,
}
34
35impl Drop for AprServeDriver {
36    /// PMAT-166: Graceful shutdown — SIGTERM first, SIGKILL after 2s timeout.
37    fn drop(&mut self) {
38        let pid = self._child.id();
39
40        // Try graceful shutdown first (SIGTERM on Unix via kill command)
41        #[cfg(unix)]
42        {
43            let _ = Command::new("kill")
44                .args(["-TERM", &pid.to_string()])
45                .stdout(Stdio::null())
46                .stderr(Stdio::null())
47                .status();
48
49            // Wait up to 2s for graceful exit
50            let deadline = std::time::Instant::now() + std::time::Duration::from_secs(2);
51            loop {
52                match self._child.try_wait() {
53                    Ok(Some(_)) => return, // Exited cleanly
54                    Ok(None) if std::time::Instant::now() < deadline => {
55                        std::thread::sleep(std::time::Duration::from_millis(100));
56                    }
57                    _ => break, // Timeout or error — force kill
58                }
59            }
60        }
61
62        // Fallback: force kill (always runs on Windows, or after SIGTERM timeout)
63        let _ = self._child.kill();
64        let _ = self._child.wait();
65    }
66}
67
impl AprServeDriver {
    /// Launch `apr serve run` and wait for readiness.
    ///
    /// Derives a port from the current process id (deterministic per
    /// process — not truly random), spawns the subprocess, then polls
    /// the server's TCP port until it accepts connections (max 30s).
    ///
    /// # Errors
    /// Returns `AgentError::Driver` if the `apr` binary is not on PATH,
    /// the subprocess fails to spawn, or the server does not become
    /// ready within 30 seconds.
    pub fn launch(model_path: PathBuf, context_window: Option<usize>) -> Result<Self, AgentError> {
        let apr_path = find_apr_binary()?;

        // Derive a high port from the pid to reduce (not eliminate) the
        // chance of conflicts: 19384 + (pid % 1000) stays within u16 range.
        let port = 19384 + (std::process::id() % 1000) as u16;
        let base_url = format!("http://127.0.0.1:{port}");

        // Model name for API requests: the file stem, or "local" fallback.
        let model_name = model_path
            .file_stem()
            .map(|s| s.to_string_lossy().to_string())
            .unwrap_or_else(|| "local".to_string());

        // PMAT-181: Enable GPU with serial prefill. The FP8 batched prefill produces
        // wrong output for Qwen3 (Q6K→FP8 requantization bug). Serial prefill uses
        // Q4K/Q6K GEMV kernels which produce correct output. BATCHED_PREFILL=0 disables
        // the FP8 path while keeping CUDA acceleration for decode tokens.
        let child = Command::new(&apr_path)
            .args([
                "serve",
                "run",
                &model_path.to_string_lossy(),
                "--port",
                &port.to_string(),
                "--host",
                "127.0.0.1",
                "--gpu",
            ])
            .env("BATCHED_PREFILL", "0")
            // Pipe both streams: stderr is read back for diagnostics if
            // startup fails (see `drain_stderr`).
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
            .map_err(|e| {
                AgentError::Driver(DriverError::InferenceFailed(format!(
                    "failed to spawn apr serve: {e}"
                )))
            })?;

        eprintln!("Launched apr serve on port {port} (pid {})", child.id());

        let mut driver = Self {
            base_url,
            model_name,
            _child: child,
            context_window_size: context_window.unwrap_or(4096),
        };

        // Wait for server to be ready
        driver.wait_for_ready()?;

        Ok(driver)
    }

    /// Poll the server's TCP port until it accepts connections (max 30s).
    ///
    /// NOTE(review): despite the name, this only checks that the port is
    /// connectable — it does not hit an HTTP health endpoint, so the
    /// server could accept TCP before the model is fully loaded.
    ///
    /// PMAT-171: Detects subprocess death during startup. On timeout or crash,
    /// reads stderr from the child process for actionable debug output.
    fn wait_for_ready(&mut self) -> Result<(), AgentError> {
        let addr = self.base_url.trim_start_matches("http://").to_string();
        // NOTE(review): the fallback hard-codes port 19384, but `launch`
        // may have picked 19384..20383 — the parse should never fail for
        // a URL built by `launch`, so this branch is effectively dead.
        let sock_addr: std::net::SocketAddr =
            addr.parse().unwrap_or_else(|_| std::net::SocketAddr::from(([127, 0, 0, 1], 19384)));

        let start = std::time::Instant::now();
        let timeout = std::time::Duration::from_secs(30);

        loop {
            // Timed out: surface subprocess stderr plus a manual-repro hint.
            if start.elapsed() > timeout {
                let stderr = self.drain_stderr();
                let mut msg = "apr serve did not become ready within 30s".to_string();
                if !stderr.is_empty() {
                    msg.push_str(&format!("\nsubprocess stderr:\n{stderr}"));
                }
                msg.push_str(&format!(
                    "\nDebug manually: apr serve run <model> --port {} --host 127.0.0.1",
                    addr.rsplit(':').next().unwrap_or("19384")
                ));
                return Err(AgentError::Driver(DriverError::InferenceFailed(msg)));
            }

            // Check if subprocess died (try_wait returns Some(status) on exit)
            if let Ok(Some(status)) = self._child.try_wait() {
                let stderr = self.drain_stderr();
                let mut msg = format!("apr serve exited with {status} during startup");
                if !stderr.is_empty() {
                    msg.push_str(&format!("\nsubprocess stderr:\n{stderr}"));
                }
                return Err(AgentError::Driver(DriverError::InferenceFailed(msg)));
            }

            // Probe with a short connect timeout so a down server doesn't
            // stall the poll loop.
            if std::net::TcpStream::connect_timeout(
                &sock_addr,
                std::time::Duration::from_millis(200),
            )
            .is_ok()
            {
                eprintln!("apr serve ready ({:.1}s)", start.elapsed().as_secs_f64());
                return Ok(());
            }

            std::thread::sleep(std::time::Duration::from_millis(500));
        }
    }

    /// Read stderr from the child process: one read of up to 2KB, then
    /// the last 10 lines of whatever was read.
    ///
    /// NOTE(review): `Read::read` on a piped stderr is a *blocking* call —
    /// it returns promptly on EOF (child exited) or when data is already
    /// buffered, but could stall on a live, silent child. Confirm this is
    /// only reached after death/timeout, where the child is expected to
    /// have written or exited.
    fn drain_stderr(&mut self) -> String {
        use std::io::Read;
        let Some(stderr) = self._child.stderr.as_mut() else {
            return String::new();
        };
        let mut buf = vec![0u8; 2048];
        // A failed read is treated as "nothing available" (n = 0).
        let n = stderr.read(&mut buf).unwrap_or(0);
        let text = String::from_utf8_lossy(&buf[..n]).to_string();
        // Return only the last 10 lines for concise output
        let lines: Vec<&str> = text.lines().collect();
        if lines.len() > 10 {
            lines[lines.len() - 10..].join("\n")
        } else {
            text
        }
    }

    /// Build OpenAI-compatible request body.
    ///
    /// PMAT-176: Only strips the verbose `## Available Tools` section injected
    /// by `build_enriched_system()` (full JSON schemas ~2000 tokens). Preserves
    /// the compact `## Tools` table from `CODE_SYSTEM_PROMPT` — that table has
    /// tool names, use cases, and example inputs designed for 1.5B-7B models.
    fn build_openai_body(&self, request: &CompletionRequest) -> serde_json::Value {
        let mut messages = Vec::new();

        if let Some(ref system) = request.system {
            // PMAT-176: Only strip the verbose enriched section (full JSON schemas).
            // Keep the compact "## Tools" table from CODE_SYSTEM_PROMPT — it has
            // descriptions and examples that small models need for tool discovery.
            // `find` returns a byte index at a char boundary of the needle, so
            // the slice below cannot panic.
            let compact_system = system
                .find("\n\n## Available Tools")
                .map(|i| &system[..i])
                .unwrap_or(system)
                .to_string();

            messages.push(serde_json::json!({
                "role": "system",
                "content": compact_system
            }));
        }

        // Flatten the conversation into OpenAI chat messages. Tool calls and
        // tool results are encoded as tagged text (the local model parses
        // <tool_call>/<tool_result> markers rather than native tool roles).
        for msg in &request.messages {
            match msg {
                Message::User(text) => messages.push(serde_json::json!({
                    "role": "user",
                    "content": text
                })),
                Message::Assistant(text) => messages.push(serde_json::json!({
                    "role": "assistant",
                    "content": text
                })),
                Message::AssistantToolUse(call) => messages.push(serde_json::json!({
                    "role": "assistant",
                    "content": format!("<tool_call>\n{}\n</tool_call>",
                        serde_json::json!({"name": call.name, "input": call.input}))
                })),
                Message::ToolResult(result) => messages.push(serde_json::json!({
                    "role": "user",
                    "content": format!("<tool_result>\n{}\n</tool_result>", result.content)
                })),
                // Other message variants are intentionally dropped here.
                _ => {}
            }
        }

        // PMAT-170: Cap max_tokens for HTTP path. The manifest default (4096)
        // causes very long generation on local models. 1024 accommodates:
        // - Tool call JSON (~100-200 tokens each)
        // - File edit content (multi-line diffs)
        // - Explanation text alongside tool calls
        // Previous 512 cap truncated complex edits mid-output.
        let max_tokens = request.max_tokens.min(1024);

        serde_json::json!({
            "model": self.model_name,
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": request.temperature,
            "stream": false
        })
    }
}
259
#[async_trait]
impl LlmDriver for AprServeDriver {
    /// POST the request to the local `/v1/chat/completions` endpoint and
    /// parse the OpenAI-format response into a `CompletionResponse`.
    ///
    /// NOTE(review): a fresh `reqwest::Client` (which owns a connection
    /// pool) is built on every call; caching one on the struct would be
    /// cheaper — left as-is since it would change the type's fields.
    async fn complete(&self, request: CompletionRequest) -> Result<CompletionResponse, AgentError> {
        let url = format!("{}/v1/chat/completions", self.base_url);
        let body = self.build_openai_body(&request);

        // 120s timeout covers slow local generation at the 1024-token cap.
        let client = reqwest::Client::builder()
            .timeout(std::time::Duration::from_secs(120))
            .build()
            .map_err(|e| AgentError::Driver(DriverError::Network(format!("http client: {e}"))))?;
        let response = client
            .post(&url)
            .header("content-type", "application/json")
            .json(&body)
            .send()
            .await
            .map_err(|e| AgentError::Driver(DriverError::Network(format!("apr serve: {e}"))))?;

        // Non-2xx: include status and body text in the error for debugging.
        if !response.status().is_success() {
            let status = response.status().as_u16();
            let text = response.text().await.unwrap_or_default();
            return Err(AgentError::Driver(DriverError::Network(format!(
                "apr serve HTTP {status}: {text}"
            ))));
        }

        let json: serde_json::Value = response
            .json()
            .await
            .map_err(|e| AgentError::Driver(DriverError::InferenceFailed(format!("parse: {e}"))))?;

        // Extract response from OpenAI format; missing fields yield "".
        let raw_text = json["choices"][0]["message"]["content"].as_str().unwrap_or("").to_string();

        // PMAT-180: Strip Qwen3 thinking blocks. The model may emit
        // <think>...</think> or bare </think> tokens. Remove them before
        // parsing tool calls — thinking content is internal reasoning.
        let text = strip_thinking_blocks(&raw_text);

        // Token usage is best-effort: absent fields default to 0.
        let usage = json.get("usage").cloned().unwrap_or(serde_json::json!({}));
        let input_tokens = usage["prompt_tokens"].as_u64().unwrap_or(0);
        let output_tokens = usage["completion_tokens"].as_u64().unwrap_or(0);

        // Parse tool calls from text (same parser as RealizarDriver)
        let (clean_text, tool_calls) = super::realizar::parse_tool_calls_pub(&text);

        // Any tool call means the agent loop should dispatch tools next.
        let stop_reason =
            if tool_calls.is_empty() { StopReason::EndTurn } else { StopReason::ToolUse };

        Ok(CompletionResponse {
            text: clean_text,
            stop_reason,
            tool_calls,
            usage: TokenUsage { input_tokens, output_tokens },
        })
    }

    /// Context window size in tokens, as configured at launch.
    fn context_window(&self) -> usize {
        self.context_window_size
    }

    /// Privacy classification of this driver.
    fn privacy_tier(&self) -> PrivacyTier {
        // Sovereign: apr serve runs on localhost, zero network egress
        PrivacyTier::Sovereign
    }
}
326
/// Strip Qwen3 thinking blocks (`<think>...</think>`) and bare `</think>` tags.
///
/// An unclosed `<think>` swallows everything to the end of the text.
/// The surviving text is trimmed of surrounding whitespace.
fn strip_thinking_blocks(text: &str) -> String {
    const OPEN: &str = "<think>";
    const CLOSE: &str = "</think>";

    // Single pass: copy text outside <think>...</think> spans into `kept`,
    // advancing a remainder slice instead of mutating in place.
    let mut kept = String::with_capacity(text.len());
    let mut rest = text;
    while let Some(open_at) = rest.find(OPEN) {
        kept.push_str(&rest[..open_at]);
        let after_open = &rest[open_at + OPEN.len()..];
        match after_open.find(CLOSE) {
            Some(close_at) => rest = &after_open[close_at + CLOSE.len()..],
            None => {
                // Unclosed <think> — drop everything from the tag onward.
                rest = "";
            }
        }
    }
    kept.push_str(rest);

    // Model sometimes emits a closing tag with no opener; drop those too.
    kept.replace(CLOSE, "").trim().to_string()
}
344
345/// Find the `apr` binary on PATH.
346fn find_apr_binary() -> Result<PathBuf, AgentError> {
347    which::which("apr").map_err(|_| {
348        AgentError::Driver(DriverError::InferenceFailed(
349            "apr binary not found on PATH. Install: cargo install apr-cli".into(),
350        ))
351    })
352}
353
354#[cfg(test)]
355#[path = "apr_serve_tests.rs"]
356mod tests;