//! codex_mobile_bridge/state/mod.rs — root of the bridge state module:
//! declares the submodules that implement directory, event, runtime, and
//! thread handling for `BridgeState`.

mod directories;
mod events;
mod helpers;
mod management;
mod render;
mod runtime;
mod threads;

#[cfg(test)]
mod tests;
11
use std::collections::HashMap;
use std::fs;
use std::path::{Component, Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::time::{Duration as StdDuration, Instant};

use anyhow::{Result, bail};
use serde_json::{Value, json};
use tokio::sync::{RwLock, broadcast, mpsc};
use tokio::time::Duration;
use tracing::warn;

use self::directories::seed_directory_bookmarks;
use self::events::run_app_server_event_loop;
use self::runtime::ManagedRuntime;
use crate::app_server::AppServerInbound;
use crate::bridge_protocol::{
    DirectoryBookmarkRecord, DirectoryHistoryRecord, PendingServerRequestRecord, PersistedEvent,
    RuntimeStatusSnapshot, RuntimeSummary, ThreadRenderSnapshot, require_payload,
};
use crate::config::Config;
use crate::storage::Storage;
34
/// Shared, process-wide state of the mobile bridge: persistent storage,
/// managed runtimes, event fan-out, and staged turn-input bookkeeping.
pub struct BridgeState {
    // Bearer token clients must present; exposed via `config_token`.
    token: String,
    // Persistent store for events, acks, bookmarks, and runtime records.
    storage: Storage,
    // Live runtime handles keyed by runtime id; rebuilt from storage at boot.
    runtimes: RwLock<HashMap<String, Arc<ManagedRuntime>>>,
    // Id of the runtime ensured by `Storage::ensure_primary_runtime`.
    primary_runtime_id: String,
    // Maximum number of runtimes; clamped to at least 1 during bootstrap.
    runtime_limit: usize,
    // Directory holding staged input files, wiped on startup.
    staging_root: PathBuf,
    // Sender feeding the app-server event loop.
    inbound_tx: mpsc::UnboundedSender<AppServerInbound>,
    // Broadcast channel fanning persisted events out to subscribers.
    events_tx: broadcast::Sender<PersistedEvent>,
    // Staged input file paths keyed by turn id, cleaned up after the turn.
    staged_turn_inputs: Mutex<HashMap<String, Vec<PathBuf>>>,
    // Latest render snapshot per thread id.
    thread_render_snapshots: Mutex<HashMap<String, ThreadRenderSnapshot>>,
    // Last warn-log time per key, for rate limiting timeout warnings.
    timeout_warning_tracker: Mutex<HashMap<String, Instant>>,
    // Sequence number of the most recently observed/emitted event.
    external_event_cursor: Mutex<i64>,
}
49
50impl BridgeState {
51 pub async fn bootstrap(config: Config) -> Result<Arc<Self>> {
52 let storage = Storage::open(config.db_path.clone())?;
53 seed_directory_bookmarks(&storage, &config.directory_bookmarks)?;
54 let staging_root = staging_root_from_db_path(&config.db_path);
55 prepare_staging_root(&staging_root)?;
56
57 let primary_runtime = storage.ensure_primary_runtime(
58 config
59 .codex_home
60 .as_ref()
61 .map(|path| path.to_string_lossy().to_string()),
62 config.codex_binary.clone(),
63 )?;
64
65 let (events_tx, _) = broadcast::channel(512);
66 let (inbound_tx, inbound_rx) = mpsc::unbounded_channel();
67
68 let mut runtime_map = HashMap::new();
69 for record in storage.list_runtimes()? {
70 let runtime = Arc::new(Self::build_runtime(record, inbound_tx.clone()));
71 runtime_map.insert(runtime.record.runtime_id.clone(), runtime);
72 }
73
74 let state = Arc::new(Self {
75 token: config.token,
76 storage,
77 runtimes: RwLock::new(runtime_map),
78 primary_runtime_id: primary_runtime.runtime_id,
79 runtime_limit: config.runtime_limit.max(1),
80 staging_root,
81 inbound_tx: inbound_tx.clone(),
82 events_tx,
83 staged_turn_inputs: Mutex::new(HashMap::new()),
84 thread_render_snapshots: Mutex::new(HashMap::new()),
85 timeout_warning_tracker: Mutex::new(HashMap::new()),
86 external_event_cursor: Mutex::new(0),
87 });
88
89 *state
90 .external_event_cursor
91 .lock()
92 .expect("external event cursor poisoned") = state.storage.latest_event_seq()?;
93
94 tokio::spawn(run_app_server_event_loop(Arc::clone(&state), inbound_rx));
95 tokio::spawn(management::run_external_event_relay(Arc::clone(&state)));
96
97 for summary in state.runtime_summaries().await {
98 if summary.auto_start {
99 let runtime_id = summary.runtime_id.clone();
100 let state_ref = Arc::clone(&state);
101 tokio::spawn(async move {
102 if let Err(error) = state_ref.start_existing_runtime(&runtime_id).await {
103 let _ = state_ref
104 .emit_runtime_degraded(
105 &runtime_id,
106 format!("自动启动 runtime 失败: {error}"),
107 )
108 .await;
109 }
110 });
111 }
112 }
113
114 Ok(state)
115 }
116
117 pub fn subscribe_events(&self) -> broadcast::Receiver<PersistedEvent> {
118 self.events_tx.subscribe()
119 }
120
121 pub fn config_token(&self) -> &str {
122 &self.token
123 }
124
125 pub async fn hello_payload(
126 &self,
127 device_id: &str,
128 provided_ack_seq: Option<i64>,
129 ) -> Result<(
130 RuntimeStatusSnapshot,
131 Vec<RuntimeSummary>,
132 Vec<DirectoryBookmarkRecord>,
133 Vec<DirectoryHistoryRecord>,
134 Vec<PendingServerRequestRecord>,
135 Vec<PersistedEvent>,
136 )> {
137 let fallback_ack = self.storage.get_mobile_session_ack(device_id)?.unwrap_or(0);
138 let last_ack_seq = provided_ack_seq.unwrap_or(fallback_ack);
139 self.storage
140 .save_mobile_session_ack(device_id, last_ack_seq)?;
141
142 let runtime = self.runtime_snapshot_for_client().await;
143 let runtimes = self.runtime_summaries_for_client().await;
144 let directory_bookmarks = self.storage.list_directory_bookmarks()?;
145 let directory_history = self.storage.list_directory_history(20)?;
146 let pending_requests = self.storage.list_pending_requests()?;
147 let replay_events = self.storage.replay_events_after(last_ack_seq)?;
148
149 Ok((
150 runtime,
151 runtimes,
152 directory_bookmarks,
153 directory_history,
154 pending_requests,
155 replay_events,
156 ))
157 }
158
159 pub fn ack_events(&self, device_id: &str, last_seq: i64) -> Result<()> {
160 self.storage.save_mobile_session_ack(device_id, last_seq)
161 }
162
163 pub async fn handle_request(&self, action: &str, payload: Value) -> Result<Value> {
164 match action {
165 "get_runtime_status" => self.get_runtime_status(require_payload(payload)?).await,
166 "list_runtimes" => Ok(json!({ "runtimes": self.runtime_summaries_for_client().await })),
167 "start_runtime" => self.start_runtime(require_payload(payload)?).await,
168 "stop_runtime" => self.stop_runtime(require_payload(payload)?).await,
169 "restart_runtime" => self.restart_runtime(require_payload(payload)?).await,
170 "prune_runtime" => self.prune_runtime(require_payload(payload)?).await,
171 "read_directory" => self.read_directory(require_payload(payload)?).await,
172 "create_directory_bookmark" => {
173 self.create_directory_bookmark(require_payload(payload)?)
174 .await
175 }
176 "remove_directory_bookmark" => {
177 self.remove_directory_bookmark(require_payload(payload)?)
178 .await
179 }
180 "list_threads" => self.list_threads(require_payload(payload)?).await,
181 "start_thread" => self.start_thread(require_payload(payload)?).await,
182 "read_thread" => self.read_thread(require_payload(payload)?).await,
183 "resume_thread" => self.resume_thread(require_payload(payload)?).await,
184 "update_thread" => self.update_thread(require_payload(payload)?).await,
185 "archive_thread" => self.archive_thread(require_payload(payload)?).await,
186 "unarchive_thread" => self.unarchive_thread(require_payload(payload)?).await,
187 "stage_input_image" => self.stage_input_image(require_payload(payload)?).await,
188 "send_turn" => self.send_turn(require_payload(payload)?).await,
189 "interrupt_turn" => self.interrupt_turn(require_payload(payload)?).await,
190 "respond_pending_request" => {
191 self.respond_pending_request(require_payload(payload)?)
192 .await
193 }
194 "start_bridge_management" => {
195 self.start_bridge_management(require_payload(payload)?)
196 .await
197 }
198 "read_bridge_management" => {
199 self.read_bridge_management(require_payload(payload)?).await
200 }
201 "inspect_remote_state" => self.inspect_remote_state().await,
202 _ => bail!("未知 action: {action}"),
203 }
204 }
205
206 fn log_timeout_warning(&self, key: &str, message: &str) {
207 if self.should_emit_rate_limited_notice(key) {
208 warn!("{message}");
209 }
210 }
211
212 pub(super) fn should_emit_rate_limited_notice(&self, key: &str) -> bool {
213 let now = Instant::now();
214 let mut tracker = self
215 .timeout_warning_tracker
216 .lock()
217 .expect("timeout warning tracker poisoned");
218 let should_emit = tracker
219 .get(key)
220 .map(|last| now.duration_since(*last) >= CLIENT_TIMEOUT_WARN_COOLDOWN)
221 .unwrap_or(true);
222 if should_emit {
223 tracker.insert(key.to_string(), now);
224 }
225 should_emit
226 }
227
228 fn emit_event(
229 &self,
230 event_type: &str,
231 runtime_id: Option<&str>,
232 thread_id: Option<&str>,
233 payload: Value,
234 ) -> Result<()> {
235 let event = self
236 .storage
237 .append_event(event_type, runtime_id, thread_id, &payload)?;
238 *self
239 .external_event_cursor
240 .lock()
241 .expect("external event cursor poisoned") = event.seq;
242 let _ = self.events_tx.send(event);
243 Ok(())
244 }
245
246 pub(super) fn staging_root(&self) -> &Path {
247 &self.staging_root
248 }
249
250 pub(super) fn register_staged_turn_inputs(&self, turn_id: &str, paths: Vec<PathBuf>) {
251 if paths.is_empty() {
252 return;
253 }
254 let mut staged_turn_inputs = self
255 .staged_turn_inputs
256 .lock()
257 .expect("staged turn inputs poisoned");
258 staged_turn_inputs.insert(turn_id.to_string(), paths);
259 }
260
261 pub(super) fn cleanup_staged_turn_inputs(&self, turn_id: &str) -> Result<()> {
262 let paths = self
263 .staged_turn_inputs
264 .lock()
265 .expect("staged turn inputs poisoned")
266 .remove(turn_id)
267 .unwrap_or_default();
268 self.cleanup_staged_paths(paths)
269 }
270
271 pub(super) fn cleanup_staged_paths<I>(&self, paths: I) -> Result<()>
272 where
273 I: IntoIterator<Item = PathBuf>,
274 {
275 for path in paths {
276 self.remove_staged_path(&path)?;
277 }
278 Ok(())
279 }
280
281 fn remove_staged_path(&self, path: &Path) -> Result<()> {
282 if !path.starts_with(&self.staging_root) {
283 bail!("拒绝清理 staging 目录之外的文件: {}", path.display());
284 }
285 if path.exists() {
286 fs::remove_file(path)?;
287 }
288 Ok(())
289 }
290}
291
/// Directory used for staged turn-input files, placed next to the database.
///
/// Falls back to the current directory when the database path has no parent.
fn staging_root_from_db_path(db_path: &Path) -> PathBuf {
    let base = match db_path.parent() {
        Some(parent) => parent,
        None => Path::new("."),
    };
    base.join("staged-inputs")
}
298
299fn prepare_staging_root(staging_root: &Path) -> Result<()> {
300 fs::create_dir_all(staging_root)?;
301 for entry in fs::read_dir(staging_root)? {
302 let path = entry?.path();
303 if path.is_dir() {
304 fs::remove_dir_all(&path)?;
305 } else {
306 fs::remove_file(&path)?;
307 }
308 }
309 Ok(())
310}
311
// Timeout for client-facing status queries — presumably bounds how long a
// runtime status call may take before falling back; used outside this
// module chunk, TODO confirm against callers.
pub(super) const CLIENT_STATUS_TIMEOUT: Duration = Duration::from_millis(400);
// Minimum interval between repeated timeout warnings for the same key
// (enforced by `should_emit_rate_limited_notice`).
pub(super) const CLIENT_TIMEOUT_WARN_COOLDOWN: StdDuration = StdDuration::from_secs(30);