// room_cli/broker/daemon.rs
1//! Multi-room daemon: manages N rooms in a single process.
2//!
3//! `DaemonState` wraps a map of room_id → `RoomState` and provides room
4//! lifecycle (create/destroy/get). The daemon listens on a single UDS
5//! socket at a configurable path and dispatches connections to the correct
6//! room based on an extended handshake protocol.
7//!
8//! ## Handshake protocol
9//!
//! The first line of a UDS connection to the daemon can carry one of the
//! following prefixes:
12//!
13//! - `ROOM:<room_id>:<rest>` — route to an existing room. The rest of the
14//!   line is the standard per-room handshake (`SEND:`, `TOKEN:`, `JOIN:`,
15//!   or plain username).
16//! - `CREATE:<room_id>` — create a new room. A second line carries the
17//!   room configuration as JSON (`{"visibility":"public","invite":[]}`).
18//! - `DESTROY:<room_id>` — destroy a room. Signals shutdown to connected
19//!   clients and removes the room from the daemon's map.
20//!
21//! If no recognised prefix is present, the connection is rejected with an error.
22//!
23//! Examples:
24//! ```text
25//! ROOM:myroom:JOIN:alice       → join room "myroom" as "alice"
26//! ROOM:myroom:TOKEN:<uuid>     → authenticated send to "myroom"
27//! ROOM:myroom:SEND:bob         → legacy unauthenticated send to "myroom"
28//! ROOM:myroom:alice            → interactive join to "myroom" as "alice"
29//! CREATE:newroom               → create room "newroom" (config on next line)
30//! DESTROY:myroom               → destroy room "myroom"
31//! ```
32
33use std::{
34    collections::HashMap,
35    path::PathBuf,
36    sync::{
37        atomic::{AtomicU64, AtomicUsize, Ordering},
38        Arc,
39    },
40};
41
42use tokio::{
43    net::UnixListener,
44    sync::{broadcast, watch, Mutex},
45};
46
47use crate::registry::UserRegistry;
48
49use super::{
50    handle_oneshot_send,
51    state::{RoomState, TokenMap},
52    ws::{self, DaemonWsState},
53};
54
/// Characters that are unsafe in filesystem paths or shell contexts.
///
/// Note that `:` is also the handshake field delimiter (`ROOM:<id>:<rest>`),
/// so excluding it from room IDs keeps the wire protocol unambiguous.
const UNSAFE_CHARS: &[char] = &['/', '\\', ':', '*', '?', '"', '<', '>', '|', '\0'];
57
58// ── PID file management ───────────────────────────────────────────────────────
59
/// Write the current process's PID to `path` (creates or overwrites).
///
/// Returns any I/O error from the underlying write.
pub fn write_pid_file(path: &std::path::Path) -> std::io::Result<()> {
    let pid = std::process::id().to_string();
    std::fs::write(path, pid)
}
64
65/// Returns `true` if the PID recorded in `path` belongs to a running process.
66///
67/// Returns `false` when the file is missing, unreadable, or unparseable, and
68/// when the process is confirmed dead (ESRCH).
69pub fn is_pid_alive(path: &std::path::Path) -> bool {
70    let Ok(contents) = std::fs::read_to_string(path) else {
71        return false;
72    };
73    let Ok(pid) = contents.trim().parse::<u32>() else {
74        return false;
75    };
76    pid_alive(pid)
77}
78
/// Remove the PID file, ignoring errors (best-effort cleanup).
///
/// A missing or already-removed file is not an error.
pub fn remove_pid_file(path: &std::path::Path) {
    // Discard the result: stale or absent PID files are fine.
    std::fs::remove_file(path).ok();
}
83
/// Check whether a process with the given PID is currently running.
///
/// Uses POSIX `kill(pid, 0)` — signal 0 never delivers a signal but the kernel
/// validates whether the calling process may signal `pid`, returning:
/// - `0`  → process exists
/// - `-1` with `EPERM` (errno 1)  → process exists, permission denied
/// - `-1` with `ESRCH` (errno 3)  → no such process
#[cfg(unix)]
fn pid_alive(pid: u32) -> bool {
    extern "C" {
        fn kill(pid: i32, sig: i32) -> i32;
    }
    // SAFETY: kill(pid, 0) never delivers a signal; it only checks liveness.
    if unsafe { kill(pid as i32, 0) } == 0 {
        return true;
    }
    // EPERM == 1 on Linux and macOS: the process exists but we may not signal it.
    matches!(std::io::Error::last_os_error().raw_os_error(), Some(1))
}
104
/// Check whether a process with the given PID is currently running.
///
/// Non-Unix fallback: there is no portable liveness probe here, so this
/// conservatively reports `true` — a stale PID file is treated as a live
/// daemon rather than risking a double-start.
#[cfg(not(unix))]
fn pid_alive(_pid: u32) -> bool {
    // Conservative: assume the process is alive on non-Unix platforms.
    true
}
110
111/// Maximum allowed length for a room ID.
112const MAX_ROOM_ID_LEN: usize = 64;
113
114/// Validate a room ID for filesystem safety.
115///
116/// Rejects IDs that are empty, too long, contain path traversal sequences
117/// (`..`), whitespace, or filesystem-unsafe characters.
118pub fn validate_room_id(room_id: &str) -> Result<(), String> {
119    if room_id.is_empty() {
120        return Err("room ID cannot be empty".into());
121    }
122    if room_id.len() > MAX_ROOM_ID_LEN {
123        return Err(format!(
124            "room ID too long ({} chars, max {MAX_ROOM_ID_LEN})",
125            room_id.len()
126        ));
127    }
128    if room_id == "." || room_id == ".." || room_id.contains("..") {
129        return Err("room ID cannot contain '..'".into());
130    }
131    if room_id.chars().any(|c| c.is_whitespace()) {
132        return Err("room ID cannot contain whitespace".into());
133    }
134    if let Some(bad) = room_id.chars().find(|c| UNSAFE_CHARS.contains(c)) {
135        return Err(format!("room ID contains unsafe character: {bad:?}"));
136    }
137    Ok(())
138}
139
/// Configuration for the daemon.
#[derive(Debug, Clone)]
pub struct DaemonConfig {
    /// Path to the daemon UDS socket (ephemeral, platform-native temp dir).
    pub socket_path: PathBuf,
    /// Directory for chat files. Each room gets `<data_dir>/<room_id>.chat`.
    /// Defaults to `~/.room/data/`; overridable with `--data-dir`.
    pub data_dir: PathBuf,
    /// Directory for state files (token maps, cursors, subscriptions).
    /// Defaults to `~/.room/state/`.
    pub state_dir: PathBuf,
    /// Optional WebSocket/REST port. `None` disables the HTTP listener.
    pub ws_port: Option<u16>,
    /// Seconds to wait after the last connection closes before shutting down.
    ///
    /// Default is 30 seconds. Set to 0 for immediate shutdown when the last
    /// client disconnects. Has no effect if there are always active connections.
    pub grace_period_secs: u64,
}
159
160impl DaemonConfig {
161    /// Resolve the chat file path for a given room.
162    pub fn chat_path(&self, room_id: &str) -> PathBuf {
163        self.data_dir.join(format!("{room_id}.chat"))
164    }
165
166    /// Resolve the token-map persistence path for a given room.
167    pub fn token_map_path(&self, room_id: &str) -> PathBuf {
168        crate::paths::broker_tokens_path(&self.state_dir, room_id)
169    }
170
171    /// System-level token persistence path: `<state_dir>/tokens.json`.
172    ///
173    /// Used by the daemon to share a single token store across all rooms.
174    /// Production default is `~/.room/state/tokens.json`; tests override
175    /// `state_dir` with a temp directory.
176    pub fn system_tokens_path(&self) -> PathBuf {
177        self.state_dir.join("tokens.json")
178    }
179
180    /// Resolve the subscription-map persistence path for a given room.
181    pub fn subscription_map_path(&self, room_id: &str) -> PathBuf {
182        crate::paths::broker_subscriptions_path(&self.state_dir, room_id)
183    }
184}
185
/// Production defaults: platform socket path, the standard data/state dirs
/// (see `crate::paths`), no WebSocket listener, and a 30-second grace period.
impl Default for DaemonConfig {
    fn default() -> Self {
        Self {
            socket_path: crate::paths::room_socket_path(),
            data_dir: crate::paths::room_data_dir(),
            state_dir: crate::paths::room_state_dir(),
            ws_port: None,
            grace_period_secs: 30,
        }
    }
}
197
/// Registry of active rooms, keyed by room_id.
pub(crate) type RoomMap = Arc<Mutex<HashMap<String, Arc<RoomState>>>>;

/// Multi-room daemon state.
pub struct DaemonState {
    /// Active rooms keyed by room ID (see [`RoomMap`]).
    pub(crate) rooms: RoomMap,
    /// Daemon configuration: paths, optional WS port, grace period.
    pub(crate) config: DaemonConfig,
    /// Global client ID counter shared across all rooms.
    pub(crate) next_client_id: Arc<AtomicU64>,
    /// Daemon-level shutdown signal.
    pub(crate) shutdown: Arc<watch::Sender<bool>>,
    /// System-level token map shared across all rooms (runtime cache).
    ///
    /// A single `Arc<Mutex<HashMap>>` instance is cloned into every room's
    /// `token_map`. Tokens issued in any room are valid in all rooms managed
    /// by this daemon. Seeded from `user_registry` on startup; kept in sync
    /// by [`super::auth::issue_token_via_registry`].
    pub(crate) system_token_map: TokenMap,
    /// Daemon-level user registry — sole persistence layer for cross-room identity.
    ///
    /// Stores user profiles, room memberships, and tokens to
    /// `~/.room/state/users.json`. New sessions register/update here;
    /// `system_token_map` is derived from this registry at startup and kept
    /// in sync on every join.
    pub(crate) user_registry: Arc<tokio::sync::Mutex<UserRegistry>>,
    /// Number of currently active UDS connections.
    ///
    /// Incremented when a connection is accepted; decremented when the
    /// connection task completes. When the count drops to zero the daemon
    /// starts a grace period timer before sending the shutdown signal.
    pub(crate) connection_count: Arc<AtomicUsize>,
}
230
231impl DaemonState {
232    /// Create a new daemon with the given configuration and no rooms.
233    pub fn new(config: DaemonConfig) -> Self {
234        let (shutdown_tx, _) = watch::channel(false);
235
236        // Load UserRegistry from disk (sole source of truth for identity).
237        //
238        // Migration path: if `users.json` (UserRegistry) does not exist but
239        // the legacy `tokens.json` (system_token_map from #334) does, import
240        // the flat token map into a fresh registry so existing sessions survive
241        // the upgrade without requiring a forced re-join.
242        let registry = load_or_migrate_registry(&config);
243
244        // Seed the runtime token map from the registry so existing tokens remain
245        // valid across daemon restarts without requiring a fresh join.
246        let token_snapshot = registry.token_snapshot();
247
248        Self {
249            rooms: Arc::new(Mutex::new(HashMap::new())),
250            config,
251            next_client_id: Arc::new(AtomicU64::new(0)),
252            shutdown: Arc::new(shutdown_tx),
253            system_token_map: Arc::new(Mutex::new(token_snapshot)),
254            user_registry: Arc::new(tokio::sync::Mutex::new(registry)),
255            connection_count: Arc::new(AtomicUsize::new(0)),
256        }
257    }
258
259    /// Create a room and register it. Returns `Err` if the room ID is invalid
260    /// or the room already exists.
261    pub async fn create_room(&self, room_id: &str) -> Result<(), String> {
262        create_room_entry(
263            room_id,
264            None,
265            &self.rooms,
266            &self.config,
267            &self.system_token_map,
268            Some(self.user_registry.clone()),
269        )
270        .await
271    }
272
273    /// Create a room with explicit configuration. Returns `Err` if the room ID
274    /// is invalid or the room already exists.
275    pub async fn create_room_with_config(
276        &self,
277        room_id: &str,
278        config: room_protocol::RoomConfig,
279    ) -> Result<(), String> {
280        create_room_entry(
281            room_id,
282            Some(config),
283            &self.rooms,
284            &self.config,
285            &self.system_token_map,
286            Some(self.user_registry.clone()),
287        )
288        .await
289    }
290
291    /// Get a room's config, if it exists.
292    pub async fn get_room_config(&self, room_id: &str) -> Option<room_protocol::RoomConfig> {
293        self.rooms
294            .lock()
295            .await
296            .get(room_id)
297            .and_then(|s| s.config.clone())
298    }
299
300    /// Destroy a room. Returns `Err` if the room does not exist.
301    ///
302    /// Signals the room's shutdown so connected clients receive EOF.
303    pub async fn destroy_room(&self, room_id: &str) -> Result<(), String> {
304        let mut rooms = self.rooms.lock().await;
305        let state = rooms
306            .remove(room_id)
307            .ok_or_else(|| format!("room not found: {room_id}"))?;
308
309        // Signal the room's shutdown so any connected clients receive EOF.
310        let _ = state.shutdown.send(true);
311        Ok(())
312    }
313
314    /// Check if a room exists.
315    pub async fn has_room(&self, room_id: &str) -> bool {
316        self.rooms.lock().await.contains_key(room_id)
317    }
318
319    /// Get a handle to the daemon-level shutdown sender.
320    pub fn shutdown_handle(&self) -> Arc<watch::Sender<bool>> {
321        self.shutdown.clone()
322    }
323
324    /// List all active room IDs.
325    pub async fn list_rooms(&self) -> Vec<String> {
326        self.rooms.lock().await.keys().cloned().collect()
327    }
328
329    /// Insert a token directly into a room's token map, bypassing the join
330    /// permission check. Intended for integration tests only.
331    #[doc(hidden)]
332    pub async fn test_inject_token(
333        &self,
334        room_id: &str,
335        username: &str,
336        token: &str,
337    ) -> Result<(), String> {
338        let rooms = self.rooms.lock().await;
339        let room = rooms
340            .get(room_id)
341            .ok_or_else(|| format!("room not found: {room_id}"))?;
342        room.token_map
343            .lock()
344            .await
345            .insert(token.to_owned(), username.to_owned());
346        Ok(())
347    }
348
349    /// Run the daemon: listen on UDS, dispatch connections to rooms.
350    ///
351    /// When the last UDS connection closes, starts a grace period timer
352    /// (`config.grace_period_secs`). If no new connection arrives before the
353    /// timer fires, sends a shutdown signal. Any new connection during the
354    /// grace period cancels the timer. On exit, cleans up the PID file and
355    /// socket file.
356    pub async fn run(&self) -> anyhow::Result<()> {
357        // Write PID file only for the default daemon socket.  Daemons with an
358        // explicit socket override (tests, CI) are independent instances and
359        // must not clobber the system PID file.
360        let pid_path = if self.config.socket_path == crate::paths::room_socket_path() {
361            match write_pid_file(&crate::paths::room_pid_path()) {
362                Ok(()) => Some(crate::paths::room_pid_path()),
363                Err(e) => {
364                    eprintln!("[daemon] failed to write PID file: {e}");
365                    None
366                }
367            }
368        } else {
369            None
370        };
371
372        // Remove stale socket synchronously.
373        if self.config.socket_path.exists() {
374            std::fs::remove_file(&self.config.socket_path)?;
375        }
376
377        let listener = UnixListener::bind(&self.config.socket_path)?;
378        eprintln!(
379            "[daemon] listening on {}",
380            self.config.socket_path.display()
381        );
382
383        let mut shutdown_rx = self.shutdown.subscribe();
384        let grace_duration = tokio::time::Duration::from_secs(self.config.grace_period_secs);
385
386        // mpsc channel: connection tasks notify the main loop when they close.
387        let (close_tx, mut close_rx) = tokio::sync::mpsc::channel::<()>(64);
388
389        // Optional grace period sleep — active when the last connection closes.
390        let mut grace_sleep: Option<std::pin::Pin<Box<tokio::time::Sleep>>> = None;
391
392        // Start WebSocket/REST server if configured.
393        if let Some(port) = self.config.ws_port {
394            let ws_state = DaemonWsState {
395                rooms: self.rooms.clone(),
396                next_client_id: self.next_client_id.clone(),
397                config: self.config.clone(),
398                system_token_map: self.system_token_map.clone(),
399                user_registry: self.user_registry.clone(),
400            };
401            let app = ws::create_daemon_router(ws_state);
402            let tcp = tokio::net::TcpListener::bind(("0.0.0.0", port)).await?;
403            eprintln!("[daemon] WebSocket/REST listening on port {port}");
404            tokio::spawn(async move {
405                if let Err(e) = axum::serve(tcp, app).await {
406                    eprintln!("[daemon] WS server error: {e}");
407                }
408            });
409        }
410
411        let result = loop {
412            // Build the grace future: fires if a grace sleep is active,
413            // otherwise stays pending forever.
414            let grace_fut = async {
415                match grace_sleep.as_mut() {
416                    Some(s) => {
417                        s.await;
418                    }
419                    None => std::future::pending::<()>().await,
420                }
421            };
422
423            tokio::select! {
424                accept = listener.accept() => {
425                    let (stream, _) = match accept {
426                        Ok(a) => a,
427                        Err(e) => break Err(e.into()),
428                    };
429                    // Cancel any pending grace timer — we have a new connection.
430                    grace_sleep = None;
431
432                    let count = self.connection_count.clone();
433                    count.fetch_add(1, Ordering::SeqCst);
434                    let rooms = self.rooms.clone();
435                    let next_id = self.next_client_id.clone();
436                    let cfg = self.config.clone();
437                    let sys_tokens = self.system_token_map.clone();
438                    let registry = self.user_registry.clone();
439                    let tx = close_tx.clone();
440
441                    tokio::spawn(async move {
442                        if let Err(e) = dispatch_connection(stream, &rooms, &next_id, &cfg, &sys_tokens, &registry).await {
443                            eprintln!("[daemon] connection error: {e:#}");
444                        }
445                        count.fetch_sub(1, Ordering::SeqCst);
446                        // Notify main loop so it can start the grace timer.
447                        let _ = tx.send(()).await;
448                    });
449                }
450                Some(()) = close_rx.recv() => {
451                    // A connection closed. Start grace period if none remain.
452                    if self.connection_count.load(Ordering::SeqCst) == 0 {
453                        eprintln!(
454                            "[daemon] no connections — grace period {}s started",
455                            self.config.grace_period_secs
456                        );
457                        grace_sleep =
458                            Some(Box::pin(tokio::time::sleep(grace_duration)));
459                    }
460                }
461                _ = grace_fut => {
462                    eprintln!("[daemon] grace period expired, shutting down");
463                    let _ = self.shutdown.send(true);
464                    // The shutdown_rx arm will fire on the next iteration; break
465                    // here directly to avoid a double-exit path.
466                    break Ok(());
467                }
468                _ = shutdown_rx.changed() => {
469                    eprintln!("[daemon] shutdown requested, exiting");
470                    if let Some(ref p) = pid_path {
471                        remove_pid_file(p);
472                    }
473                    break Ok(());
474                }
475            }
476        };
477
478        // Clean up ephemeral files on exit.
479        let _ = std::fs::remove_file(&self.config.socket_path);
480        let _ = std::fs::remove_file(crate::paths::room_pid_path());
481        // Remove per-room meta files written during room creation.
482        for room_id in self.list_rooms().await {
483            let _ = std::fs::remove_file(crate::paths::room_meta_path(&room_id));
484        }
485
486        result
487    }
488}
489
490/// Load `UserRegistry` from `users.json`, or migrate from the legacy
491/// `tokens.json` (written by the #334 system-token-map implementation)
492/// if `users.json` does not yet exist.
493///
494/// After loading (or creating) the registry, always scans the legacy runtime
495/// directory for per-room `.token` files left by older `room join` invocations
496/// and imports any that are not already present. This lets clients that joined
497/// before the `~/.room/state/` migration continue to use their existing tokens
498/// without a forced re-join.
499fn load_or_migrate_registry(config: &DaemonConfig) -> UserRegistry {
500    let users_path = config.state_dir.join("users.json");
501
502    let mut registry = if users_path.exists() {
503        // Fast path: users.json exists — use it directly.
504        UserRegistry::load(config.state_dir.clone()).unwrap_or_else(|e| {
505            eprintln!("[daemon] failed to load user registry: {e}; starting empty");
506            UserRegistry::new(config.state_dir.clone())
507        })
508    } else {
509        // Migration path: import from legacy tokens.json if present.
510        let tokens_path = config.system_tokens_path();
511        if tokens_path.exists() {
512            let legacy = super::auth::load_token_map(&tokens_path);
513            if !legacy.is_empty() {
514                eprintln!(
515                    "[daemon] migrating {} token(s) from tokens.json to users.json",
516                    legacy.len()
517                );
518                let mut reg = UserRegistry::new(config.state_dir.clone());
519                for (token, username) in &legacy {
520                    // register_user_idempotent is a no-op if already present.
521                    if let Err(e) = reg.register_user_idempotent(username) {
522                        eprintln!("[daemon] migration: register {username}: {e}");
523                        continue;
524                    }
525                    // Re-insert the existing token directly via issue_token so the
526                    // UUID is preserved. Since UserRegistry.issue_token generates a
527                    // new UUID, we instead manipulate the token map via the public
528                    // API by revoking nothing and accepting the registry's new token.
529                    // Trade-off: legacy UUIDs are replaced; clients must re-join.
530                    // This is acceptable — migration is a one-time event.
531                    let _ = reg.issue_token(username);
532                    let _ = token; // legacy token not preserved — clients must re-join
533                }
534                if let Err(e) = reg.save() {
535                    eprintln!("[daemon] migration save failed: {e}");
536                }
537                reg
538            } else {
539                // tokens.json exists but is empty — start fresh.
540                UserRegistry::new(config.state_dir.clone())
541            }
542        } else {
543            // Neither file exists — start fresh.
544            UserRegistry::new(config.state_dir.clone())
545        }
546    };
547
548    // Always scan the legacy runtime dir for old per-room token files and
549    // import any that are not already in the registry. Idempotent — safe to
550    // run on every startup.
551    migrate_legacy_tmpdir_tokens(&mut registry);
552
553    registry
554}
555
556/// Scan the legacy runtime directory for per-room token files and import
557/// them into `registry`.
558///
559/// Before `~/.room/state/` was introduced, `room join` wrote token files to
560/// the platform runtime directory (`$TMPDIR` on macOS, `/tmp/` on Linux)
561/// as `room-<room_id>-<username>.token`. This function reads each such file,
562/// parses the `username` and `token` fields, and imports them — preserving
563/// the UUID so existing clients do not need to re-join. Files whose tokens
564/// are already in the registry are silently skipped (idempotent).
565fn migrate_legacy_tmpdir_tokens(registry: &mut UserRegistry) {
566    let legacy_dir = crate::paths::legacy_token_dir();
567    migrate_legacy_tmpdir_tokens_from(&legacy_dir, registry);
568}
569
570/// Inner implementation of [`migrate_legacy_tmpdir_tokens`] that accepts an
571/// explicit directory. Extracted so tests can pass a temp directory without
572/// modifying process environment variables.
573fn migrate_legacy_tmpdir_tokens_from(legacy_dir: &std::path::Path, registry: &mut UserRegistry) {
574    let entries = match std::fs::read_dir(legacy_dir) {
575        Ok(e) => e,
576        Err(_) => return,
577    };
578    let mut count = 0usize;
579    for entry in entries.filter_map(|e| e.ok()) {
580        let path = entry.path();
581        let name = match path.file_name().and_then(|n| n.to_str()) {
582            Some(n) => n.to_owned(),
583            None => continue,
584        };
585        if !name.starts_with("room-") || !name.ends_with(".token") {
586            continue;
587        }
588        let data = match std::fs::read_to_string(&path) {
589            Ok(d) => d,
590            Err(_) => continue,
591        };
592        let v: serde_json::Value = match serde_json::from_str(data.trim()) {
593            Ok(v) => v,
594            Err(_) => continue,
595        };
596        let (username, token) = match (v["username"].as_str(), v["token"].as_str()) {
597            (Some(u), Some(t)) if !u.is_empty() && !t.is_empty() => (u.to_owned(), t.to_owned()),
598            _ => continue,
599        };
600        if let Err(e) = registry.register_user_idempotent(&username) {
601            eprintln!("[daemon] legacy token migration: register {username}: {e}");
602            continue;
603        }
604        match registry.import_token(&username, &token) {
605            Ok(()) => count += 1,
606            Err(e) => {
607                eprintln!("[daemon] legacy token migration: import token for {username}: {e}")
608            }
609        }
610    }
611    if count > 0 {
612        eprintln!(
613            "[daemon] imported {count} legacy token(s) from {}",
614            legacy_dir.display()
615        );
616    }
617}
618
619/// Build the initial subscription map for a room based on its config.
620///
621/// DM rooms auto-subscribe both participants at `Full` so they receive all
622/// messages without an explicit `/subscribe` call. Other room types start
623/// with an empty subscription map (users subscribe explicitly or via
624/// auto-subscribe-on-mention).
625fn build_initial_subscriptions(
626    config: &room_protocol::RoomConfig,
627) -> HashMap<String, room_protocol::SubscriptionTier> {
628    let mut subs = HashMap::new();
629    if config.visibility == room_protocol::RoomVisibility::Dm {
630        for user in &config.invite_list {
631            subs.insert(user.clone(), room_protocol::SubscriptionTier::Full);
632        }
633    }
634    subs
635}
636
637/// Core room-creation logic shared by UDS and REST paths.
638///
639/// Validates the room ID, checks for duplicates, builds a [`RoomState`], and
640/// inserts it into the room map. Pass `config: None` to create a configless
641/// room (no invite list, no visibility constraint).
642///
643/// `registry` is attached to the [`RoomState`] via [`RoomState::set_registry`]
644/// so that admin commands (`/kick`, `/reauth`) can also revoke tokens from the
645/// daemon-level [`UserRegistry`] in addition to the in-memory token map.
646pub(crate) async fn create_room_entry(
647    room_id: &str,
648    config: Option<room_protocol::RoomConfig>,
649    rooms: &RoomMap,
650    daemon_config: &DaemonConfig,
651    system_token_map: &TokenMap,
652    registry: Option<Arc<tokio::sync::Mutex<UserRegistry>>>,
653) -> Result<(), String> {
654    validate_room_id(room_id)?;
655    {
656        let map = rooms.lock().await;
657        if map.contains_key(room_id) {
658            return Err(format!("room already exists: {room_id}"));
659        }
660    }
661
662    let chat_path = daemon_config.chat_path(room_id);
663    let subscription_map_path = daemon_config.subscription_map_path(room_id);
664
665    let persisted_subs = super::commands::load_subscription_map(&subscription_map_path);
666    let merged_subs = if let Some(ref cfg) = config {
667        let mut initial = build_initial_subscriptions(cfg);
668        initial.extend(persisted_subs);
669        initial
670    } else {
671        persisted_subs
672    };
673
674    // All rooms in this daemon share the same token map so a token
675    // issued in any room is valid in all rooms.
676    let state = RoomState::new(
677        room_id.to_owned(),
678        chat_path,
679        daemon_config.system_tokens_path(),
680        subscription_map_path,
681        Arc::clone(system_token_map),
682        Arc::new(Mutex::new(merged_subs)),
683        config,
684    )?;
685    if let Some(reg) = registry {
686        state.set_registry(reg);
687    }
688
689    rooms.lock().await.insert(room_id.to_owned(), state);
690
691    // Write a meta file so one-shot commands (poll, watch, pull) can find the
692    // chat file without a broker connection. The meta file lives in the
693    // platform runtime dir alongside the daemon socket.
694    let meta_path = crate::paths::room_meta_path(room_id);
695    let chat_path_str = daemon_config.chat_path(room_id);
696    let meta_json = serde_json::json!({ "chat_path": chat_path_str });
697    let _ = std::fs::write(&meta_path, meta_json.to_string());
698
699    Ok(())
700}
701
702/// Handle a `DESTROY:<room_id>` request: remove the room from the daemon.
703///
704/// Protocol:
705/// 1. Client sends `DESTROY:<room_id>\n`
706/// 2. Daemon responds with `{"type":"room_destroyed","room":"<id>"}\n` or an error.
707///
708/// Connected clients receive EOF when the room's shutdown signal fires.
709/// Chat files are preserved on disk.
710async fn handle_destroy(
711    room_id: &str,
712    write_half: &mut tokio::net::unix::OwnedWriteHalf,
713    rooms: &RoomMap,
714) -> anyhow::Result<()> {
715    use tokio::io::AsyncWriteExt;
716
717    if room_id.is_empty() {
718        let err = serde_json::json!({
719            "type": "error",
720            "code": "invalid_room_id",
721            "message": "room ID is empty"
722        });
723        write_half.write_all(format!("{err}\n").as_bytes()).await?;
724        return Ok(());
725    }
726
727    // Remove the room and signal shutdown.
728    let state = {
729        let mut map = rooms.lock().await;
730        map.remove(room_id)
731    };
732
733    match state {
734        Some(s) => {
735            // Signal shutdown so connected clients receive EOF.
736            let _ = s.shutdown.send(true);
737            let ok = serde_json::json!({
738                "type": "room_destroyed",
739                "room": room_id
740            });
741            write_half.write_all(format!("{ok}\n").as_bytes()).await?;
742        }
743        None => {
744            let err = serde_json::json!({
745                "type": "error",
746                "code": "room_not_found",
747                "room": room_id
748            });
749            write_half.write_all(format!("{err}\n").as_bytes()).await?;
750        }
751    }
752
753    Ok(())
754}
755
756/// Handle a `CREATE:<room_id>` request: validate, read config, create the room.
757///
758/// Protocol:
759/// 1. Client sends `CREATE:<room_id>\n`
760/// 2. Client sends config JSON on the next line: `{"visibility":"public","invite":[]}\n`
761/// 3. Daemon responds with `{"type":"room_created","room":"<id>"}\n` or an error envelope.
762async fn handle_create(
763    room_id: &str,
764    reader: &mut tokio::io::BufReader<tokio::net::unix::OwnedReadHalf>,
765    write_half: &mut tokio::net::unix::OwnedWriteHalf,
766    rooms: &RoomMap,
767    daemon_config: &DaemonConfig,
768    system_token_map: &TokenMap,
769    user_registry: &Arc<tokio::sync::Mutex<UserRegistry>>,
770) -> anyhow::Result<()> {
771    use tokio::io::{AsyncBufReadExt, AsyncWriteExt};
772
773    // Validate room ID.
774    if let Err(e) = validate_room_id(room_id) {
775        let err = serde_json::json!({
776            "type": "error",
777            "code": "invalid_room_id",
778            "message": e
779        });
780        write_half.write_all(format!("{err}\n").as_bytes()).await?;
781        return Ok(());
782    }
783
784    // Check for duplicate before reading config (fast-fail).
785    {
786        let map = rooms.lock().await;
787        if map.contains_key(room_id) {
788            let err = serde_json::json!({
789                "type": "error",
790                "code": "room_exists",
791                "message": format!("room already exists: {room_id}")
792            });
793            write_half.write_all(format!("{err}\n").as_bytes()).await?;
794            return Ok(());
795        }
796    }
797
798    // Read config JSON from second line.
799    let mut config_line = String::new();
800    reader.read_line(&mut config_line).await?;
801    let config_str = config_line.trim();
802
803    let (visibility_str, invite): (String, Vec<String>) = if config_str.is_empty() {
804        ("public".into(), vec![])
805    } else {
806        let v: serde_json::Value = match serde_json::from_str(config_str) {
807            Ok(v) => v,
808            Err(e) => {
809                let err = serde_json::json!({
810                    "type": "error",
811                    "code": "invalid_config",
812                    "message": format!("invalid config JSON: {e}")
813                });
814                write_half.write_all(format!("{err}\n").as_bytes()).await?;
815                return Ok(());
816            }
817        };
818        let vis = v["visibility"].as_str().unwrap_or("public").to_owned();
819        let inv = v["invite"]
820            .as_array()
821            .map(|arr| {
822                arr.iter()
823                    .filter_map(|v| v.as_str().map(|s| s.to_owned()))
824                    .collect()
825            })
826            .unwrap_or_default();
827        (vis, inv)
828    };
829
830    // Build RoomConfig from the parsed visibility + invite list.
831    let room_config = match visibility_str.as_str() {
832        "public" => room_protocol::RoomConfig {
833            visibility: room_protocol::RoomVisibility::Public,
834            max_members: None,
835            invite_list: invite.into_iter().collect(),
836            created_by: "system".to_owned(),
837            created_at: chrono::Utc::now().to_rfc3339(),
838        },
839        "private" => room_protocol::RoomConfig {
840            visibility: room_protocol::RoomVisibility::Private,
841            max_members: None,
842            invite_list: invite.into_iter().collect(),
843            created_by: "system".to_owned(),
844            created_at: chrono::Utc::now().to_rfc3339(),
845        },
846        "dm" => {
847            if invite.len() != 2 {
848                let err = serde_json::json!({
849                    "type": "error",
850                    "code": "invalid_config",
851                    "message": "dm visibility requires exactly 2 users in invite list"
852                });
853                write_half.write_all(format!("{err}\n").as_bytes()).await?;
854                return Ok(());
855            }
856            room_protocol::RoomConfig::dm(&invite[0], &invite[1])
857        }
858        other => {
859            let err = serde_json::json!({
860                "type": "error",
861                "code": "invalid_config",
862                "message": format!("unknown visibility: {other}")
863            });
864            write_half.write_all(format!("{err}\n").as_bytes()).await?;
865            return Ok(());
866        }
867    };
868
869    // Delegate to the shared room-creation helper.
870    if let Err(e) = create_room_entry(
871        room_id,
872        Some(room_config),
873        rooms,
874        daemon_config,
875        system_token_map,
876        Some(user_registry.clone()),
877    )
878    .await
879    {
880        let err = serde_json::json!({
881            "type": "error",
882            "code": "internal",
883            "message": e
884        });
885        write_half.write_all(format!("{err}\n").as_bytes()).await?;
886        return Ok(());
887    }
888
889    let ok = serde_json::json!({
890        "type": "room_created",
891        "room": room_id
892    });
893    write_half.write_all(format!("{ok}\n").as_bytes()).await?;
894    Ok(())
895}
896
897/// Handle a global `JOIN:<username>` request at daemon level.
898///
899/// Registers the user in the global UserRegistry (or returns the existing token
900/// if already registered) and writes the token response. No room association.
901async fn handle_global_join(
902    username: &str,
903    write_half: &mut tokio::net::unix::OwnedWriteHalf,
904    registry: &Arc<tokio::sync::Mutex<UserRegistry>>,
905) -> anyhow::Result<()> {
906    use tokio::io::AsyncWriteExt;
907
908    let mut reg = registry.lock().await;
909
910    // If user already has a token, return it. Otherwise register and issue.
911    let token = if reg.has_token_for_user(username) {
912        // Find existing token via snapshot (reverse lookup: token→user).
913        reg.token_snapshot()
914            .into_iter()
915            .find(|(_, u)| u == username)
916            .map(|(t, _)| t)
917            .expect("has_token_for_user was true but no token found")
918    } else {
919        reg.register_user_idempotent(username)
920            .map_err(|e| anyhow::anyhow!("registration failed: {e}"))?;
921        reg.issue_token(username)
922            .map_err(|e| anyhow::anyhow!("token issuance failed: {e}"))?
923    };
924
925    let resp = serde_json::json!({
926        "type": "token",
927        "token": token,
928        "username": username
929    });
930    write_half.write_all(format!("{resp}\n").as_bytes()).await?;
931    Ok(())
932}
933
/// Dispatch a raw UDS connection to the correct handler based on the handshake.
///
/// Handles four top-level prefixes (see module docs):
/// - `CREATE:<room_id>` — create a new room (reads config JSON from second line)
/// - `DESTROY:<room_id>` — remove a room and signal shutdown to its clients
/// - `JOIN:<username>` — daemon-level registration, no room association
/// - `ROOM:<room_id>:<rest>` — route `<rest>` to an existing room
///
/// Anything else is rejected with a `missing_room_prefix` error envelope.
async fn dispatch_connection(
    stream: tokio::net::UnixStream,
    rooms: &RoomMap,
    next_client_id: &Arc<AtomicU64>,
    daemon_config: &DaemonConfig,
    system_token_map: &TokenMap,
    user_registry: &Arc<tokio::sync::Mutex<UserRegistry>>,
) -> anyhow::Result<()> {
    use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader};

    let (read_half, mut write_half) = stream.into_split();
    let mut reader = BufReader::new(read_half);

    // The entire routing decision is driven by the first line of the stream.
    let mut first = String::new();
    reader.read_line(&mut first).await?;
    let first_line = first.trim();

    // Silently drop empty connections (e.g. a probe that connects and closes).
    if first_line.is_empty() {
        return Ok(());
    }

    use super::handshake::{
        parse_client_handshake, parse_daemon_prefix, ClientHandshake, DaemonPrefix,
    };
    // Daemon-level commands return early; only ROOM: falls through to the
    // per-room dispatch below.
    let (room_id, rest) = match parse_daemon_prefix(first_line) {
        DaemonPrefix::Destroy(room_id) => {
            return handle_destroy(&room_id, &mut write_half, rooms).await;
        }
        DaemonPrefix::Create(room_id) => {
            return handle_create(
                &room_id,
                &mut reader,
                &mut write_half,
                rooms,
                daemon_config,
                system_token_map,
                user_registry,
            )
            .await;
        }
        DaemonPrefix::Join(username) => {
            return handle_global_join(&username, &mut write_half, user_registry).await;
        }
        DaemonPrefix::Room { room_id, rest } => (room_id, rest),
        DaemonPrefix::Unknown => {
            let err = serde_json::json!({
                "type": "error",
                "code": "missing_room_prefix",
                "message": "daemon mode requires ROOM:<room_id>: or CREATE:<room_id> prefix"
            });
            write_half.write_all(format!("{err}\n").as_bytes()).await?;
            return Ok(());
        }
    };

    // Look up the room. Clone the Arc inside the block so the map lock is
    // released before any further I/O.
    let state = {
        let map = rooms.lock().await;
        map.get(room_id.as_str()).cloned()
    };

    let state = match state {
        Some(s) => s,
        None => {
            let err = serde_json::json!({
                "type": "error",
                "code": "room_not_found",
                "room": room_id
            });
            write_half.write_all(format!("{err}\n").as_bytes()).await?;
            return Ok(());
        }
    };

    // Client IDs are process-wide unique and start at 1 (fetch_add returns
    // the previous value).
    let cid = next_client_id.fetch_add(1, Ordering::SeqCst) + 1;

    // Dispatch based on the per-room handshake after the ROOM: prefix.
    // Send/Token/Join are one-shot exchanges that return here; Interactive
    // falls through to the long-lived session below.
    let username = match parse_client_handshake(&rest) {
        ClientHandshake::Send(u) => {
            return handle_oneshot_send(u, reader, write_half, &state).await;
        }
        ClientHandshake::Token(token) => {
            return match super::auth::validate_token(&token, &state.token_map).await {
                Some(u) => handle_oneshot_send(u, reader, write_half, &state).await,
                None => {
                    let err = serde_json::json!({"type":"error","code":"invalid_token"});
                    write_half
                        .write_all(format!("{err}\n").as_bytes())
                        .await
                        .map_err(Into::into)
                }
            };
        }
        ClientHandshake::Join(u) => {
            return super::auth::handle_oneshot_join_with_registry(
                u,
                write_half,
                user_registry,
                &state.token_map,
                &state.subscription_map,
                state.config.as_ref(),
            )
            .await;
        }
        ClientHandshake::Interactive(u) => u,
    };

    // Interactive join.
    if username.is_empty() {
        return Ok(());
    }

    // Check join permission before entering interactive session.
    if let Err(reason) = super::auth::check_join_permission(&username, state.config.as_ref()) {
        let err = serde_json::json!({
            "type": "error",
            "code": "join_denied",
            "message": reason,
            "username": username
        });
        write_half.write_all(format!("{err}\n").as_bytes()).await?;
        return Ok(());
    }

    // Register client in room, then hand off to the full interactive handler.
    // NOTE(review): the username slot is inserted empty here — presumably
    // run_interactive_session fills it in after the join handshake; confirm.
    let (tx, _) = broadcast::channel::<String>(256);
    state
        .clients
        .lock()
        .await
        .insert(cid, (String::new(), tx.clone()));

    let result =
        super::run_interactive_session(cid, &username, reader, write_half, tx, &state).await;

    // Always deregister, whether the session ended cleanly or with an error.
    state.clients.lock().await.remove(&cid);
    result
}
1077
1078// ── Tests ─────────────────────────────────────────────────────────────────────
1079
1080#[cfg(test)]
1081mod tests {
1082    use super::*;
1083
1084    // ── PID management ───────────────────────────────────────────────────
1085
    #[test]
    fn write_pid_file_creates_file_with_current_pid() {
        let dir = tempfile::TempDir::new().unwrap();
        let path = dir.path().join("test.pid");
        write_pid_file(&path).unwrap();
        // The file must contain exactly this process's PID as decimal text.
        let content = std::fs::read_to_string(&path).unwrap();
        let pid: u32 = content.trim().parse().expect("PID should be a number");
        assert_eq!(pid, std::process::id());
    }

    #[test]
    fn is_pid_alive_true_for_current_process() {
        let dir = tempfile::TempDir::new().unwrap();
        let path = dir.path().join("test.pid");
        write_pid_file(&path).unwrap();
        assert!(is_pid_alive(&path), "current process should be alive");
    }

    #[test]
    fn is_pid_alive_false_for_missing_file() {
        // A missing PID file means "not alive", not an error.
        let path = std::path::Path::new("/tmp/nonexistent-room-test-99999999.pid");
        assert!(!is_pid_alive(path));
    }

    #[test]
    fn remove_pid_file_deletes_file() {
        let dir = tempfile::TempDir::new().unwrap();
        let path = dir.path().join("remove.pid");
        write_pid_file(&path).unwrap();
        assert!(path.exists());
        remove_pid_file(&path);
        assert!(!path.exists());
    }

    #[test]
    fn remove_pid_file_noop_when_missing() {
        // Should not panic if the file is already gone.
        let path = std::path::Path::new("/tmp/gone-99999999.pid");
        remove_pid_file(path); // must not panic
    }
1126
1127    // ── DaemonState lifecycle ─────────────────────────────────────────────
1128
    /// Test helper: look up a room's state by ID.
    ///
    /// Panics with the room ID if it does not exist, so failures point at the
    /// lookup rather than a later unwrap.
    async fn get_room(daemon: &DaemonState, room_id: &str) -> Arc<RoomState> {
        daemon
            .rooms
            .lock()
            .await
            .get(room_id)
            .cloned()
            .unwrap_or_else(|| panic!("room {room_id} not found"))
    }

    #[tokio::test]
    async fn create_room_succeeds() {
        let daemon = DaemonState::new(DaemonConfig::default());
        assert!(daemon.create_room("test-room").await.is_ok());
        let state = get_room(&daemon, "test-room").await;
        assert_eq!(*state.room_id, "test-room");
    }

    #[tokio::test]
    async fn create_duplicate_room_fails() {
        let daemon = DaemonState::new(DaemonConfig::default());
        daemon.create_room("dup").await.unwrap();
        let result = daemon.create_room("dup").await;
        assert!(result.is_err());
        // The error text is part of the observable contract.
        assert!(result.unwrap_err().contains("already exists"));
    }

    #[tokio::test]
    async fn has_room_returns_true_for_created() {
        let daemon = DaemonState::new(DaemonConfig::default());
        daemon.create_room("room-a").await.unwrap();
        assert!(daemon.has_room("room-a").await);
        assert!(!daemon.has_room("room-b").await);
    }

    #[tokio::test]
    async fn destroy_room_removes_it() {
        let daemon = DaemonState::new(DaemonConfig::default());
        daemon.create_room("doomed").await.unwrap();
        assert!(daemon.destroy_room("doomed").await.is_ok());
        assert!(!daemon.has_room("doomed").await);
    }

    #[tokio::test]
    async fn destroy_nonexistent_room_fails() {
        let daemon = DaemonState::new(DaemonConfig::default());
        let result = daemon.destroy_room("nope").await;
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("not found"));
    }

    #[tokio::test]
    async fn destroy_room_signals_shutdown() {
        let daemon = DaemonState::new(DaemonConfig::default());
        daemon.create_room("shutme").await.unwrap();
        let state = get_room(&daemon, "shutme").await;
        // Subscribe before destroying so the transition is observable.
        let rx = state.shutdown.subscribe();
        assert!(!*rx.borrow());

        daemon.destroy_room("shutme").await.unwrap();
        // The shutdown signal should now be true.
        assert!(*rx.borrow());
    }

    #[tokio::test]
    async fn list_rooms_returns_all() {
        let daemon = DaemonState::new(DaemonConfig::default());
        daemon.create_room("alpha").await.unwrap();
        daemon.create_room("beta").await.unwrap();
        daemon.create_room("gamma").await.unwrap();

        // Order is not asserted by this test; sort before comparing.
        let mut rooms = daemon.list_rooms().await;
        rooms.sort();
        assert_eq!(rooms, vec!["alpha", "beta", "gamma"]);
    }

    #[tokio::test]
    async fn list_rooms_empty_initially() {
        let daemon = DaemonState::new(DaemonConfig::default());
        assert!(daemon.list_rooms().await.is_empty());
    }

    #[tokio::test]
    async fn create_room_initializes_plugins() {
        let daemon = DaemonState::new(DaemonConfig::default());
        daemon.create_room("plugtest").await.unwrap();
        let state = get_room(&daemon, "plugtest").await;
        // help and stats should be registered
        assert!(state.plugin_registry.resolve("help").is_some());
        assert!(state.plugin_registry.resolve("stats").is_some());
    }
1221
1222    // ── DaemonConfig ──────────────────────────────────────────────────────
1223
    #[test]
    fn config_chat_path_format() {
        let config = DaemonConfig {
            data_dir: PathBuf::from("/var/room"),
            ..DaemonConfig::default()
        };
        // Chat files live directly under data_dir as <room_id>.chat.
        assert_eq!(
            config.chat_path("myroom"),
            PathBuf::from("/var/room/myroom.chat")
        );
    }

    #[test]
    fn config_default_socket_path() {
        let config = DaemonConfig::default();
        assert_eq!(config.socket_path, crate::paths::room_socket_path());
    }

    // ── create_room_with_config ───────────────────────────────────────────

    #[tokio::test]
    async fn create_room_with_dm_config() {
        let daemon = DaemonState::new(DaemonConfig::default());
        let config = room_protocol::RoomConfig::dm("alice", "bob");
        assert!(daemon
            .create_room_with_config("dm-alice-bob", config)
            .await
            .is_ok());

        // The stored config must round-trip: DM visibility, a 2-member cap,
        // and both participants on the invite list.
        let state = get_room(&daemon, "dm-alice-bob").await;
        let cfg = state.config.as_ref().unwrap();
        assert_eq!(cfg.visibility, room_protocol::RoomVisibility::Dm);
        assert_eq!(cfg.max_members, Some(2));
        assert!(cfg.invite_list.contains("alice"));
        assert!(cfg.invite_list.contains("bob"));
    }

    #[tokio::test]
    async fn create_room_with_config_duplicate_fails() {
        let daemon = DaemonState::new(DaemonConfig::default());
        let config = room_protocol::RoomConfig::public("owner");
        daemon
            .create_room_with_config("dup", config.clone())
            .await
            .unwrap();
        assert!(daemon.create_room_with_config("dup", config).await.is_err());
    }

    #[tokio::test]
    async fn get_room_config_returns_none_for_unconfigured() {
        // Rooms made via plain create_room carry no RoomConfig.
        let daemon = DaemonState::new(DaemonConfig::default());
        daemon.create_room("plain").await.unwrap();
        assert!(daemon.get_room_config("plain").await.is_none());
    }

    #[tokio::test]
    async fn get_room_config_returns_config_when_present() {
        let daemon = DaemonState::new(DaemonConfig::default());
        let config = room_protocol::RoomConfig::dm("alice", "bob");
        daemon
            .create_room_with_config("dm-alice-bob", config)
            .await
            .unwrap();
        let cfg = daemon.get_room_config("dm-alice-bob").await.unwrap();
        assert_eq!(cfg.visibility, room_protocol::RoomVisibility::Dm);
    }

    #[tokio::test]
    async fn dm_room_id_deterministic_and_lookup_works() {
        let daemon = DaemonState::new(DaemonConfig::default());
        // Both argument orders must yield the same canonical "dm-alice-bob".
        let room_id = room_protocol::dm_room_id("bob", "alice").unwrap();
        assert_eq!(room_id, "dm-alice-bob");

        let config = room_protocol::RoomConfig::dm("bob", "alice");
        daemon
            .create_room_with_config(&room_id, config)
            .await
            .unwrap();
        assert!(daemon.has_room("dm-alice-bob").await);
        // Reverse order gives the same room_id
        assert_eq!(
            room_protocol::dm_room_id("alice", "bob").unwrap(),
            "dm-alice-bob"
        );
    }
1309
1310    // ── validate_room_id ──────────────────────────────────────────────────
1311
    #[test]
    fn valid_room_ids() {
        for id in [
            "lobby",
            "agent-room-2",
            "my_room",
            "Room.1",
            "dm-alice-bob",
            "a",
            // Exactly the maximum length is still accepted (boundary case).
            &"x".repeat(MAX_ROOM_ID_LEN),
        ] {
            assert!(validate_room_id(id).is_ok(), "should accept: {id:?}");
        }
    }

    #[test]
    fn empty_room_id_rejected() {
        let err = validate_room_id("").unwrap_err();
        assert!(err.contains("empty"), "{err}");
    }

    #[test]
    fn room_id_too_long_rejected() {
        let long = "x".repeat(MAX_ROOM_ID_LEN + 1);
        let err = validate_room_id(&long).unwrap_err();
        assert!(err.contains("too long"), "{err}");
    }

    #[test]
    fn dot_dot_traversal_rejected() {
        // Any ".." anywhere in the ID is rejected as a traversal attempt.
        for id in ["..", "room/../etc", "..secret", "a..b"] {
            let err = validate_room_id(id).unwrap_err();
            assert!(err.contains(".."), "should reject {id:?}: {err}");
        }
    }

    #[test]
    fn single_dot_rejected() {
        let err = validate_room_id(".").unwrap_err();
        assert!(err.contains(".."), "{err}");
    }

    #[test]
    fn slash_rejected() {
        for id in ["room/sub", "/etc/passwd", "a/b/c"] {
            let err = validate_room_id(id).unwrap_err();
            assert!(err.contains("unsafe"), "should reject {id:?}: {err}");
        }
    }

    #[test]
    fn backslash_rejected() {
        let err = validate_room_id("room\\sub").unwrap_err();
        assert!(err.contains("unsafe"), "{err}");
    }

    #[test]
    fn null_byte_rejected() {
        let err = validate_room_id("room\0id").unwrap_err();
        assert!(err.contains("unsafe"), "{err}");
    }

    #[test]
    fn whitespace_rejected() {
        for id in ["room name", "room\tid", "room\nid", " leading", "trailing "] {
            let err = validate_room_id(id).unwrap_err();
            assert!(err.contains("whitespace"), "should reject {id:?}: {err}");
        }
    }

    #[test]
    fn other_unsafe_chars_rejected() {
        for ch in [':', '*', '?', '"', '<', '>', '|'] {
            let id = format!("room{ch}id");
            let err = validate_room_id(&id).unwrap_err();
            assert!(err.contains("unsafe"), "should reject {ch:?}: {err}");
        }
    }

    #[tokio::test]
    async fn create_room_rejects_invalid_id() {
        // create_room is expected to reject the same inputs validate_room_id does.
        let daemon = DaemonState::new(DaemonConfig::default());
        assert!(daemon.create_room("room/sub").await.is_err());
        assert!(daemon.create_room("..").await.is_err());
        assert!(daemon.create_room("").await.is_err());
        assert!(daemon.create_room("room name").await.is_err());
    }

    #[tokio::test]
    async fn create_room_with_config_rejects_invalid_id() {
        let daemon = DaemonState::new(DaemonConfig::default());
        let config = room_protocol::RoomConfig::public("owner");
        assert!(daemon
            .create_room_with_config("../etc", config)
            .await
            .is_err());
    }
1409
1410    // ── DM auto-subscribe ─────────────────────────────────────────────────
1411
    #[tokio::test]
    async fn dm_room_auto_subscribes_both_participants() {
        let daemon = DaemonState::new(DaemonConfig::default());
        let config = room_protocol::RoomConfig::dm("alice", "bob");
        daemon
            .create_room_with_config("dm-alice-bob", config)
            .await
            .unwrap();

        // Both DM participants are auto-subscribed at Full tier on creation.
        let state = get_room(&daemon, "dm-alice-bob").await;
        let subs = state.subscription_map.lock().await;
        assert_eq!(subs.len(), 2);
        assert_eq!(
            subs.get("alice"),
            Some(&room_protocol::SubscriptionTier::Full)
        );
        assert_eq!(
            subs.get("bob"),
            Some(&room_protocol::SubscriptionTier::Full)
        );
    }

    #[tokio::test]
    async fn public_room_starts_with_no_subscriptions() {
        let daemon = DaemonState::new(DaemonConfig::default());
        let config = room_protocol::RoomConfig::public("owner");
        daemon
            .create_room_with_config("lobby", config)
            .await
            .unwrap();

        let state = get_room(&daemon, "lobby").await;
        let subs = state.subscription_map.lock().await;
        assert!(subs.is_empty());
    }

    #[tokio::test]
    async fn unconfigured_room_starts_with_no_subscriptions() {
        let daemon = DaemonState::new(DaemonConfig::default());
        daemon.create_room("plain").await.unwrap();

        let state = get_room(&daemon, "plain").await;
        let subs = state.subscription_map.lock().await;
        assert!(subs.is_empty());
    }

    #[tokio::test]
    async fn dm_auto_subscribe_uses_full_tier() {
        let daemon = DaemonState::new(DaemonConfig::default());
        let config = room_protocol::RoomConfig::dm("carol", "dave");
        daemon
            .create_room_with_config("dm-carol-dave", config)
            .await
            .unwrap();

        let state = get_room(&daemon, "dm-carol-dave").await;
        let subs = state.subscription_map.lock().await;
        // Verify it's Full, not MentionsOnly
        for (_, tier) in subs.iter() {
            assert_eq!(*tier, room_protocol::SubscriptionTier::Full);
        }
    }

    /// Unit-level check of the subscription-seeding helper for DM configs.
    #[test]
    fn build_initial_subscriptions_dm_populates() {
        let config = room_protocol::RoomConfig::dm("alice", "bob");
        let subs = build_initial_subscriptions(&config);
        assert_eq!(subs.len(), 2);
        assert_eq!(subs["alice"], room_protocol::SubscriptionTier::Full);
        assert_eq!(subs["bob"], room_protocol::SubscriptionTier::Full);
    }

    #[test]
    fn build_initial_subscriptions_public_empty() {
        let config = room_protocol::RoomConfig::public("owner");
        let subs = build_initial_subscriptions(&config);
        assert!(subs.is_empty());
    }
1490
1491    // ── DaemonConfig grace_period_secs ────────────────────────────────────
1492
    #[test]
    fn default_grace_period_is_30() {
        let config = DaemonConfig::default();
        assert_eq!(config.grace_period_secs, 30);
    }

    #[test]
    fn custom_grace_period_preserved() {
        let config = DaemonConfig {
            grace_period_secs: 0,
            ..DaemonConfig::default()
        };
        assert_eq!(config.grace_period_secs, 0);
    }

    // ── connection_count refcount ─────────────────────────────────────────

    #[tokio::test]
    async fn connection_count_starts_at_zero() {
        let daemon = DaemonState::new(DaemonConfig::default());
        assert_eq!(daemon.connection_count.load(Ordering::SeqCst), 0);
    }

    // NOTE(review): this exercises a standalone AtomicUsize, not the daemon's
    // own counter — it only documents the increment/decrement arithmetic.
    #[tokio::test]
    async fn connection_count_increments_and_decrements() {
        let count = Arc::new(AtomicUsize::new(0));
        count.fetch_add(1, Ordering::SeqCst);
        count.fetch_add(1, Ordering::SeqCst);
        assert_eq!(count.load(Ordering::SeqCst), 2);
        count.fetch_sub(1, Ordering::SeqCst);
        assert_eq!(count.load(Ordering::SeqCst), 1);
        count.fetch_sub(1, Ordering::SeqCst);
        assert_eq!(count.load(Ordering::SeqCst), 0);
    }

    /// Verify that the daemon exits cleanly when the shutdown signal is sent.
    /// Uses an Arc<DaemonState> so the run() task can hold a reference while
    /// the test also holds one to send the shutdown signal.
    #[tokio::test]
    async fn daemon_exits_on_shutdown_signal() {
        let dir = tempfile::TempDir::new().unwrap();
        let socket = dir.path().join("test-grace.sock");
        std::fs::create_dir_all(dir.path().join("data")).unwrap();
        std::fs::create_dir_all(dir.path().join("state")).unwrap();

        let config = DaemonConfig {
            socket_path: socket.clone(),
            data_dir: dir.path().join("data"),
            state_dir: dir.path().join("state"),
            ws_port: None,
            grace_period_secs: 0,
        };
        let daemon = Arc::new(DaemonState::new(config));
        let shutdown = daemon.shutdown_handle();

        let daemon2 = Arc::clone(&daemon);
        let handle = tokio::spawn(async move { daemon2.run().await });

        // Wait for socket to become connectable (daemon is up).
        // Polls up to 100 × 50 ms = 5 s before the assertion below gives up.
        for _ in 0..100 {
            if tokio::net::UnixStream::connect(&socket).await.is_ok() {
                break;
            }
            tokio::time::sleep(tokio::time::Duration::from_millis(50)).await;
        }
        assert!(
            tokio::net::UnixStream::connect(&socket).await.is_ok(),
            "daemon socket not ready"
        );

        // Send shutdown — daemon should exit quickly.
        let _ = shutdown.send(true);
        let result = tokio::time::timeout(tokio::time::Duration::from_secs(5), handle).await;
        assert!(result.is_ok(), "daemon did not exit within 5s");
        assert!(result.unwrap().unwrap().is_ok(), "run() returned error");
    }

    /// Verify that a new connection during the grace period resets the timer.
    /// We check this by confirming connection_count goes from 0 → 1 → 0 without
    /// a premature shutdown.
    ///
    /// NOTE(review): run() is never started here, so the grace timer itself is
    /// not exercised — only the counter transitions and the shutdown flag.
    #[tokio::test]
    async fn grace_period_cancelled_by_new_connection() {
        let dir = tempfile::TempDir::new().unwrap();
        let socket = dir.path().join("test-cancel-grace.sock");

        let config = DaemonConfig {
            socket_path: socket.clone(),
            data_dir: dir.path().join("data"),
            state_dir: dir.path().join("state"),
            ws_port: None,
            grace_period_secs: 60, // long grace — should not fire
        };
        let daemon = DaemonState::new(config);

        // Manually exercise the counter: simulate connect + disconnect.
        daemon.connection_count.fetch_add(1, Ordering::SeqCst);
        assert_eq!(daemon.connection_count.load(Ordering::SeqCst), 1);
        daemon.connection_count.fetch_sub(1, Ordering::SeqCst);
        assert_eq!(daemon.connection_count.load(Ordering::SeqCst), 0);

        // Simulate a second connection arriving (cancels grace timer).
        daemon.connection_count.fetch_add(1, Ordering::SeqCst);
        assert_eq!(daemon.connection_count.load(Ordering::SeqCst), 1);

        // Daemon has not shut down.
        assert!(!*daemon.shutdown.borrow());
    }
1600
1601    // ── migrate_legacy_tmpdir_tokens ──────────────────────────────────────
1602
1603    /// Write a token file to `dir` in the format written by old `room join`.
1604    fn write_legacy_token(dir: &std::path::Path, room_id: &str, username: &str, token: &str) {
1605        let name = format!("room-{room_id}-{username}.token");
1606        let data = serde_json::json!({"username": username, "token": token});
1607        std::fs::write(dir.join(name), format!("{data}\n")).unwrap();
1608    }
1609
1610    #[test]
1611    fn migrate_legacy_tmpdir_tokens_imports_token() {
1612        let token_dir = tempfile::TempDir::new().unwrap();
1613        let state_dir = tempfile::TempDir::new().unwrap();
1614        write_legacy_token(token_dir.path(), "lobby", "alice", "legacy-uuid-alice");
1615
1616        let mut registry = UserRegistry::new(state_dir.path().to_owned());
1617
1618        // Override the legacy dir by temporarily pointing TMPDIR at token_dir.
1619        // Because legacy_token_dir() reads env on macOS, we run the function
1620        // directly on the directory to avoid touching the process environment.
1621        // Instead we call the inner logic directly with a helper that accepts
1622        // a custom dir.
1623        migrate_legacy_tmpdir_tokens_from(token_dir.path(), &mut registry);
1624
1625        assert_eq!(registry.validate_token("legacy-uuid-alice"), Some("alice"));
1626        assert!(registry.get_user("alice").is_some());
1627    }
1628
1629    #[test]
1630    fn migrate_legacy_tmpdir_tokens_idempotent() {
1631        let token_dir = tempfile::TempDir::new().unwrap();
1632        let state_dir = tempfile::TempDir::new().unwrap();
1633        write_legacy_token(token_dir.path(), "lobby", "bob", "tok-bob");
1634
1635        let mut registry = UserRegistry::new(state_dir.path().to_owned());
1636        migrate_legacy_tmpdir_tokens_from(token_dir.path(), &mut registry);
1637        migrate_legacy_tmpdir_tokens_from(token_dir.path(), &mut registry);
1638
1639        // Token still valid and exactly one entry for bob.
1640        assert_eq!(registry.validate_token("tok-bob"), Some("bob"));
1641        let snap = registry.token_snapshot();
1642        assert_eq!(snap.values().filter(|u| u.as_str() == "bob").count(), 1);
1643    }
1644
1645    #[test]
1646    fn migrate_legacy_tmpdir_tokens_skips_non_token_files() {
1647        let token_dir = tempfile::TempDir::new().unwrap();
1648        let state_dir = tempfile::TempDir::new().unwrap();
1649        std::fs::write(token_dir.path().join("roomd.sock"), "not a token").unwrap();
1650        std::fs::write(token_dir.path().join("something.json"), "{}").unwrap();
1651
1652        let mut registry = UserRegistry::new(state_dir.path().to_owned());
1653        migrate_legacy_tmpdir_tokens_from(token_dir.path(), &mut registry);
1654
1655        assert!(registry.list_users().is_empty());
1656    }
1657
1658    #[test]
1659    fn migrate_legacy_tmpdir_tokens_skips_malformed_json() {
1660        let token_dir = tempfile::TempDir::new().unwrap();
1661        let state_dir = tempfile::TempDir::new().unwrap();
1662        std::fs::write(token_dir.path().join("room-x-bad.token"), "not-json{{{").unwrap();
1663
1664        let mut registry = UserRegistry::new(state_dir.path().to_owned());
1665        migrate_legacy_tmpdir_tokens_from(token_dir.path(), &mut registry);
1666
1667        assert!(registry.list_users().is_empty());
1668    }
1669}