#![cfg(unix)]
use std::{
path::{Path, PathBuf},
process::{Child, Command, Stdio},
sync::{
Arc, Mutex,
atomic::{AtomicBool, Ordering},
},
time::{Duration, Instant},
};
use fs2::FileExt as _;
use tempfile::TempDir;
use tokio::net::UnixStream;
use sqry_daemon::{
DaemonConfig, DaemonError, SocketConfig,
lifecycle::detach::{bootstrap_lock_path, start_detached},
};
/// Serialises tests that mutate process-global environment state
/// (`XDG_RUNTIME_DIR`); lock sites recover from poisoning via
/// `unwrap_or_else(|e| e.into_inner())` so one panicking test cannot
/// wedge the others.
static ENV_LOCK: Mutex<()> = Mutex::new(());
/// Locate the `sqryd` binary for end-to-end tests.
///
/// Resolution order:
/// 1. `CARGO_BIN_EXE_sqryd` (set by Cargo when the bin target is built),
/// 2. a sibling of the current test executable,
/// 3. one directory above it (Cargo places test binaries in `deps/`).
///
/// Returns `None` when no candidate exists on disk, letting callers SKIP.
fn find_sqryd_binary() -> Option<PathBuf> {
    if let Ok(env_path) = std::env::var("CARGO_BIN_EXE_sqryd") {
        let from_env = PathBuf::from(env_path);
        if from_env.is_file() {
            return Some(from_env);
        }
    }
    let name = format!("sqryd{}", std::env::consts::EXE_SUFFIX);
    let exe_dir = std::env::current_exe().ok()?.parent()?.to_path_buf();
    let sibling = exe_dir.join(&name);
    if sibling.is_file() {
        return Some(sibling);
    }
    let above = exe_dir.parent()?.join(&name);
    above.is_file().then_some(above)
}
/// Per-test scratch state: a throwaway runtime directory (deleted when the
/// `TempDir` drops) plus an empty-but-valid daemon config file inside it.
struct TestContext {
    // Owns the temp dir; dropping it removes every daemon artefact
    // (socket, pidfile, lockfile, sentinel).
    runtime_dir: TempDir,
    // Empty TOML file passed via SQRY_DAEMON_CONFIG so tests never read a
    // developer's real daemon configuration.
    valid_empty_config_path: PathBuf,
}
impl TestContext {
    /// Create a fresh temp runtime dir containing an empty (hence valid)
    /// daemon config file, isolating the test from any real config.
    fn new() -> Self {
        let runtime_dir = TempDir::new().expect("create test runtime_dir");
        let config_file = runtime_dir.path().join("empty-daemon.toml");
        std::fs::write(&config_file, b"")
            .expect("create empty config file for test isolation");
        Self {
            runtime_dir,
            valid_empty_config_path: config_file,
        }
    }

    /// Join `name` under the daemon's `<runtime_dir>/sqry/` state directory.
    fn state_file(&self, name: &str) -> PathBuf {
        self.runtime_dir.path().join("sqry").join(name)
    }

    /// Unix socket the daemon is expected to bind.
    fn socket_path(&self) -> PathBuf {
        self.state_file("sqryd.sock")
    }

    /// Sentinel file the tests expect after a successful detach startup.
    fn ready_sentinel(&self) -> PathBuf {
        self.state_file("sqryd.ready")
    }

    /// Pidfile from which tests read the grandchild daemon's PID.
    fn pidfile_path(&self) -> PathBuf {
        self.state_file("sqryd.pid")
    }

    /// Lockfile the grandchild is expected to hold an flock on.
    fn lockfile_path(&self) -> PathBuf {
        self.state_file("sqryd.lock")
    }

    /// Build a `DaemonConfig` pointing at this context's socket with the
    /// given ready timeout, then apply env overrides as production would.
    fn make_daemon_config(&self, auto_start_ready_timeout_secs: u64) -> DaemonConfig {
        let mut cfg = DaemonConfig {
            socket: SocketConfig {
                path: Some(self.socket_path()),
                pipe_name: None,
            },
            auto_start_ready_timeout_secs,
            ..DaemonConfig::default()
        };
        cfg.apply_env_overrides()
            .expect("apply_env_overrides in make_daemon_config");
        cfg
    }

    /// Spawn `sqryd start --detach` with its environment scoped to this
    /// context (runtime dir, socket, config, log level), stdout piped so
    /// the parent's output can be captured, stderr passed through.
    fn spawn_start_detach(&self, sqryd: &Path) -> Child {
        let mut cmd = Command::new(sqryd);
        cmd.args(["start", "--detach"])
            .env("XDG_RUNTIME_DIR", self.runtime_dir.path())
            .env("SQRY_DAEMON_SOCKET", self.socket_path())
            .env("SQRY_DAEMON_CONFIG", &self.valid_empty_config_path)
            .env("SQRY_DAEMON_LOG_LEVEL", "warn")
            .env_remove("TMPDIR")
            .stdin(Stdio::null())
            .stdout(Stdio::piped())
            .stderr(Stdio::inherit());
        cmd.spawn()
            .unwrap_or_else(|e| panic!("failed to spawn sqryd start --detach: {e}"))
    }
}
/// RAII guard that overrides `XDG_RUNTIME_DIR` for the current process and
/// restores the prior value (or removes the variable entirely) on drop.
struct XdgRuntimeDirGuard {
    // Value XDG_RUNTIME_DIR held before the override, if any.
    prior: Option<String>,
}
impl XdgRuntimeDirGuard {
    /// Point `XDG_RUNTIME_DIR` at `path`, remembering the previous value.
    fn set(path: &Path) -> Self {
        let prior = std::env::var("XDG_RUNTIME_DIR").ok();
        // SAFETY: tests take ENV_LOCK before constructing this guard, so
        // environment mutation is serialised within this process.
        #[allow(unsafe_code)]
        unsafe {
            std::env::set_var("XDG_RUNTIME_DIR", path);
        }
        Self { prior }
    }
}
impl Drop for XdgRuntimeDirGuard {
    fn drop(&mut self) {
        // SAFETY: same serialisation argument as in `set` — the guard is
        // dropped while the caller still holds ENV_LOCK.
        #[allow(unsafe_code)]
        unsafe {
            if let Some(previous) = self.prior.take() {
                std::env::set_var("XDG_RUNTIME_DIR", previous);
            } else {
                std::env::remove_var("XDG_RUNTIME_DIR");
            }
        }
    }
}
/// Poll-connect to the Unix socket at `path` every 50 ms until a connect
/// succeeds or `timeout` elapses. Returns `true` on the first successful
/// `UnixStream::connect`; `false` once the deadline passes.
async fn socket_connectable(path: &Path, timeout: Duration) -> bool {
    let give_up_at = Instant::now() + timeout;
    while UnixStream::connect(path).await.is_err() {
        if Instant::now() >= give_up_at {
            return false;
        }
        tokio::time::sleep(Duration::from_millis(50)).await;
    }
    true
}
/// Poll `child` with `try_wait` every 25 ms until it exits or `timeout`
/// elapses. Returns `Some(status)` on exit (the child is then reaped) and
/// `None` on timeout, leaving the child running for the caller to handle.
///
/// # Panics
/// Panics if `try_wait` itself errors (a broken process handle is a bug).
fn wait_for_exit(child: &mut Child, timeout: Duration) -> Option<std::process::ExitStatus> {
    let give_up_at = Instant::now() + timeout;
    loop {
        match child.try_wait().expect("try_wait") {
            Some(status) => return Some(status),
            None if Instant::now() >= give_up_at => return None,
            None => std::thread::sleep(Duration::from_millis(25)),
        }
    }
}
/// Happy-path detach: `sqryd start --detach` forks a grandchild daemon, the
/// parent exits 0 once the grandchild signals readiness, and the grandchild
/// leaves a connectable Unix socket plus a `sqryd.ready` sentinel behind.
/// SKIPs (rather than fails) when the sqryd binary has not been built.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn start_detached_spawns_child_and_socket_becomes_connectable() {
    let sqryd = match find_sqryd_binary() {
        Some(p) => p,
        None => {
            eprintln!(
                "SKIP start_detached_spawns_child_and_socket_becomes_connectable: \
                 sqryd binary not found (run `cargo build -p sqry-daemon` first)"
            );
            return;
        }
    };
    let ctx = TestContext::new();
    let mut parent = ctx.spawn_start_detach(&sqryd);
    // FIX: an unconditional `let _ = parent.wait();` used to sit between
    // `wait_for_exit` and this match. When the 15 s timeout expired with the
    // parent still running, that call blocked forever, so the diagnostics in
    // the `None` branch were unreachable and the whole suite hung instead of
    // failing. `try_wait` inside `wait_for_exit` already reaps the parent on
    // the happy path, so no extra `wait()` is needed here.
    let parent_status = match wait_for_exit(&mut parent, Duration::from_secs(15)) {
        Some(s) => s,
        None => {
            // Parent hung: kill and reap it, then best-effort SIGTERM any
            // grandchild recorded in the pidfile before failing loudly.
            let _ = parent.kill();
            let _ = parent.wait();
            if let Ok(pid_str) = std::fs::read_to_string(ctx.pidfile_path())
                && let Ok(pid) = pid_str.trim().parse::<u32>()
            {
                unsafe {
                    libc::kill(pid as libc::pid_t, libc::SIGTERM);
                }
            }
            panic!(
                "sqryd start --detach parent did not exit within 15 s \
                 (socket: {})",
                ctx.socket_path().display()
            );
        }
    };
    assert_eq!(
        parent_status.code(),
        Some(0),
        "sqryd start --detach parent must exit 0 after grandchild signals ready, \
         got: {parent_status:?}"
    );
    let connectable = socket_connectable(&ctx.socket_path(), Duration::from_secs(5)).await;
    // Tear the grandchild down BEFORE asserting so a failed assertion cannot
    // leak a running daemon process.
    let grandchild_pid: Option<u32> = std::fs::read_to_string(ctx.pidfile_path())
        .ok()
        .and_then(|s| s.trim().parse::<u32>().ok());
    if let Some(pid) = grandchild_pid {
        assert!(
            pid > 1,
            "grandchild PID from pidfile must be > 1, got {pid}"
        );
        unsafe {
            libc::kill(pid as libc::pid_t, libc::SIGTERM);
        }
        // Wait for the socket to disappear (daemon cleanup on shutdown —
        // assumed; see lifecycle impl); escalate to SIGKILL after 7 s.
        let deadline = Instant::now() + Duration::from_secs(7);
        loop {
            if !ctx.socket_path().exists() {
                break;
            }
            if Instant::now() >= deadline {
                unsafe {
                    libc::kill(pid as libc::pid_t, libc::SIGKILL);
                }
                break;
            }
            std::thread::sleep(Duration::from_millis(50));
        }
    }
    assert!(
        connectable,
        "grandchild socket must be connectable after `sqryd start --detach` parent exits 0 \
         (socket: {})",
        ctx.socket_path().display()
    );
    assert!(
        ctx.ready_sentinel().exists(),
        "sqryd.ready sentinel must exist after successful detach startup \
         (path: {})",
        ctx.ready_sentinel().display()
    );
}
/// A 1-second `auto_start_ready_timeout_secs` must surface as
/// `DaemonError::AutoStartTimeout { timeout_secs: 1 }` when nothing ever
/// binds the socket.
#[allow(clippy::await_holding_lock)]
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn start_detached_respects_auto_start_ready_timeout() {
    let ctx = TestContext::new();
    // Serialise env mutation across tests, then scope XDG_RUNTIME_DIR to
    // the temp dir for the duration of the start_detached call.
    let _guard = ENV_LOCK.lock().unwrap_or_else(|e| e.into_inner());
    let _xdg = XdgRuntimeDirGuard::set(ctx.runtime_dir.path());
    let cfg = ctx.make_daemon_config(1);
    let result = start_detached(&cfg).await;
    // Restore the environment before asserting so a failed assertion
    // cannot leak the override into later tests.
    drop(_xdg);
    drop(_guard);
    match result {
        Ok(pid) => panic!(
            "start_detached must return AutoStartTimeout when the socket \
             never becomes connectable; got Ok(pid={pid})"
        ),
        Err(DaemonError::AutoStartTimeout { timeout_secs, .. }) => assert_eq!(
            timeout_secs, 1,
            "AutoStartTimeout.timeout_secs must match the configured value"
        ),
        Err(other) => panic!(
            "start_detached returned unexpected error: {other:?}; \
             expected DaemonError::AutoStartTimeout(timeout_secs=1). \
             The polling loop must drain the full timeout before returning."
        ),
    }
}
/// M2 guarantee: concurrent `start_detached` callers must be serialised by
/// an exclusive lock on the bootstrap lock file, so at most one caller acts
/// as the spawner. A probe task tries to grab the lock while ten callers
/// run; seeing even one WouldBlock proves some caller was holding it.
#[allow(clippy::await_holding_lock)]
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn start_detached_bootstrap_lock_serialises_concurrent_callers() {
    let ctx = TestContext::new();
    // Take the process-wide env lock before touching XDG_RUNTIME_DIR.
    let _guard = ENV_LOCK.lock().unwrap_or_else(|e| e.into_inner());
    let _xdg = XdgRuntimeDirGuard::set(ctx.runtime_dir.path());
    let cfg = Arc::new(ctx.make_daemon_config(1));
    let lock_path = bootstrap_lock_path(&cfg);
    std::fs::create_dir_all(lock_path.parent().unwrap())
        .expect("create runtime dir for bootstrap lock");
    // Flag set by the probe the first time it observes the lock held.
    let observed_contention = Arc::new(AtomicBool::new(false));
    let probe_lock_path = lock_path.clone();
    let contention_flag = Arc::clone(&observed_contention);
    let probe_handle = tokio::spawn(async move {
        for _ in 0..4_000u32 {
            // Open without truncating so the probe never destroys content
            // the real callers may have written to the lock file.
            if let Ok(f) = std::fs::OpenOptions::new()
                .read(true)
                .write(true)
                .create(true)
                .truncate(false)
                .open(&probe_lock_path)
                && f.try_lock_exclusive().is_err()
            {
                contention_flag.store(true, Ordering::SeqCst);
                break;
            }
            tokio::time::sleep(Duration::from_millis(5)).await;
        }
    });
    // Ten concurrent callers; their individual results are ignored — only
    // the lock behaviour observed by the probe matters here.
    let mut handles = Vec::with_capacity(10);
    for _ in 0..10 {
        let cfg_clone = Arc::clone(&cfg);
        handles.push(tokio::spawn(async move {
            let _ = start_detached(&cfg_clone).await;
        }));
    }
    for h in handles {
        h.await.expect("start_detached task panicked");
    }
    probe_handle.abort();
    // Restore env in order: XDG guard first, then release the env lock.
    drop(_xdg);
    drop(_guard);
    assert!(
        lock_path.exists(),
        "bootstrap lock file must exist after concurrent start_detached calls \
         (path: {})",
        lock_path.display()
    );
    assert!(
        observed_contention.load(Ordering::SeqCst),
        "probe must observe at least one WouldBlock on the bootstrap lock, \
         proving exclusive ownership (M2 single-spawner guarantee); \
         bootstrap lock path: {}",
        lock_path.display()
    );
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn start_detached_inherited_fd_lock_is_held_by_grandchild() {
let sqryd = match find_sqryd_binary() {
Some(p) => p,
None => {
eprintln!(
"SKIP start_detached_inherited_fd_lock_is_held_by_grandchild: \
sqryd binary not found (run `cargo build -p sqry-daemon` first)"
);
return;
}
};
let ctx = TestContext::new();
let mut parent = ctx.spawn_start_detach(&sqryd);
let parent_status = wait_for_exit(&mut parent, Duration::from_secs(15));
let _ = parent.wait();
let parent_status = match parent_status {
Some(s) => s,
None => {
let _ = parent.kill();
let _ = parent.wait();
eprintln!(
"SKIP start_detached_inherited_fd_lock_is_held_by_grandchild: \
parent did not exit within 15 s (daemon may not have started)"
);
return;
}
};
if parent_status.code() != Some(0) {
eprintln!(
"SKIP start_detached_inherited_fd_lock_is_held_by_grandchild: \
parent exited with non-zero code {parent_status:?} (grandchild may not be running)"
);
if let Ok(pid_str) = std::fs::read_to_string(ctx.pidfile_path())
&& let Ok(pid) = pid_str.trim().parse::<u32>()
&& pid > 1
{
unsafe {
libc::kill(pid as libc::pid_t, libc::SIGTERM);
}
}
return;
}
let connectable = socket_connectable(&ctx.socket_path(), Duration::from_secs(5)).await;
if !connectable {
eprintln!(
"SKIP start_detached_inherited_fd_lock_is_held_by_grandchild: \
grandchild socket not connectable within 5 s"
);
if let Ok(pid_str) = std::fs::read_to_string(ctx.pidfile_path())
&& let Ok(pid) = pid_str.trim().parse::<u32>()
&& pid > 1
{
unsafe {
libc::kill(pid as libc::pid_t, libc::SIGTERM);
}
}
return;
}
let grandchild_pid: u32 = match std::fs::read_to_string(ctx.pidfile_path())
.ok()
.and_then(|s| s.trim().parse::<u32>().ok())
{
Some(pid) => pid,
None => {
eprintln!(
"SKIP start_detached_inherited_fd_lock_is_held_by_grandchild: \
could not read grandchild PID from pidfile ({})",
ctx.pidfile_path().display()
);
if ctx.socket_path().exists() {
let _ = std::fs::remove_file(ctx.socket_path());
}
return;
}
};
assert!(
grandchild_pid > 1,
"grandchild PID must be > 1, got {grandchild_pid}"
);
let lockfile_path = ctx.lockfile_path();
let lockfile_appeared = {
let deadline = Instant::now() + Duration::from_secs(3);
loop {
if lockfile_path.exists() {
break true;
}
if Instant::now() >= deadline {
break false;
}
std::thread::sleep(Duration::from_millis(20));
}
};
if !lockfile_appeared {
eprintln!(
"SKIP start_detached_inherited_fd_lock_is_held_by_grandchild: \
lockfile did not appear within 3 s (path: {})",
lockfile_path.display()
);
unsafe {
libc::kill(grandchild_pid as libc::pid_t, libc::SIGTERM);
}
return;
}
let probe_fd = match std::fs::OpenOptions::new()
.read(true)
.write(true)
.create(false)
.open(&lockfile_path)
{
Ok(f) => f,
Err(e) => {
unsafe {
libc::kill(grandchild_pid as libc::pid_t, libc::SIGTERM);
}
eprintln!(
"SKIP start_detached_inherited_fd_lock_is_held_by_grandchild: \
failed to open lockfile for probe: {e} (path: {})",
lockfile_path.display()
);
return;
}
};
let lock_result = probe_fd.try_lock_exclusive();
#[cfg(target_os = "linux")]
let proc_fd_found = {
let lock_meta = std::fs::metadata(&lockfile_path).ok();
let lock_inode = lock_meta.map(|m| {
use std::os::unix::fs::MetadataExt as _;
m.ino()
});
let mut found = false;
if let Some(target_inode) = lock_inode {
let proc_fd_dir = PathBuf::from(format!("/proc/{grandchild_pid}/fd"));
if let Ok(entries) = std::fs::read_dir(&proc_fd_dir) {
for entry in entries.flatten() {
if let Ok(target) = std::fs::canonicalize(entry.path())
&& let Ok(meta) = std::fs::metadata(&target)
{
use std::os::unix::fs::MetadataExt as _;
if meta.ino() == target_inode {
found = true;
break;
}
}
}
}
}
found
};
unsafe {
libc::kill(grandchild_pid as libc::pid_t, libc::SIGTERM);
}
let shutdown_deadline = Instant::now() + Duration::from_secs(7);
loop {
if !ctx.socket_path().exists() {
break;
}
if Instant::now() >= shutdown_deadline {
unsafe {
libc::kill(grandchild_pid as libc::pid_t, libc::SIGKILL);
}
break;
}
std::thread::sleep(Duration::from_millis(50));
}
assert!(
lock_result.is_err(),
"try_lock_exclusive on a fresh FD to the lockfile must return WouldBlock \
while the grandchild holds the inherited OFD-level flock (M1 proof); \
got: Ok(()) — the grandchild does NOT hold the lock, indicating \
FD-inheritance through pre_exec failed (lockfile: {})",
lockfile_path.display()
);
let lock_err = lock_result.unwrap_err();
assert_eq!(
lock_err.kind(),
std::io::ErrorKind::WouldBlock,
"try_lock_exclusive error must be WouldBlock, got: {lock_err:?}"
);
#[cfg(target_os = "linux")]
assert!(
proc_fd_found,
"lockfile inode must appear in /proc/{grandchild_pid}/fd, \
proving the FD survived pre_exec (FD_CLOEXEC was cleared correctly); \
lockfile: {}",
lockfile_path.display()
);
}
/// `bootstrap_lock_path` must derive a file named `sqryd.bootstrap.lock`
/// from a config whose socket lives under a temp runtime directory.
#[test]
fn bootstrap_lock_path_reachable() {
    let tmp = TempDir::new().expect("TempDir");
    let socket = tmp.path().join("sqry").join("sqryd.sock");
    let cfg = DaemonConfig {
        socket: SocketConfig {
            path: Some(socket),
            pipe_name: None,
        },
        ..DaemonConfig::default()
    };
    let lock = bootstrap_lock_path(&cfg);
    let file_name = lock.file_name().and_then(|n| n.to_str());
    assert_eq!(
        file_name,
        Some("sqryd.bootstrap.lock"),
        "bootstrap_lock_path filename must be 'sqryd.bootstrap.lock'"
    );
}