1use std::collections::{HashMap, HashSet};
4use std::io::{BufRead, BufReader, Write as IoWrite};
5#[cfg(unix)]
6use std::os::unix::net::UnixStream as StdUnixStream;
7use std::path::{Path, PathBuf};
8use std::sync::Arc;
9use std::time::{Duration, Instant};
10
11use notify::{Event, EventKind, RecommendedWatcher, RecursiveMode, Watcher};
12use serde::{Deserialize, Serialize};
13use tokio::io::{AsyncBufReadExt, AsyncWriteExt};
14#[cfg(unix)]
15use tokio::net::UnixListener;
16use tokio::sync::{Mutex, mpsc};
17
18use crate::config::{
19 self, AutoApplyPolicyConfig, CfgdConfig, MergedProfile, NotifyMethod, OriginType, PolicyAction,
20 ResolvedProfile,
21};
22use crate::errors::{DaemonError, Result};
23use crate::output::{Printer, Verbosity};
24use crate::providers::{FileAction, PackageAction, PackageManager, ProviderRegistry};
25use crate::state::StateStore;
26
/// Callbacks the daemon delegates to the host application, keeping this
/// module decoupled from the concrete planning/provider implementations.
pub trait DaemonHooks: Send + Sync {
    /// Build the provider registry appropriate for `config`.
    fn build_registry(&self, config: &CfgdConfig) -> ProviderRegistry;

    /// Plan the file actions needed for `resolved`, relative to `config_dir`.
    fn plan_files(&self, config_dir: &Path, resolved: &ResolvedProfile) -> Result<Vec<FileAction>>;

    /// Plan the package actions for the merged profile using `managers`.
    fn plan_packages(
        &self,
        profile: &MergedProfile,
        managers: &[&dyn PackageManager],
    ) -> Result<Vec<PackageAction>>;

    /// Register any custom package managers declared in `packages` into `registry`.
    fn extend_registry_custom_managers(
        &self,
        registry: &mut ProviderRegistry,
        packages: &config::PackagesSpec,
    );

    /// Expand user shorthand (e.g. a leading `~`) in `path`; exact semantics
    /// are supplied by the host application.
    fn expand_tilde(&self, path: &Path) -> PathBuf;
}
53
/// Debounce window for coalescing rapid file-change events.
const DEBOUNCE_MS: u64 = 500;
/// IPC endpoint for the daemon health/status server.
#[cfg(unix)]
const DEFAULT_IPC_PATH: &str = "/tmp/cfgd.sock";
#[cfg(windows)]
const DEFAULT_IPC_PATH: &str = r"\\.\pipe\cfgd";
/// Fallback reconcile interval when the config does not specify one.
const DEFAULT_RECONCILE_SECS: u64 = 300;
/// Fallback sync interval when the config does not specify one.
const DEFAULT_SYNC_SECS: u64 = 300;
#[cfg(unix)]
const LAUNCHD_LABEL: &str = "com.cfgd.daemon";
#[cfg(unix)]
const LAUNCHD_AGENTS_DIR: &str = "Library/LaunchAgents";
#[cfg(unix)]
const SYSTEMD_USER_DIR: &str = ".config/systemd/user";
67
/// Periodic git-sync work item for one source repository.
struct SyncTask {
    // Display name; "local" for the config repo itself.
    source_name: String,
    // Repository this task pulls from / pushes to.
    repo_path: PathBuf,
    auto_pull: bool,
    auto_push: bool,
    // Whether detected changes may be applied without operator confirmation.
    auto_apply: bool,
    // Minimum time between syncs of this source.
    interval: Duration,
    // None until the first sync has run.
    last_synced: Option<Instant>,
    // Set when the source's manifest demands signed commits.
    require_signed_commits: bool,
    // Global security override permitting unsigned commits.
    allow_unsigned: bool,
}
83
/// Periodic reconcile work item; `entity` is a module name or the sentinel
/// "__default__" for the profile-wide task.
struct ReconcileTask {
    entity: String,
    interval: Duration,
    auto_apply: bool,
    drift_policy: config::DriftPolicy,
    // None until the first reconcile has run.
    last_reconciled: Option<Instant>,
}
94
/// Status of a single sync source, as reported over the IPC status endpoint.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SourceStatus {
    pub name: String,
    /// Timestamp of the last successful sync, if any.
    pub last_sync: Option<String>,
    /// Timestamp of the last reconcile touching this source, if any.
    pub last_reconcile: Option<String>,
    pub drift_count: u32,
    /// Free-form state label (e.g. "active").
    pub status: String,
}
106
/// Wire format returned by the daemon's IPC status endpoint.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct DaemonStatusResponse {
    pub running: bool,
    pub pid: u32,
    pub uptime_secs: u64,
    /// Timestamp of the most recent reconcile, if any has run.
    pub last_reconcile: Option<String>,
    /// Timestamp of the most recent sync, if any has run.
    pub last_sync: Option<String>,
    pub drift_count: u32,
    /// Per-source status; the built-in "local" source comes first.
    pub sources: Vec<SourceStatus>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub update_available: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub module_reconcile: Vec<ModuleReconcileStatus>,
}
124
/// Per-module reconcile configuration/status as reported over IPC.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ModuleReconcileStatus {
    pub name: String,
    /// Interval in its config-string form (not normalized to seconds).
    pub interval: String,
    pub auto_apply: bool,
    pub drift_policy: String,
    pub last_reconcile: Option<String>,
}
134
/// Mutable runtime state shared (behind a mutex) between the main loop and
/// the health server.
struct DaemonState {
    started_at: Instant,
    // ISO-8601 timestamps of the most recent runs; None before the first.
    last_reconcile: Option<String>,
    last_sync: Option<String>,
    drift_count: u32,
    // Index 0 is always the built-in "local" source.
    sources: Vec<SourceStatus>,
    update_available: Option<String>,
    // Per-module reconcile timestamps keyed by module name.
    module_last_reconcile: HashMap<String, String>,
}
144
145impl DaemonState {
146 fn new() -> Self {
147 Self {
148 started_at: Instant::now(),
149 last_reconcile: None,
150 last_sync: None,
151 drift_count: 0,
152 sources: vec![SourceStatus {
153 name: "local".to_string(),
154 last_sync: None,
155 last_reconcile: None,
156 drift_count: 0,
157 status: "active".to_string(),
158 }],
159 update_available: None,
160 module_last_reconcile: HashMap::new(),
161 }
162 }
163
164 fn to_response(&self) -> DaemonStatusResponse {
165 DaemonStatusResponse {
166 running: true,
167 pid: std::process::id(),
168 uptime_secs: self.started_at.elapsed().as_secs(),
169 last_reconcile: self.last_reconcile.clone(),
170 last_sync: self.last_sync.clone(),
171 drift_count: self.drift_count,
172 sources: self.sources.clone(),
173 update_available: self.update_available.clone(),
174 module_reconcile: vec![],
175 }
176 }
177}
178
/// Delivers drift/update notifications via the configured channel.
struct Notifier {
    method: NotifyMethod,
    // Target URL; only consulted when `method` is `NotifyMethod::Webhook`.
    webhook_url: Option<String>,
}
185
186impl Notifier {
187 fn new(method: NotifyMethod, webhook_url: Option<String>) -> Self {
188 Self {
189 method,
190 webhook_url,
191 }
192 }
193
194 fn notify(&self, title: &str, message: &str) {
195 match self.method {
196 NotifyMethod::Desktop => self.notify_desktop(title, message),
197 NotifyMethod::Stdout => self.notify_stdout(title, message),
198 NotifyMethod::Webhook => self.notify_webhook(title, message),
199 }
200 }
201
202 fn notify_desktop(&self, title: &str, message: &str) {
203 match notify_rust::Notification::new()
204 .summary(title)
205 .body(message)
206 .appname("cfgd")
207 .show()
208 {
209 Ok(_) => tracing::debug!(title = %title, "desktop notification sent"),
210 Err(e) => {
211 tracing::warn!(error = %e, "desktop notification failed, falling back to stdout");
212 self.notify_stdout(title, message);
213 }
214 }
215 }
216
217 fn notify_stdout(&self, title: &str, message: &str) {
218 tracing::info!(title = %title, message = %message, "notification");
219 }
220
221 fn notify_webhook(&self, title: &str, message: &str) {
222 let Some(ref url) = self.webhook_url else {
223 tracing::warn!("webhook notification requested but no webhook-url configured");
224 return;
225 };
226
227 let payload = serde_json::json!({
228 "event": title,
229 "message": message,
230 "timestamp": crate::utc_now_iso8601(),
231 "source": "cfgd",
232 });
233
234 let url = url.clone();
235 let body = payload.to_string();
236
237 tokio::task::spawn_blocking(move || {
239 match ureq::AgentBuilder::new()
240 .timeout(std::time::Duration::from_secs(10))
241 .build()
242 .post(&url)
243 .set("Content-Type", "application/json")
244 .send_string(&body)
245 {
246 Ok(_) => tracing::debug!(url = %url, "webhook notification sent"),
247 Err(e) => tracing::warn!(error = %e, "webhook notification failed"),
248 }
249 });
250 }
251}
252
/// Request body POSTed to the management server's check-in endpoint.
#[derive(Debug, Serialize)]
struct CheckinPayload {
    // Stable per-machine identifier (see generate_device_id).
    device_id: String,
    hostname: String,
    os: String,
    arch: String,
    // Hash of the local profile's packages section (see compute_config_hash).
    config_hash: String,
}
263
/// Response from the check-in endpoint. Only `config_changed` is acted on;
/// the underscore-prefixed fields are deserialized but currently unused.
#[derive(Debug, Deserialize)]
struct CheckinServerResponse {
    #[serde(rename = "status")]
    _status: String,
    config_changed: bool,
    #[serde(rename = "config")]
    _config: Option<serde_json::Value>,
}
272
273fn generate_device_id() -> std::result::Result<String, String> {
275 let host = hostname::get()
276 .map_err(|e| format!("failed to get hostname: {}", e))?
277 .to_string_lossy()
278 .to_string();
279 Ok(crate::sha256_hex(host.as_bytes()))
280}
281
282fn compute_config_hash(resolved: &ResolvedProfile) -> std::result::Result<String, String> {
284 let yaml = serde_yaml::to_string(&resolved.merged.packages)
285 .map_err(|e| format!("failed to serialize profile for hashing: {}", e))?;
286 Ok(crate::sha256_hex(yaml.as_bytes()))
287}
288
289fn server_checkin(server_url: &str, resolved: &ResolvedProfile) -> bool {
292 let device_id = match generate_device_id() {
293 Ok(id) => id,
294 Err(e) => {
295 tracing::warn!(error = %e, "server check-in failed");
296 return false;
297 }
298 };
299
300 let host = match hostname::get() {
301 Ok(h) => h.to_string_lossy().to_string(),
302 Err(e) => {
303 tracing::warn!(error = %e, "server check-in: failed to get hostname");
304 return false;
305 }
306 };
307
308 let config_hash = match compute_config_hash(resolved) {
309 Ok(h) => h,
310 Err(e) => {
311 tracing::warn!(error = %e, "server check-in failed");
312 return false;
313 }
314 };
315
316 let payload = CheckinPayload {
317 device_id,
318 hostname: host,
319 os: std::env::consts::OS.to_string(),
320 arch: std::env::consts::ARCH.to_string(),
321 config_hash,
322 };
323
324 let url = format!("{}/api/v1/checkin", server_url.trim_end_matches('/'));
325
326 let body = match serde_json::to_string(&payload) {
327 Ok(b) => b,
328 Err(e) => {
329 tracing::warn!(error = %e, "server check-in: failed to serialize payload");
330 return false;
331 }
332 };
333
334 tracing::info!(
335 url = %url,
336 device_id = %payload.device_id,
337 "checking in with server"
338 );
339
340 match ureq::post(&url)
341 .set("Content-Type", "application/json")
342 .send_string(&body)
343 {
344 Ok(response) => {
345 let status = response.status();
346 match response.into_string() {
347 Ok(resp_body) => match serde_json::from_str::<CheckinServerResponse>(&resp_body) {
348 Ok(resp) => {
349 tracing::info!(
350 config_changed = resp.config_changed,
351 "server check-in successful"
352 );
353 resp.config_changed
354 }
355 Err(e) => {
356 tracing::warn!(
357 status = status,
358 error = %e,
359 "server check-in: failed to parse response"
360 );
361 false
362 }
363 },
364 Err(e) => {
365 tracing::warn!(error = %e, "server check-in: failed to read response body");
366 false
367 }
368 }
369 }
370 Err(e) => {
371 tracing::warn!(error = %e, "server check-in failed");
372 false
373 }
374 }
375}
376
377fn find_server_url(config: &CfgdConfig) -> Option<String> {
379 config
380 .spec
381 .origin
382 .iter()
383 .find(|o| matches!(o.origin_type, OriginType::Server))
384 .map(|o| o.url.clone())
385}
386
387fn try_server_checkin(config: &CfgdConfig, resolved: &ResolvedProfile) -> bool {
389 match find_server_url(config) {
390 Some(url) => server_checkin(&url, resolved),
391 None => false,
392 }
393}
394
/// Daemon settings flattened from the optional config sections, with
/// defaults substituted for anything unspecified (see parse_daemon_config).
struct ParsedDaemonConfig {
    reconcile_interval: Duration,
    sync_interval: Duration,
    auto_pull: bool,
    auto_push: bool,
    // Run a reconcile immediately when a watched file changes.
    on_change_reconcile: bool,
    notify_on_drift: bool,
    notify_method: NotifyMethod,
    // Only meaningful for `NotifyMethod::Webhook`.
    webhook_url: Option<String>,
    auto_apply: bool,
}
409
410fn parse_daemon_config(daemon_cfg: &config::DaemonConfig) -> ParsedDaemonConfig {
411 let reconcile_interval = daemon_cfg
412 .reconcile
413 .as_ref()
414 .map(|r| parse_duration_or_default(&r.interval))
415 .unwrap_or(Duration::from_secs(DEFAULT_RECONCILE_SECS));
416
417 let sync_interval = daemon_cfg
418 .sync
419 .as_ref()
420 .map(|s| parse_duration_or_default(&s.interval))
421 .unwrap_or(Duration::from_secs(DEFAULT_SYNC_SECS));
422
423 let auto_pull = daemon_cfg
424 .sync
425 .as_ref()
426 .map(|s| s.auto_pull)
427 .unwrap_or(false);
428
429 let auto_push = daemon_cfg
430 .sync
431 .as_ref()
432 .map(|s| s.auto_push)
433 .unwrap_or(false);
434
435 let on_change_reconcile = daemon_cfg
436 .reconcile
437 .as_ref()
438 .map(|r| r.on_change)
439 .unwrap_or(false);
440
441 let notify_on_drift = daemon_cfg.notify.as_ref().map(|n| n.drift).unwrap_or(false);
442
443 let notify_method = daemon_cfg
444 .notify
445 .as_ref()
446 .map(|n| n.method.clone())
447 .unwrap_or(NotifyMethod::Stdout);
448
449 let webhook_url = daemon_cfg
450 .notify
451 .as_ref()
452 .and_then(|n| n.webhook_url.clone());
453
454 let auto_apply = daemon_cfg
455 .reconcile
456 .as_ref()
457 .map(|r| r.auto_apply)
458 .unwrap_or(false);
459
460 ParsedDaemonConfig {
461 reconcile_interval,
462 sync_interval,
463 auto_pull,
464 auto_push,
465 on_change_reconcile,
466 notify_on_drift,
467 notify_method,
468 webhook_url,
469 auto_apply,
470 }
471}
472
/// Build the reconcile task list.
///
/// The list always ends with the profile-wide "__default__" task. When the
/// daemon config carries reconcile patches, an extra per-module task is
/// added for each module whose effective settings differ from the global
/// reconcile settings.
///
/// NOTE(review): per-module tasks are only created when `patches` is
/// non-empty — confirm modules with overrides are expected to require at
/// least one patch entry.
fn build_reconcile_tasks(
    daemon_cfg: &config::DaemonConfig,
    resolved: Option<&config::ResolvedProfile>,
    profile_chain: &[&str],
    reconcile_interval: Duration,
    auto_apply: bool,
) -> Vec<ReconcileTask> {
    let reconcile_patches = daemon_cfg
        .reconcile
        .as_ref()
        .map(|r| &r.patches[..])
        .unwrap_or(&[]);

    let mut tasks: Vec<ReconcileTask> = Vec::new();

    if !reconcile_patches.is_empty() {
        // Warn about duplicate (kind, name) patch entries; the last one wins.
        let mut seen_patches: HashMap<(String, Option<String>), usize> = HashMap::new();
        for (i, patch) in reconcile_patches.iter().enumerate() {
            let key = (format!("{:?}", patch.kind), patch.name.clone());
            if let Some(prev) = seen_patches.insert(key, i) {
                tracing::warn!(
                    kind = ?patch.kind,
                    name = %patch.name.as_deref().unwrap_or("(all)"),
                    prev_position = prev,
                    position = i,
                    "duplicate reconcile patch — last wins"
                );
            }
        }

        if let Some(resolved) = resolved
            && let Some(reconcile_cfg) = daemon_cfg.reconcile.as_ref()
        {
            for mod_ref in &resolved.merged.modules {
                let mod_name = crate::modules::resolve_profile_module_name(mod_ref);
                let eff =
                    crate::resolve_effective_reconcile(mod_name, profile_chain, reconcile_cfg);

                // Only emit a dedicated task when the module's effective
                // settings actually differ from the global ones.
                if eff.interval != reconcile_cfg.interval
                    || eff.auto_apply != reconcile_cfg.auto_apply
                    || eff.drift_policy != reconcile_cfg.drift_policy
                {
                    tasks.push(ReconcileTask {
                        entity: mod_name.to_string(),
                        interval: parse_duration_or_default(&eff.interval),
                        auto_apply: eff.auto_apply,
                        drift_policy: eff.drift_policy,
                        last_reconciled: None,
                    });
                }
            }
        }
    }

    // Catch-all task that reconciles the whole profile.
    tasks.push(ReconcileTask {
        entity: "__default__".to_string(),
        interval: reconcile_interval,
        auto_apply,
        drift_policy: daemon_cfg
            .reconcile
            .as_ref()
            .map(|r| r.drift_policy.clone())
            .unwrap_or_default(),
        last_reconciled: None,
    });

    tasks
}
550
551fn build_sync_tasks(
556 config_dir: &Path,
557 parsed: &ParsedDaemonConfig,
558 sources: &[config::SourceSpec],
559 allow_unsigned: bool,
560 source_cache_dir: &Path,
561 manifest_detector: impl Fn(&Path) -> Option<bool>,
562) -> Vec<SyncTask> {
563 let mut tasks: Vec<SyncTask> = vec![SyncTask {
564 source_name: "local".to_string(),
565 repo_path: config_dir.to_path_buf(),
566 auto_pull: parsed.auto_pull,
567 auto_push: parsed.auto_push,
568 auto_apply: true,
569 interval: parsed.sync_interval,
570 last_synced: None,
571 require_signed_commits: false,
572 allow_unsigned,
573 }];
574
575 for source_spec in sources {
576 let source_dir = source_cache_dir.join(&source_spec.name);
577 if source_dir.exists() {
578 let require_signed = manifest_detector(&source_dir).unwrap_or(false);
579 tasks.push(SyncTask {
580 source_name: source_spec.name.clone(),
581 repo_path: source_dir,
582 auto_pull: true,
583 auto_push: false,
584 auto_apply: source_spec.sync.auto_apply,
585 interval: parse_duration_or_default(&source_spec.sync.interval),
586 last_synced: None,
587 require_signed_commits: require_signed,
588 allow_unsigned,
589 });
590 }
591 }
592
593 tasks
594}
595
596pub async fn run_daemon(
599 config_path: PathBuf,
600 profile_override: Option<String>,
601 printer: Arc<Printer>,
602 hooks: Arc<dyn DaemonHooks>,
603) -> Result<()> {
604 printer.header("Daemon");
605 printer.info("Starting cfgd daemon...");
606
607 let cfg = config::load_config(&config_path)?;
609 let daemon_cfg = cfg.spec.daemon.clone().unwrap_or(config::DaemonConfig {
610 enabled: true,
611 reconcile: None,
612 sync: None,
613 notify: None,
614 });
615
616 let parsed = parse_daemon_config(&daemon_cfg);
618 let reconcile_interval = parsed.reconcile_interval;
619 let sync_interval = parsed.sync_interval;
620 let auto_pull = parsed.auto_pull;
621 let auto_push = parsed.auto_push;
622 let on_change_reconcile = parsed.on_change_reconcile;
623 let notify_on_drift = parsed.notify_on_drift;
624
625 let notifier = Arc::new(Notifier::new(
626 parsed.notify_method.clone(),
627 parsed.webhook_url.clone(),
628 ));
629 let state = Arc::new(Mutex::new(DaemonState::new()));
630
631 let compliance_config = cfg.spec.compliance.clone();
633 let compliance_interval = compliance_config
634 .as_ref()
635 .filter(|c| c.enabled)
636 .and_then(|c| crate::parse_duration_str(&c.interval).ok());
637
638 let config_dir = config_path
640 .parent()
641 .unwrap_or_else(|| Path::new("."))
642 .to_path_buf();
643
644 let allow_unsigned = cfg.spec.security.as_ref().is_some_and(|s| s.allow_unsigned);
645
646 let source_cache_dir = crate::sources::SourceManager::default_cache_dir()
647 .unwrap_or_else(|_| config_dir.join(".cfgd-sources"));
648 let mut sync_tasks = build_sync_tasks(
649 &config_dir,
650 &parsed,
651 &cfg.spec.sources,
652 allow_unsigned,
653 &source_cache_dir,
654 |source_dir| {
655 crate::sources::detect_source_manifest(source_dir)
656 .ok()
657 .flatten()
658 .map(|m| m.spec.policy.constraints.require_signed_commits)
659 },
660 );
661
662 {
664 let mut st = state.lock().await;
665 for source in &cfg.spec.sources {
666 st.sources.push(SourceStatus {
667 name: source.name.clone(),
668 last_sync: None,
669 last_reconcile: None,
670 drift_count: 0,
671 status: "active".to_string(),
672 });
673 }
674 }
675
676 let managed_paths = discover_managed_paths(&config_path, profile_override.as_deref(), &*hooks);
678
679 let (file_tx, mut file_rx) = mpsc::channel::<PathBuf>(256);
681 let _watcher = setup_file_watcher(file_tx, &managed_paths, &config_dir)?;
682
683 #[cfg(unix)]
685 {
686 let socket_path = PathBuf::from(DEFAULT_IPC_PATH);
687 if socket_path.exists() {
688 if StdUnixStream::connect(&socket_path).is_ok() {
689 return Err(DaemonError::AlreadyRunning {
690 pid: std::process::id(),
691 }
692 .into());
693 }
694 let _ = std::fs::remove_file(&socket_path);
696 }
697 }
698 #[cfg(windows)]
699 {
700 if connect_daemon_ipc().is_some() {
701 return Err(DaemonError::AlreadyRunning {
702 pid: std::process::id(),
703 }
704 .into());
705 }
706 }
707
708 let health_state = Arc::clone(&state);
710 let ipc_path = DEFAULT_IPC_PATH.to_string();
711 let health_handle = tokio::spawn(async move {
712 if let Err(e) = run_health_server(&ipc_path, health_state).await {
713 tracing::error!(error = %e, "health server error");
714 }
715 });
716
717 let mut intervals = vec![format!("reconcile={}s", reconcile_interval.as_secs())];
718 if auto_pull || auto_push {
719 intervals.push(format!(
720 "sync={}s (pull={}, push={})",
721 sync_interval.as_secs(),
722 auto_pull,
723 auto_push
724 ));
725 }
726 if let Some(interval) = compliance_interval {
727 intervals.push(format!("compliance={}s", interval.as_secs()));
728 }
729 printer.success(&format!("Health: {}", DEFAULT_IPC_PATH));
730 printer.success(&format!("Intervals: {}", intervals.join(", ")));
731 printer.info("Daemon running — press Ctrl+C to stop");
732 printer.newline();
733
734 if find_server_url(&cfg).is_some() {
736 let startup_cfg = cfg.clone();
737 let startup_config_path = config_path.clone();
738 let startup_profile_override = profile_override.clone();
739 tokio::task::spawn_blocking(move || {
740 let config_dir = startup_config_path
741 .parent()
742 .unwrap_or_else(|| Path::new("."))
743 .to_path_buf();
744 let profiles_dir = config_dir.join("profiles");
745 let profile_name = match startup_profile_override
746 .as_deref()
747 .or(startup_cfg.spec.profile.as_deref())
748 {
749 Some(p) => p,
750 None => {
751 tracing::error!("no profile configured — skipping reconciliation");
752 return;
753 }
754 };
755 match config::resolve_profile(profile_name, &profiles_dir) {
756 Ok(resolved) => {
757 let changed = try_server_checkin(&startup_cfg, &resolved);
758 if changed {
759 tracing::info!("server reports config changed at startup");
760 }
761 match crate::state::load_pending_server_config() {
764 Ok(Some(_pending)) => {
765 tracing::info!(
766 "startup: found pending server config — first reconcile will apply it"
767 );
768 if let Err(e) = crate::state::clear_pending_server_config() {
769 tracing::warn!(error = %e, "startup: failed to clear pending server config");
770 }
771 }
772 Ok(None) => {}
773 Err(e) => {
774 tracing::warn!(error = %e, "startup: failed to load pending server config");
775 }
776 }
777 }
778 Err(e) => {
779 tracing::warn!(error = %e, "startup check-in: failed to resolve profile");
780 }
781 }
782 })
783 .await
784 .map_err(|e| DaemonError::WatchError {
785 message: format!("startup check-in task failed: {}", e),
786 })?;
787 }
788
789 let profiles_dir = config_dir.join("profiles");
791 let profile_name = profile_override
792 .as_deref()
793 .or(cfg.spec.profile.as_deref())
794 .unwrap_or("default");
795 let resolved_profile = config::resolve_profile(profile_name, &profiles_dir).ok();
796 let profile_chain: Vec<String> = resolved_profile
797 .as_ref()
798 .map(|r| r.layers.iter().map(|l| l.profile_name.clone()).collect())
799 .unwrap_or_else(|| vec![profile_name.to_string()]);
800 let chain_refs: Vec<&str> = profile_chain.iter().map(|s| s.as_str()).collect();
801
802 let mut reconcile_tasks = build_reconcile_tasks(
803 &daemon_cfg,
804 resolved_profile.as_ref(),
805 &chain_refs,
806 reconcile_interval,
807 parsed.auto_apply,
808 );
809
810 let mut last_change: HashMap<PathBuf, Instant> = HashMap::new();
812 let debounce = Duration::from_millis(DEBOUNCE_MS);
813
814 let shortest_reconcile = reconcile_tasks
816 .iter()
817 .map(|t| t.interval)
818 .min()
819 .unwrap_or(reconcile_interval);
820 let shortest_sync = sync_tasks
821 .iter()
822 .map(|t| t.interval)
823 .min()
824 .unwrap_or(sync_interval);
825 let mut reconcile_timer = tokio::time::interval(shortest_reconcile);
826 let mut sync_timer = tokio::time::interval(shortest_sync);
827 let mut version_check_timer = tokio::time::interval(crate::upgrade::version_check_interval());
828
829 let mut compliance_timer = compliance_interval.map(tokio::time::interval);
831
832 #[cfg(unix)]
835 let mut sighup_signal = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::hangup())
836 .map_err(|e| DaemonError::WatchError {
837 message: format!("failed to register SIGHUP handler: {}", e),
838 })?;
839 #[cfg(not(unix))]
840 let mut sighup_signal = ();
841
842 #[cfg(unix)]
845 let mut sigterm_signal =
846 tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate()).map_err(|e| {
847 DaemonError::WatchError {
848 message: format!("failed to register SIGTERM handler: {}", e),
849 }
850 })?;
851 #[cfg(not(unix))]
852 let mut sigterm_signal = ();
853
854 reconcile_timer.tick().await;
856 sync_timer.tick().await;
857 version_check_timer.tick().await;
858 if let Some(ref mut timer) = compliance_timer {
859 timer.tick().await;
860 }
861
862 loop {
863 tokio::select! {
864 Some(path) = file_rx.recv() => {
865 let now = Instant::now();
867 if let Some(last) = last_change.get(&path)
868 && now.duration_since(*last) < debounce
869 {
870 continue;
871 }
872 last_change.insert(path.clone(), now);
873
874 tracing::info!(path = %path.display(), "file changed");
875
876 let drift_recorded = record_file_drift(&path);
878 if drift_recorded {
879 let mut st = state.lock().await;
880 st.drift_count += 1;
881 if let Some(source) = st.sources.first_mut() {
882 source.drift_count += 1;
883 }
884
885 if notify_on_drift {
886 notifier.notify(
887 "cfgd: drift detected",
888 &format!("File changed: {}", path.display()),
889 );
890 }
891 }
892
893 if on_change_reconcile {
895 let cp = config_path.clone();
896 let po = profile_override.clone();
897 let st = Arc::clone(&state);
898 let nt = Arc::clone(¬ifier);
899 let notify_drift = notify_on_drift;
900 let hk = Arc::clone(&hooks);
901 tokio::task::spawn_blocking(move || {
902 handle_reconcile(&cp, po.as_deref(), &st, &nt, notify_drift, &*hk, None);
903 }).await.map_err(|e| DaemonError::WatchError {
904 message: format!("reconcile task failed: {}", e),
905 })?;
906 }
907 }
908
909 _ = reconcile_timer.tick() => {
910 tracing::trace!("reconcile tick");
911 let now = Instant::now();
912
913 let mut ran_default = false;
915 for task in &mut reconcile_tasks {
916 if let Some(last) = task.last_reconciled
917 && now.duration_since(last) < task.interval
918 {
919 continue;
920 }
921 task.last_reconciled = Some(now);
922
923 if task.entity == "__default__" {
924 ran_default = true;
925 let cp = config_path.clone();
926 let po = profile_override.clone();
927 let st = Arc::clone(&state);
928 let nt = Arc::clone(¬ifier);
929 let notify_drift = notify_on_drift;
930 let hk = Arc::clone(&hooks);
931 tokio::task::spawn_blocking(move || {
932 handle_reconcile(&cp, po.as_deref(), &st, &nt, notify_drift, &*hk, None);
933 }).await.map_err(|e| DaemonError::WatchError {
934 message: format!("reconcile task failed: {}", e),
935 })?;
936 } else {
937 let entity_name = task.entity.clone();
941 tracing::info!(
942 module = %entity_name,
943 interval = %task.interval.as_secs(),
944 auto_apply = task.auto_apply,
945 drift_policy = ?task.drift_policy,
946 "per-module reconcile tick"
947 );
948 let rt = tokio::runtime::Handle::current();
949 let st = Arc::clone(&state);
950 let ts = crate::utc_now_iso8601();
951 rt.block_on(async {
952 let mut st = st.lock().await;
953 st.module_last_reconcile
954 .insert(entity_name, ts);
955 });
956 }
957 }
958
959 if !ran_default {
962 tracing::trace!("default reconcile task not due this tick");
963 }
964 }
965
966 _ = sync_timer.tick() => {
967 tracing::trace!("sync tick");
968 let now = Instant::now();
969 for task in &mut sync_tasks {
970 if let Some(last) = task.last_synced
972 && now.duration_since(last) < task.interval
973 {
974 continue;
975 }
976 task.last_synced = Some(now);
977
978 let st = Arc::clone(&state);
979 let repo = task.repo_path.clone();
980 let pull = task.auto_pull;
981 let push = task.auto_push;
982 let auto_apply = task.auto_apply;
983 let source_name = task.source_name.clone();
984 let require_signed = task.require_signed_commits;
985 let allow_uns = task.allow_unsigned;
986 tokio::task::spawn_blocking(move || {
987 let changed = handle_sync(&repo, pull, push, &source_name, &st, require_signed, allow_uns);
988 if changed && !auto_apply {
989 tracing::info!(
990 source = %source_name,
991 "changes detected but auto-apply is disabled — run 'cfgd sync' interactively"
992 );
993 }
994 }).await.map_err(|e| DaemonError::WatchError {
995 message: format!("sync task failed: {}", e),
996 })?;
997 }
998 }
999
1000 _ = version_check_timer.tick() => {
1001 tracing::trace!("version check tick");
1002 let st = Arc::clone(&state);
1003 let nt = Arc::clone(¬ifier);
1004 tokio::task::spawn_blocking(move || {
1005 handle_version_check(&st, &nt);
1006 }).await.map_err(|e| DaemonError::WatchError {
1007 message: format!("version check task failed: {}", e),
1008 })?;
1009 }
1010
1011 _ = async {
1012 match compliance_timer.as_mut() {
1013 Some(timer) => timer.tick().await,
1014 None => std::future::pending().await,
1015 }
1016 } => {
1017 tracing::trace!("compliance snapshot tick");
1018 if let Some(ref cc) = compliance_config {
1019 let cp = config_path.clone();
1020 let po = profile_override.clone();
1021 let hk = Arc::clone(&hooks);
1022 let cc2 = cc.clone();
1023 tokio::task::spawn_blocking(move || {
1024 handle_compliance_snapshot(&cp, po.as_deref(), &*hk, &cc2);
1025 }).await.map_err(|e| DaemonError::WatchError {
1026 message: format!("compliance snapshot task failed: {}", e),
1027 })?;
1028 }
1029 }
1030
1031 _ = recv_sighup(&mut sighup_signal) => {
1034 printer.info("Reloading configuration (SIGHUP)...");
1035 match config::load_config(&config_path) {
1036 Ok(new_cfg) => {
1037 let mut changed = Vec::new();
1041 if let Some(ref rc) = new_cfg.spec.daemon.as_ref().and_then(|d| d.reconcile.clone()) {
1042 let new_interval = parse_duration_or_default(&rc.interval);
1043 reconcile_timer = tokio::time::interval(new_interval);
1044 reconcile_timer.tick().await; changed.push(format!("reconcile={:?}", new_interval));
1046 }
1047 if let Some(ref sc) = new_cfg.spec.daemon.as_ref().and_then(|d| d.sync.clone()) {
1048 let new_interval = parse_duration_or_default(&sc.interval);
1049 sync_timer = tokio::time::interval(new_interval);
1050 sync_timer.tick().await;
1051 changed.push(format!("sync={:?}", new_interval));
1052 }
1053 if changed.is_empty() {
1054 printer.info("Config validated; no timer changes detected");
1055 } else {
1056 printer.success(&format!("Timer intervals reloaded: {}", changed.join(", ")));
1057 }
1058 }
1059 Err(e) => {
1060 printer.warning(&format!("Config reload failed: {}", e));
1061 }
1062 }
1063 }
1064
1065 _ = recv_sigterm(&mut sigterm_signal) => {
1066 printer.info("Received SIGTERM, shutting down daemon...");
1067 break;
1068 }
1069
1070 _ = tokio::signal::ctrl_c() => {
1071 printer.newline();
1072 printer.info("Shutting down daemon...");
1073 break;
1074 }
1075 }
1076 }
1077
1078 health_handle.abort();
1080 let _ = health_handle.await;
1081 #[cfg(unix)]
1083 {
1084 let socket_path = PathBuf::from(DEFAULT_IPC_PATH);
1085 if socket_path.exists() {
1086 let _ = std::fs::remove_file(&socket_path);
1087 }
1088 }
1089
1090 printer.success("Daemon stopped");
1091 Ok(())
1092}
1093
1094fn setup_file_watcher(
1097 tx: mpsc::Sender<PathBuf>,
1098 managed_paths: &[PathBuf],
1099 config_dir: &Path,
1100) -> Result<RecommendedWatcher> {
1101 let sender = tx.clone();
1102 let mut watcher =
1103 notify::recommended_watcher(move |res: std::result::Result<Event, notify::Error>| {
1104 if let Ok(event) = res {
1105 match event.kind {
1106 EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_) => {
1107 for path in event.paths {
1108 match sender.try_send(path) {
1109 Ok(()) => {}
1110 Err(mpsc::error::TrySendError::Full(_)) => {
1111 tracing::debug!("file watcher channel full — event coalesced");
1112 }
1113 Err(e) => {
1114 tracing::warn!(error = %e, "file watcher event dropped");
1115 }
1116 }
1117 }
1118 }
1119 _ => {}
1120 }
1121 }
1122 })
1123 .map_err(|e| DaemonError::WatchError {
1124 message: format!("failed to create file watcher: {}", e),
1125 })?;
1126
1127 for path in managed_paths {
1129 if path.exists() {
1130 if let Err(e) = watcher.watch(path, RecursiveMode::NonRecursive) {
1131 tracing::warn!(path = %path.display(), error = %e, "cannot watch path");
1132 }
1133 } else if let Some(parent) = path.parent() {
1134 if parent.exists()
1136 && let Err(e) = watcher.watch(parent, RecursiveMode::NonRecursive)
1137 {
1138 tracing::warn!(path = %parent.display(), error = %e, "cannot watch path");
1139 }
1140 }
1141 }
1142
1143 if config_dir.exists()
1145 && let Err(e) = watcher.watch(config_dir, RecursiveMode::Recursive)
1146 {
1147 tracing::warn!(path = %config_dir.display(), error = %e, "cannot watch config dir");
1148 }
1149
1150 Ok(watcher)
1151}
1152
1153fn discover_managed_paths(
1154 config_path: &Path,
1155 profile_override: Option<&str>,
1156 hooks: &dyn DaemonHooks,
1157) -> Vec<PathBuf> {
1158 let cfg = match config::load_config(config_path) {
1159 Ok(c) => c,
1160 Err(e) => {
1161 tracing::warn!(error = %e, "cannot load config for file discovery");
1162 return Vec::new();
1163 }
1164 };
1165
1166 let profiles_dir = config_path
1167 .parent()
1168 .unwrap_or_else(|| Path::new("."))
1169 .join("profiles");
1170 let profile_name = match profile_override.or(cfg.spec.profile.as_deref()) {
1171 Some(p) => p,
1172 None => return Vec::new(),
1173 };
1174
1175 let resolved = match config::resolve_profile(profile_name, &profiles_dir) {
1176 Ok(r) => r,
1177 Err(e) => {
1178 tracing::warn!(error = %e, "cannot resolve profile for file discovery");
1179 return Vec::new();
1180 }
1181 };
1182
1183 resolved
1184 .merged
1185 .files
1186 .managed
1187 .iter()
1188 .map(|f| hooks.expand_tilde(&f.target))
1189 .collect()
1190}
1191
/// Runs a single reconciliation pass for the configured profile.
///
/// Steps, in order: acquire the apply lock (skipping if another apply is in
/// flight), load config and resolve the profile, process per-source pending
/// decisions, plan file/package/module actions, record drift, optionally run
/// `onDrift` scripts, then either auto-apply or notify depending on the
/// configured drift policy, and finally check in with the server. Every
/// failure path logs and returns early — the daemon stays alive and retries
/// on the next reconcile tick.
fn handle_reconcile(
    config_path: &Path,
    profile_override: Option<&str>,
    state: &Arc<Mutex<DaemonState>>,
    notifier: &Arc<Notifier>,
    notify_on_drift: bool,
    hooks: &dyn DaemonHooks,
    state_dir_override: Option<&Path>,
) {
    tracing::info!("running reconciliation check");

    // Resolve the state directory (override used by tests / custom setups).
    let state_dir = match state_dir_override {
        Some(d) => d.to_path_buf(),
        None => match crate::state::default_state_dir() {
            Ok(d) => d,
            Err(e) => {
                tracing::error!(error = %e, "reconcile: cannot determine state directory");
                return;
            }
        },
    };
    // Hold the apply lock for the whole pass so we never race a manual
    // `cfgd apply`; a held lock is expected, so it is only a debug log.
    let _lock = match crate::acquire_apply_lock(&state_dir) {
        Ok(guard) => guard,
        Err(crate::errors::CfgdError::State(crate::errors::StateError::ApplyLockHeld {
            ref holder,
        })) => {
            tracing::debug!(holder = %holder, "reconcile: skipping — apply lock held");
            return;
        }
        Err(e) => {
            tracing::warn!(error = %e, "reconcile: cannot acquire apply lock");
            return;
        }
    };

    let cfg = match config::load_config(config_path) {
        Ok(c) => c,
        Err(e) => {
            tracing::error!(error = %e, "reconcile: config load failed");
            return;
        }
    };

    let config_dir = config_path
        .parent()
        .unwrap_or_else(|| Path::new("."))
        .to_path_buf();
    let profiles_dir = config_dir.join("profiles");
    // CLI/daemon override takes precedence over the profile named in config.
    let profile_name = match profile_override.or(cfg.spec.profile.as_deref()) {
        Some(p) => p,
        None => {
            tracing::error!("no profile configured — skipping reconciliation");
            return;
        }
    };

    let resolved = match config::resolve_profile(profile_name, &profiles_dir) {
        Ok(r) => r,
        Err(e) => {
            tracing::error!(error = %e, "reconcile: profile resolution failed");
            return;
        }
    };

    let mut registry = hooks.build_registry(&cfg);
    hooks.extend_registry_custom_managers(&mut registry, &resolved.merged.packages);
    // Open the state store, honouring the directory override when present.
    let store = match state_dir_override {
        Some(d) => {
            std::fs::create_dir_all(d).ok();
            match StateStore::open(&d.join("cfgd.db")) {
                Ok(s) => s,
                Err(e) => {
                    tracing::error!(error = %e, "reconcile: state store error");
                    return;
                }
            }
        }
        None => match StateStore::open_default() {
            Ok(s) => s,
            Err(e) => {
                tracing::error!(error = %e, "reconcile: state store error");
                return;
            }
        },
    };

    let auto_apply = cfg
        .spec
        .daemon
        .as_ref()
        .and_then(|d| d.reconcile.as_ref())
        .map(|r| r.auto_apply)
        .unwrap_or(false);

    // When auto-apply is enabled and remote sources exist, diff each source's
    // declared resources against its last-seen state and queue pending
    // decisions; the returned resource ids are excluded from apply below.
    let pending_exclusions = if auto_apply && !cfg.spec.sources.is_empty() {
        let default_policy = AutoApplyPolicyConfig::default();
        let policy = cfg
            .spec
            .daemon
            .as_ref()
            .and_then(|d| d.reconcile.as_ref())
            .and_then(|r| r.policy.as_ref())
            .unwrap_or(&default_policy);

        let mut all_excluded = HashSet::new();
        for source_spec in &cfg.spec.sources {
            let excluded = process_source_decisions(
                &store,
                &source_spec.name,
                &resolved.merged,
                policy,
                notifier,
            );
            all_excluded.extend(excluded);
        }

        // Auto-reject pending decisions belonging to sources that were
        // removed from the config, so stale decisions don't linger forever.
        let source_names: HashSet<&str> =
            cfg.spec.sources.iter().map(|s| s.name.as_str()).collect();
        if let Ok(all_pending) = store.pending_decisions() {
            for decision in &all_pending {
                if !source_names.contains(decision.source.as_str())
                    && let Err(e) = store.resolve_decisions_for_source(&decision.source, "rejected")
                {
                    tracing::warn!(
                        source = %decision.source,
                        error = %e,
                        "failed to auto-reject decisions for removed source"
                    );
                }
            }
        }

        all_excluded
    } else {
        HashSet::new()
    };

    let reconciler = crate::reconciler::Reconciler::new(&registry, &store);

    let available_managers = registry.available_package_managers();
    let pkg_actions = match hooks.plan_packages(&resolved.merged, &available_managers) {
        Ok(a) => a,
        Err(e) => {
            tracing::error!(error = %e, "reconcile: package planning failed");
            return;
        }
    };

    let file_actions = match hooks.plan_files(&config_dir, &resolved) {
        Ok(a) => a,
        Err(e) => {
            tracing::error!(error = %e, "reconcile: file planning failed");
            return;
        }
    };

    // Resolve module definitions when the profile uses modules; resolution
    // failure degrades to "no modules" with a warning rather than aborting.
    let resolved_modules = if !resolved.merged.modules.is_empty() {
        let platform = crate::platform::Platform::detect();
        let mgr_map: std::collections::HashMap<String, &dyn PackageManager> = registry
            .package_managers
            .iter()
            .map(|m| (m.name().to_string(), m.as_ref() as &dyn PackageManager))
            .collect();
        let cache_base = crate::modules::default_module_cache_dir()
            .unwrap_or_else(|_| config_dir.join(".module-cache"));
        let quiet_printer = crate::output::Printer::new(crate::output::Verbosity::Quiet);
        match crate::modules::resolve_modules(
            &resolved.merged.modules,
            &config_dir,
            &cache_base,
            &platform,
            &mgr_map,
            &quiet_printer,
        ) {
            Ok(m) => m,
            Err(e) => {
                tracing::warn!(error = %e, "reconcile: module resolution failed");
                Vec::new()
            }
        }
    } else {
        Vec::new()
    };
    // plan() consumes the module list; keep a copy for the apply call below.
    let resolved_modules_ref = resolved_modules.clone();
    let plan = match reconciler.plan(
        &resolved,
        file_actions,
        pkg_actions,
        resolved_modules,
        crate::reconciler::ReconcileContext::Reconcile,
    ) {
        Ok(p) => p,
        Err(e) => {
            tracing::error!(error = %e, "reconcile: plan generation failed");
            return;
        }
    };

    // Count planned actions, skipping resources excluded by pending decisions.
    let effective_total = if pending_exclusions.is_empty() {
        plan.total_actions()
    } else {
        let mut count = 0usize;
        for phase in &plan.phases {
            for action in &phase.actions {
                let (_rtype, rid) = action_resource_info(action);
                if !pending_exclusions.contains(&rid) {
                    count += 1;
                }
            }
        }
        count
    };

    let timestamp = crate::utc_now_iso8601();

    // Record the reconcile timestamp in shared daemon state (tokio mutex,
    // so briefly hop onto the async runtime).
    let rt = tokio::runtime::Handle::current();
    rt.block_on(async {
        let mut st = state.lock().await;
        st.last_reconcile = Some(timestamp.clone());
        if let Some(source) = st.sources.first_mut() {
            source.last_reconcile = Some(timestamp);
        }
    });

    if effective_total == 0 {
        tracing::debug!("reconcile: no drift detected");
    } else {
        tracing::info!(actions = effective_total, "reconcile: drift detected");

        // Persist one drift event per non-excluded action for `cfgd status`.
        for phase in &plan.phases {
            for action in &phase.actions {
                let (rtype, rid) = action_resource_info(action);
                if pending_exclusions.contains(&rid) {
                    continue;
                }
                if let Err(e) =
                    store.record_drift(&rtype, &rid, None, Some("drift detected"), "local")
                {
                    tracing::warn!(error = %e, "failed to record drift");
                }
            }
        }

        // Run any user-configured onDrift hook scripts (best-effort; script
        // failure is logged, never fatal).
        if !resolved.merged.scripts.on_drift.is_empty() {
            let scripts = &resolved.merged.scripts;
            tracing::info!(count = scripts.on_drift.len(), "running onDrift script(s)");
            let script_env = crate::reconciler::build_script_env(
                &config_dir,
                profile_name,
                crate::reconciler::ReconcileContext::Reconcile,
                &crate::reconciler::ScriptPhase::OnDrift,
                false,
                None,
                None,
            );
            let printer = Printer::new(Verbosity::Quiet);
            let default_timeout = crate::PROFILE_SCRIPT_TIMEOUT;
            for entry in &scripts.on_drift {
                match crate::reconciler::execute_script(
                    entry,
                    &config_dir,
                    &script_env,
                    default_timeout,
                    &printer,
                ) {
                    Ok((desc, _, _)) => {
                        tracing::info!(script = %desc, "onDrift script completed");
                    }
                    Err(e) => {
                        tracing::error!(error = %e, "onDrift script failed");
                    }
                }
            }
        }

        // Bump drift counters in shared daemon state.
        rt.block_on(async {
            let mut st = state.lock().await;
            st.drift_count += effective_total as u32;
            if let Some(source) = st.sources.first_mut() {
                source.drift_count += effective_total as u32;
            }
        });

        let drift_policy = cfg
            .spec
            .daemon
            .as_ref()
            .and_then(|d| d.reconcile.as_ref())
            .map(|r| r.drift_policy.clone())
            .unwrap_or_default();

        match drift_policy {
            config::DriftPolicy::Auto => {
                tracing::info!(
                    actions = effective_total,
                    "drift policy is Auto — applying actions"
                );
                let printer = Printer::new(Verbosity::Quiet);
                match reconciler.apply(
                    &plan,
                    &resolved,
                    &config_dir,
                    &printer,
                    None,
                    &resolved_modules_ref,
                    crate::reconciler::ReconcileContext::Reconcile,
                    false,
                ) {
                    Ok(result) => {
                        let succeeded = result.succeeded();
                        let failed = result.failed();
                        tracing::info!(
                            succeeded = succeeded,
                            failed = failed,
                            "auto-apply complete"
                        );
                        if failed > 0 && notify_on_drift {
                            notifier.notify(
                                "cfgd: auto-apply partial failure",
                                &format!(
                                    "{} action(s) succeeded, {} failed. Run `cfgd status` for details.",
                                    succeeded, failed
                                ),
                            );
                        } else if notify_on_drift {
                            notifier.notify(
                                "cfgd: auto-apply succeeded",
                                &format!("{} action(s) applied successfully.", succeeded),
                            );
                        }
                    }
                    Err(e) => {
                        tracing::error!(error = %e, "auto-apply failed");
                        if notify_on_drift {
                            notifier.notify(
                                "cfgd: auto-apply failed",
                                &format!("Auto-apply failed: {}. Run `cfgd apply` manually.", e),
                            );
                        }
                    }
                }
            }
            config::DriftPolicy::NotifyOnly | config::DriftPolicy::Prompt => {
                tracing::info!("drift policy is NotifyOnly — recording drift, not applying");
                if notify_on_drift {
                    notifier.notify(
                        "cfgd: drift detected",
                        &format!(
                            "{} resource(s) have drifted from desired state. Run `cfgd apply` to reconcile.",
                            effective_total
                        ),
                    );
                }
            }
        }
    }

    // Check in with the management server (if configured); a "changed" reply
    // is informational here — the next tick picks up the new config.
    let changed = try_server_checkin(&cfg, &resolved);
    if changed {
        tracing::info!(
            "reconcile: server reports config has changed — will reconcile on next tick"
        );
    }

    // Consume (log + clear) any pending server-pushed config overrides.
    match crate::state::load_pending_server_config() {
        Ok(Some(pending)) => {
            let keys: Vec<String> = pending
                .as_object()
                .map(|obj| obj.keys().cloned().collect())
                .unwrap_or_default();
            tracing::info!(
                keys = ?keys,
                "consumed pending server config — next reconcile will pick up changes"
            );
            if let Err(e) = crate::state::clear_pending_server_config() {
                tracing::warn!(error = %e, "failed to clear pending server config");
            }
        }
        Ok(None) => {}
        Err(e) => {
            tracing::warn!(error = %e, "failed to load pending server config");
        }
    }
}
1593
1594fn action_resource_info(action: &crate::reconciler::Action) -> (String, String) {
1595 use crate::providers::{FileAction, PackageAction, SecretAction};
1596 use crate::reconciler::Action;
1597
1598 match action {
1599 Action::File(fa) => match fa {
1600 FileAction::Create { target, .. } => ("file".to_string(), target.display().to_string()),
1601 FileAction::Update { target, .. } => ("file".to_string(), target.display().to_string()),
1602 FileAction::Delete { target, .. } => ("file".to_string(), target.display().to_string()),
1603 FileAction::SetPermissions { target, .. } => {
1604 ("file".to_string(), target.display().to_string())
1605 }
1606 FileAction::Skip { target, .. } => ("file".to_string(), target.display().to_string()),
1607 },
1608 Action::Package(pa) => match pa {
1609 PackageAction::Bootstrap { manager, .. } => {
1610 ("package".to_string(), format!("{}:bootstrap", manager))
1611 }
1612 PackageAction::Install {
1613 manager, packages, ..
1614 } => (
1615 "package".to_string(),
1616 format!("{}:{}", manager, packages.join(",")),
1617 ),
1618 PackageAction::Uninstall {
1619 manager, packages, ..
1620 } => (
1621 "package".to_string(),
1622 format!("{}:{}", manager, packages.join(",")),
1623 ),
1624 PackageAction::Skip { manager, .. } => ("package".to_string(), manager.clone()),
1625 },
1626 Action::Secret(sa) => match sa {
1627 SecretAction::Decrypt { target, .. } => {
1628 ("secret".to_string(), target.display().to_string())
1629 }
1630 SecretAction::Resolve { reference, .. } => ("secret".to_string(), reference.clone()),
1631 SecretAction::ResolveEnv { envs, .. } => {
1632 ("secret".to_string(), format!("env:[{}]", envs.join(",")))
1633 }
1634 SecretAction::Skip { source, .. } => ("secret".to_string(), source.clone()),
1635 },
1636 Action::System(sa) => {
1637 use crate::reconciler::SystemAction;
1638 match sa {
1639 SystemAction::SetValue {
1640 configurator, key, ..
1641 } => ("system".to_string(), format!("{}:{}", configurator, key)),
1642 SystemAction::Skip { configurator, .. } => {
1643 ("system".to_string(), configurator.clone())
1644 }
1645 }
1646 }
1647 Action::Script(sa) => {
1648 use crate::reconciler::ScriptAction;
1649 match sa {
1650 ScriptAction::Run { entry, .. } => {
1651 ("script".to_string(), entry.run_str().to_string())
1652 }
1653 }
1654 }
1655 Action::Module(ma) => ("module".to_string(), ma.module_name.clone()),
1656 Action::Env(ea) => {
1657 use crate::reconciler::EnvAction;
1658 match ea {
1659 EnvAction::WriteEnvFile { path, .. } => {
1660 ("env".to_string(), path.display().to_string())
1661 }
1662 EnvAction::InjectSourceLine { rc_path, .. } => {
1663 ("env-rc".to_string(), rc_path.display().to_string())
1664 }
1665 }
1666 }
1667 }
1668}
1669
1670fn extract_source_resources(merged: &MergedProfile) -> HashSet<String> {
1675 let mut resources = HashSet::new();
1676
1677 let pkgs = &merged.packages;
1678 if let Some(ref brew) = pkgs.brew {
1679 for f in &brew.formulae {
1680 resources.insert(format!("packages.brew.{}", f));
1681 }
1682 for c in &brew.casks {
1683 resources.insert(format!("packages.brew.{}", c));
1684 }
1685 }
1686 if let Some(ref apt) = pkgs.apt {
1687 for p in &apt.packages {
1688 resources.insert(format!("packages.apt.{}", p));
1689 }
1690 }
1691 if let Some(ref cargo) = pkgs.cargo {
1692 for p in &cargo.packages {
1693 resources.insert(format!("packages.cargo.{}", p));
1694 }
1695 }
1696 for p in &pkgs.pipx {
1697 resources.insert(format!("packages.pipx.{}", p));
1698 }
1699 for p in &pkgs.dnf {
1700 resources.insert(format!("packages.dnf.{}", p));
1701 }
1702 if let Some(ref npm) = pkgs.npm {
1703 for p in &npm.global {
1704 resources.insert(format!("packages.npm.{}", p));
1705 }
1706 }
1707
1708 for file in &merged.files.managed {
1709 resources.insert(format!("files.{}", file.target.display()));
1710 }
1711
1712 for ev in &merged.env {
1713 resources.insert(format!("env.{}", ev.name));
1714 }
1715
1716 for k in merged.system.keys() {
1717 resources.insert(format!("system.{}", k));
1718 }
1719
1720 resources
1721}
1722
1723fn hash_resources(resources: &HashSet<String>) -> String {
1725 let mut sorted: Vec<&String> = resources.iter().collect();
1726 sorted.sort();
1727 let combined: String = sorted.iter().map(|r| format!("{}\n", r)).collect();
1728 crate::sha256_hex(combined.as_bytes())
1729}
1730
/// Diffs a source's currently-declared resources against the last-seen state
/// and, per the auto-apply policy, records pending decisions for new items.
///
/// Returns the resource paths of ALL currently-pending decisions (across
/// sources) — the caller excludes these resources from auto-apply.
fn process_source_decisions(
    store: &StateStore,
    source_name: &str,
    merged: &MergedProfile,
    policy: &AutoApplyPolicyConfig,
    notifier: &Notifier,
) -> HashSet<String> {
    let current_resources = extract_source_resources(merged);
    let current_hash = hash_resources(&current_resources);

    let previous_hash = store
        .source_config_hash(source_name)
        .ok()
        .flatten()
        .map(|h| h.config_hash);

    // Unchanged config: nothing new to evaluate, but still report whatever
    // decisions are already pending.
    if previous_hash.as_deref() == Some(&current_hash) {
        return pending_resource_paths(store);
    }

    // Reconstruct the previously-known resource set from state: everything
    // already managed for this source plus anything already pending. On the
    // very first run (no previous hash) the set is empty, so every current
    // resource counts as "new".
    let previous_resources: HashSet<String> = if previous_hash.is_some() {
        let mut known = HashSet::new();
        if let Ok(managed) = store.managed_resources_by_source(source_name) {
            for r in &managed {
                known.insert(format!("{}.{}", r.resource_type, r.resource_id));
            }
        }
        if let Ok(decisions) = store.pending_decisions_for_source(source_name) {
            for d in &decisions {
                known.insert(d.resource.clone());
            }
        }
        known
    } else {
        HashSet::new()
    };

    let new_items: Vec<&String> = current_resources
        .iter()
        .filter(|r| !previous_resources.contains(*r))
        .collect();

    let mut new_pending_count = 0u32;

    for resource in &new_items {
        // Tier drives which policy knob applies to this item.
        let tier = infer_item_tier(resource);
        let policy_action = match tier {
            "recommended" => &policy.new_recommended,
            "optional" => &policy.new_optional,
            "locked" => &policy.locked_conflict,
            _ => &policy.new_recommended,
        };

        match policy_action {
            PolicyAction::Accept => {
                // Accepted items flow straight into the normal plan/apply.
            }
            PolicyAction::Reject | PolicyAction::Ignore => {
                // Rejected/ignored items are simply not tracked.
            }
            PolicyAction::Notify => {
                // Queue the item for explicit user review via `cfgd decide`.
                let summary = format!("{} {} (from {})", tier, resource, source_name);
                if let Err(e) =
                    store.upsert_pending_decision(source_name, resource, tier, "install", &summary)
                {
                    tracing::warn!(error = %e, "failed to record pending decision");
                } else {
                    new_pending_count += 1;
                }
            }
        }
    }

    // One aggregate notification per source, not one per item.
    if new_pending_count > 0 {
        notifier.notify(
            "cfgd: pending decisions",
            &format!(
                "Source \"{}\" has {} new {} item{} pending your review.\n\
                 Run `cfgd status` to see details, `cfgd decide accept --source {}` to accept all.",
                source_name,
                new_pending_count,
                if new_pending_count == 1 {
                    "recommended"
                } else {
                    "recommended/optional"
                },
                if new_pending_count == 1 { "" } else { "s" },
                source_name,
            ),
        );
    }

    // Remember this config shape so the next run only diffs against it.
    if let Err(e) = store.set_source_config_hash(source_name, &current_hash) {
        tracing::warn!(error = %e, "failed to store source config hash");
    }

    pending_resource_paths(store)
}
1845
1846fn pending_resource_paths(store: &StateStore) -> HashSet<String> {
1848 store
1849 .pending_decisions()
1850 .unwrap_or_default()
1851 .into_iter()
1852 .map(|d| d.resource)
1853 .collect()
1854}
1855
/// Heuristic tiering for a resource path: anything mentioning security,
/// policy, or locked is treated as a "locked" item; everything else defaults
/// to "recommended".
fn infer_item_tier(resource: &str) -> &'static str {
    const LOCKED_MARKERS: [&str; 3] = ["security", "policy", "locked"];
    if LOCKED_MARKERS.iter().any(|marker| resource.contains(marker)) {
        "locked"
    } else {
        "recommended"
    }
}
1868
/// Performs one git sync cycle for a source repo: optional pull (with
/// commit-signature verification), optional auto-commit + push, then updates
/// the per-source sync timestamps in shared daemon state.
///
/// Returns `true` when the pull brought down new commits (the caller uses
/// this to decide whether a reconcile is needed).
fn handle_sync(
    repo_path: &Path,
    auto_pull: bool,
    auto_push: bool,
    source_name: &str,
    state: &Arc<Mutex<DaemonState>>,
    require_signed_commits: bool,
    allow_unsigned: bool,
) -> bool {
    let timestamp = crate::utc_now_iso8601();
    let mut changes = false;

    if auto_pull {
        match git_pull(repo_path) {
            Ok(true) => {
                // A pull landed new commits: enforce signature policy before
                // treating them as trusted. `allow_unsigned` is an explicit
                // per-source escape hatch.
                if require_signed_commits
                    && !allow_unsigned
                    && let Err(e) = crate::sources::verify_head_signature(source_name, repo_path)
                {
                    tracing::error!(
                        source = %source_name,
                        error = %e,
                        "sync: signature verification failed after pull"
                    );
                    // NOTE(review): returns before the timestamp update
                    // below, so a failed verification leaves last_sync stale
                    // — presumably intentional; confirm.
                    return false;
                }
                tracing::info!("sync: pulled new changes from remote");
                changes = true;
            }
            Ok(false) => tracing::debug!("sync: already up to date"),
            Err(e) => tracing::warn!(error = %e, "sync: pull failed"),
        }
    }

    // Push failures are logged but do not affect the return value.
    if auto_push {
        match git_auto_commit_push(repo_path) {
            Ok(true) => tracing::info!("sync: pushed local changes to remote"),
            Ok(false) => tracing::debug!("sync: nothing to push"),
            Err(e) => tracing::warn!(error = %e, "sync: push failed"),
        }
    }

    // Record the sync time both globally and on the matching source entry.
    let rt = tokio::runtime::Handle::current();
    let source = source_name.to_string();
    let ts = timestamp.clone();
    rt.block_on(async {
        let mut st = state.lock().await;
        st.last_sync = Some(timestamp);
        for s in &mut st.sources {
            if s.name == source {
                s.last_sync = Some(ts.clone());
            }
        }
    });

    changes
}
1931
1932fn handle_version_check(state: &Arc<Mutex<DaemonState>>, notifier: &Arc<Notifier>) {
1935 tracing::info!("checking for cfgd updates");
1936
1937 match crate::upgrade::check_with_cache(None, None) {
1938 Ok(check) => {
1939 if check.update_available {
1940 let version_str = check.latest.to_string();
1941 tracing::info!(
1942 current = %check.current,
1943 latest = %check.latest,
1944 "update available"
1945 );
1946
1947 let rt = tokio::runtime::Handle::current();
1949 let already_notified = rt.block_on(async {
1950 let st = state.lock().await;
1951 st.update_available.as_deref() == Some(version_str.as_str())
1952 });
1953
1954 let vs = version_str.clone();
1956 let st = Arc::clone(state);
1957 rt.block_on(async {
1958 let mut st = st.lock().await;
1959 st.update_available = Some(vs);
1960 });
1961
1962 if !already_notified {
1964 notifier.notify(
1965 "cfgd: update available",
1966 &format!(
1967 "Version {} is available (current: {}). Run 'cfgd upgrade' to update.",
1968 version_str, check.current
1969 ),
1970 );
1971 }
1972 } else {
1973 tracing::debug!(
1974 version = %check.current,
1975 "cfgd is up to date"
1976 );
1977 }
1978 }
1979 Err(e) => {
1980 tracing::warn!(error = %e, "version check failed");
1981 }
1982 }
1983}
1984
/// Collects a compliance snapshot for the active profile and stores it,
/// deduplicating by content hash, exporting to file, and pruning stored
/// snapshots older than the configured retention window.
fn handle_compliance_snapshot(
    config_path: &Path,
    profile_override: Option<&str>,
    hooks: &dyn DaemonHooks,
    compliance_cfg: &config::ComplianceConfig,
) {
    tracing::info!("running compliance snapshot");

    let cfg = match config::load_config(config_path) {
        Ok(c) => c,
        Err(e) => {
            tracing::error!(error = %e, "compliance: config load failed");
            return;
        }
    };

    let config_dir = config_path
        .parent()
        .unwrap_or_else(|| Path::new("."))
        .to_path_buf();
    let profiles_dir = config_dir.join("profiles");
    // CLI/daemon override takes precedence over the profile named in config.
    let profile_name = match profile_override.or(cfg.spec.profile.as_deref()) {
        Some(p) => p,
        None => {
            tracing::error!("compliance: no profile configured — skipping");
            return;
        }
    };

    let resolved = match config::resolve_profile(profile_name, &profiles_dir) {
        Ok(r) => r,
        Err(e) => {
            tracing::error!(error = %e, "compliance: profile resolution failed");
            return;
        }
    };

    let mut registry = hooks.build_registry(&cfg);
    hooks.extend_registry_custom_managers(&mut registry, &resolved.merged.packages);

    // "local" is always reported alongside the configured remote sources.
    let source_names: Vec<String> = std::iter::once("local".to_string())
        .chain(cfg.spec.sources.iter().map(|s| s.name.clone()))
        .collect();

    let snapshot = match crate::compliance::collect_snapshot(
        profile_name,
        &resolved.merged,
        &registry,
        &compliance_cfg.scope,
        &source_names,
    ) {
        Ok(s) => s,
        Err(e) => {
            tracing::error!(error = %e, "compliance: snapshot collection failed");
            return;
        }
    };

    let json = match serde_json::to_string_pretty(&snapshot) {
        Ok(j) => j,
        Err(e) => {
            tracing::error!(error = %e, "compliance: snapshot serialization failed");
            return;
        }
    };

    // Hash the serialized snapshot so identical states are stored only once.
    let hash = crate::sha256_hex(json.as_bytes());

    let store = match StateStore::open_default() {
        Ok(s) => s,
        Err(e) => {
            tracing::error!(error = %e, "compliance: state store error");
            return;
        }
    };

    // A failed hash lookup degrades to "no previous snapshot" (store anyway).
    let latest_hash = match store.latest_compliance_hash() {
        Ok(h) => h,
        Err(e) => {
            tracing::warn!(error = %e, "compliance: failed to query latest hash");
            None
        }
    };

    if latest_hash.as_deref() == Some(&hash) {
        tracing::debug!("compliance: no state change, skipping snapshot");
        return;
    }

    if let Err(e) = store.store_compliance_snapshot(&snapshot, &hash) {
        tracing::error!(error = %e, "compliance: failed to store snapshot");
        return;
    }

    tracing::info!(
        compliant = snapshot.summary.compliant,
        warning = snapshot.summary.warning,
        violation = snapshot.summary.violation,
        "compliance snapshot stored"
    );

    match crate::compliance::export_snapshot_to_file(&snapshot, &compliance_cfg.export) {
        Ok(file_path) => {
            tracing::info!(path = %file_path.display(), "compliance snapshot exported");
        }
        Err(e) => {
            tracing::error!(error = %e, "compliance: failed to export snapshot");
            return;
        }
    }

    // Prune snapshots older than the retention window (best-effort; an
    // unparseable retention string silently skips pruning).
    if let Ok(retention_dur) = crate::parse_duration_str(&compliance_cfg.retention) {
        let cutoff_secs = crate::unix_secs_now().saturating_sub(retention_dur.as_secs());
        let cutoff_str = crate::unix_secs_to_iso8601(cutoff_secs);
        match store.prune_compliance_snapshots(&cutoff_str) {
            Ok(deleted) if deleted > 0 => {
                tracing::info!(deleted = deleted, "compliance: pruned old snapshots");
            }
            Ok(_) => {}
            Err(e) => {
                tracing::warn!(error = %e, "compliance: failed to prune snapshots");
            }
        }
    }
}
2117
/// Fetches and fast-forwards the repo's current branch from `origin`.
///
/// Returns `Ok(true)` when new commits were applied, `Ok(false)` when the
/// branch was already up to date, and `Err` (human-readable string) on any
/// git failure or when the remote has diverged (no merge is attempted).
fn git_pull(repo_path: &Path) -> std::result::Result<bool, String> {
    let repo = git2::Repository::open(repo_path).map_err(|e| format!("open repo: {}", e))?;

    let head = repo.head().map_err(|e| format!("get HEAD: {}", e))?;
    let branch_name = head
        .shorthand()
        .ok_or_else(|| "cannot determine branch name".to_string())?;

    // Try the system `git` CLI first; fall back to libgit2 with SSH-key
    // callbacks when the CLI path fails.
    let remote_url = repo
        .find_remote("origin")
        .ok()
        .and_then(|r| r.url().map(String::from));
    let repo_dir = &repo_path.display().to_string();
    let cli_ok = crate::try_git_cmd(
        remote_url.as_deref(),
        &["-C", repo_dir, "fetch", "origin", branch_name],
        "fetch",
        None,
    );

    if !cli_ok {
        let mut remote = repo
            .find_remote("origin")
            .map_err(|e| format!("find remote: {}", e))?;
        let mut fetch_opts = git2::FetchOptions::new();
        let mut callbacks = git2::RemoteCallbacks::new();
        callbacks.credentials(crate::git_ssh_credentials);
        fetch_opts.remote_callbacks(callbacks);
        remote
            .fetch(&[branch_name], Some(&mut fetch_opts), None)
            .map_err(|e| format!("fetch: {}", e))?;
    }

    // FETCH_HEAD points at what the fetch brought down; compare it to HEAD.
    let fetch_head = repo
        .find_reference("FETCH_HEAD")
        .map_err(|e| format!("find FETCH_HEAD: {}", e))?;
    let fetch_commit = repo
        .reference_to_annotated_commit(&fetch_head)
        .map_err(|e| format!("resolve FETCH_HEAD: {}", e))?;

    let (analysis, _) = repo
        .merge_analysis(&[&fetch_commit])
        .map_err(|e| format!("merge analysis: {}", e))?;

    if analysis.is_up_to_date() {
        return Ok(false);
    }

    // Fast-forward only: move the branch ref, re-point HEAD, and force a
    // checkout so the worktree matches the new commit.
    if analysis.is_fast_forward() {
        let refname = format!("refs/heads/{}", branch_name);
        let mut reference = repo
            .find_reference(&refname)
            .map_err(|e| format!("find ref: {}", e))?;
        reference
            .set_target(fetch_commit.id(), "cfgd: fast-forward pull")
            .map_err(|e| format!("set target: {}", e))?;
        repo.set_head(&refname)
            .map_err(|e| format!("set HEAD: {}", e))?;
        repo.checkout_head(Some(git2::build::CheckoutBuilder::default().force()))
            .map_err(|e| format!("checkout: {}", e))?;
        return Ok(true);
    }

    Err("cannot fast-forward — remote has diverged".to_string())
}
2186
/// Stages all changes in the repo, commits them (if anything changed), and
/// pushes the current branch to `origin`.
///
/// Returns `Ok(false)` when there was nothing to commit, `Ok(true)` after a
/// successful commit + push, `Err` (human-readable string) otherwise.
fn git_auto_commit_push(repo_path: &Path) -> std::result::Result<bool, String> {
    let repo = git2::Repository::open(repo_path).map_err(|e| format!("open repo: {}", e))?;

    // Stage everything matching "*" into the index.
    let mut index = repo.index().map_err(|e| format!("get index: {}", e))?;
    index
        .add_all(["*"].iter(), git2::IndexAddOption::DEFAULT, None)
        .map_err(|e| format!("stage changes: {}", e))?;
    index.write().map_err(|e| format!("write index: {}", e))?;

    let diff = repo
        .diff_index_to_workdir(Some(&index), None)
        .map_err(|e| format!("diff: {}", e))?;

    // HEAD tree is absent in a freshly-initialized repo (no commits yet).
    let head_tree = repo.head().ok().and_then(|h| h.peel_to_tree().ok());

    let staged_diff = if let Some(ref tree) = head_tree {
        repo.diff_tree_to_index(Some(tree), Some(&index), None)
            .map_err(|e| format!("staged diff: {}", e))?
    } else {
        repo.diff_tree_to_index(None, Some(&index), None)
            .map_err(|e| format!("staged diff: {}", e))?
    };

    // Nothing staged and nothing dirty: skip commit + push entirely.
    if diff.stats().map(|s| s.files_changed()).unwrap_or(0) == 0
        && staged_diff.stats().map(|s| s.files_changed()).unwrap_or(0) == 0
    {
        return Ok(false);
    }

    let tree_oid = index
        .write_tree()
        .map_err(|e| format!("write tree: {}", e))?;
    let tree = repo
        .find_tree(tree_oid)
        .map_err(|e| format!("find tree: {}", e))?;

    let signature = repo
        .signature()
        .map_err(|e| format!("get signature: {}", e))?;

    // First commit in a repo has no parent.
    let parent = repo.head().ok().and_then(|h| h.peel_to_commit().ok());

    let parents: Vec<&git2::Commit> = parent.as_ref().map(|p| vec![p]).unwrap_or_default();

    repo.commit(
        Some("HEAD"),
        &signature,
        &signature,
        "cfgd: auto-commit configuration changes",
        &tree,
        &parents,
    )
    .map_err(|e| format!("commit: {}", e))?;

    let head = repo.head().map_err(|e| format!("get HEAD: {}", e))?;
    let branch_name = head
        .shorthand()
        .ok_or_else(|| "cannot determine branch name".to_string())?;

    let remote_url = repo
        .find_remote("origin")
        .ok()
        .and_then(|r| r.url().map(String::from));

    // Try the system `git` CLI first; fall back to libgit2 with SSH-key
    // callbacks when the CLI path fails.
    let repo_dir = &repo_path.display().to_string();
    let cli_ok = crate::try_git_cmd(
        remote_url.as_deref(),
        &["-C", repo_dir, "push", "origin", branch_name],
        "push",
        None,
    );

    if !cli_ok {
        let mut remote = repo
            .find_remote("origin")
            .map_err(|e| format!("find remote: {}", e))?;

        let mut push_opts = git2::PushOptions::new();
        let mut callbacks = git2::RemoteCallbacks::new();
        callbacks.credentials(crate::git_ssh_credentials);
        push_opts.remote_callbacks(callbacks);

        let refspec = format!("refs/heads/{}:refs/heads/{}", branch_name, branch_name);
        remote
            .push(&[&refspec], Some(&mut push_opts))
            .map_err(|e| format!("push: {}", e))?;
    }

    Ok(true)
}
2282
2283#[cfg(unix)]
2286async fn run_health_server(ipc_path: &str, state: Arc<Mutex<DaemonState>>) -> Result<()> {
2287 let listener = UnixListener::bind(ipc_path).map_err(|e| DaemonError::HealthSocketError {
2288 message: format!("bind {}: {}", ipc_path, e),
2289 })?;
2290
2291 loop {
2292 let (stream, _) = listener
2293 .accept()
2294 .await
2295 .map_err(|e| DaemonError::HealthSocketError {
2296 message: format!("accept: {}", e),
2297 })?;
2298
2299 let state = Arc::clone(&state);
2300 tokio::spawn(async move {
2301 if let Err(e) = handle_health_connection(stream, state).await {
2302 tracing::debug!(error = %e, "health connection error");
2303 }
2304 });
2305 }
2306}
2307
#[cfg(windows)]
/// Serves the health/status IPC endpoint over a Windows named pipe.
///
/// Named-pipe servers accept one client per instance: after a client
/// connects, the next instance is created immediately and the connected one
/// is handed off to its own task. Only returns on create/connect failure.
async fn run_health_server(ipc_path: &str, state: Arc<Mutex<DaemonState>>) -> Result<()> {
    use tokio::net::windows::named_pipe::ServerOptions;

    let make_pipe = |first: bool| {
        ServerOptions::new()
            .first_pipe_instance(first)
            .create(ipc_path)
            .map_err(|e| DaemonError::HealthSocketError {
                message: format!("create pipe {}: {}", ipc_path, e),
            })
    };

    // Only the very first instance may claim `first_pipe_instance`.
    let mut server = make_pipe(true)?;

    loop {
        server
            .connect()
            .await
            .map_err(|e| DaemonError::HealthSocketError {
                message: format!("accept pipe: {}", e),
            })?;

        // Swap in a fresh instance, taking ownership of the connected one.
        let connected = std::mem::replace(&mut server, make_pipe(false)?);

        let conn_state = Arc::clone(&state);
        tokio::spawn(async move {
            if let Err(e) = handle_health_connection(connected, conn_state).await {
                tracing::debug!(error = %e, "health connection error");
            }
        });
    }
}
2343
/// Serves one request on a health IPC connection using a minimal hand-rolled
/// HTTP/1.1 exchange: read the request line and headers, write one JSON
/// response, close.
///
/// Supported paths: `/health`, `/status`, `/drift`; anything else is a 404.
async fn handle_health_connection<S>(
    stream: S,
    state: Arc<Mutex<DaemonState>>,
) -> std::result::Result<(), Box<dyn std::error::Error + Send + Sync>>
where
    S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin,
{
    let (reader, mut writer) = tokio::io::split(stream);
    let mut buf_reader = tokio::io::BufReader::new(reader);

    // Request line looks like "GET /path HTTP/1.1" — only the path is used.
    let mut request_line = String::new();
    buf_reader.read_line(&mut request_line).await?;

    let path = request_line.split_whitespace().nth(1).unwrap_or("/health");

    // Drain headers until the blank line (EOF also yields an empty line,
    // which terminates the loop).
    loop {
        let mut line = String::new();
        buf_reader.read_line(&mut line).await?;
        if line.trim().is_empty() {
            break;
        }
    }

    let st = state.lock().await;

    let (status_code, body) = match path {
        "/health" => {
            let health = serde_json::json!({
                "status": "ok",
                "pid": std::process::id(),
                "uptime_secs": st.started_at.elapsed().as_secs(),
            });
            ("200 OK", serde_json::to_string_pretty(&health)?)
        }
        "/status" => {
            let response = st.to_response();
            ("200 OK", serde_json::to_string_pretty(&response)?)
        }
        "/drift" => {
            // Pull unresolved drift events straight from the state store;
            // a store error degrades to an empty list.
            let store = StateStore::open_default();
            let drift_events = store.and_then(|s| s.unresolved_drift()).unwrap_or_default();

            let drift: Vec<serde_json::Value> = drift_events
                .iter()
                .map(|d| {
                    serde_json::json!({
                        "resource_type": d.resource_type,
                        "resource_id": d.resource_id,
                        "expected": d.expected,
                        "actual": d.actual,
                        "timestamp": d.timestamp,
                    })
                })
                .collect();

            (
                "200 OK",
                serde_json::to_string_pretty(&serde_json::json!({
                    "drift_count": drift.len(),
                    "events": drift,
                }))?,
            )
        }
        _ => (
            "404 Not Found",
            serde_json::json!({"error": "not found"}).to_string(),
        ),
    };

    // Content-Length is required since we close without chunked encoding.
    let response = format!(
        "HTTP/1.1 {}\r\nContent-Type: application/json\r\nContent-Length: {}\r\nConnection: close\r\n\r\n{}",
        status_code,
        body.len(),
        body
    );

    writer.write_all(response.as_bytes()).await?;
    writer.flush().await?;

    Ok(())
}
2428
2429fn record_file_drift_to(store: &StateStore, path: &Path) -> bool {
2432 match store.record_drift(
2433 "file",
2434 &path.display().to_string(),
2435 None,
2436 Some("modified"),
2437 "local",
2438 ) {
2439 Ok(_) => true,
2440 Err(e) => {
2441 tracing::warn!(error = %e, "failed to record drift");
2442 false
2443 }
2444 }
2445}
2446
2447fn record_file_drift(path: &Path) -> bool {
2448 let store = match StateStore::open_default() {
2449 Ok(s) => s,
2450 Err(e) => {
2451 tracing::warn!(error = %e, "cannot open state store for drift recording");
2452 return false;
2453 }
2454 };
2455 record_file_drift_to(&store, path)
2456}
2457
2458pub fn install_service(config_path: &Path, profile: Option<&str>) -> Result<()> {
2462 let cfgd_binary = std::env::current_exe().map_err(|e| DaemonError::ServiceInstallFailed {
2463 message: format!("cannot determine binary path: {}", e),
2464 })?;
2465 #[cfg(windows)]
2466 {
2467 install_windows_service(&cfgd_binary, config_path, profile)
2468 }
2469 #[cfg(unix)]
2470 {
2471 if cfg!(target_os = "macos") {
2472 install_launchd_service(&cfgd_binary, config_path, profile)
2473 } else {
2474 install_systemd_service(&cfgd_binary, config_path, profile)
2475 }
2476 }
2477}
2478
2479pub fn uninstall_service() -> Result<()> {
2480 #[cfg(windows)]
2481 {
2482 uninstall_windows_service()
2483 }
2484 #[cfg(unix)]
2485 {
2486 if cfg!(target_os = "macos") {
2487 uninstall_launchd_service()
2488 } else {
2489 uninstall_systemd_service()
2490 }
2491 }
2492}
2493
/// Creates a Windows Service named "cfgd" via `sc.exe`, then (best-effort)
/// sets its description and starts it.
///
/// # Errors
/// Returns `ServiceInstallFailed` when `sc.exe create` cannot be spawned or
/// exits with a non-zero status. Description/start failures are only logged.
#[cfg(windows)]
fn install_windows_service(binary: &Path, config_path: &Path, profile: Option<&str>) -> Result<()> {
    let config_abs =
        std::fs::canonicalize(config_path).unwrap_or_else(|_| config_path.to_path_buf());
    let config_str = config_abs.display().to_string();
    // canonicalize() on Windows yields verbatim paths (`\\?\C:\...`); strip
    // the prefix so sc.exe stores a conventional path.
    let config_str = config_str.strip_prefix(r"\\?\").unwrap_or(&config_str);

    let binary_str = binary.display().to_string();
    let binary_str = binary_str.strip_prefix(r"\\?\").unwrap_or(&binary_str);

    // sc.exe takes the service's full command line as the binPath= value.
    let mut bin_args = format!(
        "\"{}\" daemon service --config \"{}\"",
        binary_str, config_str,
    );
    if let Some(p) = profile {
        bin_args.push_str(&format!(" --profile \"{}\"", p));
    }

    let output = std::process::Command::new("sc.exe")
        .args([
            "create",
            "cfgd",
            "binPath=",
            &bin_args,
            "start=",
            "auto",
            "DisplayName=",
            "cfgd Configuration Manager",
        ])
        .output()
        .map_err(|e| DaemonError::ServiceInstallFailed {
            message: format!("sc.exe create failed: {}", e),
        })?;

    if !output.status.success() {
        return Err(DaemonError::ServiceInstallFailed {
            message: format!(
                "sc.exe create failed: {}",
                crate::stdout_lossy_trimmed(&output)
            ),
        }
        .into());
    }

    // Best-effort: a failed description/start should not fail the install.
    // Previously a non-zero exit status here was silently ignored (only a
    // spawn error was logged); log both cases.
    match std::process::Command::new("sc.exe")
        .args([
            "description",
            "cfgd",
            "Declarative machine configuration management daemon",
        ])
        .output()
    {
        Ok(out) if !out.status.success() => {
            tracing::warn!(
                output = %crate::stdout_lossy_trimmed(&out),
                "failed to set Windows Service description"
            );
        }
        Err(e) => {
            tracing::warn!(error = %e, "failed to set Windows Service description");
        }
        Ok(_) => {}
    }

    match std::process::Command::new("sc.exe")
        .args(["start", "cfgd"])
        .output()
    {
        Ok(out) if !out.status.success() => {
            tracing::warn!(
                output = %crate::stdout_lossy_trimmed(&out),
                "failed to start Windows Service"
            );
        }
        Err(e) => {
            tracing::warn!(error = %e, "failed to start Windows Service");
        }
        Ok(_) => {}
    }

    tracing::info!("installed Windows Service: cfgd");
    Ok(())
}
2563
/// Stops (best-effort) and deletes the "cfgd" Windows Service via `sc.exe`.
///
/// A service that does not exist is treated as already removed (Ok).
#[cfg(windows)]
fn uninstall_windows_service() -> Result<()> {
    // Stop before delete; failure to spawn is only worth a debug log.
    if let Err(e) = std::process::Command::new("sc.exe")
        .args(["stop", "cfgd"])
        .output()
    {
        tracing::debug!(error = %e, "sc.exe stop (pre-uninstall)");
    }

    let output = std::process::Command::new("sc.exe")
        .args(["delete", "cfgd"])
        .output()
        .map_err(|e| DaemonError::ServiceInstallFailed {
            message: format!("sc.exe delete failed: {}", e),
        })?;

    if output.status.success() {
        tracing::info!("removed Windows Service: cfgd");
        return Ok(());
    }

    let stdout = crate::stdout_lossy_trimmed(&output);
    // 1060 == ERROR_SERVICE_DOES_NOT_EXIST — nothing to remove.
    if stdout.contains("1060") || stdout.contains("does not exist") {
        tracing::debug!("cfgd Windows Service not found; nothing to remove");
        return Ok(());
    }

    Err(DaemonError::ServiceInstallFailed {
        message: format!("sc.exe delete failed: {}", stdout),
    }
    .into())
}
2599
#[cfg(windows)]
// Hooks passed to `run_as_windows_service`, stashed in a process-wide slot
// because the SCM entry point (`ffi_service_main`) receives no user data.
static SERVICE_HOOKS: std::sync::OnceLock<Arc<dyn DaemonHooks>> = std::sync::OnceLock::new();
2603
/// Hands control to the Windows Service Control Manager dispatcher.
///
/// Stores `hooks` in `SERVICE_HOOKS` so `ffi_service_main` can retrieve them.
/// Blocks until the service dispatcher returns.
///
/// # Errors
/// Returns `ServiceError` when the dispatcher cannot be started (e.g. the
/// process was not launched by the SCM).
#[cfg(windows)]
pub fn run_as_windows_service(hooks: Arc<dyn DaemonHooks>) -> Result<()> {
    use windows_service::service_dispatcher;
    // OnceLock::set fails when hooks were already installed; previously this
    // was discarded silently, leaving stale hooks with no diagnostic.
    if SERVICE_HOOKS.set(hooks).is_err() {
        tracing::warn!("SERVICE_HOOKS already initialized; keeping existing hooks");
    }
    service_dispatcher::start("cfgd", ffi_service_main).map_err(|e| DaemonError::ServiceError {
        message: format!("failed to start service dispatcher: {}", e),
    })?;
    Ok(())
}
2616
2617#[cfg(not(windows))]
2619pub fn run_as_windows_service(_hooks: Arc<dyn DaemonHooks>) -> Result<()> {
2620 Err(DaemonError::ServiceError {
2621 message: "Windows Service mode is only available on Windows".to_string(),
2622 }
2623 .into())
2624}
2625
#[cfg(windows)]
// SCM entry point registered via `service_dispatcher::start`. The signature
// returns nothing, so errors from the real service body are logged here.
extern "system" fn ffi_service_main(_argc: u32, _argv: *mut *mut u16) {
    if let Err(e) = windows_service_main() {
        tracing::error!(error = %e, "windows service main failed");
    }
}
2632
/// Routes tracing output to `%LOCALAPPDATA%\cfgd\daemon.log` (falling back to
/// the default config dir) for service mode, where there is no console.
///
/// All failures are swallowed: a service with no logging is preferable to a
/// service that refuses to start.
#[cfg(windows)]
fn init_windows_logging() {
    let log_dir = match std::env::var("LOCALAPPDATA") {
        Ok(dir) => PathBuf::from(dir).join("cfgd"),
        Err(_) => crate::default_config_dir(),
    };
    let _ = std::fs::create_dir_all(&log_dir);

    let log_path = log_dir.join("daemon.log");
    let file = match std::fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open(&log_path)
    {
        Ok(f) => f,
        Err(_) => return,
    };

    let subscriber = tracing_subscriber::fmt()
        .with_writer(std::sync::Mutex::new(file))
        .with_ansi(false)
        .with_target(false)
        .finish();
    // Ignore failure: a global subscriber may already be installed.
    let _ = tracing::subscriber::set_global_default(subscriber);
}
2655
#[cfg(windows)]
/// Body of the Windows Service: registers the SCM control handler, parses
/// `--config`/`--profile` from the service command line, runs the daemon on
/// a dedicated Tokio runtime, and walks the SCM status sequence
/// StartPending -> Running -> StopPending -> Stopped.
///
/// Requires `run_as_windows_service` to have populated `SERVICE_HOOKS` first.
fn windows_service_main() -> std::result::Result<(), Box<dyn std::error::Error>> {
    use windows_service::service::*;
    use windows_service::service_control_handler::{self, ServiceControlHandlerResult};

    init_windows_logging();

    // The SCM control handler runs on its own thread; it signals shutdown
    // through this channel.
    let (shutdown_tx, shutdown_rx) = std::sync::mpsc::channel();

    let event_handler = move |control_event| -> ServiceControlHandlerResult {
        match control_event {
            ServiceControl::Stop | ServiceControl::Shutdown => {
                let _ = shutdown_tx.send(());
                ServiceControlHandlerResult::NoError
            }
            // SCM liveness probe — must be acknowledged.
            ServiceControl::Interrogate => ServiceControlHandlerResult::NoError,
            _ => ServiceControlHandlerResult::NotImplemented,
        }
    };

    let status_handle = service_control_handler::register("cfgd", event_handler)?;

    // Report StartPending while args are parsed and the runtime spins up.
    status_handle.set_service_status(ServiceStatus {
        service_type: ServiceType::OWN_PROCESS,
        current_state: ServiceState::StartPending,
        controls_accepted: ServiceControlAccept::empty(),
        exit_code: ServiceExitCode::Win32(0),
        checkpoint: 1,
        wait_hint: std::time::Duration::from_secs(10),
        process_id: None,
    })?;

    // Parse the service's own command line (set at install time) for
    // --config / --profile; unknown tokens are skipped.
    let args: Vec<String> = std::env::args().collect();
    let mut config_path = crate::default_config_dir().join("config.yaml");
    let mut profile_override: Option<String> = None;
    let mut i = 0;
    while i < args.len() {
        match args[i].as_str() {
            "--config" if i + 1 < args.len() => {
                config_path = PathBuf::from(&args[i + 1]);
                i += 2;
            }
            "--profile" if i + 1 < args.len() => {
                profile_override = Some(args[i + 1].clone());
                i += 2;
            }
            _ => {
                i += 1;
            }
        }
    }

    let hooks = SERVICE_HOOKS
        .get()
        .ok_or("SERVICE_HOOKS not initialized — run_as_windows_service must be called first")?
        .clone();

    let rt = tokio::runtime::Runtime::new()?;
    let printer = Arc::new(crate::output::Printer::new(crate::output::Verbosity::Quiet));

    // Run the daemon in the background; this thread stays free to service
    // the SCM status protocol.
    rt.spawn(async move {
        if let Err(e) = run_daemon(config_path, profile_override, printer, hooks).await {
            tracing::error!(error = %e, "daemon error");
        }
    });

    // Only now accept stop/shutdown controls.
    status_handle.set_service_status(ServiceStatus {
        service_type: ServiceType::OWN_PROCESS,
        current_state: ServiceState::Running,
        controls_accepted: ServiceControlAccept::STOP | ServiceControlAccept::SHUTDOWN,
        exit_code: ServiceExitCode::Win32(0),
        checkpoint: 0,
        wait_hint: std::time::Duration::default(),
        process_id: None,
    })?;

    // Block until the control handler requests shutdown (or the sender drops).
    let _ = shutdown_rx.recv();

    status_handle.set_service_status(ServiceStatus {
        service_type: ServiceType::OWN_PROCESS,
        current_state: ServiceState::StopPending,
        controls_accepted: ServiceControlAccept::empty(),
        exit_code: ServiceExitCode::Win32(0),
        checkpoint: 1,
        wait_hint: std::time::Duration::from_secs(5),
        process_id: None,
    })?;

    // Give in-flight daemon tasks up to 5s to finish before tearing down.
    rt.shutdown_timeout(std::time::Duration::from_secs(5));

    status_handle.set_service_status(ServiceStatus {
        service_type: ServiceType::OWN_PROCESS,
        current_state: ServiceState::Stopped,
        controls_accepted: ServiceControlAccept::empty(),
        exit_code: ServiceExitCode::Win32(0),
        checkpoint: 0,
        wait_hint: std::time::Duration::default(),
        process_id: None,
    })?;

    Ok(())
}
2768
#[cfg(unix)]
/// Renders the launchd agent plist for the daemon.
///
/// ProgramArguments is `<binary> --config <config_path> daemon [--profile <p>]`
/// (note: `--profile` is appended after the `daemon` subcommand here, while
/// the systemd unit puts it before — presumably the CLI accepts both; verify).
/// RunAtLoad/KeepAlive are enabled and stdout/stderr go to `~/Library/Logs`.
fn generate_launchd_plist(
    binary: &Path,
    config_path: &Path,
    profile: Option<&str>,
    home: &Path,
) -> String {
    // Each argv element becomes one <string> node in the plist array.
    let mut args = vec![
        format!("<string>{}</string>", binary.display()),
        "<string>--config</string>".to_string(),
        format!("<string>{}</string>", config_path.display()),
        "<string>daemon</string>".to_string(),
    ];

    if let Some(p) = profile {
        args.push("<string>--profile</string>".to_string());
        args.push(format!("<string>{}</string>", p));
    }

    // Join with the template's indentation so the XML stays aligned.
    // NOTE(review): paths/profile are interpolated without XML escaping —
    // a path containing `&` or `<` would produce invalid XML.
    let args_xml = args.join("\n        ");
    let label = LAUNCHD_LABEL;
    let home_display = home.display();

    format!(
        r#"<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>{label}</string>
    <key>ProgramArguments</key>
    <array>
        {args_xml}
    </array>
    <key>RunAtLoad</key>
    <true/>
    <key>KeepAlive</key>
    <true/>
    <key>StandardOutPath</key>
    <string>{home_display}/Library/Logs/cfgd.log</string>
    <key>StandardErrorPath</key>
    <string>{home_display}/Library/Logs/cfgd.err</string>
</dict>
</plist>"#
    )
}
2816
2817#[cfg(unix)]
2818fn install_launchd_service(binary: &Path, config_path: &Path, profile: Option<&str>) -> Result<()> {
2819 let home = crate::expand_tilde(Path::new("~"));
2820 let plist_dir = home.join(LAUNCHD_AGENTS_DIR);
2821 std::fs::create_dir_all(&plist_dir).map_err(|e| DaemonError::ServiceInstallFailed {
2822 message: format!("create LaunchAgents dir: {}", e),
2823 })?;
2824
2825 let plist_path = plist_dir.join(format!("{}.plist", LAUNCHD_LABEL));
2826 let config_abs =
2827 std::fs::canonicalize(config_path).unwrap_or_else(|_| config_path.to_path_buf());
2828
2829 let plist = generate_launchd_plist(binary, &config_abs, profile, &home);
2830
2831 crate::atomic_write_str(&plist_path, &plist).map_err(|e| {
2832 DaemonError::ServiceInstallFailed {
2833 message: format!("write plist: {}", e),
2834 }
2835 })?;
2836
2837 tracing::info!(path = %plist_path.display(), "installed launchd service");
2838 Ok(())
2839}
2840
2841#[cfg(unix)]
2842fn uninstall_launchd_service() -> Result<()> {
2843 let home = crate::expand_tilde(Path::new("~"));
2844 let plist_path = home
2845 .join(LAUNCHD_AGENTS_DIR)
2846 .join(format!("{}.plist", LAUNCHD_LABEL));
2847
2848 if plist_path.exists() {
2849 std::fs::remove_file(&plist_path).map_err(|e| DaemonError::ServiceInstallFailed {
2850 message: format!("remove plist: {}", e),
2851 })?;
2852 tracing::info!(path = %plist_path.display(), "removed launchd service");
2853 }
2854
2855 Ok(())
2856}
2857
#[cfg(unix)]
/// Renders the systemd user unit for the daemon.
///
/// `ExecStart` is `<binary> --config <path> [--profile <name>] daemon`;
/// the unit restarts on failure and is wanted by `default.target`.
fn generate_systemd_unit(binary: &Path, config_path: &Path, profile: Option<&str>) -> String {
    // Assemble the command line token by token instead of branching on two
    // full format strings.
    let mut argv = vec![
        binary.display().to_string(),
        "--config".to_string(),
        config_path.display().to_string(),
    ];
    if let Some(name) = profile {
        argv.push("--profile".to_string());
        argv.push(name.to_string());
    }
    argv.push("daemon".to_string());
    let exec_start = argv.join(" ");

    format!(
        r#"[Unit]
Description=cfgd configuration daemon
After=network.target

[Service]
Type=simple
ExecStart={exec_start}
Restart=on-failure
RestartSec=10

[Install]
WantedBy=default.target"#
    )
}
2890
2891#[cfg(unix)]
2892fn install_systemd_service(binary: &Path, config_path: &Path, profile: Option<&str>) -> Result<()> {
2893 let home = crate::expand_tilde(Path::new("~"));
2894 let unit_dir = home.join(SYSTEMD_USER_DIR);
2895 std::fs::create_dir_all(&unit_dir).map_err(|e| DaemonError::ServiceInstallFailed {
2896 message: format!("create systemd user dir: {}", e),
2897 })?;
2898
2899 let unit_path = unit_dir.join("cfgd.service");
2900 let config_abs =
2901 std::fs::canonicalize(config_path).unwrap_or_else(|_| config_path.to_path_buf());
2902
2903 let unit = generate_systemd_unit(binary, &config_abs, profile);
2904
2905 crate::atomic_write_str(&unit_path, &unit).map_err(|e| DaemonError::ServiceInstallFailed {
2906 message: format!("write unit file: {}", e),
2907 })?;
2908
2909 tracing::info!(path = %unit_path.display(), "installed systemd user service");
2910 Ok(())
2911}
2912
2913#[cfg(unix)]
2914fn uninstall_systemd_service() -> Result<()> {
2915 let home = crate::expand_tilde(Path::new("~"));
2916 let unit_path = home.join(SYSTEMD_USER_DIR).join("cfgd.service");
2917
2918 if unit_path.exists() {
2919 std::fs::remove_file(&unit_path).map_err(|e| DaemonError::ServiceInstallFailed {
2920 message: format!("remove unit file: {}", e),
2921 })?;
2922 tracing::info!(path = %unit_path.display(), "removed systemd user service");
2923 }
2924
2925 Ok(())
2926}
2927
2928fn connect_daemon_ipc() -> Option<IpcStream> {
2932 #[cfg(unix)]
2933 {
2934 let path = PathBuf::from(DEFAULT_IPC_PATH);
2935 if !path.exists() {
2936 return None;
2937 }
2938 let stream = StdUnixStream::connect(&path).ok()?;
2939 stream.set_read_timeout(Some(Duration::from_secs(5))).ok()?;
2940 Some(IpcStream::Unix(stream))
2941 }
2942 #[cfg(windows)]
2943 {
2944 let file = std::fs::OpenOptions::new()
2945 .read(true)
2946 .write(true)
2947 .open(DEFAULT_IPC_PATH)
2948 .ok()?;
2949 Some(IpcStream::Pipe(file))
2950 }
2951}
2952
// Transport-agnostic client connection to the daemon's IPC endpoint:
// a Unix domain socket on Unix, a named pipe (opened as a file) on Windows.
enum IpcStream {
    #[cfg(unix)]
    Unix(StdUnixStream),
    #[cfg(windows)]
    Pipe(std::fs::File),
}
2960
// Blocking `Read` that forwards to whichever platform transport is in use.
impl std::io::Read for IpcStream {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        match self {
            #[cfg(unix)]
            IpcStream::Unix(s) => s.read(buf),
            #[cfg(windows)]
            IpcStream::Pipe(f) => f.read(buf),
        }
    }
}
2971
// Blocking `Write` that forwards to whichever platform transport is in use.
impl std::io::Write for IpcStream {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        match self {
            #[cfg(unix)]
            IpcStream::Unix(s) => s.write(buf),
            #[cfg(windows)]
            IpcStream::Pipe(f) => f.write(buf),
        }
    }

    fn flush(&mut self) -> std::io::Result<()> {
        match self {
            #[cfg(unix)]
            IpcStream::Unix(s) => s.flush(),
            #[cfg(windows)]
            IpcStream::Pipe(f) => f.flush(),
        }
    }
}
2991
2992pub fn query_daemon_status() -> Result<Option<DaemonStatusResponse>> {
2993 let mut stream = match connect_daemon_ipc() {
2994 Some(s) => s,
2995 None => return Ok(None),
2996 };
2997
2998 write!(
2999 stream,
3000 "GET /status HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n"
3001 )
3002 .map_err(|e| DaemonError::HealthSocketError {
3003 message: format!("write request: {}", e),
3004 })?;
3005
3006 let reader = BufReader::new(&mut stream);
3007 let mut lines: Vec<String> = Vec::new();
3008 let mut in_body = false;
3009
3010 for line_result in reader.lines() {
3011 let line = line_result.map_err(|e| DaemonError::HealthSocketError {
3012 message: format!("read response: {}", e),
3013 })?;
3014
3015 if in_body {
3016 lines.push(line);
3017 } else if line.trim().is_empty() {
3018 in_body = true;
3019 }
3020 }
3021
3022 let body = lines.join("\n");
3023 if body.is_empty() {
3024 return Ok(None);
3025 }
3026
3027 let status: DaemonStatusResponse =
3028 serde_json::from_str(&body).map_err(|e| DaemonError::HealthSocketError {
3029 message: format!("parse response: {}", e),
3030 })?;
3031
3032 Ok(Some(status))
3033}
3034
/// Public blocking wrapper around `git_pull` for callers outside the daemon.
///
/// Returns `git_pull`'s result unchanged — presumably `Ok(bool)` indicates
/// whether new commits were pulled (confirm against `git_pull`), with a
/// human-readable message on error.
pub fn git_pull_sync(repo_path: &Path) -> std::result::Result<bool, String> {
    git_pull(repo_path)
}
3040
3041pub(crate) fn parse_duration_or_default(s: &str) -> Duration {
3044 crate::parse_duration_str(s).unwrap_or(Duration::from_secs(DEFAULT_RECONCILE_SECS))
3045}
3046
#[cfg(unix)]
// Resolves once the next SIGHUP is delivered to the process.
async fn recv_sighup(signal: &mut tokio::signal::unix::Signal) {
    signal.recv().await;
}
3052
#[cfg(not(unix))]
// Non-Unix stand-in: never resolves, since SIGHUP does not exist there.
async fn recv_sighup(_signal: &mut ()) {
    std::future::pending::<()>().await;
}
3058
#[cfg(unix)]
// Resolves once the next SIGTERM is delivered to the process.
async fn recv_sigterm(signal: &mut tokio::signal::unix::Signal) {
    signal.recv().await;
}
3064
#[cfg(not(unix))]
// Non-Unix stand-in: never resolves, since SIGTERM does not exist there.
async fn recv_sigterm(_signal: &mut ()) {
    std::future::pending::<()>().await;
}
3070
3071#[cfg(test)]
3072mod tests {
3073 use super::*;
3074 use crate::test_helpers::test_state;
3075
3076 #[test]
3077 fn parse_duration_seconds() {
3078 assert_eq!(parse_duration_or_default("30s"), Duration::from_secs(30));
3079 }
3080
3081 #[test]
3082 fn parse_duration_minutes() {
3083 assert_eq!(parse_duration_or_default("5m"), Duration::from_secs(300));
3084 }
3085
3086 #[test]
3087 fn parse_duration_hours() {
3088 assert_eq!(parse_duration_or_default("1h"), Duration::from_secs(3600));
3089 }
3090
3091 #[test]
3092 fn parse_duration_plain_number() {
3093 assert_eq!(parse_duration_or_default("120"), Duration::from_secs(120));
3094 }
3095
3096 #[test]
3097 fn parse_duration_invalid_falls_back() {
3098 assert_eq!(
3099 parse_duration_or_default("invalid"),
3100 Duration::from_secs(DEFAULT_RECONCILE_SECS)
3101 );
3102 }
3103
3104 #[test]
3105 fn parse_duration_with_whitespace() {
3106 assert_eq!(parse_duration_or_default(" 10m "), Duration::from_secs(600));
3107 }
3108
3109 #[test]
3110 fn daemon_state_initial() {
3111 let state = DaemonState::new();
3112 assert!(state.last_reconcile.is_none());
3113 assert!(state.last_sync.is_none());
3114 assert_eq!(state.drift_count, 0);
3115 assert_eq!(state.sources.len(), 1);
3116 assert_eq!(state.sources[0].name, "local");
3117 }
3118
3119 #[test]
3120 fn daemon_state_response() {
3121 let state = DaemonState::new();
3122 let response = state.to_response();
3123 assert!(response.running);
3124 assert!(response.pid > 0);
3125 assert_eq!(response.sources.len(), 1);
3126 }
3127
3128 #[test]
3129 fn notifier_stdout_does_not_panic() {
3130 let notifier = Notifier::new(NotifyMethod::Stdout, None);
3131 assert!(matches!(notifier.method, NotifyMethod::Stdout));
3132 assert!(notifier.webhook_url.is_none());
3133 notifier.notify("test", "message");
3135 }
3136
3137 #[test]
3138 fn source_status_round_trips() {
3139 let status = SourceStatus {
3140 name: "local".to_string(),
3141 last_sync: Some("2026-01-01T00:00:00Z".to_string()),
3142 last_reconcile: None,
3143 drift_count: 3,
3144 status: "active".to_string(),
3145 };
3146 let json = serde_json::to_string(&status).unwrap();
3147 let parsed: SourceStatus = serde_json::from_str(&json).unwrap();
3148 assert_eq!(parsed.name, "local");
3149 assert_eq!(parsed.last_sync.as_deref(), Some("2026-01-01T00:00:00Z"));
3150 assert!(parsed.last_reconcile.is_none());
3151 assert_eq!(parsed.drift_count, 3);
3152 assert_eq!(parsed.status, "active");
3153 assert!(json.contains("\"driftCount\":3"));
3155 assert!(json.contains("\"lastSync\":"));
3156 }
3157
3158 #[test]
3159 #[cfg(unix)]
3160 fn systemd_unit_path() {
3161 let home = "/home/testuser";
3162 let unit_dir = PathBuf::from(home).join(SYSTEMD_USER_DIR);
3163 let unit_path = unit_dir.join("cfgd.service");
3164 assert_eq!(
3165 unit_path.to_str().unwrap(),
3166 "/home/testuser/.config/systemd/user/cfgd.service"
3167 );
3168 }
3169
3170 #[test]
3171 fn generate_device_id_is_stable() {
3172 let id1 = generate_device_id().unwrap();
3173 let id2 = generate_device_id().unwrap();
3174 assert_eq!(id1, id2);
3175 assert_eq!(id1.len(), 64);
3177 }
3178
3179 #[test]
3180 fn compute_config_hash_is_deterministic() {
3181 use crate::config::{
3182 CargoSpec, LayerPolicy, MergedProfile, PackagesSpec, ProfileLayer, ProfileSpec,
3183 ResolvedProfile,
3184 };
3185 let resolved = ResolvedProfile {
3186 layers: vec![ProfileLayer {
3187 source: "local".into(),
3188 profile_name: "test".into(),
3189 priority: 1000,
3190 policy: LayerPolicy::Local,
3191 spec: ProfileSpec::default(),
3192 }],
3193 merged: MergedProfile {
3194 packages: PackagesSpec {
3195 cargo: Some(CargoSpec {
3196 file: None,
3197 packages: vec!["bat".into()],
3198 }),
3199 ..Default::default()
3200 },
3201 ..Default::default()
3202 },
3203 };
3204 let hash1 = compute_config_hash(&resolved).unwrap();
3205 let hash2 = compute_config_hash(&resolved).unwrap();
3206 assert_eq!(hash1, hash2);
3207 assert_eq!(hash1.len(), 64);
3208 }
3209
3210 #[test]
3211 fn find_server_url_returns_none_for_git_origin() {
3212 use crate::config::*;
3213 let config = CfgdConfig {
3214 api_version: crate::API_VERSION.into(),
3215 kind: "Config".into(),
3216 metadata: ConfigMetadata {
3217 name: "test".into(),
3218 },
3219 spec: ConfigSpec {
3220 profile: Some("default".into()),
3221 origin: vec![OriginSpec {
3222 origin_type: OriginType::Git,
3223 url: "https://github.com/test/repo.git".into(),
3224 branch: "master".into(),
3225 auth: None,
3226 ssh_strict_host_key_checking: Default::default(),
3227 }],
3228 daemon: None,
3229 secrets: None,
3230 sources: vec![],
3231 theme: None,
3232 modules: None,
3233 security: None,
3234 aliases: std::collections::HashMap::new(),
3235 file_strategy: crate::config::FileStrategy::default(),
3236 ai: None,
3237 compliance: None,
3238 },
3239 };
3240 assert!(find_server_url(&config).is_none());
3241 }
3242
3243 #[test]
3244 fn find_server_url_returns_url_for_server_origin() {
3245 use crate::config::*;
3246 let config = CfgdConfig {
3247 api_version: crate::API_VERSION.into(),
3248 kind: "Config".into(),
3249 metadata: ConfigMetadata {
3250 name: "test".into(),
3251 },
3252 spec: ConfigSpec {
3253 profile: Some("default".into()),
3254 origin: vec![OriginSpec {
3255 origin_type: OriginType::Server,
3256 url: "https://cfgd.example.com".into(),
3257 branch: "master".into(),
3258 auth: None,
3259 ssh_strict_host_key_checking: Default::default(),
3260 }],
3261 daemon: None,
3262 secrets: None,
3263 sources: vec![],
3264 theme: None,
3265 modules: None,
3266 security: None,
3267 aliases: std::collections::HashMap::new(),
3268 file_strategy: crate::config::FileStrategy::default(),
3269 ai: None,
3270 compliance: None,
3271 },
3272 };
3273 assert_eq!(
3274 find_server_url(&config),
3275 Some("https://cfgd.example.com".to_string())
3276 );
3277 }
3278
3279 #[test]
3280 fn checkin_payload_round_trips() {
3281 let payload = CheckinPayload {
3282 device_id: "abc123".into(),
3283 hostname: "test-host".into(),
3284 os: "linux".into(),
3285 arch: "x86_64".into(),
3286 config_hash: "deadbeef".into(),
3287 };
3288 let json = serde_json::to_string(&payload).unwrap();
3289 let parsed: serde_json::Value = serde_json::from_str(&json).unwrap();
3290 assert_eq!(parsed["device_id"], "abc123");
3291 assert_eq!(parsed["hostname"], "test-host");
3292 assert_eq!(parsed["os"], "linux");
3293 assert_eq!(parsed["arch"], "x86_64");
3294 assert_eq!(parsed["config_hash"], "deadbeef");
3295 assert_eq!(parsed.as_object().unwrap().len(), 5);
3297 }
3298
3299 #[test]
3300 fn checkin_response_deserializes() {
3301 let json = r#"{"status":"ok","config_changed":true,"config":null}"#;
3302 let resp: CheckinServerResponse = serde_json::from_str(json).unwrap();
3303 assert!(resp.config_changed);
3304 assert_eq!(resp._status, "ok");
3305 }
3306
3307 #[test]
3308 #[cfg(unix)]
3309 fn launchd_plist_path() {
3310 let home = "/Users/testuser";
3311 let plist_dir = PathBuf::from(home).join(LAUNCHD_AGENTS_DIR);
3312 let plist_path = plist_dir.join(format!("{}.plist", LAUNCHD_LABEL));
3313 assert_eq!(
3314 plist_path.to_str().unwrap(),
3315 "/Users/testuser/Library/LaunchAgents/com.cfgd.daemon.plist"
3316 );
3317 }
3318
3319 #[test]
3320 fn extract_source_resources_from_merged_profile() {
3321 use crate::config::{
3322 BrewSpec, CargoSpec, FilesSpec, ManagedFileSpec, MergedProfile, PackagesSpec,
3323 };
3324
3325 let merged = MergedProfile {
3326 packages: PackagesSpec {
3327 brew: Some(BrewSpec {
3328 formulae: vec!["ripgrep".into(), "fd".into()],
3329 casks: vec!["firefox".into()],
3330 ..Default::default()
3331 }),
3332 cargo: Some(CargoSpec {
3333 file: None,
3334 packages: vec!["bat".into()],
3335 }),
3336 ..Default::default()
3337 },
3338 files: FilesSpec {
3339 managed: vec![ManagedFileSpec {
3340 source: "dotfiles/.zshrc".into(),
3341 target: PathBuf::from("/home/user/.zshrc"),
3342 strategy: None,
3343 private: false,
3344 origin: None,
3345 encryption: None,
3346 permissions: None,
3347 }],
3348 ..Default::default()
3349 },
3350 env: vec![crate::config::EnvVar {
3351 name: "EDITOR".into(),
3352 value: "vim".into(),
3353 }],
3354 ..Default::default()
3355 };
3356
3357 let resources = extract_source_resources(&merged);
3358 assert!(resources.contains("packages.brew.ripgrep"));
3359 assert!(resources.contains("packages.brew.fd"));
3360 assert!(resources.contains("packages.brew.firefox"));
3361 assert!(resources.contains("packages.cargo.bat"));
3362 assert!(resources.contains("files./home/user/.zshrc"));
3363 assert!(resources.contains("env.EDITOR"));
3364 assert_eq!(resources.len(), 6);
3365 }
3366
3367 #[test]
3368 fn hash_resources_is_deterministic() {
3369 let r1: HashSet<String> =
3370 HashSet::from_iter(["a".to_string(), "b".to_string(), "c".to_string()]);
3371 let r2: HashSet<String> =
3372 HashSet::from_iter(["c".to_string(), "a".to_string(), "b".to_string()]);
3373
3374 assert_eq!(hash_resources(&r1), hash_resources(&r2));
3375 }
3376
3377 #[test]
3378 fn hash_resources_differs_for_different_sets() {
3379 let r1: HashSet<String> = HashSet::from_iter(["a".to_string()]);
3380 let r2: HashSet<String> = HashSet::from_iter(["b".to_string()]);
3381
3382 assert_ne!(hash_resources(&r1), hash_resources(&r2));
3383 }
3384
3385 #[test]
3386 fn infer_item_tier_defaults_to_recommended() {
3387 assert_eq!(infer_item_tier("packages.brew.ripgrep"), "recommended");
3388 assert_eq!(infer_item_tier("env.EDITOR"), "recommended");
3389 }
3390
3391 #[test]
3392 fn infer_item_tier_detects_locked() {
3393 assert_eq!(infer_item_tier("files.security-policy.yaml"), "locked");
3394 assert_eq!(
3395 infer_item_tier("files./home/user/.config/company/security.yaml"),
3396 "locked"
3397 );
3398 }
3399
3400 #[test]
3401 fn process_source_decisions_first_run_records_decisions() {
3402 use crate::config::PackagesSpec;
3403 let store = test_state();
3404 let notifier = Notifier::new(NotifyMethod::Stdout, None);
3405 let policy = AutoApplyPolicyConfig::default(); let merged = MergedProfile {
3408 packages: PackagesSpec {
3409 cargo: Some(crate::config::CargoSpec {
3410 file: None,
3411 packages: vec!["bat".into()],
3412 }),
3413 ..Default::default()
3414 },
3415 ..Default::default()
3416 };
3417
3418 let excluded = process_source_decisions(&store, "acme", &merged, &policy, ¬ifier);
3419
3420 let pending = store.pending_decisions().unwrap();
3422 assert_eq!(pending.len(), 1);
3423 assert_eq!(pending[0].resource, "packages.cargo.bat");
3424 assert!(excluded.contains("packages.cargo.bat"));
3425 }
3426
3427 #[test]
3428 fn process_source_decisions_accept_policy_no_pending() {
3429 use crate::config::PackagesSpec;
3430 let store = test_state();
3431 let notifier = Notifier::new(NotifyMethod::Stdout, None);
3432 let policy = AutoApplyPolicyConfig {
3433 new_recommended: PolicyAction::Accept,
3434 ..Default::default()
3435 };
3436
3437 let merged = MergedProfile {
3438 packages: PackagesSpec {
3439 cargo: Some(crate::config::CargoSpec {
3440 file: None,
3441 packages: vec!["bat".into()],
3442 }),
3443 ..Default::default()
3444 },
3445 ..Default::default()
3446 };
3447
3448 let excluded = process_source_decisions(&store, "acme", &merged, &policy, ¬ifier);
3449
3450 let pending = store.pending_decisions().unwrap();
3452 assert!(pending.is_empty());
3453 assert!(!excluded.contains("packages.cargo.bat"));
3454 }
3455
3456 #[test]
3459 fn compliance_snapshot_skips_when_hash_unchanged() {
3460 let store = test_state();
3461 let snapshot = crate::compliance::ComplianceSnapshot {
3462 timestamp: crate::utc_now_iso8601(),
3463 machine: crate::compliance::MachineInfo {
3464 hostname: "test".into(),
3465 os: "linux".into(),
3466 arch: "x86_64".into(),
3467 },
3468 profile: "default".into(),
3469 sources: vec!["local".into()],
3470 checks: vec![crate::compliance::ComplianceCheck {
3471 category: "file".into(),
3472 status: crate::compliance::ComplianceStatus::Compliant,
3473 detail: Some("present".into()),
3474 ..Default::default()
3475 }],
3476 summary: crate::compliance::ComplianceSummary {
3477 compliant: 1,
3478 warning: 0,
3479 violation: 0,
3480 },
3481 };
3482
3483 let json = serde_json::to_string_pretty(&snapshot).unwrap();
3484 let hash = crate::sha256_hex(json.as_bytes());
3485
3486 store.store_compliance_snapshot(&snapshot, &hash).unwrap();
3488
3489 let latest = store.latest_compliance_hash().unwrap();
3491 assert_eq!(latest.as_deref(), Some(hash.as_str()));
3492 }
3493
3494 #[test]
3495 fn compliance_snapshot_stores_when_hash_changes() {
3496 let store = test_state();
3497
3498 let snapshot1 = crate::compliance::ComplianceSnapshot {
3499 timestamp: "2026-01-01T00:00:00Z".into(),
3500 machine: crate::compliance::MachineInfo {
3501 hostname: "test".into(),
3502 os: "linux".into(),
3503 arch: "x86_64".into(),
3504 },
3505 profile: "default".into(),
3506 sources: vec!["local".into()],
3507 checks: vec![crate::compliance::ComplianceCheck {
3508 category: "file".into(),
3509 status: crate::compliance::ComplianceStatus::Compliant,
3510 ..Default::default()
3511 }],
3512 summary: crate::compliance::ComplianceSummary {
3513 compliant: 1,
3514 warning: 0,
3515 violation: 0,
3516 },
3517 };
3518
3519 let json1 = serde_json::to_string_pretty(&snapshot1).unwrap();
3520 let hash1 = crate::sha256_hex(json1.as_bytes());
3521 store.store_compliance_snapshot(&snapshot1, &hash1).unwrap();
3522
3523 let snapshot2 = crate::compliance::ComplianceSnapshot {
3525 timestamp: "2026-01-02T00:00:00Z".into(),
3526 machine: crate::compliance::MachineInfo {
3527 hostname: "test".into(),
3528 os: "linux".into(),
3529 arch: "x86_64".into(),
3530 },
3531 profile: "default".into(),
3532 sources: vec!["local".into()],
3533 checks: vec![crate::compliance::ComplianceCheck {
3534 category: "package".into(),
3535 status: crate::compliance::ComplianceStatus::Violation,
3536 ..Default::default()
3537 }],
3538 summary: crate::compliance::ComplianceSummary {
3539 compliant: 0,
3540 warning: 0,
3541 violation: 1,
3542 },
3543 };
3544
3545 let json2 = serde_json::to_string_pretty(&snapshot2).unwrap();
3546 let hash2 = crate::sha256_hex(json2.as_bytes());
3547
3548 assert_ne!(hash1, hash2);
3550 let latest = store.latest_compliance_hash().unwrap();
3551 assert_ne!(latest.as_deref(), Some(hash2.as_str()));
3552
3553 store.store_compliance_snapshot(&snapshot2, &hash2).unwrap();
3554 let latest = store.latest_compliance_hash().unwrap();
3555 assert_eq!(latest.as_deref(), Some(hash2.as_str()));
3556
3557 let history = store.compliance_history(None, 10).unwrap();
3559 assert_eq!(history.len(), 2);
3560 }
3561
3562 #[test]
3563 fn compliance_timer_not_created_when_disabled() {
3564 let config = config::ComplianceConfig {
3566 enabled: false,
3567 interval: "1h".into(),
3568 retention: "30d".into(),
3569 scope: config::ComplianceScope::default(),
3570 export: config::ComplianceExport::default(),
3571 };
3572
3573 let interval = config
3574 .enabled
3575 .then(|| crate::parse_duration_str(&config.interval).ok())
3576 .flatten();
3577
3578 assert!(interval.is_none());
3579 }
3580
3581 #[test]
3582 fn compliance_timer_created_when_enabled() {
3583 let config = config::ComplianceConfig {
3584 enabled: true,
3585 interval: "30m".into(),
3586 retention: "7d".into(),
3587 scope: config::ComplianceScope::default(),
3588 export: config::ComplianceExport::default(),
3589 };
3590
3591 let interval = config
3592 .enabled
3593 .then(|| crate::parse_duration_str(&config.interval).ok())
3594 .flatten();
3595
3596 assert_eq!(interval, Some(Duration::from_secs(30 * 60)));
3597 }
3598
3599 #[test]
3600 fn compliance_timer_invalid_interval_when_enabled() {
3601 let config = config::ComplianceConfig {
3602 enabled: true,
3603 interval: "garbage".into(),
3604 retention: "7d".into(),
3605 scope: config::ComplianceScope::default(),
3606 export: config::ComplianceExport::default(),
3607 };
3608
3609 let interval = config
3610 .enabled
3611 .then(|| crate::parse_duration_str(&config.interval).ok())
3612 .flatten();
3613
3614 assert!(interval.is_none());
3616 }
3617
3618 #[test]
3621 fn compute_config_hash_differs_for_different_packages() {
3622 use crate::config::{
3623 CargoSpec, LayerPolicy, MergedProfile, PackagesSpec, ProfileLayer, ProfileSpec,
3624 ResolvedProfile,
3625 };
3626
3627 let resolved_a = ResolvedProfile {
3628 layers: vec![ProfileLayer {
3629 source: "local".into(),
3630 profile_name: "a".into(),
3631 priority: 1000,
3632 policy: LayerPolicy::Local,
3633 spec: ProfileSpec::default(),
3634 }],
3635 merged: MergedProfile {
3636 packages: PackagesSpec {
3637 cargo: Some(CargoSpec {
3638 file: None,
3639 packages: vec!["bat".into()],
3640 }),
3641 ..Default::default()
3642 },
3643 ..Default::default()
3644 },
3645 };
3646
3647 let resolved_b = ResolvedProfile {
3648 layers: vec![ProfileLayer {
3649 source: "local".into(),
3650 profile_name: "b".into(),
3651 priority: 1000,
3652 policy: LayerPolicy::Local,
3653 spec: ProfileSpec::default(),
3654 }],
3655 merged: MergedProfile {
3656 packages: PackagesSpec {
3657 cargo: Some(CargoSpec {
3658 file: None,
3659 packages: vec!["ripgrep".into()],
3660 }),
3661 ..Default::default()
3662 },
3663 ..Default::default()
3664 },
3665 };
3666
3667 let hash_a = compute_config_hash(&resolved_a).unwrap();
3668 let hash_b = compute_config_hash(&resolved_b).unwrap();
3669 assert_ne!(hash_a, hash_b);
3670 }
3671
3672 #[test]
3675 fn hash_resources_empty_set() {
3676 let empty: HashSet<String> = HashSet::new();
3677 let hash = hash_resources(&empty);
3678 assert_eq!(hash, crate::sha256_hex(b""));
3680 }
3681
3682 #[test]
3683 fn hash_resources_single_element() {
3684 let set: HashSet<String> = HashSet::from_iter(["packages.brew.ripgrep".to_string()]);
3685 let hash = hash_resources(&set);
3686 assert_eq!(hash.len(), 64);
3687 let expected = crate::sha256_hex(b"packages.brew.ripgrep\n");
3689 assert_eq!(hash, expected);
3690 }
3691
    // `DaemonState::to_response` must copy every status field into the IPC
    // response unchanged and report `running: true`.
    #[test]
    fn daemon_state_to_response_propagates_fields() {
        let mut state = DaemonState::new();
        state.last_reconcile = Some("2026-03-30T12:00:00Z".to_string());
        state.last_sync = Some("2026-03-30T12:01:00Z".to_string());
        state.drift_count = 5;
        state.update_available = Some("2.0.0".to_string());

        let response = state.to_response();
        assert!(response.running);
        assert_eq!(
            response.last_reconcile.as_deref(),
            Some("2026-03-30T12:00:00Z")
        );
        assert_eq!(response.last_sync.as_deref(), Some("2026-03-30T12:01:00Z"));
        assert_eq!(response.drift_count, 5);
        assert_eq!(response.update_available.as_deref(), Some("2.0.0"));
        // `DaemonState::new()` evidently seeds one "local" source — these two
        // asserts pin that behavior without any push in this test.
        assert_eq!(response.sources.len(), 1);
        assert_eq!(response.sources[0].name, "local");
    }
3714
    // A fully-populated `DaemonStatusResponse` (including per-module
    // reconcile entries) must survive a serde JSON round trip intact.
    #[test]
    fn daemon_status_response_with_modules_round_trips() {
        let response = DaemonStatusResponse {
            running: true,
            pid: 42,
            uptime_secs: 100,
            last_reconcile: None,
            last_sync: None,
            drift_count: 2,
            sources: vec![],
            update_available: Some("1.5.0".to_string()),
            module_reconcile: vec![
                ModuleReconcileStatus {
                    name: "security-baseline".to_string(),
                    interval: "60s".to_string(),
                    auto_apply: true,
                    drift_policy: "Auto".to_string(),
                    last_reconcile: Some("2026-03-30T00:00:00Z".to_string()),
                },
                ModuleReconcileStatus {
                    name: "dev-tools".to_string(),
                    interval: "300s".to_string(),
                    auto_apply: false,
                    drift_policy: "NotifyOnly".to_string(),
                    last_reconcile: None,
                },
            ],
        };

        let json = serde_json::to_string(&response).unwrap();
        let parsed: DaemonStatusResponse = serde_json::from_str(&json).unwrap();
        assert_eq!(parsed.pid, 42);
        assert_eq!(parsed.drift_count, 2);
        assert_eq!(parsed.update_available.as_deref(), Some("1.5.0"));
        assert_eq!(parsed.module_reconcile.len(), 2);
        assert_eq!(parsed.module_reconcile[0].name, "security-baseline");
        assert!(parsed.module_reconcile[0].auto_apply);
        assert_eq!(parsed.module_reconcile[1].name, "dev-tools");
        assert!(!parsed.module_reconcile[1].auto_apply);
        assert!(parsed.module_reconcile[1].last_reconcile.is_none());
    }
3758
    // An empty `module_reconcile` vec and a `None` `update_available` must be
    // omitted from the serialized JSON entirely (presumably via
    // `#[serde(skip_serializing_if = ...)]` on the struct — the test only
    // checks key absence).
    #[test]
    fn daemon_status_response_skips_empty_module_reconcile() {
        let response = DaemonStatusResponse {
            running: true,
            pid: 1,
            uptime_secs: 0,
            last_reconcile: None,
            last_sync: None,
            drift_count: 0,
            sources: vec![],
            update_available: None,
            module_reconcile: vec![],
        };

        let json = serde_json::to_string(&response).unwrap();
        assert!(!json.contains("\"moduleReconcile\""));
        assert!(!json.contains("\"updateAvailable\""));
    }
3779
    // ------------------------------------------------------------------
    // `action_resource_info` mapping tests: each test feeds one `Action`
    // variant to `action_resource_info` and checks the (resource_type,
    // resource_id) pair it reports.
    // ------------------------------------------------------------------

    // File actions: the resource id is the target path.
    #[test]
    fn action_resource_info_file_create() {
        use crate::reconciler::Action;

        let action = Action::File(crate::providers::FileAction::Create {
            source: PathBuf::from("/src/.zshrc"),
            target: PathBuf::from("/home/user/.zshrc"),
            origin: "local".into(),
            strategy: crate::config::FileStrategy::default(),
            source_hash: None,
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "file");
        assert_eq!(rid, "/home/user/.zshrc");
    }

    #[test]
    fn action_resource_info_file_update() {
        use crate::reconciler::Action;

        let action = Action::File(crate::providers::FileAction::Update {
            source: PathBuf::from("/src/.zshrc"),
            target: PathBuf::from("/home/user/.zshrc"),
            diff: "--- a\n+++ b".into(),
            origin: "local".into(),
            strategy: crate::config::FileStrategy::default(),
            source_hash: None,
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "file");
        assert_eq!(rid, "/home/user/.zshrc");
    }

    #[test]
    fn action_resource_info_file_delete() {
        use crate::reconciler::Action;

        let action = Action::File(crate::providers::FileAction::Delete {
            target: PathBuf::from("/tmp/gone"),
            origin: "local".into(),
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "file");
        assert_eq!(rid, "/tmp/gone");
    }

    #[test]
    fn action_resource_info_file_set_permissions() {
        use crate::reconciler::Action;

        let action = Action::File(crate::providers::FileAction::SetPermissions {
            target: PathBuf::from("/home/user/.ssh/config"),
            mode: 0o600,
            origin: "local".into(),
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "file");
        assert_eq!(rid, "/home/user/.ssh/config");
    }

    #[test]
    fn action_resource_info_file_skip() {
        use crate::reconciler::Action;

        let action = Action::File(crate::providers::FileAction::Skip {
            target: PathBuf::from("/etc/skipped"),
            reason: "not needed".into(),
            origin: "local".into(),
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "file");
        assert_eq!(rid, "/etc/skipped");
    }

    // Package actions: the id is "<manager>:<detail>" (or just the manager
    // for Skip), with package lists joined by commas.
    #[test]
    fn action_resource_info_package_bootstrap() {
        use crate::reconciler::Action;

        let action = Action::Package(crate::providers::PackageAction::Bootstrap {
            manager: "brew".into(),
            method: "curl".into(),
            origin: "local".into(),
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "package");
        assert_eq!(rid, "brew:bootstrap");
    }

    #[test]
    fn action_resource_info_package_install() {
        use crate::reconciler::Action;

        let action = Action::Package(crate::providers::PackageAction::Install {
            manager: "apt".into(),
            packages: vec!["curl".into(), "wget".into()],
            origin: "local".into(),
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "package");
        assert_eq!(rid, "apt:curl,wget");
    }

    #[test]
    fn action_resource_info_package_uninstall() {
        use crate::reconciler::Action;

        let action = Action::Package(crate::providers::PackageAction::Uninstall {
            manager: "npm".into(),
            packages: vec!["typescript".into()],
            origin: "local".into(),
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "package");
        assert_eq!(rid, "npm:typescript");
    }

    #[test]
    fn action_resource_info_package_skip() {
        use crate::reconciler::Action;

        let action = Action::Package(crate::providers::PackageAction::Skip {
            manager: "cargo".into(),
            reason: "not available".into(),
            origin: "local".into(),
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "package");
        assert_eq!(rid, "cargo");
    }

    // Secret actions: the id is the target path (Decrypt), the provider
    // reference (Resolve), an env-list form (ResolveEnv), or the source name
    // (Skip).
    #[test]
    fn action_resource_info_secret_decrypt() {
        use crate::reconciler::Action;

        let action = Action::Secret(crate::providers::SecretAction::Decrypt {
            source: PathBuf::from("/secrets/api.enc"),
            target: PathBuf::from("/home/user/.api_key"),
            backend: "age".into(),
            origin: "local".into(),
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "secret");
        assert_eq!(rid, "/home/user/.api_key");
    }

    #[test]
    fn action_resource_info_secret_resolve() {
        use crate::reconciler::Action;

        let action = Action::Secret(crate::providers::SecretAction::Resolve {
            provider: "1password".into(),
            reference: "op://vault/item/field".into(),
            target: PathBuf::from("/tmp/secret"),
            origin: "local".into(),
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "secret");
        assert_eq!(rid, "op://vault/item/field");
    }

    #[test]
    fn action_resource_info_secret_resolve_env() {
        use crate::reconciler::Action;

        let action = Action::Secret(crate::providers::SecretAction::ResolveEnv {
            provider: "vault".into(),
            reference: "secret/data/app".into(),
            envs: vec!["API_KEY".into(), "DB_PASS".into()],
            origin: "local".into(),
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "secret");
        assert_eq!(rid, "env:[API_KEY,DB_PASS]");
    }

    #[test]
    fn action_resource_info_secret_skip() {
        use crate::reconciler::Action;

        let action = Action::Secret(crate::providers::SecretAction::Skip {
            source: "bitwarden".into(),
            reason: "not configured".into(),
            origin: "local".into(),
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "secret");
        assert_eq!(rid, "bitwarden");
    }

    // System actions: id is "<configurator>:<key>" for SetValue, bare
    // configurator for Skip.
    #[test]
    fn action_resource_info_system_set_value() {
        use crate::reconciler::{Action, SystemAction};

        let action = Action::System(SystemAction::SetValue {
            configurator: "sysctl".into(),
            key: "vm.swappiness".into(),
            desired: "10".into(),
            current: "60".into(),
            origin: "local".into(),
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "system");
        assert_eq!(rid, "sysctl:vm.swappiness");
    }

    #[test]
    fn action_resource_info_system_skip() {
        use crate::reconciler::{Action, SystemAction};

        let action = Action::System(SystemAction::Skip {
            configurator: "gsettings".into(),
            reason: "not on GNOME".into(),
            origin: "local".into(),
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "system");
        assert_eq!(rid, "gsettings");
    }

    // Script action: id is the script text itself.
    #[test]
    fn action_resource_info_script_run() {
        use crate::reconciler::{Action, ScriptAction, ScriptPhase};

        let action = Action::Script(ScriptAction::Run {
            entry: crate::config::ScriptEntry::Simple("echo hello".into()),
            phase: ScriptPhase::PreApply,
            origin: "local".into(),
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "script");
        assert_eq!(rid, "echo hello");
    }

    // Module action: id is the module name regardless of the action kind.
    #[test]
    fn action_resource_info_module() {
        use crate::reconciler::{Action, ModuleAction, ModuleActionKind};

        let action = Action::Module(ModuleAction {
            module_name: "security-baseline".into(),
            kind: ModuleActionKind::InstallPackages { resolved: vec![] },
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "module");
        assert_eq!(rid, "security-baseline");
    }

    // Env actions: WriteEnvFile reports "env" + env-file path;
    // InjectSourceLine reports "env-rc" + rc-file path.
    #[test]
    fn action_resource_info_env_write() {
        use crate::reconciler::{Action, EnvAction};

        let action = Action::Env(EnvAction::WriteEnvFile {
            path: PathBuf::from("/home/user/.cfgd.env"),
            content: "export FOO=bar".into(),
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "env");
        assert_eq!(rid, "/home/user/.cfgd.env");
    }

    #[test]
    fn action_resource_info_env_inject() {
        use crate::reconciler::{Action, EnvAction};

        let action = Action::Env(EnvAction::InjectSourceLine {
            rc_path: PathBuf::from("/home/user/.bashrc"),
            line: "source ~/.cfgd.env".into(),
        });
        let (rtype, rid) = action_resource_info(&action);
        assert_eq!(rtype, "env-rc");
        assert_eq!(rid, "/home/user/.bashrc");
    }
4053
    // `extract_source_resources` flattens a merged profile into dotted
    // resource names ("packages.<manager>.<pkg>", "system.<key>").
    #[test]
    fn extract_source_resources_apt_dnf_pipx_npm() {
        use crate::config::{AptSpec, MergedProfile, NpmSpec, PackagesSpec};

        let merged = MergedProfile {
            packages: PackagesSpec {
                apt: Some(AptSpec {
                    file: None,
                    packages: vec!["git".into(), "tmux".into()],
                }),
                dnf: vec!["vim".into()],
                pipx: vec!["black".into()],
                npm: Some(NpmSpec {
                    file: None,
                    global: vec!["prettier".into()],
                }),
                ..Default::default()
            },
            ..Default::default()
        };

        let resources = extract_source_resources(&merged);
        assert!(resources.contains("packages.apt.git"));
        assert!(resources.contains("packages.apt.tmux"));
        assert!(resources.contains("packages.dnf.vim"));
        assert!(resources.contains("packages.pipx.black"));
        assert!(resources.contains("packages.npm.prettier"));
        // Exactly the five packages above — nothing extra leaks in.
        assert_eq!(resources.len(), 5);
    }

    // System configuration keys become "system.<key>" resources; the YAML
    // values themselves are irrelevant to extraction (Null suffices).
    #[test]
    fn extract_source_resources_system_keys() {
        use crate::config::MergedProfile;

        let mut merged = MergedProfile::default();
        merged
            .system
            .insert("sysctl".into(), serde_yaml::Value::Null);
        merged
            .system
            .insert("kernelModules".into(), serde_yaml::Value::Null);

        let resources = extract_source_resources(&merged);
        assert!(resources.contains("system.sysctl"));
        assert!(resources.contains("system.kernelModules"));
        assert_eq!(resources.len(), 2);
    }

    // A default (empty) profile yields no resources at all.
    #[test]
    fn extract_source_resources_empty_profile() {
        let merged = crate::config::MergedProfile::default();
        let resources = extract_source_resources(&merged);
        assert!(resources.is_empty());
    }
4110
    // With an Accept policy for new recommended items, running the same
    // profile twice must leave no pending decisions and exclude nothing on
    // the second pass.
    #[test]
    fn process_source_decisions_no_change_on_second_call() {
        use crate::config::{CargoSpec, PackagesSpec};
        let store = test_state();
        let notifier = Notifier::new(NotifyMethod::Stdout, None);
        let policy = AutoApplyPolicyConfig {
            new_recommended: crate::config::PolicyAction::Accept,
            ..Default::default()
        };

        let merged = MergedProfile {
            packages: PackagesSpec {
                cargo: Some(CargoSpec {
                    file: None,
                    packages: vec!["bat".into()],
                }),
                ..Default::default()
            },
            ..Default::default()
        };

        // First call registers "bat"; its result is intentionally ignored.
        let _ = process_source_decisions(&store, "acme", &merged, &policy, &notifier);

        // Second call with the identical profile: nothing new to decide.
        let excluded = process_source_decisions(&store, "acme", &merged, &policy, &notifier);

        let pending = store.pending_decisions().unwrap();
        assert!(pending.is_empty());
        assert!(excluded.is_empty());
    }
4145
4146 #[test]
4147 fn process_source_decisions_detects_new_items_on_change() {
4148 use crate::config::{CargoSpec, PackagesSpec};
4149 let store = test_state();
4150 let notifier = Notifier::new(NotifyMethod::Stdout, None);
4151 let policy = AutoApplyPolicyConfig::default(); let merged1 = MergedProfile {
4155 packages: PackagesSpec {
4156 cargo: Some(CargoSpec {
4157 file: None,
4158 packages: vec!["bat".into()],
4159 }),
4160 ..Default::default()
4161 },
4162 ..Default::default()
4163 };
4164 let _ = process_source_decisions(&store, "acme", &merged1, &policy, ¬ifier);
4165 let first_pending = store.pending_decisions().unwrap();
4167 for d in &first_pending {
4168 let _ = store.resolve_decisions_for_source(&d.source, "accepted");
4169 }
4170
4171 let merged2 = MergedProfile {
4173 packages: PackagesSpec {
4174 cargo: Some(CargoSpec {
4175 file: None,
4176 packages: vec!["bat".into(), "ripgrep".into()],
4177 }),
4178 ..Default::default()
4179 },
4180 ..Default::default()
4181 };
4182 let excluded = process_source_decisions(&store, "acme", &merged2, &policy, ¬ifier);
4183
4184 let pending = store.pending_decisions().unwrap();
4186 assert!(!pending.is_empty());
4187 let resource_names: Vec<&str> = pending.iter().map(|d| d.resource.as_str()).collect();
4188 assert!(resource_names.contains(&"packages.cargo.ripgrep"));
4189 assert!(excluded.contains("packages.cargo.ripgrep"));
4190 }
4191
4192 #[test]
4195 fn infer_item_tier_detects_policy_keyword() {
4196 assert_eq!(infer_item_tier("files.policy-definitions.yaml"), "locked");
4197 assert_eq!(infer_item_tier("system.security-policy"), "locked");
4198 }
4199
    // `ModuleReconcileStatus` must round-trip through serde JSON and use
    // camelCase keys on the wire.
    #[test]
    fn module_reconcile_status_round_trips() {
        let status = ModuleReconcileStatus {
            name: "dev-tools".into(),
            interval: "120s".into(),
            auto_apply: false,
            drift_policy: "NotifyOnly".into(),
            last_reconcile: None,
        };
        let json = serde_json::to_string(&status).unwrap();
        let parsed: ModuleReconcileStatus = serde_json::from_str(&json).unwrap();
        assert_eq!(parsed.name, "dev-tools");
        assert_eq!(parsed.interval, "120s");
        assert!(!parsed.auto_apply);
        assert_eq!(parsed.drift_policy, "NotifyOnly");
        assert!(parsed.last_reconcile.is_none());
        // Wire format uses camelCase field names.
        assert!(json.contains("\"autoApply\""));
        assert!(json.contains("\"driftPolicy\""));
        assert!(json.contains("\"lastReconcile\""));
    }
4223
4224 #[test]
4227 fn notifier_webhook_without_url_does_not_panic() {
4228 let notifier = Notifier::new(NotifyMethod::Webhook, None);
4229 assert!(matches!(notifier.method, NotifyMethod::Webhook));
4230 assert!(
4232 notifier.webhook_url.is_none(),
4233 "webhook_url must be None to exercise the early-return path"
4234 );
4235 notifier.notify("test", "no url configured");
4237 }
4238
    // `find_server_url` must select the Server-type origin even when it is
    // listed after a Git origin.
    #[test]
    fn find_server_url_picks_server_among_multiple_origins() {
        use crate::config::*;
        let config = CfgdConfig {
            api_version: crate::API_VERSION.into(),
            kind: "Config".into(),
            metadata: ConfigMetadata {
                name: "test".into(),
            },
            spec: ConfigSpec {
                profile: Some("default".into()),
                origin: vec![
                    // A Git origin first — must be skipped.
                    OriginSpec {
                        origin_type: OriginType::Git,
                        url: "https://github.com/test/repo.git".into(),
                        branch: "main".into(),
                        auth: None,
                        ssh_strict_host_key_checking: Default::default(),
                    },
                    // The Server origin — the expected match.
                    OriginSpec {
                        origin_type: OriginType::Server,
                        url: "https://fleet.example.com".into(),
                        branch: "main".into(),
                        auth: None,
                        ssh_strict_host_key_checking: Default::default(),
                    },
                ],
                daemon: None,
                secrets: None,
                sources: vec![],
                theme: None,
                modules: None,
                security: None,
                aliases: std::collections::HashMap::new(),
                file_strategy: crate::config::FileStrategy::default(),
                ai: None,
                compliance: None,
            },
        };
        assert_eq!(
            find_server_url(&config),
            Some("https://fleet.example.com".to_string())
        );
    }
4285
    // With no origins configured at all, `find_server_url` yields `None`.
    #[test]
    fn find_server_url_returns_none_for_empty_origins() {
        use crate::config::*;
        let config = CfgdConfig {
            api_version: crate::API_VERSION.into(),
            kind: "Config".into(),
            metadata: ConfigMetadata {
                name: "test".into(),
            },
            spec: ConfigSpec {
                profile: Some("default".into()),
                origin: vec![],
                daemon: None,
                secrets: None,
                sources: vec![],
                theme: None,
                modules: None,
                security: None,
                aliases: std::collections::HashMap::new(),
                file_strategy: crate::config::FileStrategy::default(),
                ai: None,
                compliance: None,
            },
        };
        assert!(find_server_url(&config).is_none());
    }
4312
4313 #[test]
4316 fn checkin_response_with_config_payload() {
4317 let json = r#"{"status":"ok","config_changed":true,"config":{"packages":["git"]}}"#;
4318 let resp: CheckinServerResponse = serde_json::from_str(json).unwrap();
4319 assert!(resp.config_changed);
4320 assert!(resp._config.is_some());
4321 }
4322
4323 #[test]
4324 fn checkin_response_no_change() {
4325 let json = r#"{"status":"ok","config_changed":false,"config":null}"#;
4326 let resp: CheckinServerResponse = serde_json::from_str(json).unwrap();
4327 assert!(!resp.config_changed);
4328 }
4329
4330 #[test]
4333 fn parse_duration_zero_seconds() {
4334 assert_eq!(parse_duration_or_default("0s"), Duration::from_secs(0));
4335 }
4336
4337 #[test]
4338 fn parse_duration_zero_plain() {
4339 assert_eq!(parse_duration_or_default("0"), Duration::from_secs(0));
4340 }
4341
    // Hashing the same (empty-packages) resolved profile twice must be
    // deterministic and produce a 64-char SHA-256 hex digest.
    #[test]
    fn compute_config_hash_with_empty_packages() {
        use crate::config::{
            LayerPolicy, MergedProfile, PackagesSpec, ProfileLayer, ProfileSpec, ResolvedProfile,
        };

        let resolved = ResolvedProfile {
            layers: vec![ProfileLayer {
                source: "local".into(),
                profile_name: "empty".into(),
                priority: 1000,
                policy: LayerPolicy::Local,
                spec: ProfileSpec::default(),
            }],
            merged: MergedProfile {
                packages: PackagesSpec::default(),
                ..Default::default()
            },
        };

        let hash1 = compute_config_hash(&resolved).unwrap();
        let hash2 = compute_config_hash(&resolved).unwrap();
        assert_eq!(hash1, hash2, "hash should be deterministic");
        assert_eq!(hash1.len(), 64, "hash should be a valid SHA256 hex string");
    }
4369
    // Brew casks count as "packages.brew.*" resources; taps are metadata and
    // must not appear.
    #[test]
    fn extract_source_resources_brew_casks_only() {
        use crate::config::{BrewSpec, MergedProfile, PackagesSpec};

        let merged = MergedProfile {
            packages: PackagesSpec {
                brew: Some(BrewSpec {
                    formulae: vec![],
                    casks: vec!["iterm2".into(), "visual-studio-code".into()],
                    taps: vec!["homebrew/cask".into()],
                    ..Default::default()
                }),
                ..Default::default()
            },
            ..Default::default()
        };

        let resources = extract_source_resources(&merged);
        assert!(
            resources.contains("packages.brew.iterm2"),
            "casks should appear as brew resources"
        );
        assert!(
            resources.contains("packages.brew.visual-studio-code"),
            "casks should appear as brew resources"
        );
        assert!(
            !resources.contains("packages.brew.homebrew/cask"),
            "taps should not appear as resources"
        );
        assert_eq!(resources.len(), 2);
    }

    // Cargo package lists map to "packages.cargo.*"; the `file` field does
    // not contribute resources.
    #[test]
    fn extract_source_resources_cargo_packages_only() {
        use crate::config::{CargoSpec, MergedProfile, PackagesSpec};

        let merged = MergedProfile {
            packages: PackagesSpec {
                cargo: Some(CargoSpec {
                    file: Some("Cargo.toml".into()),
                    packages: vec!["cargo-watch".into(), "cargo-expand".into()],
                }),
                ..Default::default()
            },
            ..Default::default()
        };

        let resources = extract_source_resources(&merged);
        assert!(resources.contains("packages.cargo.cargo-watch"));
        assert!(resources.contains("packages.cargo.cargo-expand"));
        assert_eq!(resources.len(), 2);
    }

    // npm globals map to "packages.npm.*".
    #[test]
    fn extract_source_resources_npm_globals() {
        use crate::config::{MergedProfile, NpmSpec, PackagesSpec};

        let merged = MergedProfile {
            packages: PackagesSpec {
                npm: Some(NpmSpec {
                    file: None,
                    global: vec!["typescript".into(), "eslint".into()],
                }),
                ..Default::default()
            },
            ..Default::default()
        };

        let resources = extract_source_resources(&merged);
        assert!(resources.contains("packages.npm.typescript"));
        assert!(resources.contains("packages.npm.eslint"));
        assert_eq!(resources.len(), 2);
    }
4447
    // A Reject policy for new recommended items silently drops them: no
    // pending decisions are recorded and nothing is reported as excluded.
    #[test]
    fn process_source_decisions_reject_policy_silently_skips() {
        use crate::config::{CargoSpec, PackagesSpec};
        let store = test_state();
        let notifier = Notifier::new(NotifyMethod::Stdout, None);
        let policy = AutoApplyPolicyConfig {
            new_recommended: PolicyAction::Reject,
            ..Default::default()
        };

        let merged = MergedProfile {
            packages: PackagesSpec {
                cargo: Some(CargoSpec {
                    file: None,
                    packages: vec!["bat".into()],
                }),
                ..Default::default()
            },
            ..Default::default()
        };

        let excluded = process_source_decisions(&store, "acme", &merged, &policy, &notifier);

        let pending = store.pending_decisions().unwrap();
        assert!(
            pending.is_empty(),
            "reject policy should not create pending decisions"
        );
        assert!(
            excluded.is_empty(),
            "reject policy does not create pending records so nothing is excluded"
        );
    }
4484
    // When several Server origins exist, `find_server_url` must return the
    // first one in declaration order.
    #[test]
    fn find_server_url_picks_first_server_among_duplicates() {
        use crate::config::*;
        let config = CfgdConfig {
            api_version: crate::API_VERSION.into(),
            kind: "Config".into(),
            metadata: ConfigMetadata {
                name: "test".into(),
            },
            spec: ConfigSpec {
                profile: Some("default".into()),
                origin: vec![
                    OriginSpec {
                        origin_type: OriginType::Server,
                        url: "https://first-server.example.com".into(),
                        branch: "main".into(),
                        auth: None,
                        ssh_strict_host_key_checking: Default::default(),
                    },
                    OriginSpec {
                        origin_type: OriginType::Server,
                        url: "https://second-server.example.com".into(),
                        branch: "main".into(),
                        auth: None,
                        ssh_strict_host_key_checking: Default::default(),
                    },
                ],
                daemon: None,
                secrets: None,
                sources: vec![],
                theme: None,
                modules: None,
                security: None,
                aliases: std::collections::HashMap::new(),
                file_strategy: crate::config::FileStrategy::default(),
                ai: None,
                compliance: None,
            },
        };
        assert_eq!(
            find_server_url(&config),
            Some("https://first-server.example.com".to_string()),
            "should return the first server origin when multiple exist"
        );
    }
4532
    // An empty package set and a one-crate package set must never collide in
    // the config hash.
    #[test]
    fn compute_config_hash_empty_vs_nonempty_differ() {
        use crate::config::{
            CargoSpec, LayerPolicy, MergedProfile, PackagesSpec, ProfileLayer, ProfileSpec,
            ResolvedProfile,
        };

        let empty_resolved = ResolvedProfile {
            layers: vec![ProfileLayer {
                source: "local".into(),
                profile_name: "empty".into(),
                priority: 1000,
                policy: LayerPolicy::Local,
                spec: ProfileSpec::default(),
            }],
            merged: MergedProfile {
                packages: PackagesSpec::default(),
                ..Default::default()
            },
        };

        let nonempty_resolved = ResolvedProfile {
            layers: vec![ProfileLayer {
                source: "local".into(),
                profile_name: "nonempty".into(),
                priority: 1000,
                policy: LayerPolicy::Local,
                spec: ProfileSpec::default(),
            }],
            merged: MergedProfile {
                packages: PackagesSpec {
                    cargo: Some(CargoSpec {
                        file: None,
                        packages: vec!["bat".into()],
                    }),
                    ..Default::default()
                },
                ..Default::default()
            },
        };

        let hash_empty = compute_config_hash(&empty_resolved).unwrap();
        let hash_nonempty = compute_config_hash(&nonempty_resolved).unwrap();
        assert_ne!(
            hash_empty, hash_nonempty,
            "empty and non-empty packages should produce different hashes"
        );
    }
4583
    // An Ignore policy for new recommended items behaves like Reject from the
    // store's perspective: no pending decisions, nothing excluded.
    #[test]
    fn process_source_decisions_ignore_policy_no_pending_no_excluded() {
        use crate::config::{CargoSpec, PackagesSpec};
        let store = test_state();
        let notifier = Notifier::new(NotifyMethod::Stdout, None);
        let policy = AutoApplyPolicyConfig {
            new_recommended: PolicyAction::Ignore,
            ..Default::default()
        };

        let merged = MergedProfile {
            packages: PackagesSpec {
                cargo: Some(CargoSpec {
                    file: None,
                    packages: vec!["bat".into()],
                }),
                ..Default::default()
            },
            ..Default::default()
        };

        let excluded = process_source_decisions(&store, "acme", &merged, &policy, &notifier);

        let pending = store.pending_decisions().unwrap();
        assert!(
            pending.is_empty(),
            "ignore policy should not create pending decisions"
        );
        assert!(
            excluded.is_empty(),
            "ignore policy does not create pending records so nothing is excluded"
        );
    }
4620
4621 #[test]
4624 fn notifier_desktop_mode_does_not_panic() {
4625 let notifier = Notifier::new(NotifyMethod::Desktop, None);
4628 assert!(matches!(notifier.method, NotifyMethod::Desktop));
4629 assert!(
4630 notifier.webhook_url.is_none(),
4631 "desktop notifier should not have a webhook URL"
4632 );
4633 notifier.notify("test title", "test body");
4634 }
4635
4636 #[tokio::test]
4637 async fn notifier_webhook_with_url_does_not_panic() {
4638 let notifier = Notifier::new(
4640 NotifyMethod::Webhook,
4641 Some("http://127.0.0.1:1/nonexistent".to_string()),
4642 );
4643 notifier.notify("test", "message to invalid webhook");
4644 }
4645
4646 #[test]
4647 fn notifier_stdout_writes_info() {
4648 let notifier = Notifier::new(NotifyMethod::Stdout, None);
4651 assert!(matches!(notifier.method, NotifyMethod::Stdout));
4652 notifier.notify("drift event", "file /etc/foo changed");
4655 notifier.notify("", ""); notifier.notify("special chars: <>&\"'", "path: /home/user/.config/cfgd");
4657 }
4658
4659 #[test]
4662 fn daemon_state_with_multiple_sources() {
4663 let mut state = DaemonState::new();
4664 state.sources.push(SourceStatus {
4665 name: "acme-corp".to_string(),
4666 last_sync: Some("2026-03-30T10:00:00Z".to_string()),
4667 last_reconcile: None,
4668 drift_count: 2,
4669 status: "active".to_string(),
4670 });
4671 state.sources.push(SourceStatus {
4672 name: "team-tools".to_string(),
4673 last_sync: None,
4674 last_reconcile: Some("2026-03-30T11:00:00Z".to_string()),
4675 drift_count: 0,
4676 status: "error".to_string(),
4677 });
4678
4679 let response = state.to_response();
4680 assert_eq!(response.sources.len(), 3); assert_eq!(response.sources[1].name, "acme-corp");
4682 assert_eq!(response.sources[1].drift_count, 2);
4683 assert_eq!(response.sources[2].name, "team-tools");
4684 assert_eq!(response.sources[2].status, "error");
4685 }
4686
4687 #[test]
4690 fn daemon_state_drift_increments_propagate_to_response() {
4691 let mut state = DaemonState::new();
4692 state.drift_count = 10;
4693 if let Some(source) = state.sources.first_mut() {
4694 source.drift_count = 7;
4695 }
4696
4697 let response = state.to_response();
4698 assert_eq!(response.drift_count, 10);
4699 assert_eq!(response.sources[0].drift_count, 7);
4700 }
4701
4702 #[test]
4705 fn daemon_state_module_last_reconcile_tracking() {
4706 let mut state = DaemonState::new();
4707 state.module_last_reconcile.insert(
4708 "security-baseline".to_string(),
4709 "2026-03-30T12:00:00Z".to_string(),
4710 );
4711 state
4712 .module_last_reconcile
4713 .insert("dev-tools".to_string(), "2026-03-30T12:05:00Z".to_string());
4714
4715 assert_eq!(state.module_last_reconcile.len(), 2);
4716 assert_eq!(
4717 state
4718 .module_last_reconcile
4719 .get("security-baseline")
4720 .unwrap(),
4721 "2026-03-30T12:00:00Z"
4722 );
4723 assert_eq!(
4724 state.module_last_reconcile.get("dev-tools").unwrap(),
4725 "2026-03-30T12:05:00Z"
4726 );
4727
4728 let response = state.to_response();
4730 assert!(response.module_reconcile.is_empty());
4731 }
4732
4733 #[test]
4736 fn daemon_status_response_update_available_present() {
4737 let response = DaemonStatusResponse {
4738 running: true,
4739 pid: 99,
4740 uptime_secs: 600,
4741 last_reconcile: None,
4742 last_sync: None,
4743 drift_count: 0,
4744 sources: vec![],
4745 update_available: Some("3.0.0".to_string()),
4746 module_reconcile: vec![],
4747 };
4748
4749 let json = serde_json::to_string(&response).unwrap();
4750 assert!(json.contains("\"updateAvailable\":\"3.0.0\""));
4751 let parsed: DaemonStatusResponse = serde_json::from_str(&json).unwrap();
4752 assert_eq!(parsed.update_available.as_deref(), Some("3.0.0"));
4753 }
4754
4755 #[test]
4758 fn sync_task_local_defaults() {
4759 let task = SyncTask {
4760 source_name: "local".to_string(),
4761 repo_path: PathBuf::from("/home/user/.config/cfgd"),
4762 auto_pull: false,
4763 auto_push: false,
4764 auto_apply: true,
4765 interval: Duration::from_secs(DEFAULT_SYNC_SECS),
4766 last_synced: None,
4767 require_signed_commits: false,
4768 allow_unsigned: false,
4769 };
4770
4771 assert_eq!(task.source_name, "local");
4772 assert!(task.auto_apply);
4773 assert!(!task.auto_pull);
4774 assert!(!task.auto_push);
4775 assert!(task.last_synced.is_none());
4776 assert_eq!(task.interval.as_secs(), 300);
4777 }
4778
4779 #[test]
4780 fn sync_task_source_with_signing() {
4781 let task = SyncTask {
4782 source_name: "acme-corp".to_string(),
4783 repo_path: PathBuf::from("/tmp/sources/acme-corp"),
4784 auto_pull: true,
4785 auto_push: false,
4786 auto_apply: false,
4787 interval: Duration::from_secs(600),
4788 last_synced: Some(Instant::now()),
4789 require_signed_commits: true,
4790 allow_unsigned: false,
4791 };
4792
4793 assert_eq!(task.source_name, "acme-corp");
4794 assert!(task.auto_pull);
4795 assert!(!task.auto_push);
4796 assert!(!task.auto_apply);
4797 assert!(task.require_signed_commits);
4798 assert!(!task.allow_unsigned);
4799 assert!(task.last_synced.is_some());
4800 }
4801
4802 #[test]
4803 fn sync_task_allow_unsigned_overrides_require_signed() {
4804 let task = SyncTask {
4805 source_name: "relaxed".to_string(),
4806 repo_path: PathBuf::from("/tmp/sources/relaxed"),
4807 auto_pull: true,
4808 auto_push: false,
4809 auto_apply: true,
4810 interval: Duration::from_secs(300),
4811 last_synced: None,
4812 require_signed_commits: true,
4813 allow_unsigned: true,
4814 };
4815
4816 assert!(task.require_signed_commits);
4818 assert!(task.allow_unsigned);
4819 }
4820
4821 #[test]
4824 fn reconcile_task_default() {
4825 let task = ReconcileTask {
4826 entity: "__default__".to_string(),
4827 interval: Duration::from_secs(DEFAULT_RECONCILE_SECS),
4828 auto_apply: false,
4829 drift_policy: config::DriftPolicy::default(),
4830 last_reconciled: None,
4831 };
4832
4833 assert_eq!(task.entity, "__default__");
4834 assert_eq!(task.interval.as_secs(), 300);
4835 assert!(!task.auto_apply);
4836 assert!(task.last_reconciled.is_none());
4837 }
4838
4839 #[test]
4840 fn reconcile_task_per_module() {
4841 let task = ReconcileTask {
4842 entity: "security-baseline".to_string(),
4843 interval: Duration::from_secs(60),
4844 auto_apply: true,
4845 drift_policy: config::DriftPolicy::Auto,
4846 last_reconciled: Some(Instant::now()),
4847 };
4848
4849 assert_eq!(task.entity, "security-baseline");
4850 assert_eq!(task.interval.as_secs(), 60);
4851 assert!(task.auto_apply);
4852 assert!(task.last_reconciled.is_some());
4853 }
4854
4855 #[test]
4858 fn pending_resource_paths_empty_store() {
4859 let store = test_state();
4860 let paths = pending_resource_paths(&store);
4861 assert!(paths.is_empty());
4862 }
4863
4864 #[test]
4865 fn pending_resource_paths_with_decisions() {
4866 let store = test_state();
4867 store
4868 .upsert_pending_decision(
4869 "acme",
4870 "packages.cargo.bat",
4871 "recommended",
4872 "install",
4873 "recommended packages.cargo.bat (from acme)",
4874 )
4875 .unwrap();
4876 store
4877 .upsert_pending_decision(
4878 "acme",
4879 "env.EDITOR",
4880 "recommended",
4881 "install",
4882 "recommended env.EDITOR (from acme)",
4883 )
4884 .unwrap();
4885
4886 let paths = pending_resource_paths(&store);
4887 assert_eq!(paths.len(), 2);
4888 assert!(paths.contains("packages.cargo.bat"));
4889 assert!(paths.contains("env.EDITOR"));
4890 }
4891
4892 #[test]
4895 fn infer_item_tier_locked_keyword() {
4896 assert_eq!(infer_item_tier("files.locked-module-config.yaml"), "locked");
4897 }
4898
4899 #[test]
4900 fn infer_item_tier_security_in_system() {
4901 assert_eq!(infer_item_tier("system.security-baseline"), "locked");
4902 }
4903
4904 #[test]
4905 fn infer_item_tier_normal_package() {
4906 assert_eq!(infer_item_tier("packages.brew.curl"), "recommended");
4907 }
4908
4909 #[test]
4910 fn infer_item_tier_normal_env_var() {
4911 assert_eq!(infer_item_tier("env.GOPATH"), "recommended");
4912 }
4913
4914 #[test]
4915 fn infer_item_tier_normal_file() {
4916 assert_eq!(infer_item_tier("files./home/user/.zshrc"), "recommended");
4917 }
4918
4919 #[test]
4922 fn extract_source_resources_aliases_not_tracked() {
4923 use crate::config::{MergedProfile, ShellAlias};
4924
4925 let merged = MergedProfile {
4926 aliases: vec![
4927 ShellAlias {
4928 name: "ll".into(),
4929 command: "ls -la".into(),
4930 },
4931 ShellAlias {
4932 name: "gp".into(),
4933 command: "git push".into(),
4934 },
4935 ],
4936 ..Default::default()
4937 };
4938
4939 let resources = extract_source_resources(&merged);
4940 assert!(
4942 resources.is_empty(),
4943 "aliases should not be tracked as source resources"
4944 );
4945 }
4946
4947 #[test]
4950 fn extract_source_resources_full_profile() {
4951 use crate::config::{
4952 AptSpec, BrewSpec, CargoSpec, EnvVar, FilesSpec, ManagedFileSpec, MergedProfile,
4953 NpmSpec, PackagesSpec,
4954 };
4955
4956 let mut system = std::collections::HashMap::new();
4957 system.insert("sysctl".into(), serde_yaml::Value::Null);
4958
4959 let merged = MergedProfile {
4960 packages: PackagesSpec {
4961 brew: Some(BrewSpec {
4962 formulae: vec!["ripgrep".into()],
4963 casks: vec!["firefox".into()],
4964 ..Default::default()
4965 }),
4966 apt: Some(AptSpec {
4967 file: None,
4968 packages: vec!["curl".into()],
4969 }),
4970 cargo: Some(CargoSpec {
4971 file: None,
4972 packages: vec!["bat".into()],
4973 }),
4974 pipx: vec!["black".into()],
4975 dnf: vec!["vim".into()],
4976 npm: Some(NpmSpec {
4977 file: None,
4978 global: vec!["typescript".into()],
4979 }),
4980 ..Default::default()
4981 },
4982 files: FilesSpec {
4983 managed: vec![ManagedFileSpec {
4984 source: "dotfiles/.zshrc".into(),
4985 target: PathBuf::from("/home/user/.zshrc"),
4986 strategy: None,
4987 private: false,
4988 origin: None,
4989 encryption: None,
4990 permissions: None,
4991 }],
4992 ..Default::default()
4993 },
4994 env: vec![
4995 EnvVar {
4996 name: "EDITOR".into(),
4997 value: "vim".into(),
4998 },
4999 EnvVar {
5000 name: "GOPATH".into(),
5001 value: "/home/user/go".into(),
5002 },
5003 ],
5004 system,
5005 ..Default::default()
5006 };
5007
5008 let resources = extract_source_resources(&merged);
5009 assert!(resources.contains("packages.brew.ripgrep"));
5011 assert!(resources.contains("packages.brew.firefox"));
5012 assert!(resources.contains("packages.apt.curl"));
5013 assert!(resources.contains("packages.cargo.bat"));
5014 assert!(resources.contains("packages.pipx.black"));
5015 assert!(resources.contains("packages.dnf.vim"));
5016 assert!(resources.contains("packages.npm.typescript"));
5017 assert!(resources.contains("files./home/user/.zshrc"));
5018 assert!(resources.contains("env.EDITOR"));
5019 assert!(resources.contains("env.GOPATH"));
5020 assert!(resources.contains("system.sysctl"));
5021 assert_eq!(resources.len(), 11);
5023 }
5024
5025 #[test]
5028 fn process_source_decisions_locked_item_notify_policy() {
5029 let store = test_state();
5030 let notifier = Notifier::new(NotifyMethod::Stdout, None);
5031 let policy = AutoApplyPolicyConfig {
5032 new_recommended: PolicyAction::Accept,
5033 locked_conflict: PolicyAction::Notify,
5034 ..Default::default()
5035 };
5036
5037 let mut system = std::collections::HashMap::new();
5039 system.insert("security-baseline".into(), serde_yaml::Value::Null);
5040
5041 let merged = MergedProfile {
5042 system,
5043 ..Default::default()
5044 };
5045
5046 let excluded = process_source_decisions(&store, "corp", &merged, &policy, ¬ifier);
5047
5048 let pending = store.pending_decisions().unwrap();
5051 assert_eq!(pending.len(), 1);
5052 assert_eq!(pending[0].resource, "system.security-baseline");
5053 assert!(excluded.contains("system.security-baseline"));
5054 }
5055
5056 #[test]
5059 fn process_source_decisions_different_sources_independent() {
5060 use crate::config::{CargoSpec, PackagesSpec};
5061 let store = test_state();
5062 let notifier = Notifier::new(NotifyMethod::Stdout, None);
5063 let policy = AutoApplyPolicyConfig {
5064 new_recommended: PolicyAction::Accept,
5065 ..Default::default()
5066 };
5067
5068 let merged_a = MergedProfile {
5069 packages: PackagesSpec {
5070 cargo: Some(CargoSpec {
5071 file: None,
5072 packages: vec!["bat".into()],
5073 }),
5074 ..Default::default()
5075 },
5076 ..Default::default()
5077 };
5078
5079 let merged_b = MergedProfile {
5080 packages: PackagesSpec {
5081 cargo: Some(CargoSpec {
5082 file: None,
5083 packages: vec!["ripgrep".into()],
5084 }),
5085 ..Default::default()
5086 },
5087 ..Default::default()
5088 };
5089
5090 let excluded_a =
5091 process_source_decisions(&store, "source-a", &merged_a, &policy, ¬ifier);
5092 let excluded_b =
5093 process_source_decisions(&store, "source-b", &merged_b, &policy, ¬ifier);
5094
5095 assert!(excluded_a.is_empty());
5097 assert!(excluded_b.is_empty());
5098 }
5099
5100 #[test]
5103 fn process_source_decisions_removed_items_update_hash() {
5104 use crate::config::{CargoSpec, PackagesSpec};
5105 let store = test_state();
5106 let notifier = Notifier::new(NotifyMethod::Stdout, None);
5107 let policy = AutoApplyPolicyConfig {
5108 new_recommended: PolicyAction::Accept,
5109 ..Default::default()
5110 };
5111
5112 let merged1 = MergedProfile {
5114 packages: PackagesSpec {
5115 cargo: Some(CargoSpec {
5116 file: None,
5117 packages: vec!["bat".into(), "ripgrep".into()],
5118 }),
5119 ..Default::default()
5120 },
5121 ..Default::default()
5122 };
5123 let _ = process_source_decisions(&store, "acme", &merged1, &policy, ¬ifier);
5124
5125 let merged2 = MergedProfile {
5127 packages: PackagesSpec {
5128 cargo: Some(CargoSpec {
5129 file: None,
5130 packages: vec!["bat".into()],
5131 }),
5132 ..Default::default()
5133 },
5134 ..Default::default()
5135 };
5136 let excluded = process_source_decisions(&store, "acme", &merged2, &policy, ¬ifier);
5137
5138 let pending = store.pending_decisions().unwrap();
5140 assert!(pending.is_empty());
5141 assert!(excluded.is_empty());
5142 }
5143
5144 #[test]
5147 fn source_status_defaults() {
5148 let status = SourceStatus {
5149 name: "test".to_string(),
5150 last_sync: None,
5151 last_reconcile: None,
5152 drift_count: 0,
5153 status: "active".to_string(),
5154 };
5155
5156 assert!(status.last_sync.is_none());
5157 assert!(status.last_reconcile.is_none());
5158 assert_eq!(status.drift_count, 0);
5159 }
5160
5161 #[test]
5164 fn source_status_all_fields_populated() {
5165 let status = SourceStatus {
5166 name: "corp-source".to_string(),
5167 last_sync: Some("2026-03-30T10:00:00Z".to_string()),
5168 last_reconcile: Some("2026-03-30T10:05:00Z".to_string()),
5169 drift_count: 15,
5170 status: "error".to_string(),
5171 };
5172
5173 let json = serde_json::to_string(&status).unwrap();
5174 let parsed: SourceStatus = serde_json::from_str(&json).unwrap();
5175 assert_eq!(parsed.name, "corp-source");
5176 assert_eq!(parsed.last_sync.as_deref(), Some("2026-03-30T10:00:00Z"));
5177 assert_eq!(
5178 parsed.last_reconcile.as_deref(),
5179 Some("2026-03-30T10:05:00Z")
5180 );
5181 assert_eq!(parsed.drift_count, 15);
5182 assert_eq!(parsed.status, "error");
5183 }
5184
5185 #[test]
5188 fn daemon_status_response_deserializes_from_minimal_json() {
5189 let json = r#"{
5190 "running": false,
5191 "pid": 0,
5192 "uptimeSecs": 0,
5193 "lastReconcile": null,
5194 "lastSync": null,
5195 "driftCount": 0,
5196 "sources": []
5197 }"#;
5198
5199 let parsed: DaemonStatusResponse = serde_json::from_str(json).unwrap();
5200 assert!(!parsed.running);
5201 assert_eq!(parsed.pid, 0);
5202 assert!(parsed.module_reconcile.is_empty());
5203 assert!(parsed.update_available.is_none());
5204 }
5205
5206 #[test]
5209 fn checkin_payload_serializes_all_fields() {
5210 let payload = CheckinPayload {
5211 device_id: "sha256hex".into(),
5212 hostname: "myhost.local".into(),
5213 os: "linux".into(),
5214 arch: "aarch64".into(),
5215 config_hash: "abcd1234".into(),
5216 };
5217
5218 let json = serde_json::to_string(&payload).unwrap();
5219 assert!(json.contains("\"device_id\""));
5220 assert!(json.contains("\"hostname\""));
5221 assert!(json.contains("\"os\""));
5222 assert!(json.contains("\"arch\""));
5223 assert!(json.contains("\"config_hash\""));
5224 assert!(json.contains("aarch64"));
5225 }
5226
5227 #[test]
5230 fn parse_duration_large_seconds() {
5231 assert_eq!(
5232 parse_duration_or_default("86400s"),
5233 Duration::from_secs(86400)
5234 );
5235 }
5236
5237 #[test]
5238 fn parse_duration_large_hours() {
5239 assert_eq!(parse_duration_or_default("24h"), Duration::from_secs(86400));
5240 }
5241
5242 #[test]
5243 fn parse_duration_empty_string_falls_back() {
5244 assert_eq!(
5245 parse_duration_or_default(""),
5246 Duration::from_secs(DEFAULT_RECONCILE_SECS)
5247 );
5248 }
5249
5250 #[test]
5253 fn hash_resources_large_set_deterministic() {
5254 let set1: HashSet<String> = (0..100)
5255 .map(|i| format!("packages.brew.pkg{}", i))
5256 .collect();
5257 let set2: HashSet<String> = (0..100)
5258 .rev()
5259 .map(|i| format!("packages.brew.pkg{}", i))
5260 .collect();
5261
5262 assert_eq!(hash_resources(&set1), hash_resources(&set2));
5263 }
5264
5265 #[test]
5268 fn module_reconcile_status_camel_case_fields() {
5269 let status = ModuleReconcileStatus {
5270 name: "test".into(),
5271 interval: "60s".into(),
5272 auto_apply: true,
5273 drift_policy: "Auto".into(),
5274 last_reconcile: Some("2026-01-01T00:00:00Z".into()),
5275 };
5276
5277 let json = serde_json::to_string(&status).unwrap();
5278 assert!(json.contains("\"autoApply\""));
5279 assert!(json.contains("\"driftPolicy\""));
5280 assert!(json.contains("\"lastReconcile\""));
5281 assert!(!json.contains("\"auto_apply\""));
5283 assert!(!json.contains("\"drift_policy\""));
5284 assert!(!json.contains("\"last_reconcile\""));
5285 }
5286
5287 #[test]
5290 fn daemon_status_response_camel_case_uptime() {
5291 let response = DaemonStatusResponse {
5292 running: true,
5293 pid: 1,
5294 uptime_secs: 42,
5295 last_reconcile: None,
5296 last_sync: None,
5297 drift_count: 0,
5298 sources: vec![],
5299 update_available: None,
5300 module_reconcile: vec![],
5301 };
5302
5303 let json = serde_json::to_string(&response).unwrap();
5304 assert!(json.contains("\"uptimeSecs\""));
5305 assert!(json.contains("\"driftCount\""));
5306 assert!(!json.contains("\"uptime_secs\""));
5307 assert!(!json.contains("\"drift_count\""));
5308 }
5309
5310 #[test]
5313 fn process_source_decisions_mixed_tiers_accept_recommended_notify_locked() {
5314 use crate::config::{CargoSpec, PackagesSpec};
5315
5316 let store = test_state();
5317 let notifier = Notifier::new(NotifyMethod::Stdout, None);
5318 let policy = AutoApplyPolicyConfig {
5319 new_recommended: PolicyAction::Accept,
5320 new_optional: PolicyAction::Ignore,
5321 locked_conflict: PolicyAction::Notify,
5322 };
5323
5324 let mut system = std::collections::HashMap::new();
5326 system.insert("security-policy".into(), serde_yaml::Value::Null);
5327
5328 let merged = MergedProfile {
5329 packages: PackagesSpec {
5330 cargo: Some(CargoSpec {
5331 file: None,
5332 packages: vec!["bat".into()],
5333 }),
5334 ..Default::default()
5335 },
5336 system,
5337 ..Default::default()
5338 };
5339
5340 let excluded = process_source_decisions(&store, "corp", &merged, &policy, ¬ifier);
5341
5342 let pending = store.pending_decisions().unwrap();
5343 assert_eq!(pending.len(), 1);
5345 assert_eq!(pending[0].resource, "system.security-policy");
5346 assert!(!excluded.contains("packages.cargo.bat"));
5348 assert!(excluded.contains("system.security-policy"));
5350 }
5351
5352 #[test]
5355 fn generate_device_id_hex_format() {
5356 let id = generate_device_id().unwrap();
5357 assert!(
5359 id.chars().all(|c| c.is_ascii_hexdigit()),
5360 "device ID should be hex: {}",
5361 id
5362 );
5363 }
5364
5365 #[test]
5368 fn extract_source_resources_multiple_files() {
5369 use crate::config::{FilesSpec, ManagedFileSpec, MergedProfile};
5370
5371 let merged = MergedProfile {
5372 files: FilesSpec {
5373 managed: vec![
5374 ManagedFileSpec {
5375 source: "dotfiles/.zshrc".into(),
5376 target: PathBuf::from("/home/user/.zshrc"),
5377 strategy: None,
5378 private: false,
5379 origin: None,
5380 encryption: None,
5381 permissions: None,
5382 },
5383 ManagedFileSpec {
5384 source: "dotfiles/.vimrc".into(),
5385 target: PathBuf::from("/home/user/.vimrc"),
5386 strategy: None,
5387 private: false,
5388 origin: None,
5389 encryption: None,
5390 permissions: None,
5391 },
5392 ManagedFileSpec {
5393 source: "dotfiles/.gitconfig".into(),
5394 target: PathBuf::from("/home/user/.gitconfig"),
5395 strategy: None,
5396 private: true,
5397 origin: None,
5398 encryption: None,
5399 permissions: None,
5400 },
5401 ],
5402 ..Default::default()
5403 },
5404 ..Default::default()
5405 };
5406
5407 let resources = extract_source_resources(&merged);
5408 assert_eq!(resources.len(), 3);
5409 assert!(resources.contains("files./home/user/.zshrc"));
5410 assert!(resources.contains("files./home/user/.vimrc"));
5411 assert!(resources.contains("files./home/user/.gitconfig"));
5412 }
5413
5414 #[test]
5417 fn extract_source_resources_multiple_env_vars() {
5418 use crate::config::{EnvVar, MergedProfile};
5419
5420 let merged = MergedProfile {
5421 env: vec![
5422 EnvVar {
5423 name: "PATH".into(),
5424 value: "/usr/local/bin:$PATH".into(),
5425 },
5426 EnvVar {
5427 name: "EDITOR".into(),
5428 value: "nvim".into(),
5429 },
5430 EnvVar {
5431 name: "GOPATH".into(),
5432 value: "/home/user/go".into(),
5433 },
5434 ],
5435 ..Default::default()
5436 };
5437
5438 let resources = extract_source_resources(&merged);
5439 assert_eq!(resources.len(), 3);
5440 assert!(resources.contains("env.PATH"));
5441 assert!(resources.contains("env.EDITOR"));
5442 assert!(resources.contains("env.GOPATH"));
5443 }
5444
5445 #[test]
5448 fn extract_source_resources_multiple_system_keys() {
5449 use crate::config::MergedProfile;
5450
5451 let mut system = std::collections::HashMap::new();
5452 system.insert("sysctl".into(), serde_yaml::Value::Null);
5453 system.insert("kernelModules".into(), serde_yaml::Value::Null);
5454 system.insert("apparmor".into(), serde_yaml::Value::Null);
5455
5456 let merged = MergedProfile {
5457 system,
5458 ..Default::default()
5459 };
5460
5461 let resources = extract_source_resources(&merged);
5462 assert_eq!(resources.len(), 3);
5463 assert!(resources.contains("system.sysctl"));
5464 assert!(resources.contains("system.kernelModules"));
5465 assert!(resources.contains("system.apparmor"));
5466 }
5467
5468 #[test]
5471 fn daemon_state_uptime_increases() {
5472 let state = DaemonState::new();
5473 std::thread::sleep(Duration::from_millis(10));
5475 let response = state.to_response();
5476 assert!(response.uptime_secs < 10);
5479 }
5480
    #[tokio::test]
    async fn health_connection_health_endpoint() {
        // Drives handle_health_connection over an in-memory duplex pipe and
        // checks that GET /health returns 200 with the expected JSON keys.
        let state = Arc::new(Mutex::new(DaemonState::new()));
        let (client, server) = tokio::io::duplex(4096);

        // Serve the connection on a task while this test acts as the client.
        let handler_state = Arc::clone(&state);
        let handler = tokio::spawn(async move {
            handle_health_connection(server, handler_state)
                .await
                .unwrap();
        });

        let (reader, mut writer) = tokio::io::split(client);
        writer
            .write_all(b"GET /health HTTP/1.1\r\nHost: localhost\r\n\r\n")
            .await
            .unwrap();
        // Half-close the write side so the handler sees end-of-request.
        writer.shutdown().await.unwrap();

        // Drain the raw response until the server closes its side.
        let mut buf_reader = tokio::io::BufReader::new(reader);
        let mut response = String::new();
        loop {
            let mut line = String::new();
            match buf_reader.read_line(&mut line).await {
                Ok(0) => break,
                Ok(_) => response.push_str(&line),
                Err(_) => break,
            }
        }

        handler.await.unwrap();

        assert!(
            response.starts_with("HTTP/1.1 200 OK"),
            "expected 200 OK, got: {}",
            &response[..response.len().min(40)]
        );
        assert!(response.contains("\"status\""));
        assert!(response.contains("\"pid\""));
        assert!(response.contains("\"uptime_secs\""));
    }
5527
    #[tokio::test]
    async fn health_connection_status_endpoint() {
        // GET /status must reflect live daemon state (drift count, camelCase
        // field names) over an in-memory duplex connection.
        let state = Arc::new(Mutex::new(DaemonState::new()));
        {
            // Seed some state before the request; the inner scope drops the
            // lock so the handler task can acquire it.
            let mut st = state.lock().await;
            st.drift_count = 3;
            st.last_reconcile = Some("2026-03-30T10:00:00Z".to_string());
        }

        let (client, server) = tokio::io::duplex(4096);

        let handler_state = Arc::clone(&state);
        let handler = tokio::spawn(async move {
            handle_health_connection(server, handler_state)
                .await
                .unwrap();
        });

        let (reader, mut writer) = tokio::io::split(client);
        writer
            .write_all(b"GET /status HTTP/1.1\r\nHost: localhost\r\n\r\n")
            .await
            .unwrap();
        // Half-close the write side so the handler sees end-of-request.
        writer.shutdown().await.unwrap();

        // Drain the raw response until the server closes its side.
        let mut buf_reader = tokio::io::BufReader::new(reader);
        let mut response = String::new();
        loop {
            let mut line = String::new();
            match buf_reader.read_line(&mut line).await {
                Ok(0) => break,
                Ok(_) => response.push_str(&line),
                Err(_) => break,
            }
        }

        handler.await.unwrap();

        assert!(
            response.starts_with("HTTP/1.1 200 OK"),
            "expected 200 OK, got: {}",
            &response[..response.len().min(40)]
        );
        assert!(
            response.contains("\"running\": true"),
            "response should contain running field: {}",
            &response
        );
        assert!(
            response.contains("\"driftCount\": 3"),
            "response should contain driftCount field: {}",
            &response
        );
    }
5586
    #[tokio::test]
    async fn health_connection_drift_endpoint() {
        // GET /drift must return 200 with a drift_count and an events list.
        let state = Arc::new(Mutex::new(DaemonState::new()));
        let (client, server) = tokio::io::duplex(4096);

        let handler_state = Arc::clone(&state);
        let handler = tokio::spawn(async move {
            handle_health_connection(server, handler_state)
                .await
                .unwrap();
        });

        let (reader, mut writer) = tokio::io::split(client);
        writer
            .write_all(b"GET /drift HTTP/1.1\r\nHost: localhost\r\n\r\n")
            .await
            .unwrap();
        // Half-close the write side so the handler sees end-of-request.
        writer.shutdown().await.unwrap();

        // Drain the raw response until the server closes its side.
        let mut buf_reader = tokio::io::BufReader::new(reader);
        let mut response = String::new();
        loop {
            let mut line = String::new();
            match buf_reader.read_line(&mut line).await {
                Ok(0) => break,
                Ok(_) => response.push_str(&line),
                Err(_) => break,
            }
        }

        handler.await.unwrap();

        assert!(
            response.starts_with("HTTP/1.1 200 OK"),
            "expected 200 OK, got: {}",
            &response[..response.len().min(40)]
        );
        assert!(response.contains("\"drift_count\""));
        assert!(response.contains("\"events\""));
    }
5629
    #[tokio::test]
    async fn health_connection_unknown_path_returns_404() {
        // An unrecognized path must yield a 404 with a JSON error body.
        let state = Arc::new(Mutex::new(DaemonState::new()));
        let (client, server) = tokio::io::duplex(4096);

        let handler_state = Arc::clone(&state);
        let handler = tokio::spawn(async move {
            handle_health_connection(server, handler_state)
                .await
                .unwrap();
        });

        let (reader, mut writer) = tokio::io::split(client);
        writer
            .write_all(b"GET /nonexistent HTTP/1.1\r\nHost: localhost\r\n\r\n")
            .await
            .unwrap();
        // Half-close the write side so the handler sees end-of-request.
        writer.shutdown().await.unwrap();

        // Drain the raw response until the server closes its side.
        let mut buf_reader = tokio::io::BufReader::new(reader);
        let mut response = String::new();
        loop {
            let mut line = String::new();
            match buf_reader.read_line(&mut line).await {
                Ok(0) => break,
                Ok(_) => response.push_str(&line),
                Err(_) => break,
            }
        }

        handler.await.unwrap();

        assert!(
            response.starts_with("HTTP/1.1 404 Not Found"),
            "expected 404, got: {}",
            &response[..response.len().min(40)]
        );
        assert!(response.contains("\"error\""));
    }
5671
    #[test]
    fn git_pull_no_remote_returns_up_to_date() {
        // NOTE(review): despite the name, this test DOES configure a remote;
        // the scenario exercised is "remote has no new commits", so git_pull
        // must return Ok(false). Consider renaming for clarity.
        let tmp = tempfile::TempDir::new().unwrap();
        let bare_dir = tmp.path().join("bare.git");
        let work_dir = tmp.path().join("work");

        // Bare repo acts as the "origin" remote.
        std::fs::create_dir_all(&bare_dir).unwrap();
        git2::Repository::init_bare(&bare_dir).unwrap();

        let repo = git2::Repository::clone(bare_dir.to_str().unwrap(), &work_dir).unwrap();

        // Commit identity is required for repo.signature() below.
        let mut config = repo.config().unwrap();
        config.set_str("user.name", "cfgd-test").unwrap();
        config.set_str("user.email", "test@cfgd.io").unwrap();

        // Create the initial commit in the work clone...
        let readme = work_dir.join("README");
        std::fs::write(&readme, "test\n").unwrap();
        let mut index = repo.index().unwrap();
        index.add_path(Path::new("README")).unwrap();
        index.write().unwrap();
        let tree_id = index.write_tree().unwrap();
        let tree = repo.find_tree(tree_id).unwrap();
        let sig = repo.signature().unwrap();
        repo.commit(Some("HEAD"), &sig, &sig, "initial", &tree, &[])
            .unwrap();

        // ...and publish it, so local and remote are in sync.
        let mut remote = repo.find_remote("origin").unwrap();
        remote
            .push(&["refs/heads/master:refs/heads/master"], None)
            .unwrap();

        // Pulling an up-to-date repo must succeed and report no changes.
        let result = git_pull(&work_dir);
        assert!(result.is_ok(), "git_pull failed: {:?}", result);
        assert!(!result.unwrap(), "expected no changes");
    }
5715
    #[test]
    fn git_pull_with_remote_changes_returns_true() {
        // Scenario: a second clone ("pusher") pushes a new commit to the bare
        // remote; git_pull on the original work tree must fast-forward and
        // report that changes were received.
        let tmp = tempfile::TempDir::new().unwrap();
        let bare_dir = tmp.path().join("bare.git");
        let work_dir = tmp.path().join("work");
        let pusher_dir = tmp.path().join("pusher");

        // Bare repo acts as the shared "origin" remote.
        std::fs::create_dir_all(&bare_dir).unwrap();
        git2::Repository::init_bare(&bare_dir).unwrap();

        let repo = git2::Repository::clone(bare_dir.to_str().unwrap(), &work_dir).unwrap();
        {
            // Commit identity is required for repo.signature() below.
            let mut config = repo.config().unwrap();
            config.set_str("user.name", "cfgd-test").unwrap();
            config.set_str("user.email", "test@cfgd.io").unwrap();
        }

        // Seed an initial commit in the work clone and publish it.
        std::fs::write(work_dir.join("README"), "v1\n").unwrap();
        {
            let mut index = repo.index().unwrap();
            index.add_path(Path::new("README")).unwrap();
            index.write().unwrap();
            let tree_id = index.write_tree().unwrap();
            let tree = repo.find_tree(tree_id).unwrap();
            let sig = repo.signature().unwrap();
            repo.commit(Some("HEAD"), &sig, &sig, "initial", &tree, &[])
                .unwrap();
        }
        {
            let mut remote = repo.find_remote("origin").unwrap();
            remote
                .push(&["refs/heads/master:refs/heads/master"], None)
                .unwrap();
        }

        // A second clone adds a commit on top and pushes it to the remote.
        let pusher = git2::Repository::clone(bare_dir.to_str().unwrap(), &pusher_dir).unwrap();
        {
            let mut config = pusher.config().unwrap();
            config.set_str("user.name", "cfgd-pusher").unwrap();
            config.set_str("user.email", "pusher@cfgd.io").unwrap();
        }
        std::fs::write(pusher_dir.join("NEW_FILE"), "hello\n").unwrap();
        {
            let mut index = pusher.index().unwrap();
            index.add_path(Path::new("NEW_FILE")).unwrap();
            index.write().unwrap();
            let tree_id = index.write_tree().unwrap();
            let tree = pusher.find_tree(tree_id).unwrap();
            let sig = pusher.signature().unwrap();
            let parent = pusher.head().unwrap().peel_to_commit().unwrap();
            pusher
                .commit(Some("HEAD"), &sig, &sig, "add file", &tree, &[&parent])
                .unwrap();
        }
        {
            let mut remote = pusher.find_remote("origin").unwrap();
            remote
                .push(&["refs/heads/master:refs/heads/master"], None)
                .unwrap();
        }

        // The original work tree is now one commit behind the remote.
        let result = git_pull(&work_dir);
        assert!(result.is_ok(), "git_pull failed: {:?}", result);
        assert!(result.unwrap(), "expected changes from remote");

        // The fast-forward must also check the new file out into the tree.
        assert!(
            work_dir.join("NEW_FILE").exists(),
            "NEW_FILE should exist after fast-forward pull"
        );
    }
5794
    #[test]
    fn git_auto_commit_push_no_changes() {
        // With a clean work tree (everything committed and pushed),
        // git_auto_commit_push must be a no-op returning Ok(false).
        let tmp = tempfile::TempDir::new().unwrap();
        let bare_dir = tmp.path().join("bare.git");
        let work_dir = tmp.path().join("work");

        // Bare repo acts as the "origin" remote.
        std::fs::create_dir_all(&bare_dir).unwrap();
        git2::Repository::init_bare(&bare_dir).unwrap();

        let repo = git2::Repository::clone(bare_dir.to_str().unwrap(), &work_dir).unwrap();
        {
            // Commit identity is required for repo.signature() below.
            let mut config = repo.config().unwrap();
            config.set_str("user.name", "cfgd-test").unwrap();
            config.set_str("user.email", "test@cfgd.io").unwrap();
        }
        // Commit and publish a file so local and remote are in sync.
        std::fs::write(work_dir.join("README"), "test\n").unwrap();
        {
            let mut index = repo.index().unwrap();
            index.add_path(Path::new("README")).unwrap();
            index.write().unwrap();
            let tree_id = index.write_tree().unwrap();
            let tree = repo.find_tree(tree_id).unwrap();
            let sig = repo.signature().unwrap();
            repo.commit(Some("HEAD"), &sig, &sig, "initial", &tree, &[])
                .unwrap();
        }
        {
            let mut remote = repo.find_remote("origin").unwrap();
            remote
                .push(&["refs/heads/master:refs/heads/master"], None)
                .unwrap();
        }

        let result = git_auto_commit_push(&work_dir);
        assert!(result.is_ok(), "git_auto_commit_push failed: {:?}", result);
        assert!(!result.unwrap(), "expected no changes to push");
    }
5837
    // git_auto_commit_push with an untracked file present: the file must be
    // committed with the canonical auto-commit message and the commit must
    // arrive on the bare origin (same commit id on both ends).
    #[test]
    fn git_auto_commit_push_with_changes() {
        let tmp = tempfile::TempDir::new().unwrap();
        let bare_dir = tmp.path().join("bare.git");
        let work_dir = tmp.path().join("work");

        std::fs::create_dir_all(&bare_dir).unwrap();
        git2::Repository::init_bare(&bare_dir).unwrap();

        let repo = git2::Repository::clone(bare_dir.to_str().unwrap(), &work_dir).unwrap();
        // Identity needed for repo.signature() in the initial commit.
        {
            let mut config = repo.config().unwrap();
            config.set_str("user.name", "cfgd-test").unwrap();
            config.set_str("user.email", "test@cfgd.io").unwrap();
        }
        std::fs::write(work_dir.join("README"), "test\n").unwrap();
        // Baseline root commit, pushed so origin has history.
        {
            let mut index = repo.index().unwrap();
            index.add_path(Path::new("README")).unwrap();
            index.write().unwrap();
            let tree_id = index.write_tree().unwrap();
            let tree = repo.find_tree(tree_id).unwrap();
            let sig = repo.signature().unwrap();
            repo.commit(Some("HEAD"), &sig, &sig, "initial", &tree, &[])
                .unwrap();
        }
        {
            let mut remote = repo.find_remote("origin").unwrap();
            remote
                .push(&["refs/heads/master:refs/heads/master"], None)
                .unwrap();
        }

        // Introduce an uncommitted change for the function to pick up.
        std::fs::write(work_dir.join("new_config.yaml"), "key: value\n").unwrap();

        let result = git_auto_commit_push(&work_dir);
        assert!(result.is_ok(), "git_auto_commit_push failed: {:?}", result);
        assert!(result.unwrap(), "expected changes to be pushed");

        // Local HEAD carries the auto-commit message...
        let repo = git2::Repository::open(&work_dir).unwrap();
        let head = repo.head().unwrap().peel_to_commit().unwrap();
        assert_eq!(
            head.message().unwrap(),
            "cfgd: auto-commit configuration changes"
        );

        // ...and the bare origin received exactly that commit.
        let bare = git2::Repository::open_bare(&bare_dir).unwrap();
        let bare_head = bare
            .find_reference("refs/heads/master")
            .unwrap()
            .peel_to_commit()
            .unwrap();
        assert_eq!(head.id(), bare_head.id());
    }
5900
5901 #[test]
5904 fn git_pull_non_repo_returns_error() {
5905 let tmp = tempfile::TempDir::new().unwrap();
5906 let result = git_pull(tmp.path());
5907 let err = result.unwrap_err();
5908 assert!(
5909 err.contains("open repo"),
5910 "expected 'open repo' error, got: {err}"
5911 );
5912 }
5913
5914 #[test]
5917 fn git_auto_commit_push_non_repo_returns_error() {
5918 let tmp = tempfile::TempDir::new().unwrap();
5919 let result = git_auto_commit_push(tmp.path());
5920 let err = result.unwrap_err();
5921 assert!(
5922 err.contains("open repo"),
5923 "expected 'open repo' error, got: {err}"
5924 );
5925 }
5926
    // handle_sync with pull and push both disabled: reports no changes but
    // must still stamp DaemonState.last_sync.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn handle_sync_updates_state_timestamps() {
        use crate::test_helpers::init_test_git_repo;

        let tmp = tempfile::TempDir::new().unwrap();
        let repo_dir = tmp.path().join("repo");
        init_test_git_repo(&repo_dir);

        let state = Arc::new(Mutex::new(DaemonState::new()));

        // handle_sync does blocking git work, so run it off the async runtime.
        // Args: auto_pull=false, auto_push=false, source "local"; the two
        // trailing bools are false (NOTE(review): presumably signing/apply
        // flags — confirm against handle_sync's signature).
        let st = Arc::clone(&state);
        let rd = repo_dir.clone();
        let changed = tokio::task::spawn_blocking(move || {
            handle_sync(&rd, false, false, "local", &st, false, false)
        })
        .await
        .unwrap();

        assert!(!changed);

        let st = state.lock().await;
        assert!(st.last_sync.is_some(), "last_sync should be set");
    }
5954
    // auto_pull=true against a repo with no remote configured: the pull
    // failure must be tolerated (warning path), not panic, and report no
    // changes.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn handle_sync_pull_without_remote_logs_warning() {
        use crate::test_helpers::init_test_git_repo;

        let tmp = tempfile::TempDir::new().unwrap();
        let repo_dir = tmp.path().join("repo");
        // Local-only repo: no "origin" remote exists.
        init_test_git_repo(&repo_dir);

        let state = Arc::new(Mutex::new(DaemonState::new()));

        let st = Arc::clone(&state);
        let rd = repo_dir.clone();
        let changed = tokio::task::spawn_blocking(move || {
            handle_sync(&rd, true, false, "local", &st, false, false)
        })
        .await
        .unwrap();

        assert!(!changed);
    }
5978
    // Syncing a named source updates only that source's SourceStatus entry;
    // other sources' timestamps are left untouched.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn handle_sync_updates_per_source_status() {
        use crate::test_helpers::init_test_git_repo;

        let tmp = tempfile::TempDir::new().unwrap();
        let repo_dir = tmp.path().join("repo");
        init_test_git_repo(&repo_dir);

        let state = Arc::new(Mutex::new(DaemonState::new()));
        // Register a second source "acme" alongside the default "local"
        // entry (NOTE(review): "local" is assumed to be seeded by
        // DaemonState::new() — confirmed by the find() below succeeding).
        {
            let mut st = state.lock().await;
            st.sources.push(SourceStatus {
                name: "acme".to_string(),
                last_sync: None,
                last_reconcile: None,
                drift_count: 0,
                status: "active".to_string(),
            });
        }

        let st = Arc::clone(&state);
        let rd = repo_dir.clone();
        tokio::task::spawn_blocking(move || {
            handle_sync(&rd, false, false, "acme", &st, false, false)
        })
        .await
        .unwrap();

        let st = state.lock().await;
        let acme = st.sources.iter().find(|s| s.name == "acme").unwrap();
        assert!(
            acme.last_sync.is_some(),
            "acme source last_sync should be set"
        );
        let local = st.sources.iter().find(|s| s.name == "local").unwrap();
        assert!(
            local.last_sync.is_none(),
            "local source last_sync should remain None"
        );
    }
6024
    // End-to-end auto-pull: a second clone ("pusher") advances origin, then
    // handle_sync with auto_pull=true must fast-forward the work clone and
    // report that changes were applied.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn handle_sync_auto_pull_with_remote_changes() {
        let tmp = tempfile::TempDir::new().unwrap();
        let bare_dir = tmp.path().join("bare.git");
        let work_dir = tmp.path().join("work");
        let pusher_dir = tmp.path().join("pusher");

        std::fs::create_dir_all(&bare_dir).unwrap();
        git2::Repository::init_bare(&bare_dir).unwrap();

        // Work clone: baseline commit pushed to origin.
        let repo = git2::Repository::clone(bare_dir.to_str().unwrap(), &work_dir).unwrap();
        {
            let mut config = repo.config().unwrap();
            config.set_str("user.name", "cfgd-test").unwrap();
            config.set_str("user.email", "test@cfgd.io").unwrap();
        }
        std::fs::write(work_dir.join("README"), "v1\n").unwrap();
        {
            let mut index = repo.index().unwrap();
            index.add_path(Path::new("README")).unwrap();
            index.write().unwrap();
            let tree_id = index.write_tree().unwrap();
            let tree = repo.find_tree(tree_id).unwrap();
            let sig = repo.signature().unwrap();
            repo.commit(Some("HEAD"), &sig, &sig, "initial", &tree, &[])
                .unwrap();
        }
        {
            let mut remote = repo.find_remote("origin").unwrap();
            remote
                .push(&["refs/heads/master:refs/heads/master"], None)
                .unwrap();
        }

        // Pusher clone: add NEWFILE on top of the baseline and push, so
        // origin is now one commit ahead of the work clone.
        let pusher = git2::Repository::clone(bare_dir.to_str().unwrap(), &pusher_dir).unwrap();
        {
            let mut config = pusher.config().unwrap();
            config.set_str("user.name", "cfgd-pusher").unwrap();
            config.set_str("user.email", "pusher@cfgd.io").unwrap();
        }
        std::fs::write(pusher_dir.join("NEWFILE"), "synced\n").unwrap();
        {
            let mut index = pusher.index().unwrap();
            index.add_path(Path::new("NEWFILE")).unwrap();
            index.write().unwrap();
            let tree_id = index.write_tree().unwrap();
            let tree = pusher.find_tree(tree_id).unwrap();
            let sig = pusher.signature().unwrap();
            let parent = pusher.head().unwrap().peel_to_commit().unwrap();
            pusher
                .commit(Some("HEAD"), &sig, &sig, "add newfile", &tree, &[&parent])
                .unwrap();
        }
        {
            let mut remote = pusher.find_remote("origin").unwrap();
            remote
                .push(&["refs/heads/master:refs/heads/master"], None)
                .unwrap();
        }

        // Sync with auto_pull=true; blocking git work runs off the runtime.
        let state = Arc::new(Mutex::new(DaemonState::new()));
        let st = Arc::clone(&state);
        let wd = work_dir.clone();
        let changed = tokio::task::spawn_blocking(move || {
            handle_sync(&wd, true, false, "local", &st, false, false)
        })
        .await
        .unwrap();

        assert!(changed, "handle_sync should detect remote changes");
        assert!(
            work_dir.join("NEWFILE").exists(),
            "pulled file should exist after sync"
        );
    }
6104
    // End-to-end auto-push: an uncommitted local file plus auto_push=true
    // must produce the canonical auto-commit on the bare origin. The return
    // value reflects *pull* changes only, so it stays false here.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn handle_sync_auto_push_with_local_changes() {
        let tmp = tempfile::TempDir::new().unwrap();
        let bare_dir = tmp.path().join("bare.git");
        let work_dir = tmp.path().join("work");

        std::fs::create_dir_all(&bare_dir).unwrap();
        git2::Repository::init_bare(&bare_dir).unwrap();

        // Baseline commit pushed so origin has history.
        let repo = git2::Repository::clone(bare_dir.to_str().unwrap(), &work_dir).unwrap();
        {
            let mut config = repo.config().unwrap();
            config.set_str("user.name", "cfgd-test").unwrap();
            config.set_str("user.email", "test@cfgd.io").unwrap();
        }
        std::fs::write(work_dir.join("README"), "v1\n").unwrap();
        {
            let mut index = repo.index().unwrap();
            index.add_path(Path::new("README")).unwrap();
            index.write().unwrap();
            let tree_id = index.write_tree().unwrap();
            let tree = repo.find_tree(tree_id).unwrap();
            let sig = repo.signature().unwrap();
            repo.commit(Some("HEAD"), &sig, &sig, "initial", &tree, &[])
                .unwrap();
        }
        {
            let mut remote = repo.find_remote("origin").unwrap();
            remote
                .push(&["refs/heads/master:refs/heads/master"], None)
                .unwrap();
        }

        // Uncommitted local change for auto-push to pick up.
        std::fs::write(work_dir.join("local_change.txt"), "new content\n").unwrap();

        let state = Arc::new(Mutex::new(DaemonState::new()));
        let st = Arc::clone(&state);
        let wd = work_dir.clone();
        let changed = tokio::task::spawn_blocking(move || {
            handle_sync(&wd, false, true, "local", &st, false, false)
        })
        .await
        .unwrap();

        assert!(!changed, "no pull changes expected");

        // Origin's master tip must now be the auto-commit.
        let bare = git2::Repository::open_bare(&bare_dir).unwrap();
        let bare_head = bare
            .find_reference("refs/heads/master")
            .unwrap()
            .peel_to_commit()
            .unwrap();
        assert_eq!(
            bare_head.message().unwrap(),
            "cfgd: auto-commit configuration changes"
        );
    }
6168
    // Divergence handling: origin and the work clone each gain a distinct
    // commit on master, so a fast-forward is impossible. git_pull must refuse
    // with an error that names the divergence (no silent merge/rebase).
    #[test]
    fn git_pull_diverged_returns_error() {
        let tmp = tempfile::TempDir::new().unwrap();
        let bare_dir = tmp.path().join("bare.git");
        let work_dir = tmp.path().join("work");
        let pusher_dir = tmp.path().join("pusher");

        std::fs::create_dir_all(&bare_dir).unwrap();
        git2::Repository::init_bare(&bare_dir).unwrap();

        // Shared baseline commit pushed to origin.
        let repo = git2::Repository::clone(bare_dir.to_str().unwrap(), &work_dir).unwrap();
        {
            let mut config = repo.config().unwrap();
            config.set_str("user.name", "cfgd-test").unwrap();
            config.set_str("user.email", "test@cfgd.io").unwrap();
        }
        std::fs::write(work_dir.join("README"), "v1\n").unwrap();
        {
            let mut index = repo.index().unwrap();
            index.add_path(Path::new("README")).unwrap();
            index.write().unwrap();
            let tree_id = index.write_tree().unwrap();
            let tree = repo.find_tree(tree_id).unwrap();
            let sig = repo.signature().unwrap();
            repo.commit(Some("HEAD"), &sig, &sig, "initial", &tree, &[])
                .unwrap();
        }
        {
            let mut remote = repo.find_remote("origin").unwrap();
            remote
                .push(&["refs/heads/master:refs/heads/master"], None)
                .unwrap();
        }

        // Pusher clone advances origin by one commit...
        let pusher = git2::Repository::clone(bare_dir.to_str().unwrap(), &pusher_dir).unwrap();
        {
            let mut config = pusher.config().unwrap();
            config.set_str("user.name", "cfgd-pusher").unwrap();
            config.set_str("user.email", "pusher@cfgd.io").unwrap();
        }
        std::fs::write(pusher_dir.join("PUSHER_FILE"), "pusher\n").unwrap();
        {
            let mut index = pusher.index().unwrap();
            index.add_path(Path::new("PUSHER_FILE")).unwrap();
            index.write().unwrap();
            let tree_id = index.write_tree().unwrap();
            let tree = pusher.find_tree(tree_id).unwrap();
            let sig = pusher.signature().unwrap();
            let parent = pusher.head().unwrap().peel_to_commit().unwrap();
            pusher
                .commit(Some("HEAD"), &sig, &sig, "pusher commit", &tree, &[&parent])
                .unwrap();
        }
        {
            let mut remote = pusher.find_remote("origin").unwrap();
            remote
                .push(&["refs/heads/master:refs/heads/master"], None)
                .unwrap();
        }

        // ...while the work clone commits a *different* change locally,
        // creating true divergence (each side has one commit the other lacks).
        std::fs::write(work_dir.join("LOCAL_FILE"), "local\n").unwrap();
        {
            let mut index = repo.index().unwrap();
            index.add_path(Path::new("LOCAL_FILE")).unwrap();
            index.write().unwrap();
            let tree_id = index.write_tree().unwrap();
            let tree = repo.find_tree(tree_id).unwrap();
            let sig = repo.signature().unwrap();
            let parent = repo.head().unwrap().peel_to_commit().unwrap();
            repo.commit(Some("HEAD"), &sig, &sig, "local commit", &tree, &[&parent])
                .unwrap();
        }

        let result = git_pull(&work_dir);
        assert!(result.is_err(), "diverged branch should return error");
        let err_msg = result.unwrap_err();
        assert!(
            err_msg.contains("diverged") || err_msg.contains("fast-forward"),
            "error should mention divergence: {}",
            err_msg
        );
    }
6256
    // Fresh clone of an empty bare repo has no HEAD commit yet (unborn
    // branch). git_auto_commit_push must handle that and create the root
    // auto-commit rather than failing on the missing parent.
    #[test]
    fn git_auto_commit_push_fresh_repo_no_head() {
        let tmp = tempfile::TempDir::new().unwrap();
        let bare_dir = tmp.path().join("bare.git");
        let work_dir = tmp.path().join("work");

        std::fs::create_dir_all(&bare_dir).unwrap();
        git2::Repository::init_bare(&bare_dir).unwrap();

        // Note: no initial commit is made here — HEAD is unborn.
        let repo = git2::Repository::clone(bare_dir.to_str().unwrap(), &work_dir).unwrap();
        {
            let mut config = repo.config().unwrap();
            config.set_str("user.name", "cfgd-test").unwrap();
            config.set_str("user.email", "test@cfgd.io").unwrap();
        }

        std::fs::write(work_dir.join("first_file.txt"), "hello\n").unwrap();

        let result = git_auto_commit_push(&work_dir);
        assert!(result.is_ok(), "fresh repo push failed: {:?}", result);
        assert!(result.unwrap(), "expected changes to be committed");

        // The new root commit carries the canonical auto-commit message.
        let repo = git2::Repository::open(&work_dir).unwrap();
        let head = repo.head().unwrap().peel_to_commit().unwrap();
        assert_eq!(
            head.message().unwrap(),
            "cfgd: auto-commit configuration changes"
        );
    }
6290
6291 #[test]
6294 fn server_checkin_mock_config_changed() {
6295 use crate::config::{
6296 LayerPolicy, MergedProfile, PackagesSpec, ProfileLayer, ProfileSpec, ResolvedProfile,
6297 };
6298
6299 let mut server = mockito::Server::new();
6300 let mock = server
6301 .mock("POST", "/api/v1/checkin")
6302 .with_status(200)
6303 .with_header("content-type", "application/json")
6304 .with_body(r#"{"status":"ok","config_changed":true,"config":null}"#)
6305 .create();
6306
6307 let resolved = ResolvedProfile {
6308 layers: vec![ProfileLayer {
6309 source: "local".into(),
6310 profile_name: "test".into(),
6311 priority: 1000,
6312 policy: LayerPolicy::Local,
6313 spec: ProfileSpec::default(),
6314 }],
6315 merged: MergedProfile {
6316 packages: PackagesSpec::default(),
6317 ..Default::default()
6318 },
6319 };
6320
6321 let changed = server_checkin(&server.url(), &resolved);
6322 assert!(changed, "server should report config changed");
6323 mock.assert();
6324 }
6325
6326 #[test]
6329 fn server_checkin_mock_no_change() {
6330 use crate::config::{
6331 LayerPolicy, MergedProfile, PackagesSpec, ProfileLayer, ProfileSpec, ResolvedProfile,
6332 };
6333
6334 let mut server = mockito::Server::new();
6335 let mock = server
6336 .mock("POST", "/api/v1/checkin")
6337 .with_status(200)
6338 .with_header("content-type", "application/json")
6339 .with_body(r#"{"status":"ok","config_changed":false,"config":null}"#)
6340 .create();
6341
6342 let resolved = ResolvedProfile {
6343 layers: vec![ProfileLayer {
6344 source: "local".into(),
6345 profile_name: "test".into(),
6346 priority: 1000,
6347 policy: LayerPolicy::Local,
6348 spec: ProfileSpec::default(),
6349 }],
6350 merged: MergedProfile {
6351 packages: PackagesSpec::default(),
6352 ..Default::default()
6353 },
6354 };
6355
6356 let changed = server_checkin(&server.url(), &resolved);
6357 assert!(!changed, "server should report no change");
6358 mock.assert();
6359 }
6360
6361 #[test]
6364 fn server_checkin_mock_server_error() {
6365 use crate::config::{
6366 LayerPolicy, MergedProfile, PackagesSpec, ProfileLayer, ProfileSpec, ResolvedProfile,
6367 };
6368
6369 let mut server = mockito::Server::new();
6370 let mock = server
6371 .mock("POST", "/api/v1/checkin")
6372 .with_status(500)
6373 .with_body("internal server error")
6374 .create();
6375
6376 let resolved = ResolvedProfile {
6377 layers: vec![ProfileLayer {
6378 source: "local".into(),
6379 profile_name: "test".into(),
6380 priority: 1000,
6381 policy: LayerPolicy::Local,
6382 spec: ProfileSpec::default(),
6383 }],
6384 merged: MergedProfile {
6385 packages: PackagesSpec::default(),
6386 ..Default::default()
6387 },
6388 };
6389
6390 let changed = server_checkin(&server.url(), &resolved);
6391 assert!(!changed, "server error should return false");
6392 mock.assert();
6393 }
6394
6395 #[test]
6398 fn server_checkin_mock_malformed_json() {
6399 use crate::config::{
6400 LayerPolicy, MergedProfile, PackagesSpec, ProfileLayer, ProfileSpec, ResolvedProfile,
6401 };
6402
6403 let mut server = mockito::Server::new();
6404 let mock = server
6405 .mock("POST", "/api/v1/checkin")
6406 .with_status(200)
6407 .with_header("content-type", "application/json")
6408 .with_body("not json at all")
6409 .create();
6410
6411 let resolved = ResolvedProfile {
6412 layers: vec![ProfileLayer {
6413 source: "local".into(),
6414 profile_name: "test".into(),
6415 priority: 1000,
6416 policy: LayerPolicy::Local,
6417 spec: ProfileSpec::default(),
6418 }],
6419 merged: MergedProfile {
6420 packages: PackagesSpec::default(),
6421 ..Default::default()
6422 },
6423 };
6424
6425 let changed = server_checkin(&server.url(), &resolved);
6426 assert!(!changed, "malformed JSON should return false");
6427 mock.assert();
6428 }
6429
6430 #[test]
6433 fn server_checkin_mock_trailing_slash_url() {
6434 use crate::config::{
6435 LayerPolicy, MergedProfile, PackagesSpec, ProfileLayer, ProfileSpec, ResolvedProfile,
6436 };
6437
6438 let mut server = mockito::Server::new();
6439 let mock = server
6440 .mock("POST", "/api/v1/checkin")
6441 .with_status(200)
6442 .with_header("content-type", "application/json")
6443 .with_body(r#"{"status":"ok","config_changed":false,"config":null}"#)
6444 .create();
6445
6446 let resolved = ResolvedProfile {
6447 layers: vec![ProfileLayer {
6448 source: "local".into(),
6449 profile_name: "test".into(),
6450 priority: 1000,
6451 policy: LayerPolicy::Local,
6452 spec: ProfileSpec::default(),
6453 }],
6454 merged: MergedProfile {
6455 packages: PackagesSpec::default(),
6456 ..Default::default()
6457 },
6458 };
6459
6460 let url_with_slash = format!("{}/", server.url());
6462 let changed = server_checkin(&url_with_slash, &resolved);
6463 assert!(!changed);
6464 mock.assert();
6465 }
6466
6467 #[test]
6470 fn server_checkin_mock_verifies_request_body() {
6471 use crate::config::{
6472 CargoSpec, LayerPolicy, MergedProfile, PackagesSpec, ProfileLayer, ProfileSpec,
6473 ResolvedProfile,
6474 };
6475
6476 let mut server = mockito::Server::new();
6477 let mock = server
6478 .mock("POST", "/api/v1/checkin")
6479 .match_header("Content-Type", "application/json")
6480 .with_status(200)
6481 .with_header("content-type", "application/json")
6482 .with_body(r#"{"status":"ok","config_changed":false,"config":null}"#)
6483 .create();
6484
6485 let resolved = ResolvedProfile {
6486 layers: vec![ProfileLayer {
6487 source: "local".into(),
6488 profile_name: "test".into(),
6489 priority: 1000,
6490 policy: LayerPolicy::Local,
6491 spec: ProfileSpec::default(),
6492 }],
6493 merged: MergedProfile {
6494 packages: PackagesSpec {
6495 cargo: Some(CargoSpec {
6496 file: None,
6497 packages: vec!["bat".into()],
6498 }),
6499 ..Default::default()
6500 },
6501 ..Default::default()
6502 },
6503 };
6504
6505 let changed = server_checkin(&server.url(), &resolved);
6506 assert!(!changed);
6507 mock.assert();
6509 }
6510
    // With only a Git origin configured (no OriginType::Server),
    // try_server_checkin has no endpoint to contact and must return false.
    #[test]
    fn try_server_checkin_no_server_origin_returns_false() {
        use crate::config::*;
        // Minimal config whose sole origin is Git — no server target exists.
        let config = CfgdConfig {
            api_version: crate::API_VERSION.into(),
            kind: "Config".into(),
            metadata: ConfigMetadata {
                name: "test".into(),
            },
            spec: ConfigSpec {
                profile: Some("default".into()),
                origin: vec![OriginSpec {
                    origin_type: OriginType::Git,
                    url: "https://github.com/test/repo.git".into(),
                    branch: "main".into(),
                    auth: None,
                    ssh_strict_host_key_checking: Default::default(),
                }],
                daemon: None,
                secrets: None,
                sources: vec![],
                theme: None,
                modules: None,
                security: None,
                aliases: std::collections::HashMap::new(),
                file_strategy: FileStrategy::default(),
                ai: None,
                compliance: None,
            },
        };
        let resolved = ResolvedProfile {
            layers: vec![ProfileLayer {
                source: "local".into(),
                profile_name: "test".into(),
                priority: 1000,
                policy: LayerPolicy::Local,
                spec: ProfileSpec::default(),
            }],
            merged: MergedProfile::default(),
        };

        let changed = try_server_checkin(&config, &resolved);
        assert!(!changed, "no server origin means no checkin");
    }
6557
    // With an OriginType::Server origin pointing at a mock endpoint,
    // try_server_checkin must perform the check-in and propagate the
    // server's config_changed=true.
    #[test]
    fn try_server_checkin_with_server_origin_calls_checkin() {
        use crate::config::*;

        let mut server = mockito::Server::new();
        let mock = server
            .mock("POST", "/api/v1/checkin")
            .with_status(200)
            .with_header("content-type", "application/json")
            .with_body(r#"{"status":"ok","config_changed":true,"config":null}"#)
            .create();

        // Same minimal config shape as the Git-origin test, but the origin
        // type is Server and its URL is the mock server's.
        let config = CfgdConfig {
            api_version: crate::API_VERSION.into(),
            kind: "Config".into(),
            metadata: ConfigMetadata {
                name: "test".into(),
            },
            spec: ConfigSpec {
                profile: Some("default".into()),
                origin: vec![OriginSpec {
                    origin_type: OriginType::Server,
                    url: server.url(),
                    branch: "main".into(),
                    auth: None,
                    ssh_strict_host_key_checking: Default::default(),
                }],
                daemon: None,
                secrets: None,
                sources: vec![],
                theme: None,
                modules: None,
                security: None,
                aliases: std::collections::HashMap::new(),
                file_strategy: FileStrategy::default(),
                ai: None,
                compliance: None,
            },
        };
        let resolved = ResolvedProfile {
            layers: vec![ProfileLayer {
                source: "local".into(),
                profile_name: "test".into(),
                priority: 1000,
                policy: LayerPolicy::Local,
                spec: ProfileSpec::default(),
            }],
            merged: MergedProfile::default(),
        };

        let changed = try_server_checkin(&config, &resolved);
        assert!(changed, "server origin should trigger checkin");
        mock.assert();
    }
6614
    // Drives handle_health_connection over an in-memory duplex pipe and
    // checks the HTTP response carries the expected headers.
    #[tokio::test]
    async fn health_connection_response_headers() {
        let state = Arc::new(Mutex::new(DaemonState::new()));
        // duplex() gives two connected in-memory streams — no real socket.
        let (client, server) = tokio::io::duplex(4096);

        let handler_state = Arc::clone(&state);
        let handler = tokio::spawn(async move {
            handle_health_connection(server, handler_state)
                .await
                .unwrap();
        });

        let (reader, mut writer) = tokio::io::split(client);
        writer
            .write_all(b"GET /health HTTP/1.1\r\nHost: localhost\r\n\r\n")
            .await
            .unwrap();
        // Shut down the write half so the handler sees EOF on its read side.
        writer.shutdown().await.unwrap();

        // Drain the full response until the handler closes the connection.
        let mut buf_reader = tokio::io::BufReader::new(reader);
        let mut response = String::new();
        loop {
            let mut line = String::new();
            match buf_reader.read_line(&mut line).await {
                Ok(0) => break,
                Ok(_) => response.push_str(&line),
                Err(_) => break,
            }
        }

        handler.await.unwrap();

        assert!(
            response.contains("Content-Type: application/json"),
            "missing Content-Type header"
        );
        assert!(
            response.contains("Content-Length:"),
            "missing Content-Length header"
        );
        assert!(
            response.contains("Connection: close"),
            "missing Connection header"
        );
    }
6662
    // Sends a degenerate request (just CRLFCRLF, no request line) and checks
    // the handler neither panics nor hangs. Either a 200 (treated as the
    // default /health route) or a 404 is acceptable.
    #[tokio::test]
    async fn health_connection_empty_request_defaults_to_health() {
        let state = Arc::new(Mutex::new(DaemonState::new()));
        let (client, server) = tokio::io::duplex(4096);

        let handler_state = Arc::clone(&state);
        let handler = tokio::spawn(async move {
            handle_health_connection(server, handler_state)
                .await
                .unwrap();
        });

        let (reader, mut writer) = tokio::io::split(client);
        // No method/path at all — just an empty header block.
        writer.write_all(b"\r\n\r\n").await.unwrap();
        writer.shutdown().await.unwrap();

        let mut buf_reader = tokio::io::BufReader::new(reader);
        let mut response = String::new();
        loop {
            let mut line = String::new();
            match buf_reader.read_line(&mut line).await {
                Ok(0) => break,
                Ok(_) => response.push_str(&line),
                Err(_) => break,
            }
        }

        handler.await.unwrap();

        assert!(
            response.contains("200 OK") || response.contains("404 Not Found"),
            "should handle empty request gracefully: {}",
            &response[..response.len().min(80)]
        );
    }
6704
    // GET /status must return a JSON body that round-trips through
    // DaemonStatusResponse and reflects the live DaemonState fields.
    #[tokio::test]
    async fn health_connection_status_body_parses_as_response() {
        let state = Arc::new(Mutex::new(DaemonState::new()));
        // Seed distinctive values so the body can be checked precisely.
        {
            let mut st = state.lock().await;
            st.drift_count = 7;
            st.update_available = Some("2.0.0".to_string());
        }

        let (client, server) = tokio::io::duplex(8192);

        let handler_state = Arc::clone(&state);
        let handler = tokio::spawn(async move {
            handle_health_connection(server, handler_state)
                .await
                .unwrap();
        });

        let (reader, mut writer) = tokio::io::split(client);
        writer
            .write_all(b"GET /status HTTP/1.1\r\nHost: localhost\r\n\r\n")
            .await
            .unwrap();
        writer.shutdown().await.unwrap();

        // Collect only the body: headers end at the first blank line.
        let mut buf_reader = tokio::io::BufReader::new(reader);
        let mut lines: Vec<String> = Vec::new();
        let mut in_body = false;
        loop {
            let mut line = String::new();
            match buf_reader.read_line(&mut line).await {
                Ok(0) => break,
                Ok(_) => {
                    if in_body {
                        lines.push(line);
                    } else if line.trim().is_empty() {
                        in_body = true;
                    }
                }
                Err(_) => break,
            }
        }

        handler.await.unwrap();

        let body = lines.join("");
        let parsed: DaemonStatusResponse =
            serde_json::from_str(&body).expect("body should parse as DaemonStatusResponse");
        assert!(parsed.running);
        assert_eq!(parsed.drift_count, 7);
        assert_eq!(parsed.update_available.as_deref(), Some("2.0.0"));
        // NOTE(review): the single "local" source is assumed to be seeded by
        // DaemonState::new() — confirmed by these asserts passing.
        assert_eq!(parsed.sources.len(), 1);
        assert_eq!(parsed.sources[0].name, "local");
    }
6761
6762 #[test]
6765 fn daemon_state_module_last_reconcile_overwrite() {
6766 let mut state = DaemonState::new();
6767 state
6768 .module_last_reconcile
6769 .insert("mod-a".into(), "2026-01-01T00:00:00Z".into());
6770 state
6771 .module_last_reconcile
6772 .insert("mod-a".into(), "2026-01-02T00:00:00Z".into());
6773
6774 assert_eq!(state.module_last_reconcile.len(), 1);
6776 assert_eq!(
6777 state.module_last_reconcile.get("mod-a").unwrap(),
6778 "2026-01-02T00:00:00Z"
6779 );
6780 }
6781
6782 #[test]
6785 fn daemon_state_update_available_in_response() {
6786 let mut state = DaemonState::new();
6787 state.update_available = Some("3.1.0".to_string());
6788
6789 let response = state.to_response();
6790 assert_eq!(response.update_available.as_deref(), Some("3.1.0"));
6791 }
6792
6793 #[test]
6796 fn notifier_webhook_payload_structure() {
6797 let title = "cfgd: drift detected";
6799 let message = "3 files drifted";
6800 let payload = serde_json::json!({
6801 "event": title,
6802 "message": message,
6803 "timestamp": crate::utc_now_iso8601(),
6804 "source": "cfgd",
6805 });
6806
6807 let obj = payload.as_object().unwrap();
6808 assert_eq!(obj.len(), 4);
6809 assert_eq!(obj.get("event").unwrap().as_str().unwrap(), title);
6810 assert_eq!(obj.get("message").unwrap().as_str().unwrap(), message);
6811 assert!(obj.contains_key("timestamp"));
6812 assert_eq!(obj.get("source").unwrap().as_str().unwrap(), "cfgd");
6813 }
6814
6815 #[test]
6818 fn notifier_webhook_payload_timestamp_is_iso8601() {
6819 let payload = serde_json::json!({
6820 "event": "test",
6821 "message": "msg",
6822 "timestamp": crate::utc_now_iso8601(),
6823 "source": "cfgd",
6824 });
6825
6826 let ts = payload["timestamp"].as_str().unwrap();
6827 assert!(ts.contains('T'), "timestamp should be ISO 8601: {}", ts);
6829 assert!(ts.ends_with('Z'), "timestamp should end with Z: {}", ts);
6830 }
6831
6832 #[test]
6835 fn reconcile_task_drift_policy_auto() {
6836 let task = ReconcileTask {
6837 entity: "critical-module".into(),
6838 interval: Duration::from_secs(30),
6839 auto_apply: true,
6840 drift_policy: config::DriftPolicy::Auto,
6841 last_reconciled: None,
6842 };
6843 assert!(matches!(task.drift_policy, config::DriftPolicy::Auto));
6844 }
6845
6846 #[test]
6847 fn reconcile_task_drift_policy_notify_only() {
6848 let task = ReconcileTask {
6849 entity: "optional-module".into(),
6850 interval: Duration::from_secs(600),
6851 auto_apply: false,
6852 drift_policy: config::DriftPolicy::NotifyOnly,
6853 last_reconciled: None,
6854 };
6855 assert!(matches!(task.drift_policy, config::DriftPolicy::NotifyOnly));
6856 }
6857
6858 #[test]
6859 fn reconcile_task_drift_policy_prompt() {
6860 let task = ReconcileTask {
6861 entity: "interactive-module".into(),
6862 interval: Duration::from_secs(300),
6863 auto_apply: false,
6864 drift_policy: config::DriftPolicy::Prompt,
6865 last_reconciled: None,
6866 };
6867 assert!(matches!(task.drift_policy, config::DriftPolicy::Prompt));
6868 }
6869
6870 #[test]
6873 fn process_source_decisions_optional_tier_accept() {
6874 let store = test_state();
6875 let notifier = Notifier::new(NotifyMethod::Stdout, None);
6876 let policy = AutoApplyPolicyConfig {
6877 new_recommended: PolicyAction::Notify,
6878 new_optional: PolicyAction::Accept,
6879 locked_conflict: PolicyAction::Notify,
6880 };
6881
6882 let merged = MergedProfile {
6886 packages: crate::config::PackagesSpec {
6887 cargo: Some(crate::config::CargoSpec {
6888 file: None,
6889 packages: vec!["bat".into()],
6890 }),
6891 ..Default::default()
6892 },
6893 ..Default::default()
6894 };
6895
6896 let excluded = process_source_decisions(&store, "acme", &merged, &policy, ¬ifier);
6897 let pending = store.pending_decisions().unwrap();
6898 assert_eq!(pending.len(), 1);
6900 assert_eq!(pending[0].resource, "packages.cargo.bat");
6901 assert!(excluded.contains("packages.cargo.bat"));
6902 }
6903
6904 #[test]
6907 fn process_source_decisions_empty_profile_no_decisions() {
6908 let store = test_state();
6909 let notifier = Notifier::new(NotifyMethod::Stdout, None);
6910 let policy = AutoApplyPolicyConfig::default();
6911
6912 let merged = MergedProfile::default();
6913
6914 let excluded = process_source_decisions(&store, "empty", &merged, &policy, ¬ifier);
6915 let pending = store.pending_decisions().unwrap();
6916 assert!(pending.is_empty());
6917 assert!(excluded.is_empty());
6918 }
6919
    // Round-trip check for the camelCase wire format of DaemonStatusResponse:
    // every field, including nested sources and moduleReconcile entries, must
    // deserialize from the documented JSON shape.
    #[test]
    fn daemon_status_response_full_deserialization() {
        let json = r#"{
            "running": true,
            "pid": 54321,
            "uptimeSecs": 7200,
            "lastReconcile": "2026-04-01T00:00:00Z",
            "lastSync": "2026-04-01T00:01:00Z",
            "driftCount": 42,
            "sources": [
                {
                    "name": "local",
                    "lastSync": "2026-04-01T00:01:00Z",
                    "lastReconcile": "2026-04-01T00:00:00Z",
                    "driftCount": 10,
                    "status": "active"
                }
            ],
            "updateAvailable": "4.0.0",
            "moduleReconcile": [
                {
                    "name": "sec",
                    "interval": "30s",
                    "autoApply": true,
                    "driftPolicy": "Auto",
                    "lastReconcile": "2026-04-01T00:00:00Z"
                }
            ]
        }"#;

        let parsed: DaemonStatusResponse = serde_json::from_str(json).unwrap();
        assert!(parsed.running);
        assert_eq!(parsed.pid, 54321);
        assert_eq!(parsed.uptime_secs, 7200);
        assert_eq!(
            parsed.last_reconcile.as_deref(),
            Some("2026-04-01T00:00:00Z")
        );
        assert_eq!(parsed.last_sync.as_deref(), Some("2026-04-01T00:01:00Z"));
        assert_eq!(parsed.drift_count, 42);
        assert_eq!(parsed.sources.len(), 1);
        assert_eq!(parsed.sources[0].drift_count, 10);
        assert_eq!(parsed.update_available.as_deref(), Some("4.0.0"));
        assert_eq!(parsed.module_reconcile.len(), 1);
        assert_eq!(parsed.module_reconcile[0].name, "sec");
        assert!(parsed.module_reconcile[0].auto_apply);
    }
6969
6970 #[test]
6973 fn checkin_response_without_config_field() {
6974 let json = r#"{"status":"ok","config_changed":false}"#;
6975 let resp: CheckinServerResponse = serde_json::from_str(json).unwrap();
6976 assert!(!resp.config_changed);
6978 assert!(resp._config.is_none());
6979 }
6980
6981 #[test]
6984 fn hash_resources_unicode_content() {
6985 let set: HashSet<String> = HashSet::from_iter(["packages.brew.\u{1f600}".to_string()]);
6986 let hash = hash_resources(&set);
6987 assert_eq!(hash.len(), 64);
6988 assert_eq!(hash, hash_resources(&set));
6990 }
6991
6992 #[test]
6995 fn parse_duration_whitespace_only_falls_back() {
6996 assert_eq!(
6997 parse_duration_or_default(" "),
6998 Duration::from_secs(DEFAULT_RECONCILE_SECS)
6999 );
7000 }
7001
7002 #[test]
7005 fn sync_task_zero_interval() {
7006 let task = SyncTask {
7007 source_name: "instant".into(),
7008 repo_path: PathBuf::from("/tmp"),
7009 auto_pull: true,
7010 auto_push: true,
7011 auto_apply: true,
7012 interval: Duration::from_secs(0),
7013 last_synced: None,
7014 require_signed_commits: false,
7015 allow_unsigned: false,
7016 };
7017 assert_eq!(task.interval, Duration::ZERO);
7018 }
7019
7020 #[test]
7023 fn daemon_state_to_response_preserves_source_order() {
7024 let mut state = DaemonState::new();
7025 state.sources.push(SourceStatus {
7026 name: "z-source".into(),
7027 last_sync: None,
7028 last_reconcile: None,
7029 drift_count: 0,
7030 status: "active".into(),
7031 });
7032 state.sources.push(SourceStatus {
7033 name: "a-source".into(),
7034 last_sync: None,
7035 last_reconcile: None,
7036 drift_count: 0,
7037 status: "active".into(),
7038 });
7039
7040 let response = state.to_response();
7041 assert_eq!(response.sources[0].name, "local");
7042 assert_eq!(response.sources[1].name, "z-source");
7043 assert_eq!(response.sources[2].name, "a-source");
7044 }
7045
7046 #[test]
7049 fn daemon_state_started_at_elapses() {
7050 let state = DaemonState::new();
7051 let elapsed = state.started_at.elapsed();
7052 assert!(
7053 elapsed < Duration::from_secs(5),
7054 "started_at should be recent"
7055 );
7056 }
7057
    #[tokio::test]
    async fn health_connection_drift_body_parses_as_json() {
        // End-to-end check of the /drift health endpoint over an in-memory
        // duplex stream: the handler must answer with an HTTP response whose
        // body is valid JSON carrying `drift_count` and an `events` array.
        let state = Arc::new(Mutex::new(DaemonState::new()));
        let (client, server) = tokio::io::duplex(8192);

        // Run the handler on the server half while this test drives the
        // client half.
        let handler_state = Arc::clone(&state);
        let handler = tokio::spawn(async move {
            handle_health_connection(server, handler_state)
                .await
                .unwrap();
        });

        let (reader, mut writer) = tokio::io::split(client);
        writer
            .write_all(b"GET /drift HTTP/1.1\r\nHost: localhost\r\n\r\n")
            .await
            .unwrap();
        // Close the write side so the handler sees EOF after the request.
        writer.shutdown().await.unwrap();

        // Read the response line by line; everything after the first blank
        // line (the header/body separator) belongs to the body.
        let mut buf_reader = tokio::io::BufReader::new(reader);
        let mut lines: Vec<String> = Vec::new();
        let mut in_body = false;
        loop {
            let mut line = String::new();
            match buf_reader.read_line(&mut line).await {
                // EOF: the handler finished and closed its end.
                Ok(0) => break,
                Ok(_) => {
                    if in_body {
                        lines.push(line);
                    } else if line.trim().is_empty() {
                        in_body = true;
                    }
                }
                Err(_) => break,
            }
        }

        handler.await.unwrap();

        // Fresh daemon state has no recorded drift, so the body should be
        // valid JSON reporting zero drift and an (empty) events array.
        let body = lines.join("");
        let parsed: serde_json::Value =
            serde_json::from_str(&body).expect("drift body should be valid JSON");
        assert!(parsed.get("drift_count").is_some());
        assert!(parsed.get("events").is_some());
        assert!(parsed["events"].is_array());
        assert_eq!(parsed["drift_count"].as_u64().unwrap(), 0);
    }
7108
7109 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
7112 async fn handle_sync_no_pull_no_push_updates_timestamp() {
7113 use crate::test_helpers::init_test_git_repo;
7114
7115 let tmp = tempfile::TempDir::new().unwrap();
7116 let repo_dir = tmp.path().join("repo");
7117 init_test_git_repo(&repo_dir);
7118
7119 let state = Arc::new(Mutex::new(DaemonState::new()));
7120 let st = Arc::clone(&state);
7121 let rd = repo_dir.clone();
7122
7123 let changed = tokio::task::spawn_blocking(move || {
7124 handle_sync(&rd, false, false, "local", &st, false, false)
7125 })
7126 .await
7127 .unwrap();
7128
7129 assert!(!changed, "no pull/push means no changes");
7130
7131 let st = state.lock().await;
7132 assert!(
7133 st.last_sync.is_some(),
7134 "last_sync should be set even with no operations"
7135 );
7136 }
7137
7138 #[test]
7141 fn git_pull_sync_non_repo_returns_error() {
7142 let tmp = tempfile::TempDir::new().unwrap();
7143 let result = git_pull_sync(tmp.path());
7144 let err = result.unwrap_err();
7145 assert!(
7146 err.contains("open repo"),
7147 "expected 'open repo' error, got: {err}"
7148 );
7149 }
7150
    #[test]
    fn git_pull_sync_clean_repo_no_changes() {
        // Build a bare "remote" plus a working clone whose single commit is
        // already pushed; pulling such an up-to-date clone must succeed and
        // report that nothing changed.
        let tmp = tempfile::TempDir::new().unwrap();
        let bare_dir = tmp.path().join("bare.git");
        let work_dir = tmp.path().join("work");

        std::fs::create_dir_all(&bare_dir).unwrap();
        git2::Repository::init_bare(&bare_dir).unwrap();

        let repo = git2::Repository::clone(bare_dir.to_str().unwrap(), &work_dir).unwrap();
        {
            // A commit identity is required for repo.signature() below.
            let mut config = repo.config().unwrap();
            config.set_str("user.name", "cfgd-test").unwrap();
            config.set_str("user.email", "test@cfgd.io").unwrap();
        }
        std::fs::write(work_dir.join("README"), "test\n").unwrap();
        {
            // Stage README and create the initial (parentless) commit.
            let mut index = repo.index().unwrap();
            index.add_path(Path::new("README")).unwrap();
            index.write().unwrap();
            let tree_id = index.write_tree().unwrap();
            let tree = repo.find_tree(tree_id).unwrap();
            let sig = repo.signature().unwrap();
            repo.commit(Some("HEAD"), &sig, &sig, "initial", &tree, &[])
                .unwrap();
        }
        {
            // Push master so the clone and the bare remote are in sync.
            let mut remote = repo.find_remote("origin").unwrap();
            remote
                .push(&["refs/heads/master:refs/heads/master"], None)
                .unwrap();
        }

        let result = git_pull_sync(&work_dir);
        assert!(result.is_ok());
        assert!(!result.unwrap(), "should be up to date");
    }
7188
7189 #[test]
7192 fn notifier_all_methods_construct() {
7193 let stdout = Notifier::new(NotifyMethod::Stdout, None);
7194 assert!(matches!(stdout.method, NotifyMethod::Stdout));
7195 assert!(stdout.webhook_url.is_none());
7196
7197 let desktop = Notifier::new(NotifyMethod::Desktop, None);
7198 assert!(matches!(desktop.method, NotifyMethod::Desktop));
7199
7200 let webhook_none = Notifier::new(NotifyMethod::Webhook, None);
7201 assert!(matches!(webhook_none.method, NotifyMethod::Webhook));
7202 assert!(webhook_none.webhook_url.is_none());
7203
7204 let webhook_url = Notifier::new(
7205 NotifyMethod::Webhook,
7206 Some("https://example.com/hook".into()),
7207 );
7208 assert_eq!(
7209 webhook_url.webhook_url.as_deref(),
7210 Some("https://example.com/hook")
7211 );
7212 }
7213
7214 #[test]
7217 fn daemon_status_response_roundtrip_symmetry() {
7218 let original = DaemonStatusResponse {
7219 running: true,
7220 pid: 99999,
7221 uptime_secs: 86400,
7222 last_reconcile: Some("2026-04-01T12:00:00Z".into()),
7223 last_sync: Some("2026-04-01T12:01:00Z".into()),
7224 drift_count: 100,
7225 sources: vec![
7226 SourceStatus {
7227 name: "local".into(),
7228 last_sync: Some("2026-04-01T12:01:00Z".into()),
7229 last_reconcile: Some("2026-04-01T12:00:00Z".into()),
7230 drift_count: 50,
7231 status: "active".into(),
7232 },
7233 SourceStatus {
7234 name: "corp".into(),
7235 last_sync: None,
7236 last_reconcile: None,
7237 drift_count: 50,
7238 status: "error".into(),
7239 },
7240 ],
7241 update_available: Some("5.0.0".into()),
7242 module_reconcile: vec![ModuleReconcileStatus {
7243 name: "sec-baseline".into(),
7244 interval: "30s".into(),
7245 auto_apply: true,
7246 drift_policy: "Auto".into(),
7247 last_reconcile: Some("2026-04-01T12:00:00Z".into()),
7248 }],
7249 };
7250
7251 let json = serde_json::to_string(&original).unwrap();
7252 let roundtripped: DaemonStatusResponse = serde_json::from_str(&json).unwrap();
7253
7254 assert_eq!(roundtripped.pid, original.pid);
7255 assert_eq!(roundtripped.uptime_secs, original.uptime_secs);
7256 assert_eq!(roundtripped.drift_count, original.drift_count);
7257 assert_eq!(roundtripped.sources.len(), original.sources.len());
7258 assert_eq!(
7259 roundtripped.sources[1].drift_count,
7260 original.sources[1].drift_count
7261 );
7262 assert_eq!(
7263 roundtripped.module_reconcile.len(),
7264 original.module_reconcile.len()
7265 );
7266 assert_eq!(roundtripped.update_available, original.update_available);
7267 }
7268
7269 #[test]
7272 fn source_status_camel_case_serialization() {
7273 let status = SourceStatus {
7274 name: "test".into(),
7275 last_sync: Some("ts".into()),
7276 last_reconcile: Some("tr".into()),
7277 drift_count: 1,
7278 status: "active".into(),
7279 };
7280 let json = serde_json::to_string(&status).unwrap();
7281 assert!(json.contains("\"lastSync\""));
7282 assert!(json.contains("\"lastReconcile\""));
7283 assert!(json.contains("\"driftCount\""));
7284 assert!(!json.contains("\"last_sync\""));
7285 assert!(!json.contains("\"last_reconcile\""));
7286 assert!(!json.contains("\"drift_count\""));
7287 }
7288
7289 #[test]
7292 fn infer_item_tier_empty_string() {
7293 assert_eq!(infer_item_tier(""), "recommended");
7294 }
7295
7296 #[test]
7297 fn infer_item_tier_case_sensitivity() {
7298 assert_eq!(infer_item_tier("files.Security-settings"), "recommended");
7300 assert_eq!(infer_item_tier("files.POLICY-doc"), "recommended");
7302 assert_eq!(infer_item_tier("files.security-settings"), "locked");
7304 assert_eq!(infer_item_tier("files.policy-doc"), "locked");
7305 }
7306
7307 #[test]
7308 fn infer_item_tier_partial_keyword_match() {
7309 assert_eq!(infer_item_tier("files.insecurity-note"), "locked");
7311 }
7312
7313 #[test]
7316 fn compute_config_hash_ignores_non_package_fields() {
7317 use crate::config::{
7318 EnvVar, LayerPolicy, MergedProfile, PackagesSpec, ProfileLayer, ProfileSpec,
7319 ResolvedProfile,
7320 };
7321
7322 let resolved_a = ResolvedProfile {
7323 layers: vec![ProfileLayer {
7324 source: "local".into(),
7325 profile_name: "a".into(),
7326 priority: 1000,
7327 policy: LayerPolicy::Local,
7328 spec: ProfileSpec::default(),
7329 }],
7330 merged: MergedProfile {
7331 packages: PackagesSpec::default(),
7332 env: vec![EnvVar {
7333 name: "FOO".into(),
7334 value: "bar".into(),
7335 }],
7336 ..Default::default()
7337 },
7338 };
7339
7340 let resolved_b = ResolvedProfile {
7341 layers: vec![ProfileLayer {
7342 source: "local".into(),
7343 profile_name: "b".into(),
7344 priority: 1000,
7345 policy: LayerPolicy::Local,
7346 spec: ProfileSpec::default(),
7347 }],
7348 merged: MergedProfile {
7349 packages: PackagesSpec::default(),
7350 env: vec![EnvVar {
7351 name: "BAZ".into(),
7352 value: "qux".into(),
7353 }],
7354 ..Default::default()
7355 },
7356 };
7357
7358 let hash_a = compute_config_hash(&resolved_a).unwrap();
7361 let hash_b = compute_config_hash(&resolved_b).unwrap();
7362 assert_eq!(
7363 hash_a, hash_b,
7364 "compute_config_hash should only hash packages, not env vars"
7365 );
7366 }
7367
7368 #[cfg(unix)]
7371 #[test]
7372 fn generate_launchd_plist_contains_correct_structure() {
7373 let binary = Path::new("/usr/local/bin/cfgd");
7374 let config = Path::new("/Users/testuser/.config/cfgd/config.yaml");
7375 let home = Path::new("/Users/testuser");
7376
7377 let plist = generate_launchd_plist(binary, config, None, home);
7378
7379 assert!(
7380 plist.contains("<?xml version=\"1.0\""),
7381 "plist should have XML declaration"
7382 );
7383 assert!(
7384 plist.contains(&format!("<string>{}</string>", LAUNCHD_LABEL)),
7385 "plist should contain the launchd label"
7386 );
7387 assert!(
7388 plist.contains("<string>/usr/local/bin/cfgd</string>"),
7389 "plist should contain binary path"
7390 );
7391 assert!(
7392 plist.contains("<string>/Users/testuser/.config/cfgd/config.yaml</string>"),
7393 "plist should contain config path"
7394 );
7395 assert!(
7396 plist.contains("<string>daemon</string>"),
7397 "plist should contain daemon subcommand"
7398 );
7399 assert!(
7400 plist.contains("<key>RunAtLoad</key>"),
7401 "plist should enable run at load"
7402 );
7403 assert!(
7404 plist.contains("<key>KeepAlive</key>"),
7405 "plist should enable keep alive"
7406 );
7407 assert!(
7408 plist.contains("/Users/testuser/Library/Logs/cfgd.log"),
7409 "plist should set stdout log path under home"
7410 );
7411 assert!(
7412 plist.contains("/Users/testuser/Library/Logs/cfgd.err"),
7413 "plist should set stderr log path under home"
7414 );
7415 assert!(
7417 !plist.contains("--profile"),
7418 "plist without profile should not contain --profile"
7419 );
7420 }
7421
7422 #[cfg(unix)]
7423 #[test]
7424 fn generate_launchd_plist_with_profile() {
7425 let binary = Path::new("/usr/local/bin/cfgd");
7426 let config = Path::new("/home/user/.config/cfgd/config.yaml");
7427 let home = Path::new("/home/user");
7428
7429 let plist = generate_launchd_plist(binary, config, Some("work"), home);
7430
7431 assert!(
7432 plist.contains("<string>--profile</string>"),
7433 "plist with profile should contain --profile argument"
7434 );
7435 assert!(
7436 plist.contains("<string>work</string>"),
7437 "plist with profile should contain the profile name"
7438 );
7439 let config_pos = plist.find("<string>--config</string>").unwrap();
7441 let daemon_pos = plist.find("<string>daemon</string>").unwrap();
7442 let profile_pos = plist.find("<string>--profile</string>").unwrap();
7443 assert!(
7444 config_pos < daemon_pos,
7445 "--config should appear before daemon"
7446 );
7447 assert!(
7448 daemon_pos < profile_pos,
7449 "daemon should appear before --profile"
7450 );
7451 }
7452
7453 #[cfg(unix)]
7456 #[test]
7457 fn generate_systemd_unit_contains_correct_structure() {
7458 let binary = Path::new("/usr/local/bin/cfgd");
7459 let config = Path::new("/home/user/.config/cfgd/config.yaml");
7460
7461 let unit = generate_systemd_unit(binary, config, None);
7462
7463 assert!(
7464 unit.contains("[Unit]"),
7465 "unit file should have [Unit] section"
7466 );
7467 assert!(
7468 unit.contains("Description=cfgd configuration daemon"),
7469 "unit file should have correct description"
7470 );
7471 assert!(
7472 unit.contains("After=network.target"),
7473 "unit file should depend on network.target"
7474 );
7475 assert!(
7476 unit.contains("[Service]"),
7477 "unit file should have [Service] section"
7478 );
7479 assert!(
7480 unit.contains("Type=simple"),
7481 "unit file should use simple service type"
7482 );
7483 assert!(
7484 unit.contains(
7485 "ExecStart=/usr/local/bin/cfgd --config /home/user/.config/cfgd/config.yaml daemon"
7486 ),
7487 "unit file should have correct ExecStart"
7488 );
7489 assert!(
7490 unit.contains("Restart=on-failure"),
7491 "unit file should restart on failure"
7492 );
7493 assert!(
7494 unit.contains("RestartSec=10"),
7495 "unit file should have 10s restart delay"
7496 );
7497 assert!(
7498 unit.contains("[Install]"),
7499 "unit file should have [Install] section"
7500 );
7501 assert!(
7502 unit.contains("WantedBy=default.target"),
7503 "unit file should be wanted by default.target"
7504 );
7505 assert!(
7507 !unit.contains("--profile"),
7508 "unit without profile should not contain --profile"
7509 );
7510 }
7511
7512 #[cfg(unix)]
7513 #[test]
7514 fn generate_systemd_unit_with_profile() {
7515 let binary = Path::new("/opt/bin/cfgd");
7516 let config = Path::new("/etc/cfgd/config.yaml");
7517
7518 let unit = generate_systemd_unit(binary, config, Some("server"));
7519
7520 assert!(
7521 unit.contains(
7522 "ExecStart=/opt/bin/cfgd --config /etc/cfgd/config.yaml --profile server daemon"
7523 ),
7524 "unit file with profile should include --profile in ExecStart"
7525 );
7526 }
7527
7528 #[test]
7531 fn record_file_drift_to_records_event() {
7532 let store = test_state();
7533 let path = Path::new("/home/user/.bashrc");
7534
7535 let result = record_file_drift_to(&store, path);
7536 assert!(result, "record_file_drift_to should return true on success");
7537
7538 let events = store.unresolved_drift().unwrap();
7539 assert_eq!(events.len(), 1, "should have exactly one drift event");
7540 assert_eq!(events[0].resource_id, "/home/user/.bashrc");
7541 }
7542
7543 #[test]
7544 fn record_file_drift_to_records_correct_type() {
7545 let store = test_state();
7546 let path = Path::new("/etc/config.yaml");
7547
7548 record_file_drift_to(&store, path);
7549
7550 let events = store.unresolved_drift().unwrap();
7551 assert_eq!(events.len(), 1);
7552 assert_eq!(
7553 events[0].resource_type, "file",
7554 "drift event should have resource_type 'file'"
7555 );
7556 assert_eq!(
7557 events[0].source, "local",
7558 "drift event should have source 'local'"
7559 );
7560 assert_eq!(
7561 events[0].actual.as_deref(),
7562 Some("modified"),
7563 "drift event should have actual value 'modified'"
7564 );
7565 assert!(
7566 events[0].expected.is_none(),
7567 "drift event should have no expected value"
7568 );
7569 }
7570
7571 #[test]
7574 fn discover_managed_paths_with_no_config_returns_empty() {
7575 use std::path::Path;
7576
7577 struct TestHooks;
7578 impl DaemonHooks for TestHooks {
7579 fn build_registry(&self, _: &CfgdConfig) -> ProviderRegistry {
7580 ProviderRegistry::new()
7581 }
7582 fn plan_files(
7583 &self,
7584 _: &Path,
7585 _: &ResolvedProfile,
7586 ) -> crate::errors::Result<Vec<FileAction>> {
7587 Ok(vec![])
7588 }
7589 fn plan_packages(
7590 &self,
7591 _: &MergedProfile,
7592 _: &[&dyn PackageManager],
7593 ) -> crate::errors::Result<Vec<PackageAction>> {
7594 Ok(vec![])
7595 }
7596 fn extend_registry_custom_managers(
7597 &self,
7598 _: &mut ProviderRegistry,
7599 _: &config::PackagesSpec,
7600 ) {
7601 }
7602 fn expand_tilde(&self, path: &Path) -> PathBuf {
7603 crate::expand_tilde(path)
7604 }
7605 }
7606
7607 let hooks = TestHooks;
7608 let paths = discover_managed_paths(Path::new("/nonexistent/config.yaml"), None, &hooks);
7610 assert!(
7611 paths.is_empty(),
7612 "non-existent config should return no managed paths"
7613 );
7614 }
7615
7616 #[test]
7619 fn parse_daemon_config_defaults() {
7620 let daemon_cfg = config::DaemonConfig {
7621 enabled: true,
7622 reconcile: None,
7623 sync: None,
7624 notify: None,
7625 };
7626 let parsed = parse_daemon_config(&daemon_cfg);
7627 assert_eq!(
7628 parsed.reconcile_interval,
7629 Duration::from_secs(DEFAULT_RECONCILE_SECS)
7630 );
7631 assert_eq!(parsed.sync_interval, Duration::from_secs(DEFAULT_SYNC_SECS));
7632 assert!(!parsed.auto_pull);
7633 assert!(!parsed.auto_push);
7634 assert!(!parsed.on_change_reconcile);
7635 assert!(!parsed.notify_on_drift);
7636 assert!(matches!(parsed.notify_method, NotifyMethod::Stdout));
7637 assert!(parsed.webhook_url.is_none());
7638 assert!(!parsed.auto_apply);
7639 }
7640
7641 #[test]
7642 fn parse_daemon_config_custom_intervals() {
7643 let daemon_cfg = config::DaemonConfig {
7644 enabled: true,
7645 reconcile: Some(config::ReconcileConfig {
7646 interval: "10m".to_string(),
7647 on_change: false,
7648 auto_apply: false,
7649 policy: None,
7650 drift_policy: config::DriftPolicy::default(),
7651 patches: vec![],
7652 }),
7653 sync: Some(config::SyncConfig {
7654 auto_pull: false,
7655 auto_push: false,
7656 interval: "30s".to_string(),
7657 }),
7658 notify: None,
7659 };
7660 let parsed = parse_daemon_config(&daemon_cfg);
7661 assert_eq!(parsed.reconcile_interval, Duration::from_secs(600));
7662 assert_eq!(parsed.sync_interval, Duration::from_secs(30));
7663 }
7664
7665 #[test]
7666 fn parse_daemon_config_notification_settings() {
7667 let daemon_cfg = config::DaemonConfig {
7668 enabled: true,
7669 reconcile: None,
7670 sync: None,
7671 notify: Some(config::NotifyConfig {
7672 drift: true,
7673 method: NotifyMethod::Webhook,
7674 webhook_url: Some("https://hooks.example.com/drift".to_string()),
7675 }),
7676 };
7677 let parsed = parse_daemon_config(&daemon_cfg);
7678 assert!(parsed.notify_on_drift);
7679 assert!(matches!(parsed.notify_method, NotifyMethod::Webhook));
7680 assert_eq!(
7681 parsed.webhook_url.as_deref(),
7682 Some("https://hooks.example.com/drift")
7683 );
7684 }
7685
7686 #[test]
7687 fn parse_daemon_config_sync_flags() {
7688 let daemon_cfg = config::DaemonConfig {
7689 enabled: true,
7690 reconcile: None,
7691 sync: Some(config::SyncConfig {
7692 auto_pull: true,
7693 auto_push: true,
7694 interval: "5m".to_string(),
7695 }),
7696 notify: None,
7697 };
7698 let parsed = parse_daemon_config(&daemon_cfg);
7699 assert!(parsed.auto_pull);
7700 assert!(parsed.auto_push);
7701 }
7702
7703 #[test]
7704 fn parse_daemon_config_on_change_enabled() {
7705 let daemon_cfg = config::DaemonConfig {
7706 enabled: true,
7707 reconcile: Some(config::ReconcileConfig {
7708 interval: "5m".to_string(),
7709 on_change: true,
7710 auto_apply: false,
7711 policy: None,
7712 drift_policy: config::DriftPolicy::default(),
7713 patches: vec![],
7714 }),
7715 sync: None,
7716 notify: None,
7717 };
7718 let parsed = parse_daemon_config(&daemon_cfg);
7719 assert!(parsed.on_change_reconcile);
7720 assert!(!parsed.auto_apply);
7721 }
7722
7723 #[test]
7724 fn parse_daemon_config_auto_apply_enabled() {
7725 let daemon_cfg = config::DaemonConfig {
7726 enabled: true,
7727 reconcile: Some(config::ReconcileConfig {
7728 interval: "5m".to_string(),
7729 on_change: false,
7730 auto_apply: true,
7731 policy: None,
7732 drift_policy: config::DriftPolicy::Auto,
7733 patches: vec![],
7734 }),
7735 sync: None,
7736 notify: None,
7737 };
7738 let parsed = parse_daemon_config(&daemon_cfg);
7739 assert!(parsed.auto_apply);
7740 }
7741
    #[test]
    fn handle_reconcile_with_no_config_file() {
        // handle_reconcile pointed at a missing config file must be a no-op:
        // last_reconcile stays unset.
        let state = Arc::new(Mutex::new(DaemonState::new()));
        let notifier = Arc::new(Notifier::new(NotifyMethod::Stdout, None));

        // Hooks that plan nothing; handle_reconcile should bail out before
        // consulting them because the config file does not exist.
        struct NoopHooks;
        impl DaemonHooks for NoopHooks {
            fn build_registry(&self, _: &CfgdConfig) -> ProviderRegistry {
                ProviderRegistry::new()
            }
            fn plan_files(
                &self,
                _: &Path,
                _: &ResolvedProfile,
            ) -> crate::errors::Result<Vec<FileAction>> {
                Ok(vec![])
            }
            fn plan_packages(
                &self,
                _: &MergedProfile,
                _: &[&dyn PackageManager],
            ) -> crate::errors::Result<Vec<PackageAction>> {
                Ok(vec![])
            }
            fn extend_registry_custom_managers(
                &self,
                _: &mut ProviderRegistry,
                _: &config::PackagesSpec,
            ) {
            }
            fn expand_tilde(&self, path: &Path) -> PathBuf {
                crate::expand_tilde(path)
            }
        }

        let tmp = tempfile::tempdir().unwrap();
        let state_dir = tmp.path().to_path_buf();

        handle_reconcile(
            Path::new("/nonexistent/path/config.yaml"),
            None,
            &state,
            &notifier,
            false,
            &NoopHooks,
            Some(&state_dir),
        );

        // The state lock is async; build a throwaway current-thread runtime
        // just to acquire it in this non-async test.
        let rt = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .unwrap();
        let guard = rt.block_on(state.lock());
        assert!(
            guard.last_reconcile.is_none(),
            "no reconcile should have occurred with missing config"
        );
    }
7802
    #[test]
    fn handle_reconcile_with_no_profile() {
        // A valid config file with an empty spec (and no profile passed in)
        // must also leave last_reconcile unset.
        let state = Arc::new(Mutex::new(DaemonState::new()));
        let notifier = Arc::new(Notifier::new(NotifyMethod::Stdout, None));

        // Hooks that plan nothing; reconcile should stop before using them
        // because no profile can be resolved from the empty spec.
        struct NoopHooks;
        impl DaemonHooks for NoopHooks {
            fn build_registry(&self, _: &CfgdConfig) -> ProviderRegistry {
                ProviderRegistry::new()
            }
            fn plan_files(
                &self,
                _: &Path,
                _: &ResolvedProfile,
            ) -> crate::errors::Result<Vec<FileAction>> {
                Ok(vec![])
            }
            fn plan_packages(
                &self,
                _: &MergedProfile,
                _: &[&dyn PackageManager],
            ) -> crate::errors::Result<Vec<PackageAction>> {
                Ok(vec![])
            }
            fn extend_registry_custom_managers(
                &self,
                _: &mut ProviderRegistry,
                _: &config::PackagesSpec,
            ) {
            }
            fn expand_tilde(&self, path: &Path) -> PathBuf {
                crate::expand_tilde(path)
            }
        }

        let tmp = tempfile::tempdir().unwrap();
        let state_dir = tmp.path().to_path_buf();

        // Minimal well-formed config whose spec defines nothing.
        let config_path = tmp.path().join("config.yaml");
        std::fs::write(
            &config_path,
            "apiVersion: cfgd.io/v1alpha1\nkind: CfgdConfig\nmetadata:\n  name: test\nspec: {}\n",
        )
        .unwrap();

        handle_reconcile(
            &config_path,
            None,
            &state,
            &notifier,
            false,
            &NoopHooks,
            Some(&state_dir),
        );

        // The state lock is async; build a throwaway current-thread runtime
        // just to acquire it in this non-async test.
        let rt = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .unwrap();
        let guard = rt.block_on(state.lock());
        assert!(
            guard.last_reconcile.is_none(),
            "no reconcile should have occurred without a profile"
        );
    }
7870
7871 #[test]
7874 fn build_reconcile_tasks_default_only_when_no_patches() {
7875 let daemon_cfg = config::DaemonConfig {
7876 enabled: true,
7877 reconcile: Some(config::ReconcileConfig {
7878 interval: "60s".to_string(),
7879 on_change: false,
7880 auto_apply: false,
7881 policy: None,
7882 drift_policy: config::DriftPolicy::NotifyOnly,
7883 patches: vec![],
7884 }),
7885 sync: None,
7886 notify: None,
7887 };
7888 let tasks = build_reconcile_tasks(&daemon_cfg, None, &[], Duration::from_secs(60), false);
7889 assert_eq!(tasks.len(), 1);
7890 assert_eq!(tasks[0].entity, "__default__");
7891 assert_eq!(tasks[0].interval, Duration::from_secs(60));
7892 assert!(!tasks[0].auto_apply);
7893 assert_eq!(tasks[0].drift_policy, config::DriftPolicy::NotifyOnly);
7894 }
7895
7896 #[test]
7897 fn build_reconcile_tasks_default_inherits_global_drift_policy() {
7898 let daemon_cfg = config::DaemonConfig {
7899 enabled: true,
7900 reconcile: Some(config::ReconcileConfig {
7901 interval: "120s".to_string(),
7902 on_change: false,
7903 auto_apply: true,
7904 policy: None,
7905 drift_policy: config::DriftPolicy::Auto,
7906 patches: vec![],
7907 }),
7908 sync: None,
7909 notify: None,
7910 };
7911 let tasks = build_reconcile_tasks(&daemon_cfg, None, &[], Duration::from_secs(120), true);
7912 assert_eq!(tasks.len(), 1);
7913 assert_eq!(tasks[0].drift_policy, config::DriftPolicy::Auto);
7914 assert!(tasks[0].auto_apply);
7915 }
7916
7917 #[test]
7918 fn build_reconcile_tasks_no_reconcile_config_uses_defaults() {
7919 let daemon_cfg = config::DaemonConfig {
7920 enabled: true,
7921 reconcile: None,
7922 sync: None,
7923 notify: None,
7924 };
7925 let tasks = build_reconcile_tasks(&daemon_cfg, None, &[], Duration::from_secs(300), false);
7926 assert_eq!(tasks.len(), 1);
7927 assert_eq!(tasks[0].entity, "__default__");
7928 assert_eq!(tasks[0].interval, Duration::from_secs(300));
7929 assert_eq!(tasks[0].drift_policy, config::DriftPolicy::default());
7931 }
7932
7933 #[test]
7934 fn build_reconcile_tasks_patches_without_resolved_profile_skips_modules() {
7935 let daemon_cfg = config::DaemonConfig {
7937 enabled: true,
7938 reconcile: Some(config::ReconcileConfig {
7939 interval: "60s".to_string(),
7940 on_change: false,
7941 auto_apply: false,
7942 policy: None,
7943 drift_policy: config::DriftPolicy::NotifyOnly,
7944 patches: vec![config::ReconcilePatch {
7945 kind: config::ReconcilePatchKind::Module,
7946 name: Some("vim".to_string()),
7947 interval: Some("10s".to_string()),
7948 auto_apply: Some(true),
7949 drift_policy: None,
7950 }],
7951 }),
7952 sync: None,
7953 notify: None,
7954 };
7955 let tasks = build_reconcile_tasks(
7956 &daemon_cfg,
7957 None, &["default"],
7959 Duration::from_secs(60),
7960 false,
7961 );
7962 assert_eq!(tasks.len(), 1);
7964 assert_eq!(tasks[0].entity, "__default__");
7965 }
7966
7967 #[test]
7968 fn build_reconcile_tasks_module_with_overridden_interval_gets_dedicated_task() {
7969 let merged = config::MergedProfile {
7971 modules: vec!["vim".to_string()],
7972 ..Default::default()
7973 };
7974 let resolved = config::ResolvedProfile {
7975 layers: vec![config::ProfileLayer {
7976 source: "local".to_string(),
7977 profile_name: "default".to_string(),
7978 priority: 0,
7979 policy: config::LayerPolicy::Local,
7980 spec: Default::default(),
7981 }],
7982 merged,
7983 };
7984
7985 let daemon_cfg = config::DaemonConfig {
7986 enabled: true,
7987 reconcile: Some(config::ReconcileConfig {
7988 interval: "60s".to_string(),
7989 on_change: false,
7990 auto_apply: false,
7991 policy: None,
7992 drift_policy: config::DriftPolicy::NotifyOnly,
7993 patches: vec![config::ReconcilePatch {
7994 kind: config::ReconcilePatchKind::Module,
7995 name: Some("vim".to_string()),
7996 interval: Some("10s".to_string()),
7997 auto_apply: None,
7998 drift_policy: None,
7999 }],
8000 }),
8001 sync: None,
8002 notify: None,
8003 };
8004
8005 let tasks = build_reconcile_tasks(
8006 &daemon_cfg,
8007 Some(&resolved),
8008 &["default"],
8009 Duration::from_secs(60),
8010 false,
8011 );
8012 assert_eq!(tasks.len(), 2);
8014 let vim_task = tasks.iter().find(|t| t.entity == "vim").unwrap();
8015 assert_eq!(vim_task.interval, Duration::from_secs(10));
8016 assert!(!vim_task.auto_apply);
8017 let default_task = tasks.iter().find(|t| t.entity == "__default__").unwrap();
8018 assert_eq!(default_task.interval, Duration::from_secs(60));
8019 }
8020
8021 #[test]
8022 fn build_reconcile_tasks_module_matching_global_gets_no_dedicated_task() {
8023 let merged = config::MergedProfile {
8025 modules: vec!["vim".to_string()],
8026 ..Default::default()
8027 };
8028 let resolved = config::ResolvedProfile {
8029 layers: vec![config::ProfileLayer {
8030 source: "local".to_string(),
8031 profile_name: "default".to_string(),
8032 priority: 0,
8033 policy: config::LayerPolicy::Local,
8034 spec: Default::default(),
8035 }],
8036 merged,
8037 };
8038
8039 let daemon_cfg = config::DaemonConfig {
8040 enabled: true,
8041 reconcile: Some(config::ReconcileConfig {
8042 interval: "60s".to_string(),
8043 on_change: false,
8044 auto_apply: false,
8045 policy: None,
8046 drift_policy: config::DriftPolicy::NotifyOnly,
8047 patches: vec![config::ReconcilePatch {
8049 kind: config::ReconcilePatchKind::Module,
8050 name: Some("vim".to_string()),
8051 interval: None, auto_apply: None, drift_policy: None, }],
8055 }),
8056 sync: None,
8057 notify: None,
8058 };
8059
8060 let tasks = build_reconcile_tasks(
8061 &daemon_cfg,
8062 Some(&resolved),
8063 &["default"],
8064 Duration::from_secs(60),
8065 false,
8066 );
8067 assert_eq!(tasks.len(), 1);
8069 assert_eq!(tasks[0].entity, "__default__");
8070 }
8071
8072 #[test]
8075 fn build_sync_tasks_local_only_when_no_sources() {
8076 let parsed = ParsedDaemonConfig {
8077 reconcile_interval: Duration::from_secs(60),
8078 sync_interval: Duration::from_secs(300),
8079 auto_pull: true,
8080 auto_push: false,
8081 on_change_reconcile: false,
8082 notify_on_drift: false,
8083 notify_method: NotifyMethod::Stdout,
8084 webhook_url: None,
8085 auto_apply: false,
8086 };
8087 let tmp = tempfile::tempdir().unwrap();
8088 let tasks = build_sync_tasks(tmp.path(), &parsed, &[], false, tmp.path(), |_| None);
8089 assert_eq!(tasks.len(), 1);
8090 assert_eq!(tasks[0].source_name, "local");
8091 assert!(tasks[0].auto_pull);
8092 assert!(!tasks[0].auto_push);
8093 assert!(tasks[0].auto_apply);
8094 assert_eq!(tasks[0].interval, Duration::from_secs(300));
8095 assert!(!tasks[0].require_signed_commits);
8096 }
8097
8098 #[test]
8099 fn build_sync_tasks_includes_source_when_dir_exists() {
8100 let parsed = ParsedDaemonConfig {
8101 reconcile_interval: Duration::from_secs(60),
8102 sync_interval: Duration::from_secs(300),
8103 auto_pull: false,
8104 auto_push: false,
8105 on_change_reconcile: false,
8106 notify_on_drift: false,
8107 notify_method: NotifyMethod::Stdout,
8108 webhook_url: None,
8109 auto_apply: false,
8110 };
8111 let tmp = tempfile::tempdir().unwrap();
8112 let cache_dir = tmp.path().join("sources");
8113 std::fs::create_dir_all(cache_dir.join("team-config")).unwrap();
8114
8115 let sources = vec![config::SourceSpec {
8116 name: "team-config".to_string(),
8117 origin: config::OriginSpec {
8118 origin_type: config::OriginType::Git,
8119 url: "https://github.com/team/config.git".to_string(),
8120 branch: "main".to_string(),
8121 auth: None,
8122 ssh_strict_host_key_checking: Default::default(),
8123 },
8124 subscription: Default::default(),
8125 sync: config::SourceSyncSpec {
8126 interval: "120s".to_string(),
8127 auto_apply: true,
8128 pin_version: None,
8129 },
8130 }];
8131
8132 let tasks = build_sync_tasks(
8133 tmp.path(),
8134 &parsed,
8135 &sources,
8136 false,
8137 &cache_dir,
8138 |_| Some(true), );
8140 assert_eq!(tasks.len(), 2);
8141 let source_task = tasks
8142 .iter()
8143 .find(|t| t.source_name == "team-config")
8144 .unwrap();
8145 assert!(source_task.auto_pull);
8146 assert!(!source_task.auto_push);
8147 assert!(source_task.auto_apply);
8148 assert_eq!(source_task.interval, Duration::from_secs(120));
8149 assert!(source_task.require_signed_commits);
8150 }
8151
8152 #[test]
8153 fn build_sync_tasks_skips_source_when_dir_missing() {
8154 let parsed = ParsedDaemonConfig {
8155 reconcile_interval: Duration::from_secs(60),
8156 sync_interval: Duration::from_secs(300),
8157 auto_pull: false,
8158 auto_push: false,
8159 on_change_reconcile: false,
8160 notify_on_drift: false,
8161 notify_method: NotifyMethod::Stdout,
8162 webhook_url: None,
8163 auto_apply: false,
8164 };
8165 let tmp = tempfile::tempdir().unwrap();
8166 let cache_dir = tmp.path().join("sources");
8167 let sources = vec![config::SourceSpec {
8170 name: "missing-source".to_string(),
8171 origin: config::OriginSpec {
8172 origin_type: config::OriginType::Git,
8173 url: "https://github.com/team/config.git".to_string(),
8174 branch: "main".to_string(),
8175 auth: None,
8176 ssh_strict_host_key_checking: Default::default(),
8177 },
8178 subscription: Default::default(),
8179 sync: Default::default(),
8180 }];
8181
8182 let tasks = build_sync_tasks(tmp.path(), &parsed, &sources, false, &cache_dir, |_| None);
8183 assert_eq!(tasks.len(), 1);
8185 assert_eq!(tasks[0].source_name, "local");
8186 }
8187
8188 #[test]
8189 fn build_sync_tasks_propagates_allow_unsigned() {
8190 let parsed = ParsedDaemonConfig {
8191 reconcile_interval: Duration::from_secs(60),
8192 sync_interval: Duration::from_secs(300),
8193 auto_pull: true,
8194 auto_push: true,
8195 on_change_reconcile: false,
8196 notify_on_drift: false,
8197 notify_method: NotifyMethod::Stdout,
8198 webhook_url: None,
8199 auto_apply: false,
8200 };
8201 let tmp = tempfile::tempdir().unwrap();
8202 let tasks = build_sync_tasks(
8203 tmp.path(),
8204 &parsed,
8205 &[],
8206 true, tmp.path(),
8208 |_| None,
8209 );
8210 assert!(tasks[0].allow_unsigned);
8211 }
8212
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn handle_reconcile_with_valid_config_records_drift_events() {
        // End-to-end reconcile over a real on-disk config/profile pair: when the
        // hooks plan one pending package install, handle_reconcile should record
        // an unresolved "package" drift event in the state DB and update the
        // shared in-memory daemon state (last_reconcile, drift_count).
        let tmp = tempfile::tempdir().unwrap();
        let state_dir = tmp.path().join("state");
        std::fs::create_dir_all(&state_dir).unwrap();

        // Minimal top-level config selecting the "default" profile.
        let config_path = tmp.path().join("config.yaml");
        std::fs::write(
            &config_path,
            "apiVersion: cfgd.io/v1alpha1\nkind: CfgdConfig\nmetadata:\n name: test\nspec:\n profile: default\n",
        )
        .unwrap();

        // Profile requesting a single cargo package ("bat").
        let profiles_dir = tmp.path().join("profiles");
        std::fs::create_dir_all(&profiles_dir).unwrap();
        std::fs::write(
            profiles_dir.join("default.yaml"),
            "apiVersion: cfgd.io/v1alpha1\nkind: Profile\nmetadata:\n name: default\nspec:\n packages:\n cargo:\n packages:\n - bat\n",
        )
        .unwrap();

        // Hooks stub: no file actions, one pending package install — i.e. the
        // planned state always differs from the actual state (simulated drift).
        struct DriftHooks;
        impl DaemonHooks for DriftHooks {
            fn build_registry(&self, _: &CfgdConfig) -> ProviderRegistry {
                ProviderRegistry::new()
            }
            fn plan_files(
                &self,
                _: &Path,
                _: &ResolvedProfile,
            ) -> crate::errors::Result<Vec<FileAction>> {
                Ok(vec![])
            }
            fn plan_packages(
                &self,
                _: &MergedProfile,
                _: &[&dyn PackageManager],
            ) -> crate::errors::Result<Vec<PackageAction>> {
                Ok(vec![PackageAction::Install {
                    manager: "cargo".into(),
                    packages: vec!["bat".into()],
                    origin: "local".into(),
                }])
            }
            fn extend_registry_custom_managers(
                &self,
                _: &mut ProviderRegistry,
                _: &config::PackagesSpec,
            ) {
            }
            fn expand_tilde(&self, path: &Path) -> PathBuf {
                crate::expand_tilde(path)
            }
        }

        let state = Arc::new(Mutex::new(DaemonState::new()));
        let notifier = Arc::new(Notifier::new(NotifyMethod::Stdout, None));

        // handle_reconcile is synchronous/blocking, so run it off the async
        // executor via spawn_blocking. Clones keep the originals alive for
        // the assertions below.
        let st = Arc::clone(&state);
        let not = Arc::clone(&notifier);
        let sd = state_dir.clone();
        let cp = config_path.clone();
        tokio::task::spawn_blocking(move || {
            handle_reconcile(&cp, None, &st, &not, false, &DriftHooks, Some(&sd));
        })
        .await
        .unwrap();

        // The state database must now hold the package drift event.
        let store = StateStore::open(&state_dir.join("cfgd.db")).unwrap();
        let drift_events = store.unresolved_drift().unwrap();
        assert!(
            !drift_events.is_empty(),
            "drift events should have been recorded"
        );
        let pkg_drift = drift_events.iter().find(|e| e.resource_type == "package");
        assert!(
            pkg_drift.is_some(),
            "should have a package drift event; events: {:?}",
            drift_events
        );
        assert_eq!(pkg_drift.unwrap().resource_id, "cargo:bat");

        // The shared in-memory state mirrors the reconcile outcome.
        let guard = state.lock().await;
        assert!(
            guard.last_reconcile.is_some(),
            "last_reconcile should have been set"
        );
        assert!(
            guard.drift_count > 0,
            "drift_count should have been incremented"
        );
    }
8315
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn handle_reconcile_notify_only_drift_policy_does_not_apply() {
        // With driftPolicy NotifyOnly and autoApply false, reconcile must still
        // *record* drift (events in the DB, drift_count bumped) even though no
        // changes are applied.
        let tmp = tempfile::tempdir().unwrap();
        let state_dir = tmp.path().join("state");
        std::fs::create_dir_all(&state_dir).unwrap();

        // Daemon config with autoApply disabled and a NotifyOnly drift policy.
        let config_path = tmp.path().join("config.yaml");
        std::fs::write(
            &config_path,
            "apiVersion: cfgd.io/v1alpha1\nkind: CfgdConfig\nmetadata:\n name: test\nspec:\n profile: default\n daemon:\n enabled: true\n reconcile:\n interval: 60s\n onChange: false\n autoApply: false\n driftPolicy: NotifyOnly\n",
        )
        .unwrap();

        let profiles_dir = tmp.path().join("profiles");
        std::fs::create_dir_all(&profiles_dir).unwrap();
        std::fs::write(
            profiles_dir.join("default.yaml"),
            "apiVersion: cfgd.io/v1alpha1\nkind: Profile\nmetadata:\n name: default\nspec:\n packages:\n cargo:\n packages:\n - bat\n",
        )
        .unwrap();

        // Hooks stub that always plans one pending package install (drift).
        struct NotifyOnlyHooks;
        impl DaemonHooks for NotifyOnlyHooks {
            fn build_registry(&self, _: &CfgdConfig) -> ProviderRegistry {
                ProviderRegistry::new()
            }
            fn plan_files(
                &self,
                _: &Path,
                _: &ResolvedProfile,
            ) -> crate::errors::Result<Vec<FileAction>> {
                Ok(vec![])
            }
            fn plan_packages(
                &self,
                _: &MergedProfile,
                _: &[&dyn PackageManager],
            ) -> crate::errors::Result<Vec<PackageAction>> {
                Ok(vec![PackageAction::Install {
                    manager: "cargo".into(),
                    packages: vec!["ripgrep".into()],
                    origin: "local".into(),
                }])
            }
            fn extend_registry_custom_managers(
                &self,
                _: &mut ProviderRegistry,
                _: &config::PackagesSpec,
            ) {
            }
            fn expand_tilde(&self, path: &Path) -> PathBuf {
                crate::expand_tilde(path)
            }
        }

        let state = Arc::new(Mutex::new(DaemonState::new()));
        let notifier = Arc::new(Notifier::new(NotifyMethod::Stdout, None));

        // Run the blocking reconcile off the async executor.
        let st = Arc::clone(&state);
        let not = Arc::clone(&notifier);
        let sd = state_dir.clone();
        let cp = config_path.clone();
        tokio::task::spawn_blocking(move || {
            handle_reconcile(&cp, None, &st, &not, false, &NotifyOnlyHooks, Some(&sd));
        })
        .await
        .unwrap();

        // Drift is recorded in the DB despite the NotifyOnly policy.
        let store = StateStore::open(&state_dir.join("cfgd.db")).unwrap();
        let drift_events = store.unresolved_drift().unwrap();
        assert!(
            !drift_events.is_empty(),
            "drift events should be recorded even with NotifyOnly policy"
        );

        // In-memory state is updated as well.
        let guard = state.lock().await;
        assert!(guard.drift_count > 0);
        assert!(guard.last_reconcile.is_some());
    }
8398
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn handle_reconcile_no_drift_when_no_actions() {
        // When the hooks plan zero file and zero package actions, a reconcile
        // completes successfully without recording any drift.
        let tmp = tempfile::tempdir().unwrap();
        let state_dir = tmp.path().join("state");
        std::fs::create_dir_all(&state_dir).unwrap();

        let config_path = tmp.path().join("config.yaml");
        std::fs::write(
            &config_path,
            "apiVersion: cfgd.io/v1alpha1\nkind: CfgdConfig\nmetadata:\n name: test\nspec:\n profile: default\n",
        )
        .unwrap();

        // Profile with an empty spec — nothing to manage.
        let profiles_dir = tmp.path().join("profiles");
        std::fs::create_dir_all(&profiles_dir).unwrap();
        std::fs::write(
            profiles_dir.join("default.yaml"),
            "apiVersion: cfgd.io/v1alpha1\nkind: Profile\nmetadata:\n name: default\nspec: {}\n",
        )
        .unwrap();

        // Hooks stub whose planners always return empty plans.
        struct NoDriftHooks;
        impl DaemonHooks for NoDriftHooks {
            fn build_registry(&self, _: &CfgdConfig) -> ProviderRegistry {
                ProviderRegistry::new()
            }
            fn plan_files(
                &self,
                _: &Path,
                _: &ResolvedProfile,
            ) -> crate::errors::Result<Vec<FileAction>> {
                Ok(vec![])
            }
            fn plan_packages(
                &self,
                _: &MergedProfile,
                _: &[&dyn PackageManager],
            ) -> crate::errors::Result<Vec<PackageAction>> {
                Ok(vec![])
            }
            fn extend_registry_custom_managers(
                &self,
                _: &mut ProviderRegistry,
                _: &config::PackagesSpec,
            ) {
            }
            fn expand_tilde(&self, path: &Path) -> PathBuf {
                crate::expand_tilde(path)
            }
        }

        let state = Arc::new(Mutex::new(DaemonState::new()));
        let notifier = Arc::new(Notifier::new(NotifyMethod::Stdout, None));

        // Run the blocking reconcile off the async executor.
        let st = Arc::clone(&state);
        let not = Arc::clone(&notifier);
        let sd = state_dir.clone();
        let cp = config_path.clone();
        tokio::task::spawn_blocking(move || {
            handle_reconcile(&cp, None, &st, &not, false, &NoDriftHooks, Some(&sd));
        })
        .await
        .unwrap();

        // The state DB must contain no drift events.
        let store = StateStore::open(&state_dir.join("cfgd.db")).unwrap();
        let drift_events = store.unresolved_drift().unwrap();
        assert!(
            drift_events.is_empty(),
            "no drift events should be recorded when plan has no actions"
        );

        // Reconcile still ran; drift counter stays at zero.
        let guard = state.lock().await;
        assert!(guard.last_reconcile.is_some());
        assert_eq!(guard.drift_count, 0);
    }
8477
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn handle_reconcile_with_profile_override() {
        // The config names a nonexistent profile, but passing Some("default")
        // as the override should make the reconcile succeed anyway.
        let tmp = tempfile::tempdir().unwrap();
        let state_dir = tmp.path().join("state");
        std::fs::create_dir_all(&state_dir).unwrap();

        // Note: spec.profile points at a profile that does not exist on disk.
        let config_path = tmp.path().join("config.yaml");
        std::fs::write(
            &config_path,
            "apiVersion: cfgd.io/v1alpha1\nkind: CfgdConfig\nmetadata:\n name: test\nspec:\n profile: nonexistent\n",
        )
        .unwrap();

        // Only "default" exists; the override must select it.
        let profiles_dir = tmp.path().join("profiles");
        std::fs::create_dir_all(&profiles_dir).unwrap();
        std::fs::write(
            profiles_dir.join("default.yaml"),
            "apiVersion: cfgd.io/v1alpha1\nkind: Profile\nmetadata:\n name: default\nspec: {}\n",
        )
        .unwrap();

        // Inert hooks — this test only cares about profile resolution.
        struct EmptyHooks;
        impl DaemonHooks for EmptyHooks {
            fn build_registry(&self, _: &CfgdConfig) -> ProviderRegistry {
                ProviderRegistry::new()
            }
            fn plan_files(
                &self,
                _: &Path,
                _: &ResolvedProfile,
            ) -> crate::errors::Result<Vec<FileAction>> {
                Ok(vec![])
            }
            fn plan_packages(
                &self,
                _: &MergedProfile,
                _: &[&dyn PackageManager],
            ) -> crate::errors::Result<Vec<PackageAction>> {
                Ok(vec![])
            }
            fn extend_registry_custom_managers(
                &self,
                _: &mut ProviderRegistry,
                _: &config::PackagesSpec,
            ) {
            }
            fn expand_tilde(&self, path: &Path) -> PathBuf {
                crate::expand_tilde(path)
            }
        }

        let state = Arc::new(Mutex::new(DaemonState::new()));
        let notifier = Arc::new(Notifier::new(NotifyMethod::Stdout, None));

        // Run the blocking reconcile off the async executor, with the
        // "default" profile override in place of the configured one.
        let st = Arc::clone(&state);
        let not = Arc::clone(&notifier);
        let sd = state_dir.clone();
        let cp = config_path.clone();
        tokio::task::spawn_blocking(move || {
            handle_reconcile(
                &cp,
                Some("default"),
                &st,
                &not,
                false,
                &EmptyHooks,
                Some(&sd),
            );
        })
        .await
        .unwrap();

        let guard = state.lock().await;
        assert!(
            guard.last_reconcile.is_some(),
            "reconciliation should succeed with profile override"
        );
    }
8560
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn handle_reconcile_multiple_actions_records_all_drift() {
        // One file action plus two package actions should yield three drift
        // events covering both the "file" and "package" resource types.
        let tmp = tempfile::tempdir().unwrap();
        let state_dir = tmp.path().join("state");
        std::fs::create_dir_all(&state_dir).unwrap();

        let config_path = tmp.path().join("config.yaml");
        std::fs::write(
            &config_path,
            "apiVersion: cfgd.io/v1alpha1\nkind: CfgdConfig\nmetadata:\n name: test\nspec:\n profile: default\n",
        )
        .unwrap();

        let profiles_dir = tmp.path().join("profiles");
        std::fs::create_dir_all(&profiles_dir).unwrap();
        std::fs::write(
            profiles_dir.join("default.yaml"),
            "apiVersion: cfgd.io/v1alpha1\nkind: Profile\nmetadata:\n name: default\nspec:\n packages:\n cargo:\n packages:\n - bat\n - ripgrep\n - fd-find\n",
        )
        .unwrap();

        // Hooks stub planning one file creation and two package installs.
        struct MultiDriftHooks;
        impl DaemonHooks for MultiDriftHooks {
            fn build_registry(&self, _: &CfgdConfig) -> ProviderRegistry {
                ProviderRegistry::new()
            }
            fn plan_files(
                &self,
                _: &Path,
                _: &ResolvedProfile,
            ) -> crate::errors::Result<Vec<FileAction>> {
                Ok(vec![FileAction::Create {
                    source: PathBuf::from("/src/.zshrc"),
                    target: PathBuf::from("/home/user/.zshrc"),
                    origin: "local".into(),
                    strategy: crate::config::FileStrategy::default(),
                    source_hash: None,
                }])
            }
            fn plan_packages(
                &self,
                _: &MergedProfile,
                _: &[&dyn PackageManager],
            ) -> crate::errors::Result<Vec<PackageAction>> {
                Ok(vec![
                    PackageAction::Install {
                        manager: "cargo".into(),
                        packages: vec!["bat".into(), "ripgrep".into()],
                        origin: "local".into(),
                    },
                    PackageAction::Install {
                        manager: "cargo".into(),
                        packages: vec!["fd-find".into()],
                        origin: "local".into(),
                    },
                ])
            }
            fn extend_registry_custom_managers(
                &self,
                _: &mut ProviderRegistry,
                _: &config::PackagesSpec,
            ) {
            }
            fn expand_tilde(&self, path: &Path) -> PathBuf {
                crate::expand_tilde(path)
            }
        }

        let state = Arc::new(Mutex::new(DaemonState::new()));
        let notifier = Arc::new(Notifier::new(NotifyMethod::Stdout, None));

        // Run the blocking reconcile off the async executor.
        let st = Arc::clone(&state);
        let not = Arc::clone(&notifier);
        let sd = state_dir.clone();
        let cp = config_path.clone();
        tokio::task::spawn_blocking(move || {
            handle_reconcile(&cp, None, &st, &not, false, &MultiDriftHooks, Some(&sd));
        })
        .await
        .unwrap();

        // Three actions planned -> three drift events recorded.
        let store = StateStore::open(&state_dir.join("cfgd.db")).unwrap();
        let drift_events = store.unresolved_drift().unwrap();
        assert_eq!(
            drift_events.len(),
            3,
            "should have drift events for all actions; got: {:?}",
            drift_events
        );

        // Both resource categories must be represented.
        let resource_types: Vec<&str> = drift_events
            .iter()
            .map(|e| e.resource_type.as_str())
            .collect();
        assert!(
            resource_types.contains(&"file"),
            "should have a file drift event"
        );
        assert!(
            resource_types.contains(&"package"),
            "should have package drift events"
        );
    }
8668
8669 #[test]
8672 fn discover_managed_paths_returns_targets_from_profile() {
8673 let tmp = tempfile::tempdir().unwrap();
8674
8675 let config_path = tmp.path().join("config.yaml");
8676 std::fs::write(
8677 &config_path,
8678 "apiVersion: cfgd.io/v1alpha1\nkind: CfgdConfig\nmetadata:\n name: test\nspec:\n profile: default\n",
8679 )
8680 .unwrap();
8681
8682 let profiles_dir = tmp.path().join("profiles");
8683 std::fs::create_dir_all(&profiles_dir).unwrap();
8684 std::fs::write(
8685 profiles_dir.join("default.yaml"),
8686 "apiVersion: cfgd.io/v1alpha1\nkind: Profile\nmetadata:\n name: default\nspec:\n files:\n managed:\n - source: src/zshrc\n target: /home/user/.zshrc\n - source: src/vimrc\n target: /home/user/.vimrc\n",
8687 )
8688 .unwrap();
8689
8690 struct TestHooks;
8691 impl DaemonHooks for TestHooks {
8692 fn build_registry(&self, _: &CfgdConfig) -> ProviderRegistry {
8693 ProviderRegistry::new()
8694 }
8695 fn plan_files(
8696 &self,
8697 _: &Path,
8698 _: &ResolvedProfile,
8699 ) -> crate::errors::Result<Vec<FileAction>> {
8700 Ok(vec![])
8701 }
8702 fn plan_packages(
8703 &self,
8704 _: &MergedProfile,
8705 _: &[&dyn PackageManager],
8706 ) -> crate::errors::Result<Vec<PackageAction>> {
8707 Ok(vec![])
8708 }
8709 fn extend_registry_custom_managers(
8710 &self,
8711 _: &mut ProviderRegistry,
8712 _: &config::PackagesSpec,
8713 ) {
8714 }
8715 fn expand_tilde(&self, path: &Path) -> PathBuf {
8716 path.to_path_buf()
8717 }
8718 }
8719
8720 let paths = discover_managed_paths(&config_path, None, &TestHooks);
8721 assert_eq!(paths.len(), 2);
8722 assert!(paths.contains(&PathBuf::from("/home/user/.zshrc")));
8723 assert!(paths.contains(&PathBuf::from("/home/user/.vimrc")));
8724 }
8725
8726 #[test]
8727 fn discover_managed_paths_returns_empty_for_missing_config() {
8728 struct TestHooks;
8729 impl DaemonHooks for TestHooks {
8730 fn build_registry(&self, _: &CfgdConfig) -> ProviderRegistry {
8731 ProviderRegistry::new()
8732 }
8733 fn plan_files(
8734 &self,
8735 _: &Path,
8736 _: &ResolvedProfile,
8737 ) -> crate::errors::Result<Vec<FileAction>> {
8738 Ok(vec![])
8739 }
8740 fn plan_packages(
8741 &self,
8742 _: &MergedProfile,
8743 _: &[&dyn PackageManager],
8744 ) -> crate::errors::Result<Vec<PackageAction>> {
8745 Ok(vec![])
8746 }
8747 fn extend_registry_custom_managers(
8748 &self,
8749 _: &mut ProviderRegistry,
8750 _: &config::PackagesSpec,
8751 ) {
8752 }
8753 fn expand_tilde(&self, path: &Path) -> PathBuf {
8754 path.to_path_buf()
8755 }
8756 }
8757
8758 let paths = discover_managed_paths(Path::new("/nonexistent/config.yaml"), None, &TestHooks);
8759 assert!(paths.is_empty());
8760 }
8761
8762 #[test]
8763 fn discover_managed_paths_with_profile_override() {
8764 let tmp = tempfile::tempdir().unwrap();
8765
8766 let config_path = tmp.path().join("config.yaml");
8767 std::fs::write(
8768 &config_path,
8769 "apiVersion: cfgd.io/v1alpha1\nkind: CfgdConfig\nmetadata:\n name: test\nspec: {}\n",
8770 )
8771 .unwrap();
8772
8773 let profiles_dir = tmp.path().join("profiles");
8774 std::fs::create_dir_all(&profiles_dir).unwrap();
8775 std::fs::write(
8776 profiles_dir.join("custom.yaml"),
8777 "apiVersion: cfgd.io/v1alpha1\nkind: Profile\nmetadata:\n name: custom\nspec:\n files:\n managed:\n - source: src/bashrc\n target: /home/user/.bashrc\n",
8778 )
8779 .unwrap();
8780
8781 struct TestHooks;
8782 impl DaemonHooks for TestHooks {
8783 fn build_registry(&self, _: &CfgdConfig) -> ProviderRegistry {
8784 ProviderRegistry::new()
8785 }
8786 fn plan_files(
8787 &self,
8788 _: &Path,
8789 _: &ResolvedProfile,
8790 ) -> crate::errors::Result<Vec<FileAction>> {
8791 Ok(vec![])
8792 }
8793 fn plan_packages(
8794 &self,
8795 _: &MergedProfile,
8796 _: &[&dyn PackageManager],
8797 ) -> crate::errors::Result<Vec<PackageAction>> {
8798 Ok(vec![])
8799 }
8800 fn extend_registry_custom_managers(
8801 &self,
8802 _: &mut ProviderRegistry,
8803 _: &config::PackagesSpec,
8804 ) {
8805 }
8806 fn expand_tilde(&self, path: &Path) -> PathBuf {
8807 path.to_path_buf()
8808 }
8809 }
8810
8811 let paths = discover_managed_paths(&config_path, Some("custom"), &TestHooks);
8812 assert_eq!(paths.len(), 1);
8813 assert_eq!(paths[0], PathBuf::from("/home/user/.bashrc"));
8814 }
8815
8816 #[test]
8819 fn pending_resource_paths_returns_empty_for_no_decisions() {
8820 let store = test_state();
8821 let paths = pending_resource_paths(&store);
8822 assert!(paths.is_empty());
8823 }
8824
8825 #[test]
8828 #[cfg(unix)]
8829 fn generate_launchd_plist_xml_structure_complete() {
8830 let binary = Path::new("/usr/local/bin/cfgd");
8831 let config = Path::new("/Users/alice/.config/cfgd/config.yaml");
8832 let home = Path::new("/Users/alice");
8833
8834 let plist = generate_launchd_plist(binary, config, None, home);
8835
8836 assert!(
8838 plist.contains("<?xml version=\"1.0\""),
8839 "should start with XML declaration"
8840 );
8841 assert!(
8842 plist.contains("<!DOCTYPE plist"),
8843 "should contain plist DOCTYPE"
8844 );
8845 assert!(
8846 plist.contains(&format!("<string>{}</string>", LAUNCHD_LABEL)),
8847 "should contain the label"
8848 );
8849 assert!(
8850 plist.contains("<string>/usr/local/bin/cfgd</string>"),
8851 "should contain binary path"
8852 );
8853 assert!(
8854 plist.contains("<string>--config</string>"),
8855 "should contain --config flag"
8856 );
8857 assert!(
8858 plist.contains("<string>/Users/alice/.config/cfgd/config.yaml</string>"),
8859 "should contain config path"
8860 );
8861 assert!(
8862 plist.contains("<string>daemon</string>"),
8863 "should contain daemon subcommand"
8864 );
8865 assert!(
8866 plist.contains("<key>RunAtLoad</key>"),
8867 "should set RunAtLoad"
8868 );
8869 assert!(
8870 plist.contains("<key>KeepAlive</key>"),
8871 "should set KeepAlive"
8872 );
8873 assert!(
8874 plist.contains("/Users/alice/Library/Logs/cfgd.log"),
8875 "stdout log should be under home Library/Logs"
8876 );
8877 assert!(
8878 plist.contains("/Users/alice/Library/Logs/cfgd.err"),
8879 "stderr log should be under home Library/Logs"
8880 );
8881 assert!(
8883 !plist.contains("--profile"),
8884 "should not contain --profile when None"
8885 );
8886 }
8887
8888 #[test]
8889 #[cfg(unix)]
8890 fn generate_launchd_plist_includes_profile_flag() {
8891 let binary = Path::new("/usr/local/bin/cfgd");
8892 let config = Path::new("/home/user/config.yaml");
8893 let home = Path::new("/home/user");
8894
8895 let plist = generate_launchd_plist(binary, config, Some("work"), home);
8896
8897 assert!(
8898 plist.contains("<string>--profile</string>"),
8899 "should contain --profile flag"
8900 );
8901 assert!(
8902 plist.contains("<string>work</string>"),
8903 "should contain profile name"
8904 );
8905 }
8906
8907 #[test]
8910 #[cfg(unix)]
8911 fn generate_systemd_unit_complete_structure() {
8912 let binary = Path::new("/usr/local/bin/cfgd");
8913 let config = Path::new("/home/user/.config/cfgd/config.yaml");
8914
8915 let unit = generate_systemd_unit(binary, config, None);
8916
8917 assert!(unit.contains("[Unit]"), "should contain [Unit] section");
8918 assert!(
8919 unit.contains("[Service]"),
8920 "should contain [Service] section"
8921 );
8922 assert!(
8923 unit.contains("[Install]"),
8924 "should contain [Install] section"
8925 );
8926 assert!(
8927 unit.contains("Description=cfgd configuration daemon"),
8928 "should have description"
8929 );
8930 assert!(
8931 unit.contains("After=network.target"),
8932 "should require network"
8933 );
8934 assert!(
8935 unit.contains("Type=simple"),
8936 "should be simple service type"
8937 );
8938 assert!(
8939 unit.contains("Restart=on-failure"),
8940 "should restart on failure"
8941 );
8942 assert!(unit.contains("RestartSec=10"), "should have restart delay");
8943 assert!(
8944 unit.contains("WantedBy=default.target"),
8945 "should be wanted by default.target"
8946 );
8947
8948 let expected_exec = format!(
8950 "ExecStart={} --config {} daemon",
8951 binary.display(),
8952 config.display()
8953 );
8954 assert!(
8955 unit.contains(&expected_exec),
8956 "ExecStart should be '{expected_exec}', got unit:\n{unit}"
8957 );
8958 assert!(
8960 !unit.contains("--profile"),
8961 "should not contain --profile when None"
8962 );
8963 }
8964
8965 #[test]
8966 #[cfg(unix)]
8967 fn generate_systemd_unit_includes_profile() {
8968 let binary = Path::new("/opt/cfgd/cfgd");
8969 let config = Path::new("/etc/cfgd/config.yaml");
8970
8971 let unit = generate_systemd_unit(binary, config, Some("server"));
8972
8973 let expected_exec = format!(
8974 "ExecStart={} --config {} --profile {} daemon",
8975 binary.display(),
8976 config.display(),
8977 "server"
8978 );
8979 assert!(
8980 unit.contains(&expected_exec),
8981 "ExecStart with profile should be '{expected_exec}', got:\n{unit}"
8982 );
8983 }
8984
8985 #[test]
8988 fn record_file_drift_to_stores_event_in_db() {
8989 let store = test_state();
8990 let path = Path::new("/home/user/.bashrc");
8991
8992 let result = record_file_drift_to(&store, path);
8993 assert!(result, "record_file_drift_to should return true on success");
8994
8995 let events = store.unresolved_drift().unwrap();
8997 assert_eq!(events.len(), 1, "should have exactly one drift event");
8998 assert_eq!(events[0].resource_type, "file");
8999 assert_eq!(events[0].resource_id, "/home/user/.bashrc");
9000 }
9001
9002 #[test]
9003 fn record_file_drift_to_multiple_files() {
9004 let store = test_state();
9005
9006 record_file_drift_to(&store, Path::new("/etc/hosts"));
9007 record_file_drift_to(&store, Path::new("/etc/resolv.conf"));
9008 record_file_drift_to(&store, Path::new("/home/user/.zshrc"));
9009
9010 let events = store.unresolved_drift().unwrap();
9011 assert_eq!(events.len(), 3, "should have three drift events");
9012
9013 let ids: Vec<&str> = events.iter().map(|e| e.resource_id.as_str()).collect();
9014 assert!(ids.contains(&"/etc/hosts"));
9015 assert!(ids.contains(&"/etc/resolv.conf"));
9016 assert!(ids.contains(&"/home/user/.zshrc"));
9017 }
9018
9019 #[test]
9022 fn parse_daemon_config_all_defaults() {
9023 let cfg = config::DaemonConfig {
9024 enabled: true,
9025 reconcile: None,
9026 sync: None,
9027 notify: None,
9028 };
9029
9030 let parsed = parse_daemon_config(&cfg);
9031 assert_eq!(
9032 parsed.reconcile_interval,
9033 Duration::from_secs(DEFAULT_RECONCILE_SECS)
9034 );
9035 assert_eq!(parsed.sync_interval, Duration::from_secs(DEFAULT_SYNC_SECS));
9036 assert!(!parsed.auto_pull);
9037 assert!(!parsed.auto_push);
9038 assert!(!parsed.on_change_reconcile);
9039 assert!(!parsed.notify_on_drift);
9040 assert!(matches!(parsed.notify_method, NotifyMethod::Stdout));
9041 assert!(parsed.webhook_url.is_none());
9042 assert!(!parsed.auto_apply);
9043 }
9044
9045 #[test]
9046 fn parse_daemon_config_with_all_settings() {
9047 let cfg = config::DaemonConfig {
9048 enabled: true,
9049 reconcile: Some(config::ReconcileConfig {
9050 interval: "60s".into(),
9051 on_change: true,
9052 auto_apply: true,
9053 policy: None,
9054 drift_policy: config::DriftPolicy::Auto,
9055 patches: vec![],
9056 }),
9057 sync: Some(config::SyncConfig {
9058 auto_pull: true,
9059 auto_push: true,
9060 interval: "120s".into(),
9061 }),
9062 notify: Some(config::NotifyConfig {
9063 drift: true,
9064 method: NotifyMethod::Webhook,
9065 webhook_url: Some("https://hooks.example.com/notify".into()),
9066 }),
9067 };
9068
9069 let parsed = parse_daemon_config(&cfg);
9070 assert_eq!(parsed.reconcile_interval, Duration::from_secs(60));
9071 assert_eq!(parsed.sync_interval, Duration::from_secs(120));
9072 assert!(parsed.auto_pull);
9073 assert!(parsed.auto_push);
9074 assert!(parsed.on_change_reconcile);
9075 assert!(parsed.notify_on_drift);
9076 assert!(matches!(parsed.notify_method, NotifyMethod::Webhook));
9077 assert_eq!(
9078 parsed.webhook_url.as_deref(),
9079 Some("https://hooks.example.com/notify")
9080 );
9081 assert!(parsed.auto_apply);
9082 }
9083
9084 #[test]
9085 fn parse_daemon_config_with_minute_interval() {
9086 let cfg = config::DaemonConfig {
9087 enabled: true,
9088 reconcile: Some(config::ReconcileConfig {
9089 interval: "10m".into(),
9090 on_change: false,
9091 auto_apply: false,
9092 policy: None,
9093 drift_policy: config::DriftPolicy::default(),
9094 patches: vec![],
9095 }),
9096 sync: Some(config::SyncConfig {
9097 auto_pull: false,
9098 auto_push: false,
9099 interval: "30m".into(),
9100 }),
9101 notify: None,
9102 };
9103
9104 let parsed = parse_daemon_config(&cfg);
9105 assert_eq!(parsed.reconcile_interval, Duration::from_secs(600));
9106 assert_eq!(parsed.sync_interval, Duration::from_secs(1800));
9107 }
9108
9109 #[test]
9112 fn build_sync_tasks_propagates_source_sync_interval() {
9113 let dir = tempfile::tempdir().unwrap();
9114 let config_dir = dir.path();
9115 let source_cache = dir.path().join("sources");
9116 std::fs::create_dir_all(source_cache.join("team-tools")).unwrap();
9117
9118 let parsed = ParsedDaemonConfig {
9119 reconcile_interval: Duration::from_secs(300),
9120 sync_interval: Duration::from_secs(300),
9121 auto_pull: true,
9122 auto_push: false,
9123 on_change_reconcile: false,
9124 notify_on_drift: false,
9125 notify_method: NotifyMethod::Stdout,
9126 webhook_url: None,
9127 auto_apply: false,
9128 };
9129
9130 let sources = vec![config::SourceSpec {
9131 name: "team-tools".into(),
9132 origin: config::OriginSpec {
9133 origin_type: config::OriginType::Git,
9134 url: "https://github.com/team/tools.git".into(),
9135 branch: "main".into(),
9136 auth: None,
9137 ssh_strict_host_key_checking: Default::default(),
9138 },
9139 subscription: config::SubscriptionSpec::default(),
9140 sync: config::SourceSyncSpec {
9141 auto_apply: true,
9142 interval: "60s".into(),
9143 pin_version: None,
9144 },
9145 }];
9146
9147 let tasks = build_sync_tasks(config_dir, &parsed, &sources, false, &source_cache, |_| {
9148 None
9149 });
9150
9151 assert_eq!(tasks.len(), 2, "should have local + team-tools");
9152 assert_eq!(tasks[0].source_name, "local");
9154 assert!(tasks[0].auto_pull);
9155 assert!(!tasks[0].auto_push);
9156 assert_eq!(tasks[0].interval, Duration::from_secs(300));
9157
9158 assert_eq!(tasks[1].source_name, "team-tools");
9160 assert!(tasks[1].auto_pull); assert!(!tasks[1].auto_push); assert!(tasks[1].auto_apply);
9163 assert_eq!(tasks[1].interval, Duration::from_secs(60));
9164 }
9165
9166 #[test]
9167 fn build_sync_tasks_manifest_detector_sets_require_signed() {
9168 let dir = tempfile::tempdir().unwrap();
9169 let config_dir = dir.path();
9170 let source_cache = dir.path().join("sources");
9171 std::fs::create_dir_all(source_cache.join("signed-source")).unwrap();
9172
9173 let parsed = ParsedDaemonConfig {
9174 reconcile_interval: Duration::from_secs(300),
9175 sync_interval: Duration::from_secs(300),
9176 auto_pull: false,
9177 auto_push: false,
9178 on_change_reconcile: false,
9179 notify_on_drift: false,
9180 notify_method: NotifyMethod::Stdout,
9181 webhook_url: None,
9182 auto_apply: false,
9183 };
9184
9185 let sources = vec![config::SourceSpec {
9186 name: "signed-source".into(),
9187 origin: config::OriginSpec {
9188 origin_type: config::OriginType::Git,
9189 url: "https://github.com/secure/config.git".into(),
9190 branch: "main".into(),
9191 auth: None,
9192 ssh_strict_host_key_checking: Default::default(),
9193 },
9194 subscription: config::SubscriptionSpec::default(),
9195 sync: config::SourceSyncSpec::default(),
9196 }];
9197
9198 let tasks = build_sync_tasks(config_dir, &parsed, &sources, false, &source_cache, |_| {
9200 Some(true)
9201 });
9202
9203 assert_eq!(tasks.len(), 2);
9204 assert!(
9205 !tasks[0].require_signed_commits,
9206 "local should not require signed"
9207 );
9208 assert!(
9209 tasks[1].require_signed_commits,
9210 "source with manifest should require signed"
9211 );
9212 }
9213
9214 #[test]
9217 fn build_reconcile_tasks_always_has_default() {
9218 let cfg = config::DaemonConfig {
9219 enabled: true,
9220 reconcile: None,
9221 sync: None,
9222 notify: None,
9223 };
9224
9225 let tasks = build_reconcile_tasks(&cfg, None, &[], Duration::from_secs(300), false);
9226
9227 assert_eq!(tasks.len(), 1);
9228 assert_eq!(tasks[0].entity, "__default__");
9229 assert_eq!(tasks[0].interval, Duration::from_secs(300));
9230 assert!(!tasks[0].auto_apply);
9231 }
9232
9233 #[test]
9236 fn git_pull_on_local_repo_no_remote_is_error() {
9237 let dir = tempfile::tempdir().unwrap();
9238 git2::Repository::init(dir.path()).unwrap();
9239
9240 let repo = git2::Repository::open(dir.path()).unwrap();
9242 let sig = git2::Signature::now("Test", "test@test.com").unwrap();
9243 let tree_oid = repo.index().unwrap().write_tree().unwrap();
9244 let tree = repo.find_tree(tree_oid).unwrap();
9245 repo.commit(Some("HEAD"), &sig, &sig, "init", &tree, &[])
9246 .unwrap();
9247
9248 let result = git_pull(dir.path());
9250 assert!(result.is_err(), "pull without remote should fail");
9251 }
9252
9253 #[test]
9254 fn git_auto_commit_push_with_no_changes_returns_false() {
9255 let dir = tempfile::tempdir().unwrap();
9256 let repo = git2::Repository::init(dir.path()).unwrap();
9257
9258 let sig = git2::Signature::now("Test", "test@test.com").unwrap();
9260 std::fs::write(dir.path().join("README.md"), "# Hello").unwrap();
9261 let mut index = repo.index().unwrap();
9262 index
9263 .add_all(["*"].iter(), git2::IndexAddOption::DEFAULT, None)
9264 .unwrap();
9265 index.write().unwrap();
9266 let tree_oid = index.write_tree().unwrap();
9267 let tree = repo.find_tree(tree_oid).unwrap();
9268 repo.commit(Some("HEAD"), &sig, &sig, "init", &tree, &[])
9269 .unwrap();
9270
9271 let result = git_auto_commit_push(dir.path());
9273 assert_eq!(result, Ok(false));
9275 }
9276
9277 #[test]
9280 fn daemon_status_response_camel_case_keys() {
9281 let response = DaemonStatusResponse {
9282 running: true,
9283 pid: 100,
9284 uptime_secs: 3600,
9285 last_reconcile: Some("2026-01-01T00:00:00Z".into()),
9286 last_sync: None,
9287 drift_count: 0,
9288 sources: vec![],
9289 update_available: None,
9290 module_reconcile: vec![],
9291 };
9292
9293 let json = serde_json::to_string(&response).unwrap();
9294 assert!(
9295 json.contains("\"uptimeSecs\""),
9296 "should use camelCase: {json}"
9297 );
9298 assert!(
9299 json.contains("\"lastReconcile\""),
9300 "should use camelCase: {json}"
9301 );
9302 assert!(
9303 json.contains("\"driftCount\""),
9304 "should use camelCase: {json}"
9305 );
9306 assert!(
9307 !json.contains("\"uptime_secs\""),
9308 "should not use snake_case: {json}"
9309 );
9310 }
9311
9312 #[test]
9315 fn module_reconcile_status_round_trips_extended() {
9316 let status = ModuleReconcileStatus {
9317 name: "security-baseline".into(),
9318 interval: "30s".into(),
9319 auto_apply: true,
9320 drift_policy: "Auto".into(),
9321 last_reconcile: Some("2026-04-01T12:00:00Z".into()),
9322 };
9323
9324 let json = serde_json::to_string(&status).unwrap();
9325 assert!(json.contains("\"autoApply\""), "should use camelCase");
9326 assert!(json.contains("\"driftPolicy\""), "should use camelCase");
9327 assert!(json.contains("\"lastReconcile\""), "should use camelCase");
9328
9329 let parsed: ModuleReconcileStatus = serde_json::from_str(&json).unwrap();
9330 assert_eq!(parsed.name, "security-baseline");
9331 assert!(parsed.auto_apply);
9332 assert_eq!(parsed.drift_policy, "Auto");
9333 }
9334
9335 #[test]
9338 fn extract_source_resources_includes_npm_and_pipx_and_dnf() {
9339 use crate::config::{MergedProfile, NpmSpec, PackagesSpec};
9340
9341 let merged = MergedProfile {
9342 packages: PackagesSpec {
9343 npm: Some(NpmSpec {
9344 file: None,
9345 global: vec!["typescript".into(), "eslint".into()],
9346 }),
9347 pipx: vec!["black".into()],
9348 dnf: vec!["gcc".into(), "make".into()],
9349 ..Default::default()
9350 },
9351 ..Default::default()
9352 };
9353
9354 let resources = extract_source_resources(&merged);
9355 assert!(resources.contains("packages.npm.typescript"));
9356 assert!(resources.contains("packages.npm.eslint"));
9357 assert!(resources.contains("packages.pipx.black"));
9358 assert!(resources.contains("packages.dnf.gcc"));
9359 assert!(resources.contains("packages.dnf.make"));
9360 assert_eq!(resources.len(), 5);
9361 }
9362
9363 #[test]
9364 fn extract_source_resources_includes_apt() {
9365 use crate::config::{AptSpec, MergedProfile, PackagesSpec};
9366
9367 let merged = MergedProfile {
9368 packages: PackagesSpec {
9369 apt: Some(AptSpec {
9370 packages: vec!["vim".into(), "git".into()],
9371 ..Default::default()
9372 }),
9373 ..Default::default()
9374 },
9375 ..Default::default()
9376 };
9377
9378 let resources = extract_source_resources(&merged);
9379 assert!(resources.contains("packages.apt.vim"));
9380 assert!(resources.contains("packages.apt.git"));
9381 assert_eq!(resources.len(), 2);
9382 }
9383
9384 #[test]
9385 fn extract_source_resources_includes_system_keys() {
9386 use crate::config::MergedProfile;
9387
9388 let mut merged = MergedProfile::default();
9389 merged.system.insert(
9390 "shell".into(),
9391 serde_yaml::to_value(serde_json::json!({"defaultShell": "/bin/zsh"})).unwrap(),
9392 );
9393 merged.system.insert(
9394 "macos_defaults".into(),
9395 serde_yaml::Value::Mapping(Default::default()),
9396 );
9397
9398 let resources = extract_source_resources(&merged);
9399 assert!(resources.contains("system.shell"));
9400 assert!(resources.contains("system.macos_defaults"));
9401 assert_eq!(resources.len(), 2);
9402 }
9403
9404 #[test]
9407 fn notifier_new_stores_method_and_url() {
9408 let notifier = Notifier::new(
9409 NotifyMethod::Webhook,
9410 Some("https://hooks.slack.com/test".into()),
9411 );
9412 assert!(matches!(notifier.method, NotifyMethod::Webhook));
9413 assert_eq!(
9414 notifier.webhook_url.as_deref(),
9415 Some("https://hooks.slack.com/test")
9416 );
9417 }
9418
9419 #[test]
9420 fn notifier_desktop_does_not_panic() {
9421 let notifier = Notifier::new(NotifyMethod::Desktop, None);
9422 notifier.notify("test title", "test body");
9424 }
9425
9426 #[test]
9429 fn infer_item_tier_detects_policy_keyword_extended() {
9430 assert_eq!(infer_item_tier("files./etc/security-policy.conf"), "locked");
9431 assert_eq!(infer_item_tier("system.policy_engine"), "locked");
9432 }
9433
9434 #[test]
9435 fn infer_item_tier_normal_resources_are_recommended() {
9436 assert_eq!(infer_item_tier("packages.npm.typescript"), "recommended");
9437 assert_eq!(
9438 infer_item_tier("files./home/user/.gitconfig"),
9439 "recommended"
9440 );
9441 assert_eq!(infer_item_tier("env.PATH"), "recommended");
9442 }
9443}