// work_tuimer/storage/mod.rs

1use crate::models::{DayData, TimePoint, WorkRecord};
2use crate::timer::{TimerState, TimerStatus};
3use anyhow::{Context, Result, anyhow};
4use rusqlite::{Connection, OptionalExtension, Transaction, params};
5use std::collections::HashMap;
6use std::fs;
7use std::path::{Path, PathBuf};
8use std::time::{Duration as StdDuration, SystemTime};
9use time::format_description::well_known::Rfc3339;
10use time::macros::format_description;
11use time::{Date, OffsetDateTime};
12
// File name of the SQLite database inside the data directory.
const DATABASE_FILE_NAME: &str = "work-tuimer.db";
// Legacy JSON file names from the pre-SQLite storage format; still
// recognized by diagnostics and the one-shot JSON -> SQLite migration.
const LEGACY_RUNNING_TIMER_FILE_NAME: &str = "running_timer.json";
const LEGACY_ACTIVE_TIMER_FILE_NAME: &str = "active_timer.json";
// Key in the `meta` table marking that the JSON -> SQLite migration ran.
const JSON_MIGRATION_META_KEY: &str = "migration.json_to_sqlite.v1";
17
/// Thin facade over the SQLite-backed repository. Cloneable because the
/// repository holds only the database path and opens connections on demand.
#[derive(Clone)]
pub struct Storage {
    repository: SqliteRepository,
}
22
/// Connection-per-operation SQLite repository: stores only the database
/// path and opens a fresh `Connection` for every call.
#[derive(Clone)]
struct SqliteRepository {
    db_path: PathBuf,
}
27
/// High-level storage manager that provides transactional operations
/// and automatic external modification tracking.
pub struct StorageManager {
    storage: Storage,
    // Last observed revision token per date. The inner `Option` is `None`
    // when the day was tracked but had no persisted revision at read time.
    file_modified_times: HashMap<Date, Option<SystemTime>>,
}
34
/// Snapshot of storage health: database location, migration state,
/// row counts, and any legacy JSON files still present on disk.
pub struct StorageDiagnostics {
    pub database_path: PathBuf,
    /// Value of the JSON -> SQLite migration marker, if the migration ran.
    pub migration_marker: Option<String>,
    pub days_count: u64,
    pub work_records_count: u64,
    pub active_timer_present: bool,
    /// Leftover `<date>.json` files from the pre-SQLite format.
    pub legacy_day_json_files: u64,
    /// Leftover legacy timer JSON files from the pre-SQLite format.
    pub legacy_timer_json_files: u64,
}
44
45impl StorageManager {
46    /// Create a new StorageManager
47    pub fn new() -> Result<Self> {
48        Ok(StorageManager {
49            storage: Storage::new()?,
50            file_modified_times: HashMap::new(),
51        })
52    }
53
54    /// Create a new StorageManager with a custom directory (for testing)
55    #[doc(hidden)]
56    #[allow(dead_code)]
57    pub fn new_with_dir(data_dir: PathBuf) -> Result<Self> {
58        Ok(StorageManager {
59            storage: Storage::new_with_dir(data_dir)?,
60            file_modified_times: HashMap::new(),
61        })
62    }
63
64    /// Load day data with automatic revision tracking
65    pub fn load_with_tracking(&mut self, date: Date) -> Result<DayData> {
66        let data = self.storage.load(&date)?;
67        let modified_time = self.storage.try_get_file_modified_time(&date)?;
68        self.file_modified_times.insert(date, modified_time);
69        Ok(data)
70    }
71
72    /// Check if day data changed externally and reload if needed.
73    /// Returns Some(DayData) if data was modified and reloaded, None if no change.
74    pub fn check_and_reload(&mut self, date: Date) -> Result<Option<DayData>> {
75        let current_modified = self.storage.try_get_file_modified_time(&date)?;
76
77        let is_tracked = self.file_modified_times.contains_key(&date);
78        if !is_tracked {
79            let data = self.storage.load(&date)?;
80            self.file_modified_times.insert(date, current_modified);
81            return Ok(Some(data));
82        }
83
84        let last_known = self.file_modified_times.get(&date).copied().flatten();
85        // We only need inequality detection here. We intentionally do not rely
86        // on ordering semantics across processes.
87        if current_modified != last_known {
88            let data = self.storage.load(&date)?;
89            self.file_modified_times.insert(date, current_modified);
90            Ok(Some(data))
91        } else {
92            Ok(None)
93        }
94    }
95
96    /// Add a new work record (transactional: load -> add -> save -> track)
97    #[allow(dead_code)]
98    pub fn add_record(&mut self, date: Date, record: WorkRecord) -> Result<()> {
99        let mut day_data = self.storage.load(&date)?;
100        day_data.add_record(record);
101        self.storage.save(&day_data)?;
102
103        let modified_time = self.storage.try_get_file_modified_time(&date)?;
104        self.file_modified_times.insert(date, modified_time);
105
106        Ok(())
107    }
108
109    /// Update an existing work record (transactional: load -> update -> save -> track)
110    #[allow(dead_code)]
111    pub fn update_record(&mut self, date: Date, record: WorkRecord) -> Result<()> {
112        let mut day_data = self.storage.load(&date)?;
113        day_data.add_record(record);
114        self.storage.save(&day_data)?;
115
116        let modified_time = self.storage.try_get_file_modified_time(&date)?;
117        self.file_modified_times.insert(date, modified_time);
118
119        Ok(())
120    }
121
122    /// Remove a work record by ID (transactional: load -> remove -> save -> track)
123    /// Returns the removed record if found.
124    #[allow(dead_code)]
125    pub fn remove_record(&mut self, date: Date, id: u32) -> Result<WorkRecord> {
126        let mut day_data = self.storage.load(&date)?;
127
128        let record = day_data
129            .work_records
130            .remove(&id)
131            .context(format!("Record with ID {} not found", id))?;
132
133        self.storage.save(&day_data)?;
134
135        let modified_time = self.storage.try_get_file_modified_time(&date)?;
136        self.file_modified_times.insert(date, modified_time);
137
138        Ok(record)
139    }
140
141    /// Save day data and update tracking
142    pub fn save(&mut self, day_data: &DayData) -> Result<()> {
143        self.storage.save(day_data)?;
144
145        let modified_time = self.storage.try_get_file_modified_time(&day_data.date)?;
146        self.file_modified_times
147            .insert(day_data.date, modified_time);
148
149        Ok(())
150    }
151
152    /// Get the last known modification token for a date
153    pub fn get_last_modified(&self, date: &Date) -> Option<SystemTime> {
154        self.file_modified_times.get(date).copied().flatten()
155    }
156
157    #[allow(dead_code)]
158    pub fn save_active_timer(&self, timer: &TimerState) -> Result<()> {
159        self.storage.save_active_timer(timer)
160    }
161
162    pub fn load_active_timer(&self) -> Result<Option<TimerState>> {
163        self.storage.load_active_timer()
164    }
165
166    #[allow(dead_code)]
167    pub fn clear_active_timer(&self) -> Result<()> {
168        self.storage.clear_active_timer()
169    }
170
171    fn create_timer_manager(&self) -> crate::timer::TimerManager {
172        crate::timer::TimerManager::new(self.storage.clone())
173    }
174
175    pub fn start_timer(
176        &self,
177        task_name: String,
178        description: Option<String>,
179        project: Option<String>,
180        customer: Option<String>,
181        source_record_id: Option<u32>,
182        source_record_date: Option<time::Date>,
183    ) -> Result<TimerState> {
184        let timer_manager = self.create_timer_manager();
185        timer_manager.start(
186            task_name,
187            description,
188            project,
189            customer,
190            source_record_id,
191            source_record_date,
192        )
193    }
194
195    pub fn stop_timer(&self) -> Result<crate::models::WorkRecord> {
196        let timer_manager = self.create_timer_manager();
197        timer_manager.stop()
198    }
199
200    pub fn pause_timer(&self) -> Result<TimerState> {
201        let timer_manager = self.create_timer_manager();
202        timer_manager.pause()
203    }
204
205    pub fn resume_timer(&self) -> Result<TimerState> {
206        let timer_manager = self.create_timer_manager();
207        timer_manager.resume()
208    }
209
210    #[allow(dead_code)]
211    pub fn get_timer_elapsed(&self, timer: &TimerState) -> std::time::Duration {
212        let timer_manager = self.create_timer_manager();
213        timer_manager.get_elapsed_duration(timer)
214    }
215}
216
217impl Storage {
218    pub fn new() -> Result<Self> {
219        let data_dir = Self::get_data_directory()?;
220        Self::new_with_dir(data_dir)
221    }
222
223    /// Create a new Storage with a custom directory (for testing)
224    #[doc(hidden)]
225    #[allow(dead_code)]
226    pub fn new_with_dir(data_dir: PathBuf) -> Result<Self> {
227        fs::create_dir_all(&data_dir).context("Failed to create data directory")?;
228
229        let db_path = data_dir.join(DATABASE_FILE_NAME);
230        let repository = SqliteRepository::new(db_path);
231        repository.initialize(&data_dir)?;
232
233        Ok(Storage { repository })
234    }
235
236    fn get_data_directory() -> Result<PathBuf> {
237        if let Some(data_dir) = dirs::data_local_dir() {
238            let app_dir = data_dir.join("work-tuimer");
239            if fs::create_dir_all(&app_dir).is_ok() {
240                return Ok(app_dir);
241            }
242        }
243
244        let local_data = PathBuf::from("./data");
245        if fs::create_dir_all(&local_data).is_ok() {
246            return Ok(local_data);
247        }
248
249        anyhow::bail!("Failed to create data directory in system location or ./data")
250    }
251
252    pub fn load(&self, date: &Date) -> Result<DayData> {
253        self.repository.load_day(date)
254    }
255
256    pub fn save(&self, day_data: &DayData) -> Result<()> {
257        self.repository.save_day(day_data)
258    }
259
260    /// Get a synthetic monotonic token derived from day revision state.
261    /// Returns None if the day has never been written.
262    ///
263    /// This is not a wall-clock timestamp; it is used only for equality/
264    /// inequality change detection.
265    #[allow(dead_code)]
266    pub fn get_file_modified_time(&self, date: &Date) -> Option<SystemTime> {
267        match self.try_get_file_modified_time(date) {
268            Ok(token) => token,
269            Err(err) => {
270                eprintln!(
271                    "Failed to read day revision token for {} from storage: {err:#}",
272                    date
273                );
274                None
275            }
276        }
277    }
278
279    pub fn try_get_file_modified_time(&self, date: &Date) -> Result<Option<SystemTime>> {
280        self.repository.day_revision_token(date)
281    }
282
283    pub fn save_active_timer(&self, timer: &TimerState) -> Result<()> {
284        self.repository.save_active_timer(timer)
285    }
286
287    pub fn load_active_timer(&self) -> Result<Option<TimerState>> {
288        self.repository.load_active_timer()
289    }
290
291    pub fn clear_active_timer(&self) -> Result<()> {
292        self.repository.clear_active_timer()
293    }
294
295    pub fn diagnostics(&self) -> Result<StorageDiagnostics> {
296        self.repository.diagnostics()
297    }
298
299    #[cfg(test)]
300    fn get_db_path(&self) -> PathBuf {
301        self.repository.db_path.clone()
302    }
303}
304
305impl SqliteRepository {
306    fn new(db_path: PathBuf) -> Self {
307        Self { db_path }
308    }
309
    /// One-time startup sequence: open the database, configure pragmas,
    /// create the base schema, apply column migrations, then import any
    /// legacy JSON data from `data_dir`.
    ///
    /// The schema connection is dropped first; the JSON migration opens its
    /// own connection and runs inside its own transaction.
    fn initialize(&self, data_dir: &Path) -> Result<()> {
        let conn = self.open_connection()?;
        Self::apply_database_pragmas(&conn)?;
        Self::initialize_schema(&conn)?;
        Self::apply_schema_migrations(&conn)?;
        drop(conn);

        self
            .migrate_from_legacy_json_if_needed(data_dir)
            .context("Failed to migrate legacy JSON into SQLite. Fix or remove malformed legacy JSON files and restart")
    }
321
322    fn open_connection(&self) -> Result<Connection> {
323        let conn = Connection::open(&self.db_path).context(format!(
324            "Failed to open SQLite database at {:?}",
325            self.db_path
326        ))?;
327        conn.execute_batch("PRAGMA foreign_keys = ON;")
328            .context("Failed to enable SQLite foreign key support")?;
329        Ok(conn)
330    }
331
    /// Configure database pragmas: WAL journaling (concurrent readers with a
    /// single writer) and NORMAL synchronous mode (fsync at checkpoints
    /// rather than on every commit).
    fn apply_database_pragmas(conn: &Connection) -> Result<()> {
        conn.execute_batch(
            "
            PRAGMA journal_mode = WAL;
            PRAGMA synchronous = NORMAL;
            ",
        )
        .context("Failed to configure SQLite pragmas")?;
        Ok(())
    }
342
    /// Create all tables and indexes used by the repository. Every statement
    /// uses `IF NOT EXISTS`, so this is idempotent and safe to run on every
    /// startup; columns added after the initial release are handled
    /// separately by `apply_schema_migrations`.
    fn initialize_schema(conn: &Connection) -> Result<()> {
        conn.execute_batch(
            "
            CREATE TABLE IF NOT EXISTS meta (
                key TEXT PRIMARY KEY,
                value TEXT NOT NULL
            );

            CREATE TABLE IF NOT EXISTS day_meta (
                date TEXT PRIMARY KEY,
                last_id INTEGER NOT NULL DEFAULT 0 CHECK (last_id >= 0),
                revision INTEGER NOT NULL DEFAULT 0 CHECK (revision >= 0)
            );

            CREATE TABLE IF NOT EXISTS work_records (
                date TEXT NOT NULL,
                id INTEGER NOT NULL,
                name TEXT NOT NULL,
                start_minutes INTEGER NOT NULL,
                end_minutes INTEGER NOT NULL,
                total_minutes INTEGER NOT NULL,
                project TEXT NOT NULL DEFAULT '',
                customer TEXT NOT NULL DEFAULT '',
                description TEXT NOT NULL DEFAULT '',
                PRIMARY KEY (date, id),
                FOREIGN KEY (date) REFERENCES day_meta(date) ON DELETE CASCADE
            );

            CREATE INDEX IF NOT EXISTS idx_work_records_date_start
                ON work_records(date, start_minutes);

            CREATE TABLE IF NOT EXISTS active_timer (
                singleton_id INTEGER PRIMARY KEY CHECK (singleton_id = 1),
                id INTEGER,
                task_name TEXT NOT NULL,
                description TEXT,
                project TEXT,
                customer TEXT,
                start_time TEXT NOT NULL,
                end_time TEXT,
                date TEXT NOT NULL,
                status TEXT NOT NULL CHECK (status IN ('running', 'paused', 'stopped')),
                paused_duration_secs INTEGER NOT NULL CHECK (paused_duration_secs >= 0),
                paused_at TEXT,
                created_at TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                source_record_id INTEGER,
                source_record_date TEXT
            );
            ",
        )
        .context("Failed to initialize SQLite schema")?;

        Ok(())
    }
398
399    fn apply_schema_migrations(conn: &Connection) -> Result<()> {
400        Self::ensure_column_exists(conn, "work_records", "project", "TEXT NOT NULL DEFAULT ''")?;
401        Self::ensure_column_exists(conn, "work_records", "customer", "TEXT NOT NULL DEFAULT ''")?;
402        Self::ensure_column_exists(conn, "active_timer", "project", "TEXT")?;
403        Self::ensure_column_exists(conn, "active_timer", "customer", "TEXT")?;
404        Ok(())
405    }
406
407    fn ensure_column_exists(
408        conn: &Connection,
409        table: &str,
410        column: &str,
411        definition: &str,
412    ) -> Result<()> {
413        if Self::has_column(conn, table, column)? {
414            return Ok(());
415        }
416
417        let alter_sql = format!("ALTER TABLE {table} ADD COLUMN {column} {definition}");
418        conn.execute(&alter_sql, [])
419            .context(format!("Failed to add column {column} to {table}"))?;
420        Ok(())
421    }
422
423    fn has_column(conn: &Connection, table: &str, column: &str) -> Result<bool> {
424        let pragma_sql = format!("PRAGMA table_info({table})");
425        let mut stmt = conn
426            .prepare(&pragma_sql)
427            .context(format!("Failed to inspect table schema for {table}"))?;
428        let mut rows = stmt.query([]).context("Failed to query table_info")?;
429
430        while let Some(row) = rows.next().context("Failed to read table_info row")? {
431            let column_name = row.get::<_, String>(1)?;
432            if column_name == column {
433                return Ok(true);
434            }
435        }
436
437        Ok(false)
438    }
439
    /// Load the `DayData` for `date`. Returns an empty `DayData::new` when
    /// no `day_meta` row exists for the day; otherwise rebuilds the record
    /// map from the `work_records` table.
    fn load_day(&self, date: &Date) -> Result<DayData> {
        let conn = self.open_connection()?;
        let date_key = format_date(*date);

        let last_id_opt = conn
            .query_row(
                "SELECT last_id FROM day_meta WHERE date = ?1",
                params![date_key.clone()],
                |row| row.get::<_, i64>(0),
            )
            .optional()
            .context("Failed to query day metadata")?;

        // A missing day_meta row means this day was never persisted.
        let Some(last_id_raw) = last_id_opt else {
            return Ok(DayData::new(*date));
        };

        let last_id = i64_to_u32(last_id_raw, "day_meta.last_id")?;

        let mut day_data = DayData {
            date: *date,
            last_id,
            work_records: HashMap::new(),
        };

        let mut stmt = conn
            .prepare(
                "
                SELECT id, name, start_minutes, end_minutes, total_minutes, project, customer, description
                FROM work_records
                WHERE date = ?1
                ORDER BY id
                ",
            )
            .context("Failed to prepare work records query")?;

        let mut rows = stmt
            .query(params![date_key])
            .context("Failed to query work records")?;

        while let Some(row) = rows.next().context("Failed to read work record row")? {
            // Integer columns are stored as i64 and narrowed with a checked
            // conversion that names the offending column on failure.
            let id = i64_to_u32(row.get::<_, i64>(0)?, "work_records.id")?;
            let name = row.get::<_, String>(1)?;
            let start_minutes = i64_to_u32(row.get::<_, i64>(2)?, "work_records.start_minutes")?;
            let end_minutes = i64_to_u32(row.get::<_, i64>(3)?, "work_records.end_minutes")?;
            let total_minutes = i64_to_u32(row.get::<_, i64>(4)?, "work_records.total_minutes")?;
            // Text columns read as Option to tolerate NULLs; empty string
            // is the neutral value.
            let project = row.get::<_, Option<String>>(5)?.unwrap_or_default();
            let customer = row.get::<_, Option<String>>(6)?.unwrap_or_default();
            let description = row.get::<_, Option<String>>(7)?.unwrap_or_default();

            let start = TimePoint::from_minutes_since_midnight(start_minutes)
                .map_err(|e| anyhow!(e))
                .context("Invalid start_minutes value in database")?;
            let end = TimePoint::from_minutes_since_midnight(end_minutes)
                .map_err(|e| anyhow!(e))
                .context("Invalid end_minutes value in database")?;

            let record = WorkRecord {
                id,
                name,
                start,
                end,
                total_minutes,
                project,
                customer,
                description,
            };

            day_data.work_records.insert(id, record);
        }

        Ok(day_data)
    }
513
514    fn save_day(&self, day_data: &DayData) -> Result<()> {
515        let mut conn = self.open_connection()?;
516        let tx = conn
517            .transaction()
518            .context("Failed to begin day save transaction")?;
519
520        Self::write_day_data_tx(&tx, day_data, true)?;
521
522        tx.commit()
523            .context("Failed to commit day save transaction")?;
524        Ok(())
525    }
526
527    fn day_revision_token(&self, date: &Date) -> Result<Option<SystemTime>> {
528        let conn = self.open_connection()?;
529        let date_key = format_date(*date);
530
531        let revision_opt = conn
532            .query_row(
533                "SELECT revision FROM day_meta WHERE date = ?1",
534                params![date_key],
535                |row| row.get::<_, i64>(0),
536            )
537            .optional()
538            .context("Failed to query day revision")?;
539
540        match revision_opt {
541            None => Ok(None),
542            Some(revision) => {
543                if revision < 0 {
544                    anyhow::bail!("Invalid negative revision in database: {}", revision);
545                }
546                // Convert revision counter into a synthetic token. We only use
547                // this value as a monotonic change marker, not as a real time.
548                let token = SystemTime::UNIX_EPOCH
549                    .checked_add(StdDuration::from_secs(revision as u64))
550                    .context("Failed to convert revision to SystemTime")?;
551                Ok(Some(token))
552            }
553        }
554    }
555
556    fn save_active_timer(&self, timer: &TimerState) -> Result<()> {
557        let mut conn = self.open_connection()?;
558        let tx = conn
559            .transaction()
560            .context("Failed to begin active timer transaction")?;
561
562        Self::save_active_timer_tx(&tx, timer)?;
563        tx.commit()
564            .context("Failed to commit active timer transaction")?;
565
566        Ok(())
567    }
568
    /// Read the singleton active-timer row (singleton_id = 1), if present.
    ///
    /// The row is first materialized as a 15-element positional tuple (one
    /// entry per selected column, in SELECT order), then each field is
    /// validated and parsed into the `TimerState` representation.
    fn load_active_timer(&self) -> Result<Option<TimerState>> {
        let conn = self.open_connection()?;

        let timer_row = conn
            .query_row(
                "
                SELECT
                    id,
                    task_name,
                    description,
                    project,
                    customer,
                    start_time,
                    end_time,
                    date,
                    status,
                    paused_duration_secs,
                    paused_at,
                    created_at,
                    updated_at,
                    source_record_id,
                    source_record_date
                FROM active_timer
                WHERE singleton_id = 1
                ",
                [],
                |row| {
                    // Tuple indices 0..=14 mirror the SELECT column order above.
                    Ok((
                        row.get::<_, Option<i64>>(0)?,
                        row.get::<_, String>(1)?,
                        row.get::<_, Option<String>>(2)?,
                        row.get::<_, Option<String>>(3)?,
                        row.get::<_, Option<String>>(4)?,
                        row.get::<_, String>(5)?,
                        row.get::<_, Option<String>>(6)?,
                        row.get::<_, String>(7)?,
                        row.get::<_, String>(8)?,
                        row.get::<_, i64>(9)?,
                        row.get::<_, Option<String>>(10)?,
                        row.get::<_, String>(11)?,
                        row.get::<_, String>(12)?,
                        row.get::<_, Option<i64>>(13)?,
                        row.get::<_, Option<String>>(14)?,
                    ))
                },
            )
            .optional()
            .context("Failed to query active timer")?;

        // No row means no active timer is stored.
        let Some(row) = timer_row else {
            return Ok(None);
        };

        // Optional integer ids are narrowed with checked conversions that
        // name the offending column on failure.
        let id = row
            .0
            .map(|v| i64_to_u32(v, "active_timer.id"))
            .transpose()?;
        let task_name = row.1;
        let description = row.2;
        let project = row.3;
        let customer = row.4;
        let start_time = parse_datetime(&row.5, "active_timer.start_time")?;
        let end_time = row
            .6
            .as_deref()
            .map(|v| parse_datetime(v, "active_timer.end_time"))
            .transpose()?;
        let date = parse_date(&row.7).context("Invalid active_timer.date")?;
        let status = parse_timer_status(&row.8)?;
        let paused_duration_secs = row.9;
        let paused_at = row
            .10
            .as_deref()
            .map(|v| parse_datetime(v, "active_timer.paused_at"))
            .transpose()?;
        let created_at = parse_datetime(&row.11, "active_timer.created_at")?;
        let updated_at = parse_datetime(&row.12, "active_timer.updated_at")?;
        let source_record_id = row
            .13
            .map(|v| i64_to_u32(v, "active_timer.source_record_id"))
            .transpose()?;
        let source_record_date = row
            .14
            .as_deref()
            .map(parse_date)
            .transpose()
            .context("Invalid active_timer.source_record_date")?;

        Ok(Some(TimerState {
            id,
            task_name,
            description,
            project,
            customer,
            start_time,
            end_time,
            date,
            status,
            paused_duration_secs,
            paused_at,
            created_at,
            updated_at,
            source_record_id,
            source_record_date,
        }))
    }
675
676    fn clear_active_timer(&self) -> Result<()> {
677        let conn = self.open_connection()?;
678        conn.execute("DELETE FROM active_timer WHERE singleton_id = 1", [])
679            .context("Failed to clear active timer")?;
680        Ok(())
681    }
682
    /// Collect a health snapshot: migration marker, day / work-record row
    /// counts, active-timer presence, and counts of any legacy JSON files
    /// still sitting next to the database file.
    fn diagnostics(&self) -> Result<StorageDiagnostics> {
        let conn = self.open_connection()?;

        let migration_marker = conn
            .query_row(
                "SELECT value FROM meta WHERE key = ?1",
                params![JSON_MIGRATION_META_KEY],
                |row| row.get::<_, String>(0),
            )
            .optional()
            .context("Failed to query migration marker")?;

        // COUNT(*) yields i64; narrow to u64 with a checked conversion.
        let days_count = i64_to_u64(
            conn.query_row("SELECT COUNT(*) FROM day_meta", [], |row| {
                row.get::<_, i64>(0)
            })
            .context("Failed to count day rows")?,
            "day_meta count",
        )?;

        let work_records_count = i64_to_u64(
            conn.query_row("SELECT COUNT(*) FROM work_records", [], |row| {
                row.get::<_, i64>(0)
            })
            .context("Failed to count work record rows")?,
            "work_records count",
        )?;

        // Existence probe: any row at all means a timer is stored.
        let active_timer_present = conn
            .query_row(
                "SELECT 1 FROM active_timer WHERE singleton_id = 1 LIMIT 1",
                [],
                |_row| Ok(()),
            )
            .optional()
            .context("Failed to check active timer existence")?
            .is_some();

        let database_path = self.db_path.clone();
        // Legacy JSON files live in the same directory as the database.
        let data_dir = database_path
            .parent()
            .context("Database path has no parent directory")?;

        let (legacy_day_json_files, legacy_timer_json_files) =
            Self::count_legacy_json_files(data_dir)?;

        Ok(StorageDiagnostics {
            database_path,
            migration_marker,
            days_count,
            work_records_count,
            active_timer_present,
            legacy_day_json_files,
            legacy_timer_json_files,
        })
    }
739
740    fn count_legacy_json_files(data_dir: &Path) -> Result<(u64, u64)> {
741        let entries = fs::read_dir(data_dir).context("Failed to read data directory")?;
742
743        let mut day_files_count = 0_u64;
744        let mut timer_files_count = 0_u64;
745
746        for entry in entries {
747            let entry = entry.context("Failed to read directory entry")?;
748            let path = entry.path();
749
750            if !path.is_file() {
751                continue;
752            }
753
754            let Some(file_name_os) = path.file_name() else {
755                continue;
756            };
757            let Some(file_name) = file_name_os.to_str() else {
758                continue;
759            };
760
761            if !file_name.ends_with(".json") {
762                continue;
763            }
764
765            if matches!(
766                file_name,
767                LEGACY_RUNNING_TIMER_FILE_NAME | LEGACY_ACTIVE_TIMER_FILE_NAME
768            ) {
769                timer_files_count += 1;
770                continue;
771            }
772
773            let Some(date_part) = file_name.strip_suffix(".json") else {
774                continue;
775            };
776
777            if parse_date(date_part).is_ok() {
778                day_files_count += 1;
779            }
780        }
781
782        Ok((day_files_count, timer_files_count))
783    }
784
    /// One-shot import of legacy JSON day/timer files into SQLite.
    ///
    /// Runs inside a single transaction; the `meta` marker row is written
    /// in the same transaction, so a crash mid-import leaves no marker and
    /// the migration retries on the next startup. If the marker already
    /// exists the whole import is skipped.
    fn migrate_from_legacy_json_if_needed(&self, data_dir: &Path) -> Result<()> {
        let mut conn = self.open_connection()?;
        let tx = conn
            .transaction()
            .context("Failed to begin JSON migration transaction")?;

        // Marker present: migration already completed on a previous run.
        if Self::get_meta_value_tx(&tx, JSON_MIGRATION_META_KEY)?.is_some() {
            tx.commit()
                .context("Failed to finalize already-completed migration transaction")?;
            return Ok(());
        }

        Self::import_legacy_day_files_tx(&tx, data_dir)?;
        Self::import_legacy_timer_tx(&tx, data_dir)?;
        Self::set_meta_value_tx(&tx, JSON_MIGRATION_META_KEY, "done")?;

        tx.commit().context("Failed to commit JSON migration")?;
        Ok(())
    }
804
    /// Persist `day_data` within an existing transaction using a
    /// delete-and-reinsert strategy: the day's `work_records` rows are
    /// cleared and rewritten from the in-memory map, then `day_meta` is
    /// updated. When `increment_revision` is true (normal saves) the
    /// revision counter is bumped; when false (JSON migration) it is only
    /// raised from 0 to 1.
    fn write_day_data_tx(
        tx: &Transaction<'_>,
        day_data: &DayData,
        increment_revision: bool,
    ) -> Result<()> {
        let date_key = format_date(day_data.date);

        // Ensure the day_meta row exists; work_records has a FK on it.
        tx.execute(
            "
            INSERT INTO day_meta(date, last_id, revision)
            VALUES (?1, 0, 0)
            ON CONFLICT(date) DO NOTHING
            ",
            params![date_key.clone()],
        )
        .context("Failed to upsert day_meta row")?;

        tx.execute(
            "DELETE FROM work_records WHERE date = ?1",
            params![date_key.clone()],
        )
        .context("Failed to clear existing day work records")?;

        // Scope the prepared statement so it is dropped before the
        // day_meta update below.
        {
            let mut stmt = tx
                .prepare(
                    "
                    INSERT INTO work_records (
                        date,
                        id,
                        name,
                        start_minutes,
                        end_minutes,
                        total_minutes,
                        project,
                        customer,
                        description
                    )
                    VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)
                    ",
                )
                .context("Failed to prepare work record insert statement")?;

            for record in day_data.work_records.values() {
                stmt.execute(params![
                    date_key.clone(),
                    i64::from(record.id),
                    &record.name,
                    i64::from(record.start.to_minutes_since_midnight()),
                    i64::from(record.end.to_minutes_since_midnight()),
                    i64::from(record.total_minutes),
                    &record.project,
                    &record.customer,
                    &record.description,
                ])
                .context("Failed to insert work record")?;
            }
        }

        if increment_revision {
            // Revision is an i64 counter. It would require an impractical number
            // of writes to overflow, and serves only as a monotonic change token.
            tx.execute(
                "
                UPDATE day_meta
                SET last_id = ?1,
                    revision = revision + 1
                WHERE date = ?2
                ",
                params![i64::from(day_data.last_id), date_key],
            )
            .context("Failed to update day_meta with incremented revision")?;
        } else {
            // Migration initializes revision to at least 1 so downstream change
            // detection has a stable non-zero token from first persisted state.
            tx.execute(
                "
                UPDATE day_meta
                SET last_id = ?1,
                    revision = CASE WHEN revision = 0 THEN 1 ELSE revision END
                WHERE date = ?2
                ",
                params![i64::from(day_data.last_id), date_key],
            )
            .context("Failed to update day_meta during migration")?;
        }

        Ok(())
    }
894
    /// Upsert the singleton active-timer row inside the given transaction.
    ///
    /// The `active_timer` table holds at most one row (`singleton_id = 1`);
    /// the `ON CONFLICT` clause overwrites every column so the stored row
    /// always mirrors the in-memory `TimerState`. Datetimes are serialized
    /// as RFC3339 text and dates as `YYYY-MM-DD` text.
    fn save_active_timer_tx(tx: &Transaction<'_>, timer: &TimerState) -> Result<()> {
        // Pre-serialize all optional/complex fields first so any formatting
        // error surfaces before the SQL statement runs.
        let id = timer.id.map(i64::from);
        let start_time = format_datetime(timer.start_time)?;
        let end_time = timer.end_time.map(format_datetime).transpose()?;
        let date = format_date(timer.date);
        let status = timer_status_to_str(timer.status);
        let project = timer.project.as_deref();
        let customer = timer.customer.as_deref();
        let paused_at = timer.paused_at.map(format_datetime).transpose()?;
        let created_at = format_datetime(timer.created_at)?;
        let updated_at = format_datetime(timer.updated_at)?;
        let source_record_id = timer.source_record_id.map(i64::from);
        let source_record_date = timer.source_record_date.map(format_date);

        // NOTE: the params! list below is positional — its order must match
        // ?1..?15 in the VALUES clause exactly.
        tx.execute(
            "
            INSERT INTO active_timer (
                singleton_id,
                id,
                task_name,
                description,
                project,
                customer,
                start_time,
                end_time,
                date,
                status,
                paused_duration_secs,
                paused_at,
                created_at,
                updated_at,
                source_record_id,
                source_record_date
            )
            VALUES (
                1,
                ?1,
                ?2,
                ?3,
                ?4,
                ?5,
                ?6,
                ?7,
                ?8,
                ?9,
                ?10,
                ?11,
                ?12,
                ?13,
                ?14,
                ?15
            )
            ON CONFLICT(singleton_id) DO UPDATE SET
                id = excluded.id,
                task_name = excluded.task_name,
                description = excluded.description,
                project = excluded.project,
                customer = excluded.customer,
                start_time = excluded.start_time,
                end_time = excluded.end_time,
                date = excluded.date,
                status = excluded.status,
                paused_duration_secs = excluded.paused_duration_secs,
                paused_at = excluded.paused_at,
                created_at = excluded.created_at,
                updated_at = excluded.updated_at,
                source_record_id = excluded.source_record_id,
                source_record_date = excluded.source_record_date
            ",
            params![
                id,
                &timer.task_name,
                timer.description.as_deref(),
                project,
                customer,
                start_time,
                end_time,
                date,
                status,
                timer.paused_duration_secs,
                paused_at,
                created_at,
                updated_at,
                source_record_id,
                source_record_date,
            ],
        )
        .context("Failed to upsert active timer")?;

        Ok(())
    }
986
987    fn get_meta_value_tx(tx: &Transaction<'_>, key: &str) -> Result<Option<String>> {
988        tx.query_row(
989            "SELECT value FROM meta WHERE key = ?1",
990            params![key],
991            |row| row.get::<_, String>(0),
992        )
993        .optional()
994        .context(format!("Failed to query meta key '{}'", key))
995    }
996
997    fn set_meta_value_tx(tx: &Transaction<'_>, key: &str, value: &str) -> Result<()> {
998        tx.execute(
999            "
1000            INSERT INTO meta(key, value)
1001            VALUES (?1, ?2)
1002            ON CONFLICT(key) DO UPDATE SET value = excluded.value
1003            ",
1004            params![key, value],
1005        )
1006        .context(format!("Failed to upsert meta key '{}'", key))?;
1007        Ok(())
1008    }
1009
1010    fn day_exists_tx(tx: &Transaction<'_>, date_key: &str) -> Result<bool> {
1011        let exists = tx
1012            .query_row(
1013                "SELECT 1 FROM day_meta WHERE date = ?1 LIMIT 1",
1014                params![date_key],
1015                |_row| Ok(()),
1016            )
1017            .optional()
1018            .context("Failed to check day existence")?
1019            .is_some();
1020        Ok(exists)
1021    }
1022
1023    fn import_legacy_day_files_tx(tx: &Transaction<'_>, data_dir: &Path) -> Result<()> {
1024        let entries = fs::read_dir(data_dir).context("Failed to read data directory")?;
1025        let mut paths = Vec::new();
1026
1027        for entry in entries {
1028            let entry = entry.context("Failed to read directory entry")?;
1029            paths.push(entry.path());
1030        }
1031
1032        paths.sort();
1033
1034        for path in paths {
1035            if !path.is_file() {
1036                continue;
1037            }
1038
1039            let Some(file_name_os) = path.file_name() else {
1040                continue;
1041            };
1042            let Some(file_name) = file_name_os.to_str() else {
1043                continue;
1044            };
1045
1046            if !file_name.ends_with(".json") {
1047                continue;
1048            }
1049
1050            if matches!(
1051                file_name,
1052                LEGACY_RUNNING_TIMER_FILE_NAME | LEGACY_ACTIVE_TIMER_FILE_NAME
1053            ) {
1054                continue;
1055            }
1056
1057            let Some(date_part) = file_name.strip_suffix(".json") else {
1058                continue;
1059            };
1060
1061            let parsed_file_date = match parse_date(date_part) {
1062                Ok(date) => date,
1063                Err(_) => continue,
1064            };
1065
1066            let date_key = format_date(parsed_file_date);
1067            if Self::day_exists_tx(tx, &date_key)? {
1068                eprintln!(
1069                    "Skipping legacy JSON import for {} from {:?}: data already exists in SQLite",
1070                    date_key, path
1071                );
1072                continue;
1073            }
1074
1075            let contents = fs::read_to_string(&path)
1076                .context(format!("Failed to read legacy JSON file: {:?}", path))?;
1077            let day_data: DayData = serde_json::from_str(&contents)
1078                .context(format!("Failed to parse legacy day JSON: {:?}", path))?;
1079
1080            Self::write_day_data_tx(tx, &day_data, false)?;
1081        }
1082
1083        Ok(())
1084    }
1085
1086    fn import_legacy_timer_tx(tx: &Transaction<'_>, data_dir: &Path) -> Result<()> {
1087        let timer_exists = tx
1088            .query_row(
1089                "SELECT 1 FROM active_timer WHERE singleton_id = 1 LIMIT 1",
1090                [],
1091                |_row| Ok(()),
1092            )
1093            .optional()
1094            .context("Failed to check existing active timer")?
1095            .is_some();
1096
1097        if timer_exists {
1098            return Ok(());
1099        }
1100
1101        let running_timer_path = data_dir.join(LEGACY_RUNNING_TIMER_FILE_NAME);
1102        let active_timer_path = data_dir.join(LEGACY_ACTIVE_TIMER_FILE_NAME);
1103
1104        if running_timer_path.exists() && active_timer_path.exists() {
1105            eprintln!(
1106                "Found both legacy timer files ({:?}, {:?}); preferring {:?}",
1107                running_timer_path, active_timer_path, running_timer_path
1108            );
1109        }
1110
1111        let timer_paths = [running_timer_path, active_timer_path];
1112
1113        for path in timer_paths {
1114            if !path.exists() {
1115                continue;
1116            }
1117
1118            let contents = fs::read_to_string(&path)
1119                .context(format!("Failed to read legacy timer JSON: {:?}", path))?;
1120            let timer: TimerState = serde_json::from_str(&contents)
1121                .context(format!("Failed to parse legacy timer JSON: {:?}", path))?;
1122
1123            Self::save_active_timer_tx(tx, &timer)?;
1124            break;
1125        }
1126
1127        Ok(())
1128    }
1129}
1130
1131fn format_date(date: Date) -> String {
1132    format!(
1133        "{:04}-{:02}-{:02}",
1134        date.year(),
1135        date.month() as u8,
1136        date.day()
1137    )
1138}
1139
1140fn parse_date(value: &str) -> Result<Date> {
1141    Date::parse(value, format_description!("[year]-[month]-[day]"))
1142        .context(format!("Invalid date: '{}'. Expected YYYY-MM-DD", value))
1143}
1144
1145fn format_datetime(value: OffsetDateTime) -> Result<String> {
1146    value
1147        .format(&Rfc3339)
1148        .context("Failed to serialize datetime as RFC3339")
1149}
1150
1151fn parse_datetime(value: &str, field: &str) -> Result<OffsetDateTime> {
1152    OffsetDateTime::parse(value, &Rfc3339).context(format!(
1153        "Invalid RFC3339 datetime in {}: '{}'",
1154        field, value
1155    ))
1156}
1157
1158fn timer_status_to_str(status: TimerStatus) -> &'static str {
1159    match status {
1160        TimerStatus::Running => "running",
1161        TimerStatus::Paused => "paused",
1162        TimerStatus::Stopped => "stopped",
1163    }
1164}
1165
1166fn parse_timer_status(value: &str) -> Result<TimerStatus> {
1167    match value {
1168        "running" => Ok(TimerStatus::Running),
1169        "paused" => Ok(TimerStatus::Paused),
1170        "stopped" => Ok(TimerStatus::Stopped),
1171        _ => anyhow::bail!("Invalid timer status in database: {}", value),
1172    }
1173}
1174
1175fn i64_to_u32(value: i64, field_name: &str) -> Result<u32> {
1176    u32::try_from(value).context(format!(
1177        "Invalid value in {}: {} (must fit in u32)",
1178        field_name, value
1179    ))
1180}
1181
1182fn i64_to_u64(value: i64, field_name: &str) -> Result<u64> {
1183    u64::try_from(value).context(format!(
1184        "Invalid value in {}: {} (must fit in u64)",
1185        field_name, value
1186    ))
1187}
1188
#[cfg(test)]
mod tests {
    //! Integration-style tests that exercise the storage layer against a
    //! real SQLite database in a per-test temporary directory, including
    //! the one-time legacy-JSON migration paths.

    use super::*;
    use tempfile::TempDir;

    // Fixed date used by most tests so legacy file names can be hard-coded.
    fn create_test_date() -> Date {
        Date::from_calendar_date(2025, time::Month::November, 6).unwrap()
    }

    // Build a 9:00–17:00 work record with the given id and name.
    fn create_test_record(id: u32, name: &str) -> WorkRecord {
        let start = TimePoint::new(9, 0).unwrap();
        let end = TimePoint::new(17, 0).unwrap();
        WorkRecord::new(id, name.to_string(), start, end)
    }

    // Build a running timer started "now" with project/customer populated.
    fn create_test_timer() -> TimerState {
        let now = OffsetDateTime::now_utc();
        TimerState {
            id: None,
            task_name: "Test Timer".to_string(),
            description: Some("Timer description".to_string()),
            project: Some("Platform".to_string()),
            customer: Some("ACME".to_string()),
            start_time: now,
            end_time: None,
            date: now.date(),
            status: TimerStatus::Running,
            paused_duration_secs: 0,
            paused_at: None,
            created_at: now,
            updated_at: now,
            source_record_id: None,
            source_record_date: None,
        }
    }

    // Opening storage on an empty directory must create the database file.
    #[test]
    fn test_new_storage_with_temp_dir_initializes_database() {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new_with_dir(temp_dir.path().to_path_buf()).unwrap();

        assert!(temp_dir.path().exists());
        assert!(storage.get_db_path().exists());
    }

    // Saving a day and loading it back must preserve records and last_id.
    #[test]
    fn test_save_and_load_day_data_round_trip() {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new_with_dir(temp_dir.path().to_path_buf()).unwrap();
        let date = create_test_date();

        let mut day_data = DayData::new(date);
        day_data.add_record(create_test_record(1, "Coding"));
        day_data.add_record(create_test_record(2, "Meeting"));

        storage.save(&day_data).unwrap();
        let loaded = storage.load(&date).unwrap();

        assert_eq!(loaded.date, date);
        assert_eq!(loaded.last_id, 2);
        assert_eq!(loaded.work_records.len(), 2);
        assert_eq!(loaded.work_records.get(&1).unwrap().name, "Coding");
        assert_eq!(loaded.work_records.get(&2).unwrap().name, "Meeting");
    }

    // The "modified time" change token must be absent before any save and
    // must differ between two consecutive saves (revision-based, not mtime).
    #[test]
    fn test_get_file_modified_time_uses_revision_token() {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new_with_dir(temp_dir.path().to_path_buf()).unwrap();
        let date = create_test_date();

        assert!(storage.get_file_modified_time(&date).is_none());

        let mut day_data = DayData::new(date);
        day_data.add_record(create_test_record(1, "Task 1"));
        storage.save(&day_data).unwrap();
        let first = storage.get_file_modified_time(&date);

        day_data.add_record(create_test_record(2, "Task 2"));
        storage.save(&day_data).unwrap();
        let second = storage.get_file_modified_time(&date);

        assert!(first.is_some());
        assert!(second.is_some());
        assert_ne!(first, second);
    }

    // Active timer round-trip: save, load (fields preserved), then clear.
    #[test]
    fn test_save_load_and_clear_active_timer() {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new_with_dir(temp_dir.path().to_path_buf()).unwrap();
        let timer = create_test_timer();

        storage.save_active_timer(&timer).unwrap();
        let loaded = storage.load_active_timer().unwrap();
        assert!(loaded.is_some());
        let loaded_timer = loaded.unwrap();
        assert_eq!(loaded_timer.task_name, "Test Timer");
        assert_eq!(loaded_timer.project.as_deref(), Some("Platform"));
        assert_eq!(loaded_timer.customer.as_deref(), Some("ACME"));

        storage.clear_active_timer().unwrap();
        assert!(storage.load_active_timer().unwrap().is_none());
    }

    // Two managers sharing one database: a write through manager2 must be
    // detected as an external change by manager1's check_and_reload.
    #[test]
    fn test_storage_manager_check_and_reload_detects_external_changes() {
        let temp_dir = TempDir::new().unwrap();
        let mut manager1 = StorageManager::new_with_dir(temp_dir.path().to_path_buf()).unwrap();
        let mut manager2 = StorageManager::new_with_dir(temp_dir.path().to_path_buf()).unwrap();
        let date = create_test_date();

        manager1.load_with_tracking(date).unwrap();
        manager2
            .add_record(date, create_test_record(1, "External Change"))
            .unwrap();

        let reloaded = manager1.check_and_reload(date).unwrap();
        assert!(reloaded.is_some());
        assert_eq!(reloaded.unwrap().work_records.len(), 1);
    }

    // First open over legacy JSON files must import day data and the timer
    // into SQLite while leaving the JSON files in place as backup.
    #[test]
    fn test_json_migration_imports_legacy_day_data_and_timer() {
        let temp_dir = TempDir::new().unwrap();
        let data_dir = temp_dir.path();

        let date = create_test_date();
        let mut day_data = DayData::new(date);
        day_data.add_record(create_test_record(1, "Legacy Task"));

        // File name must match create_test_date() for the import to pick it up.
        let day_json_path = data_dir.join("2025-11-06.json");
        fs::write(
            &day_json_path,
            serde_json::to_string_pretty(&day_data).unwrap(),
        )
        .unwrap();

        let timer = create_test_timer();
        let timer_json_path = data_dir.join(LEGACY_RUNNING_TIMER_FILE_NAME);
        fs::write(
            &timer_json_path,
            serde_json::to_string_pretty(&timer).unwrap(),
        )
        .unwrap();

        let storage = Storage::new_with_dir(data_dir.to_path_buf()).unwrap();

        let loaded_day = storage.load(&date).unwrap();
        assert_eq!(loaded_day.work_records.len(), 1);
        assert_eq!(loaded_day.work_records.get(&1).unwrap().name, "Legacy Task");

        let loaded_timer = storage.load_active_timer().unwrap();
        assert!(loaded_timer.is_some());
        assert_eq!(loaded_timer.unwrap().task_name, "Test Timer");

        // Legacy files should be preserved as backup.
        assert!(day_json_path.exists());
        assert!(timer_json_path.exists());
    }

    // Opening storage twice over the same legacy file must not duplicate
    // or clobber the imported data (SQLite state wins on the second open).
    #[test]
    fn test_json_migration_is_idempotent() {
        let temp_dir = TempDir::new().unwrap();
        let data_dir = temp_dir.path();

        let date = create_test_date();
        let mut day_data = DayData::new(date);
        day_data.add_record(create_test_record(1, "Legacy Task"));

        let day_json_path = data_dir.join("2025-11-06.json");
        fs::write(
            &day_json_path,
            serde_json::to_string_pretty(&day_data).unwrap(),
        )
        .unwrap();

        let storage1 = Storage::new_with_dir(data_dir.to_path_buf()).unwrap();
        let loaded1 = storage1.load(&date).unwrap();
        assert_eq!(loaded1.work_records.len(), 1);

        let storage2 = Storage::new_with_dir(data_dir.to_path_buf()).unwrap();
        let loaded2 = storage2.load(&date).unwrap();
        assert_eq!(loaded2.work_records.len(), 1);
        assert_eq!(loaded2.work_records.get(&1).unwrap().name, "Legacy Task");
    }

    // A malformed legacy day file must fail the migration (error naming the
    // file) and must leave the database with no imported day rows.
    #[test]
    fn test_json_migration_fails_on_malformed_legacy_day_file() {
        let temp_dir = TempDir::new().unwrap();
        let data_dir = temp_dir.path();

        let malformed_day_path = data_dir.join("2025-11-08.json");
        fs::write(&malformed_day_path, "{ invalid json").unwrap();

        let result = Storage::new_with_dir(data_dir.to_path_buf());
        assert!(result.is_err());
        let err = result.err().expect("expected malformed migration to fail");
        // {:#} renders the full anyhow context chain, including the path.
        let err_msg = format!("{err:#}");
        let malformed_path_str = malformed_day_path.display().to_string();
        assert!(
            err_msg.contains(&malformed_path_str),
            "error message '{}' did not include malformed file path '{}'",
            err_msg,
            malformed_path_str
        );

        // The failed migration must have been rolled back: no day rows.
        let db_path = data_dir.join(DATABASE_FILE_NAME);
        let conn = Connection::open(db_path).unwrap();
        let day_rows: i64 = conn
            .query_row("SELECT COUNT(*) FROM day_meta", [], |row| row.get(0))
            .unwrap();
        assert_eq!(day_rows, 0);
    }

    // When both legacy timer files exist, running_timer.json must win.
    #[test]
    fn test_json_migration_prefers_running_timer_when_both_timer_files_exist() {
        let temp_dir = TempDir::new().unwrap();
        let data_dir = temp_dir.path();

        let mut running_timer = create_test_timer();
        running_timer.task_name = "running timer file".to_string();
        fs::write(
            data_dir.join(LEGACY_RUNNING_TIMER_FILE_NAME),
            serde_json::to_string_pretty(&running_timer).unwrap(),
        )
        .unwrap();

        let mut active_timer = create_test_timer();
        active_timer.task_name = "active timer file".to_string();
        fs::write(
            data_dir.join(LEGACY_ACTIVE_TIMER_FILE_NAME),
            serde_json::to_string_pretty(&active_timer).unwrap(),
        )
        .unwrap();

        let storage = Storage::new_with_dir(data_dir.to_path_buf()).unwrap();
        let loaded = storage.load_active_timer().unwrap().unwrap();

        assert_eq!(loaded.task_name, "running timer file");
    }

    // Diagnostics must reflect counts from SQLite plus leftover legacy files.
    #[test]
    fn test_storage_diagnostics_reports_expected_values() {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new_with_dir(temp_dir.path().to_path_buf()).unwrap();

        let date = create_test_date();
        let mut day_data = DayData::new(date);
        day_data.add_record(create_test_record(1, "Task One"));
        day_data.add_record(create_test_record(2, "Task Two"));
        storage.save(&day_data).unwrap();

        // A stray day JSON written after migration counts as a legacy file.
        fs::write(
            temp_dir.path().join("2025-11-09.json"),
            serde_json::to_string_pretty(&day_data).unwrap(),
        )
        .unwrap();

        let diagnostics = storage.diagnostics().unwrap();

        assert_eq!(diagnostics.migration_marker.as_deref(), Some("done"));
        assert_eq!(diagnostics.days_count, 1);
        assert_eq!(diagnostics.work_records_count, 2);
        assert!(!diagnostics.active_timer_present);
        assert_eq!(diagnostics.legacy_day_json_files, 1);
        assert_eq!(diagnostics.legacy_timer_json_files, 0);
    }

    // Opening storage over a pre-project/customer schema must add the new
    // columns to both work_records and active_timer.
    #[test]
    fn test_schema_migration_adds_project_and_customer_columns() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join(DATABASE_FILE_NAME);

        // Hand-create the old schema (no project/customer columns) so the
        // storage constructor has something to migrate.
        {
            let conn = Connection::open(&db_path).unwrap();
            conn.execute_batch(
                "
                CREATE TABLE meta (
                    key TEXT PRIMARY KEY,
                    value TEXT NOT NULL
                );

                CREATE TABLE day_meta (
                    date TEXT PRIMARY KEY,
                    last_id INTEGER NOT NULL DEFAULT 0 CHECK (last_id >= 0),
                    revision INTEGER NOT NULL DEFAULT 0 CHECK (revision >= 0)
                );

                CREATE TABLE work_records (
                    date TEXT NOT NULL,
                    id INTEGER NOT NULL,
                    name TEXT NOT NULL,
                    start_minutes INTEGER NOT NULL,
                    end_minutes INTEGER NOT NULL,
                    total_minutes INTEGER NOT NULL,
                    description TEXT NOT NULL DEFAULT '',
                    PRIMARY KEY (date, id),
                    FOREIGN KEY (date) REFERENCES day_meta(date) ON DELETE CASCADE
                );

                CREATE TABLE active_timer (
                    singleton_id INTEGER PRIMARY KEY CHECK (singleton_id = 1),
                    id INTEGER,
                    task_name TEXT NOT NULL,
                    description TEXT,
                    start_time TEXT NOT NULL,
                    end_time TEXT,
                    date TEXT NOT NULL,
                    status TEXT NOT NULL CHECK (status IN ('running', 'paused', 'stopped')),
                    paused_duration_secs INTEGER NOT NULL CHECK (paused_duration_secs >= 0),
                    paused_at TEXT,
                    created_at TEXT NOT NULL,
                    updated_at TEXT NOT NULL,
                    source_record_id INTEGER,
                    source_record_date TEXT
                );
                ",
            )
            .unwrap();
        }

        let _storage = Storage::new_with_dir(temp_dir.path().to_path_buf()).unwrap();
        let conn = Connection::open(db_path).unwrap();

        // pragma_table_info lists columns; COUNT(*) is 1 iff the column exists.
        let work_records_has_project: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM pragma_table_info('work_records') WHERE name = 'project'",
                [],
                |row| row.get(0),
            )
            .unwrap();
        let work_records_has_customer: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM pragma_table_info('work_records') WHERE name = 'customer'",
                [],
                |row| row.get(0),
            )
            .unwrap();
        let active_timer_has_project: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM pragma_table_info('active_timer') WHERE name = 'project'",
                [],
                |row| row.get(0),
            )
            .unwrap();
        let active_timer_has_customer: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM pragma_table_info('active_timer') WHERE name = 'customer'",
                [],
                |row| row.get(0),
            )
            .unwrap();

        assert_eq!(work_records_has_project, 1);
        assert_eq!(work_records_has_customer, 1);
        assert_eq!(active_timer_has_project, 1);
        assert_eq!(active_timer_has_customer, 1);
    }
}