//! intent_engine/db/mod.rs — SQLite pool construction and schema migrations.

1pub mod models;
2
3use crate::error::Result;
4use sqlx::sqlite::{SqliteConnectOptions, SqlitePool, SqlitePoolOptions};
5use std::path::Path;
6
7pub async fn create_pool(db_path: &Path) -> Result<SqlitePool> {
8    let options = SqliteConnectOptions::new()
9        .filename(db_path)
10        .create_if_missing(true)
11        .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal)
12        .busy_timeout(std::time::Duration::from_millis(5000))
13        // Enforce FK constraints on every connection. Without this SQLite silently
14        // ignores all FOREIGN KEY declarations (CASCADE, RESTRICT, etc.).
15        .pragma("foreign_keys", "ON");
16
17    let pool = SqlitePoolOptions::new()
18        .max_connections(5)
19        .connect_with(options)
20        .await?;
21
22    Ok(pool)
23}
24
25// ---------------------------------------------------------------------------
26// Version helpers
27// ---------------------------------------------------------------------------
28
/// Parse a semver string like "0.13.0" into a comparable (major, minor, patch) tuple.
/// String comparison is incorrect for version numbers (e.g. "0.9.0" > "0.14.0" as strings).
fn parse_version(v: &str) -> (u32, u32, u32) {
    // Any missing or non-numeric component degrades to 0 rather than erroring.
    let mut fields = v.splitn(3, '.').map(|part| part.parse::<u32>().unwrap_or(0));
    let major = fields.next().unwrap_or(0);
    let minor = fields.next().unwrap_or(0);
    let patch = fields.next().unwrap_or(0);
    (major, minor, patch)
}
38
39/// Check whether a table exists in the database.
40async fn table_exists(pool: &SqlitePool, name: &str) -> bool {
41    let count: i64 =
42        sqlx::query_scalar("SELECT COUNT(*) FROM sqlite_schema WHERE type='table' AND name=?")
43            .bind(name)
44            .fetch_one(pool)
45            .await
46            .unwrap_or(0);
47    count > 0
48}
49
/// Check whether a column exists in a table (test helper).
#[cfg(test)]
async fn column_exists(pool: &SqlitePool, table: &str, column: &str) -> bool {
    // pragma_table_info enumerates the table's columns; count matches by name.
    let hits = sqlx::query_scalar("SELECT COUNT(*) FROM pragma_table_info(?) WHERE name=?")
        .bind(table)
        .bind(column)
        .fetch_one(pool)
        .await
        .unwrap_or(0i64);
    hits > 0
}
61
62/// Write `schema_version` into `workspace_state`.
63async fn set_schema_version(pool: &SqlitePool, version: &str) -> Result<()> {
64    sqlx::query(
65        "INSERT INTO workspace_state (key, value) VALUES ('schema_version', ?) \
66         ON CONFLICT(key) DO UPDATE SET value = excluded.value",
67    )
68    .bind(version)
69    .execute(pool)
70    .await?;
71    Ok(())
72}
73
74/// Detect the current schema version of the database.
75///
76/// Detection order (most-specific first):
77/// 1. If `workspace_state.schema_version` exists → parse and return it.
78/// 2. If the `tasks` table does not exist → fresh database `(0,0,0)`.
79/// 3. Otherwise → pre-version database; return `(0,1,0)` so that every
80///    incremental upgrade function runs. Each upgrade is idempotent
81///    (`IF NOT EXISTS`, ignored `ALTER TABLE` errors, explicit `RESTRICT`
82///    check in v0.14.0), so running them on a database that is already at a
83///    newer state is safe. Column-based probing that returns an intermediate
84///    version is NOT used here: a non-standard schema (e.g. a hand-added
85///    `deleted_at`) could cause earlier, essential upgrades to be skipped.
86async fn detect_schema_version(pool: &SqlitePool) -> Result<(u32, u32, u32)> {
87    // 1. Authoritative version stored in workspace_state
88    if table_exists(pool, "workspace_state").await {
89        let stored: Option<String> =
90            sqlx::query_scalar("SELECT value FROM workspace_state WHERE key = 'schema_version'")
91                .fetch_optional(pool)
92                .await
93                .unwrap_or(None);
94
95        if let Some(v) = stored {
96            return Ok(parse_version(&v));
97        }
98    }
99
100    // 2. No tasks table → brand-new database
101    if !table_exists(pool, "tasks").await {
102        return Ok((0, 0, 0));
103    }
104
105    // 3. Pre-version database: tasks table exists but no schema_version.
106    // Return the lowest non-fresh version so all upgrade functions run.
107    Ok((0, 1, 0))
108}
109
110// ---------------------------------------------------------------------------
111// Public entry point
112// ---------------------------------------------------------------------------
113
114pub async fn run_migrations(pool: &SqlitePool) -> Result<()> {
115    // WAL mode — always safe to set, even if already set
116    sqlx::query("PRAGMA journal_mode=WAL").execute(pool).await?;
117
118    let version = detect_schema_version(pool).await?;
119
120    if version == (0, 0, 0) {
121        // Brand-new database: build the full current schema in one shot
122        migrate_fresh(pool).await?;
123        return Ok(());
124    }
125
126    // Incremental upgrades for existing databases — each gate is independent so
127    // that a partially-upgraded database can resume from wherever it stopped.
128    if version < (0, 9, 0) {
129        upgrade_to_v0_9_0(pool).await?;
130    }
131    if version < (0, 11, 0) {
132        upgrade_to_v0_11_0(pool).await?;
133    }
134    if version < (0, 12, 0) {
135        upgrade_to_v0_12_0(pool).await?;
136    }
137    if version < (0, 13, 0) {
138        upgrade_to_v0_13_0(pool).await?;
139    }
140    if version < (0, 14, 0) {
141        upgrade_to_v0_14_0(pool).await?;
142    }
143    if version < (0, 15, 0) {
144        upgrade_to_v0_15_0(pool).await?;
145    }
146
147    Ok(())
148}
149
150// ---------------------------------------------------------------------------
151// Fresh install — full current schema
152// ---------------------------------------------------------------------------
153
/// Build the complete, current (0.15.0) schema on a brand-new database.
///
/// Runs only when `detect_schema_version` reports `(0, 0, 0)`. Every statement
/// is `IF NOT EXISTS`-guarded, so a re-run on a partially built database is
/// harmless. Finishes by stamping `schema_version = "0.15.0"` so the
/// incremental upgrade functions never run against this database.
async fn migrate_fresh(pool: &SqlitePool) -> Result<()> {
    // ── tasks ──────────────────────────────────────────────────────────────
    // deleted_at is the soft-delete marker: NULL = active, non-NULL = deleted.
    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS tasks (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            parent_id INTEGER,
            name TEXT NOT NULL,
            spec TEXT,
            status TEXT NOT NULL DEFAULT 'todo',
            complexity INTEGER,
            priority INTEGER DEFAULT 0,
            first_todo_at DATETIME,
            first_doing_at DATETIME,
            first_done_at DATETIME,
            active_form TEXT,
            owner TEXT NOT NULL DEFAULT 'human',
            metadata TEXT DEFAULT '{}',
            deleted_at DATETIME,
            FOREIGN KEY (parent_id) REFERENCES tasks(id) ON DELETE CASCADE,
            CHECK (status IN ('todo', 'doing', 'done')),
            CHECK (owner IS NOT NULL AND owner != '')
        )
        "#,
    )
    .execute(pool)
    .await?;

    // ── tasks_fts (trigram tokenizer for CJK support) ───────────────────
    // External-content FTS5 table: rows mirror `tasks` and are kept in sync
    // by the triggers below.
    sqlx::query(
        r#"
        CREATE VIRTUAL TABLE IF NOT EXISTS tasks_fts USING fts5(
            name,
            spec,
            content=tasks,
            content_rowid=id,
            tokenize='trigram'
        )
        "#,
    )
    .execute(pool)
    .await?;

    // ── tasks triggers ───────────────────────────────────────────────────
    sqlx::query(
        r#"
        CREATE TRIGGER IF NOT EXISTS tasks_ai AFTER INSERT ON tasks BEGIN
            INSERT INTO tasks_fts(rowid, name, spec) VALUES (new.id, new.name, new.spec);
        END
        "#,
    )
    .execute(pool)
    .await?;

    sqlx::query(
        r#"
        CREATE TRIGGER IF NOT EXISTS tasks_ad AFTER DELETE ON tasks BEGIN
            DELETE FROM tasks_fts WHERE rowid = old.id;
        END
        "#,
    )
    .execute(pool)
    .await?;

    // For active tasks: keep FTS in sync on active→active updates.
    // Both old and new WHEN conditions are required so the trigger fires only
    // when the row remains active. Using only `new.deleted_at IS NULL` would
    // also fire on a restore (deleted→active), where the FTS entry no longer
    // exists — attempting to 'delete' a non-existent FTS5 entry corrupts the
    // index (SQLITE_CORRUPT, code 267).
    sqlx::query(
        r#"
        CREATE TRIGGER IF NOT EXISTS tasks_au_active
        AFTER UPDATE ON tasks WHEN old.deleted_at IS NULL AND new.deleted_at IS NULL BEGIN
            INSERT INTO tasks_fts(tasks_fts, rowid, name, spec)
                VALUES('delete', old.id, old.name, old.spec);
            INSERT INTO tasks_fts(rowid, name, spec) VALUES (new.id, new.name, new.spec);
        END
        "#,
    )
    .execute(pool)
    .await?;

    // For soft-deleted tasks: remove from FTS on the active→deleted transition.
    // WHEN clause requires old.deleted_at IS NULL so the trigger fires only once
    // (on the transition), not on every subsequent update to an already-deleted
    // task. Re-firing on an already-deleted row would attempt to remove a
    // non-existent FTS entry, which corrupts the FTS5 index.
    sqlx::query(
        r#"
        CREATE TRIGGER IF NOT EXISTS tasks_au_softdelete
        AFTER UPDATE ON tasks WHEN old.deleted_at IS NULL AND new.deleted_at IS NOT NULL BEGIN
            INSERT INTO tasks_fts(tasks_fts, rowid, name, spec)
                VALUES('delete', old.id, old.name, old.spec);
        END
        "#,
    )
    .execute(pool)
    .await?;

    // ── events ────────────────────────────────────────────────────────────
    // ON DELETE RESTRICT: events are an immutable audit log.
    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS events (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            task_id INTEGER NOT NULL,
            timestamp DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
            log_type TEXT NOT NULL,
            discussion_data TEXT NOT NULL,
            FOREIGN KEY (task_id) REFERENCES tasks(id) ON DELETE RESTRICT
        )
        "#,
    )
    .execute(pool)
    .await?;

    sqlx::query("CREATE INDEX IF NOT EXISTS idx_events_task_id ON events(task_id)")
        .execute(pool)
        .await?;

    sqlx::query(
        "CREATE INDEX IF NOT EXISTS idx_events_task_type_time \
         ON events(task_id, log_type, timestamp)",
    )
    .execute(pool)
    .await?;

    // ── events_fts ────────────────────────────────────────────────────────
    sqlx::query(
        r#"
        CREATE VIRTUAL TABLE IF NOT EXISTS events_fts USING fts5(
            discussion_data,
            content=events,
            content_rowid=id
        )
        "#,
    )
    .execute(pool)
    .await?;

    sqlx::query(
        r#"
        CREATE TRIGGER IF NOT EXISTS events_ai AFTER INSERT ON events BEGIN
            INSERT INTO events_fts(rowid, discussion_data) VALUES (new.id, new.discussion_data);
        END
        "#,
    )
    .execute(pool)
    .await?;

    sqlx::query(
        r#"
        CREATE TRIGGER IF NOT EXISTS events_ad AFTER DELETE ON events BEGIN
            DELETE FROM events_fts WHERE rowid = old.id;
        END
        "#,
    )
    .execute(pool)
    .await?;

    sqlx::query(
        r#"
        CREATE TRIGGER IF NOT EXISTS events_au AFTER UPDATE ON events BEGIN
            INSERT INTO events_fts(events_fts, rowid, discussion_data)
                VALUES('delete', old.id, old.discussion_data);
            INSERT INTO events_fts(rowid, discussion_data)
                VALUES (new.id, new.discussion_data);
        END
        "#,
    )
    .execute(pool)
    .await?;

    // ── workspace_state ───────────────────────────────────────────────────
    // Key/value store; also holds the authoritative schema_version.
    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS workspace_state (
            key TEXT PRIMARY KEY,
            value TEXT NOT NULL
        )
        "#,
    )
    .execute(pool)
    .await?;

    // ── sessions ──────────────────────────────────────────────────────────
    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS sessions (
            session_id TEXT PRIMARY KEY,
            current_task_id INTEGER,
            created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
            last_active_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
            FOREIGN KEY (current_task_id) REFERENCES tasks(id) ON DELETE SET NULL
        )
        "#,
    )
    .execute(pool)
    .await?;

    sqlx::query("CREATE INDEX IF NOT EXISTS idx_sessions_last_active ON sessions(last_active_at)")
        .execute(pool)
        .await?;

    // ── suggestions ───────────────────────────────────────────────────────
    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS suggestions (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            type TEXT NOT NULL,
            content TEXT NOT NULL,
            created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
            dismissed BOOLEAN NOT NULL DEFAULT 0,
            CHECK (type IN ('task_structure', 'event_synthesis', 'error'))
        )
        "#,
    )
    .execute(pool)
    .await?;

    sqlx::query(
        "CREATE INDEX IF NOT EXISTS idx_suggestions_active \
         ON suggestions(dismissed, created_at) WHERE dismissed = 0",
    )
    .execute(pool)
    .await?;

    // ── dependencies ──────────────────────────────────────────────────────
    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS dependencies (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            blocking_task_id INTEGER NOT NULL,
            blocked_task_id INTEGER NOT NULL,
            created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
            FOREIGN KEY (blocking_task_id) REFERENCES tasks(id) ON DELETE CASCADE,
            FOREIGN KEY (blocked_task_id) REFERENCES tasks(id) ON DELETE CASCADE,
            UNIQUE(blocking_task_id, blocked_task_id),
            CHECK(blocking_task_id != blocked_task_id)
        )
        "#,
    )
    .execute(pool)
    .await?;

    sqlx::query(
        "CREATE INDEX IF NOT EXISTS idx_dependencies_blocking ON dependencies(blocking_task_id)",
    )
    .execute(pool)
    .await?;

    sqlx::query(
        "CREATE INDEX IF NOT EXISTS idx_dependencies_blocked ON dependencies(blocked_task_id)",
    )
    .execute(pool)
    .await?;

    // ── tasks partial indexes (active rows only) ──────────────────────────
    sqlx::query(
        "CREATE INDEX IF NOT EXISTS idx_tasks_status_parent_priority \
         ON tasks(status, parent_id, priority, id) WHERE deleted_at IS NULL",
    )
    .execute(pool)
    .await?;

    sqlx::query(
        "CREATE INDEX IF NOT EXISTS idx_tasks_priority_complexity \
         ON tasks(priority, complexity, id) WHERE deleted_at IS NULL",
    )
    .execute(pool)
    .await?;

    sqlx::query(
        "CREATE INDEX IF NOT EXISTS idx_tasks_doing_at \
         ON tasks(first_doing_at) WHERE status = 'doing' AND deleted_at IS NULL",
    )
    .execute(pool)
    .await?;

    // Stamp the database as fully current so incremental upgrades are skipped.
    set_schema_version(pool, "0.15.0").await?;
    Ok(())
}
437
438// ---------------------------------------------------------------------------
439// Incremental upgrade: pre-0.9.0 → 0.9.0
440// ---------------------------------------------------------------------------
441
/// pre-0.9.0 → 0.9.0: switch `tasks_fts` to the trigram tokenizer, add the
/// `owner` column to `tasks`, and ensure `workspace_state` exists.
async fn upgrade_to_v0_9_0(pool: &SqlitePool) -> Result<()> {
    // Ensure tasks_fts uses the trigram tokenizer.
    // Old databases may have been created with the default tokenizer; we must
    // drop and recreate to switch tokenizers.
    let uses_trigram: i64 = sqlx::query_scalar(
        "SELECT COUNT(*) FROM sqlite_schema \
         WHERE name='tasks_fts' AND type='table' AND sql LIKE '%trigram%'",
    )
    .fetch_one(pool)
    .await
    .unwrap_or(0);

    if uses_trigram == 0 {
        let _ = sqlx::query("DROP TABLE IF EXISTS tasks_fts")
            .execute(pool)
            .await;

        sqlx::query(
            r#"
            CREATE VIRTUAL TABLE tasks_fts USING fts5(
                name,
                spec,
                content=tasks,
                content_rowid=id,
                tokenize='trigram'
            )
            "#,
        )
        .execute(pool)
        .await?;

        // Rebuild FTS index from existing active rows.
        // At this schema stage deleted_at does not exist yet, so all rows are active.
        sqlx::query("INSERT INTO tasks_fts(rowid, name, spec) SELECT id, name, spec FROM tasks")
            .execute(pool)
            .await?;
    }

    // Add owner column (idempotent: ignore error if column already exists)
    let _ = sqlx::query("ALTER TABLE tasks ADD COLUMN owner TEXT NOT NULL DEFAULT 'human'")
        .execute(pool)
        .await;

    // Ensure workspace_state exists (needed for set_schema_version)
    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS workspace_state (
            key TEXT PRIMARY KEY,
            value TEXT NOT NULL
        )
        "#,
    )
    .execute(pool)
    .await?;

    set_schema_version(pool, "0.9.0").await?;
    Ok(())
}
500
501// ---------------------------------------------------------------------------
502// Incremental upgrade: 0.9.x → 0.11.0
503// ---------------------------------------------------------------------------
504
/// 0.9.x → 0.11.0: add the `sessions`, `suggestions` and `dependencies`
/// tables, the composite events index, and the (non-partial) task sorting
/// indexes. Also migrates the legacy `current_task_id` workspace_state entry
/// into the default session. All statements are `IF NOT EXISTS`-guarded,
/// so a re-run is safe.
async fn upgrade_to_v0_11_0(pool: &SqlitePool) -> Result<()> {
    // sessions table
    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS sessions (
            session_id TEXT PRIMARY KEY,
            current_task_id INTEGER,
            created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
            last_active_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
            FOREIGN KEY (current_task_id) REFERENCES tasks(id) ON DELETE SET NULL
        )
        "#,
    )
    .execute(pool)
    .await?;

    sqlx::query("CREATE INDEX IF NOT EXISTS idx_sessions_last_active ON sessions(last_active_at)")
        .execute(pool)
        .await?;

    // suggestions table
    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS suggestions (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            type TEXT NOT NULL,
            content TEXT NOT NULL,
            created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
            dismissed BOOLEAN NOT NULL DEFAULT 0,
            CHECK (type IN ('task_structure', 'event_synthesis', 'error'))
        )
        "#,
    )
    .execute(pool)
    .await?;

    sqlx::query(
        "CREATE INDEX IF NOT EXISTS idx_suggestions_active \
         ON suggestions(dismissed, created_at) WHERE dismissed = 0",
    )
    .execute(pool)
    .await?;

    // dependencies table
    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS dependencies (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            blocking_task_id INTEGER NOT NULL,
            blocked_task_id INTEGER NOT NULL,
            created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
            FOREIGN KEY (blocking_task_id) REFERENCES tasks(id) ON DELETE CASCADE,
            FOREIGN KEY (blocked_task_id) REFERENCES tasks(id) ON DELETE CASCADE,
            UNIQUE(blocking_task_id, blocked_task_id),
            CHECK(blocking_task_id != blocked_task_id)
        )
        "#,
    )
    .execute(pool)
    .await?;

    sqlx::query(
        "CREATE INDEX IF NOT EXISTS idx_dependencies_blocking ON dependencies(blocking_task_id)",
    )
    .execute(pool)
    .await?;

    sqlx::query(
        "CREATE INDEX IF NOT EXISTS idx_dependencies_blocked ON dependencies(blocked_task_id)",
    )
    .execute(pool)
    .await?;

    // Composite event index
    sqlx::query(
        "CREATE INDEX IF NOT EXISTS idx_events_task_type_time \
         ON events(task_id, log_type, timestamp)",
    )
    .execute(pool)
    .await?;

    // Task sorting indexes (non-partial at this version; upgraded later in v0.14.0)
    sqlx::query(
        "CREATE INDEX IF NOT EXISTS idx_tasks_status_parent_priority \
         ON tasks(status, parent_id, priority, id)",
    )
    .execute(pool)
    .await?;

    sqlx::query(
        "CREATE INDEX IF NOT EXISTS idx_tasks_priority_complexity \
         ON tasks(priority, complexity, id)",
    )
    .execute(pool)
    .await?;

    sqlx::query(
        "CREATE INDEX IF NOT EXISTS idx_tasks_doing_at \
         ON tasks(first_doing_at) WHERE status = 'doing'",
    )
    .execute(pool)
    .await?;

    // Migrate legacy current_task_id from workspace_state into the default session
    // ('-1' is the default session id; INSERT OR IGNORE keeps an existing row).
    sqlx::query(
        r#"
        INSERT OR IGNORE INTO sessions (session_id, current_task_id, created_at, last_active_at)
        SELECT '-1', CAST(value AS INTEGER), datetime('now'), datetime('now')
        FROM workspace_state
        WHERE key = 'current_task_id' AND value IS NOT NULL AND value != ''
        "#,
    )
    .execute(pool)
    .await?;

    set_schema_version(pool, "0.11.0").await?;
    Ok(())
}
623
624// ---------------------------------------------------------------------------
625// Incremental upgrade: 0.11.x → 0.12.0
626// ---------------------------------------------------------------------------
627
628async fn upgrade_to_v0_12_0(pool: &SqlitePool) -> Result<()> {
629    // Add metadata column (idempotent: ignore error if column already exists)
630    let _ = sqlx::query("ALTER TABLE tasks ADD COLUMN metadata TEXT DEFAULT '{}'")
631        .execute(pool)
632        .await;
633
634    set_schema_version(pool, "0.12.0").await?;
635    Ok(())
636}
637
638// ---------------------------------------------------------------------------
639// Incremental upgrade: 0.12.x → 0.13.0
640// ---------------------------------------------------------------------------
641
/// 0.12.x → 0.13.0: introduce soft delete (`tasks.deleted_at`) and replace
/// the single FTS update trigger with WHEN-gated variants that respect the
/// active/deleted state; rebuild the FTS index over active rows.
async fn upgrade_to_v0_13_0(pool: &SqlitePool) -> Result<()> {
    // Add deleted_at column. We capture whether it was actually *newly* added:
    // if the column already existed (idempotent re-run), the FTS index is
    // already correct and a full delete-all + re-insert would be wasteful
    // (and hazardous on large databases if interrupted).
    let column_added = sqlx::query("ALTER TABLE tasks ADD COLUMN deleted_at DATETIME")
        .execute(pool)
        .await
        .is_ok();

    // Replace any existing tasks_au* triggers with the split WHEN-gated versions
    // that correctly handle the soft-delete semantics introduced by deleted_at.
    let _ = sqlx::query("DROP TRIGGER IF EXISTS tasks_au")
        .execute(pool)
        .await;
    let _ = sqlx::query("DROP TRIGGER IF EXISTS tasks_au_active")
        .execute(pool)
        .await;
    let _ = sqlx::query("DROP TRIGGER IF EXISTS tasks_au_softdelete")
        .execute(pool)
        .await;

    // active→active updates: delete the stale FTS entry, insert the new one.
    sqlx::query(
        r#"
        CREATE TRIGGER IF NOT EXISTS tasks_au_active
        AFTER UPDATE ON tasks WHEN old.deleted_at IS NULL AND new.deleted_at IS NULL BEGIN
            INSERT INTO tasks_fts(tasks_fts, rowid, name, spec)
                VALUES('delete', old.id, old.name, old.spec);
            INSERT INTO tasks_fts(rowid, name, spec) VALUES (new.id, new.name, new.spec);
        END
        "#,
    )
    .execute(pool)
    .await?;

    // active→deleted transition: remove the FTS entry exactly once.
    sqlx::query(
        r#"
        CREATE TRIGGER IF NOT EXISTS tasks_au_softdelete
        AFTER UPDATE ON tasks WHEN old.deleted_at IS NULL AND new.deleted_at IS NOT NULL BEGIN
            INSERT INTO tasks_fts(tasks_fts, rowid, name, spec)
                VALUES('delete', old.id, old.name, old.spec);
        END
        "#,
    )
    .execute(pool)
    .await?;

    // Rebuild FTS only when deleted_at was newly added. If the column already
    // existed, the FTS index is consistent and there is nothing to fix.
    if column_added {
        let has_fts_data: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM tasks_fts")
            .fetch_one(pool)
            .await
            .unwrap_or(0);

        if has_fts_data > 0 {
            // FTS5 special 'delete-all' command clears the whole index.
            sqlx::query("INSERT INTO tasks_fts(tasks_fts) VALUES('delete-all')")
                .execute(pool)
                .await?;
        }

        sqlx::query(
            "INSERT INTO tasks_fts(rowid, name, spec) \
             SELECT id, name, spec FROM tasks WHERE deleted_at IS NULL",
        )
        .execute(pool)
        .await?;
    }

    set_schema_version(pool, "0.13.0").await?;
    Ok(())
}
714
715// ---------------------------------------------------------------------------
716// Incremental upgrade: 0.13.x → 0.14.0
717// ---------------------------------------------------------------------------
718
/// 0.13.x → 0.14.0: rebuild the task sorting indexes as partial indexes
/// (`WHERE deleted_at IS NULL`) and recreate the `events` table with
/// `ON DELETE RESTRICT` (SQLite cannot alter an FK constraint in place).
async fn upgrade_to_v0_14_0(pool: &SqlitePool) -> Result<()> {
    // ── Rebuild tasks indexes as partial indexes (WHERE deleted_at IS NULL) ──
    // Idempotent: DROP IF EXISTS + CREATE IF NOT EXISTS makes this safe to re-run.
    // (Pre-version databases are all treated as (0,1,0) so this function may run
    // on databases that already have partial indexes; DROP+CREATE handles that.)
    let _ = sqlx::query("DROP INDEX IF EXISTS idx_tasks_status_parent_priority")
        .execute(pool)
        .await;
    sqlx::query(
        "CREATE INDEX IF NOT EXISTS idx_tasks_status_parent_priority \
         ON tasks(status, parent_id, priority, id) WHERE deleted_at IS NULL",
    )
    .execute(pool)
    .await?;

    let _ = sqlx::query("DROP INDEX IF EXISTS idx_tasks_priority_complexity")
        .execute(pool)
        .await;
    sqlx::query(
        "CREATE INDEX IF NOT EXISTS idx_tasks_priority_complexity \
         ON tasks(priority, complexity, id) WHERE deleted_at IS NULL",
    )
    .execute(pool)
    .await?;

    let _ = sqlx::query("DROP INDEX IF EXISTS idx_tasks_doing_at")
        .execute(pool)
        .await;
    sqlx::query(
        "CREATE INDEX IF NOT EXISTS idx_tasks_doing_at \
         ON tasks(first_doing_at) WHERE status = 'doing' AND deleted_at IS NULL",
    )
    .execute(pool)
    .await?;

    // ── Recreate events table with ON DELETE RESTRICT ─────────────────────
    // SQLite cannot ALTER a FK constraint in-place; we recreate the table.
    //
    // Gate: skip if events already declares RESTRICT (idempotent re-run).
    let needs_restrict: i64 = sqlx::query_scalar(
        "SELECT COUNT(*) FROM sqlite_schema \
         WHERE type='table' AND name='events' AND sql NOT LIKE '%ON DELETE RESTRICT%'",
    )
    .fetch_one(pool)
    .await
    .unwrap_or(0);

    if needs_restrict > 0 {
        // All DDL steps MUST run inside a single explicit transaction.
        // Without this, a crash mid-reconstruction leaves the database in an
        // inconsistent state (events_new exists, events gone).
        // pool.begin() pins us to one physical connection for the duration.
        let mut tx = pool.begin().await?;

        // Drop any leftover events_new from a previous interrupted upgrade.
        sqlx::query("DROP TABLE IF EXISTS events_new")
            .execute(&mut *tx)
            .await?;

        sqlx::query(
            r#"
            CREATE TABLE events_new (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                task_id INTEGER NOT NULL,
                timestamp DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
                log_type TEXT NOT NULL,
                discussion_data TEXT NOT NULL,
                FOREIGN KEY (task_id) REFERENCES tasks(id) ON DELETE RESTRICT
            )
            "#,
        )
        .execute(&mut *tx)
        .await?;

        // Same column order in both tables, so a bare SELECT * copy is valid.
        sqlx::query("INSERT INTO events_new SELECT * FROM events")
            .execute(&mut *tx)
            .await?;

        // DROP old table — also drops events_ai, events_ad, events_au triggers
        sqlx::query("DROP TABLE events").execute(&mut *tx).await?;
        sqlx::query("ALTER TABLE events_new RENAME TO events")
            .execute(&mut *tx)
            .await?;

        // Recreate indexes (dropped with the old table)
        sqlx::query("CREATE INDEX IF NOT EXISTS idx_events_task_id ON events(task_id)")
            .execute(&mut *tx)
            .await?;

        sqlx::query(
            "CREATE INDEX IF NOT EXISTS idx_events_task_type_time \
             ON events(task_id, log_type, timestamp)",
        )
        .execute(&mut *tx)
        .await?;

        // Recreate FTS sync triggers (dropped with the old table)
        sqlx::query(
            r#"
            CREATE TRIGGER IF NOT EXISTS events_ai AFTER INSERT ON events BEGIN
                INSERT INTO events_fts(rowid, discussion_data)
                    VALUES (new.id, new.discussion_data);
            END
            "#,
        )
        .execute(&mut *tx)
        .await?;

        sqlx::query(
            r#"
            CREATE TRIGGER IF NOT EXISTS events_ad AFTER DELETE ON events BEGIN
                DELETE FROM events_fts WHERE rowid = old.id;
            END
            "#,
        )
        .execute(&mut *tx)
        .await?;

        sqlx::query(
            r#"
            CREATE TRIGGER IF NOT EXISTS events_au AFTER UPDATE ON events BEGIN
                INSERT INTO events_fts(events_fts, rowid, discussion_data)
                    VALUES('delete', old.id, old.discussion_data);
                INSERT INTO events_fts(rowid, discussion_data)
                    VALUES (new.id, new.discussion_data);
            END
            "#,
        )
        .execute(&mut *tx)
        .await?;

        tx.commit().await?;
    }

    set_schema_version(pool, "0.14.0").await?;
    Ok(())
}
856
857// ---------------------------------------------------------------------------
858// Incremental upgrade: 0.14.x → 0.15.0
859// ---------------------------------------------------------------------------
860
861async fn upgrade_to_v0_15_0(pool: &SqlitePool) -> Result<()> {
862    // Fix tasks_au_active trigger: the original WHEN clause only checked
863    // `new.deleted_at IS NULL`, which would fire on restore operations
864    // (deleted→active), causing an attempt to remove a non-existent FTS5 entry
865    // and corrupting the index.  Narrow to active→active transitions only.
866    //
867    // tasks_au_softdelete is correct as-is — its WHEN clause
868    // (old.deleted_at IS NULL AND new.deleted_at IS NOT NULL) already narrows
869    // to active→deleted only and does not need to be touched.
870    let _ = sqlx::query("DROP TRIGGER IF EXISTS tasks_au_active")
871        .execute(pool)
872        .await;
873
874    sqlx::query(
875        r#"CREATE TRIGGER IF NOT EXISTS tasks_au_active
876AFTER UPDATE ON tasks WHEN old.deleted_at IS NULL AND new.deleted_at IS NULL BEGIN
877    INSERT INTO tasks_fts(tasks_fts, rowid, name, spec)
878        VALUES('delete', old.id, old.name, old.spec);
879    INSERT INTO tasks_fts(rowid, name, spec) VALUES (new.id, new.name, new.spec);
880END"#,
881    )
882    .execute(pool)
883    .await
884    .map_err(crate::error::IntentError::DatabaseError)?;
885
886    set_schema_version(pool, "0.15.0").await?;
887    Ok(())
888}
889
890// ---------------------------------------------------------------------------
891// Tests
892// ---------------------------------------------------------------------------
893
#[cfg(test)]
mod tests {
    //! Schema and migration tests.  Every test builds a fresh on-disk SQLite
    //! database inside its own `TempDir`, so tests are fully isolated from one
    //! another and exercise the real WAL/foreign-key configuration from
    //! `create_pool` rather than an in-memory shortcut.
    use super::*;
    use tempfile::TempDir;

    /// A pool opened against a fresh file path must be immediately usable.
    #[tokio::test]
    async fn test_create_pool_success() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");

        let pool = create_pool(&db_path).await.unwrap();

        // Verify we can execute a query
        let result: i64 = sqlx::query_scalar("SELECT 1")
            .fetch_one(&pool)
            .await
            .unwrap();

        assert_eq!(result, 1);
    }

    /// A fresh migration run must create the three core tables.
    #[tokio::test]
    async fn test_run_migrations_creates_tables() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();

        run_migrations(&pool).await.unwrap();

        // Verify tables were created
        let tables: Vec<String> =
            sqlx::query_scalar("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name")
                .fetch_all(&pool)
                .await
                .unwrap();

        assert!(tables.contains(&"tasks".to_string()));
        assert!(tables.contains(&"events".to_string()));
        assert!(tables.contains(&"workspace_state".to_string()));
    }

    /// The FTS5 shadow tables for tasks and events must exist after migration.
    #[tokio::test]
    async fn test_run_migrations_creates_fts_tables() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();

        run_migrations(&pool).await.unwrap();

        // Verify FTS tables were created
        let tables: Vec<String> = sqlx::query_scalar(
            "SELECT name FROM sqlite_master WHERE type='table' AND name LIKE '%_fts'",
        )
        .fetch_all(&pool)
        .await
        .unwrap();

        assert!(tables.contains(&"tasks_fts".to_string()));
        assert!(tables.contains(&"events_fts".to_string()));
    }

    /// All seven FTS-maintenance triggers must exist after migration, including
    /// the split tasks_au_active / tasks_au_softdelete pair introduced for
    /// soft-delete support.
    #[tokio::test]
    async fn test_run_migrations_creates_triggers() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();

        run_migrations(&pool).await.unwrap();

        // Verify triggers were created
        let triggers: Vec<String> =
            sqlx::query_scalar("SELECT name FROM sqlite_master WHERE type='trigger'")
                .fetch_all(&pool)
                .await
                .unwrap();

        assert!(triggers.contains(&"tasks_ai".to_string()));
        assert!(triggers.contains(&"tasks_ad".to_string()));
        assert!(triggers.contains(&"tasks_au_active".to_string()));
        assert!(triggers.contains(&"tasks_au_softdelete".to_string()));
        assert!(triggers.contains(&"events_ai".to_string()));
        assert!(triggers.contains(&"events_ad".to_string()));
        assert!(triggers.contains(&"events_au".to_string()));
    }

    /// Running migrations repeatedly on the same database must not error.
    #[tokio::test]
    async fn test_run_migrations_idempotent() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();

        // Run migrations twice
        run_migrations(&pool).await.unwrap();
        run_migrations(&pool).await.unwrap();

        // Should not fail - migrations are idempotent
        let tables: Vec<String> =
            sqlx::query_scalar("SELECT name FROM sqlite_master WHERE type='table'")
                .fetch_all(&pool)
                .await
                .unwrap();

        assert!(tables.len() >= 3);
    }

    /// Inserting a task must populate tasks_fts via the tasks_ai trigger.
    #[tokio::test]
    async fn test_fts_triggers_work() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();
        run_migrations(&pool).await.unwrap();

        // Insert a task
        sqlx::query("INSERT INTO tasks (name, spec, status) VALUES (?, ?, ?)")
            .bind("Test task")
            .bind("Test spec")
            .bind("todo")
            .execute(&pool)
            .await
            .unwrap();

        // Verify FTS was updated
        let count: i64 =
            sqlx::query_scalar("SELECT COUNT(*) FROM tasks_fts WHERE name MATCH 'Test'")
                .fetch_one(&pool)
                .await
                .unwrap();

        assert_eq!(count, 1);
    }

    /// workspace_state must behave as a simple key/value store.
    #[tokio::test]
    async fn test_workspace_state_table_structure() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();
        run_migrations(&pool).await.unwrap();

        // Insert and retrieve workspace state
        sqlx::query("INSERT INTO workspace_state (key, value) VALUES (?, ?)")
            .bind("test_key")
            .bind("test_value")
            .execute(&pool)
            .await
            .unwrap();

        let value: String = sqlx::query_scalar("SELECT value FROM workspace_state WHERE key = ?")
            .bind("test_key")
            .fetch_one(&pool)
            .await
            .unwrap();

        assert_eq!(value, "test_value");
    }

    /// The CHECK constraint on tasks.status must reject unknown status values.
    #[tokio::test]
    async fn test_task_status_constraint() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();
        run_migrations(&pool).await.unwrap();

        // Try to insert task with invalid status
        let result = sqlx::query("INSERT INTO tasks (name, status) VALUES (?, ?)")
            .bind("Test")
            .bind("invalid_status")
            .execute(&pool)
            .await;

        // Should fail due to CHECK constraint
        assert!(result.is_err());
    }

    // v0.2.0 Migration Tests

    /// The dependencies table must exist after migration.
    #[tokio::test]
    async fn test_dependencies_table_created() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();
        run_migrations(&pool).await.unwrap();

        // Verify dependencies table exists
        let tables: Vec<String> = sqlx::query_scalar(
            "SELECT name FROM sqlite_master WHERE type='table' AND name='dependencies'",
        )
        .fetch_all(&pool)
        .await
        .unwrap();

        assert!(tables.contains(&"dependencies".to_string()));
    }

    /// The dependency-lookup and event-lookup indexes must exist after migration.
    #[tokio::test]
    async fn test_dependencies_indexes_created() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();
        run_migrations(&pool).await.unwrap();

        // Verify indexes exist
        let indexes: Vec<String> = sqlx::query_scalar(
            "SELECT name FROM sqlite_master WHERE type='index' AND name IN ('idx_dependencies_blocking', 'idx_dependencies_blocked', 'idx_events_task_type_time')",
        )
        .fetch_all(&pool)
        .await
        .unwrap();

        assert!(indexes.contains(&"idx_dependencies_blocking".to_string()));
        assert!(indexes.contains(&"idx_dependencies_blocked".to_string()));
        assert!(indexes.contains(&"idx_events_task_type_time".to_string()));
    }

    /// A task must not be able to depend on itself (CHECK constraint).
    #[tokio::test]
    async fn test_dependencies_self_dependency_constraint() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();
        run_migrations(&pool).await.unwrap();

        // Create a task
        sqlx::query("INSERT INTO tasks (name, status) VALUES (?, ?)")
            .bind("Task 1")
            .bind("todo")
            .execute(&pool)
            .await
            .unwrap();

        // Try to create self-dependency (should fail)
        let result = sqlx::query(
            "INSERT INTO dependencies (blocking_task_id, blocked_task_id) VALUES (?, ?)",
        )
        .bind(1)
        .bind(1)
        .execute(&pool)
        .await;

        assert!(result.is_err());
    }

    /// The same (blocking, blocked) pair must not be insertable twice.
    #[tokio::test]
    async fn test_dependencies_unique_constraint() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();
        run_migrations(&pool).await.unwrap();

        // Create tasks
        for i in 1..=2 {
            sqlx::query("INSERT INTO tasks (name, status) VALUES (?, ?)")
                .bind(format!("Task {}", i))
                .bind("todo")
                .execute(&pool)
                .await
                .unwrap();
        }

        // Create dependency
        sqlx::query("INSERT INTO dependencies (blocking_task_id, blocked_task_id) VALUES (?, ?)")
            .bind(1)
            .bind(2)
            .execute(&pool)
            .await
            .unwrap();

        // Try to create duplicate dependency (should fail)
        let result = sqlx::query(
            "INSERT INTO dependencies (blocking_task_id, blocked_task_id) VALUES (?, ?)",
        )
        .bind(1)
        .bind(2)
        .execute(&pool)
        .await;

        assert!(result.is_err());
    }

    // NOTE: This test uses a physical DELETE (not soft-delete) because it is
    // specifically testing the schema-level ON DELETE CASCADE behaviour of the
    // dependencies FK. Physical task deletion is NOT what the application does
    // (it uses soft-delete via deleted_at). If you need events on these tasks,
    // add them AFTER the physical DELETE assertion, not before — adding events
    // to a task that will be physically deleted would trigger ON DELETE RESTRICT
    // and cause this test to fail (which would be the correct behaviour).
    #[tokio::test]
    async fn test_dependencies_cascade_delete() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();
        run_migrations(&pool).await.unwrap();

        // Create two tasks (no events: physical delete path requires no events
        // because events FK is ON DELETE RESTRICT)
        for i in 1..=2 {
            sqlx::query("INSERT INTO tasks (name, status) VALUES (?, ?)")
                .bind(format!("Task {}", i))
                .bind("todo")
                .execute(&pool)
                .await
                .unwrap();
        }

        sqlx::query("INSERT INTO dependencies (blocking_task_id, blocked_task_id) VALUES (?, ?)")
            .bind(1)
            .bind(2)
            .execute(&pool)
            .await
            .unwrap();

        let count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM dependencies")
            .fetch_one(&pool)
            .await
            .unwrap();
        assert_eq!(count, 1);

        // Physical delete (schema-level test only; application uses soft-delete)
        sqlx::query("DELETE FROM tasks WHERE id = ?")
            .bind(1)
            .execute(&pool)
            .await
            .unwrap();

        let count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM dependencies")
            .fetch_one(&pool)
            .await
            .unwrap();
        assert_eq!(count, 0);
    }

    /// Soft-deleting a task must NOT remove its events: they are an immutable audit log.
    #[tokio::test]
    async fn test_soft_delete_preserves_events() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();
        run_migrations(&pool).await.unwrap();

        sqlx::query("INSERT INTO tasks (name, status) VALUES ('audited task', 'todo')")
            .execute(&pool)
            .await
            .unwrap();

        sqlx::query(
            "INSERT INTO events (task_id, log_type, discussion_data) VALUES (1, 'decision', 'why we chose X')",
        )
        .execute(&pool)
        .await
        .unwrap();

        // Soft-delete via deleted_at — the application path
        sqlx::query("UPDATE tasks SET deleted_at = datetime('now') WHERE id = 1")
            .execute(&pool)
            .await
            .unwrap();

        // Task must appear deleted
        let active: i64 =
            sqlx::query_scalar("SELECT COUNT(*) FROM tasks WHERE id = 1 AND deleted_at IS NULL")
                .fetch_one(&pool)
                .await
                .unwrap();
        assert_eq!(active, 0, "task should be soft-deleted");

        // Event must still exist — audit log is immutable
        let events: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM events WHERE task_id = 1")
            .fetch_one(&pool)
            .await
            .unwrap();
        assert_eq!(events, 1, "event must be retained after soft-delete");
    }

    /// Physical deletion of a task that has events must be rejected by the
    /// ON DELETE RESTRICT FK on the events table (audit log invariant).
    #[tokio::test]
    async fn test_physical_delete_blocked_when_task_has_events() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();
        run_migrations(&pool).await.unwrap();

        sqlx::query("INSERT INTO tasks (name, status) VALUES ('task with history', 'todo')")
            .execute(&pool)
            .await
            .unwrap();

        sqlx::query(
            "INSERT INTO events (task_id, log_type, discussion_data) VALUES (1, 'decision', 'important decision')",
        )
        .execute(&pool)
        .await
        .unwrap();

        // Physical DELETE must fail: FK ON DELETE RESTRICT protects the audit log
        let result = sqlx::query("DELETE FROM tasks WHERE id = 1")
            .execute(&pool)
            .await;

        assert!(
            result.is_err(),
            "physical delete of task with events must be rejected by ON DELETE RESTRICT"
        );

        // Event must still be there
        let events: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM events WHERE task_id = 1")
            .fetch_one(&pool)
            .await
            .unwrap();
        assert_eq!(events, 1, "event must not have been deleted");
    }

    /// After a fresh migration the stored schema_version must be the latest.
    #[tokio::test]
    async fn test_schema_version_tracking() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();
        run_migrations(&pool).await.unwrap();

        // Verify schema version is set to 0.15.0
        let version: String =
            sqlx::query_scalar("SELECT value FROM workspace_state WHERE key = 'schema_version'")
                .fetch_one(&pool)
                .await
                .unwrap();

        assert_eq!(version, "0.15.0");
    }

    /// Three consecutive migration runs must leave both the dependencies table
    /// and the recorded schema version intact.
    #[tokio::test]
    async fn test_migration_idempotency_v0_11_0() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();

        // Run migrations multiple times
        run_migrations(&pool).await.unwrap();
        run_migrations(&pool).await.unwrap();
        run_migrations(&pool).await.unwrap();

        // Verify dependencies table exists and is functional
        let tables: Vec<String> = sqlx::query_scalar(
            "SELECT name FROM sqlite_master WHERE type='table' AND name='dependencies'",
        )
        .fetch_all(&pool)
        .await
        .unwrap();

        assert!(tables.contains(&"dependencies".to_string()));

        // Verify schema version is still correct
        let version: String =
            sqlx::query_scalar("SELECT value FROM workspace_state WHERE key = 'schema_version'")
                .fetch_one(&pool)
                .await
                .unwrap();

        assert_eq!(version, "0.15.0");
    }

    /// The sessions table and its last_active index must exist after migration.
    #[tokio::test]
    async fn test_sessions_table_created() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();
        run_migrations(&pool).await.unwrap();

        // Verify sessions table exists
        let tables: Vec<String> = sqlx::query_scalar(
            "SELECT name FROM sqlite_master WHERE type='table' AND name='sessions'",
        )
        .fetch_all(&pool)
        .await
        .unwrap();

        assert!(tables.contains(&"sessions".to_string()));

        // Verify index exists
        let indices: Vec<String> = sqlx::query_scalar(
            "SELECT name FROM sqlite_master WHERE type='index' AND name='idx_sessions_last_active'",
        )
        .fetch_all(&pool)
        .await
        .unwrap();

        assert!(indices.contains(&"idx_sessions_last_active".to_string()));
    }

    // ── New tests for version-aware migration ─────────────────────────────

    /// A brand-new (empty) database must be detected as version (0,0,0).
    #[tokio::test]
    async fn test_detect_schema_version_fresh() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();

        // Brand-new database: no tables at all
        let version = detect_schema_version(&pool).await.unwrap();
        assert_eq!(version, (0, 0, 0), "fresh database must return (0,0,0)");
    }

    /// After a full migration run, detection must report the latest version.
    #[tokio::test]
    async fn test_detect_schema_version_after_migration() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();

        run_migrations(&pool).await.unwrap();

        let version = detect_schema_version(&pool).await.unwrap();
        assert_eq!(
            version,
            (0, 15, 0),
            "after migration detect must return (0,15,0)"
        );
    }

    /// End-to-end upgrade from a hand-built legacy schema: detection, data
    /// preservation, new columns, and new tables are all verified.
    #[tokio::test]
    async fn test_migration_from_pre_version_db() {
        // Simulate an old database that has tasks and events but:
        //   - no owner column
        //   - no sessions table
        //   - no metadata column
        //   - no deleted_at column
        //   - no workspace_state (hence no schema_version)
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();

        // Bootstrap a minimal "old" schema manually
        sqlx::query(
            r#"
            CREATE TABLE tasks (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                parent_id INTEGER,
                name TEXT NOT NULL,
                spec TEXT,
                status TEXT NOT NULL DEFAULT 'todo',
                complexity INTEGER,
                priority INTEGER DEFAULT 0,
                first_todo_at DATETIME,
                first_doing_at DATETIME,
                first_done_at DATETIME,
                active_form TEXT,
                FOREIGN KEY (parent_id) REFERENCES tasks(id) ON DELETE CASCADE,
                CHECK (status IN ('todo', 'doing', 'done'))
            )
            "#,
        )
        .execute(&pool)
        .await
        .unwrap();

        sqlx::query(
            r#"
            CREATE TABLE events (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                task_id INTEGER NOT NULL,
                timestamp DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
                log_type TEXT NOT NULL,
                discussion_data TEXT NOT NULL,
                FOREIGN KEY (task_id) REFERENCES tasks(id) ON DELETE CASCADE
            )
            "#,
        )
        .execute(&pool)
        .await
        .unwrap();

        // Insert some data to verify preservation across upgrade
        sqlx::query("INSERT INTO tasks (name, status) VALUES ('legacy task', 'todo')")
            .execute(&pool)
            .await
            .unwrap();

        // Verify the old schema is detected correctly
        let version_before = detect_schema_version(&pool).await.unwrap();
        assert_eq!(
            version_before,
            (0, 1, 0),
            "old schema without owner should be detected as (0,1,0)"
        );

        // Run migrations — should upgrade from (0,1,0) to (0,15,0)
        run_migrations(&pool).await.unwrap();

        // Verify final version
        let version: String =
            sqlx::query_scalar("SELECT value FROM workspace_state WHERE key = 'schema_version'")
                .fetch_one(&pool)
                .await
                .unwrap();
        assert_eq!(version, "0.15.0");

        // Verify legacy data is intact
        let task_count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM tasks")
            .fetch_one(&pool)
            .await
            .unwrap();
        assert_eq!(task_count, 1, "legacy task must survive the upgrade");

        // Verify new columns were added
        assert!(
            column_exists(&pool, "tasks", "owner").await,
            "owner column must exist after upgrade"
        );
        assert!(
            column_exists(&pool, "tasks", "metadata").await,
            "metadata column must exist after upgrade"
        );
        assert!(
            column_exists(&pool, "tasks", "deleted_at").await,
            "deleted_at column must exist after upgrade"
        );

        // Verify new tables were created
        assert!(
            table_exists(&pool, "sessions").await,
            "sessions table must exist after upgrade"
        );
        assert!(
            table_exists(&pool, "dependencies").await,
            "dependencies table must exist after upgrade"
        );
    }

    // ── FTS + soft delete interaction ─────────────────────────────────────

    /// After soft-deleting a task the FTS index must not return it.
    #[tokio::test]
    async fn test_fts_excludes_soft_deleted_task() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();
        run_migrations(&pool).await.unwrap();

        sqlx::query(
            "INSERT INTO tasks (name, spec, status) VALUES ('unique_fts_target', 'some spec', 'todo')",
        )
        .execute(&pool)
        .await
        .unwrap();

        // Confirm it is searchable before deletion
        let before: i64 = sqlx::query_scalar(
            "SELECT COUNT(*) FROM tasks_fts WHERE name MATCH 'unique_fts_target'",
        )
        .fetch_one(&pool)
        .await
        .unwrap();
        assert_eq!(before, 1, "task must be in FTS before soft-delete");

        // Soft-delete via deleted_at (the tasks_au_softdelete trigger fires)
        sqlx::query(
            "UPDATE tasks SET deleted_at = datetime('now') WHERE name = 'unique_fts_target'",
        )
        .execute(&pool)
        .await
        .unwrap();

        // FTS must no longer return the task
        let after: i64 = sqlx::query_scalar(
            "SELECT COUNT(*) FROM tasks_fts WHERE name MATCH 'unique_fts_target'",
        )
        .fetch_one(&pool)
        .await
        .unwrap();
        assert_eq!(after, 0, "soft-deleted task must be removed from FTS");
    }

    /// Soft-deleting one task must not affect FTS entries for other tasks.
    #[tokio::test]
    async fn test_fts_soft_delete_does_not_affect_siblings() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();
        run_migrations(&pool).await.unwrap();

        sqlx::query(
            "INSERT INTO tasks (name, spec, status) VALUES ('keep_this_task', 'spec a', 'todo')",
        )
        .execute(&pool)
        .await
        .unwrap();
        sqlx::query(
            "INSERT INTO tasks (name, spec, status) VALUES ('delete_this_task', 'spec b', 'todo')",
        )
        .execute(&pool)
        .await
        .unwrap();

        // Soft-delete only the second task
        sqlx::query(
            "UPDATE tasks SET deleted_at = datetime('now') WHERE name = 'delete_this_task'",
        )
        .execute(&pool)
        .await
        .unwrap();

        // The deleted task must not appear
        let deleted: i64 = sqlx::query_scalar(
            "SELECT COUNT(*) FROM tasks_fts WHERE name MATCH 'delete_this_task'",
        )
        .fetch_one(&pool)
        .await
        .unwrap();
        assert_eq!(deleted, 0, "deleted task must not appear in FTS");

        // The surviving task must still be searchable
        let kept: i64 =
            sqlx::query_scalar("SELECT COUNT(*) FROM tasks_fts WHERE name MATCH 'keep_this_task'")
                .fetch_one(&pool)
                .await
                .unwrap();
        assert_eq!(kept, 1, "surviving task must still be in FTS");
    }

    /// An update to an already-soft-deleted task (e.g. correcting spec while
    /// archived) must not cause FTS corruption. The softdelete trigger uses
    /// `old.deleted_at IS NULL AND new.deleted_at IS NOT NULL` so it fires only
    /// on the active→deleted transition, not on subsequent updates to a deleted
    /// row. Neither the old nor the new name must appear in FTS after.
    #[tokio::test]
    async fn test_fts_update_of_soft_deleted_task_stays_excluded() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();
        run_migrations(&pool).await.unwrap();

        sqlx::query(
            "INSERT INTO tasks (name, spec, status) VALUES ('archived_task', 'old spec', 'todo')",
        )
        .execute(&pool)
        .await
        .unwrap();

        // Soft-delete
        sqlx::query("UPDATE tasks SET deleted_at = datetime('now') WHERE name = 'archived_task'")
            .execute(&pool)
            .await
            .unwrap();

        // Update the name while keeping deleted_at set — softdelete trigger does NOT
        // fire again (old.deleted_at IS NOT NULL); no FTS operation occurs.
        sqlx::query(
            "UPDATE tasks SET name = 'archived_task_renamed', spec = 'new spec' \
             WHERE name = 'archived_task'",
        )
        .execute(&pool)
        .await
        .unwrap();

        // Neither old nor new name must appear in FTS
        let old_name: i64 =
            sqlx::query_scalar("SELECT COUNT(*) FROM tasks_fts WHERE name MATCH 'archived_task'")
                .fetch_one(&pool)
                .await
                .unwrap();
        assert_eq!(
            old_name, 0,
            "old name of soft-deleted task must not be in FTS"
        );

        let new_name: i64 = sqlx::query_scalar(
            "SELECT COUNT(*) FROM tasks_fts WHERE name MATCH 'archived_task_renamed'",
        )
        .fetch_one(&pool)
        .await
        .unwrap();
        assert_eq!(
            new_name, 0,
            "renamed soft-deleted task must not re-enter FTS"
        );
    }

    /// Restoring a soft-deleted task (clearing deleted_at) must NOT corrupt the FTS index.
    ///
    /// This is the exact scenario fixed in v0.15.0: the old trigger fired on
    /// deleted→active transitions and tried to remove a non-existent FTS entry,
    /// corrupting the index.  The fixed trigger only fires on active→active
    /// (old.deleted_at IS NULL AND new.deleted_at IS NULL), so a restore must be
    /// a no-op for the trigger — the caller is responsible for re-inserting the
    /// FTS row explicitly if needed.
    #[tokio::test]
    async fn test_fts_restore_does_not_corrupt_fts() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        let pool = create_pool(&db_path).await.unwrap();
        run_migrations(&pool).await.unwrap();

        sqlx::query(
            "INSERT INTO tasks (name, spec, status) VALUES ('restore_me', 'spec text', 'todo')",
        )
        .execute(&pool)
        .await
        .unwrap();

        // Soft-delete — removes from FTS via tasks_au_softdelete trigger
        sqlx::query("UPDATE tasks SET deleted_at = datetime('now') WHERE name = 'restore_me'")
            .execute(&pool)
            .await
            .unwrap();

        // Restore — clears deleted_at; tasks_au_active must NOT fire (old.deleted_at IS NOT NULL)
        // This is the regression path: the old trigger would attempt
        // `INSERT INTO tasks_fts(tasks_fts, ...) VALUES('delete', ...)` for a row
        // that is no longer in the FTS index, corrupting it.
        sqlx::query("UPDATE tasks SET deleted_at = NULL WHERE name = 'restore_me'")
            .execute(&pool)
            .await
            .unwrap();

        // The trigger does not re-insert on restore; the row is absent from FTS.
        let count: i64 =
            sqlx::query_scalar("SELECT COUNT(*) FROM tasks_fts WHERE tasks_fts MATCH 'restore_me'")
                .fetch_one(&pool)
                .await
                .unwrap();
        assert_eq!(
            count, 0,
            "restored task is not in FTS until explicitly re-indexed"
        );

        // The real proof that the FTS index is not corrupted: insert a new task
        // and verify it is searchable.  SQLite FTS5 corruption often surfaces only
        // on the next write or rebuild, not on a read-only COUNT(*).
        sqlx::query(
            "INSERT INTO tasks (name, spec, status) VALUES ('healthy_task', 'healthy spec', 'todo')",
        )
        .execute(&pool)
        .await
        .unwrap();

        let healthy: i64 =
            sqlx::query_scalar("SELECT COUNT(*) FROM tasks_fts WHERE name MATCH 'healthy_task'")
                .fetch_one(&pool)
                .await
                .expect("FTS must remain functional after restore — index is not corrupted");
        assert_eq!(healthy, 1, "FTS index must still work after restore");
    }
}