1#[cfg(any(feature = "advanced-queries", feature = "batch-operations"))]
2use crate::models::TaskFilters;
3use crate::{
4 database::{
5 mappers::{map_project_row, map_task_row},
6 query_builders::TaskUpdateBuilder,
7 validators,
8 },
9 error::{Result as ThingsResult, ThingsError},
10 models::{
11 Area, CreateTaskRequest, DeleteChildHandling, Project, Task, TaskStatus, TaskType,
12 UpdateTaskRequest,
13 },
14};
15use chrono::{DateTime, NaiveDate, Utc};
16use serde::{Deserialize, Serialize};
17use sqlx::{pool::PoolOptions, Row, SqlitePool};
18use std::path::{Path, PathBuf};
19use std::time::Duration;
20use tracing::{debug, error, info, instrument};
21use uuid::Uuid;
22
/// Sanitizes a floating-point Unix timestamp read from the database into an
/// `i64` number of seconds.
///
/// Returns `0` for NaN/infinite values, negative values, or values past the
/// year-2100 cutoff (treated as corrupt data).
pub(crate) fn safe_timestamp_convert(ts_f64: f64) -> i64 {
    // Upper bound: 2100-01-01T00:00:00Z. Anything larger is considered corrupt.
    const MAX_TIMESTAMP: f64 = 4_102_444_800.0;
    if ts_f64.is_finite() && (0.0..=MAX_TIMESTAMP).contains(&ts_f64) {
        // The range check above guarantees the truncated value fits in i64,
        // so a direct cast is safe (the old code round-tripped through a
        // formatted string to achieve the same truncation).
        ts_f64.trunc() as i64
    } else {
        0
    }
}
40
41pub(crate) fn things_date_to_naive_date(seconds_since_2001: i64) -> Option<chrono::NaiveDate> {
43 use chrono::{TimeZone, Utc};
44
45 if seconds_since_2001 <= 0 {
46 return None;
47 }
48
49 let base_date = Utc.with_ymd_and_hms(2001, 1, 1, 0, 0, 0).single().unwrap();
51
52 let date_time = base_date + chrono::Duration::seconds(seconds_since_2001);
54
55 Some(date_time.date_naive())
56}
57
58pub fn naive_date_to_things_timestamp(date: NaiveDate) -> i64 {
60 use chrono::{NaiveTime, TimeZone, Utc};
61
62 let base_date = Utc.with_ymd_and_hms(2001, 1, 1, 0, 0, 0).single().unwrap();
64
65 let date_time = date
67 .and_time(NaiveTime::from_hms_opt(0, 0, 0).unwrap())
68 .and_local_timezone(Utc)
69 .single()
70 .unwrap();
71
72 date_time.timestamp() - base_date.timestamp()
74}
75
76pub fn serialize_tags_to_blob(tags: &[String]) -> ThingsResult<Vec<u8>> {
80 serde_json::to_vec(tags)
81 .map_err(|e| ThingsError::unknown(format!("Failed to serialize tags: {e}")))
82}
83
84pub fn deserialize_tags_from_blob(blob: &[u8]) -> ThingsResult<Vec<String>> {
86 serde_json::from_slice(blob)
87 .map_err(|e| ThingsError::unknown(format!("Failed to deserialize tags: {e}")))
88}
89
90pub(crate) fn things_uuid_to_uuid(things_uuid: &str) -> Uuid {
93 use std::collections::hash_map::DefaultHasher;
96 use std::hash::{Hash, Hasher};
97
98 let mut hasher = DefaultHasher::new();
99 things_uuid.hash(&mut hasher);
100 let hash = hasher.finish();
101
102 let bytes = [
105 ((hash >> 56) & 0xFF) as u8,
106 ((hash >> 48) & 0xFF) as u8,
107 ((hash >> 40) & 0xFF) as u8,
108 ((hash >> 32) & 0xFF) as u8,
109 ((hash >> 24) & 0xFF) as u8,
110 ((hash >> 16) & 0xFF) as u8,
111 ((hash >> 8) & 0xFF) as u8,
112 (hash & 0xFF) as u8,
113 u8::try_from(things_uuid.len().min(255)).unwrap_or(255),
115 things_uuid.chars().next().unwrap_or('0') as u8,
116 things_uuid.chars().nth(1).unwrap_or('0') as u8,
117 things_uuid.chars().nth(2).unwrap_or('0') as u8,
118 things_uuid.chars().nth(3).unwrap_or('0') as u8,
119 things_uuid.chars().nth(4).unwrap_or('0') as u8,
120 things_uuid.chars().nth(5).unwrap_or('0') as u8,
121 things_uuid.chars().nth(6).unwrap_or('0') as u8,
122 ];
123
124 Uuid::from_bytes(bytes)
125}
126
127impl TaskStatus {
128 fn from_i32(value: i32) -> Option<Self> {
129 match value {
130 0 => Some(TaskStatus::Incomplete),
131 1 => Some(TaskStatus::Completed),
132 2 => Some(TaskStatus::Canceled),
133 3 => Some(TaskStatus::Trashed),
134 _ => None,
135 }
136 }
137}
138
139impl TaskType {
140 fn from_i32(value: i32) -> Option<Self> {
141 match value {
142 0 => Some(TaskType::Todo),
143 1 => Some(TaskType::Project),
144 2 => Some(TaskType::Heading),
145 3 => Some(TaskType::Area),
146 _ => None,
147 }
148 }
149}
150
/// Tuning knobs for the SQLite connection pool.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabasePoolConfig {
    /// Upper bound on simultaneously open connections.
    pub max_connections: u32,
    /// Connections kept open even when the pool is idle.
    pub min_connections: u32,
    /// How long to wait when acquiring a connection before giving up.
    pub connect_timeout: Duration,
    /// Idle time after which a connection is closed.
    pub idle_timeout: Duration,
    /// Maximum lifetime of any single connection before it is recycled.
    pub max_lifetime: Duration,
    /// Whether to validate a connection before handing it to a caller.
    pub test_before_acquire: bool,
    /// SQLite PRAGMA settings applied after the pool is established.
    pub sqlite_optimizations: SqliteOptimizations,
}
169
/// SQLite PRAGMA settings applied to every new connection pool.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SqliteOptimizations {
    /// NOTE(review): only logged, never applied — `journal_mode` is what
    /// actually selects WAL; confirm this field is intentional.
    pub enable_wal_mode: bool,
    /// Value for `PRAGMA synchronous` (e.g. "NORMAL", "FULL").
    pub synchronous_mode: String,
    /// Value for `PRAGMA cache_size`; negative means size in KiB.
    pub cache_size: i32,
    /// Whether `PRAGMA foreign_keys` is turned ON.
    pub enable_foreign_keys: bool,
    /// Value for `PRAGMA journal_mode` (e.g. "WAL").
    pub journal_mode: String,
    /// Value for `PRAGMA temp_store` (e.g. "MEMORY").
    pub temp_store: String,
    /// Value for `PRAGMA mmap_size`, in bytes.
    pub mmap_size: i64,
    /// When true, `PRAGMA optimize` is run after the other PRAGMAs.
    pub enable_query_planner: bool,
}
190
191impl Default for DatabasePoolConfig {
192 fn default() -> Self {
193 Self {
194 max_connections: 10,
195 min_connections: 1,
196 connect_timeout: Duration::from_secs(30),
197 idle_timeout: Duration::from_secs(600), max_lifetime: Duration::from_secs(1800), test_before_acquire: true,
200 sqlite_optimizations: SqliteOptimizations::default(),
201 }
202 }
203}
204
205impl Default for SqliteOptimizations {
206 fn default() -> Self {
207 Self {
208 enable_wal_mode: true,
209 synchronous_mode: "NORMAL".to_string(),
210 cache_size: -20000, enable_foreign_keys: true,
212 journal_mode: "WAL".to_string(),
213 temp_store: "MEMORY".to_string(),
214 mmap_size: 268_435_456, enable_query_planner: true,
216 }
217 }
218}
219
/// Point-in-time snapshot of pool health, as reported by `get_pool_health`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PoolHealthStatus {
    /// Result of a live `SELECT 1` probe at snapshot time.
    pub is_healthy: bool,
    /// Total connections currently open in the pool.
    pub pool_size: u32,
    /// Connections currently checked out (`pool_size` minus idle).
    pub active_connections: u32,
    /// Connections sitting idle in the pool.
    pub idle_connections: u32,
    /// Configured upper bound on connections.
    pub max_connections: u32,
    /// Configured lower bound on connections.
    pub min_connections: u32,
    /// Configured acquire timeout.
    pub connection_timeout: Duration,
    /// Configured idle timeout, if any.
    pub idle_timeout: Option<Duration>,
    /// Configured maximum connection lifetime, if any.
    pub max_lifetime: Option<Duration>,
}
233
/// Utilization metrics for the pool, as reported by `get_pool_metrics`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PoolMetrics {
    /// Total connections currently open in the pool.
    pub pool_size: u32,
    /// Connections currently checked out.
    pub active_connections: u32,
    /// Connections sitting idle in the pool.
    pub idle_connections: u32,
    /// Configured upper bound on connections.
    pub max_connections: u32,
    /// Configured lower bound on connections.
    pub min_connections: u32,
    /// `active / max * 100`; 0.0 when `max_connections` is zero.
    pub utilization_percentage: f64,
    /// Result of a live connectivity probe at snapshot time.
    pub is_healthy: bool,
    /// Wall-clock duration of the connectivity probe, in milliseconds.
    pub response_time_ms: u64,
    /// Configured acquire timeout.
    pub connection_timeout: Duration,
    /// Configured idle timeout, if any.
    pub idle_timeout: Option<Duration>,
    /// Configured maximum connection lifetime, if any.
    pub max_lifetime: Option<Duration>,
}
249
/// Combined report produced by `comprehensive_health_check`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComprehensiveHealthStatus {
    /// True only when both the pool health and pool metrics probes succeeded.
    pub overall_healthy: bool,
    /// Pool sizing snapshot plus liveness probe.
    pub pool_health: PoolHealthStatus,
    /// Pool utilization metrics plus probe latency.
    pub pool_metrics: PoolMetrics,
    /// Row counts for tasks, projects, and areas.
    pub database_stats: DatabaseStats,
    /// When this report was assembled (UTC).
    pub timestamp: DateTime<Utc>,
}
259
/// Handle to a Things 3 SQLite database, backed by a sqlx connection pool.
#[derive(Debug, Clone)]
pub struct ThingsDatabase {
    // Shared connection pool; crate-visible so sibling modules (validators,
    // query builders) can run queries directly.
    pub(crate) pool: SqlitePool,
    // Retained so health/metrics endpoints can report the configured limits.
    config: DatabasePoolConfig,
}
267
268impl ThingsDatabase {
    /// Opens the database at `database_path` using [`DatabasePoolConfig::default`].
    ///
    /// # Errors
    /// Propagates connection and PRAGMA failures from [`Self::new_with_config`].
    #[instrument]
    pub async fn new(database_path: &Path) -> ThingsResult<Self> {
        Self::new_with_config(database_path, DatabasePoolConfig::default()).await
    }
295
296 #[instrument]
330 pub async fn new_with_config(
331 database_path: &Path,
332 config: DatabasePoolConfig,
333 ) -> ThingsResult<Self> {
334 let database_url = format!("sqlite:{}", database_path.display());
335
336 info!(
337 "Connecting to SQLite database at: {} with optimized pool",
338 database_url
339 );
340
341 let pool = PoolOptions::new()
343 .max_connections(config.max_connections)
344 .min_connections(config.min_connections)
345 .acquire_timeout(config.connect_timeout)
346 .idle_timeout(Some(config.idle_timeout))
347 .max_lifetime(Some(config.max_lifetime))
348 .test_before_acquire(config.test_before_acquire)
349 .connect(&database_url)
350 .await
351 .map_err(|e| ThingsError::unknown(format!("Failed to connect to database: {e}")))?;
352
353 Self::apply_sqlite_optimizations(&pool, &config.sqlite_optimizations).await?;
355
356 info!(
357 "Database connection pool established successfully with {} max connections",
358 config.max_connections
359 );
360
361 Ok(Self { pool, config })
362 }
363
364 async fn apply_sqlite_optimizations(
366 pool: &SqlitePool,
367 optimizations: &SqliteOptimizations,
368 ) -> ThingsResult<()> {
369 sqlx::query(&format!(
371 "PRAGMA journal_mode = {}",
372 optimizations.journal_mode
373 ))
374 .execute(pool)
375 .await
376 .map_err(|e| ThingsError::unknown(format!("Failed to set journal mode: {e}")))?;
377
378 sqlx::query(&format!(
380 "PRAGMA synchronous = {}",
381 optimizations.synchronous_mode
382 ))
383 .execute(pool)
384 .await
385 .map_err(|e| ThingsError::unknown(format!("Failed to set synchronous mode: {e}")))?;
386
387 sqlx::query(&format!("PRAGMA cache_size = {}", optimizations.cache_size))
389 .execute(pool)
390 .await
391 .map_err(|e| ThingsError::unknown(format!("Failed to set cache size: {e}")))?;
392
393 let fk_setting = if optimizations.enable_foreign_keys {
395 "ON"
396 } else {
397 "OFF"
398 };
399 sqlx::query(&format!("PRAGMA foreign_keys = {fk_setting}"))
400 .execute(pool)
401 .await
402 .map_err(|e| ThingsError::unknown(format!("Failed to set foreign keys: {e}")))?;
403
404 sqlx::query(&format!("PRAGMA temp_store = {}", optimizations.temp_store))
406 .execute(pool)
407 .await
408 .map_err(|e| ThingsError::unknown(format!("Failed to set temp store: {e}")))?;
409
410 sqlx::query(&format!("PRAGMA mmap_size = {}", optimizations.mmap_size))
412 .execute(pool)
413 .await
414 .map_err(|e| ThingsError::unknown(format!("Failed to set mmap size: {e}")))?;
415
416 if optimizations.enable_query_planner {
418 sqlx::query("PRAGMA optimize")
419 .execute(pool)
420 .await
421 .map_err(|e| ThingsError::unknown(format!("Failed to optimize database: {e}")))?;
422 }
423
424 debug!(
425 "Applied SQLite optimizations: WAL={}, sync={}, cache={}KB, fk={}, temp={}, mmap={}MB",
426 optimizations.enable_wal_mode,
427 optimizations.synchronous_mode,
428 optimizations.cache_size.abs() / 1024,
429 optimizations.enable_foreign_keys,
430 optimizations.temp_store,
431 optimizations.mmap_size / 1024 / 1024
432 );
433
434 Ok(())
435 }
436
    /// Connects using a raw sqlx connection string (e.g. `sqlite::memory:`)
    /// with the default pool configuration.
    ///
    /// # Errors
    /// Propagates connection and PRAGMA failures from
    /// [`Self::from_connection_string_with_config`].
    #[instrument]
    pub async fn from_connection_string(database_url: &str) -> ThingsResult<Self> {
        Self::from_connection_string_with_config(database_url, DatabasePoolConfig::default()).await
    }
446
447 #[instrument]
453 pub async fn from_connection_string_with_config(
454 database_url: &str,
455 config: DatabasePoolConfig,
456 ) -> ThingsResult<Self> {
457 info!(
458 "Connecting to SQLite database: {} with optimized pool",
459 database_url
460 );
461
462 let pool = PoolOptions::new()
464 .max_connections(config.max_connections)
465 .min_connections(config.min_connections)
466 .acquire_timeout(config.connect_timeout)
467 .idle_timeout(Some(config.idle_timeout))
468 .max_lifetime(Some(config.max_lifetime))
469 .test_before_acquire(config.test_before_acquire)
470 .connect(database_url)
471 .await
472 .map_err(|e| ThingsError::unknown(format!("Failed to connect to database: {e}")))?;
473
474 Self::apply_sqlite_optimizations(&pool, &config.sqlite_optimizations).await?;
476
477 info!(
478 "Database connection pool established successfully with {} max connections",
479 config.max_connections
480 );
481
482 Ok(Self { pool, config })
483 }
484
    /// Borrows the underlying `SqlitePool` for ad-hoc queries.
    #[must_use]
    pub fn pool(&self) -> &SqlitePool {
        &self.pool
    }
490
491 #[instrument]
493 pub async fn is_connected(&self) -> bool {
494 match sqlx::query("SELECT 1").fetch_one(&self.pool).await {
495 Ok(_) => {
496 debug!("Database connection is healthy");
497 true
498 }
499 Err(e) => {
500 error!("Database connection check failed: {}", e);
501 false
502 }
503 }
504 }
505
506 #[instrument]
512 pub async fn get_pool_health(&self) -> ThingsResult<PoolHealthStatus> {
513 let pool_size = self.pool.size();
514 let idle_connections = self.pool.num_idle();
515 let active_connections = pool_size - u32::try_from(idle_connections).unwrap_or(0);
516
517 let is_healthy = self.is_connected().await;
519
520 Ok(PoolHealthStatus {
521 is_healthy,
522 pool_size,
523 active_connections,
524 idle_connections: u32::try_from(idle_connections).unwrap_or(0),
525 max_connections: self.config.max_connections,
526 min_connections: self.config.min_connections,
527 connection_timeout: self.config.connect_timeout,
528 idle_timeout: Some(self.config.idle_timeout),
529 max_lifetime: Some(self.config.max_lifetime),
530 })
531 }
532
533 #[instrument]
539 pub async fn get_pool_metrics(&self) -> ThingsResult<PoolMetrics> {
540 let pool_size = self.pool.size();
541 let idle_connections = self.pool.num_idle();
542 let active_connections = pool_size - u32::try_from(idle_connections).unwrap_or(0);
543
544 let max_connections = self.config.max_connections;
546 let utilization_percentage = if max_connections > 0 {
547 (f64::from(active_connections) / f64::from(max_connections)) * 100.0
548 } else {
549 0.0
550 };
551
552 let start_time = std::time::Instant::now();
554 let is_connected = self.is_connected().await;
555 let response_time_ms = u64::try_from(start_time.elapsed().as_millis()).unwrap_or(0);
556
557 Ok(PoolMetrics {
558 pool_size,
559 active_connections,
560 idle_connections: u32::try_from(idle_connections).unwrap_or(0),
561 max_connections,
562 min_connections: self.config.min_connections,
563 utilization_percentage,
564 is_healthy: is_connected,
565 response_time_ms,
566 connection_timeout: self.config.connect_timeout,
567 idle_timeout: Some(self.config.idle_timeout),
568 max_lifetime: Some(self.config.max_lifetime),
569 })
570 }
571
572 #[instrument]
578 pub async fn comprehensive_health_check(&self) -> ThingsResult<ComprehensiveHealthStatus> {
579 let pool_health = self.get_pool_health().await?;
580 let pool_metrics = self.get_pool_metrics().await?;
581 let db_stats = self.get_stats().await?;
582
583 let overall_healthy = pool_health.is_healthy && pool_metrics.is_healthy;
584
585 Ok(ComprehensiveHealthStatus {
586 overall_healthy,
587 pool_health,
588 pool_metrics,
589 database_stats: db_stats,
590 timestamp: Utc::now(),
591 })
592 }
593
594 #[instrument]
600 pub async fn get_stats(&self) -> ThingsResult<DatabaseStats> {
601 let task_count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM TMTask")
602 .fetch_one(&self.pool)
603 .await
604 .map_err(|e| ThingsError::unknown(format!("Failed to get task count: {e}")))?;
605
606 let project_count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM TMTask WHERE type = 1")
607 .fetch_one(&self.pool)
608 .await
609 .map_err(|e| ThingsError::unknown(format!("Failed to get project count: {e}")))?;
610
611 let area_count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM TMArea")
612 .fetch_one(&self.pool)
613 .await
614 .map_err(|e| ThingsError::unknown(format!("Failed to get area count: {e}")))?;
615
616 Ok(DatabaseStats {
617 task_count: task_count.try_into().unwrap_or(0),
618 project_count: project_count.try_into().unwrap_or(0),
619 area_count: area_count.try_into().unwrap_or(0),
620 })
621 }
622
    /// Fetches every row of `TMTask`, newest first.
    ///
    /// NOTE(review): this query reads columns (`start_date`, `due_date`,
    /// `project_uuid`, `area_uuid`, `tags`, `created`, `modified`) and parses
    /// dates/RFC 3339 strings — a different schema from the Things columns
    /// used elsewhere in this file (`startDate`, `deadline`, `project`,
    /// `area`, `cachedTags`, `creationDate` as float). Confirm which database
    /// layout this method is meant to target.
    ///
    /// # Errors
    /// Fails when the query fails or a row has an unparsable UUID.
    #[instrument]
    pub async fn get_all_tasks(&self) -> ThingsResult<Vec<Task>> {
        let rows = sqlx::query(
            r"
            SELECT
                uuid, title, status, type,
                start_date, due_date,
                project_uuid, area_uuid,
                notes, tags,
                created, modified
            FROM TMTask
            ORDER BY created DESC
            ",
        )
        .fetch_all(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to fetch tasks: {e}")))?;

        let mut tasks = Vec::new();
        for row in rows {
            let task = Task {
                // Hard error on a bad UUID (unlike the lenient paths below).
                uuid: Uuid::parse_str(&row.get::<String, _>("uuid"))
                    .map_err(|e| ThingsError::unknown(format!("Invalid task UUID: {e}")))?,
                title: row.get("title"),
                // Unknown enum values degrade to sensible defaults.
                status: TaskStatus::from_i32(row.get("status")).unwrap_or(TaskStatus::Incomplete),
                task_type: TaskType::from_i32(row.get("type")).unwrap_or(TaskType::Todo),
                // Dates stored as ISO "YYYY-MM-DD" strings; unparsable -> None.
                start_date: row
                    .get::<Option<String>, _>("start_date")
                    .and_then(|s| NaiveDate::parse_from_str(&s, "%Y-%m-%d").ok()),
                deadline: row
                    .get::<Option<String>, _>("due_date")
                    .and_then(|s| NaiveDate::parse_from_str(&s, "%Y-%m-%d").ok()),
                project_uuid: row
                    .get::<Option<String>, _>("project_uuid")
                    .and_then(|s| Uuid::parse_str(&s).ok()),
                area_uuid: row
                    .get::<Option<String>, _>("area_uuid")
                    .and_then(|s| Uuid::parse_str(&s).ok()),
                // Not selected by this query.
                parent_uuid: None,
                notes: row.get("notes"),
                // Tags stored as a comma-separated string here (not the JSON
                // blob used by other queries in this file).
                tags: row
                    .get::<Option<String>, _>("tags")
                    .map(|s| s.split(',').map(|s| s.trim().to_string()).collect())
                    .unwrap_or_default(),
                children: Vec::new(),
                // RFC 3339 timestamps; parse failures fall back to "now".
                created: DateTime::parse_from_rfc3339(&row.get::<String, _>("created"))
                    .ok()
                    .map_or_else(Utc::now, |dt| dt.with_timezone(&Utc)),
                modified: DateTime::parse_from_rfc3339(&row.get::<String, _>("modified"))
                    .ok()
                    .map_or_else(Utc::now, |dt| dt.with_timezone(&Utc)),
                stop_date: None,
            };
            tasks.push(task);
        }

        debug!("Fetched {} tasks", tasks.len());
        Ok(tasks)
    }
709
710 #[instrument]
716 pub async fn get_all_projects(&self) -> ThingsResult<Vec<Project>> {
717 let rows = sqlx::query(
718 r"
719 SELECT
720 uuid, title, status,
721 area, notes,
722 creationDate, userModificationDate,
723 startDate, deadline
724 FROM TMTask
725 WHERE type = 1 AND trashed = 0
726 ORDER BY creationDate DESC
727 ",
728 )
729 .fetch_all(&self.pool)
730 .await
731 .map_err(|e| ThingsError::unknown(format!("Failed to fetch projects: {e}")))?;
732
733 let mut projects = Vec::new();
734 for row in rows {
735 let project = Project {
736 uuid: things_uuid_to_uuid(&row.get::<String, _>("uuid")),
737 title: row.get("title"),
738 status: TaskStatus::from_i32(row.get("status")).unwrap_or(TaskStatus::Incomplete),
739 area_uuid: row
740 .get::<Option<String>, _>("area")
741 .map(|s| things_uuid_to_uuid(&s)),
742 notes: row.get("notes"),
743 deadline: row
744 .get::<Option<i64>, _>("deadline")
745 .and_then(|ts| DateTime::from_timestamp(ts, 0))
746 .map(|dt| dt.date_naive()),
747 start_date: row
748 .get::<Option<i64>, _>("startDate")
749 .and_then(|ts| DateTime::from_timestamp(ts, 0))
750 .map(|dt| dt.date_naive()),
751 tags: Vec::new(), tasks: Vec::new(), created: {
754 let ts_f64 = row.get::<f64, _>("creationDate");
755 let ts = safe_timestamp_convert(ts_f64);
756 DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
757 },
758 modified: {
759 let ts_f64 = row.get::<f64, _>("userModificationDate");
760 let ts = safe_timestamp_convert(ts_f64);
761 DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
762 },
763 };
764 projects.push(project);
765 }
766
767 debug!("Fetched {} projects", projects.len());
768 Ok(projects)
769 }
770
771 #[instrument]
777 pub async fn get_all_areas(&self) -> ThingsResult<Vec<Area>> {
778 let rows = sqlx::query(
780 r"
781 SELECT
782 uuid, title, visible, `index`
783 FROM TMArea
784 ORDER BY `index` ASC
785 ",
786 )
787 .fetch_all(&self.pool)
788 .await
789 .map_err(|e| ThingsError::unknown(format!("Failed to fetch areas: {e}")))?;
790
791 let mut areas = Vec::new();
792 for row in rows {
793 let uuid_str: String = row.get("uuid");
794 let uuid =
796 Uuid::parse_str(&uuid_str).unwrap_or_else(|_| things_uuid_to_uuid(&uuid_str));
797
798 let area = Area {
799 uuid,
800 title: row.get("title"),
801 notes: None, projects: Vec::new(), tags: Vec::new(), created: Utc::now(), modified: Utc::now(), };
807 areas.push(area);
808 }
809
810 debug!("Fetched {} areas", areas.len());
811 Ok(areas)
812 }
813
    /// Fetches all tasks with the given status, newest first.
    ///
    /// NOTE(review): like `get_all_tasks`, this reads `start_date`/`due_date`/
    /// `created` etc. — a column set that does not match the Things schema
    /// (`startDate`, `deadline`, float `creationDate`) used by the other
    /// queries in this file. Confirm the intended schema.
    ///
    /// # Errors
    /// Fails when the query fails or a row has an unparsable UUID.
    #[instrument]
    pub async fn get_tasks_by_status(&self, status: TaskStatus) -> ThingsResult<Vec<Task>> {
        // TaskStatus discriminants mirror the database encoding (0..=3).
        let status_value = status as i32;
        let rows = sqlx::query(
            r"
            SELECT
                uuid, title, status, type,
                start_date, due_date,
                project_uuid, area_uuid,
                notes, tags,
                created, modified
            FROM TMTask
            WHERE status = ?
            ORDER BY created DESC
            ",
        )
        .bind(status_value)
        .fetch_all(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to fetch tasks by status: {e}")))?;

        let mut tasks = Vec::new();
        for row in rows {
            let task = Task {
                // Hard error on a bad UUID (unlike the lenient paths below).
                uuid: Uuid::parse_str(&row.get::<String, _>("uuid"))
                    .map_err(|e| ThingsError::unknown(format!("Invalid task UUID: {e}")))?,
                title: row.get("title"),
                status: TaskStatus::from_i32(row.get("status")).unwrap_or(TaskStatus::Incomplete),
                task_type: TaskType::from_i32(row.get("type")).unwrap_or(TaskType::Todo),
                // Dates stored as ISO "YYYY-MM-DD" strings; unparsable -> None.
                start_date: row
                    .get::<Option<String>, _>("start_date")
                    .and_then(|s| NaiveDate::parse_from_str(&s, "%Y-%m-%d").ok()),
                deadline: row
                    .get::<Option<String>, _>("due_date")
                    .and_then(|s| NaiveDate::parse_from_str(&s, "%Y-%m-%d").ok()),
                project_uuid: row
                    .get::<Option<String>, _>("project_uuid")
                    .and_then(|s| Uuid::parse_str(&s).ok()),
                area_uuid: row
                    .get::<Option<String>, _>("area_uuid")
                    .and_then(|s| Uuid::parse_str(&s).ok()),
                // Not selected by this query.
                parent_uuid: None,
                notes: row.get("notes"),
                // Tags as a comma-separated string in this schema variant.
                tags: row
                    .get::<Option<String>, _>("tags")
                    .map(|s| s.split(',').map(|s| s.trim().to_string()).collect())
                    .unwrap_or_default(),
                children: Vec::new(),
                // RFC 3339 timestamps; parse failures fall back to "now".
                created: DateTime::parse_from_rfc3339(&row.get::<String, _>("created"))
                    .ok()
                    .map_or_else(Utc::now, |dt| dt.with_timezone(&Utc)),
                modified: DateTime::parse_from_rfc3339(&row.get::<String, _>("modified"))
                    .ok()
                    .map_or_else(Utc::now, |dt| dt.with_timezone(&Utc)),
                stop_date: None,
            };
            tasks.push(task);
        }

        debug!("Fetched {} tasks with status {:?}", tasks.len(), status);
        Ok(tasks)
    }
881
882 #[instrument]
888 pub async fn search_tasks(&self, query: &str) -> ThingsResult<Vec<Task>> {
889 let search_pattern = format!("%{query}%");
890 let rows = sqlx::query(
891 r"
892 SELECT
893 uuid, title, status, type,
894 startDate, deadline, stopDate,
895 project, area, heading,
896 notes, cachedTags,
897 creationDate, userModificationDate
898 FROM TMTask
899 WHERE (title LIKE ? OR notes LIKE ?) AND trashed = 0 AND type = 0
900 ORDER BY creationDate DESC
901 ",
902 )
903 .bind(&search_pattern)
904 .bind(&search_pattern)
905 .fetch_all(&self.pool)
906 .await
907 .map_err(|e| ThingsError::unknown(format!("Failed to search tasks: {e}")))?;
908
909 let tasks = rows
910 .iter()
911 .map(map_task_row)
912 .collect::<ThingsResult<Vec<Task>>>()?;
913
914 debug!("Found {} tasks matching query: {}", tasks.len(), query);
915 Ok(tasks)
916 }
917
    /// Runs a filtered task query without keyset pagination.
    ///
    /// # Errors
    /// Propagates database/mapping errors from [`Self::query_tasks_inner`].
    #[cfg(any(feature = "advanced-queries", feature = "batch-operations"))]
    pub async fn query_tasks(&self, filters: &TaskFilters) -> ThingsResult<Vec<Task>> {
        self.query_tasks_inner(filters, None).await
    }
939
    /// Core filtered task query shared by `query_tasks` and batch operations.
    ///
    /// `after` enables keyset pagination: only rows strictly older than the
    /// `(creationDate, uuid)` cursor (in the DESC sort order) are returned.
    ///
    /// Tag and free-text filters cannot be expressed in SQL here (tags live
    /// in a serialized blob), so they are applied in memory after the fetch;
    /// in that case LIMIT/OFFSET are also applied in memory so no matching
    /// rows are dropped prematurely.
    ///
    /// # Errors
    /// Fails when the SQL query fails or a row cannot be mapped to a `Task`.
    #[cfg(any(feature = "advanced-queries", feature = "batch-operations"))]
    pub(crate) async fn query_tasks_inner(
        &self,
        filters: &TaskFilters,
        after: Option<(i64, Uuid)>,
    ) -> ThingsResult<Vec<Task>> {
        const COLS: &str = "uuid, title, type, status, notes, startDate, deadline, stopDate, \
            creationDate, userModificationDate, project, area, heading, cachedTags";

        // Trashed rows are excluded unless the caller explicitly asks for
        // trashed tasks (Trashed is modeled via the `trashed` flag, not `status`).
        let trashed_val = i32::from(matches!(filters.status, Some(TaskStatus::Trashed)));
        let mut conditions: Vec<String> = vec![format!("trashed = {trashed_val}")];

        if let Some(status) = filters.status {
            // Trashed maps to the flag above, so it adds no status condition.
            let n = match status {
                TaskStatus::Incomplete => Some(0),
                TaskStatus::Completed => Some(1),
                TaskStatus::Canceled => Some(2),
                TaskStatus::Trashed => None,
            };
            if let Some(n) = n {
                conditions.push(format!("status = {n}"));
            }
        }

        if let Some(task_type) = filters.task_type {
            let n = match task_type {
                TaskType::Todo => 0,
                TaskType::Project => 1,
                TaskType::Heading => 2,
                TaskType::Area => 3,
            };
            conditions.push(format!("type = {n}"));
        }

        // `Uuid`'s Display is hex-and-dashes only, so direct interpolation
        // is not an injection vector here.
        if let Some(ref uuid) = filters.project_uuid {
            conditions.push(format!("project = '{uuid}'"));
        }

        if let Some(ref uuid) = filters.area_uuid {
            conditions.push(format!("area = '{uuid}'"));
        }

        // Date bounds use the 2001-based Things epoch.
        if let Some(from) = filters.start_date_from {
            conditions.push(format!(
                "startDate >= {}",
                naive_date_to_things_timestamp(from)
            ));
        }
        if let Some(to) = filters.start_date_to {
            conditions.push(format!(
                "startDate <= {}",
                naive_date_to_things_timestamp(to)
            ));
        }

        if let Some(from) = filters.deadline_from {
            conditions.push(format!(
                "deadline >= {}",
                naive_date_to_things_timestamp(from)
            ));
        }
        if let Some(to) = filters.deadline_to {
            conditions.push(format!(
                "deadline <= {}",
                naive_date_to_things_timestamp(to)
            ));
        }

        // Keyset cursor: rows strictly after (creationDate, uuid) in the DESC
        // ordering. The uuid is bound as a parameter when executing below.
        if let Some((after_seconds, _)) = after {
            conditions.push(format!(
                "(CAST(creationDate AS INTEGER) < {after_seconds} \
                OR (CAST(creationDate AS INTEGER) = {after_seconds} AND uuid < ?))"
            ));
        }

        let where_clause = conditions.join(" AND ");
        let mut sql = format!(
            "SELECT {COLS} FROM TMTask WHERE {where_clause} \
            ORDER BY CAST(creationDate AS INTEGER) DESC, uuid DESC"
        );

        // Tag/text filters run post-fetch, so SQL-level paging would drop
        // matching rows; defer paging to memory in that case.
        let has_post_filters =
            filters.tags.as_ref().is_some_and(|t| !t.is_empty()) || filters.search_query.is_some();

        if !has_post_filters {
            match (filters.limit, filters.offset) {
                (Some(limit), Some(offset)) => {
                    sql.push_str(&format!(" LIMIT {limit} OFFSET {offset}"));
                }
                (Some(limit), None) => {
                    sql.push_str(&format!(" LIMIT {limit}"));
                }
                (None, Some(offset)) => {
                    // SQLite requires LIMIT before OFFSET; -1 means "no limit".
                    sql.push_str(&format!(" LIMIT -1 OFFSET {offset}"));
                }
                (None, None) => {}
            }
        }

        let rows = if let Some((_, after_uuid)) = after {
            sqlx::query(&sql)
                .bind(after_uuid.to_string())
                .fetch_all(&self.pool)
                .await
        } else {
            sqlx::query(&sql).fetch_all(&self.pool).await
        }
        .map_err(|e| ThingsError::unknown(format!("Failed to query tasks: {e}")))?;

        let mut tasks = rows
            .iter()
            .map(map_task_row)
            .collect::<ThingsResult<Vec<Task>>>()?;

        // Every requested tag must be present on the task (AND semantics).
        if let Some(ref filter_tags) = filters.tags {
            if !filter_tags.is_empty() {
                tasks.retain(|task| filter_tags.iter().all(|f| task.tags.contains(f)));
            }
        }

        // Case-insensitive substring match over title and notes.
        if let Some(ref q) = filters.search_query {
            let q_lower = q.to_lowercase();
            tasks.retain(|task| {
                task.title.to_lowercase().contains(&q_lower)
                    || task
                        .notes
                        .as_deref()
                        .unwrap_or("")
                        .to_lowercase()
                        .contains(&q_lower)
            });
        }

        // In-memory paging, only when SQL paging was skipped above.
        if has_post_filters {
            let offset = filters.offset.unwrap_or(0);
            tasks = tasks.into_iter().skip(offset).collect();
            if let Some(limit) = filters.limit {
                tasks.truncate(limit);
            }
        }

        Ok(tasks)
    }
1107
1108 #[allow(clippy::too_many_arguments)]
1127 #[instrument(skip(self))]
1128 pub async fn search_logbook(
1129 &self,
1130 search_text: Option<String>,
1131 from_date: Option<NaiveDate>,
1132 to_date: Option<NaiveDate>,
1133 project_uuid: Option<Uuid>,
1134 area_uuid: Option<Uuid>,
1135 tags: Option<Vec<String>>,
1136 limit: Option<u32>,
1137 ) -> ThingsResult<Vec<Task>> {
1138 let result_limit = limit.unwrap_or(50).min(500);
1140
1141 let rows = if let Some(ref text) = search_text {
1143 let pattern = format!("%{text}%");
1144 let mut q = String::from(
1145 "SELECT uuid, title, status, type, startDate, deadline, stopDate, project, area, heading, notes, cachedTags, creationDate, userModificationDate FROM TMTask WHERE status = 1 AND trashed = 0 AND type = 0",
1146 );
1147 q.push_str(" AND (title LIKE ? OR notes LIKE ?)");
1148
1149 if let Some(date) = from_date {
1150 let date_time = date.and_hms_opt(0, 0, 0).unwrap().and_utc();
1152 let timestamp = date_time.timestamp() as f64;
1153 q.push_str(&format!(" AND stopDate >= {}", timestamp));
1154 }
1155
1156 if let Some(date) = to_date {
1157 let end_date = date + chrono::Duration::days(1);
1159 let date_time = end_date.and_hms_opt(0, 0, 0).unwrap().and_utc();
1160 let timestamp = date_time.timestamp() as f64;
1161 q.push_str(&format!(" AND stopDate < {}", timestamp));
1162 }
1163
1164 if let Some(uuid) = project_uuid {
1165 q.push_str(&format!(" AND project = '{}'", uuid));
1166 }
1167
1168 if let Some(uuid) = area_uuid {
1169 q.push_str(&format!(" AND area = '{}'", uuid));
1170 }
1171
1172 q.push_str(&format!(" ORDER BY stopDate DESC LIMIT {result_limit}"));
1173
1174 sqlx::query(&q)
1175 .bind(&pattern)
1176 .bind(&pattern)
1177 .fetch_all(&self.pool)
1178 .await
1179 .map_err(|e| ThingsError::unknown(format!("Failed to search logbook: {e}")))?
1180 } else {
1181 let mut q = String::from(
1182 "SELECT uuid, title, status, type, startDate, deadline, stopDate, project, area, heading, notes, cachedTags, creationDate, userModificationDate FROM TMTask WHERE status = 1 AND trashed = 0 AND type = 0",
1183 );
1184
1185 if let Some(date) = from_date {
1186 let date_time = date.and_hms_opt(0, 0, 0).unwrap().and_utc();
1188 let timestamp = date_time.timestamp() as f64;
1189 q.push_str(&format!(" AND stopDate >= {}", timestamp));
1190 }
1191
1192 if let Some(date) = to_date {
1193 let end_date = date + chrono::Duration::days(1);
1195 let date_time = end_date.and_hms_opt(0, 0, 0).unwrap().and_utc();
1196 let timestamp = date_time.timestamp() as f64;
1197 q.push_str(&format!(" AND stopDate < {}", timestamp));
1198 }
1199
1200 if let Some(uuid) = project_uuid {
1201 q.push_str(&format!(" AND project = '{}'", uuid));
1202 }
1203
1204 if let Some(uuid) = area_uuid {
1205 q.push_str(&format!(" AND area = '{}'", uuid));
1206 }
1207
1208 q.push_str(&format!(" ORDER BY stopDate DESC LIMIT {result_limit}"));
1209
1210 sqlx::query(&q)
1211 .fetch_all(&self.pool)
1212 .await
1213 .map_err(|e| ThingsError::unknown(format!("Failed to search logbook: {e}")))?
1214 };
1215
1216 let mut tasks = rows
1218 .iter()
1219 .map(map_task_row)
1220 .collect::<ThingsResult<Vec<Task>>>()?;
1221
1222 if let Some(ref filter_tags) = tags {
1223 if !filter_tags.is_empty() {
1224 tasks.retain(|task| {
1225 filter_tags
1227 .iter()
1228 .all(|filter_tag| task.tags.contains(filter_tag))
1229 });
1230 }
1231 }
1232
1233 debug!("Found {} completed tasks in logbook", tasks.len());
1234 Ok(tasks)
1235 }
1236
1237 #[instrument(skip(self))]
1243 pub async fn get_inbox(&self, limit: Option<usize>) -> ThingsResult<Vec<Task>> {
1244 let query = if let Some(limit) = limit {
1245 format!("SELECT uuid, title, type, status, notes, startDate, deadline, stopDate, creationDate, userModificationDate, project, area, heading, cachedTags FROM TMTask WHERE type = 0 AND status = 0 AND project IS NULL AND trashed = 0 ORDER BY creationDate DESC LIMIT {limit}")
1246 } else {
1247 "SELECT uuid, title, type, status, notes, startDate, deadline, stopDate, creationDate, userModificationDate, project, area, heading, cachedTags FROM TMTask WHERE type = 0 AND status = 0 AND project IS NULL AND trashed = 0 ORDER BY creationDate DESC"
1248 .to_string()
1249 };
1250
1251 let rows = sqlx::query(&query)
1252 .fetch_all(&self.pool)
1253 .await
1254 .map_err(|e| ThingsError::unknown(format!("Failed to fetch inbox tasks: {e}")))?;
1255
1256 let tasks = rows
1257 .iter()
1258 .map(map_task_row)
1259 .collect::<ThingsResult<Vec<Task>>>()?;
1260
1261 Ok(tasks)
1262 }
1263
1264 #[instrument(skip(self))]
1274 pub async fn get_today(&self, limit: Option<usize>) -> ThingsResult<Vec<Task>> {
1275 let query = if let Some(limit) = limit {
1278 format!(
1279 "SELECT uuid, title, type, status, notes, startDate, deadline, stopDate, creationDate, userModificationDate, project, area, heading, cachedTags FROM TMTask WHERE status = 0 AND todayIndex IS NOT NULL AND todayIndex != 0 AND trashed = 0 ORDER BY todayIndex ASC LIMIT {limit}"
1280 )
1281 } else {
1282 "SELECT uuid, title, type, status, notes, startDate, deadline, stopDate, creationDate, userModificationDate, project, area, heading, cachedTags FROM TMTask WHERE status = 0 AND todayIndex IS NOT NULL AND todayIndex != 0 AND trashed = 0 ORDER BY todayIndex ASC".to_string()
1283 };
1284
1285 let rows = sqlx::query(&query)
1286 .fetch_all(&self.pool)
1287 .await
1288 .map_err(|e| ThingsError::unknown(format!("Failed to fetch today's tasks: {e}")))?;
1289
1290 let tasks = rows
1291 .iter()
1292 .map(map_task_row)
1293 .collect::<ThingsResult<Vec<Task>>>()?;
1294
1295 Ok(tasks)
1296 }
1297
1298 #[instrument(skip(self))]
1304 pub async fn get_projects(&self, limit: Option<usize>) -> ThingsResult<Vec<Project>> {
1305 let _ = limit; self.get_all_projects().await
1307 }
1308
    /// Returns all areas; alias for [`Self::get_all_areas`].
    ///
    /// # Errors
    /// Propagates database errors from [`Self::get_all_areas`].
    #[instrument(skip(self))]
    pub async fn get_areas(&self) -> ThingsResult<Vec<Area>> {
        self.get_all_areas().await
    }
1318
    /// Inserts a new task into `TMTask` and returns its generated UUID.
    ///
    /// Validates the date range and any referenced project/area/parent rows
    /// before inserting, so callers get a domain error rather than a
    /// constraint violation.
    ///
    /// # Errors
    /// Fails on an invalid date range, a missing referenced row, tag
    /// serialization failure, or any SQL error.
    #[instrument(skip(self))]
    pub async fn create_task(&self, request: CreateTaskRequest) -> ThingsResult<Uuid> {
        crate::database::validate_date_range(request.start_date, request.deadline)?;

        // New tasks get a random v4 UUID (not Things' native ID format).
        let uuid = Uuid::new_v4();
        let uuid_str = uuid.to_string();

        // Referential checks, performed up front.
        if let Some(project_uuid) = &request.project_uuid {
            validators::validate_project_exists(&self.pool, project_uuid).await?;
        }

        if let Some(area_uuid) = &request.area_uuid {
            validators::validate_area_exists(&self.pool, area_uuid).await?;
        }

        if let Some(parent_uuid) = &request.parent_uuid {
            validators::validate_task_exists(&self.pool, parent_uuid).await?;
        }

        // startDate/deadline are stored as seconds since 2001-01-01
        // (the Things/Core Data convention).
        let start_date_ts = request.start_date.map(naive_date_to_things_timestamp);
        let deadline_ts = request.deadline.map(naive_date_to_things_timestamp);

        // NOTE(review): creation/modification are written as a Unix-epoch
        // float here, unlike the 2001-based columns above — confirm which
        // epoch creationDate is expected to use.
        let now = Utc::now().timestamp() as f64;

        // Tags are persisted as a JSON blob in cachedTags.
        let cached_tags = request
            .tags
            .as_ref()
            .map(|tags| serialize_tags_to_blob(tags))
            .transpose()?;

        sqlx::query(
            r"
            INSERT INTO TMTask (
                uuid, title, type, status, notes,
                startDate, deadline, project, area, heading,
                cachedTags, creationDate, userModificationDate,
                trashed
            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            ",
        )
        .bind(&uuid_str)
        .bind(&request.title)
        // Enum discriminants mirror the database encoding.
        .bind(request.task_type.unwrap_or(TaskType::Todo) as i32)
        .bind(request.status.unwrap_or(TaskStatus::Incomplete) as i32)
        .bind(request.notes.as_ref())
        .bind(start_date_ts)
        .bind(deadline_ts)
        .bind(request.project_uuid.map(|u| u.to_string()))
        .bind(request.area_uuid.map(|u| u.to_string()))
        // Parent tasks are stored in the `heading` column.
        .bind(request.parent_uuid.map(|u| u.to_string()))
        .bind(cached_tags)
        .bind(now)
        .bind(now)
        // trashed = 0: new tasks start live.
        .bind(0)
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to create task: {e}")))?;

        info!("Created task with UUID: {}", uuid);
        Ok(uuid)
    }
1430
    /// Insert a new project into `TMTask` and return its generated UUID.
    ///
    /// Projects share the task table: `type` is hard-coded to 1 (project),
    /// `status` to 0 (incomplete), and `project`/`heading` to NULL in the
    /// SQL itself.
    ///
    /// # Errors
    /// Returns an error when the date range is invalid, the referenced area
    /// does not exist, tag serialization fails, or the INSERT fails.
    #[instrument(skip(self))]
    pub async fn create_project(
        &self,
        request: crate::models::CreateProjectRequest,
    ) -> ThingsResult<Uuid> {
        // Reject start_date > deadline up front.
        crate::database::validate_date_range(request.start_date, request.deadline)?;

        let uuid = Uuid::new_v4();
        let uuid_str = uuid.to_string();

        // Verify the referenced area exists before inserting.
        if let Some(area_uuid) = &request.area_uuid {
            validators::validate_area_exists(&self.pool, area_uuid).await?;
        }

        // Dates use the Things 2001 epoch (see naive_date_to_things_timestamp).
        let start_date_ts = request.start_date.map(naive_date_to_things_timestamp);
        let deadline_ts = request.deadline.map(naive_date_to_things_timestamp);

        let now = Utc::now().timestamp() as f64;

        // Tags are persisted as a JSON array blob in cachedTags.
        let cached_tags = request
            .tags
            .as_ref()
            .map(|tags| serialize_tags_to_blob(tags))
            .transpose()?;

        // type/status/project/heading/trashed are literals in the VALUES
        // clause, so only the remaining columns take bind parameters.
        sqlx::query(
            r"
            INSERT INTO TMTask (
                uuid, title, type, status, notes,
                startDate, deadline, project, area, heading,
                cachedTags, creationDate, userModificationDate,
                trashed
            ) VALUES (?, ?, 1, 0, ?, ?, ?, NULL, ?, NULL, ?, ?, ?, 0)
            ",
        )
        .bind(&uuid_str)
        .bind(&request.title)
        .bind(request.notes.as_ref())
        .bind(start_date_ts)
        .bind(deadline_ts)
        .bind(request.area_uuid.map(|u| u.to_string()))
        .bind(cached_tags)
        .bind(now)
        .bind(now)
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to create project: {e}")))?;

        info!("Created project with UUID: {}", uuid);
        Ok(uuid)
    }
1496
    /// Apply a partial update to an existing task: only fields present in
    /// `request` are written, and `userModificationDate` is always bumped.
    ///
    /// The SQL is generated by `TaskUpdateBuilder::from_request`; the bind
    /// calls below must stay in the exact field order that builder emits.
    ///
    /// # Errors
    /// Fails when the task (or a referenced project/area) does not exist,
    /// the effective start/deadline range is invalid, tag serialization
    /// fails, or the UPDATE fails.
    #[instrument(skip(self))]
    pub async fn update_task(&self, request: UpdateTaskRequest) -> ThingsResult<()> {
        validators::validate_task_exists(&self.pool, &request.uuid).await?;

        // Validate the date range that will be in effect AFTER the update by
        // merging requested values with the task's current ones.
        if request.start_date.is_some() || request.deadline.is_some() {
            if let Some(current_task) = self.get_task_by_uuid(&request.uuid).await? {
                let final_start = request.start_date.or(current_task.start_date);
                let final_deadline = request.deadline.or(current_task.deadline);
                crate::database::validate_date_range(final_start, final_deadline)?;
            }
        }

        if let Some(project_uuid) = &request.project_uuid {
            validators::validate_project_exists(&self.pool, project_uuid).await?;
        }

        if let Some(area_uuid) = &request.area_uuid {
            validators::validate_area_exists(&self.pool, area_uuid).await?;
        }

        let builder = TaskUpdateBuilder::from_request(&request);

        // Nothing requested — succeed without touching the row.
        if builder.is_empty() {
            return Ok(());
        }

        let query_string = builder.build_query_string();
        let mut q = sqlx::query(&query_string);

        // Bind order mirrors the placeholder order in the generated SQL.
        if let Some(title) = &request.title {
            q = q.bind(title);
        }

        if let Some(notes) = &request.notes {
            q = q.bind(notes);
        }

        if let Some(start_date) = request.start_date {
            q = q.bind(naive_date_to_things_timestamp(start_date));
        }

        if let Some(deadline) = request.deadline {
            q = q.bind(naive_date_to_things_timestamp(deadline));
        }

        if let Some(status) = request.status {
            q = q.bind(status as i32);
        }

        if let Some(project_uuid) = request.project_uuid {
            q = q.bind(project_uuid.to_string());
        }

        if let Some(area_uuid) = request.area_uuid {
            q = q.bind(area_uuid.to_string());
        }

        if let Some(tags) = &request.tags {
            let cached_tags = serialize_tags_to_blob(tags)?;
            q = q.bind(cached_tags);
        }

        // Trailing binds: userModificationDate, then the WHERE uuid param.
        let now = Utc::now().timestamp() as f64;
        q = q.bind(now).bind(request.uuid.to_string());

        q.execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to update task: {e}")))?;

        info!("Updated task with UUID: {}", request.uuid);
        Ok(())
    }
1585
1586 #[instrument(skip(self))]
1594 pub async fn get_project_by_uuid(&self, uuid: &Uuid) -> ThingsResult<Option<Project>> {
1595 let row = sqlx::query(
1596 r"
1597 SELECT
1598 uuid, title, status,
1599 area, notes,
1600 creationDate, userModificationDate,
1601 startDate, deadline,
1602 trashed, type
1603 FROM TMTask
1604 WHERE uuid = ? AND type = 1
1605 ",
1606 )
1607 .bind(uuid.to_string())
1608 .fetch_optional(&self.pool)
1609 .await
1610 .map_err(|e| ThingsError::unknown(format!("Failed to fetch project: {e}")))?;
1611
1612 if let Some(row) = row {
1613 let trashed: i64 = row.get("trashed");
1614 if trashed == 1 {
1615 return Ok(None);
1616 }
1617 Ok(Some(map_project_row(&row)))
1618 } else {
1619 Ok(None)
1620 }
1621 }
1622
    /// Apply a partial update to an existing project: only fields present in
    /// `request` are written, and `userModificationDate` is always bumped.
    ///
    /// Reuses `TaskUpdateBuilder` (projects live in TMTask); the bind calls
    /// below must stay in the same order the fields are added to the builder.
    ///
    /// # Errors
    /// Fails when the project (or referenced area) does not exist, the
    /// effective start/deadline range is invalid, tag serialization fails,
    /// or the UPDATE fails.
    #[instrument(skip(self))]
    pub async fn update_project(
        &self,
        request: crate::models::UpdateProjectRequest,
    ) -> ThingsResult<()> {
        validators::validate_project_exists(&self.pool, &request.uuid).await?;

        // Validate the date range that will be in effect AFTER the update by
        // merging requested values with the project's current ones.
        if request.start_date.is_some() || request.deadline.is_some() {
            if let Some(current_project) = self.get_project_by_uuid(&request.uuid).await? {
                let final_start = request.start_date.or(current_project.start_date);
                let final_deadline = request.deadline.or(current_project.deadline);
                crate::database::validate_date_range(final_start, final_deadline)?;
            }
        }

        if let Some(area_uuid) = &request.area_uuid {
            validators::validate_area_exists(&self.pool, area_uuid).await?;
        }

        // Register each present field; order here fixes the bind order below.
        let mut builder = TaskUpdateBuilder::new();

        if request.title.is_some() {
            builder = builder.add_field("title");
        }
        if request.notes.is_some() {
            builder = builder.add_field("notes");
        }
        if request.start_date.is_some() {
            builder = builder.add_field("startDate");
        }
        if request.deadline.is_some() {
            builder = builder.add_field("deadline");
        }
        if request.area_uuid.is_some() {
            builder = builder.add_field("area");
        }
        if request.tags.is_some() {
            builder = builder.add_field("cachedTags");
        }

        // Nothing requested — succeed without touching the row.
        if builder.is_empty() {
            return Ok(());
        }

        let query_str = builder.build_query_string();
        let mut q = sqlx::query(&query_str);

        // Bind order mirrors the add_field order above.
        if let Some(ref title) = request.title {
            q = q.bind(title);
        }
        if let Some(ref notes) = request.notes {
            q = q.bind(notes);
        }
        if let Some(start_date) = request.start_date {
            q = q.bind(naive_date_to_things_timestamp(start_date));
        }
        if let Some(deadline) = request.deadline {
            q = q.bind(naive_date_to_things_timestamp(deadline));
        }
        if let Some(area_uuid) = request.area_uuid {
            q = q.bind(area_uuid.to_string());
        }
        if let Some(tags) = &request.tags {
            let cached_tags = serialize_tags_to_blob(tags)?;
            q = q.bind(cached_tags);
        }

        // Trailing binds: userModificationDate, then the WHERE uuid param.
        let now = Utc::now().timestamp() as f64;
        q = q.bind(now).bind(request.uuid.to_string());

        q.execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to update project: {e}")))?;

        info!("Updated project with UUID: {}", request.uuid);
        Ok(())
    }
1718
1719 #[instrument(skip(self))]
1725 pub async fn get_task_by_uuid(&self, uuid: &Uuid) -> ThingsResult<Option<Task>> {
1726 let row = sqlx::query(
1727 r"
1728 SELECT
1729 uuid, title, status, type,
1730 startDate, deadline, stopDate,
1731 project, area, heading,
1732 notes, cachedTags,
1733 creationDate, userModificationDate,
1734 trashed
1735 FROM TMTask
1736 WHERE uuid = ?
1737 ",
1738 )
1739 .bind(uuid.to_string())
1740 .fetch_optional(&self.pool)
1741 .await
1742 .map_err(|e| ThingsError::unknown(format!("Failed to fetch task: {e}")))?;
1743
1744 if let Some(row) = row {
1745 let trashed: i64 = row.get("trashed");
1747 if trashed == 1 {
1748 return Ok(None); }
1750
1751 let task = map_task_row(&row)?;
1753 Ok(Some(task))
1754 } else {
1755 Ok(None)
1756 }
1757 }
1758
1759 #[instrument(skip(self))]
1765 pub async fn complete_task(&self, uuid: &Uuid) -> ThingsResult<()> {
1766 validators::validate_task_exists(&self.pool, uuid).await?;
1768
1769 let now = Utc::now().timestamp() as f64;
1770
1771 sqlx::query(
1772 "UPDATE TMTask SET status = 1, stopDate = ?, userModificationDate = ? WHERE uuid = ?",
1773 )
1774 .bind(now)
1775 .bind(now)
1776 .bind(uuid.to_string())
1777 .execute(&self.pool)
1778 .await
1779 .map_err(|e| ThingsError::unknown(format!("Failed to complete task: {e}")))?;
1780
1781 info!("Completed task with UUID: {}", uuid);
1782 Ok(())
1783 }
1784
    /// Revert a completed task to incomplete: `status = 0`, `stopDate`
    /// cleared, and `userModificationDate` bumped.
    ///
    /// # Errors
    /// Fails when the task does not exist or the UPDATE fails.
    #[instrument(skip(self))]
    pub async fn uncomplete_task(&self, uuid: &Uuid) -> ThingsResult<()> {
        validators::validate_task_exists(&self.pool, uuid).await?;

        let now = Utc::now().timestamp() as f64;

        sqlx::query(
            "UPDATE TMTask SET status = 0, stopDate = NULL, userModificationDate = ? WHERE uuid = ?",
        )
        .bind(now)
        .bind(uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to uncomplete task: {e}")))?;

        info!("Uncompleted task with UUID: {}", uuid);
        Ok(())
    }
1809
    /// Mark a project complete, first disposing of its live child tasks
    /// according to `child_handling`:
    /// - `Error`: refuse if any non-trashed children exist.
    /// - `Cascade`: complete every non-trashed child as well.
    /// - `Orphan`: detach children by nulling their `project` column.
    ///
    /// NOTE(review): the child pass and the final project UPDATE run as
    /// separate statements, not in one transaction — a failure in between
    /// leaves children handled but the project still open; confirm this is
    /// acceptable.
    ///
    /// # Errors
    /// Fails when the project does not exist, `Error` mode finds children,
    /// or any UPDATE fails.
    #[instrument(skip(self))]
    pub async fn complete_project(
        &self,
        uuid: &Uuid,
        child_handling: crate::models::ProjectChildHandling,
    ) -> ThingsResult<()> {
        validators::validate_project_exists(&self.pool, uuid).await?;

        let now = Utc::now().timestamp() as f64;

        match child_handling {
            crate::models::ProjectChildHandling::Error => {
                // Count live (non-trashed) children so the error can say how many.
                let child_count: i64 = sqlx::query_scalar(
                    "SELECT COUNT(*) FROM TMTask WHERE project = ? AND trashed = 0",
                )
                .bind(uuid.to_string())
                .fetch_one(&self.pool)
                .await
                .map_err(|e| {
                    ThingsError::unknown(format!("Failed to check for child tasks: {e}"))
                })?;

                if child_count > 0 {
                    return Err(ThingsError::unknown(format!(
                        "Project {} has {} child task(s). Use cascade or orphan mode to complete.",
                        uuid, child_count
                    )));
                }
            }
            crate::models::ProjectChildHandling::Cascade => {
                // Complete all live children in one set-based UPDATE.
                sqlx::query(
                    "UPDATE TMTask SET status = 1, stopDate = ?, userModificationDate = ? WHERE project = ? AND trashed = 0",
                )
                .bind(now)
                .bind(now)
                .bind(uuid.to_string())
                .execute(&self.pool)
                .await
                .map_err(|e| ThingsError::unknown(format!("Failed to complete child tasks: {e}")))?;
            }
            crate::models::ProjectChildHandling::Orphan => {
                // Detach all live children from the project.
                sqlx::query(
                    "UPDATE TMTask SET project = NULL, userModificationDate = ? WHERE project = ? AND trashed = 0",
                )
                .bind(now)
                .bind(uuid.to_string())
                .execute(&self.pool)
                .await
                .map_err(|e| ThingsError::unknown(format!("Failed to orphan child tasks: {e}")))?;
            }
        }

        // Finally complete the project row itself.
        sqlx::query(
            "UPDATE TMTask SET status = 1, stopDate = ?, userModificationDate = ? WHERE uuid = ?",
        )
        .bind(now)
        .bind(now)
        .bind(uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to complete project: {e}")))?;

        info!("Completed project with UUID: {}", uuid);
        Ok(())
    }
1886
1887 #[instrument(skip(self))]
1893 pub async fn delete_task(
1894 &self,
1895 uuid: &Uuid,
1896 child_handling: DeleteChildHandling,
1897 ) -> ThingsResult<()> {
1898 validators::validate_task_exists(&self.pool, uuid).await?;
1900
1901 let children = sqlx::query("SELECT uuid FROM TMTask WHERE heading = ? AND trashed = 0")
1903 .bind(uuid.to_string())
1904 .fetch_all(&self.pool)
1905 .await
1906 .map_err(|e| ThingsError::unknown(format!("Failed to query child tasks: {e}")))?;
1907
1908 let has_children = !children.is_empty();
1909
1910 if has_children {
1911 match child_handling {
1912 DeleteChildHandling::Error => {
1913 return Err(ThingsError::unknown(format!(
1914 "Task {} has {} child task(s). Use cascade or orphan mode to delete.",
1915 uuid,
1916 children.len()
1917 )));
1918 }
1919 DeleteChildHandling::Cascade => {
1920 let now = Utc::now().timestamp() as f64;
1922 for child_row in &children {
1923 let child_uuid: String = child_row.get("uuid");
1924 sqlx::query(
1925 "UPDATE TMTask SET trashed = 1, userModificationDate = ? WHERE uuid = ?",
1926 )
1927 .bind(now)
1928 .bind(&child_uuid)
1929 .execute(&self.pool)
1930 .await
1931 .map_err(|e| {
1932 ThingsError::unknown(format!("Failed to delete child task: {e}"))
1933 })?;
1934 }
1935 info!("Cascade deleted {} child task(s)", children.len());
1936 }
1937 DeleteChildHandling::Orphan => {
1938 let now = Utc::now().timestamp() as f64;
1940 for child_row in &children {
1941 let child_uuid: String = child_row.get("uuid");
1942 sqlx::query(
1943 "UPDATE TMTask SET heading = NULL, userModificationDate = ? WHERE uuid = ?",
1944 )
1945 .bind(now)
1946 .bind(&child_uuid)
1947 .execute(&self.pool)
1948 .await
1949 .map_err(|e| {
1950 ThingsError::unknown(format!("Failed to orphan child task: {e}"))
1951 })?;
1952 }
1953 info!("Orphaned {} child task(s)", children.len());
1954 }
1955 }
1956 }
1957
1958 let now = Utc::now().timestamp() as f64;
1960 sqlx::query("UPDATE TMTask SET trashed = 1, userModificationDate = ? WHERE uuid = ?")
1961 .bind(now)
1962 .bind(uuid.to_string())
1963 .execute(&self.pool)
1964 .await
1965 .map_err(|e| ThingsError::unknown(format!("Failed to delete task: {e}")))?;
1966
1967 info!("Deleted task with UUID: {}", uuid);
1968 Ok(())
1969 }
1970
    /// Soft-delete a project (`trashed = 1`), first disposing of its live
    /// child tasks according to `child_handling`:
    /// - `Error`: refuse if any non-trashed children exist.
    /// - `Cascade`: trash every non-trashed child as well.
    /// - `Orphan`: detach children by nulling their `project` column.
    ///
    /// NOTE(review): child handling and the final project UPDATE are not
    /// wrapped in a transaction — confirm partial failure is acceptable.
    ///
    /// # Errors
    /// Fails when the project does not exist, `Error` mode finds children,
    /// or any query fails.
    #[instrument(skip(self))]
    pub async fn delete_project(
        &self,
        uuid: &Uuid,
        child_handling: crate::models::ProjectChildHandling,
    ) -> ThingsResult<()> {
        validators::validate_project_exists(&self.pool, uuid).await?;

        let now = Utc::now().timestamp() as f64;

        match child_handling {
            crate::models::ProjectChildHandling::Error => {
                // Count live children so the error can say how many.
                let child_count: i64 = sqlx::query_scalar(
                    "SELECT COUNT(*) FROM TMTask WHERE project = ? AND trashed = 0",
                )
                .bind(uuid.to_string())
                .fetch_one(&self.pool)
                .await
                .map_err(|e| {
                    ThingsError::unknown(format!("Failed to check for child tasks: {e}"))
                })?;

                if child_count > 0 {
                    return Err(ThingsError::unknown(format!(
                        "Project {} has {} child task(s). Use cascade or orphan mode to delete.",
                        uuid, child_count
                    )));
                }
            }
            crate::models::ProjectChildHandling::Cascade => {
                // Trash all live children in one set-based UPDATE.
                sqlx::query(
                    "UPDATE TMTask SET trashed = 1, userModificationDate = ? WHERE project = ? AND trashed = 0",
                )
                .bind(now)
                .bind(uuid.to_string())
                .execute(&self.pool)
                .await
                .map_err(|e| ThingsError::unknown(format!("Failed to delete child tasks: {e}")))?;
            }
            crate::models::ProjectChildHandling::Orphan => {
                // Detach all live children from the project.
                sqlx::query(
                    "UPDATE TMTask SET project = NULL, userModificationDate = ? WHERE project = ? AND trashed = 0",
                )
                .bind(now)
                .bind(uuid.to_string())
                .execute(&self.pool)
                .await
                .map_err(|e| ThingsError::unknown(format!("Failed to orphan child tasks: {e}")))?;
            }
        }

        // Finally trash the project row itself.
        sqlx::query("UPDATE TMTask SET trashed = 1, userModificationDate = ? WHERE uuid = ?")
            .bind(now)
            .bind(uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to delete project: {e}")))?;

        info!("Deleted project with UUID: {}", uuid);
        Ok(())
    }
2043
    /// Insert a new area row into `TMArea`, appending it at the end of the
    /// user's area ordering, and return its generated UUID.
    ///
    /// # Errors
    /// Fails when the index lookup or the INSERT fails.
    #[instrument(skip(self))]
    pub async fn create_area(
        &self,
        request: crate::models::CreateAreaRequest,
    ) -> ThingsResult<Uuid> {
        let uuid = Uuid::new_v4();
        let uuid_str = uuid.to_string();

        let now = Utc::now().timestamp() as f64;

        // Place the new area after the current last one. MAX(`index`) is
        // NULL on an empty table, hence the Option.
        // NOTE(review): this read-then-insert is racy under concurrent
        // writers — confirm single-writer assumptions hold for this DB.
        let max_index: Option<i64> = sqlx::query_scalar("SELECT MAX(`index`) FROM TMArea")
            .fetch_one(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to get max area index: {e}")))?;

        // Empty table → -1 + 1 = 0, i.e. the first index.
        let next_index = max_index.unwrap_or(-1) + 1;

        // visible is hard-coded to 1 (shown in the sidebar list).
        sqlx::query(
            r"
            INSERT INTO TMArea (
                uuid, title, visible, `index`,
                creationDate, userModificationDate
            ) VALUES (?, ?, 1, ?, ?, ?)
            ",
        )
        .bind(&uuid_str)
        .bind(&request.title)
        .bind(next_index)
        .bind(now)
        .bind(now)
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to create area: {e}")))?;

        info!("Created area with UUID: {}", uuid);
        Ok(uuid)
    }
2090
2091 #[instrument(skip(self))]
2097 pub async fn update_area(&self, request: crate::models::UpdateAreaRequest) -> ThingsResult<()> {
2098 validators::validate_area_exists(&self.pool, &request.uuid).await?;
2100
2101 let now = Utc::now().timestamp() as f64;
2102
2103 sqlx::query("UPDATE TMArea SET title = ?, userModificationDate = ? WHERE uuid = ?")
2104 .bind(&request.title)
2105 .bind(now)
2106 .bind(request.uuid.to_string())
2107 .execute(&self.pool)
2108 .await
2109 .map_err(|e| ThingsError::unknown(format!("Failed to update area: {e}")))?;
2110
2111 info!("Updated area with UUID: {}", request.uuid);
2112 Ok(())
2113 }
2114
    /// Hard-delete an area, first detaching its projects.
    ///
    /// NOTE(review): only rows with `type = 1` (projects) are detached; any
    /// tasks pointing at this area directly are left referencing a deleted
    /// area row — confirm whether that is intended.
    ///
    /// # Errors
    /// Fails when the area does not exist or either statement fails.
    #[instrument(skip(self))]
    pub async fn delete_area(&self, uuid: &Uuid) -> ThingsResult<()> {
        validators::validate_area_exists(&self.pool, uuid).await?;

        let now = Utc::now().timestamp() as f64;

        // Detach live projects from the area before removing it.
        sqlx::query(
            "UPDATE TMTask SET area = NULL, userModificationDate = ? WHERE area = ? AND type = 1 AND trashed = 0",
        )
        .bind(now)
        .bind(uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to orphan projects in area: {e}")))?;

        // Areas are removed outright (no soft-delete column used here).
        sqlx::query("DELETE FROM TMArea WHERE uuid = ?")
            .bind(uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to delete area: {e}")))?;

        info!("Deleted area with UUID: {}", uuid);
        Ok(())
    }
2150
    /// Find a tag whose title matches `normalized` case-insensitively and
    /// assemble the full `Tag` model for it, or `None` when absent.
    ///
    /// # Errors
    /// Fails when the lookup query fails (the usage-count query falls back
    /// to 0 on error instead of failing).
    #[instrument(skip(self))]
    pub async fn find_tag_by_normalized_title(
        &self,
        normalized: &str,
    ) -> ThingsResult<Option<crate::models::Tag>> {
        // Case-insensitive match on the raw title column.
        let row = sqlx::query(
            "SELECT uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate
             FROM TMTag
             WHERE LOWER(title) = LOWER(?)",
        )
        .bind(normalized)
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to find tag by title: {e}")))?;

        if let Some(row) = row {
            // Things IDs are not always RFC-4122 UUIDs; fall back to a
            // deterministic conversion when parsing fails.
            let uuid_str: String = row.get("uuid");
            let uuid =
                Uuid::parse_str(&uuid_str).unwrap_or_else(|_| things_uuid_to_uuid(&uuid_str));
            let title: String = row.get("title");
            let shortcut: Option<String> = row.get("shortcut");
            let parent_str: Option<String> = row.get("parent");
            let parent_uuid =
                parent_str.map(|s| Uuid::parse_str(&s).unwrap_or_else(|_| things_uuid_to_uuid(&s)));

            // Timestamps are clamped to a sane range before conversion;
            // out-of-range values degrade to "now".
            let creation_ts: f64 = row.get("creationDate");
            let created = {
                let ts = safe_timestamp_convert(creation_ts);
                DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
            };

            let modification_ts: f64 = row.get("userModificationDate");
            let modified = {
                let ts = safe_timestamp_convert(modification_ts);
                DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
            };

            let used_ts: Option<f64> = row.get("usedDate");
            let last_used = used_ts.and_then(|ts| {
                let ts_i64 = safe_timestamp_convert(ts);
                DateTime::from_timestamp(ts_i64, 0)
            });

            // Usage count via a LIKE over the cachedTags JSON blob.
            // NOTE(review): substring matching on '"title"' is a heuristic —
            // titles containing quotes or embedded in other titles may be
            // miscounted; confirm acceptable.
            let usage_count: i64 = sqlx::query_scalar(
                "SELECT COUNT(*) FROM TMTask
                 WHERE cachedTags IS NOT NULL
                 AND json_extract(cachedTags, '$') LIKE ?
                 AND trashed = 0",
            )
            .bind(format!("%\"{}\"%", title))
            .fetch_one(&self.pool)
            .await
            .unwrap_or(0);

            Ok(Some(crate::models::Tag {
                uuid,
                title,
                shortcut,
                parent_uuid,
                created,
                modified,
                usage_count: usage_count as u32,
                last_used,
            }))
        } else {
            Ok(None)
        }
    }
2229
2230 #[instrument(skip(self))]
2238 pub async fn find_similar_tags(
2239 &self,
2240 title: &str,
2241 min_similarity: f32,
2242 ) -> ThingsResult<Vec<crate::models::TagMatch>> {
2243 use crate::database::tag_utils::{calculate_similarity, get_match_type};
2244
2245 let all_tags = self.get_all_tags().await?;
2247
2248 let mut matches: Vec<crate::models::TagMatch> = all_tags
2250 .into_iter()
2251 .filter_map(|tag| {
2252 let similarity = calculate_similarity(title, &tag.title);
2253 if similarity >= min_similarity {
2254 let match_type = get_match_type(title, &tag.title, min_similarity);
2255 Some(crate::models::TagMatch {
2256 tag,
2257 similarity_score: similarity,
2258 match_type,
2259 })
2260 } else {
2261 None
2262 }
2263 })
2264 .collect();
2265
2266 matches.sort_by(|a, b| {
2268 b.similarity_score
2269 .partial_cmp(&a.similarity_score)
2270 .unwrap_or(std::cmp::Ordering::Equal)
2271 });
2272
2273 Ok(matches)
2274 }
2275
    /// Find all tags whose title contains `query` (SQL LIKE substring
    /// match), ordered alphabetically, with full `Tag` models assembled.
    ///
    /// NOTE(review): one usage-count query is issued per matching tag
    /// (N+1 pattern) — fine for small tag sets, confirm scale.
    ///
    /// # Errors
    /// Fails when the search query fails (usage counts fall back to 0).
    #[instrument(skip(self))]
    pub async fn search_tags(&self, query: &str) -> ThingsResult<Vec<crate::models::Tag>> {
        let rows = sqlx::query(
            "SELECT uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate
             FROM TMTag
             WHERE title LIKE ?
             ORDER BY title",
        )
        .bind(format!("%{}%", query))
        .fetch_all(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to search tags: {e}")))?;

        let mut tags = Vec::new();
        for row in rows {
            // Things IDs are not always RFC-4122 UUIDs; fall back to a
            // deterministic conversion when parsing fails.
            let uuid_str: String = row.get("uuid");
            let uuid =
                Uuid::parse_str(&uuid_str).unwrap_or_else(|_| things_uuid_to_uuid(&uuid_str));
            let title: String = row.get("title");
            let shortcut: Option<String> = row.get("shortcut");
            let parent_str: Option<String> = row.get("parent");
            let parent_uuid =
                parent_str.map(|s| Uuid::parse_str(&s).unwrap_or_else(|_| things_uuid_to_uuid(&s)));

            // Clamp timestamps; out-of-range values degrade to "now".
            let creation_ts: f64 = row.get("creationDate");
            let created = {
                let ts = safe_timestamp_convert(creation_ts);
                DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
            };

            let modification_ts: f64 = row.get("userModificationDate");
            let modified = {
                let ts = safe_timestamp_convert(modification_ts);
                DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
            };

            let used_ts: Option<f64> = row.get("usedDate");
            let last_used = used_ts.and_then(|ts| {
                let ts_i64 = safe_timestamp_convert(ts);
                DateTime::from_timestamp(ts_i64, 0)
            });

            // Approximate usage count via LIKE over the cachedTags JSON.
            let usage_count: i64 = sqlx::query_scalar(
                "SELECT COUNT(*) FROM TMTask
                 WHERE cachedTags IS NOT NULL
                 AND json_extract(cachedTags, '$') LIKE ?
                 AND trashed = 0",
            )
            .bind(format!("%\"{}\"%", title))
            .fetch_one(&self.pool)
            .await
            .unwrap_or(0);

            tags.push(crate::models::Tag {
                uuid,
                title,
                shortcut,
                parent_uuid,
                created,
                modified,
                usage_count: usage_count as u32,
                last_used,
            });
        }

        Ok(tags)
    }
2349
    /// Load every tag, ordered alphabetically, with full `Tag` models
    /// (timestamps converted, usage count computed).
    ///
    /// NOTE(review): one usage-count query is issued per tag (N+1 pattern) —
    /// fine for small tag sets, confirm scale.
    ///
    /// # Errors
    /// Fails when the tag query fails (usage counts fall back to 0).
    #[instrument(skip(self))]
    pub async fn get_all_tags(&self) -> ThingsResult<Vec<crate::models::Tag>> {
        let rows = sqlx::query(
            "SELECT uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate
             FROM TMTag
             ORDER BY title",
        )
        .fetch_all(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to get all tags: {e}")))?;

        let mut tags = Vec::new();
        for row in rows {
            // Things IDs are not always RFC-4122 UUIDs; fall back to a
            // deterministic conversion when parsing fails.
            let uuid_str: String = row.get("uuid");
            let uuid =
                Uuid::parse_str(&uuid_str).unwrap_or_else(|_| things_uuid_to_uuid(&uuid_str));
            let title: String = row.get("title");
            let shortcut: Option<String> = row.get("shortcut");
            let parent_str: Option<String> = row.get("parent");
            let parent_uuid =
                parent_str.map(|s| Uuid::parse_str(&s).unwrap_or_else(|_| things_uuid_to_uuid(&s)));

            // Clamp timestamps; out-of-range values degrade to "now".
            let creation_ts: f64 = row.get("creationDate");
            let created = {
                let ts = safe_timestamp_convert(creation_ts);
                DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
            };

            let modification_ts: f64 = row.get("userModificationDate");
            let modified = {
                let ts = safe_timestamp_convert(modification_ts);
                DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
            };

            let used_ts: Option<f64> = row.get("usedDate");
            let last_used = used_ts.and_then(|ts| {
                let ts_i64 = safe_timestamp_convert(ts);
                DateTime::from_timestamp(ts_i64, 0)
            });

            // Approximate usage count via LIKE over the cachedTags JSON.
            let usage_count: i64 = sqlx::query_scalar(
                "SELECT COUNT(*) FROM TMTask
                 WHERE cachedTags IS NOT NULL
                 AND json_extract(cachedTags, '$') LIKE ?
                 AND trashed = 0",
            )
            .bind(format!("%\"{}\"%", title))
            .fetch_one(&self.pool)
            .await
            .unwrap_or(0);

            tags.push(crate::models::Tag {
                uuid,
                title,
                shortcut,
                parent_uuid,
                created,
                modified,
                usage_count: usage_count as u32,
                last_used,
            });
        }

        Ok(tags)
    }
2421
2422 #[instrument(skip(self))]
2428 pub async fn get_popular_tags(&self, limit: usize) -> ThingsResult<Vec<crate::models::Tag>> {
2429 let mut all_tags = self.get_all_tags().await?;
2430
2431 all_tags.sort_by_key(|t| std::cmp::Reverse(t.usage_count));
2433
2434 all_tags.truncate(limit);
2436
2437 Ok(all_tags)
2438 }
2439
    /// Return up to `limit` tags that have a `usedDate`, most recently used
    /// first, with full `Tag` models assembled.
    ///
    /// NOTE(review): one usage-count query is issued per returned tag
    /// (N+1 pattern) — fine for small limits, confirm scale.
    ///
    /// # Errors
    /// Fails when the tag query fails (usage counts fall back to 0).
    #[instrument(skip(self))]
    pub async fn get_recent_tags(&self, limit: usize) -> ThingsResult<Vec<crate::models::Tag>> {
        let rows = sqlx::query(
            "SELECT uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate
             FROM TMTag
             WHERE usedDate IS NOT NULL
             ORDER BY usedDate DESC
             LIMIT ?",
        )
        .bind(limit as i64)
        .fetch_all(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to get recent tags: {e}")))?;

        let mut tags = Vec::new();
        for row in rows {
            // Things IDs are not always RFC-4122 UUIDs; fall back to a
            // deterministic conversion when parsing fails.
            let uuid_str: String = row.get("uuid");
            let uuid =
                Uuid::parse_str(&uuid_str).unwrap_or_else(|_| things_uuid_to_uuid(&uuid_str));
            let title: String = row.get("title");
            let shortcut: Option<String> = row.get("shortcut");
            let parent_str: Option<String> = row.get("parent");
            let parent_uuid =
                parent_str.map(|s| Uuid::parse_str(&s).unwrap_or_else(|_| things_uuid_to_uuid(&s)));

            // Clamp timestamps; out-of-range values degrade to "now".
            let creation_ts: f64 = row.get("creationDate");
            let created = {
                let ts = safe_timestamp_convert(creation_ts);
                DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
            };

            let modification_ts: f64 = row.get("userModificationDate");
            let modified = {
                let ts = safe_timestamp_convert(modification_ts);
                DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
            };

            let used_ts: Option<f64> = row.get("usedDate");
            let last_used = used_ts.and_then(|ts| {
                let ts_i64 = safe_timestamp_convert(ts);
                DateTime::from_timestamp(ts_i64, 0)
            });

            // Approximate usage count via LIKE over the cachedTags JSON.
            let usage_count: i64 = sqlx::query_scalar(
                "SELECT COUNT(*) FROM TMTask
                 WHERE cachedTags IS NOT NULL
                 AND json_extract(cachedTags, '$') LIKE ?
                 AND trashed = 0",
            )
            .bind(format!("%\"{}\"%", title))
            .fetch_one(&self.pool)
            .await
            .unwrap_or(0);

            tags.push(crate::models::Tag {
                uuid,
                title,
                shortcut,
                parent_uuid,
                created,
                modified,
                usage_count: usage_count as u32,
                last_used,
            });
        }

        Ok(tags)
    }
2514
    /// Create a tag unless an equivalent or confusingly similar one exists.
    ///
    /// Resolution order:
    /// 1. Exact (case-insensitive, normalized) title match → `Existing`.
    /// 2. Fuzzy matches at similarity >= 0.8 → `SimilarFound`, nothing
    ///    inserted; the caller decides how to proceed.
    /// 3. Otherwise a new row is inserted → `Created`.
    ///
    /// # Errors
    /// Fails when any lookup or the INSERT fails.
    #[instrument(skip(self))]
    pub async fn create_tag_smart(
        &self,
        request: crate::models::CreateTagRequest,
    ) -> ThingsResult<crate::models::TagCreationResult> {
        use crate::database::tag_utils::normalize_tag_title;
        use crate::models::TagCreationResult;

        let normalized = normalize_tag_title(&request.title);

        // Exact duplicate: return the existing tag instead of inserting.
        if let Some(existing) = self.find_tag_by_normalized_title(&normalized).await? {
            return Ok(TagCreationResult::Existing {
                tag: existing,
                is_new: false,
            });
        }

        // Near-duplicates: surface them without creating anything.
        let similar_tags = self.find_similar_tags(&normalized, 0.8).await?;

        if !similar_tags.is_empty() {
            return Ok(TagCreationResult::SimilarFound {
                similar_tags,
                requested_title: request.title,
            });
        }

        let uuid = Uuid::new_v4();
        let now = Utc::now().timestamp() as f64;

        // usedDate starts NULL (never used); `index` defaults to 0.
        sqlx::query(
            "INSERT INTO TMTag (uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate, `index`)
             VALUES (?, ?, ?, ?, ?, ?, NULL, 0)"
        )
        .bind(uuid.to_string())
        .bind(&request.title)
        .bind(request.shortcut.as_ref())
        .bind(request.parent_uuid.map(|u| u.to_string()))
        .bind(now)
        .bind(now)
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to create tag: {e}")))?;

        info!("Created tag with UUID: {}", uuid);
        Ok(TagCreationResult::Created { uuid, is_new: true })
    }
2576
    /// Insert a tag unconditionally, skipping the duplicate and similarity
    /// checks performed by `create_tag_smart`.
    ///
    /// # Errors
    /// Fails when the INSERT fails.
    #[instrument(skip(self))]
    pub async fn create_tag_force(
        &self,
        request: crate::models::CreateTagRequest,
    ) -> ThingsResult<Uuid> {
        let uuid = Uuid::new_v4();
        let now = Utc::now().timestamp() as f64;

        // usedDate starts NULL (never used); `index` defaults to 0.
        sqlx::query(
            "INSERT INTO TMTag (uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate, `index`)
             VALUES (?, ?, ?, ?, ?, ?, NULL, 0)"
        )
        .bind(uuid.to_string())
        .bind(&request.title)
        .bind(request.shortcut.as_ref())
        .bind(request.parent_uuid.map(|u| u.to_string()))
        .bind(now)
        .bind(now)
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to create tag: {e}")))?;

        info!("Forcefully created tag with UUID: {}", uuid);
        Ok(uuid)
    }
2607
2608 #[instrument(skip(self))]
2614 pub async fn update_tag(&self, request: crate::models::UpdateTagRequest) -> ThingsResult<()> {
2615 use crate::database::tag_utils::normalize_tag_title;
2616
2617 let existing = self
2619 .find_tag_by_normalized_title(&request.uuid.to_string())
2620 .await?;
2621 if existing.is_none() {
2622 let row = sqlx::query("SELECT 1 FROM TMTag WHERE uuid = ?")
2624 .bind(request.uuid.to_string())
2625 .fetch_optional(&self.pool)
2626 .await
2627 .map_err(|e| ThingsError::unknown(format!("Failed to validate tag: {e}")))?;
2628
2629 if row.is_none() {
2630 return Err(ThingsError::unknown(format!(
2631 "Tag not found: {}",
2632 request.uuid
2633 )));
2634 }
2635 }
2636
2637 if let Some(new_title) = &request.title {
2639 let normalized = normalize_tag_title(new_title);
2640 if let Some(duplicate) = self.find_tag_by_normalized_title(&normalized).await? {
2641 if duplicate.uuid != request.uuid {
2642 return Err(ThingsError::unknown(format!(
2643 "Tag with title '{}' already exists",
2644 new_title
2645 )));
2646 }
2647 }
2648 }
2649
2650 let now = Utc::now().timestamp() as f64;
2651
2652 let mut updates = Vec::new();
2654 let mut params: Vec<String> = Vec::new();
2655
2656 if let Some(title) = &request.title {
2657 updates.push("title = ?");
2658 params.push(title.clone());
2659 }
2660 if let Some(shortcut) = &request.shortcut {
2661 updates.push("shortcut = ?");
2662 params.push(shortcut.clone());
2663 }
2664 if let Some(parent_uuid) = request.parent_uuid {
2665 updates.push("parent = ?");
2666 params.push(parent_uuid.to_string());
2667 }
2668
2669 if updates.is_empty() {
2670 return Ok(()); }
2672
2673 updates.push("userModificationDate = ?");
2674 params.push(now.to_string());
2675
2676 let sql = format!("UPDATE TMTag SET {} WHERE uuid = ?", updates.join(", "));
2677 params.push(request.uuid.to_string());
2678
2679 let mut query = sqlx::query(&sql);
2680 for param in params {
2681 query = query.bind(param);
2682 }
2683
2684 query
2685 .execute(&self.pool)
2686 .await
2687 .map_err(|e| ThingsError::unknown(format!("Failed to update tag: {e}")))?;
2688
2689 info!("Updated tag with UUID: {}", request.uuid);
2690 Ok(())
2691 }
2692
2693 #[instrument(skip(self))]
2704 pub async fn delete_tag(&self, uuid: &Uuid, remove_from_tasks: bool) -> ThingsResult<()> {
2705 let tag = self.find_tag_by_normalized_title(&uuid.to_string()).await?;
2707
2708 if tag.is_none() {
2709 let row = sqlx::query("SELECT title FROM TMTag WHERE uuid = ?")
2711 .bind(uuid.to_string())
2712 .fetch_optional(&self.pool)
2713 .await
2714 .map_err(|e| ThingsError::unknown(format!("Failed to find tag: {e}")))?;
2715
2716 if row.is_none() {
2717 return Err(ThingsError::unknown(format!("Tag not found: {}", uuid)));
2718 }
2719 }
2720
2721 if remove_from_tasks {
2722 info!("Removing tag {} from all tasks (not yet implemented)", uuid);
2725 }
2726
2727 sqlx::query("DELETE FROM TMTag WHERE uuid = ?")
2729 .bind(uuid.to_string())
2730 .execute(&self.pool)
2731 .await
2732 .map_err(|e| ThingsError::unknown(format!("Failed to delete tag: {e}")))?;
2733
2734 info!("Deleted tag with UUID: {}", uuid);
2735 Ok(())
2736 }
2737
2738 #[instrument(skip(self))]
2749 pub async fn merge_tags(&self, source_uuid: &Uuid, target_uuid: &Uuid) -> ThingsResult<()> {
2750 let source_row = sqlx::query("SELECT title FROM TMTag WHERE uuid = ?")
2752 .bind(source_uuid.to_string())
2753 .fetch_optional(&self.pool)
2754 .await
2755 .map_err(|e| ThingsError::unknown(format!("Failed to find source tag: {e}")))?;
2756
2757 if source_row.is_none() {
2758 return Err(ThingsError::unknown(format!(
2759 "Source tag not found: {}",
2760 source_uuid
2761 )));
2762 }
2763
2764 let target_row = sqlx::query("SELECT title FROM TMTag WHERE uuid = ?")
2765 .bind(target_uuid.to_string())
2766 .fetch_optional(&self.pool)
2767 .await
2768 .map_err(|e| ThingsError::unknown(format!("Failed to find target tag: {e}")))?;
2769
2770 if target_row.is_none() {
2771 return Err(ThingsError::unknown(format!(
2772 "Target tag not found: {}",
2773 target_uuid
2774 )));
2775 }
2776
2777 info!(
2780 "Merging tag {} into {} (tag replacement in tasks not yet fully implemented)",
2781 source_uuid, target_uuid
2782 );
2783
2784 let now = Utc::now().timestamp() as f64;
2786 sqlx::query("UPDATE TMTag SET userModificationDate = ?, usedDate = ? WHERE uuid = ?")
2787 .bind(now)
2788 .bind(now)
2789 .bind(target_uuid.to_string())
2790 .execute(&self.pool)
2791 .await
2792 .map_err(|e| ThingsError::unknown(format!("Failed to update target tag: {e}")))?;
2793
2794 sqlx::query("DELETE FROM TMTag WHERE uuid = ?")
2796 .bind(source_uuid.to_string())
2797 .execute(&self.pool)
2798 .await
2799 .map_err(|e| ThingsError::unknown(format!("Failed to delete source tag: {e}")))?;
2800
2801 info!("Merged tag {} into {}", source_uuid, target_uuid);
2802 Ok(())
2803 }
2804
    /// Assign a tag (by title) to a task, creating the tag if needed.
    ///
    /// Resolution order:
    /// 1. Reuse an existing tag whose normalized title matches.
    /// 2. Otherwise, if similar tags exist (similarity >= 0.8), return them
    ///    as `Suggestions` without assigning or creating anything.
    /// 3. Otherwise force-create a new tag and assign it.
    ///
    /// The assignment is stored by appending the tag's title to the task's
    /// `cachedTags` blob (JSON-serialized list of titles); the tag's
    /// `usedDate` is stamped only when a new assignment is actually written.
    #[instrument(skip(self))]
    pub async fn add_tag_to_task(
        &self,
        task_uuid: &Uuid,
        tag_title: &str,
    ) -> ThingsResult<crate::models::TagAssignmentResult> {
        use crate::database::tag_utils::normalize_tag_title;
        use crate::models::TagAssignmentResult;

        // Fail fast if the task does not exist.
        validators::validate_task_exists(&self.pool, task_uuid).await?;

        let normalized = normalize_tag_title(tag_title);

        // Resolve (or create) the tag to assign.
        let tag = if let Some(existing_tag) = self.find_tag_by_normalized_title(&normalized).await?
        {
            existing_tag
        } else {
            let similar_tags = self.find_similar_tags(&normalized, 0.8).await?;

            // Near-matches exist: surface them instead of silently creating
            // a near-duplicate tag.
            if !similar_tags.is_empty() {
                return Ok(TagAssignmentResult::Suggestions { similar_tags });
            }

            let request = crate::models::CreateTagRequest {
                title: tag_title.to_string(),
                shortcut: None,
                parent_uuid: None,
            };
            let _uuid = self.create_tag_force(request).await?;

            // Re-read the tag we just inserted to get its full record.
            self.find_tag_by_normalized_title(&normalized)
                .await?
                .ok_or_else(|| ThingsError::unknown("Failed to retrieve newly created tag"))?
        };

        let row = sqlx::query("SELECT cachedTags FROM TMTask WHERE uuid = ?")
            .bind(task_uuid.to_string())
            .fetch_one(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to fetch task tags: {e}")))?;

        // A NULL blob means the task currently has no tags.
        let cached_tags_blob: Option<Vec<u8>> = row.get("cachedTags");
        let mut tags: Vec<String> = if let Some(blob) = cached_tags_blob {
            deserialize_tags_from_blob(&blob)?
        } else {
            Vec::new()
        };

        // NOTE(review): membership here is an exact (case-sensitive) match on
        // the canonical tag title, while `remove_tag_from_task` compares
        // normalized titles — confirm the asymmetry is intentional.
        if !tags.contains(&tag.title) {
            tags.push(tag.title.clone());

            let cached_tags = serialize_tags_to_blob(&tags)?;
            let now = Utc::now().timestamp() as f64;

            sqlx::query(
                "UPDATE TMTask SET cachedTags = ?, userModificationDate = ? WHERE uuid = ?",
            )
            .bind(cached_tags)
            .bind(now)
            .bind(task_uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to update task tags: {e}")))?;

            // Mark the tag as recently used.
            sqlx::query("UPDATE TMTag SET usedDate = ?, userModificationDate = ? WHERE uuid = ?")
                .bind(now)
                .bind(now)
                .bind(tag.uuid.to_string())
                .execute(&self.pool)
                .await
                .map_err(|e| ThingsError::unknown(format!("Failed to update tag usedDate: {e}")))?;

            info!("Added tag '{}' to task {}", tag.title, task_uuid);
        }

        Ok(TagAssignmentResult::Assigned { tag_uuid: tag.uuid })
    }
2905
2906 #[instrument(skip(self))]
2912 pub async fn remove_tag_from_task(
2913 &self,
2914 task_uuid: &Uuid,
2915 tag_title: &str,
2916 ) -> ThingsResult<()> {
2917 use crate::database::tag_utils::normalize_tag_title;
2918
2919 validators::validate_task_exists(&self.pool, task_uuid).await?;
2921
2922 let row = sqlx::query("SELECT cachedTags FROM TMTask WHERE uuid = ?")
2924 .bind(task_uuid.to_string())
2925 .fetch_one(&self.pool)
2926 .await
2927 .map_err(|e| ThingsError::unknown(format!("Failed to fetch task tags: {e}")))?;
2928
2929 let cached_tags_blob: Option<Vec<u8>> = row.get("cachedTags");
2930 let mut tags: Vec<String> = if let Some(blob) = cached_tags_blob {
2931 deserialize_tags_from_blob(&blob)?
2932 } else {
2933 return Ok(()); };
2935
2936 let normalized = normalize_tag_title(tag_title);
2938 let original_len = tags.len();
2939 tags.retain(|t| normalize_tag_title(t) != normalized);
2940
2941 if tags.len() < original_len {
2943 let cached_tags = if tags.is_empty() {
2944 None
2945 } else {
2946 Some(serialize_tags_to_blob(&tags)?)
2947 };
2948
2949 let now = Utc::now().timestamp() as f64;
2950
2951 if let Some(cached_tags_val) = cached_tags {
2952 sqlx::query(
2953 "UPDATE TMTask SET cachedTags = ?, userModificationDate = ? WHERE uuid = ?",
2954 )
2955 .bind(cached_tags_val)
2956 .bind(now)
2957 .bind(task_uuid.to_string())
2958 .execute(&self.pool)
2959 .await
2960 .map_err(|e| ThingsError::unknown(format!("Failed to update task tags: {e}")))?;
2961 } else {
2962 sqlx::query(
2964 "UPDATE TMTask SET cachedTags = NULL, userModificationDate = ? WHERE uuid = ?",
2965 )
2966 .bind(now)
2967 .bind(task_uuid.to_string())
2968 .execute(&self.pool)
2969 .await
2970 .map_err(|e| ThingsError::unknown(format!("Failed to update task tags: {e}")))?;
2971 }
2972
2973 info!("Removed tag '{}' from task {}", tag_title, task_uuid);
2974 }
2975
2976 Ok(())
2977 }
2978
2979 #[instrument(skip(self))]
2987 pub async fn set_task_tags(
2988 &self,
2989 task_uuid: &Uuid,
2990 tag_titles: Vec<String>,
2991 ) -> ThingsResult<Vec<crate::models::TagMatch>> {
2992 use crate::database::tag_utils::normalize_tag_title;
2993
2994 validators::validate_task_exists(&self.pool, task_uuid).await?;
2996
2997 let mut resolved_tags = Vec::new();
2998 let mut suggestions = Vec::new();
2999
3000 for title in tag_titles {
3002 let normalized = normalize_tag_title(&title);
3003
3004 if let Some(existing_tag) = self.find_tag_by_normalized_title(&normalized).await? {
3006 resolved_tags.push(existing_tag.title);
3007 } else {
3008 let similar_tags = self.find_similar_tags(&normalized, 0.8).await?;
3010
3011 if !similar_tags.is_empty() {
3012 suggestions.extend(similar_tags);
3013 }
3014
3015 resolved_tags.push(title);
3017 }
3018 }
3019
3020 for title in &resolved_tags {
3022 let normalized = normalize_tag_title(title);
3023 if self
3024 .find_tag_by_normalized_title(&normalized)
3025 .await?
3026 .is_none()
3027 {
3028 let request = crate::models::CreateTagRequest {
3029 title: title.clone(),
3030 shortcut: None,
3031 parent_uuid: None,
3032 };
3033 self.create_tag_force(request).await?;
3034 }
3035 }
3036
3037 let cached_tags = if resolved_tags.is_empty() {
3039 None
3040 } else {
3041 Some(serialize_tags_to_blob(&resolved_tags)?)
3042 };
3043
3044 let now = Utc::now().timestamp() as f64;
3045
3046 if let Some(cached_tags_val) = cached_tags {
3047 sqlx::query(
3048 "UPDATE TMTask SET cachedTags = ?, userModificationDate = ? WHERE uuid = ?",
3049 )
3050 .bind(cached_tags_val)
3051 .bind(now)
3052 .bind(task_uuid.to_string())
3053 .execute(&self.pool)
3054 .await
3055 .map_err(|e| ThingsError::unknown(format!("Failed to update task tags: {e}")))?;
3056 } else {
3057 sqlx::query(
3058 "UPDATE TMTask SET cachedTags = NULL, userModificationDate = ? WHERE uuid = ?",
3059 )
3060 .bind(now)
3061 .bind(task_uuid.to_string())
3062 .execute(&self.pool)
3063 .await
3064 .map_err(|e| ThingsError::unknown(format!("Failed to update task tags: {e}")))?;
3065 }
3066
3067 for title in &resolved_tags {
3069 let normalized = normalize_tag_title(title);
3070 if let Some(tag) = self.find_tag_by_normalized_title(&normalized).await? {
3071 sqlx::query(
3072 "UPDATE TMTag SET usedDate = ?, userModificationDate = ? WHERE uuid = ?",
3073 )
3074 .bind(now)
3075 .bind(now)
3076 .bind(tag.uuid.to_string())
3077 .execute(&self.pool)
3078 .await
3079 .map_err(|e| ThingsError::unknown(format!("Failed to update tag usedDate: {e}")))?;
3080 }
3081 }
3082
3083 info!("Set tags on task {} to: {:?}", task_uuid, resolved_tags);
3084 Ok(suggestions)
3085 }
3086
3087 #[instrument(skip(self))]
3103 pub async fn get_tag_completions(
3104 &self,
3105 partial_input: &str,
3106 limit: usize,
3107 ) -> ThingsResult<Vec<crate::models::TagCompletion>> {
3108 use crate::database::tag_utils::{calculate_similarity, normalize_tag_title};
3109
3110 let normalized_input = normalize_tag_title(partial_input);
3111 let all_tags = self.get_all_tags().await?;
3112
3113 let mut completions: Vec<crate::models::TagCompletion> = all_tags
3114 .into_iter()
3115 .filter_map(|tag| {
3116 let normalized_tag = normalize_tag_title(&tag.title);
3117
3118 let score = if normalized_tag.starts_with(&normalized_input) {
3120 3.0 + (tag.usage_count as f32 / 100.0)
3122 } else if normalized_tag.contains(&normalized_input) {
3123 2.0 + (tag.usage_count as f32 / 100.0)
3125 } else {
3126 let similarity = calculate_similarity(partial_input, &tag.title);
3128 if similarity >= 0.6 {
3129 similarity + (tag.usage_count as f32 / 1000.0)
3130 } else {
3131 return None; }
3133 };
3134
3135 Some(crate::models::TagCompletion { tag, score })
3136 })
3137 .collect();
3138
3139 completions.sort_by(|a, b| {
3141 b.score
3142 .partial_cmp(&a.score)
3143 .unwrap_or(std::cmp::Ordering::Equal)
3144 });
3145
3146 completions.truncate(limit);
3148
3149 Ok(completions)
3150 }
3151
3152 #[instrument(skip(self))]
3158 pub async fn get_tag_statistics(
3159 &self,
3160 uuid: &Uuid,
3161 ) -> ThingsResult<crate::models::TagStatistics> {
3162 let tag_row = sqlx::query("SELECT title FROM TMTag WHERE uuid = ?")
3164 .bind(uuid.to_string())
3165 .fetch_optional(&self.pool)
3166 .await
3167 .map_err(|e| ThingsError::unknown(format!("Failed to find tag: {e}")))?;
3168
3169 let title: String = tag_row
3170 .ok_or_else(|| ThingsError::unknown(format!("Tag not found: {}", uuid)))?
3171 .get("title");
3172
3173 let task_rows = sqlx::query(
3176 "SELECT uuid, cachedTags FROM TMTask
3177 WHERE cachedTags IS NOT NULL
3178 AND trashed = 0",
3179 )
3180 .fetch_all(&self.pool)
3181 .await
3182 .map_err(|e| ThingsError::unknown(format!("Failed to query tasks with tag: {e}")))?;
3183
3184 let mut task_uuids = Vec::new();
3185 for row in task_rows {
3186 let uuid_str: String = row.get("uuid");
3187 let cached_tags_blob: Option<Vec<u8>> = row.get("cachedTags");
3188
3189 if let Some(blob) = cached_tags_blob {
3191 if let Ok(tags) = deserialize_tags_from_blob(&blob) {
3192 if tags.iter().any(|t| t.eq_ignore_ascii_case(&title)) {
3193 let task_uuid = Uuid::parse_str(&uuid_str)
3194 .unwrap_or_else(|_| things_uuid_to_uuid(&uuid_str));
3195 task_uuids.push(task_uuid);
3196 }
3197 }
3198 }
3199 }
3200
3201 let usage_count = task_uuids.len() as u32;
3202
3203 let mut related_tags: std::collections::HashMap<String, u32> =
3205 std::collections::HashMap::new();
3206
3207 for task_uuid in &task_uuids {
3208 let row = sqlx::query("SELECT cachedTags FROM TMTask WHERE uuid = ?")
3209 .bind(task_uuid.to_string())
3210 .fetch_optional(&self.pool)
3211 .await
3212 .map_err(|e| ThingsError::unknown(format!("Failed to fetch task tags: {e}")))?;
3213
3214 if let Some(row) = row {
3215 let cached_tags_blob: Option<Vec<u8>> = row.get("cachedTags");
3216 if let Some(blob) = cached_tags_blob {
3217 let tags: Vec<String> = deserialize_tags_from_blob(&blob)?;
3218 for tag in tags {
3219 if tag != title {
3220 *related_tags.entry(tag).or_insert(0) += 1;
3221 }
3222 }
3223 }
3224 }
3225 }
3226
3227 let mut related_vec: Vec<(String, u32)> = related_tags.into_iter().collect();
3229 related_vec.sort_by_key(|r| std::cmp::Reverse(r.1));
3230
3231 Ok(crate::models::TagStatistics {
3232 uuid: *uuid,
3233 title,
3234 usage_count,
3235 task_uuids,
3236 related_tags: related_vec,
3237 })
3238 }
3239
3240 #[instrument(skip(self))]
3248 pub async fn find_duplicate_tags(
3249 &self,
3250 min_similarity: f32,
3251 ) -> ThingsResult<Vec<crate::models::TagPair>> {
3252 use crate::database::tag_utils::calculate_similarity;
3253
3254 let all_tags = self.get_all_tags().await?;
3255 let mut pairs = Vec::new();
3256
3257 for i in 0..all_tags.len() {
3259 for j in (i + 1)..all_tags.len() {
3260 let tag1 = &all_tags[i];
3261 let tag2 = &all_tags[j];
3262
3263 let similarity = calculate_similarity(&tag1.title, &tag2.title);
3264
3265 if similarity >= min_similarity {
3266 pairs.push(crate::models::TagPair {
3267 tag1: tag1.clone(),
3268 tag2: tag2.clone(),
3269 similarity,
3270 });
3271 }
3272 }
3273 }
3274
3275 pairs.sort_by(|a, b| {
3277 b.similarity
3278 .partial_cmp(&a.similarity)
3279 .unwrap_or(std::cmp::Ordering::Equal)
3280 });
3281
3282 Ok(pairs)
3283 }
3284
    /// Upper bound on the number of task UUIDs a single bulk operation
    /// (move / date update / complete / delete) will accept.
    const MAX_BULK_BATCH_SIZE: usize = 1000;
3292
3293 #[instrument(skip(self))]
3306 pub async fn bulk_move(
3307 &self,
3308 request: crate::models::BulkMoveRequest,
3309 ) -> ThingsResult<crate::models::BulkOperationResult> {
3310 if request.task_uuids.is_empty() {
3312 return Err(ThingsError::validation("Task UUIDs cannot be empty"));
3313 }
3314 if request.task_uuids.len() > Self::MAX_BULK_BATCH_SIZE {
3315 return Err(ThingsError::validation(format!(
3316 "Batch size {} exceeds maximum of {}",
3317 request.task_uuids.len(),
3318 Self::MAX_BULK_BATCH_SIZE
3319 )));
3320 }
3321 if request.project_uuid.is_none() && request.area_uuid.is_none() {
3322 return Err(ThingsError::validation(
3323 "Must specify either project_uuid or area_uuid",
3324 ));
3325 }
3326
3327 if let Some(project_uuid) = &request.project_uuid {
3329 validators::validate_project_exists(&self.pool, project_uuid).await?;
3330 }
3331 if let Some(area_uuid) = &request.area_uuid {
3332 validators::validate_area_exists(&self.pool, area_uuid).await?;
3333 }
3334
3335 let mut tx = self
3337 .pool
3338 .begin()
3339 .await
3340 .map_err(|e| ThingsError::unknown(format!("Failed to begin transaction: {e}")))?;
3341
3342 let placeholders = request
3344 .task_uuids
3345 .iter()
3346 .map(|_| "?")
3347 .collect::<Vec<_>>()
3348 .join(",");
3349 let query_str = format!(
3350 "SELECT uuid FROM TMTask WHERE uuid IN ({}) AND trashed = 0",
3351 placeholders
3352 );
3353
3354 let mut query = sqlx::query(&query_str);
3355 for uuid in &request.task_uuids {
3356 query = query.bind(uuid.to_string());
3357 }
3358
3359 let found_uuids: Vec<String> = query
3360 .fetch_all(&mut *tx)
3361 .await
3362 .map_err(|e| ThingsError::unknown(format!("Failed to validate tasks: {e}")))?
3363 .iter()
3364 .map(|row| row.get("uuid"))
3365 .collect();
3366
3367 if found_uuids.len() != request.task_uuids.len() {
3369 for uuid in &request.task_uuids {
3371 if !found_uuids.contains(&uuid.to_string()) {
3372 tx.rollback().await.ok();
3373 return Err(ThingsError::TaskNotFound {
3374 uuid: uuid.to_string(),
3375 });
3376 }
3377 }
3378 }
3379
3380 let now = Utc::now().timestamp() as f64;
3382 let placeholders = request
3383 .task_uuids
3384 .iter()
3385 .map(|_| "?")
3386 .collect::<Vec<_>>()
3387 .join(",");
3388 let query_str = format!(
3389 "UPDATE TMTask SET project = ?, area = ?, userModificationDate = ? WHERE uuid IN ({})",
3390 placeholders
3391 );
3392
3393 let mut query = sqlx::query(&query_str)
3394 .bind(request.project_uuid.map(|u| u.to_string()))
3395 .bind(request.area_uuid.map(|u| u.to_string()))
3396 .bind(now);
3397
3398 for uuid in &request.task_uuids {
3399 query = query.bind(uuid.to_string());
3400 }
3401
3402 query
3403 .execute(&mut *tx)
3404 .await
3405 .map_err(|e| ThingsError::unknown(format!("Failed to bulk move tasks: {e}")))?;
3406
3407 tx.commit()
3409 .await
3410 .map_err(|e| ThingsError::unknown(format!("Failed to commit transaction: {e}")))?;
3411
3412 info!("Bulk moved {} task(s)", request.task_uuids.len());
3413 Ok(crate::models::BulkOperationResult {
3414 success: true,
3415 processed_count: request.task_uuids.len(),
3416 message: format!("Successfully moved {} task(s)", request.task_uuids.len()),
3417 })
3418 }
3419
3420 #[instrument(skip(self))]
3433 pub async fn bulk_update_dates(
3434 &self,
3435 request: crate::models::BulkUpdateDatesRequest,
3436 ) -> ThingsResult<crate::models::BulkOperationResult> {
3437 use crate::database::{safe_things_date_to_naive_date, validate_date_range};
3438
3439 if request.task_uuids.is_empty() {
3441 return Err(ThingsError::validation("Task UUIDs cannot be empty"));
3442 }
3443 if request.task_uuids.len() > Self::MAX_BULK_BATCH_SIZE {
3444 return Err(ThingsError::validation(format!(
3445 "Batch size {} exceeds maximum of {}",
3446 request.task_uuids.len(),
3447 Self::MAX_BULK_BATCH_SIZE
3448 )));
3449 }
3450
3451 if let (Some(start), Some(deadline)) = (request.start_date, request.deadline) {
3453 validate_date_range(Some(start), Some(deadline))?;
3454 }
3455
3456 let mut tx = self
3458 .pool
3459 .begin()
3460 .await
3461 .map_err(|e| ThingsError::unknown(format!("Failed to begin transaction: {e}")))?;
3462
3463 let placeholders = request
3465 .task_uuids
3466 .iter()
3467 .map(|_| "?")
3468 .collect::<Vec<_>>()
3469 .join(",");
3470 let query_str = format!(
3471 "SELECT uuid, startDate, deadline FROM TMTask WHERE uuid IN ({}) AND trashed = 0",
3472 placeholders
3473 );
3474
3475 let mut query = sqlx::query(&query_str);
3476 for uuid in &request.task_uuids {
3477 query = query.bind(uuid.to_string());
3478 }
3479
3480 let rows = query
3481 .fetch_all(&mut *tx)
3482 .await
3483 .map_err(|e| ThingsError::unknown(format!("Failed to validate tasks: {e}")))?;
3484
3485 if rows.len() != request.task_uuids.len() {
3487 let found_uuids: Vec<String> = rows.iter().map(|row| row.get("uuid")).collect();
3489 for uuid in &request.task_uuids {
3490 if !found_uuids.contains(&uuid.to_string()) {
3491 tx.rollback().await.ok();
3492 return Err(ThingsError::TaskNotFound {
3493 uuid: uuid.to_string(),
3494 });
3495 }
3496 }
3497 }
3498
3499 for row in &rows {
3501 let current_start: Option<i64> = row.get("startDate");
3502 let current_deadline: Option<i64> = row.get("deadline");
3503
3504 let final_start = if request.clear_start_date {
3505 None
3506 } else if let Some(new_start) = request.start_date {
3507 Some(new_start)
3508 } else {
3509 current_start.and_then(|ts| safe_things_date_to_naive_date(ts).ok())
3510 };
3511
3512 let final_deadline = if request.clear_deadline {
3513 None
3514 } else if let Some(new_deadline) = request.deadline {
3515 Some(new_deadline)
3516 } else {
3517 current_deadline.and_then(|ts| safe_things_date_to_naive_date(ts).ok())
3518 };
3519
3520 validate_date_range(final_start, final_deadline)?;
3521 }
3522
3523 let now = Utc::now().timestamp() as f64;
3525 let placeholders = request
3526 .task_uuids
3527 .iter()
3528 .map(|_| "?")
3529 .collect::<Vec<_>>()
3530 .join(",");
3531
3532 let start_date_value = if request.clear_start_date {
3533 None
3534 } else {
3535 request.start_date.map(naive_date_to_things_timestamp)
3536 };
3537
3538 let deadline_value = if request.clear_deadline {
3539 None
3540 } else {
3541 request.deadline.map(naive_date_to_things_timestamp)
3542 };
3543
3544 let query_str = format!(
3545 "UPDATE TMTask SET startDate = ?, deadline = ?, userModificationDate = ? WHERE uuid IN ({})",
3546 placeholders
3547 );
3548
3549 let mut query = sqlx::query(&query_str)
3550 .bind(start_date_value)
3551 .bind(deadline_value)
3552 .bind(now);
3553
3554 for uuid in &request.task_uuids {
3555 query = query.bind(uuid.to_string());
3556 }
3557
3558 query
3559 .execute(&mut *tx)
3560 .await
3561 .map_err(|e| ThingsError::unknown(format!("Failed to bulk update dates: {e}")))?;
3562
3563 tx.commit()
3564 .await
3565 .map_err(|e| ThingsError::unknown(format!("Failed to commit transaction: {e}")))?;
3566
3567 info!(
3568 "Bulk updated dates for {} task(s)",
3569 request.task_uuids.len()
3570 );
3571 Ok(crate::models::BulkOperationResult {
3572 success: true,
3573 processed_count: request.task_uuids.len(),
3574 message: format!(
3575 "Successfully updated dates for {} task(s)",
3576 request.task_uuids.len()
3577 ),
3578 })
3579 }
3580
3581 #[instrument(skip(self))]
3592 pub async fn bulk_complete(
3593 &self,
3594 request: crate::models::BulkCompleteRequest,
3595 ) -> ThingsResult<crate::models::BulkOperationResult> {
3596 if request.task_uuids.is_empty() {
3598 return Err(ThingsError::validation("Task UUIDs cannot be empty"));
3599 }
3600 if request.task_uuids.len() > Self::MAX_BULK_BATCH_SIZE {
3601 return Err(ThingsError::validation(format!(
3602 "Batch size {} exceeds maximum of {}",
3603 request.task_uuids.len(),
3604 Self::MAX_BULK_BATCH_SIZE
3605 )));
3606 }
3607
3608 let mut tx = self
3610 .pool
3611 .begin()
3612 .await
3613 .map_err(|e| ThingsError::unknown(format!("Failed to begin transaction: {e}")))?;
3614
3615 let placeholders = request
3617 .task_uuids
3618 .iter()
3619 .map(|_| "?")
3620 .collect::<Vec<_>>()
3621 .join(",");
3622 let query_str = format!(
3623 "SELECT uuid FROM TMTask WHERE uuid IN ({}) AND trashed = 0",
3624 placeholders
3625 );
3626
3627 let mut query = sqlx::query(&query_str);
3628 for uuid in &request.task_uuids {
3629 query = query.bind(uuid.to_string());
3630 }
3631
3632 let found_uuids: Vec<String> = query
3633 .fetch_all(&mut *tx)
3634 .await
3635 .map_err(|e| ThingsError::unknown(format!("Failed to validate tasks: {e}")))?
3636 .iter()
3637 .map(|row| row.get("uuid"))
3638 .collect();
3639
3640 if found_uuids.len() != request.task_uuids.len() {
3642 for uuid in &request.task_uuids {
3644 if !found_uuids.contains(&uuid.to_string()) {
3645 tx.rollback().await.ok();
3646 return Err(ThingsError::TaskNotFound {
3647 uuid: uuid.to_string(),
3648 });
3649 }
3650 }
3651 }
3652
3653 let now = Utc::now().timestamp() as f64;
3655 let placeholders = request
3656 .task_uuids
3657 .iter()
3658 .map(|_| "?")
3659 .collect::<Vec<_>>()
3660 .join(",");
3661 let query_str = format!(
3662 "UPDATE TMTask SET status = 1, stopDate = ?, userModificationDate = ? WHERE uuid IN ({})",
3663 placeholders
3664 );
3665
3666 let mut query = sqlx::query(&query_str).bind(now).bind(now);
3667
3668 for uuid in &request.task_uuids {
3669 query = query.bind(uuid.to_string());
3670 }
3671
3672 query
3673 .execute(&mut *tx)
3674 .await
3675 .map_err(|e| ThingsError::unknown(format!("Failed to bulk complete tasks: {e}")))?;
3676
3677 tx.commit()
3679 .await
3680 .map_err(|e| ThingsError::unknown(format!("Failed to commit transaction: {e}")))?;
3681
3682 info!("Bulk completed {} task(s)", request.task_uuids.len());
3683 Ok(crate::models::BulkOperationResult {
3684 success: true,
3685 processed_count: request.task_uuids.len(),
3686 message: format!(
3687 "Successfully completed {} task(s)",
3688 request.task_uuids.len()
3689 ),
3690 })
3691 }
3692
3693 #[instrument(skip(self))]
3704 pub async fn bulk_delete(
3705 &self,
3706 request: crate::models::BulkDeleteRequest,
3707 ) -> ThingsResult<crate::models::BulkOperationResult> {
3708 if request.task_uuids.is_empty() {
3710 return Err(ThingsError::validation("Task UUIDs cannot be empty"));
3711 }
3712 if request.task_uuids.len() > Self::MAX_BULK_BATCH_SIZE {
3713 return Err(ThingsError::validation(format!(
3714 "Batch size {} exceeds maximum of {}",
3715 request.task_uuids.len(),
3716 Self::MAX_BULK_BATCH_SIZE
3717 )));
3718 }
3719
3720 let mut tx = self
3722 .pool
3723 .begin()
3724 .await
3725 .map_err(|e| ThingsError::unknown(format!("Failed to begin transaction: {e}")))?;
3726
3727 let placeholders = request
3729 .task_uuids
3730 .iter()
3731 .map(|_| "?")
3732 .collect::<Vec<_>>()
3733 .join(",");
3734 let query_str = format!(
3735 "SELECT uuid FROM TMTask WHERE uuid IN ({}) AND trashed = 0",
3736 placeholders
3737 );
3738
3739 let mut query = sqlx::query(&query_str);
3740 for uuid in &request.task_uuids {
3741 query = query.bind(uuid.to_string());
3742 }
3743
3744 let found_uuids: Vec<String> = query
3745 .fetch_all(&mut *tx)
3746 .await
3747 .map_err(|e| ThingsError::unknown(format!("Failed to validate tasks: {e}")))?
3748 .iter()
3749 .map(|row| row.get("uuid"))
3750 .collect();
3751
3752 if found_uuids.len() != request.task_uuids.len() {
3754 for uuid in &request.task_uuids {
3756 if !found_uuids.contains(&uuid.to_string()) {
3757 tx.rollback().await.ok();
3758 return Err(ThingsError::TaskNotFound {
3759 uuid: uuid.to_string(),
3760 });
3761 }
3762 }
3763 }
3764
3765 let now = Utc::now().timestamp() as f64;
3767 let placeholders = request
3768 .task_uuids
3769 .iter()
3770 .map(|_| "?")
3771 .collect::<Vec<_>>()
3772 .join(",");
3773 let query_str = format!(
3774 "UPDATE TMTask SET trashed = 1, userModificationDate = ? WHERE uuid IN ({})",
3775 placeholders
3776 );
3777
3778 let mut query = sqlx::query(&query_str).bind(now);
3779
3780 for uuid in &request.task_uuids {
3781 query = query.bind(uuid.to_string());
3782 }
3783
3784 query
3785 .execute(&mut *tx)
3786 .await
3787 .map_err(|e| ThingsError::unknown(format!("Failed to bulk delete tasks: {e}")))?;
3788
3789 tx.commit()
3791 .await
3792 .map_err(|e| ThingsError::unknown(format!("Failed to commit transaction: {e}")))?;
3793
3794 info!("Bulk deleted {} task(s)", request.task_uuids.len());
3795 Ok(crate::models::BulkOperationResult {
3796 success: true,
3797 processed_count: request.task_uuids.len(),
3798 message: format!("Successfully deleted {} task(s)", request.task_uuids.len()),
3799 })
3800 }
3801}
3802
/// Aggregate item counts reported for a Things database.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabaseStats {
    /// Number of tasks.
    pub task_count: u64,
    /// Number of projects.
    pub project_count: u64,
    /// Number of areas.
    pub area_count: u64,
}
3810
3811impl DatabaseStats {
3812 #[must_use]
3813 pub fn total_items(&self) -> u64 {
3814 self.task_count + self.project_count + self.area_count
3815 }
3816}
3817
/// Best-effort default location of the Things 3 SQLite database on macOS.
///
/// NOTE(review): when `$HOME` is unset this falls back to a literal `~`,
/// which is not shell-expanded; the container directory ("ThingsData-0Z0Z2")
/// also looks installation-specific — confirm against target machines.
#[must_use]
pub fn get_default_database_path() -> PathBuf {
    let home = std::env::var("HOME").unwrap_or_else(|_| "~".to_string());
    let path = format!(
        "{home}/Library/Group Containers/JLMPQHK86H.com.culturedcode.ThingsMac/ThingsData-0Z0Z2/Things Database.thingsdatabase/main.sqlite"
    );
    PathBuf::from(path)
}
3836
3837#[cfg(test)]
3838mod tests {
3839 use super::*;
3840 use tempfile::{NamedTempFile, TempDir};
3841
    #[tokio::test]
    async fn test_database_connection() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");

        // Opening a path where no database file exists is expected to fail
        // (presumably the connector does not create a fresh Things database
        // — TODO confirm against ThingsDatabase::new).
        let result = super::ThingsDatabase::new(&db_path).await;
        assert!(result.is_err());
    }
3852
    #[tokio::test]
    async fn test_connection_string() {
        // An in-memory SQLite connection string should connect successfully.
        let result = super::ThingsDatabase::from_connection_string("sqlite::memory:").await;
        assert!(result.is_ok());
    }
3858
3859 #[test]
3860 fn test_task_status_from_i32() {
3861 assert_eq!(TaskStatus::from_i32(0), Some(TaskStatus::Incomplete));
3862 assert_eq!(TaskStatus::from_i32(1), Some(TaskStatus::Completed));
3863 assert_eq!(TaskStatus::from_i32(2), Some(TaskStatus::Canceled));
3864 assert_eq!(TaskStatus::from_i32(3), Some(TaskStatus::Trashed));
3865 assert_eq!(TaskStatus::from_i32(4), None);
3866 assert_eq!(TaskStatus::from_i32(-1), None);
3867 }
3868
3869 #[test]
3870 fn test_task_type_from_i32() {
3871 assert_eq!(TaskType::from_i32(0), Some(TaskType::Todo));
3872 assert_eq!(TaskType::from_i32(1), Some(TaskType::Project));
3873 assert_eq!(TaskType::from_i32(2), Some(TaskType::Heading));
3874 assert_eq!(TaskType::from_i32(3), Some(TaskType::Area));
3875 assert_eq!(TaskType::from_i32(4), None);
3876 assert_eq!(TaskType::from_i32(-1), None);
3877 }
3878
3879 #[test]
3880 fn test_database_stats_total_items() {
3881 let stats = DatabaseStats {
3882 task_count: 10,
3883 project_count: 5,
3884 area_count: 3,
3885 };
3886 assert_eq!(stats.total_items(), 18);
3887
3888 let empty_stats = DatabaseStats {
3889 task_count: 0,
3890 project_count: 0,
3891 area_count: 0,
3892 };
3893 assert_eq!(empty_stats.total_items(), 0);
3894 }
3895
    /// Pins the `DatabasePoolConfig::default()` values so an accidental
    /// change to the pool tuning is caught by CI.
    #[test]
    fn test_database_pool_config_default() {
        let config = DatabasePoolConfig::default();
        assert_eq!(config.max_connections, 10);
        assert_eq!(config.min_connections, 1);
        assert_eq!(config.connect_timeout, Duration::from_secs(30));
        assert_eq!(config.idle_timeout, Duration::from_secs(600));
        assert_eq!(config.max_lifetime, Duration::from_secs(1800));
        assert!(config.test_before_acquire);
    }
3906
    /// Pins the `SqliteOptimizations::default()` values so an accidental
    /// change to the SQLite PRAGMA tuning is caught by CI.
    #[test]
    fn test_sqlite_optimizations_default() {
        let opts = SqliteOptimizations::default();
        assert!(opts.enable_wal_mode);
        // Negative cache_size follows SQLite's PRAGMA convention of a size
        // budget in KiB rather than a page count.
        assert_eq!(opts.cache_size, -20000);
        assert_eq!(opts.synchronous_mode, "NORMAL".to_string());
        assert_eq!(opts.temp_store, "MEMORY".to_string());
        assert_eq!(opts.journal_mode, "WAL".to_string());
        // 256 MiB mmap window.
        assert_eq!(opts.mmap_size, 268_435_456);
        assert!(opts.enable_foreign_keys);
        assert!(opts.enable_query_planner);
    }
3919
    /// `PoolHealthStatus` is a plain data struct: constructing it and
    /// reading the fields back should round-trip unchanged.
    #[test]
    fn test_pool_health_status_creation() {
        let status = PoolHealthStatus {
            is_healthy: true,
            pool_size: 8,
            active_connections: 5,
            idle_connections: 3,
            max_connections: 10,
            min_connections: 1,
            connection_timeout: Duration::from_secs(30),
            idle_timeout: Some(Duration::from_secs(600)),
            max_lifetime: Some(Duration::from_secs(1800)),
        };
        assert!(status.is_healthy);
        assert_eq!(status.active_connections, 5);
        assert_eq!(status.idle_connections, 3);
        assert_eq!(status.pool_size, 8);
    }
3938
3939 #[test]
3940 fn test_pool_metrics_creation() {
3941 let metrics = PoolMetrics {
3942 pool_size: 8,
3943 active_connections: 5,
3944 idle_connections: 3,
3945 max_connections: 10,
3946 min_connections: 1,
3947 utilization_percentage: 80.0,
3948 is_healthy: true,
3949 response_time_ms: 50,
3950 connection_timeout: Duration::from_secs(30),
3951 idle_timeout: Some(Duration::from_secs(600)),
3952 max_lifetime: Some(Duration::from_secs(1800)),
3953 };
3954 assert!(metrics.is_healthy);
3955 assert_eq!(metrics.pool_size, 8);
3956 assert_eq!(metrics.active_connections, 5);
3957 assert_eq!(metrics.idle_connections, 3);
3958 assert!((metrics.utilization_percentage - 80.0).abs() < f64::EPSILON);
3959 assert_eq!(metrics.response_time_ms, 50);
3960 }
3961
3962 #[test]
3963 fn test_comprehensive_health_status_creation() {
3964 let pool_health = PoolHealthStatus {
3965 is_healthy: true,
3966 pool_size: 8,
3967 active_connections: 5,
3968 idle_connections: 3,
3969 max_connections: 10,
3970 min_connections: 1,
3971 connection_timeout: Duration::from_secs(30),
3972 idle_timeout: Some(Duration::from_secs(600)),
3973 max_lifetime: Some(Duration::from_secs(1800)),
3974 };
3975
3976 let pool_metrics = PoolMetrics {
3977 pool_size: 8,
3978 active_connections: 5,
3979 idle_connections: 3,
3980 max_connections: 10,
3981 min_connections: 1,
3982 utilization_percentage: 80.0,
3983 is_healthy: true,
3984 response_time_ms: 50,
3985 connection_timeout: Duration::from_secs(30),
3986 idle_timeout: Some(Duration::from_secs(600)),
3987 max_lifetime: Some(Duration::from_secs(1800)),
3988 };
3989
3990 let db_stats = DatabaseStats {
3991 task_count: 50,
3992 project_count: 10,
3993 area_count: 5,
3994 };
3995
3996 let health_status = ComprehensiveHealthStatus {
3997 overall_healthy: true,
3998 pool_health,
3999 pool_metrics,
4000 database_stats: db_stats,
4001 timestamp: Utc::now(),
4002 };
4003
4004 assert!(health_status.overall_healthy);
4005 assert_eq!(health_status.database_stats.total_items(), 65);
4006 }
4007
4008 #[test]
4009 fn test_safe_timestamp_convert_edge_cases() {
4010 assert_eq!(safe_timestamp_convert(1_609_459_200.0), 1_609_459_200); assert_eq!(safe_timestamp_convert(0.0), 0);
4015
4016 assert_eq!(safe_timestamp_convert(-1.0), 0);
4018
4019 assert_eq!(safe_timestamp_convert(f64::INFINITY), 0);
4021
4022 assert_eq!(safe_timestamp_convert(f64::NAN), 0);
4024
4025 assert_eq!(safe_timestamp_convert(5_000_000_000.0), 0);
4027
4028 let max_timestamp = 4_102_444_800_f64; assert_eq!(safe_timestamp_convert(max_timestamp), 4_102_444_800);
4031 }
4032
4033 #[test]
4034 fn test_things_uuid_to_uuid_consistency() {
4035 let things_id = "test-id-123";
4037 let uuid1 = things_uuid_to_uuid(things_id);
4038 let uuid2 = things_uuid_to_uuid(things_id);
4039 assert_eq!(uuid1, uuid2, "UUIDs should be consistent for same input");
4040
4041 let uuid3 = things_uuid_to_uuid("different-id");
4043 assert_ne!(
4044 uuid1, uuid3,
4045 "Different inputs should produce different UUIDs"
4046 );
4047
4048 let uuid_empty = things_uuid_to_uuid("");
4050 assert!(!uuid_empty.to_string().is_empty());
4051
4052 let long_string = "a".repeat(1000);
4054 let uuid_long = things_uuid_to_uuid(&long_string);
4055 assert!(!uuid_long.to_string().is_empty());
4056 }
4057
4058 #[test]
4059 fn test_task_status_from_i32_all_variants() {
4060 assert_eq!(TaskStatus::from_i32(0), Some(TaskStatus::Incomplete));
4061 assert_eq!(TaskStatus::from_i32(1), Some(TaskStatus::Completed));
4062 assert_eq!(TaskStatus::from_i32(2), Some(TaskStatus::Canceled));
4063 assert_eq!(TaskStatus::from_i32(3), Some(TaskStatus::Trashed));
4064 assert_eq!(TaskStatus::from_i32(999), None);
4065 assert_eq!(TaskStatus::from_i32(-1), None);
4066 }
4067
4068 #[test]
4069 fn test_task_type_from_i32_all_variants() {
4070 assert_eq!(TaskType::from_i32(0), Some(TaskType::Todo));
4071 assert_eq!(TaskType::from_i32(1), Some(TaskType::Project));
4072 assert_eq!(TaskType::from_i32(2), Some(TaskType::Heading));
4073 assert_eq!(TaskType::from_i32(3), Some(TaskType::Area));
4074 assert_eq!(TaskType::from_i32(999), None);
4075 assert_eq!(TaskType::from_i32(-1), None);
4076 }
4077
4078 #[test]
4079 fn test_database_pool_config_default_values() {
4080 let config = DatabasePoolConfig::default();
4081 assert_eq!(config.max_connections, 10);
4082 assert_eq!(config.min_connections, 1);
4083 assert_eq!(config.connect_timeout, Duration::from_secs(30));
4084 assert_eq!(config.idle_timeout, Duration::from_secs(600));
4085 assert_eq!(config.max_lifetime, Duration::from_secs(1800));
4086 assert!(config.test_before_acquire);
4087 }
4088
4089 #[test]
4090 fn test_database_stats_total_items_calculation() {
4091 let stats = DatabaseStats {
4092 task_count: 10,
4093 project_count: 5,
4094 area_count: 3,
4095 };
4096 assert_eq!(stats.total_items(), 18); let empty_stats = DatabaseStats {
4100 task_count: 0,
4101 project_count: 0,
4102 area_count: 0,
4103 };
4104 assert_eq!(empty_stats.total_items(), 0);
4105 }
4106
4107 #[test]
4108 fn test_pool_health_status_creation_comprehensive() {
4109 let status = PoolHealthStatus {
4110 is_healthy: true,
4111 pool_size: 8,
4112 active_connections: 2,
4113 idle_connections: 3,
4114 max_connections: 10,
4115 min_connections: 1,
4116 connection_timeout: Duration::from_secs(30),
4117 idle_timeout: Some(Duration::from_secs(600)),
4118 max_lifetime: Some(Duration::from_secs(1800)),
4119 };
4120 assert!(status.is_healthy);
4121 assert_eq!(status.pool_size, 8);
4122 assert_eq!(status.max_connections, 10);
4123 }
4124
4125 #[test]
4126 fn test_pool_metrics_creation_comprehensive() {
4127 let metrics = PoolMetrics {
4128 pool_size: 8,
4129 active_connections: 5,
4130 idle_connections: 3,
4131 max_connections: 10,
4132 min_connections: 1,
4133 utilization_percentage: 80.0,
4134 is_healthy: true,
4135 response_time_ms: 50,
4136 connection_timeout: Duration::from_secs(30),
4137 idle_timeout: Some(Duration::from_secs(600)),
4138 max_lifetime: Some(Duration::from_secs(1800)),
4139 };
4140 assert_eq!(metrics.pool_size, 8);
4141 assert_eq!(metrics.response_time_ms, 50);
4142 assert!(metrics.is_healthy);
4143 }
4144
4145 #[test]
4146 fn test_comprehensive_health_status_creation_full() {
4147 let pool_health = PoolHealthStatus {
4148 is_healthy: true,
4149 pool_size: 8,
4150 active_connections: 2,
4151 idle_connections: 3,
4152 max_connections: 10,
4153 min_connections: 1,
4154 connection_timeout: Duration::from_secs(30),
4155 idle_timeout: Some(Duration::from_secs(600)),
4156 max_lifetime: Some(Duration::from_secs(1800)),
4157 };
4158
4159 let pool_metrics = PoolMetrics {
4160 pool_size: 8,
4161 active_connections: 5,
4162 idle_connections: 3,
4163 max_connections: 10,
4164 min_connections: 1,
4165 utilization_percentage: 80.0,
4166 is_healthy: true,
4167 response_time_ms: 50,
4168 connection_timeout: Duration::from_secs(30),
4169 idle_timeout: Some(Duration::from_secs(600)),
4170 max_lifetime: Some(Duration::from_secs(1800)),
4171 };
4172
4173 let database_stats = DatabaseStats {
4174 task_count: 100,
4175 project_count: 20,
4176 area_count: 5,
4177 };
4178
4179 let status = ComprehensiveHealthStatus {
4180 overall_healthy: true,
4181 pool_health,
4182 pool_metrics,
4183 database_stats,
4184 timestamp: Utc::now(),
4185 };
4186
4187 assert!(status.overall_healthy);
4188 assert_eq!(status.database_stats.total_items(), 125);
4189 }
4190
4191 #[test]
4192 fn test_sqlite_optimizations_default_values() {
4193 let opts = SqliteOptimizations::default();
4194 assert!(opts.enable_wal_mode);
4195 assert!(opts.enable_foreign_keys);
4196 assert_eq!(opts.cache_size, -20000);
4197 assert_eq!(opts.temp_store, "MEMORY");
4198 assert_eq!(opts.mmap_size, 268_435_456);
4199 assert_eq!(opts.synchronous_mode, "NORMAL");
4200 assert_eq!(opts.journal_mode, "WAL");
4201 }
4202
4203 #[test]
4204 fn test_get_default_database_path_format() {
4205 let path = get_default_database_path();
4206 let path_str = path.to_string_lossy();
4207 assert!(path_str.contains("Things Database.thingsdatabase"));
4208 assert!(path_str.contains("main.sqlite"));
4209 assert!(path_str.contains("Library/Group Containers"));
4210 }
4211
4212 #[tokio::test]
4213 async fn test_database_new_with_config() {
4214 let temp_file = NamedTempFile::new().unwrap();
4215 let db_path = temp_file.path();
4216
4217 crate::test_utils::create_test_database(db_path)
4218 .await
4219 .unwrap();
4220
4221 let config = DatabasePoolConfig {
4222 max_connections: 5,
4223 min_connections: 1,
4224 connect_timeout: Duration::from_secs(10),
4225 idle_timeout: Duration::from_secs(300),
4226 max_lifetime: Duration::from_secs(900),
4227 test_before_acquire: true,
4228 sqlite_optimizations: SqliteOptimizations::default(),
4229 };
4230
4231 let database = ThingsDatabase::new_with_config(db_path, config)
4232 .await
4233 .unwrap();
4234 let pool = database.pool();
4235 assert!(!pool.is_closed());
4236 }
4237
4238 #[tokio::test]
4239 async fn test_database_error_handling_invalid_path() {
4240 let result = ThingsDatabase::new(Path::new("/non/existent/path.db")).await;
4242 assert!(result.is_err(), "Should fail with non-existent path");
4243 }
4244
4245 #[tokio::test]
4246 async fn test_database_get_stats() {
4247 let temp_file = NamedTempFile::new().unwrap();
4248 let db_path = temp_file.path();
4249
4250 crate::test_utils::create_test_database(db_path)
4251 .await
4252 .unwrap();
4253 let database = ThingsDatabase::new(db_path).await.unwrap();
4254
4255 let stats = database.get_stats().await.unwrap();
4256 assert!(stats.task_count > 0, "Should have test tasks");
4257 assert!(stats.area_count > 0, "Should have test areas");
4258 assert!(stats.total_items() > 0, "Should have total items");
4259 }
4260
4261 #[tokio::test]
4262 async fn test_database_comprehensive_health_check() {
4263 let temp_file = NamedTempFile::new().unwrap();
4264 let db_path = temp_file.path();
4265
4266 crate::test_utils::create_test_database(db_path)
4267 .await
4268 .unwrap();
4269 let database = ThingsDatabase::new(db_path).await.unwrap();
4270
4271 let health = database.comprehensive_health_check().await.unwrap();
4272 assert!(health.overall_healthy, "Database should be healthy");
4273 assert!(health.pool_health.is_healthy, "Pool should be healthy");
4274 assert!(
4275 health.pool_metrics.is_healthy,
4276 "Pool metrics should be healthy"
4277 );
4278 }
4279
4280 #[test]
4285 fn test_things_date_negative_returns_none() {
4286 assert_eq!(things_date_to_naive_date(-1), None);
4288 assert_eq!(things_date_to_naive_date(-100), None);
4289 assert_eq!(things_date_to_naive_date(i64::MIN), None);
4290 }
4291
4292 #[test]
4293 fn test_things_date_zero_returns_none() {
4294 assert_eq!(things_date_to_naive_date(0), None);
4296 }
4297
4298 #[test]
4299 fn test_things_date_boundary_2001() {
4300 use chrono::Datelike;
4301 let result = things_date_to_naive_date(1);
4303 assert!(result.is_some());
4304
4305 let date = result.unwrap();
4306 assert_eq!(date.year(), 2001);
4307 assert_eq!(date.month(), 1);
4308 assert_eq!(date.day(), 1);
4309 }
4310
4311 #[test]
4312 fn test_things_date_one_day() {
4313 use chrono::Datelike;
4314 let seconds_per_day = 86400i64;
4316 let result = things_date_to_naive_date(seconds_per_day);
4317 assert!(result.is_some());
4318
4319 let date = result.unwrap();
4320 assert_eq!(date.year(), 2001);
4321 assert_eq!(date.month(), 1);
4322 assert_eq!(date.day(), 2);
4323 }
4324
4325 #[test]
4326 fn test_things_date_one_year() {
4327 use chrono::Datelike;
4328 let seconds_per_year = 365 * 86400i64;
4330 let result = things_date_to_naive_date(seconds_per_year);
4331 assert!(result.is_some());
4332
4333 let date = result.unwrap();
4334 assert_eq!(date.year(), 2002);
4335 }
4336
4337 #[test]
4338 fn test_things_date_current_era() {
4339 use chrono::Datelike;
4340 let days_to_2024 = 8401i64;
4344 let seconds_to_2024 = days_to_2024 * 86400;
4345
4346 let result = things_date_to_naive_date(seconds_to_2024);
4347 assert!(result.is_some());
4348
4349 let date = result.unwrap();
4350 assert_eq!(date.year(), 2024);
4351 }
4352
4353 #[test]
4354 fn test_things_date_leap_year() {
4355 use chrono::{Datelike, TimeZone, Utc};
4356 let base_date = Utc.with_ymd_and_hms(2001, 1, 1, 0, 0, 0).single().unwrap();
4359 let target_date = Utc.with_ymd_and_hms(2004, 2, 29, 0, 0, 0).single().unwrap();
4360 let seconds_diff = (target_date - base_date).num_seconds();
4361
4362 let result = things_date_to_naive_date(seconds_diff);
4363 assert!(result.is_some());
4364
4365 let date = result.unwrap();
4366 assert_eq!(date.year(), 2004);
4367 assert_eq!(date.month(), 2);
4368 assert_eq!(date.day(), 29);
4369 }
4370
4371 #[test]
4376 fn test_uuid_conversion_consistency() {
4377 let input = "ABC123";
4379 let uuid1 = things_uuid_to_uuid(input);
4380 let uuid2 = things_uuid_to_uuid(input);
4381
4382 assert_eq!(uuid1, uuid2);
4383 }
4384
4385 #[test]
4386 fn test_uuid_conversion_uniqueness() {
4387 let uuid1 = things_uuid_to_uuid("ABC123");
4389 let uuid2 = things_uuid_to_uuid("ABC124");
4390 let uuid3 = things_uuid_to_uuid("XYZ789");
4391
4392 assert_ne!(uuid1, uuid2);
4393 assert_ne!(uuid1, uuid3);
4394 assert_ne!(uuid2, uuid3);
4395 }
4396
4397 #[test]
4398 fn test_uuid_conversion_empty_string() {
4399 let uuid = things_uuid_to_uuid("");
4401 assert!(!uuid.to_string().is_empty());
4402 }
4403
4404 #[test]
4405 fn test_uuid_conversion_special_characters() {
4406 let uuid1 = things_uuid_to_uuid("test-with-dashes");
4408 let uuid2 = things_uuid_to_uuid("test_with_underscores");
4409 let uuid3 = things_uuid_to_uuid("test.with.dots");
4410
4411 assert_ne!(uuid1, uuid2);
4413 assert_ne!(uuid1, uuid3);
4414 assert_ne!(uuid2, uuid3);
4415 }
4416
4417 #[test]
4422 fn test_safe_timestamp_convert_normal_values() {
4423 let ts = 1_700_000_000.0; let result = safe_timestamp_convert(ts);
4426 assert_eq!(result, 1_700_000_000);
4427 }
4428
4429 #[test]
4430 fn test_safe_timestamp_convert_zero() {
4431 assert_eq!(safe_timestamp_convert(0.0), 0);
4433 }
4434
4435 #[test]
4436 fn test_safe_timestamp_convert_negative() {
4437 assert_eq!(safe_timestamp_convert(-1.0), 0);
4439 assert_eq!(safe_timestamp_convert(-1000.0), 0);
4440 }
4441
4442 #[test]
4443 fn test_safe_timestamp_convert_infinity() {
4444 assert_eq!(safe_timestamp_convert(f64::INFINITY), 0);
4446 assert_eq!(safe_timestamp_convert(f64::NEG_INFINITY), 0);
4447 }
4448
4449 #[test]
4450 fn test_safe_timestamp_convert_nan() {
4451 assert_eq!(safe_timestamp_convert(f64::NAN), 0);
4453 }
4454
4455 #[test]
4456 fn test_date_roundtrip_known_dates() {
4457 use chrono::{Datelike, TimeZone, Utc};
4458 let test_cases = vec![
4462 (2001, 1, 2), (2010, 6, 15),
4464 (2020, 12, 31),
4465 (2024, 2, 29), (2025, 7, 4),
4467 ];
4468
4469 for (year, month, day) in test_cases {
4470 let base_date = Utc.with_ymd_and_hms(2001, 1, 1, 0, 0, 0).single().unwrap();
4471 let target_date = Utc
4472 .with_ymd_and_hms(year, month, day, 0, 0, 0)
4473 .single()
4474 .unwrap();
4475 let seconds = (target_date - base_date).num_seconds();
4476
4477 let converted = things_date_to_naive_date(seconds);
4478 assert!(
4479 converted.is_some(),
4480 "Failed to convert {}-{:02}-{:02}",
4481 year,
4482 month,
4483 day
4484 );
4485
4486 let result_date = converted.unwrap();
4487 assert_eq!(
4488 result_date.year(),
4489 year,
4490 "Year mismatch for {}-{:02}-{:02}",
4491 year,
4492 month,
4493 day
4494 );
4495 assert_eq!(
4496 result_date.month(),
4497 month,
4498 "Month mismatch for {}-{:02}-{:02}",
4499 year,
4500 month,
4501 day
4502 );
4503 assert_eq!(
4504 result_date.day(),
4505 day,
4506 "Day mismatch for {}-{:02}-{:02}",
4507 year,
4508 month,
4509 day
4510 );
4511 }
4512 }
4513
4514 #[cfg(feature = "advanced-queries")]
4515 mod query_tasks_tests {
4516 use super::*;
4517 use crate::models::TaskFilters;
4518 use crate::query::TaskQueryBuilder;
4519 use tempfile::NamedTempFile;
4520
4521 async fn open_test_db() -> (ThingsDatabase, NamedTempFile) {
4522 let f = NamedTempFile::new().unwrap();
4523 crate::test_utils::create_test_database(f.path())
4524 .await
4525 .unwrap();
4526 let db = ThingsDatabase::new(f.path()).await.unwrap();
4527 (db, f)
4528 }
4529
4530 #[tokio::test]
4531 async fn test_query_tasks_no_filters() {
4532 let (db, _f) = open_test_db().await;
4533 let result = db.query_tasks(&TaskFilters::default()).await;
4534 assert!(result.is_ok());
4535 }
4536
4537 #[tokio::test]
4538 async fn test_query_tasks_status_filter() {
4539 let (db, _f) = open_test_db().await;
4540 let filters = TaskFilters {
4541 status: Some(TaskStatus::Completed),
4542 ..TaskFilters::default()
4543 };
4544 let tasks = db.query_tasks(&filters).await.unwrap();
4545 assert!(tasks.iter().all(|t| t.status == TaskStatus::Completed));
4546 }
4547
4548 #[tokio::test]
4549 async fn test_query_tasks_limit() {
4550 let (db, _f) = open_test_db().await;
4551 let filters = TaskFilters {
4552 limit: Some(1),
4553 ..TaskFilters::default()
4554 };
4555 let tasks = db.query_tasks(&filters).await.unwrap();
4556 assert!(tasks.len() <= 1);
4557 }
4558
4559 #[tokio::test]
4560 async fn test_query_tasks_tag_filter_and_semantics() {
4561 let (db, _f) = open_test_db().await;
4562 let filters = TaskFilters {
4563 tags: Some(vec!["nonexistent-tag-xyz".to_string()]),
4564 ..TaskFilters::default()
4565 };
4566 let tasks = db.query_tasks(&filters).await.unwrap();
4567 assert!(tasks.is_empty());
4568 }
4569
4570 #[tokio::test]
4571 async fn test_query_tasks_search_query() {
4572 let (db, _f) = open_test_db().await;
4573 let filters = TaskFilters {
4574 search_query: Some("zzznomatch".to_string()),
4575 ..TaskFilters::default()
4576 };
4577 let tasks = db.query_tasks(&filters).await.unwrap();
4578 assert!(tasks.is_empty());
4579 }
4580
4581 #[tokio::test]
4582 async fn test_query_tasks_trashed_status() {
4583 use sqlx::SqlitePool;
4584 use uuid::Uuid;
4585
4586 let f = NamedTempFile::new().unwrap();
4590 crate::test_utils::create_test_database(f.path())
4591 .await
4592 .unwrap();
4593 let pool = SqlitePool::connect(&format!("sqlite:{}", f.path().display()))
4594 .await
4595 .unwrap();
4596 let trashed_uuid = Uuid::new_v4().to_string();
4597 sqlx::query(
4598 "INSERT INTO TMTask \
4599 (uuid, title, type, status, trashed, creationDate, userModificationDate) \
4600 VALUES (?, ?, 0, 0, 1, 0, 0)",
4601 )
4602 .bind(&trashed_uuid)
4603 .bind("Trashed Task")
4604 .execute(&pool)
4605 .await
4606 .unwrap();
4607 pool.close().await;
4608
4609 let db = ThingsDatabase::new(f.path()).await.unwrap();
4610
4611 let active = db.query_tasks(&TaskFilters::default()).await.unwrap();
4613 assert!(active.iter().all(|t| t.uuid.to_string() != trashed_uuid));
4614
4615 let trashed = db
4617 .query_tasks(&TaskFilters {
4618 status: Some(TaskStatus::Trashed),
4619 ..TaskFilters::default()
4620 })
4621 .await
4622 .unwrap();
4623 assert!(
4624 trashed.iter().any(|t| t.uuid.to_string() == trashed_uuid),
4625 "expected trashed row to be returned by TaskStatus::Trashed filter"
4626 );
4627 }
4628
4629 #[tokio::test]
4630 async fn test_query_tasks_offset_without_limit() {
4631 let (db, _f) = open_test_db().await;
4633 let all = db.query_tasks(&TaskFilters::default()).await.unwrap();
4634 if all.len() < 2 {
4635 return; }
4637 let filters = TaskFilters {
4638 offset: Some(1),
4639 ..TaskFilters::default()
4640 };
4641 let offset_tasks = db.query_tasks(&filters).await.unwrap();
4642 assert_eq!(offset_tasks.len(), all.len() - 1);
4643 assert_eq!(offset_tasks[0].uuid, all[1].uuid);
4644 }
4645
4646 #[tokio::test]
4647 async fn test_query_tasks_pagination_with_post_filter() {
4648 let (db, _f) = open_test_db().await;
4650 let all_matching = db
4652 .query_tasks(&TaskFilters {
4653 search_query: Some(String::new()),
4654 ..TaskFilters::default()
4655 })
4656 .await
4657 .unwrap();
4658 if all_matching.len() < 2 {
4659 return;
4660 }
4661 let page0 = db
4662 .query_tasks(&TaskFilters {
4663 search_query: Some(String::new()),
4664 limit: Some(1),
4665 offset: Some(0),
4666 ..TaskFilters::default()
4667 })
4668 .await
4669 .unwrap();
4670 let page1 = db
4671 .query_tasks(&TaskFilters {
4672 search_query: Some(String::new()),
4673 limit: Some(1),
4674 offset: Some(1),
4675 ..TaskFilters::default()
4676 })
4677 .await
4678 .unwrap();
4679 assert_eq!(page0.len(), 1);
4680 assert_eq!(page1.len(), 1);
4681 assert_ne!(page0[0].uuid, page1[0].uuid);
4682 }
4683
4684 async fn insert_task(
4687 db: &ThingsDatabase,
4688 title: &str,
4689 notes: Option<&str>,
4690 tags: &[&str],
4691 ) -> Uuid {
4692 let uuid = Uuid::new_v4();
4693 let owned: Vec<String> = tags.iter().map(|s| (*s).to_string()).collect();
4694 let blob = serialize_tags_to_blob(&owned).unwrap();
4695 sqlx::query(
4696 "INSERT INTO TMTask \
4697 (uuid, title, notes, type, status, trashed, creationDate, userModificationDate, cachedTags) \
4698 VALUES (?, ?, ?, 0, 0, 0, 0, 0, ?)",
4699 )
4700 .bind(uuid.to_string())
4701 .bind(title)
4702 .bind(notes)
4703 .bind(blob)
4704 .execute(&db.pool)
4705 .await
4706 .unwrap();
4707 uuid
4708 }
4709
        /// Convenience wrapper over `insert_task` for rows with no notes.
        async fn insert_task_with_tags(db: &ThingsDatabase, title: &str, tags: &[&str]) -> Uuid {
            insert_task(db, title, None, tags).await
        }
4713
4714 async fn open_db_with_tagged_rows() -> (ThingsDatabase, NamedTempFile, Uuid, Uuid, Uuid) {
4715 let (db, f) = open_test_db().await;
4716 let a = insert_task_with_tags(&db, "task-a", &["a"]).await;
4717 let b = insert_task_with_tags(&db, "task-b", &["b"]).await;
4718 let c = insert_task_with_tags(&db, "task-c", &["c"]).await;
4719 (db, f, a, b, c)
4720 }
4721
4722 #[tokio::test]
4723 async fn test_query_tasks_any_tags_or_semantics() {
4724 let (db, _f, a, b, c) = open_db_with_tagged_rows().await;
4725 let tasks = TaskQueryBuilder::new()
4726 .any_tags(vec!["a".to_string(), "b".to_string()])
4727 .execute(&db)
4728 .await
4729 .unwrap();
4730 let uuids: std::collections::HashSet<_> = tasks.iter().map(|t| t.uuid).collect();
4731 assert!(uuids.contains(&a));
4732 assert!(uuids.contains(&b));
4733 assert!(!uuids.contains(&c));
4734 }
4735
4736 #[tokio::test]
4737 async fn test_query_tasks_exclude_tags() {
4738 let (db, _f, a, b, c) = open_db_with_tagged_rows().await;
4739 let tasks = TaskQueryBuilder::new()
4740 .exclude_tags(vec!["b".to_string()])
4741 .execute(&db)
4742 .await
4743 .unwrap();
4744 let uuids: std::collections::HashSet<_> = tasks.iter().map(|t| t.uuid).collect();
4745 assert!(uuids.contains(&a));
4746 assert!(!uuids.contains(&b));
4747 assert!(uuids.contains(&c));
4748 }
4749
4750 #[tokio::test]
4751 async fn test_query_tasks_tag_count_min() {
4752 let (db, _f) = open_test_db().await;
4753 insert_task_with_tags(&db, "zero-tags", &[]).await;
4754 insert_task_with_tags(&db, "one-tag", &["x"]).await;
4755 let two = insert_task_with_tags(&db, "two-tags", &["x", "y"]).await;
4756 let tasks = TaskQueryBuilder::new()
4757 .tag_count(2)
4758 .execute(&db)
4759 .await
4760 .unwrap();
4761 let uuids: Vec<Uuid> = tasks.iter().map(|t| t.uuid).collect();
4762 assert_eq!(uuids, vec![two]);
4763 }
4764
4765 #[tokio::test]
4766 async fn test_query_tasks_combined_tag_filters() {
4767 let (db, _f) = open_test_db().await;
4768 let target = insert_task_with_tags(&db, "target", &["a", "x"]).await;
4769 let _wrong_required = insert_task_with_tags(&db, "no-a", &["x"]).await;
4770 let _excluded = insert_task_with_tags(&db, "has-z", &["a", "x", "z"]).await;
4771 let _no_any = insert_task_with_tags(&db, "no-x", &["a"]).await;
4772
4773 let tasks = TaskQueryBuilder::new()
4774 .tags(vec!["a".to_string()])
4775 .any_tags(vec!["x".to_string(), "y".to_string()])
4776 .exclude_tags(vec!["z".to_string()])
4777 .execute(&db)
4778 .await
4779 .unwrap();
4780 let uuids: Vec<Uuid> = tasks.iter().map(|t| t.uuid).collect();
4781 assert_eq!(uuids, vec![target]);
4782 }
4783
4784 #[tokio::test]
4785 async fn test_query_tasks_pagination_with_any_tags() {
4786 let (db, _f) = open_test_db().await;
4789 insert_task_with_tags(&db, "a1", &["a"]).await;
4790 insert_task_with_tags(&db, "a2", &["a"]).await;
4791 insert_task_with_tags(&db, "a3", &["a"]).await;
4792 let page0 = TaskQueryBuilder::new()
4793 .any_tags(vec!["a".to_string()])
4794 .limit(1)
4795 .offset(0)
4796 .execute(&db)
4797 .await
4798 .unwrap();
4799 let page1 = TaskQueryBuilder::new()
4800 .any_tags(vec!["a".to_string()])
4801 .limit(1)
4802 .offset(1)
4803 .execute(&db)
4804 .await
4805 .unwrap();
4806 assert_eq!(page0.len(), 1);
4807 assert_eq!(page1.len(), 1);
4808 assert_ne!(page0[0].uuid, page1[0].uuid);
4809 }
4810
4811 #[tokio::test]
4812 async fn test_execute_fuzzy_typo_match() {
4813 let (db, _f) = open_test_db().await;
4814 let groceries = insert_task(&db, "Buy groceries", None, &[]).await;
4815 let tasks = TaskQueryBuilder::new()
4816 .fuzzy_search("grocries")
4817 .execute(&db)
4818 .await
4819 .unwrap();
4820 let uuids: Vec<Uuid> = tasks.iter().map(|t| t.uuid).collect();
4821 assert!(
4822 uuids.contains(&groceries),
4823 "typo 'grocries' should match 'Buy groceries'"
4824 );
4825 }
4826
4827 #[tokio::test]
4828 async fn test_execute_fuzzy_below_threshold_excluded() {
4829 let (db, _f) = open_test_db().await;
4830 insert_task(&db, "Buy groceries", None, &[]).await;
4831 let tasks = TaskQueryBuilder::new()
4832 .fuzzy_search("xyz")
4833 .fuzzy_threshold(0.95)
4834 .execute(&db)
4835 .await
4836 .unwrap();
4837 assert!(
4838 tasks.is_empty(),
4839 "completely unrelated query should return nothing at 0.95 threshold"
4840 );
4841 }
4842
4843 #[tokio::test]
4844 async fn test_execute_ranked_score_ordering() {
4845 let (db, _f) = open_test_db().await;
4846 insert_task(&db, "urgent task", None, &[]).await;
4847 insert_task(&db, "urgntt task", None, &[]).await; insert_task(&db, "completely unrelated xyz abc", None, &[]).await;
4849 let ranked = TaskQueryBuilder::new()
4850 .fuzzy_search("urgent")
4851 .fuzzy_threshold(0.5)
4852 .execute_ranked(&db)
4853 .await
4854 .unwrap();
4855 for pair in ranked.windows(2) {
4857 assert!(
4858 pair[0].score >= pair[1].score,
4859 "results must be sorted by score desc: {} < {}",
4860 pair[0].score,
4861 pair[1].score
4862 );
4863 }
4864 assert!(!ranked.is_empty(), "at least 'urgent task' should match");
4865 }
4866
4867 #[tokio::test]
4868 async fn test_execute_ranked_pagination() {
4869 let (db, _f) = open_test_db().await;
4870 for i in 0..5 {
4871 insert_task(&db, &format!("meeting agenda item {i}"), None, &[]).await;
4872 }
4873 let all = TaskQueryBuilder::new()
4874 .fuzzy_search("agenda")
4875 .execute_ranked(&db)
4876 .await
4877 .unwrap();
4878 let page = TaskQueryBuilder::new()
4879 .fuzzy_search("agenda")
4880 .limit(2)
4881 .offset(1)
4882 .execute_ranked(&db)
4883 .await
4884 .unwrap();
4885 assert_eq!(page.len(), 2);
4886 assert_eq!(page[0].task.uuid, all[1].task.uuid);
4887 assert_eq!(page[1].task.uuid, all[2].task.uuid);
4888 }
4889
4890 #[tokio::test]
4891 async fn test_execute_fuzzy_with_search_collision() {
4892 let (db, _f) = open_test_db().await;
4895 let target = insert_task(&db, "meeting agenda", None, &[]).await;
4896 let tasks = TaskQueryBuilder::new()
4897 .search("zzznomatch")
4898 .fuzzy_search("agenda")
4899 .execute(&db)
4900 .await
4901 .unwrap();
4902 assert_eq!(
4903 tasks.len(),
4904 1,
4905 "only the 'meeting agenda' row should match; substring filter must be suppressed"
4906 );
4907 assert_eq!(
4908 tasks[0].uuid, target,
4909 "fuzzy should win over substring search"
4910 );
4911 }
4912
4913 #[tokio::test]
4914 async fn test_execute_ranked_errors_without_fuzzy_query() {
4915 let (db, _f) = open_test_db().await;
4916 let result = TaskQueryBuilder::new().execute_ranked(&db).await;
4917 assert!(
4918 result.is_err(),
4919 "execute_ranked without fuzzy_search should error"
4920 );
4921 }
4922
4923 #[tokio::test]
4924 async fn test_execute_fuzzy_searches_notes() {
4925 let (db, _f) = open_test_db().await;
4926 let target = insert_task(&db, "Weekly sync", Some("meeting agenda for Q2"), &[]).await;
4927 let tasks = TaskQueryBuilder::new()
4928 .fuzzy_search("agenda")
4929 .execute(&db)
4930 .await
4931 .unwrap();
4932 let uuids: Vec<Uuid> = tasks.iter().map(|t| t.uuid).collect();
4933 assert!(uuids.contains(&target), "fuzzy should match text in notes");
4934 }
4935
4936 async fn insert_task_with_status(
4937 db: &ThingsDatabase,
4938 title: &str,
4939 status: TaskStatus,
4940 ) -> Uuid {
4941 let uuid = Uuid::new_v4();
4942 let blob = serialize_tags_to_blob(&Vec::<String>::new()).unwrap();
4943 let status_n: i64 = match status {
4944 TaskStatus::Incomplete => 0,
4945 TaskStatus::Completed => 1,
4946 TaskStatus::Canceled => 2,
4947 TaskStatus::Trashed => 0,
4948 };
4949 sqlx::query(
4950 "INSERT INTO TMTask \
4951 (uuid, title, notes, type, status, trashed, creationDate, userModificationDate, cachedTags) \
4952 VALUES (?, ?, NULL, 0, ?, 0, 0, 0, ?)",
4953 )
4954 .bind(uuid.to_string())
4955 .bind(title)
4956 .bind(status_n)
4957 .bind(blob)
4958 .execute(&db.pool)
4959 .await
4960 .unwrap();
4961 uuid
4962 }
4963
4964 async fn insert_task_with_type(
4965 db: &ThingsDatabase,
4966 title: &str,
4967 task_type: crate::models::TaskType,
4968 ) -> Uuid {
4969 let uuid = Uuid::new_v4();
4970 let blob = serialize_tags_to_blob(&Vec::<String>::new()).unwrap();
4971 let type_n: i64 = match task_type {
4972 crate::models::TaskType::Todo => 0,
4973 crate::models::TaskType::Project => 1,
4974 crate::models::TaskType::Heading => 2,
4975 crate::models::TaskType::Area => 3,
4976 };
4977 sqlx::query(
4978 "INSERT INTO TMTask \
4979 (uuid, title, notes, type, status, trashed, creationDate, userModificationDate, cachedTags) \
4980 VALUES (?, ?, NULL, ?, 0, 0, 0, 0, ?)",
4981 )
4982 .bind(uuid.to_string())
4983 .bind(title)
4984 .bind(type_n)
4985 .bind(blob)
4986 .execute(&db.pool)
4987 .await
4988 .unwrap();
4989 uuid
4990 }
4991
4992 #[tokio::test]
4993 async fn test_execute_with_where_expr_or_status() {
4994 use crate::filter_expr::FilterExpr;
4995 let (db, _f) = open_test_db().await;
4996 let inc = insert_task_with_status(&db, "inc", TaskStatus::Incomplete).await;
4997 let comp = insert_task_with_status(&db, "comp", TaskStatus::Completed).await;
4998 let canc = insert_task_with_status(&db, "canc", TaskStatus::Canceled).await;
4999
5000 let tasks = TaskQueryBuilder::new()
5001 .where_expr(
5002 FilterExpr::status(TaskStatus::Incomplete)
5003 .or(FilterExpr::status(TaskStatus::Completed)),
5004 )
5005 .execute(&db)
5006 .await
5007 .unwrap();
5008
5009 let uuids: std::collections::HashSet<_> = tasks.iter().map(|t| t.uuid).collect();
5010 assert!(uuids.contains(&inc));
5011 assert!(uuids.contains(&comp));
5012 assert!(!uuids.contains(&canc));
5013 }
5014
5015 #[tokio::test]
5016 async fn test_execute_with_where_expr_not_type() {
5017 use crate::filter_expr::FilterExpr;
5018 use crate::models::TaskType;
5019 let (db, _f) = open_test_db().await;
5020 let todo = insert_task_with_type(&db, "todo", TaskType::Todo).await;
5021 let project = insert_task_with_type(&db, "project", TaskType::Project).await;
5022
5023 let tasks = TaskQueryBuilder::new()
5024 .where_expr(FilterExpr::task_type(TaskType::Project).not())
5025 .execute(&db)
5026 .await
5027 .unwrap();
5028
5029 let uuids: std::collections::HashSet<_> = tasks.iter().map(|t| t.uuid).collect();
5030 assert!(uuids.contains(&todo));
5031 assert!(!uuids.contains(&project));
5032 }
5033
5034 #[tokio::test]
5035 async fn test_execute_pagination_defers_to_rust_when_where_expr_set() {
5036 use crate::filter_expr::FilterExpr;
5039 let (db, _f) = open_test_db().await;
5040 insert_task_with_status(&db, "inc-1", TaskStatus::Incomplete).await;
5041 insert_task_with_status(&db, "inc-2", TaskStatus::Incomplete).await;
5042 insert_task_with_status(&db, "inc-3", TaskStatus::Incomplete).await;
5043 insert_task_with_status(&db, "comp", TaskStatus::Completed).await;
5044
5045 let page0 = TaskQueryBuilder::new()
5046 .where_expr(FilterExpr::status(TaskStatus::Incomplete))
5047 .limit(1)
5048 .offset(0)
5049 .execute(&db)
5050 .await
5051 .unwrap();
5052 let page1 = TaskQueryBuilder::new()
5053 .where_expr(FilterExpr::status(TaskStatus::Incomplete))
5054 .limit(1)
5055 .offset(1)
5056 .execute(&db)
5057 .await
5058 .unwrap();
5059 assert_eq!(page0.len(), 1);
5060 assert_eq!(page1.len(), 1);
5061 assert_ne!(page0[0].uuid, page1[0].uuid);
5062 assert_eq!(page0[0].status, TaskStatus::Incomplete);
5063 assert_eq!(page1[0].status, TaskStatus::Incomplete);
5064 }
5065
5066 #[tokio::test]
5067 async fn test_execute_combines_where_expr_with_filters_status() {
5068 use crate::filter_expr::FilterExpr;
5070 let (db, _f) = open_test_db().await;
5071 let target = insert_task(&db, "needle", None, &["work"]).await;
5072 insert_task(&db, "decoy", None, &["work"]).await;
5073
5074 let tasks = TaskQueryBuilder::new()
5075 .status(TaskStatus::Incomplete)
5076 .where_expr(FilterExpr::title_contains("needle"))
5077 .execute(&db)
5078 .await
5079 .unwrap();
5080
5081 let uuids: std::collections::HashSet<_> = tasks.iter().map(|t| t.uuid).collect();
5082 assert!(uuids.contains(&target));
5083 assert_eq!(tasks.len(), 1);
5084 }
5085
5086 #[tokio::test]
5087 async fn test_execute_combines_where_expr_with_any_tags() {
5088 use crate::filter_expr::FilterExpr;
5091 let (db, _f) = open_test_db().await;
5092 let target = insert_task(&db, "needle-task", None, &["work"]).await;
5093 insert_task(&db, "decoy-task", None, &["work"]).await;
5094 insert_task(&db, "needle-but-wrong-tag", None, &["personal"]).await;
5095
5096 let tasks = TaskQueryBuilder::new()
5097 .any_tags(vec!["work".to_string()])
5098 .where_expr(FilterExpr::title_contains("needle"))
5099 .execute(&db)
5100 .await
5101 .unwrap();
5102
5103 let uuids: std::collections::HashSet<_> = tasks.iter().map(|t| t.uuid).collect();
5104 assert!(uuids.contains(&target));
5105 assert_eq!(tasks.len(), 1);
5106 }
5107
5108 #[cfg(feature = "batch-operations")]
5109 mod cursor_pagination_tests {
5110 use super::*;
5111
5112 #[tokio::test]
5113 async fn test_execute_paged_walks_through_all_tasks() {
5114 let (db, _f) = open_test_db().await;
5115 let mut inserted = vec![];
5116 for i in 0..5 {
5117 inserted.push(insert_task(&db, &format!("task-{i}"), None, &[]).await);
5118 }
5119
5120 let mut all_collected: Vec<Uuid> = vec![];
5121 let mut cursor = None;
5122 let mut page_count = 0;
5123 loop {
5124 let mut builder = TaskQueryBuilder::new().limit(2);
5125 if let Some(c) = cursor.take() {
5126 builder = builder.after(c);
5127 }
5128 let page = builder.execute_paged(&db).await.unwrap();
5129 page_count += 1;
5130 all_collected.extend(page.items.iter().map(|t| t.uuid));
5131 if let Some(next) = page.next_cursor {
5132 cursor = Some(next);
5133 } else {
5134 break;
5135 }
5136 assert!(page_count < 10, "runaway pagination loop");
5137 }
5138
5139 let inserted_set: std::collections::HashSet<_> = inserted.iter().copied().collect();
5140 let collected_set: std::collections::HashSet<_> =
5141 all_collected.iter().copied().collect();
5142 for uuid in &inserted_set {
5143 assert!(collected_set.contains(uuid), "missing inserted uuid {uuid}");
5144 }
5145 let mut sorted = all_collected.clone();
5146 sorted.sort();
5147 sorted.dedup();
5148 assert_eq!(sorted.len(), all_collected.len(), "duplicates in pages");
5149 }
5150
5151 #[tokio::test]
5152 async fn test_execute_paged_last_page_has_no_next_cursor() {
5153 let (db, _f) = open_test_db().await;
5154 insert_task(&db, "only-task", None, &[]).await;
5155 let page = TaskQueryBuilder::new()
5156 .status(TaskStatus::Incomplete)
5157 .limit(100)
5158 .execute_paged(&db)
5159 .await
5160 .unwrap();
5161 assert!(
5162 page.next_cursor.is_none(),
5163 "non-full page should not have a next cursor"
5164 );
5165 }
5166
5167 #[tokio::test]
5168 async fn test_execute_paged_with_status_filter() {
5169 let (db, _f) = open_test_db().await;
5170 let target = insert_task(&db, "incomplete-task", None, &[]).await;
5171 let page = TaskQueryBuilder::new()
5172 .status(TaskStatus::Incomplete)
5173 .limit(50)
5174 .execute_paged(&db)
5175 .await
5176 .unwrap();
5177 let uuids: std::collections::HashSet<_> =
5178 page.items.iter().map(|t| t.uuid).collect();
5179 assert!(uuids.contains(&target));
5180 for task in &page.items {
5181 assert_eq!(task.status, TaskStatus::Incomplete);
5182 }
5183 }
5184
5185 #[tokio::test]
5186 async fn test_execute_paged_with_post_filter_any_tags() {
5187 let (db, _f) = open_test_db().await;
5188 let a1 = insert_task_with_tags(&db, "a1", &["a"]).await;
5189 let a2 = insert_task_with_tags(&db, "a2", &["a"]).await;
5190 let a3 = insert_task_with_tags(&db, "a3", &["a"]).await;
5191 let _b = insert_task_with_tags(&db, "b1", &["b"]).await;
5192
5193 let mut all: Vec<Uuid> = vec![];
5194 let mut cursor = None;
5195 loop {
5196 let mut builder = TaskQueryBuilder::new()
5197 .any_tags(vec!["a".to_string()])
5198 .limit(2);
5199 if let Some(c) = cursor.take() {
5200 builder = builder.after(c);
5201 }
5202 let page = builder.execute_paged(&db).await.unwrap();
5203 all.extend(page.items.iter().map(|t| t.uuid));
5204 if let Some(n) = page.next_cursor {
5205 cursor = Some(n);
5206 } else {
5207 break;
5208 }
5209 }
5210
5211 let collected: std::collections::HashSet<_> = all.iter().copied().collect();
5212 assert!(collected.contains(&a1));
5213 assert!(collected.contains(&a2));
5214 assert!(collected.contains(&a3));
5215 assert_eq!(collected.len(), 3, "should contain only a-tagged tasks");
5216 }
5217
5218 #[tokio::test]
5219 async fn test_execute_paged_default_page_size_when_no_limit() {
5220 let (db, _f) = open_test_db().await;
5221 let page = TaskQueryBuilder::new()
5225 .status(TaskStatus::Incomplete)
5226 .execute_paged(&db)
5227 .await
5228 .unwrap();
5229 assert!(page.items.len() <= 100);
5230 assert!(page.next_cursor.is_none());
5231 }
5232
5233 #[tokio::test]
5234 async fn test_query_tasks_order_is_deterministic_by_uuid_tiebreak() {
5235 let (db, _f) = open_test_db().await;
5236 for i in 0..3 {
5239 insert_task(&db, &format!("dup-time-{i}"), None, &[]).await;
5240 }
5241 let first = db.query_tasks(&TaskFilters::default()).await.unwrap();
5242 let second = db.query_tasks(&TaskFilters::default()).await.unwrap();
5243 let first_uuids: Vec<_> = first.iter().map(|t| t.uuid).collect();
5244 let second_uuids: Vec<_> = second.iter().map(|t| t.uuid).collect();
5245 assert_eq!(
5246 first_uuids, second_uuids,
5247 "tied-creationDate ordering should be deterministic"
5248 );
5249 }
5250 }
5251
5252 #[cfg(feature = "batch-operations")]
5253 mod cursor_streaming_tests {
5254 use super::*;
5255 use futures_util::{StreamExt, TryStreamExt};
5256
5257 #[tokio::test]
5258 async fn test_execute_stream_yields_all_tasks() {
5259 let (db, _f) = open_test_db().await;
5260 let mut inserted = vec![];
5261 for i in 0..5 {
5262 inserted.push(insert_task(&db, &format!("task-{i}"), None, &[]).await);
5263 }
5264
5265 let collected: Vec<_> = TaskQueryBuilder::new()
5266 .limit(2)
5267 .execute_stream(&db)
5268 .try_collect::<Vec<_>>()
5269 .await
5270 .unwrap();
5271
5272 let inserted_set: std::collections::HashSet<_> = inserted.iter().copied().collect();
5273 let collected_set: std::collections::HashSet<_> =
5274 collected.iter().map(|t| t.uuid).collect();
5275 for uuid in &inserted_set {
5276 assert!(
5277 collected_set.contains(uuid),
5278 "stream missing inserted uuid {uuid}"
5279 );
5280 }
5281 assert_eq!(
5282 collected.len(),
5283 collected_set.len(),
5284 "stream yielded duplicates"
5285 );
5286 }
5287
5288 #[tokio::test]
5289 async fn test_execute_stream_with_status_filter() {
5290 let (db, _f) = open_test_db().await;
5291 let target = insert_task(&db, "incomplete-task", None, &[]).await;
5292
5293 let tasks = TaskQueryBuilder::new()
5294 .status(TaskStatus::Incomplete)
5295 .limit(50)
5296 .execute_stream(&db)
5297 .try_collect::<Vec<_>>()
5298 .await
5299 .unwrap();
5300
5301 let uuids: std::collections::HashSet<_> = tasks.iter().map(|t| t.uuid).collect();
5302 assert!(uuids.contains(&target));
5303 for task in &tasks {
5304 assert_eq!(task.status, TaskStatus::Incomplete);
5305 }
5306 }
5307
5308 #[tokio::test]
5309 async fn test_execute_stream_with_any_tags_post_filter() {
5310 let (db, _f) = open_test_db().await;
5311 let a1 = insert_task_with_tags(&db, "a1", &["a"]).await;
5312 let a2 = insert_task_with_tags(&db, "a2", &["a"]).await;
5313 let a3 = insert_task_with_tags(&db, "a3", &["a"]).await;
5314 let _b = insert_task_with_tags(&db, "b1", &["b"]).await;
5315
5316 let tasks = TaskQueryBuilder::new()
5317 .any_tags(vec!["a".to_string()])
5318 .limit(2)
5319 .execute_stream(&db)
5320 .try_collect::<Vec<_>>()
5321 .await
5322 .unwrap();
5323
5324 let uuids: std::collections::HashSet<_> = tasks.iter().map(|t| t.uuid).collect();
5325 assert!(uuids.contains(&a1));
5326 assert!(uuids.contains(&a2));
5327 assert!(uuids.contains(&a3));
5328 assert_eq!(uuids.len(), 3, "should yield only a-tagged tasks");
5329 }
5330
5331 #[tokio::test]
5332 async fn test_execute_stream_empty_result() {
5333 let (db, _f) = open_test_db().await;
5334 let tasks = TaskQueryBuilder::new()
5336 .project_uuid(Uuid::new_v4())
5337 .execute_stream(&db)
5338 .try_collect::<Vec<_>>()
5339 .await
5340 .unwrap();
5341 assert!(tasks.is_empty());
5342 }
5343
5344 #[tokio::test]
5345 async fn test_execute_stream_rejects_fuzzy_search() {
5346 let (db, _f) = open_test_db().await;
5347 let mut stream = TaskQueryBuilder::new()
5352 .fuzzy_search("anything")
5353 .execute_stream(&db);
5354 match stream.next().await {
5355 Some(Err(crate::error::ThingsError::InvalidCursor(msg))) => {
5356 assert!(msg.contains("fuzzy"), "msg: {msg}");
5357 }
5358 other => panic!("expected first item to be InvalidCursor, got {other:?}"),
5359 }
5360 assert!(stream.next().await.is_none());
5361 }
5362
5363 #[tokio::test]
5364 async fn test_execute_stream_cross_page_ordering() {
5365 let (db, _f) = open_test_db().await;
5366 for i in 0..5 {
5367 insert_task(&db, &format!("task-{i}"), None, &[]).await;
5368 }
5369
5370 let stream_uuids: Vec<Uuid> = TaskQueryBuilder::new()
5372 .limit(2)
5373 .execute_stream(&db)
5374 .try_collect::<Vec<_>>()
5375 .await
5376 .unwrap()
5377 .into_iter()
5378 .map(|t| t.uuid)
5379 .collect();
5380
5381 let full_uuids: Vec<Uuid> = db
5383 .query_tasks(&TaskFilters::default())
5384 .await
5385 .unwrap()
5386 .into_iter()
5387 .map(|t| t.uuid)
5388 .collect();
5389
5390 let stream_set: std::collections::HashSet<_> =
5393 stream_uuids.iter().copied().collect();
5394 let filtered_full: Vec<Uuid> = full_uuids
5395 .into_iter()
5396 .filter(|u| stream_set.contains(u))
5397 .collect();
5398 assert_eq!(
5399 stream_uuids, filtered_full,
5400 "stream ordering should agree with full-query (creationDate DESC, uuid DESC)"
5401 );
5402 }
5403 }
5404 }
5405}