#[cfg(any(feature = "advanced-queries", feature = "batch-operations"))]
use crate::models::TaskFilters;
use crate::{
database::{
mappers::{map_project_row, map_task_row},
query_builders::TaskUpdateBuilder,
validators,
},
error::{Result as ThingsResult, ThingsError},
models::{
Area, CreateTaskRequest, DeleteChildHandling, Project, Task, TaskStatus, TaskType,
UpdateTaskRequest,
},
};
use chrono::{DateTime, NaiveDate, Utc};
use serde::{Deserialize, Serialize};
use sqlx::{pool::PoolOptions, Row, SqlitePool};
use std::path::{Path, PathBuf};
use std::time::Duration;
use tracing::{debug, error, info, instrument};
use uuid::Uuid;
/// Safely converts a floating-point Unix timestamp (seconds) to `i64`.
///
/// Returns 0 for non-finite, negative, or out-of-range values (anything past
/// 2100-01-01 UTC is treated as corrupt data rather than propagated).
pub(crate) fn safe_timestamp_convert(ts_f64: f64) -> i64 {
    // Upper bound: 2100-01-01T00:00:00Z in Unix seconds.
    const MAX_TIMESTAMP: f64 = 4_102_444_800_f64;
    if ts_f64.is_finite() && (0.0..=MAX_TIMESTAMP).contains(&ts_f64) {
        // The value is finite and within [0, MAX_TIMESTAMP], so the truncated
        // value fits in i64 exactly; a direct cast is lossless here and the
        // previous format!/parse string round-trip was unnecessary.
        ts_f64.trunc() as i64
    } else {
        0
    }
}
/// Converts a Things date value (seconds elapsed since 2001-01-01T00:00:00Z)
/// into a calendar date. Zero and negative values denote "no date".
pub(crate) fn things_date_to_naive_date(seconds_since_2001: i64) -> Option<chrono::NaiveDate> {
    use chrono::{TimeZone, Utc};
    if seconds_since_2001 <= 0 {
        return None;
    }
    // The Things epoch (Apple reference date): 2001-01-01 midnight UTC.
    let things_epoch = Utc.with_ymd_and_hms(2001, 1, 1, 0, 0, 0).single().unwrap();
    let instant = things_epoch + chrono::Duration::seconds(seconds_since_2001);
    Some(instant.date_naive())
}
/// Converts a calendar date (taken at midnight UTC) into a Things timestamp,
/// i.e. seconds elapsed since 2001-01-01T00:00:00Z.
pub fn naive_date_to_things_timestamp(date: NaiveDate) -> i64 {
    use chrono::{NaiveTime, TimeZone, Utc};
    // The Things epoch (Apple reference date): 2001-01-01 midnight UTC.
    let things_epoch = Utc.with_ymd_and_hms(2001, 1, 1, 0, 0, 0).single().unwrap();
    let midnight = NaiveTime::from_hms_opt(0, 0, 0).unwrap();
    let instant = date
        .and_time(midnight)
        .and_local_timezone(Utc)
        .single()
        .unwrap();
    instant.timestamp() - things_epoch.timestamp()
}
/// Serializes a tag list to the JSON-array blob format stored in `cachedTags`.
pub fn serialize_tags_to_blob(tags: &[String]) -> ThingsResult<Vec<u8>> {
    match serde_json::to_vec(tags) {
        Ok(bytes) => Ok(bytes),
        Err(e) => Err(ThingsError::unknown(format!(
            "Failed to serialize tags: {e}"
        ))),
    }
}
/// Inverse of [`serialize_tags_to_blob`]: parses the JSON-array blob back
/// into a tag list.
pub fn deserialize_tags_from_blob(blob: &[u8]) -> ThingsResult<Vec<String>> {
    match serde_json::from_slice(blob) {
        Ok(tags) => Ok(tags),
        Err(e) => Err(ThingsError::unknown(format!(
            "Failed to deserialize tags: {e}"
        ))),
    }
}
/// Derives a deterministic [`Uuid`] from an arbitrary Things identifier
/// string (which is generally not a valid RFC 4122 UUID).
///
/// Layout of the 16 bytes: 8 bytes from a hash of the whole string,
/// 1 byte from its length (capped at 255), and 7 bytes from its leading
/// characters (padded with `'0'` when the string is shorter).
pub(crate) fn things_uuid_to_uuid(things_uuid: &str) -> Uuid {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    let mut hasher = DefaultHasher::new();
    things_uuid.hash(&mut hasher);
    let hash = hasher.finish();
    let mut bytes = [0u8; 16];
    // Bytes 0..8: hash in big-endian order (most significant byte first).
    bytes[..8].copy_from_slice(&hash.to_be_bytes());
    // Byte 8: string length, saturated at 255.
    bytes[8] = u8::try_from(things_uuid.len().min(255)).unwrap_or(255);
    // Bytes 9..16: first seven characters (low byte of each), '0'-padded.
    for (slot, ch) in bytes[9..]
        .iter_mut()
        .zip(things_uuid.chars().chain(std::iter::repeat('0')))
    {
        *slot = ch as u8;
    }
    Uuid::from_bytes(bytes)
}
impl TaskStatus {
    /// Maps the integer `status` column from the Things database to a
    /// [`TaskStatus`], returning `None` for unknown codes.
    fn from_i32(value: i32) -> Option<Self> {
        let status = match value {
            0 => TaskStatus::Incomplete,
            1 => TaskStatus::Completed,
            2 => TaskStatus::Canceled,
            3 => TaskStatus::Trashed,
            _ => return None,
        };
        Some(status)
    }
}
impl TaskType {
    /// Maps the integer `type` column from the Things database to a
    /// [`TaskType`], returning `None` for unknown codes.
    fn from_i32(value: i32) -> Option<Self> {
        let task_type = match value {
            0 => TaskType::Todo,
            1 => TaskType::Project,
            2 => TaskType::Heading,
            3 => TaskType::Area,
            _ => return None,
        };
        Some(task_type)
    }
}
/// Tunables for the SQLite connection pool.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabasePoolConfig {
    /// Maximum number of simultaneously open connections.
    pub max_connections: u32,
    /// Minimum number of connections kept open by the pool.
    pub min_connections: u32,
    /// How long to wait when acquiring a connection before failing.
    pub connect_timeout: Duration,
    /// Idle time after which a connection is recycled.
    pub idle_timeout: Duration,
    /// Maximum total lifetime of any single connection.
    pub max_lifetime: Duration,
    /// Whether to health-check a connection before handing it out.
    pub test_before_acquire: bool,
    /// PRAGMA-level tuning applied when the pool is created.
    pub sqlite_optimizations: SqliteOptimizations,
}
/// SQLite PRAGMA settings applied to a pool at creation time.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SqliteOptimizations {
    /// NOTE(review): this flag is only echoed in a debug log message;
    /// `journal_mode` is the value that actually selects WAL — confirm intent.
    pub enable_wal_mode: bool,
    /// Value for `PRAGMA synchronous` (e.g. "NORMAL", "FULL").
    pub synchronous_mode: String,
    /// Value for `PRAGMA cache_size`; SQLite interprets negative values as KiB.
    pub cache_size: i32,
    /// Whether to turn `PRAGMA foreign_keys` ON.
    pub enable_foreign_keys: bool,
    /// Value for `PRAGMA journal_mode` (e.g. "WAL").
    pub journal_mode: String,
    /// Value for `PRAGMA temp_store` (e.g. "MEMORY").
    pub temp_store: String,
    /// Value for `PRAGMA mmap_size`, in bytes.
    pub mmap_size: i64,
    /// Whether to run `PRAGMA optimize` after setup.
    pub enable_query_planner: bool,
}
impl Default for DatabasePoolConfig {
    /// Defaults: up to 10 connections (min 1), 30s connect timeout,
    /// 10-minute idle timeout, 30-minute max lifetime, with pre-acquire
    /// health checks enabled.
    fn default() -> Self {
        Self {
            max_connections: 10,
            min_connections: 1,
            connect_timeout: Duration::from_secs(30),
            // 10 minutes idle, 30 minutes total lifetime.
            idle_timeout: Duration::from_secs(600),
            max_lifetime: Duration::from_secs(1800),
            test_before_acquire: true,
            sqlite_optimizations: SqliteOptimizations::default(),
        }
    }
}
impl Default for SqliteOptimizations {
    /// Defaults tuned for a local desktop database: WAL journaling, NORMAL
    /// synchronous mode, ~20 MiB page cache, in-memory temp store, and a
    /// 256 MiB memory-mapped I/O window.
    fn default() -> Self {
        Self {
            enable_wal_mode: true,
            synchronous_mode: "NORMAL".to_string(),
            // Negative cache_size is interpreted by SQLite as KiB (~20 MiB).
            cache_size: -20000,
            enable_foreign_keys: true,
            journal_mode: "WAL".to_string(),
            temp_store: "MEMORY".to_string(),
            // 256 MiB.
            mmap_size: 268_435_456,
            enable_query_planner: true,
        }
    }
}
/// Point-in-time health snapshot of the connection pool.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PoolHealthStatus {
    /// Result of the live `SELECT 1` connectivity probe.
    pub is_healthy: bool,
    /// Total connections currently open in the pool.
    pub pool_size: u32,
    /// Connections currently checked out (size minus idle).
    pub active_connections: u32,
    /// Connections currently sitting idle in the pool.
    pub idle_connections: u32,
    /// Configured upper bound on connections.
    pub max_connections: u32,
    /// Configured lower bound on connections.
    pub min_connections: u32,
    /// Configured acquire timeout.
    pub connection_timeout: Duration,
    /// Configured idle timeout, if any.
    pub idle_timeout: Option<Duration>,
    /// Configured max connection lifetime, if any.
    pub max_lifetime: Option<Duration>,
}
/// Pool utilization metrics, a superset of [`PoolHealthStatus`] that adds
/// utilization percentage and probe latency.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PoolMetrics {
    /// Total connections currently open in the pool.
    pub pool_size: u32,
    /// Connections currently checked out (size minus idle).
    pub active_connections: u32,
    /// Connections currently sitting idle in the pool.
    pub idle_connections: u32,
    /// Configured upper bound on connections.
    pub max_connections: u32,
    /// Configured lower bound on connections.
    pub min_connections: u32,
    /// active / max, expressed as a percentage (0 when max is 0).
    pub utilization_percentage: f64,
    /// Result of the live connectivity probe.
    pub is_healthy: bool,
    /// Wall-clock duration of the connectivity probe, in milliseconds.
    pub response_time_ms: u64,
    /// Configured acquire timeout.
    pub connection_timeout: Duration,
    /// Configured idle timeout, if any.
    pub idle_timeout: Option<Duration>,
    /// Configured max connection lifetime, if any.
    pub max_lifetime: Option<Duration>,
}
/// Combined health report: pool health, pool metrics, and database row
/// counts, stamped with the time the report was produced.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComprehensiveHealthStatus {
    /// True only when both the health and metrics probes reported healthy.
    pub overall_healthy: bool,
    /// Snapshot from [`ThingsDatabase::get_pool_health`].
    pub pool_health: PoolHealthStatus,
    /// Snapshot from [`ThingsDatabase::get_pool_metrics`].
    pub pool_metrics: PoolMetrics,
    /// Row counts from [`ThingsDatabase::get_stats`].
    pub database_stats: DatabaseStats,
    /// When this report was generated (UTC).
    pub timestamp: DateTime<Utc>,
}
/// Handle to the Things SQLite database: a connection pool plus the
/// configuration it was built with. Cloning clones the pool handle.
#[derive(Debug, Clone)]
pub struct ThingsDatabase {
    /// Shared sqlx connection pool.
    pub(crate) pool: SqlitePool,
    /// Retained so health/metrics reporting can echo the configured limits.
    config: DatabasePoolConfig,
}
impl ThingsDatabase {
#[instrument]
/// Opens a pool to the database at `database_path` using the default
/// [`DatabasePoolConfig`].
pub async fn new(database_path: &Path) -> ThingsResult<Self> {
    let config = DatabasePoolConfig::default();
    Self::new_with_config(database_path, config).await
}
#[instrument]
/// Opens a pool to the SQLite database at `database_path` with an explicit
/// pool configuration.
///
/// # Errors
/// Returns an error if the connection cannot be established or the SQLite
/// PRAGMA optimizations cannot be applied.
pub async fn new_with_config(
    database_path: &Path,
    config: DatabasePoolConfig,
) -> ThingsResult<Self> {
    // Delegate to the URL-based constructor so the pool setup and PRAGMA
    // application logic lives in exactly one place (it was previously
    // duplicated verbatim between the two constructors).
    let database_url = format!("sqlite:{}", database_path.display());
    Self::from_connection_string_with_config(&database_url, config).await
}
/// Applies the configured SQLite PRAGMA settings to every connection pool.
///
/// # Errors
/// Returns an error when any PRAGMA statement fails to execute.
async fn apply_sqlite_optimizations(
    pool: &SqlitePool,
    optimizations: &SqliteOptimizations,
) -> ThingsResult<()> {
    let fk_setting = if optimizations.enable_foreign_keys {
        "ON"
    } else {
        "OFF"
    };
    // (PRAGMA statement, label used in the error message), applied in order.
    // This replaces six copy-pasted execute/map_err stanzas with one loop;
    // the SQL text and error messages are unchanged.
    let pragmas = [
        (
            format!("PRAGMA journal_mode = {}", optimizations.journal_mode),
            "journal mode",
        ),
        (
            format!("PRAGMA synchronous = {}", optimizations.synchronous_mode),
            "synchronous mode",
        ),
        (
            format!("PRAGMA cache_size = {}", optimizations.cache_size),
            "cache size",
        ),
        (format!("PRAGMA foreign_keys = {fk_setting}"), "foreign keys"),
        (
            format!("PRAGMA temp_store = {}", optimizations.temp_store),
            "temp store",
        ),
        (
            format!("PRAGMA mmap_size = {}", optimizations.mmap_size),
            "mmap size",
        ),
    ];
    for (sql, label) in &pragmas {
        sqlx::query(sql)
            .execute(pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to set {label}: {e}")))?;
    }
    // PRAGMA optimize asks SQLite's query planner to refresh its statistics.
    if optimizations.enable_query_planner {
        sqlx::query("PRAGMA optimize")
            .execute(pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to optimize database: {e}")))?;
    }
    debug!(
        "Applied SQLite optimizations: WAL={}, sync={}, cache={}KB, fk={}, temp={}, mmap={}MB",
        optimizations.enable_wal_mode,
        optimizations.synchronous_mode,
        optimizations.cache_size.abs() / 1024,
        optimizations.enable_foreign_keys,
        optimizations.temp_store,
        optimizations.mmap_size / 1024 / 1024
    );
    Ok(())
}
#[instrument]
/// Connects to `database_url` using the default [`DatabasePoolConfig`].
pub async fn from_connection_string(database_url: &str) -> ThingsResult<Self> {
    let config = DatabasePoolConfig::default();
    Self::from_connection_string_with_config(database_url, config).await
}
#[instrument]
/// Connects to the SQLite database at `database_url` with an explicit pool
/// configuration, then applies the configured PRAGMA optimizations.
///
/// # Errors
/// Returns an error if the connection cannot be established or the PRAGMA
/// optimizations cannot be applied.
pub async fn from_connection_string_with_config(
    database_url: &str,
    config: DatabasePoolConfig,
) -> ThingsResult<Self> {
    info!(
        "Connecting to SQLite database: {} with optimized pool",
        database_url
    );
    // Translate the pool configuration into sqlx pool options.
    let pool_options = PoolOptions::new()
        .max_connections(config.max_connections)
        .min_connections(config.min_connections)
        .acquire_timeout(config.connect_timeout)
        .idle_timeout(Some(config.idle_timeout))
        .max_lifetime(Some(config.max_lifetime))
        .test_before_acquire(config.test_before_acquire);
    let pool = pool_options
        .connect(database_url)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to connect to database: {e}")))?;
    Self::apply_sqlite_optimizations(&pool, &config.sqlite_optimizations).await?;
    info!(
        "Database connection pool established successfully with {} max connections",
        config.max_connections
    );
    Ok(Self { pool, config })
}
#[must_use]
/// Borrows the underlying SQLite connection pool.
pub fn pool(&self) -> &SqlitePool {
    &self.pool
}
#[instrument]
/// Probes connectivity by running a trivial `SELECT 1` against the pool,
/// logging the outcome either way.
pub async fn is_connected(&self) -> bool {
    let probe = sqlx::query("SELECT 1").fetch_one(&self.pool).await;
    if let Err(e) = probe {
        error!("Database connection check failed: {}", e);
        false
    } else {
        debug!("Database connection is healthy");
        true
    }
}
#[instrument]
/// Returns a snapshot of pool sizing plus a live connectivity probe.
///
/// # Errors
/// Currently always returns `Ok`; the `Result` is kept for interface
/// stability.
pub async fn get_pool_health(&self) -> ThingsResult<PoolHealthStatus> {
    let pool_size = self.pool.size();
    let idle_connections = u32::try_from(self.pool.num_idle()).unwrap_or(0);
    // `size()` and `num_idle()` are sampled at different instants, so idle
    // can transiently exceed the pool size; saturate instead of underflowing
    // (the previous plain subtraction would panic in debug builds).
    let active_connections = pool_size.saturating_sub(idle_connections);
    let is_healthy = self.is_connected().await;
    Ok(PoolHealthStatus {
        is_healthy,
        pool_size,
        active_connections,
        idle_connections,
        max_connections: self.config.max_connections,
        min_connections: self.config.min_connections,
        connection_timeout: self.config.connect_timeout,
        idle_timeout: Some(self.config.idle_timeout),
        max_lifetime: Some(self.config.max_lifetime),
    })
}
#[instrument]
/// Returns pool utilization metrics, including the latency of a live
/// connectivity probe.
///
/// # Errors
/// Currently always returns `Ok`; the `Result` is kept for interface
/// stability.
pub async fn get_pool_metrics(&self) -> ThingsResult<PoolMetrics> {
    let pool_size = self.pool.size();
    let idle_connections = u32::try_from(self.pool.num_idle()).unwrap_or(0);
    // Saturating: idle may transiently exceed size because the two values
    // are separate samples (plain subtraction would panic in debug builds).
    let active_connections = pool_size.saturating_sub(idle_connections);
    let max_connections = self.config.max_connections;
    let utilization_percentage = if max_connections > 0 {
        (f64::from(active_connections) / f64::from(max_connections)) * 100.0
    } else {
        0.0
    };
    // Time the connectivity probe to report a coarse response latency.
    let start_time = std::time::Instant::now();
    let is_connected = self.is_connected().await;
    let response_time_ms = u64::try_from(start_time.elapsed().as_millis()).unwrap_or(0);
    Ok(PoolMetrics {
        pool_size,
        active_connections,
        idle_connections,
        max_connections,
        min_connections: self.config.min_connections,
        utilization_percentage,
        is_healthy: is_connected,
        response_time_ms,
        connection_timeout: self.config.connect_timeout,
        idle_timeout: Some(self.config.idle_timeout),
        max_lifetime: Some(self.config.max_lifetime),
    })
}
#[instrument]
/// Aggregates pool health, pool metrics, and database row counts into a
/// single timestamped report.
///
/// # Errors
/// Propagates any failure from the underlying health, metrics, or stats
/// queries.
pub async fn comprehensive_health_check(&self) -> ThingsResult<ComprehensiveHealthStatus> {
    let health = self.get_pool_health().await?;
    let metrics = self.get_pool_metrics().await?;
    let stats = self.get_stats().await?;
    Ok(ComprehensiveHealthStatus {
        // Healthy only when both probes agree.
        overall_healthy: health.is_healthy && metrics.is_healthy,
        pool_health: health,
        pool_metrics: metrics,
        database_stats: stats,
        timestamp: Utc::now(),
    })
}
#[instrument]
/// Gathers row counts: all TMTask rows, project rows (type = 1), and areas.
///
/// # Errors
/// Fails if any of the COUNT queries fails.
pub async fn get_stats(&self) -> ThingsResult<DatabaseStats> {
    let total_tasks: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM TMTask")
        .fetch_one(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to get task count: {e}")))?;
    let total_projects: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM TMTask WHERE type = 1")
        .fetch_one(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to get project count: {e}")))?;
    let total_areas: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM TMArea")
        .fetch_one(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to get area count: {e}")))?;
    // Counts are never negative; a conversion failure degrades to 0.
    Ok(DatabaseStats {
        task_count: total_tasks.try_into().unwrap_or(0),
        project_count: total_projects.try_into().unwrap_or(0),
        area_count: total_areas.try_into().unwrap_or(0),
    })
}
#[instrument]
/// Fetches every TMTask row and maps it into a [`Task`].
///
/// NOTE(review): the column names here (`start_date`, `due_date`,
/// `project_uuid`, `area_uuid`, `created`, `modified`) do not match the
/// Things-style columns (`startDate`, `deadline`, `project`, `area`,
/// `creationDate`, …) used by the other queries in this file — confirm
/// which schema this method is meant to target.
///
/// # Errors
/// Fails if the query fails or a row carries a malformed task UUID.
pub async fn get_all_tasks(&self) -> ThingsResult<Vec<Task>> {
    let rows = sqlx::query(
        r"
SELECT
uuid, title, status, type,
start_date, due_date,
project_uuid, area_uuid,
notes, tags,
created, modified
FROM TMTask
ORDER BY created DESC
",
    )
    .fetch_all(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to fetch tasks: {e}")))?;
    let mut tasks = Vec::new();
    for row in rows {
        let task = Task {
            // A malformed UUID aborts the whole fetch with a domain error.
            uuid: Uuid::parse_str(&row.get::<String, _>("uuid"))
                .map_err(|e| ThingsError::unknown(format!("Invalid task UUID: {e}")))?,
            title: row.get("title"),
            // Unknown numeric codes fall back to sensible defaults.
            status: TaskStatus::from_i32(row.get("status")).unwrap_or(TaskStatus::Incomplete),
            task_type: TaskType::from_i32(row.get("type")).unwrap_or(TaskType::Todo),
            // Dates parsed from "YYYY-MM-DD" strings; unparseable -> None.
            start_date: row
                .get::<Option<String>, _>("start_date")
                .and_then(|s| NaiveDate::parse_from_str(&s, "%Y-%m-%d").ok()),
            deadline: row
                .get::<Option<String>, _>("due_date")
                .and_then(|s| NaiveDate::parse_from_str(&s, "%Y-%m-%d").ok()),
            project_uuid: row
                .get::<Option<String>, _>("project_uuid")
                .and_then(|s| Uuid::parse_str(&s).ok()),
            area_uuid: row
                .get::<Option<String>, _>("area_uuid")
                .and_then(|s| Uuid::parse_str(&s).ok()),
            // Parent/child relationships are not loaded by this query.
            parent_uuid: None,
            notes: row.get("notes"),
            // Tags read as a comma-separated string here — unlike the JSON
            // blob used by create/update. NOTE(review): confirm the format.
            tags: row
                .get::<Option<String>, _>("tags")
                .map(|s| s.split(',').map(|s| s.trim().to_string()).collect())
                .unwrap_or_default(),
            children: Vec::new(),
            // RFC 3339 timestamp strings; parse failures fall back to "now".
            created: DateTime::parse_from_rfc3339(&row.get::<String, _>("created"))
                .ok()
                .map_or_else(Utc::now, |dt| dt.with_timezone(&Utc)),
            modified: DateTime::parse_from_rfc3339(&row.get::<String, _>("modified"))
                .ok()
                .map_or_else(Utc::now, |dt| dt.with_timezone(&Utc)),
            stop_date: None,
        };
        tasks.push(task);
    }
    debug!("Fetched {} tasks", tasks.len());
    Ok(tasks)
}
#[instrument]
/// Fetches all non-trashed projects (TMTask rows with type = 1).
///
/// NOTE(review): `deadline` and `startDate` are converted with
/// `DateTime::from_timestamp`, i.e. interpreted as Unix-epoch seconds,
/// while `things_date_to_naive_date` elsewhere treats date columns as
/// seconds since 2001-01-01 — confirm which epoch these columns use.
///
/// # Errors
/// Fails if the underlying query fails.
pub async fn get_all_projects(&self) -> ThingsResult<Vec<Project>> {
    let rows = sqlx::query(
        r"
SELECT
uuid, title, status,
area, notes,
creationDate, userModificationDate,
startDate, deadline
FROM TMTask
WHERE type = 1 AND trashed = 0
ORDER BY creationDate DESC
",
    )
    .fetch_all(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to fetch projects: {e}")))?;
    let mut projects = Vec::new();
    for row in rows {
        let project = Project {
            // Things identifiers are not RFC 4122 UUIDs; derive a stable one.
            uuid: things_uuid_to_uuid(&row.get::<String, _>("uuid")),
            title: row.get("title"),
            status: TaskStatus::from_i32(row.get("status")).unwrap_or(TaskStatus::Incomplete),
            area_uuid: row
                .get::<Option<String>, _>("area")
                .map(|s| things_uuid_to_uuid(&s)),
            notes: row.get("notes"),
            deadline: row
                .get::<Option<i64>, _>("deadline")
                .and_then(|ts| DateTime::from_timestamp(ts, 0))
                .map(|dt| dt.date_naive()),
            start_date: row
                .get::<Option<i64>, _>("startDate")
                .and_then(|ts| DateTime::from_timestamp(ts, 0))
                .map(|dt| dt.date_naive()),
            // Tags and child tasks are not loaded by this query.
            tags: Vec::new(),
            tasks: Vec::new(),
            // creationDate/userModificationDate are REAL (f64) timestamps;
            // out-of-range values degrade to "now" via safe_timestamp_convert.
            created: {
                let ts_f64 = row.get::<f64, _>("creationDate");
                let ts = safe_timestamp_convert(ts_f64);
                DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
            },
            modified: {
                let ts_f64 = row.get::<f64, _>("userModificationDate");
                let ts = safe_timestamp_convert(ts_f64);
                DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
            },
        };
        projects.push(project);
    }
    debug!("Fetched {} projects", projects.len());
    Ok(projects)
}
#[instrument]
/// Fetches all areas ordered by their display index.
///
/// The TMArea columns selected here carry no notes/tags/timestamps, so those
/// fields are filled with placeholders.
///
/// # Errors
/// Fails if the underlying query fails.
pub async fn get_all_areas(&self) -> ThingsResult<Vec<Area>> {
    let rows = sqlx::query(
        r"
SELECT
uuid, title, visible, `index`
FROM TMArea
ORDER BY `index` ASC
",
    )
    .fetch_all(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to fetch areas: {e}")))?;
    let areas: Vec<Area> = rows
        .iter()
        .map(|row| {
            let raw_uuid: String = row.get("uuid");
            Area {
                // Fall back to a hash-derived UUID when the identifier is not
                // already a valid UUID string.
                uuid: Uuid::parse_str(&raw_uuid)
                    .unwrap_or_else(|_| things_uuid_to_uuid(&raw_uuid)),
                title: row.get("title"),
                // Placeholder values; not present in the selected columns.
                notes: None,
                projects: Vec::new(),
                tags: Vec::new(),
                created: Utc::now(),
                modified: Utc::now(),
            }
        })
        .collect();
    debug!("Fetched {} areas", areas.len());
    Ok(areas)
}
#[instrument]
/// Fetches all TMTask rows with the given status.
///
/// NOTE(review): like `get_all_tasks`, this query uses `start_date`,
/// `due_date`, `project_uuid`, `area_uuid`, `created`, `modified` column
/// names that do not match the Things-style columns used elsewhere in this
/// file — confirm the intended schema.
///
/// # Errors
/// Fails if the query fails or a row carries a malformed task UUID.
pub async fn get_tasks_by_status(&self, status: TaskStatus) -> ThingsResult<Vec<Task>> {
    // Relies on TaskStatus discriminants matching from_i32's mapping.
    let status_value = status as i32;
    let rows = sqlx::query(
        r"
SELECT
uuid, title, status, type,
start_date, due_date,
project_uuid, area_uuid,
notes, tags,
created, modified
FROM TMTask
WHERE status = ?
ORDER BY created DESC
",
    )
    .bind(status_value)
    .fetch_all(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to fetch tasks by status: {e}")))?;
    let mut tasks = Vec::new();
    for row in rows {
        let task = Task {
            // A malformed UUID aborts the whole fetch with a domain error.
            uuid: Uuid::parse_str(&row.get::<String, _>("uuid"))
                .map_err(|e| ThingsError::unknown(format!("Invalid task UUID: {e}")))?,
            title: row.get("title"),
            // Unknown numeric codes fall back to sensible defaults.
            status: TaskStatus::from_i32(row.get("status")).unwrap_or(TaskStatus::Incomplete),
            task_type: TaskType::from_i32(row.get("type")).unwrap_or(TaskType::Todo),
            // Dates parsed from "YYYY-MM-DD" strings; unparseable -> None.
            start_date: row
                .get::<Option<String>, _>("start_date")
                .and_then(|s| NaiveDate::parse_from_str(&s, "%Y-%m-%d").ok()),
            deadline: row
                .get::<Option<String>, _>("due_date")
                .and_then(|s| NaiveDate::parse_from_str(&s, "%Y-%m-%d").ok()),
            project_uuid: row
                .get::<Option<String>, _>("project_uuid")
                .and_then(|s| Uuid::parse_str(&s).ok()),
            area_uuid: row
                .get::<Option<String>, _>("area_uuid")
                .and_then(|s| Uuid::parse_str(&s).ok()),
            // Parent/child relationships are not loaded by this query.
            parent_uuid: None,
            notes: row.get("notes"),
            // Tags read as a comma-separated string — NOTE(review): confirm.
            tags: row
                .get::<Option<String>, _>("tags")
                .map(|s| s.split(',').map(|s| s.trim().to_string()).collect())
                .unwrap_or_default(),
            children: Vec::new(),
            // RFC 3339 timestamp strings; parse failures fall back to "now".
            created: DateTime::parse_from_rfc3339(&row.get::<String, _>("created"))
                .ok()
                .map_or_else(Utc::now, |dt| dt.with_timezone(&Utc)),
            modified: DateTime::parse_from_rfc3339(&row.get::<String, _>("modified"))
                .ok()
                .map_or_else(Utc::now, |dt| dt.with_timezone(&Utc)),
            stop_date: None,
        };
        tasks.push(task);
    }
    debug!("Fetched {} tasks with status {:?}", tasks.len(), status);
    Ok(tasks)
}
#[instrument]
pub async fn search_tasks(&self, query: &str) -> ThingsResult<Vec<Task>> {
let search_pattern = format!("%{query}%");
let rows = sqlx::query(
r"
SELECT
uuid, title, status, type,
startDate, deadline, stopDate,
project, area, heading,
notes, cachedTags,
creationDate, userModificationDate
FROM TMTask
WHERE (title LIKE ? OR notes LIKE ?) AND trashed = 0 AND type = 0
ORDER BY creationDate DESC
",
)
.bind(&search_pattern)
.bind(&search_pattern)
.fetch_all(&self.pool)
.await
.map_err(|e| ThingsError::unknown(format!("Failed to search tasks: {e}")))?;
let tasks = rows
.iter()
.map(map_task_row)
.collect::<ThingsResult<Vec<Task>>>()?;
debug!("Found {} tasks matching query: {}", tasks.len(), query);
Ok(tasks)
}
#[cfg(any(feature = "advanced-queries", feature = "batch-operations"))]
/// Runs a filtered task query without keyset pagination; see
/// `query_tasks_inner` for the filter semantics.
pub async fn query_tasks(&self, filters: &TaskFilters) -> ThingsResult<Vec<Task>> {
    self.query_tasks_inner(filters, None).await
}
#[cfg(any(feature = "advanced-queries", feature = "batch-operations"))]
/// Core filtered task query with optional keyset ("seek") pagination.
///
/// `after` is an exclusive cursor of `(creationDate seconds, uuid)`: only
/// rows strictly older than the cursor (ties broken by descending uuid) are
/// returned.
///
/// Tag and free-text filters cannot be expressed in SQL here (tags live in a
/// serialized blob), so they are applied in memory; in that case
/// LIMIT/OFFSET are also applied in memory, after filtering.
///
/// # Errors
/// Fails if the SQL query fails or a row cannot be mapped into a [`Task`].
pub(crate) async fn query_tasks_inner(
    &self,
    filters: &TaskFilters,
    after: Option<(i64, Uuid)>,
) -> ThingsResult<Vec<Task>> {
    const COLS: &str = "uuid, title, type, status, notes, startDate, deadline, stopDate, \
        creationDate, userModificationDate, project, area, heading, cachedTags";
    // Requesting trashed tasks flips the `trashed` flag filter; otherwise
    // trashed rows are always excluded.
    let trashed_val = i32::from(matches!(filters.status, Some(TaskStatus::Trashed)));
    // All interpolated values below come from typed values (i32, i64, Uuid),
    // so this string building cannot inject arbitrary SQL.
    let mut conditions: Vec<String> = vec![format!("trashed = {trashed_val}")];
    if let Some(status) = filters.status {
        let n = match status {
            TaskStatus::Incomplete => Some(0),
            TaskStatus::Completed => Some(1),
            TaskStatus::Canceled => Some(2),
            // Trashed is expressed via the trashed flag, not the status column.
            TaskStatus::Trashed => None,
        };
        if let Some(n) = n {
            conditions.push(format!("status = {n}"));
        }
    }
    if let Some(task_type) = filters.task_type {
        let n = match task_type {
            TaskType::Todo => 0,
            TaskType::Project => 1,
            TaskType::Heading => 2,
            TaskType::Area => 3,
        };
        conditions.push(format!("type = {n}"));
    }
    if let Some(ref uuid) = filters.project_uuid {
        conditions.push(format!("project = '{uuid}'"));
    }
    if let Some(ref uuid) = filters.area_uuid {
        conditions.push(format!("area = '{uuid}'"));
    }
    // Date-range filters use the Things epoch (seconds since 2001-01-01 UTC).
    if let Some(from) = filters.start_date_from {
        conditions.push(format!(
            "startDate >= {}",
            naive_date_to_things_timestamp(from)
        ));
    }
    if let Some(to) = filters.start_date_to {
        conditions.push(format!(
            "startDate <= {}",
            naive_date_to_things_timestamp(to)
        ));
    }
    if let Some(from) = filters.deadline_from {
        conditions.push(format!(
            "deadline >= {}",
            naive_date_to_things_timestamp(from)
        ));
    }
    if let Some(to) = filters.deadline_to {
        conditions.push(format!(
            "deadline <= {}",
            naive_date_to_things_timestamp(to)
        ));
    }
    // Keyset cursor: rows strictly older than `after`; the uuid tie-breaker
    // is bound as a `?` placeholder below, not interpolated.
    if let Some((after_seconds, _)) = after {
        conditions.push(format!(
            "(CAST(creationDate AS INTEGER) < {after_seconds} \
OR (CAST(creationDate AS INTEGER) = {after_seconds} AND uuid < ?))"
        ));
    }
    let where_clause = conditions.join(" AND ");
    let mut sql = format!(
        "SELECT {COLS} FROM TMTask WHERE {where_clause} \
ORDER BY CAST(creationDate AS INTEGER) DESC, uuid DESC"
    );
    // Tag / free-text filters must run in memory, so LIMIT/OFFSET can only
    // be pushed into SQL when neither is present.
    let has_post_filters =
        filters.tags.as_ref().is_some_and(|t| !t.is_empty()) || filters.search_query.is_some();
    if !has_post_filters {
        match (filters.limit, filters.offset) {
            (Some(limit), Some(offset)) => {
                sql.push_str(&format!(" LIMIT {limit} OFFSET {offset}"));
            }
            (Some(limit), None) => {
                sql.push_str(&format!(" LIMIT {limit}"));
            }
            (None, Some(offset)) => {
                // SQLite requires a LIMIT clause before OFFSET; -1 = no limit.
                sql.push_str(&format!(" LIMIT -1 OFFSET {offset}"));
            }
            (None, None) => {}
        }
    }
    let rows = if let Some((_, after_uuid)) = after {
        sqlx::query(&sql)
            .bind(after_uuid.to_string())
            .fetch_all(&self.pool)
            .await
    } else {
        sqlx::query(&sql).fetch_all(&self.pool).await
    }
    .map_err(|e| ThingsError::unknown(format!("Failed to query tasks: {e}")))?;
    let mut tasks = rows
        .iter()
        .map(map_task_row)
        .collect::<ThingsResult<Vec<Task>>>()?;
    // AND semantics: every requested tag must be present on the task.
    if let Some(ref filter_tags) = filters.tags {
        if !filter_tags.is_empty() {
            tasks.retain(|task| filter_tags.iter().all(|f| task.tags.contains(f)));
        }
    }
    // Case-insensitive substring match over title and notes.
    if let Some(ref q) = filters.search_query {
        let q_lower = q.to_lowercase();
        tasks.retain(|task| {
            task.title.to_lowercase().contains(&q_lower)
                || task
                    .notes
                    .as_deref()
                    .unwrap_or("")
                    .to_lowercase()
                    .contains(&q_lower)
        });
    }
    // In-memory pagination, applied only after the post-filters above.
    if has_post_filters {
        let offset = filters.offset.unwrap_or(0);
        tasks = tasks.into_iter().skip(offset).collect();
        if let Some(limit) = filters.limit {
            tasks.truncate(limit);
        }
    }
    Ok(tasks)
}
#[allow(clippy::too_many_arguments)]
#[instrument(skip(self))]
/// Searches completed ("logbook") todos with optional text, date-range,
/// project/area, and tag filters, newest completion first.
///
/// Tag filtering happens in memory (tags are stored as a serialized blob);
/// all other filters are pushed into SQL. Results are capped at 500 rows.
///
/// # Errors
/// Fails if the SQL query fails or a row cannot be mapped into a [`Task`].
pub async fn search_logbook(
    &self,
    search_text: Option<String>,
    from_date: Option<NaiveDate>,
    to_date: Option<NaiveDate>,
    project_uuid: Option<Uuid>,
    area_uuid: Option<Uuid>,
    tags: Option<Vec<String>>,
    limit: Option<u32>,
) -> ThingsResult<Vec<Task>> {
    // Default 50 rows, hard cap 500 to keep the query bounded.
    let result_limit = limit.unwrap_or(50).min(500);
    // Build the query once; this logic was previously duplicated verbatim
    // across the text-search and no-text-search branches.
    let mut q = String::from(
        "SELECT uuid, title, status, type, startDate, deadline, stopDate, project, area, heading, notes, cachedTags, creationDate, userModificationDate FROM TMTask WHERE status = 1 AND trashed = 0 AND type = 0",
    );
    if search_text.is_some() {
        q.push_str(" AND (title LIKE ? OR notes LIKE ?)");
    }
    if let Some(date) = from_date {
        // NOTE(review): stopDate is compared against a Unix-epoch timestamp
        // here, while other helpers use the Things 2001 epoch — confirm.
        let timestamp = date.and_hms_opt(0, 0, 0).unwrap().and_utc().timestamp() as f64;
        q.push_str(&format!(" AND stopDate >= {}", timestamp));
    }
    if let Some(date) = to_date {
        // Exclusive upper bound: midnight at the start of the following day.
        let end_date = date + chrono::Duration::days(1);
        let timestamp = end_date.and_hms_opt(0, 0, 0).unwrap().and_utc().timestamp() as f64;
        q.push_str(&format!(" AND stopDate < {}", timestamp));
    }
    // Uuid values render as fixed-format hex strings, so interpolation here
    // cannot inject arbitrary SQL.
    if let Some(uuid) = project_uuid {
        q.push_str(&format!(" AND project = '{}'", uuid));
    }
    if let Some(uuid) = area_uuid {
        q.push_str(&format!(" AND area = '{}'", uuid));
    }
    q.push_str(&format!(" ORDER BY stopDate DESC LIMIT {result_limit}"));
    let mut query = sqlx::query(&q);
    if let Some(ref text) = search_text {
        let pattern = format!("%{text}%");
        // One bind each for the title and notes LIKE placeholders.
        query = query.bind(pattern.clone()).bind(pattern);
    }
    let rows = query
        .fetch_all(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to search logbook: {e}")))?;
    let mut tasks = rows
        .iter()
        .map(map_task_row)
        .collect::<ThingsResult<Vec<Task>>>()?;
    // AND semantics: every requested tag must be present on the task.
    if let Some(ref filter_tags) = tags {
        if !filter_tags.is_empty() {
            tasks.retain(|task| {
                filter_tags
                    .iter()
                    .all(|filter_tag| task.tags.contains(filter_tag))
            });
        }
    }
    debug!("Found {} completed tasks in logbook", tasks.len());
    Ok(tasks)
}
#[instrument(skip(self))]
pub async fn get_inbox(&self, limit: Option<usize>) -> ThingsResult<Vec<Task>> {
let query = if let Some(limit) = limit {
format!("SELECT uuid, title, type, status, notes, startDate, deadline, stopDate, creationDate, userModificationDate, project, area, heading, cachedTags FROM TMTask WHERE type = 0 AND status = 0 AND project IS NULL AND trashed = 0 ORDER BY creationDate DESC LIMIT {limit}")
} else {
"SELECT uuid, title, type, status, notes, startDate, deadline, stopDate, creationDate, userModificationDate, project, area, heading, cachedTags FROM TMTask WHERE type = 0 AND status = 0 AND project IS NULL AND trashed = 0 ORDER BY creationDate DESC"
.to_string()
};
let rows = sqlx::query(&query)
.fetch_all(&self.pool)
.await
.map_err(|e| ThingsError::unknown(format!("Failed to fetch inbox tasks: {e}")))?;
let tasks = rows
.iter()
.map(map_task_row)
.collect::<ThingsResult<Vec<Task>>>()?;
Ok(tasks)
}
#[instrument(skip(self))]
pub async fn get_today(&self, limit: Option<usize>) -> ThingsResult<Vec<Task>> {
let query = if let Some(limit) = limit {
format!(
"SELECT uuid, title, type, status, notes, startDate, deadline, stopDate, creationDate, userModificationDate, project, area, heading, cachedTags FROM TMTask WHERE status = 0 AND todayIndex IS NOT NULL AND todayIndex != 0 AND trashed = 0 ORDER BY todayIndex ASC LIMIT {limit}"
)
} else {
"SELECT uuid, title, type, status, notes, startDate, deadline, stopDate, creationDate, userModificationDate, project, area, heading, cachedTags FROM TMTask WHERE status = 0 AND todayIndex IS NOT NULL AND todayIndex != 0 AND trashed = 0 ORDER BY todayIndex ASC".to_string()
};
let rows = sqlx::query(&query)
.fetch_all(&self.pool)
.await
.map_err(|e| ThingsError::unknown(format!("Failed to fetch today's tasks: {e}")))?;
let tasks = rows
.iter()
.map(map_task_row)
.collect::<ThingsResult<Vec<Task>>>()?;
Ok(tasks)
}
#[instrument(skip(self))]
/// Returns all projects, truncated to `limit` entries when provided.
///
/// # Errors
/// Propagates any failure from [`Self::get_all_projects`].
pub async fn get_projects(&self, limit: Option<usize>) -> ThingsResult<Vec<Project>> {
    let mut projects = self.get_all_projects().await?;
    // Previously the `limit` parameter was silently ignored; honor it here.
    if let Some(max) = limit {
        projects.truncate(max);
    }
    Ok(projects)
}
#[instrument(skip(self))]
/// Convenience alias for [`Self::get_all_areas`].
pub async fn get_areas(&self) -> ThingsResult<Vec<Area>> {
    self.get_all_areas().await
}
#[instrument(skip(self))]
/// Inserts a new TMTask row and returns its freshly generated UUID.
///
/// Validates the start/deadline range and, when given, that the referenced
/// project, area, and parent task exist before inserting.
///
/// # Errors
/// Fails on validation errors, tag serialization failure, or SQL errors.
pub async fn create_task(&self, request: CreateTaskRequest) -> ThingsResult<Uuid> {
    crate::database::validate_date_range(request.start_date, request.deadline)?;
    let uuid = Uuid::new_v4();
    let uuid_str = uuid.to_string();
    // Referential checks: fail fast with a domain error rather than relying
    // on SQLite foreign keys.
    if let Some(project_uuid) = &request.project_uuid {
        validators::validate_project_exists(&self.pool, project_uuid).await?;
    }
    if let Some(area_uuid) = &request.area_uuid {
        validators::validate_area_exists(&self.pool, area_uuid).await?;
    }
    if let Some(parent_uuid) = &request.parent_uuid {
        validators::validate_task_exists(&self.pool, parent_uuid).await?;
    }
    // Dates stored as seconds since the Things epoch (2001-01-01 UTC).
    let start_date_ts = request.start_date.map(naive_date_to_things_timestamp);
    let deadline_ts = request.deadline.map(naive_date_to_things_timestamp);
    // creation/modification timestamps stored as REAL Unix-epoch seconds.
    let now = Utc::now().timestamp() as f64;
    // Tags stored as a JSON blob in cachedTags.
    let cached_tags = request
        .tags
        .as_ref()
        .map(|tags| serialize_tags_to_blob(tags))
        .transpose()?;
    sqlx::query(
        r"
INSERT INTO TMTask (
uuid, title, type, status, notes,
startDate, deadline, project, area, heading,
cachedTags, creationDate, userModificationDate,
trashed
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
",
    )
    // Bind order must mirror the column list above.
    .bind(&uuid_str)
    .bind(&request.title)
    .bind(request.task_type.unwrap_or(TaskType::Todo) as i32)
    .bind(request.status.unwrap_or(TaskStatus::Incomplete) as i32)
    .bind(request.notes.as_ref())
    .bind(start_date_ts)
    .bind(deadline_ts)
    .bind(request.project_uuid.map(|u| u.to_string()))
    .bind(request.area_uuid.map(|u| u.to_string()))
    .bind(request.parent_uuid.map(|u| u.to_string()))
    .bind(cached_tags)
    .bind(now)
    .bind(now)
    // trashed = 0: new tasks start untrashed.
    .bind(0)
    .execute(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to create task: {e}")))?;
    info!("Created task with UUID: {}", uuid);
    Ok(uuid)
}
#[instrument(skip(self))]
/// Inserts a new project (TMTask row with type = 1) and returns its UUID.
///
/// # Errors
/// Fails on date-range validation, a missing referenced area, tag
/// serialization failure, or SQL errors.
pub async fn create_project(
    &self,
    request: crate::models::CreateProjectRequest,
) -> ThingsResult<Uuid> {
    crate::database::validate_date_range(request.start_date, request.deadline)?;
    let uuid = Uuid::new_v4();
    let uuid_str = uuid.to_string();
    // Fail fast if the referenced area does not exist.
    if let Some(area_uuid) = &request.area_uuid {
        validators::validate_area_exists(&self.pool, area_uuid).await?;
    }
    // Dates stored as seconds since the Things epoch (2001-01-01 UTC).
    let start_date_ts = request.start_date.map(naive_date_to_things_timestamp);
    let deadline_ts = request.deadline.map(naive_date_to_things_timestamp);
    let now = Utc::now().timestamp() as f64;
    let cached_tags = request
        .tags
        .as_ref()
        .map(|tags| serialize_tags_to_blob(tags))
        .transpose()?;
    // type = 1 (project), status = 0 (incomplete), no parent project or
    // heading, trashed = 0 — all hard-coded in the VALUES list below.
    sqlx::query(
        r"
INSERT INTO TMTask (
uuid, title, type, status, notes,
startDate, deadline, project, area, heading,
cachedTags, creationDate, userModificationDate,
trashed
) VALUES (?, ?, 1, 0, ?, ?, ?, NULL, ?, NULL, ?, ?, ?, 0)
",
    )
    .bind(&uuid_str)
    .bind(&request.title)
    .bind(request.notes.as_ref())
    .bind(start_date_ts)
    .bind(deadline_ts)
    .bind(request.area_uuid.map(|u| u.to_string()))
    .bind(cached_tags)
    .bind(now)
    .bind(now)
    .execute(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to create project: {e}")))?;
    info!("Created project with UUID: {}", uuid);
    Ok(uuid)
}
#[instrument(skip(self))]
/// Applies a partial update to an existing task.
///
/// Only fields present in the request are written; `userModificationDate`
/// is always refreshed. The bind order below must stay in sync with the
/// clause order that `TaskUpdateBuilder::build_query_string` emits.
///
/// # Errors
/// Fails if the task (or a referenced project/area) does not exist, the
/// merged date range is invalid, or the SQL update fails.
pub async fn update_task(&self, request: UpdateTaskRequest) -> ThingsResult<()> {
    validators::validate_task_exists(&self.pool, &request.uuid).await?;
    // Validate dates against the merged (request + current) values so that a
    // partial update cannot produce an inverted start/deadline pair.
    if request.start_date.is_some() || request.deadline.is_some() {
        if let Some(current_task) = self.get_task_by_uuid(&request.uuid).await? {
            let final_start = request.start_date.or(current_task.start_date);
            let final_deadline = request.deadline.or(current_task.deadline);
            crate::database::validate_date_range(final_start, final_deadline)?;
        }
    }
    if let Some(project_uuid) = &request.project_uuid {
        validators::validate_project_exists(&self.pool, project_uuid).await?;
    }
    if let Some(area_uuid) = &request.area_uuid {
        validators::validate_area_exists(&self.pool, area_uuid).await?;
    }
    // The builder decides which SET clauses appear; nothing to do if empty.
    let builder = TaskUpdateBuilder::from_request(&request);
    if builder.is_empty() {
        return Ok(());
    }
    let query_string = builder.build_query_string();
    let mut q = sqlx::query(&query_string);
    // Bind each present field in the same order the builder adds clauses.
    if let Some(title) = &request.title {
        q = q.bind(title);
    }
    if let Some(notes) = &request.notes {
        q = q.bind(notes);
    }
    // Dates converted to seconds since the Things epoch (2001-01-01 UTC).
    if let Some(start_date) = request.start_date {
        q = q.bind(naive_date_to_things_timestamp(start_date));
    }
    if let Some(deadline) = request.deadline {
        q = q.bind(naive_date_to_things_timestamp(deadline));
    }
    if let Some(status) = request.status {
        q = q.bind(status as i32);
    }
    if let Some(project_uuid) = request.project_uuid {
        q = q.bind(project_uuid.to_string());
    }
    if let Some(area_uuid) = request.area_uuid {
        q = q.bind(area_uuid.to_string());
    }
    if let Some(tags) = &request.tags {
        let cached_tags = serialize_tags_to_blob(tags)?;
        q = q.bind(cached_tags);
    }
    // userModificationDate is always refreshed; the final bind is the
    // WHERE-clause task uuid.
    let now = Utc::now().timestamp() as f64;
    q = q.bind(now).bind(request.uuid.to_string());
    q.execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to update task: {e}")))?;
    info!("Updated task with UUID: {}", request.uuid);
    Ok(())
}
#[instrument(skip(self))]
/// Looks up a project (TMTask row with type = 1) by UUID.
/// A project sitting in the trash is reported as absent (`Ok(None)`).
pub async fn get_project_by_uuid(&self, uuid: &Uuid) -> ThingsResult<Option<Project>> {
    let maybe_row = sqlx::query(
        r"
        SELECT
            uuid, title, status,
            area, notes,
            creationDate, userModificationDate,
            startDate, deadline,
            trashed, type
        FROM TMTask
        WHERE uuid = ? AND type = 1
        ",
    )
    .bind(uuid.to_string())
    .fetch_optional(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to fetch project: {e}")))?;
    match maybe_row {
        Some(row) => {
            // Trashed projects are hidden from callers.
            let trashed: i64 = row.get("trashed");
            if trashed == 1 {
                Ok(None)
            } else {
                Ok(Some(map_project_row(&row)))
            }
        }
        None => Ok(None),
    }
}
#[instrument(skip(self))]
/// Applies a partial update to a project (a TMTask row with type = 1).
///
/// Reuses TaskUpdateBuilder because projects live in the same TMTask table.
/// Only fields present in `request` are written; an empty request is a no-op.
///
/// # Errors
/// Fails if the project does not exist, a referenced area is missing, the
/// effective start/deadline range is invalid, or the SQL update fails.
pub async fn update_project(
    &self,
    request: crate::models::UpdateProjectRequest,
) -> ThingsResult<()> {
    validators::validate_project_exists(&self.pool, &request.uuid).await?;
    // Validate dates against the effective values (request merged over the
    // currently stored project).
    if request.start_date.is_some() || request.deadline.is_some() {
        if let Some(current_project) = self.get_project_by_uuid(&request.uuid).await? {
            let final_start = request.start_date.or(current_project.start_date);
            let final_deadline = request.deadline.or(current_project.deadline);
            crate::database::validate_date_range(final_start, final_deadline)?;
        }
    }
    if let Some(area_uuid) = &request.area_uuid {
        validators::validate_area_exists(&self.pool, area_uuid).await?;
    }
    // Register each present field with the builder; the bind order further
    // down MUST match this add_field order exactly.
    let mut builder = TaskUpdateBuilder::new();
    if request.title.is_some() {
        builder = builder.add_field("title");
    }
    if request.notes.is_some() {
        builder = builder.add_field("notes");
    }
    if request.start_date.is_some() {
        builder = builder.add_field("startDate");
    }
    if request.deadline.is_some() {
        builder = builder.add_field("deadline");
    }
    if request.area_uuid.is_some() {
        builder = builder.add_field("area");
    }
    if request.tags.is_some() {
        builder = builder.add_field("cachedTags");
    }
    if builder.is_empty() {
        // Nothing to update — avoid issuing an empty SET clause.
        return Ok(());
    }
    let query_str = builder.build_query_string();
    let mut q = sqlx::query(&query_str);
    if let Some(ref title) = request.title {
        q = q.bind(title);
    }
    if let Some(ref notes) = request.notes {
        q = q.bind(notes);
    }
    if let Some(start_date) = request.start_date {
        // Dates are stored as seconds since 2001-01-01 (Things epoch).
        q = q.bind(naive_date_to_things_timestamp(start_date));
    }
    if let Some(deadline) = request.deadline {
        q = q.bind(naive_date_to_things_timestamp(deadline));
    }
    if let Some(area_uuid) = request.area_uuid {
        q = q.bind(area_uuid.to_string());
    }
    if let Some(tags) = &request.tags {
        // Tags are denormalized into a JSON blob in cachedTags.
        let cached_tags = serialize_tags_to_blob(tags)?;
        q = q.bind(cached_tags);
    }
    // userModificationDate and the WHERE-clause uuid are always last.
    let now = Utc::now().timestamp() as f64;
    q = q.bind(now).bind(request.uuid.to_string());
    q.execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to update project: {e}")))?;
    info!("Updated project with UUID: {}", request.uuid);
    Ok(())
}
#[instrument(skip(self))]
/// Fetches a single task by UUID. A row in the trash is reported as missing.
pub async fn get_task_by_uuid(&self, uuid: &Uuid) -> ThingsResult<Option<Task>> {
    let maybe_row = sqlx::query(
        r"
        SELECT
            uuid, title, status, type,
            startDate, deadline, stopDate,
            project, area, heading,
            notes, cachedTags,
            creationDate, userModificationDate,
            trashed
        FROM TMTask
        WHERE uuid = ?
        ",
    )
    .bind(uuid.to_string())
    .fetch_optional(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to fetch task: {e}")))?;
    let Some(row) = maybe_row else {
        return Ok(None);
    };
    // Trashed tasks are invisible to callers.
    let trashed: i64 = row.get("trashed");
    if trashed == 1 {
        return Ok(None);
    }
    map_task_row(&row).map(Some)
}
#[instrument(skip(self))]
/// Marks a task completed: status = 1 with stopDate set to the current time.
pub async fn complete_task(&self, uuid: &Uuid) -> ThingsResult<()> {
    validators::validate_task_exists(&self.pool, uuid).await?;
    let completed_at = Utc::now().timestamp() as f64;
    let statement =
        "UPDATE TMTask SET status = 1, stopDate = ?, userModificationDate = ? WHERE uuid = ?";
    sqlx::query(statement)
        .bind(completed_at)
        .bind(completed_at)
        .bind(uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to complete task: {e}")))?;
    info!("Completed task with UUID: {}", uuid);
    Ok(())
}
#[instrument(skip(self))]
/// Reopens a completed task: status back to 0 and stopDate cleared.
pub async fn uncomplete_task(&self, uuid: &Uuid) -> ThingsResult<()> {
    validators::validate_task_exists(&self.pool, uuid).await?;
    let reopened_at = Utc::now().timestamp() as f64;
    let statement =
        "UPDATE TMTask SET status = 0, stopDate = NULL, userModificationDate = ? WHERE uuid = ?";
    sqlx::query(statement)
        .bind(reopened_at)
        .bind(uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to uncomplete task: {e}")))?;
    info!("Uncompleted task with UUID: {}", uuid);
    Ok(())
}
#[instrument(skip(self))]
/// Marks a project (TMTask, type = 1) as completed (status = 1).
///
/// `child_handling` controls the project's live (non-trashed) child tasks:
/// - `Error`: refuse when any live child exists
/// - `Cascade`: complete the children along with the project
/// - `Orphan`: detach the children (project = NULL) and leave them open
///
/// NOTE(review): the child update and the project update run as separate
/// statements, not one transaction — confirm partial failure is acceptable.
pub async fn complete_project(
    &self,
    uuid: &Uuid,
    child_handling: crate::models::ProjectChildHandling,
) -> ThingsResult<()> {
    validators::validate_project_exists(&self.pool, uuid).await?;
    // One timestamp reused for all writes so children and project agree.
    let now = Utc::now().timestamp() as f64;
    match child_handling {
        crate::models::ProjectChildHandling::Error => {
            // Only live (non-trashed) children block completion.
            let child_count: i64 = sqlx::query_scalar(
                "SELECT COUNT(*) FROM TMTask WHERE project = ? AND trashed = 0",
            )
            .bind(uuid.to_string())
            .fetch_one(&self.pool)
            .await
            .map_err(|e| {
                ThingsError::unknown(format!("Failed to check for child tasks: {e}"))
            })?;
            if child_count > 0 {
                return Err(ThingsError::unknown(format!(
                    "Project {} has {} child task(s). Use cascade or orphan mode to complete.",
                    uuid, child_count
                )));
            }
        }
        crate::models::ProjectChildHandling::Cascade => {
            // Bulk-complete every live child in a single statement.
            sqlx::query(
                "UPDATE TMTask SET status = 1, stopDate = ?, userModificationDate = ? WHERE project = ? AND trashed = 0",
            )
            .bind(now)
            .bind(now)
            .bind(uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to complete child tasks: {e}")))?;
        }
        crate::models::ProjectChildHandling::Orphan => {
            // Detach children; they stay active but lose their project link.
            sqlx::query(
                "UPDATE TMTask SET project = NULL, userModificationDate = ? WHERE project = ? AND trashed = 0",
            )
            .bind(now)
            .bind(uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to orphan child tasks: {e}")))?;
        }
    }
    // Finally complete the project row itself.
    sqlx::query(
        "UPDATE TMTask SET status = 1, stopDate = ?, userModificationDate = ? WHERE uuid = ?",
    )
    .bind(now)
    .bind(now)
    .bind(uuid.to_string())
    .execute(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to complete project: {e}")))?;
    info!("Completed project with UUID: {}", uuid);
    Ok(())
}
#[instrument(skip(self))]
/// Soft-deletes a task by setting its `trashed` flag.
///
/// Child tasks (rows whose `heading` points at this task) are handled per
/// `child_handling`:
/// - `Error`: refuse when any live child exists
/// - `Cascade`: trash the children too
/// - `Orphan`: detach the children (heading = NULL) and keep them
///
/// Fix over the previous version: instead of fetching every child row and
/// issuing one UPDATE per child (N+1 round trips), the children are counted
/// with a single aggregate query and updated with one bulk UPDATE over the
/// same `heading = ? AND trashed = 0` predicate — identical effect.
///
/// # Errors
/// Fails if the task does not exist, if `Error` mode finds live children,
/// or if any SQL statement fails.
pub async fn delete_task(
    &self,
    uuid: &Uuid,
    child_handling: DeleteChildHandling,
) -> ThingsResult<()> {
    validators::validate_task_exists(&self.pool, uuid).await?;
    // Count live children once; the count drives all three handling modes.
    let child_count: i64 =
        sqlx::query_scalar("SELECT COUNT(*) FROM TMTask WHERE heading = ? AND trashed = 0")
            .bind(uuid.to_string())
            .fetch_one(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to query child tasks: {e}")))?;
    if child_count > 0 {
        let now = Utc::now().timestamp() as f64;
        match child_handling {
            DeleteChildHandling::Error => {
                return Err(ThingsError::unknown(format!(
                    "Task {} has {} child task(s). Use cascade or orphan mode to delete.",
                    uuid, child_count
                )));
            }
            DeleteChildHandling::Cascade => {
                // One bulk UPDATE instead of a round trip per child.
                sqlx::query(
                    "UPDATE TMTask SET trashed = 1, userModificationDate = ? WHERE heading = ? AND trashed = 0",
                )
                .bind(now)
                .bind(uuid.to_string())
                .execute(&self.pool)
                .await
                .map_err(|e| {
                    ThingsError::unknown(format!("Failed to delete child task: {e}"))
                })?;
                info!("Cascade deleted {} child task(s)", child_count);
            }
            DeleteChildHandling::Orphan => {
                // Detach children from the deleted heading in one statement.
                sqlx::query(
                    "UPDATE TMTask SET heading = NULL, userModificationDate = ? WHERE heading = ? AND trashed = 0",
                )
                .bind(now)
                .bind(uuid.to_string())
                .execute(&self.pool)
                .await
                .map_err(|e| {
                    ThingsError::unknown(format!("Failed to orphan child task: {e}"))
                })?;
                info!("Orphaned {} child task(s)", child_count);
            }
        }
    }
    // Trash the task itself (soft delete — the row is kept).
    let now = Utc::now().timestamp() as f64;
    sqlx::query("UPDATE TMTask SET trashed = 1, userModificationDate = ? WHERE uuid = ?")
        .bind(now)
        .bind(uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to delete task: {e}")))?;
    info!("Deleted task with UUID: {}", uuid);
    Ok(())
}
#[instrument(skip(self))]
/// Soft-deletes a project (sets `trashed = 1`; the row is kept).
///
/// `child_handling` controls the project's live (non-trashed) child tasks:
/// - `Error`: refuse when any live child exists
/// - `Cascade`: trash the children too
/// - `Orphan`: detach the children (project = NULL) and keep them
///
/// NOTE(review): the child update and the project update run as separate
/// statements, not one transaction — confirm partial failure is acceptable.
pub async fn delete_project(
    &self,
    uuid: &Uuid,
    child_handling: crate::models::ProjectChildHandling,
) -> ThingsResult<()> {
    validators::validate_project_exists(&self.pool, uuid).await?;
    // One timestamp reused for all writes so children and project agree.
    let now = Utc::now().timestamp() as f64;
    match child_handling {
        crate::models::ProjectChildHandling::Error => {
            // Only live (non-trashed) children block deletion.
            let child_count: i64 = sqlx::query_scalar(
                "SELECT COUNT(*) FROM TMTask WHERE project = ? AND trashed = 0",
            )
            .bind(uuid.to_string())
            .fetch_one(&self.pool)
            .await
            .map_err(|e| {
                ThingsError::unknown(format!("Failed to check for child tasks: {e}"))
            })?;
            if child_count > 0 {
                return Err(ThingsError::unknown(format!(
                    "Project {} has {} child task(s). Use cascade or orphan mode to delete.",
                    uuid, child_count
                )));
            }
        }
        crate::models::ProjectChildHandling::Cascade => {
            // Bulk-trash every live child in a single statement.
            sqlx::query(
                "UPDATE TMTask SET trashed = 1, userModificationDate = ? WHERE project = ? AND trashed = 0",
            )
            .bind(now)
            .bind(uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to delete child tasks: {e}")))?;
        }
        crate::models::ProjectChildHandling::Orphan => {
            // Detach children; they stay active but lose their project link.
            sqlx::query(
                "UPDATE TMTask SET project = NULL, userModificationDate = ? WHERE project = ? AND trashed = 0",
            )
            .bind(now)
            .bind(uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to orphan child tasks: {e}")))?;
        }
    }
    // Finally trash the project row itself.
    sqlx::query("UPDATE TMTask SET trashed = 1, userModificationDate = ? WHERE uuid = ?")
        .bind(now)
        .bind(uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to delete project: {e}")))?;
    info!("Deleted project with UUID: {}", uuid);
    Ok(())
}
#[instrument(skip(self))]
/// Creates a new area at the end of the area list and returns its UUID.
///
/// NOTE(review): the MAX(`index`) read and the INSERT are two separate
/// statements; concurrent creators could race for the same index — confirm
/// whether single-writer access to the database is guaranteed.
pub async fn create_area(
    &self,
    request: crate::models::CreateAreaRequest,
) -> ThingsResult<Uuid> {
    let area_uuid = Uuid::new_v4();
    let created_at = Utc::now().timestamp() as f64;
    // Place the new area after the current last one; an empty table yields
    // NULL (None), which maps to index 0.
    let current_max: Option<i64> = sqlx::query_scalar("SELECT MAX(`index`) FROM TMArea")
        .fetch_one(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to get max area index: {e}")))?;
    sqlx::query(
        r"
        INSERT INTO TMArea (
            uuid, title, visible, `index`,
            creationDate, userModificationDate
        ) VALUES (?, ?, 1, ?, ?, ?)
        ",
    )
    .bind(area_uuid.to_string())
    .bind(&request.title)
    .bind(current_max.map_or(0, |i| i + 1))
    .bind(created_at)
    .bind(created_at)
    .execute(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to create area: {e}")))?;
    info!("Created area with UUID: {}", area_uuid);
    Ok(area_uuid)
}
#[instrument(skip(self))]
/// Renames an existing area and bumps its modification timestamp.
pub async fn update_area(&self, request: crate::models::UpdateAreaRequest) -> ThingsResult<()> {
    validators::validate_area_exists(&self.pool, &request.uuid).await?;
    let modified_at = Utc::now().timestamp() as f64;
    let update =
        sqlx::query("UPDATE TMArea SET title = ?, userModificationDate = ? WHERE uuid = ?")
            .bind(&request.title)
            .bind(modified_at)
            .bind(request.uuid.to_string());
    update
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to update area: {e}")))?;
    info!("Updated area with UUID: {}", request.uuid);
    Ok(())
}
#[instrument(skip(self))]
/// Hard-deletes an area after detaching the live projects that belong to it.
///
/// NOTE(review): only projects (type = 1) are detached here; tasks assigned
/// directly to the area would keep a dangling `area` reference — confirm
/// this matches the intended Things semantics.
pub async fn delete_area(&self, uuid: &Uuid) -> ThingsResult<()> {
    validators::validate_area_exists(&self.pool, uuid).await?;
    let detached_at = Utc::now().timestamp() as f64;
    // Detach live projects first so they survive the area's removal.
    sqlx::query(
        "UPDATE TMTask SET area = NULL, userModificationDate = ? WHERE area = ? AND type = 1 AND trashed = 0",
    )
    .bind(detached_at)
    .bind(uuid.to_string())
    .execute(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to orphan projects in area: {e}")))?;
    // Unlike tasks and projects, areas are removed outright (no trash flag).
    sqlx::query("DELETE FROM TMArea WHERE uuid = ?")
        .bind(uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to delete area: {e}")))?;
    info!("Deleted area with UUID: {}", uuid);
    Ok(())
}
#[instrument(skip(self))]
/// Finds a tag whose title equals `normalized` case-insensitively
/// (SQL `LOWER(title) = LOWER(?)`), returning a fully populated `Tag`.
///
/// The usage count is derived by substring-matching the quoted title inside
/// each task's cachedTags JSON blob — a heuristic, not a join.
/// NOTE(review): titles containing `%`, `_`, or `"` can skew the LIKE-based
/// count — confirm whether exact counting matters here.
pub async fn find_tag_by_normalized_title(
    &self,
    normalized: &str,
) -> ThingsResult<Option<crate::models::Tag>> {
    let row = sqlx::query(
        "SELECT uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate
         FROM TMTag
         WHERE LOWER(title) = LOWER(?)",
    )
    .bind(normalized)
    .fetch_optional(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to find tag by title: {e}")))?;
    if let Some(row) = row {
        // Things stores non-RFC UUIDs; fall back to a deterministic mapping
        // when the stored string is not parseable as a standard UUID.
        let uuid_str: String = row.get("uuid");
        let uuid =
            Uuid::parse_str(&uuid_str).unwrap_or_else(|_| things_uuid_to_uuid(&uuid_str));
        let title: String = row.get("title");
        let shortcut: Option<String> = row.get("shortcut");
        let parent_str: Option<String> = row.get("parent");
        let parent_uuid =
            parent_str.map(|s| Uuid::parse_str(&s).unwrap_or_else(|_| things_uuid_to_uuid(&s)));
        // Timestamps are stored as f64; clamp/validate before converting,
        // falling back to "now" on out-of-range values.
        let creation_ts: f64 = row.get("creationDate");
        let created = {
            let ts = safe_timestamp_convert(creation_ts);
            DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
        };
        let modification_ts: f64 = row.get("userModificationDate");
        let modified = {
            let ts = safe_timestamp_convert(modification_ts);
            DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
        };
        let used_ts: Option<f64> = row.get("usedDate");
        let last_used = used_ts.and_then(|ts| {
            let ts_i64 = safe_timestamp_convert(ts);
            DateTime::from_timestamp(ts_i64, 0)
        });
        // Heuristic usage count: number of live tasks whose cachedTags JSON
        // contains the quoted title. Errors degrade silently to 0.
        let usage_count: i64 = sqlx::query_scalar(
            "SELECT COUNT(*) FROM TMTask
             WHERE cachedTags IS NOT NULL
             AND json_extract(cachedTags, '$') LIKE ?
             AND trashed = 0",
        )
        .bind(format!("%\"{}\"%", title))
        .fetch_one(&self.pool)
        .await
        .unwrap_or(0);
        Ok(Some(crate::models::Tag {
            uuid,
            title,
            shortcut,
            parent_uuid,
            created,
            modified,
            usage_count: usage_count as u32,
            last_used,
        }))
    } else {
        Ok(None)
    }
}
#[instrument(skip(self))]
/// Returns all tags whose title similarity to `title` is at least
/// `min_similarity`, sorted best match first.
pub async fn find_similar_tags(
    &self,
    title: &str,
    min_similarity: f32,
) -> ThingsResult<Vec<crate::models::TagMatch>> {
    use crate::database::tag_utils::{calculate_similarity, get_match_type};
    let candidates = self.get_all_tags().await?;
    let mut results = Vec::new();
    for candidate in candidates {
        let score = calculate_similarity(title, &candidate.title);
        if score < min_similarity {
            continue;
        }
        let match_type = get_match_type(title, &candidate.title, min_similarity);
        results.push(crate::models::TagMatch {
            tag: candidate,
            similarity_score: score,
            match_type,
        });
    }
    // Highest similarity first; incomparable (NaN) scores compare as equal.
    results.sort_by(|left, right| {
        right
            .similarity_score
            .partial_cmp(&left.similarity_score)
            .unwrap_or(std::cmp::Ordering::Equal)
    });
    Ok(results)
}
#[instrument(skip(self))]
pub async fn search_tags(&self, query: &str) -> ThingsResult<Vec<crate::models::Tag>> {
let rows = sqlx::query(
"SELECT uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate
FROM TMTag
WHERE title LIKE ?
ORDER BY title",
)
.bind(format!("%{}%", query))
.fetch_all(&self.pool)
.await
.map_err(|e| ThingsError::unknown(format!("Failed to search tags: {e}")))?;
let mut tags = Vec::new();
for row in rows {
let uuid_str: String = row.get("uuid");
let uuid =
Uuid::parse_str(&uuid_str).unwrap_or_else(|_| things_uuid_to_uuid(&uuid_str));
let title: String = row.get("title");
let shortcut: Option<String> = row.get("shortcut");
let parent_str: Option<String> = row.get("parent");
let parent_uuid =
parent_str.map(|s| Uuid::parse_str(&s).unwrap_or_else(|_| things_uuid_to_uuid(&s)));
let creation_ts: f64 = row.get("creationDate");
let created = {
let ts = safe_timestamp_convert(creation_ts);
DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
};
let modification_ts: f64 = row.get("userModificationDate");
let modified = {
let ts = safe_timestamp_convert(modification_ts);
DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
};
let used_ts: Option<f64> = row.get("usedDate");
let last_used = used_ts.and_then(|ts| {
let ts_i64 = safe_timestamp_convert(ts);
DateTime::from_timestamp(ts_i64, 0)
});
let usage_count: i64 = sqlx::query_scalar(
"SELECT COUNT(*) FROM TMTask
WHERE cachedTags IS NOT NULL
AND json_extract(cachedTags, '$') LIKE ?
AND trashed = 0",
)
.bind(format!("%\"{}\"%", title))
.fetch_one(&self.pool)
.await
.unwrap_or(0);
tags.push(crate::models::Tag {
uuid,
title,
shortcut,
parent_uuid,
created,
modified,
usage_count: usage_count as u32,
last_used,
});
}
Ok(tags)
}
#[instrument(skip(self))]
pub async fn get_all_tags(&self) -> ThingsResult<Vec<crate::models::Tag>> {
let rows = sqlx::query(
"SELECT uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate
FROM TMTag
ORDER BY title",
)
.fetch_all(&self.pool)
.await
.map_err(|e| ThingsError::unknown(format!("Failed to get all tags: {e}")))?;
let mut tags = Vec::new();
for row in rows {
let uuid_str: String = row.get("uuid");
let uuid =
Uuid::parse_str(&uuid_str).unwrap_or_else(|_| things_uuid_to_uuid(&uuid_str));
let title: String = row.get("title");
let shortcut: Option<String> = row.get("shortcut");
let parent_str: Option<String> = row.get("parent");
let parent_uuid =
parent_str.map(|s| Uuid::parse_str(&s).unwrap_or_else(|_| things_uuid_to_uuid(&s)));
let creation_ts: f64 = row.get("creationDate");
let created = {
let ts = safe_timestamp_convert(creation_ts);
DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
};
let modification_ts: f64 = row.get("userModificationDate");
let modified = {
let ts = safe_timestamp_convert(modification_ts);
DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
};
let used_ts: Option<f64> = row.get("usedDate");
let last_used = used_ts.and_then(|ts| {
let ts_i64 = safe_timestamp_convert(ts);
DateTime::from_timestamp(ts_i64, 0)
});
let usage_count: i64 = sqlx::query_scalar(
"SELECT COUNT(*) FROM TMTask
WHERE cachedTags IS NOT NULL
AND json_extract(cachedTags, '$') LIKE ?
AND trashed = 0",
)
.bind(format!("%\"{}\"%", title))
.fetch_one(&self.pool)
.await
.unwrap_or(0);
tags.push(crate::models::Tag {
uuid,
title,
shortcut,
parent_uuid,
created,
modified,
usage_count: usage_count as u32,
last_used,
});
}
Ok(tags)
}
#[instrument(skip(self))]
/// Returns at most `limit` tags, ordered by descending usage count.
pub async fn get_popular_tags(&self, limit: usize) -> ThingsResult<Vec<crate::models::Tag>> {
    let mut tags = self.get_all_tags().await?;
    // Descending by usage; stable sort preserves alphabetical order on ties.
    tags.sort_by(|a, b| b.usage_count.cmp(&a.usage_count));
    tags.truncate(limit);
    Ok(tags)
}
#[instrument(skip(self))]
pub async fn get_recent_tags(&self, limit: usize) -> ThingsResult<Vec<crate::models::Tag>> {
let rows = sqlx::query(
"SELECT uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate
FROM TMTag
WHERE usedDate IS NOT NULL
ORDER BY usedDate DESC
LIMIT ?",
)
.bind(limit as i64)
.fetch_all(&self.pool)
.await
.map_err(|e| ThingsError::unknown(format!("Failed to get recent tags: {e}")))?;
let mut tags = Vec::new();
for row in rows {
let uuid_str: String = row.get("uuid");
let uuid =
Uuid::parse_str(&uuid_str).unwrap_or_else(|_| things_uuid_to_uuid(&uuid_str));
let title: String = row.get("title");
let shortcut: Option<String> = row.get("shortcut");
let parent_str: Option<String> = row.get("parent");
let parent_uuid =
parent_str.map(|s| Uuid::parse_str(&s).unwrap_or_else(|_| things_uuid_to_uuid(&s)));
let creation_ts: f64 = row.get("creationDate");
let created = {
let ts = safe_timestamp_convert(creation_ts);
DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
};
let modification_ts: f64 = row.get("userModificationDate");
let modified = {
let ts = safe_timestamp_convert(modification_ts);
DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
};
let used_ts: Option<f64> = row.get("usedDate");
let last_used = used_ts.and_then(|ts| {
let ts_i64 = safe_timestamp_convert(ts);
DateTime::from_timestamp(ts_i64, 0)
});
let usage_count: i64 = sqlx::query_scalar(
"SELECT COUNT(*) FROM TMTask
WHERE cachedTags IS NOT NULL
AND json_extract(cachedTags, '$') LIKE ?
AND trashed = 0",
)
.bind(format!("%\"{}\"%", title))
.fetch_one(&self.pool)
.await
.unwrap_or(0);
tags.push(crate::models::Tag {
uuid,
title,
shortcut,
parent_uuid,
created,
modified,
usage_count: usage_count as u32,
last_used,
});
}
Ok(tags)
}
#[instrument(skip(self))]
/// Creates a tag unless an equivalent or confusingly similar one exists.
///
/// Resolution order:
/// 1. exact (normalized) title match — returns the existing tag, creates nothing
/// 2. similar tags (similarity >= 0.8) — returns them as suggestions, creates nothing
/// 3. otherwise inserts a fresh tag and returns its UUID
pub async fn create_tag_smart(
    &self,
    request: crate::models::CreateTagRequest,
) -> ThingsResult<crate::models::TagCreationResult> {
    use crate::database::tag_utils::normalize_tag_title;
    use crate::models::TagCreationResult;
    let normalized = normalize_tag_title(&request.title);
    // Exact duplicate: hand back the existing tag instead of creating one.
    if let Some(tag) = self.find_tag_by_normalized_title(&normalized).await? {
        return Ok(TagCreationResult::Existing { tag, is_new: false });
    }
    // Near-duplicates: surface them as suggestions instead of creating.
    let similar_tags = self.find_similar_tags(&normalized, 0.8).await?;
    if !similar_tags.is_empty() {
        return Ok(TagCreationResult::SimilarFound {
            similar_tags,
            requested_title: request.title,
        });
    }
    let new_uuid = Uuid::new_v4();
    let created_at = Utc::now().timestamp() as f64;
    sqlx::query(
        "INSERT INTO TMTag (uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate, `index`)
         VALUES (?, ?, ?, ?, ?, ?, NULL, 0)"
    )
    .bind(new_uuid.to_string())
    .bind(&request.title)
    .bind(request.shortcut.as_ref())
    .bind(request.parent_uuid.map(|u| u.to_string()))
    .bind(created_at)
    .bind(created_at)
    .execute(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to create tag: {e}")))?;
    info!("Created tag with UUID: {}", new_uuid);
    Ok(TagCreationResult::Created {
        uuid: new_uuid,
        is_new: true,
    })
}
#[instrument(skip(self))]
/// Inserts a tag unconditionally — no duplicate or similarity checks.
/// Prefer `create_tag_smart` unless duplicates are explicitly acceptable.
pub async fn create_tag_force(
    &self,
    request: crate::models::CreateTagRequest,
) -> ThingsResult<Uuid> {
    let tag_uuid = Uuid::new_v4();
    let created_at = Utc::now().timestamp() as f64;
    sqlx::query(
        "INSERT INTO TMTag (uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate, `index`)
         VALUES (?, ?, ?, ?, ?, ?, NULL, 0)"
    )
    .bind(tag_uuid.to_string())
    .bind(&request.title)
    .bind(request.shortcut.as_ref())
    .bind(request.parent_uuid.map(|u| u.to_string()))
    .bind(created_at)
    .bind(created_at)
    .execute(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to create tag: {e}")))?;
    info!("Forcefully created tag with UUID: {}", tag_uuid);
    Ok(tag_uuid)
}
#[instrument(skip(self))]
/// Partially updates a tag's title, shortcut, and/or parent.
///
/// Fixes over the previous version: existence is checked directly by UUID
/// (the old code first searched tags by *title* using the UUID string — a
/// lookup that never matches and costs an extra usage-count query), and
/// `userModificationDate` is bound as a REAL (f64) rather than a stringified
/// number routed through a `Vec<String>`.
///
/// # Errors
/// Fails if the tag does not exist, if renaming would collide with another
/// tag's (normalized) title, or if the SQL update fails.
pub async fn update_tag(&self, request: crate::models::UpdateTagRequest) -> ThingsResult<()> {
    use crate::database::tag_utils::normalize_tag_title;
    // Existence check by primary key.
    let existing_row = sqlx::query("SELECT 1 FROM TMTag WHERE uuid = ?")
        .bind(request.uuid.to_string())
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to validate tag: {e}")))?;
    if existing_row.is_none() {
        return Err(ThingsError::unknown(format!(
            "Tag not found: {}",
            request.uuid
        )));
    }
    // Renaming must not collide with a *different* tag's normalized title.
    if let Some(new_title) = &request.title {
        let normalized = normalize_tag_title(new_title);
        if let Some(duplicate) = self.find_tag_by_normalized_title(&normalized).await? {
            if duplicate.uuid != request.uuid {
                return Err(ThingsError::unknown(format!(
                    "Tag with title '{}' already exists",
                    new_title
                )));
            }
        }
    }
    // Assemble the SET clause from whichever fields are present.
    let mut updates: Vec<&str> = Vec::new();
    if request.title.is_some() {
        updates.push("title = ?");
    }
    if request.shortcut.is_some() {
        updates.push("shortcut = ?");
    }
    if request.parent_uuid.is_some() {
        updates.push("parent = ?");
    }
    if updates.is_empty() {
        // Nothing to update — avoid issuing an empty SET clause.
        return Ok(());
    }
    updates.push("userModificationDate = ?");
    let sql = format!("UPDATE TMTag SET {} WHERE uuid = ?", updates.join(", "));
    // Bind order mirrors the push order above; each value keeps its SQL type.
    let mut query = sqlx::query(&sql);
    if let Some(title) = &request.title {
        query = query.bind(title);
    }
    if let Some(shortcut) = &request.shortcut {
        query = query.bind(shortcut);
    }
    if let Some(parent_uuid) = request.parent_uuid {
        query = query.bind(parent_uuid.to_string());
    }
    let now = Utc::now().timestamp() as f64;
    query = query.bind(now).bind(request.uuid.to_string());
    query
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to update tag: {e}")))?;
    info!("Updated tag with UUID: {}", request.uuid);
    Ok(())
}
#[instrument(skip(self))]
/// Deletes a tag row by UUID.
///
/// Fix over the previous version: existence is checked directly by UUID —
/// the old code first called `find_tag_by_normalized_title` with the UUID
/// *string as a title*, a lookup that never matches and costs an extra
/// usage-count query before falling back to exactly this check.
///
/// `remove_from_tasks` is accepted for API compatibility; scrubbing the tag
/// out of tasks' cachedTags blobs is not yet implemented (only logged).
///
/// # Errors
/// Fails if the tag does not exist or the DELETE statement fails.
pub async fn delete_tag(&self, uuid: &Uuid, remove_from_tasks: bool) -> ThingsResult<()> {
    let row = sqlx::query("SELECT title FROM TMTag WHERE uuid = ?")
        .bind(uuid.to_string())
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to find tag: {e}")))?;
    if row.is_none() {
        return Err(ThingsError::unknown(format!("Tag not found: {}", uuid)));
    }
    if remove_from_tasks {
        info!("Removing tag {} from all tasks (not yet implemented)", uuid);
    }
    sqlx::query("DELETE FROM TMTag WHERE uuid = ?")
        .bind(uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to delete tag: {e}")))?;
    info!("Deleted tag with UUID: {}", uuid);
    Ok(())
}
#[instrument(skip(self))]
/// Merges `source_uuid` into `target_uuid`: the target's timestamps are
/// refreshed and the source tag row is deleted.
///
/// NOTE(review): tasks' cachedTags blobs are not rewritten yet, so tasks
/// keep referencing the source tag's title (the code logs this caveat).
pub async fn merge_tags(&self, source_uuid: &Uuid, target_uuid: &Uuid) -> ThingsResult<()> {
    // Both ends must exist before anything is mutated.
    let source_exists = sqlx::query("SELECT title FROM TMTag WHERE uuid = ?")
        .bind(source_uuid.to_string())
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to find source tag: {e}")))?
        .is_some();
    if !source_exists {
        return Err(ThingsError::unknown(format!(
            "Source tag not found: {}",
            source_uuid
        )));
    }
    let target_exists = sqlx::query("SELECT title FROM TMTag WHERE uuid = ?")
        .bind(target_uuid.to_string())
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to find target tag: {e}")))?
        .is_some();
    if !target_exists {
        return Err(ThingsError::unknown(format!(
            "Target tag not found: {}",
            target_uuid
        )));
    }
    info!(
        "Merging tag {} into {} (tag replacement in tasks not yet fully implemented)",
        source_uuid, target_uuid
    );
    let merged_at = Utc::now().timestamp() as f64;
    sqlx::query("UPDATE TMTag SET userModificationDate = ?, usedDate = ? WHERE uuid = ?")
        .bind(merged_at)
        .bind(merged_at)
        .bind(target_uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to update target tag: {e}")))?;
    sqlx::query("DELETE FROM TMTag WHERE uuid = ?")
        .bind(source_uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to delete source tag: {e}")))?;
    info!("Merged tag {} into {}", source_uuid, target_uuid);
    Ok(())
}
#[instrument(skip(self))]
/// Attaches a tag (matched by normalized title) to a task's cachedTags blob.
///
/// Resolution: an exact normalized match is reused; otherwise, if similar
/// tags (similarity >= 0.8) exist, they are returned as suggestions and
/// nothing is assigned; otherwise a new tag is force-created and assigned.
/// Adding a tag the task already carries is a silent no-op.
///
/// # Errors
/// Fails if the task does not exist, tag creation/retrieval fails, or the
/// blob cannot be (de)serialized.
pub async fn add_tag_to_task(
    &self,
    task_uuid: &Uuid,
    tag_title: &str,
) -> ThingsResult<crate::models::TagAssignmentResult> {
    use crate::database::tag_utils::normalize_tag_title;
    use crate::models::TagAssignmentResult;
    validators::validate_task_exists(&self.pool, task_uuid).await?;
    let normalized = normalize_tag_title(tag_title);
    let tag = if let Some(existing_tag) = self.find_tag_by_normalized_title(&normalized).await?
    {
        existing_tag
    } else {
        // No exact match — offer near-duplicates instead of creating one.
        let similar_tags = self.find_similar_tags(&normalized, 0.8).await?;
        if !similar_tags.is_empty() {
            return Ok(TagAssignmentResult::Suggestions { similar_tags });
        }
        let request = crate::models::CreateTagRequest {
            title: tag_title.to_string(),
            shortcut: None,
            parent_uuid: None,
        };
        // Create then re-fetch so we get the fully populated Tag struct.
        let _uuid = self.create_tag_force(request).await?;
        self.find_tag_by_normalized_title(&normalized)
            .await?
            .ok_or_else(|| ThingsError::unknown("Failed to retrieve newly created tag"))?
    };
    // Tags are denormalized as a JSON array in the task's cachedTags blob.
    let row = sqlx::query("SELECT cachedTags FROM TMTask WHERE uuid = ?")
        .bind(task_uuid.to_string())
        .fetch_one(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to fetch task tags: {e}")))?;
    let cached_tags_blob: Option<Vec<u8>> = row.get("cachedTags");
    let mut tags: Vec<String> = if let Some(blob) = cached_tags_blob {
        deserialize_tags_from_blob(&blob)?
    } else {
        Vec::new()
    };
    // Exact-title containment check; only write when something changed.
    if !tags.contains(&tag.title) {
        tags.push(tag.title.clone());
        let cached_tags = serialize_tags_to_blob(&tags)?;
        let now = Utc::now().timestamp() as f64;
        sqlx::query(
            "UPDATE TMTask SET cachedTags = ?, userModificationDate = ? WHERE uuid = ?",
        )
        .bind(cached_tags)
        .bind(now)
        .bind(task_uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to update task tags: {e}")))?;
        // Record that the tag was just used (drives get_recent_tags).
        sqlx::query("UPDATE TMTag SET usedDate = ?, userModificationDate = ? WHERE uuid = ?")
            .bind(now)
            .bind(now)
            .bind(tag.uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to update tag usedDate: {e}")))?;
        info!("Added tag '{}' to task {}", tag.title, task_uuid);
    }
    Ok(TagAssignmentResult::Assigned { tag_uuid: tag.uuid })
}
#[instrument(skip(self))]
/// Removes a tag (matched by normalized title) from a task's cachedTags blob.
///
/// No-op when the task has no tags or none match. When the last tag is
/// removed, cachedTags is set to NULL rather than an empty JSON array.
///
/// Improvement over the previous version: the two near-identical UPDATE
/// branches (blob vs NULL) are collapsed into one statement by binding an
/// `Option<Vec<u8>>` — sqlx encodes `None` as SQL NULL.
///
/// # Errors
/// Fails if the task does not exist or the blob cannot be (de)serialized.
pub async fn remove_tag_from_task(
    &self,
    task_uuid: &Uuid,
    tag_title: &str,
) -> ThingsResult<()> {
    use crate::database::tag_utils::normalize_tag_title;
    validators::validate_task_exists(&self.pool, task_uuid).await?;
    let row = sqlx::query("SELECT cachedTags FROM TMTask WHERE uuid = ?")
        .bind(task_uuid.to_string())
        .fetch_one(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to fetch task tags: {e}")))?;
    let cached_tags_blob: Option<Vec<u8>> = row.get("cachedTags");
    // A task with no cachedTags blob has nothing to remove.
    let Some(blob) = cached_tags_blob else {
        return Ok(());
    };
    let mut tags: Vec<String> = deserialize_tags_from_blob(&blob)?;
    let normalized = normalize_tag_title(tag_title);
    let original_len = tags.len();
    tags.retain(|t| normalize_tag_title(t) != normalized);
    // Only write when a tag was actually dropped.
    if tags.len() < original_len {
        let cached_tags: Option<Vec<u8>> = if tags.is_empty() {
            None // last tag removed — store NULL, not an empty array
        } else {
            Some(serialize_tags_to_blob(&tags)?)
        };
        let now = Utc::now().timestamp() as f64;
        sqlx::query("UPDATE TMTask SET cachedTags = ?, userModificationDate = ? WHERE uuid = ?")
            .bind(cached_tags)
            .bind(now)
            .bind(task_uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to update task tags: {e}")))?;
        info!("Removed tag '{}' from task {}", tag_title, task_uuid);
    }
    Ok(())
}
#[instrument(skip(self))]
/// Replaces a task's entire tag set with `tag_titles`.
///
/// Phases:
/// 1. resolve each requested title against existing tags (collecting
///    similarity suggestions for unknown titles, but still using them)
/// 2. force-create any titles that still have no tag row
/// 3. overwrite the task's cachedTags blob (NULL when the set is empty)
/// 4. bump usedDate on every assigned tag
///
/// Returns the collected suggestions; note that unresolved titles are
/// assigned anyway — suggestions are informational, not a veto.
///
/// # Errors
/// Fails if the task does not exist, tag creation fails, or the blob
/// cannot be serialized.
pub async fn set_task_tags(
    &self,
    task_uuid: &Uuid,
    tag_titles: Vec<String>,
) -> ThingsResult<Vec<crate::models::TagMatch>> {
    use crate::database::tag_utils::normalize_tag_title;
    validators::validate_task_exists(&self.pool, task_uuid).await?;
    let mut resolved_tags = Vec::new();
    let mut suggestions = Vec::new();
    // Phase 1: resolve titles; an exact match canonicalizes to the stored
    // title, otherwise the requested title is kept (plus any suggestions).
    for title in tag_titles {
        let normalized = normalize_tag_title(&title);
        if let Some(existing_tag) = self.find_tag_by_normalized_title(&normalized).await? {
            resolved_tags.push(existing_tag.title);
        } else {
            let similar_tags = self.find_similar_tags(&normalized, 0.8).await?;
            if !similar_tags.is_empty() {
                suggestions.extend(similar_tags);
            }
            resolved_tags.push(title);
        }
    }
    // Phase 2: make sure every resolved title has a TMTag row.
    for title in &resolved_tags {
        let normalized = normalize_tag_title(title);
        if self
            .find_tag_by_normalized_title(&normalized)
            .await?
            .is_none()
        {
            let request = crate::models::CreateTagRequest {
                title: title.clone(),
                shortcut: None,
                parent_uuid: None,
            };
            self.create_tag_force(request).await?;
        }
    }
    // Phase 3: overwrite cachedTags — NULL when the new set is empty.
    let cached_tags = if resolved_tags.is_empty() {
        None
    } else {
        Some(serialize_tags_to_blob(&resolved_tags)?)
    };
    let now = Utc::now().timestamp() as f64;
    if let Some(cached_tags_val) = cached_tags {
        sqlx::query(
            "UPDATE TMTask SET cachedTags = ?, userModificationDate = ? WHERE uuid = ?",
        )
        .bind(cached_tags_val)
        .bind(now)
        .bind(task_uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to update task tags: {e}")))?;
    } else {
        sqlx::query(
            "UPDATE TMTask SET cachedTags = NULL, userModificationDate = ? WHERE uuid = ?",
        )
        .bind(now)
        .bind(task_uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to update task tags: {e}")))?;
    }
    // Phase 4: record usage on each assigned tag (drives get_recent_tags).
    for title in &resolved_tags {
        let normalized = normalize_tag_title(title);
        if let Some(tag) = self.find_tag_by_normalized_title(&normalized).await? {
            sqlx::query(
                "UPDATE TMTag SET usedDate = ?, userModificationDate = ? WHERE uuid = ?",
            )
            .bind(now)
            .bind(now)
            .bind(tag.uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to update tag usedDate: {e}")))?;
        }
    }
    info!("Set tags on task {} to: {:?}", task_uuid, resolved_tags);
    Ok(suggestions)
}
/// Returns up to `limit` tag completions for `partial_input`, best first.
///
/// Scoring tiers (higher wins): prefix match on the normalized title (3.x),
/// substring match (2.x), then fuzzy similarity of at least 0.6. Within a
/// tier, more frequently used tags score slightly higher.
///
/// # Errors
/// Propagates any failure from loading the tag list.
#[instrument(skip(self))]
pub async fn get_tag_completions(
    &self,
    partial_input: &str,
    limit: usize,
) -> ThingsResult<Vec<crate::models::TagCompletion>> {
    use crate::database::tag_utils::{calculate_similarity, normalize_tag_title};
    let normalized_input = normalize_tag_title(partial_input);
    let all_tags = self.get_all_tags().await?;
    let mut completions: Vec<crate::models::TagCompletion> = all_tags
        .into_iter()
        .filter_map(|tag| {
            let normalized_tag = normalize_tag_title(&tag.title);
            let score = if normalized_tag.starts_with(&normalized_input) {
                3.0 + (tag.usage_count as f32 / 100.0)
            } else if normalized_tag.contains(&normalized_input) {
                2.0 + (tag.usage_count as f32 / 100.0)
            } else {
                // NOTE(review): similarity is computed on the raw strings
                // while the tiers above use normalized ones — confirm that
                // `calculate_similarity` normalizes internally.
                let similarity = calculate_similarity(partial_input, &tag.title);
                if similarity >= 0.6 {
                    similarity + (tag.usage_count as f32 / 1000.0)
                } else {
                    return None;
                }
            };
            Some(crate::models::TagCompletion { tag, score })
        })
        .collect();
    // `total_cmp` gives a deterministic total order for f32, avoiding the
    // NaN-silencing `partial_cmp(..).unwrap_or(Equal)` pattern.
    completions.sort_by(|a, b| b.score.total_cmp(&a.score));
    completions.truncate(limit);
    Ok(completions)
}
/// Computes usage statistics for the tag identified by `uuid`.
///
/// Scans every non-trashed task carrying cached tags once: tasks that
/// contain the tag (ASCII case-insensitive match) are collected, and all
/// co-occurring tags are counted as related, most frequent first.
///
/// # Errors
/// Fails when the tag does not exist or a query fails.
#[instrument(skip(self))]
pub async fn get_tag_statistics(
    &self,
    uuid: &Uuid,
) -> ThingsResult<crate::models::TagStatistics> {
    let tag_row = sqlx::query("SELECT title FROM TMTag WHERE uuid = ?")
        .bind(uuid.to_string())
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to find tag: {e}")))?;
    let title: String = tag_row
        .ok_or_else(|| ThingsError::unknown(format!("Tag not found: {}", uuid)))?
        .get("title");
    let task_rows = sqlx::query(
        "SELECT uuid, cachedTags FROM TMTask
         WHERE cachedTags IS NOT NULL
         AND trashed = 0",
    )
    .fetch_all(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to query tasks with tag: {e}")))?;
    let mut task_uuids = Vec::new();
    let mut related_tags: std::collections::HashMap<String, u32> =
        std::collections::HashMap::new();
    // Single pass over the fetched rows. The previous implementation
    // re-queried each matching task by its round-tripped UUID, which was an
    // N+1 query pattern and silently missed rows whose stored uuid text does
    // not survive Uuid parsing/formatting (e.g. uppercase hex).
    for row in task_rows {
        let uuid_str: String = row.get("uuid");
        let cached_tags_blob: Option<Vec<u8>> = row.get("cachedTags");
        let blob = match cached_tags_blob {
            Some(blob) => blob,
            None => continue,
        };
        // Undecodable blobs are skipped rather than failing the whole call.
        let tags = match deserialize_tags_from_blob(&blob) {
            Ok(tags) => tags,
            Err(_) => continue,
        };
        if !tags.iter().any(|t| t.eq_ignore_ascii_case(&title)) {
            continue;
        }
        let task_uuid =
            Uuid::parse_str(&uuid_str).unwrap_or_else(|_| things_uuid_to_uuid(&uuid_str));
        task_uuids.push(task_uuid);
        // Count every co-occurring tag; exact-match exclusion of the tag
        // itself mirrors the original behavior.
        for tag in tags {
            if tag != title {
                *related_tags.entry(tag).or_insert(0) += 1;
            }
        }
    }
    let usage_count = task_uuids.len() as u32;
    let mut related_vec: Vec<(String, u32)> = related_tags.into_iter().collect();
    related_vec.sort_by_key(|r| std::cmp::Reverse(r.1));
    Ok(crate::models::TagStatistics {
        uuid: *uuid,
        title,
        usage_count,
        task_uuids,
        related_tags: related_vec,
    })
}
/// Finds pairs of tags whose titles are at least `min_similarity` alike,
/// most similar first. Each unordered pair is reported once.
///
/// Runs in O(n^2) over the tag count, which is acceptable for the small tag
/// populations a Things database holds.
///
/// # Errors
/// Propagates any failure from loading the tag list.
#[instrument(skip(self))]
pub async fn find_duplicate_tags(
    &self,
    min_similarity: f32,
) -> ThingsResult<Vec<crate::models::TagPair>> {
    use crate::database::tag_utils::calculate_similarity;
    let all_tags = self.get_all_tags().await?;
    let mut pairs = Vec::new();
    // Compare each tag against every later tag exactly once.
    for (i, tag1) in all_tags.iter().enumerate() {
        for tag2 in &all_tags[i + 1..] {
            let similarity = calculate_similarity(&tag1.title, &tag2.title);
            if similarity >= min_similarity {
                pairs.push(crate::models::TagPair {
                    tag1: tag1.clone(),
                    tag2: tag2.clone(),
                    similarity,
                });
            }
        }
    }
    // `total_cmp` avoids the NaN-silencing `partial_cmp(..).unwrap_or(Equal)`.
    pairs.sort_by(|a, b| b.similarity.total_cmp(&a.similarity));
    Ok(pairs)
}
/// Upper bound on the number of task UUIDs accepted by a single bulk
/// operation (`bulk_move`, `bulk_update_dates`, `bulk_complete`,
/// `bulk_delete`).
const MAX_BULK_BATCH_SIZE: usize = 1000;
/// Moves a batch of tasks into a project and/or area in one transaction.
///
/// At least one of `project_uuid` / `area_uuid` must be set. Both columns
/// are overwritten on every task (an unset side is written as NULL, matching
/// the original behavior). The whole batch fails — and is rolled back — if
/// any requested task is missing or trashed.
///
/// # Errors
/// Fails on an empty or oversized batch, a missing target/task, or any
/// database error.
#[instrument(skip(self))]
pub async fn bulk_move(
    &self,
    request: crate::models::BulkMoveRequest,
) -> ThingsResult<crate::models::BulkOperationResult> {
    if request.task_uuids.is_empty() {
        return Err(ThingsError::validation("Task UUIDs cannot be empty"));
    }
    if request.task_uuids.len() > Self::MAX_BULK_BATCH_SIZE {
        return Err(ThingsError::validation(format!(
            "Batch size {} exceeds maximum of {}",
            request.task_uuids.len(),
            Self::MAX_BULK_BATCH_SIZE
        )));
    }
    if request.project_uuid.is_none() && request.area_uuid.is_none() {
        return Err(ThingsError::validation(
            "Must specify either project_uuid or area_uuid",
        ));
    }
    if let Some(project_uuid) = &request.project_uuid {
        validators::validate_project_exists(&self.pool, project_uuid).await?;
    }
    if let Some(area_uuid) = &request.area_uuid {
        validators::validate_area_exists(&self.pool, area_uuid).await?;
    }
    let mut tx = self
        .pool
        .begin()
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to begin transaction: {e}")))?;
    // One placeholder list serves both the SELECT and the UPDATE below.
    let placeholders = vec!["?"; request.task_uuids.len()].join(",");
    let query_str = format!(
        "SELECT uuid FROM TMTask WHERE uuid IN ({}) AND trashed = 0",
        placeholders
    );
    let mut query = sqlx::query(&query_str);
    for uuid in &request.task_uuids {
        query = query.bind(uuid.to_string());
    }
    // HashSet lookup keeps the missing-uuid check O(n) instead of the
    // previous O(n^2) Vec::contains scan.
    let found_uuids: std::collections::HashSet<String> = query
        .fetch_all(&mut *tx)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to validate tasks: {e}")))?
        .iter()
        .map(|row| row.get("uuid"))
        .collect();
    for uuid in &request.task_uuids {
        if !found_uuids.contains(&uuid.to_string()) {
            tx.rollback().await.ok();
            return Err(ThingsError::TaskNotFound {
                uuid: uuid.to_string(),
            });
        }
    }
    let now = Utc::now().timestamp() as f64;
    let query_str = format!(
        "UPDATE TMTask SET project = ?, area = ?, userModificationDate = ? WHERE uuid IN ({})",
        placeholders
    );
    let mut query = sqlx::query(&query_str)
        .bind(request.project_uuid.map(|u| u.to_string()))
        .bind(request.area_uuid.map(|u| u.to_string()))
        .bind(now);
    for uuid in &request.task_uuids {
        query = query.bind(uuid.to_string());
    }
    query
        .execute(&mut *tx)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to bulk move tasks: {e}")))?;
    tx.commit()
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to commit transaction: {e}")))?;
    info!("Bulk moved {} task(s)", request.task_uuids.len());
    Ok(crate::models::BulkOperationResult {
        success: true,
        processed_count: request.task_uuids.len(),
        message: format!("Successfully moved {} task(s)", request.task_uuids.len()),
    })
}
/// Sets or clears start dates and deadlines on a batch of tasks atomically.
///
/// A `clear_*` flag wins over the corresponding date field. Before writing,
/// the resulting (start, deadline) pair of every task is validated so the
/// batch cannot leave any task with a deadline before its start date.
///
/// Only columns the request actually addresses are written: previously the
/// UPDATE bound NULL for an untouched column, silently wiping existing
/// dates even though the per-task validation above assumed they were kept.
///
/// # Errors
/// Fails on an empty/oversized batch, a missing or trashed task, an invalid
/// resulting date range, or any database error.
#[instrument(skip(self))]
pub async fn bulk_update_dates(
    &self,
    request: crate::models::BulkUpdateDatesRequest,
) -> ThingsResult<crate::models::BulkOperationResult> {
    use crate::database::{safe_things_date_to_naive_date, validate_date_range};
    if request.task_uuids.is_empty() {
        return Err(ThingsError::validation("Task UUIDs cannot be empty"));
    }
    if request.task_uuids.len() > Self::MAX_BULK_BATCH_SIZE {
        return Err(ThingsError::validation(format!(
            "Batch size {} exceeds maximum of {}",
            request.task_uuids.len(),
            Self::MAX_BULK_BATCH_SIZE
        )));
    }
    // Cheap up-front check of the requested pair before touching the DB.
    if let (Some(start), Some(deadline)) = (request.start_date, request.deadline) {
        validate_date_range(Some(start), Some(deadline))?;
    }
    let mut tx = self
        .pool
        .begin()
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to begin transaction: {e}")))?;
    // One placeholder list serves both the SELECT and the UPDATE below.
    let placeholders = vec!["?"; request.task_uuids.len()].join(",");
    let query_str = format!(
        "SELECT uuid, startDate, deadline FROM TMTask WHERE uuid IN ({}) AND trashed = 0",
        placeholders
    );
    let mut query = sqlx::query(&query_str);
    for uuid in &request.task_uuids {
        query = query.bind(uuid.to_string());
    }
    let rows = query
        .fetch_all(&mut *tx)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to validate tasks: {e}")))?;
    // HashSet lookup keeps the missing-uuid check O(n) instead of O(n^2).
    let found_uuids: std::collections::HashSet<String> =
        rows.iter().map(|row| row.get("uuid")).collect();
    for uuid in &request.task_uuids {
        if !found_uuids.contains(&uuid.to_string()) {
            tx.rollback().await.ok();
            return Err(ThingsError::TaskNotFound {
                uuid: uuid.to_string(),
            });
        }
    }
    // Validate the post-update pair for every task, merging the request with
    // each task's current stored values.
    for row in &rows {
        let current_start: Option<i64> = row.get("startDate");
        let current_deadline: Option<i64> = row.get("deadline");
        let final_start = if request.clear_start_date {
            None
        } else if let Some(new_start) = request.start_date {
            Some(new_start)
        } else {
            current_start.and_then(|ts| safe_things_date_to_naive_date(ts).ok())
        };
        let final_deadline = if request.clear_deadline {
            None
        } else if let Some(new_deadline) = request.deadline {
            Some(new_deadline)
        } else {
            current_deadline.and_then(|ts| safe_things_date_to_naive_date(ts).ok())
        };
        validate_date_range(final_start, final_deadline)?;
    }
    let now = Utc::now().timestamp() as f64;
    let touch_start = request.clear_start_date || request.start_date.is_some();
    let touch_deadline = request.clear_deadline || request.deadline.is_some();
    // NULL clears a column; otherwise store the Things epoch offset.
    let start_date_value = if request.clear_start_date {
        None
    } else {
        request.start_date.map(naive_date_to_things_timestamp)
    };
    let deadline_value = if request.clear_deadline {
        None
    } else {
        request.deadline.map(naive_date_to_things_timestamp)
    };
    // Build the SET clause from only the addressed columns.
    let mut set_fragments = Vec::new();
    if touch_start {
        set_fragments.push("startDate = ?");
    }
    if touch_deadline {
        set_fragments.push("deadline = ?");
    }
    set_fragments.push("userModificationDate = ?");
    let query_str = format!(
        "UPDATE TMTask SET {} WHERE uuid IN ({})",
        set_fragments.join(", "),
        placeholders
    );
    let mut query = sqlx::query(&query_str);
    if touch_start {
        query = query.bind(start_date_value);
    }
    if touch_deadline {
        query = query.bind(deadline_value);
    }
    query = query.bind(now);
    for uuid in &request.task_uuids {
        query = query.bind(uuid.to_string());
    }
    query
        .execute(&mut *tx)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to bulk update dates: {e}")))?;
    tx.commit()
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to commit transaction: {e}")))?;
    info!(
        "Bulk updated dates for {} task(s)",
        request.task_uuids.len()
    );
    Ok(crate::models::BulkOperationResult {
        success: true,
        processed_count: request.task_uuids.len(),
        message: format!(
            "Successfully updated dates for {} task(s)",
            request.task_uuids.len()
        ),
    })
}
/// Marks a batch of tasks as completed in one transaction.
///
/// Sets `status = 1` and stamps `stopDate` and `userModificationDate` with
/// the current time for every task. The whole batch fails — and is rolled
/// back — if any requested task is missing or trashed.
///
/// # Errors
/// Fails on an empty/oversized batch, a missing task, or any database error.
#[instrument(skip(self))]
pub async fn bulk_complete(
    &self,
    request: crate::models::BulkCompleteRequest,
) -> ThingsResult<crate::models::BulkOperationResult> {
    if request.task_uuids.is_empty() {
        return Err(ThingsError::validation("Task UUIDs cannot be empty"));
    }
    if request.task_uuids.len() > Self::MAX_BULK_BATCH_SIZE {
        return Err(ThingsError::validation(format!(
            "Batch size {} exceeds maximum of {}",
            request.task_uuids.len(),
            Self::MAX_BULK_BATCH_SIZE
        )));
    }
    let mut tx = self
        .pool
        .begin()
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to begin transaction: {e}")))?;
    // One placeholder list serves both the SELECT and the UPDATE below.
    let placeholders = vec!["?"; request.task_uuids.len()].join(",");
    let query_str = format!(
        "SELECT uuid FROM TMTask WHERE uuid IN ({}) AND trashed = 0",
        placeholders
    );
    let mut query = sqlx::query(&query_str);
    for uuid in &request.task_uuids {
        query = query.bind(uuid.to_string());
    }
    // HashSet lookup keeps the missing-uuid check O(n) instead of O(n^2).
    let found_uuids: std::collections::HashSet<String> = query
        .fetch_all(&mut *tx)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to validate tasks: {e}")))?
        .iter()
        .map(|row| row.get("uuid"))
        .collect();
    for uuid in &request.task_uuids {
        if !found_uuids.contains(&uuid.to_string()) {
            tx.rollback().await.ok();
            return Err(ThingsError::TaskNotFound {
                uuid: uuid.to_string(),
            });
        }
    }
    let now = Utc::now().timestamp() as f64;
    let query_str = format!(
        "UPDATE TMTask SET status = 1, stopDate = ?, userModificationDate = ? WHERE uuid IN ({})",
        placeholders
    );
    let mut query = sqlx::query(&query_str).bind(now).bind(now);
    for uuid in &request.task_uuids {
        query = query.bind(uuid.to_string());
    }
    query
        .execute(&mut *tx)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to bulk complete tasks: {e}")))?;
    tx.commit()
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to commit transaction: {e}")))?;
    info!("Bulk completed {} task(s)", request.task_uuids.len());
    Ok(crate::models::BulkOperationResult {
        success: true,
        processed_count: request.task_uuids.len(),
        message: format!(
            "Successfully completed {} task(s)",
            request.task_uuids.len()
        ),
    })
}
/// Soft-deletes a batch of tasks (sets `trashed = 1`) in one transaction.
///
/// The whole batch fails — and is rolled back — if any requested task is
/// missing or already trashed.
///
/// # Errors
/// Fails on an empty/oversized batch, a missing task, or any database error.
#[instrument(skip(self))]
pub async fn bulk_delete(
    &self,
    request: crate::models::BulkDeleteRequest,
) -> ThingsResult<crate::models::BulkOperationResult> {
    if request.task_uuids.is_empty() {
        return Err(ThingsError::validation("Task UUIDs cannot be empty"));
    }
    if request.task_uuids.len() > Self::MAX_BULK_BATCH_SIZE {
        return Err(ThingsError::validation(format!(
            "Batch size {} exceeds maximum of {}",
            request.task_uuids.len(),
            Self::MAX_BULK_BATCH_SIZE
        )));
    }
    let mut tx = self
        .pool
        .begin()
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to begin transaction: {e}")))?;
    // One placeholder list serves both the SELECT and the UPDATE below.
    let placeholders = vec!["?"; request.task_uuids.len()].join(",");
    let query_str = format!(
        "SELECT uuid FROM TMTask WHERE uuid IN ({}) AND trashed = 0",
        placeholders
    );
    let mut query = sqlx::query(&query_str);
    for uuid in &request.task_uuids {
        query = query.bind(uuid.to_string());
    }
    // HashSet lookup keeps the missing-uuid check O(n) instead of O(n^2).
    let found_uuids: std::collections::HashSet<String> = query
        .fetch_all(&mut *tx)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to validate tasks: {e}")))?
        .iter()
        .map(|row| row.get("uuid"))
        .collect();
    for uuid in &request.task_uuids {
        if !found_uuids.contains(&uuid.to_string()) {
            tx.rollback().await.ok();
            return Err(ThingsError::TaskNotFound {
                uuid: uuid.to_string(),
            });
        }
    }
    let now = Utc::now().timestamp() as f64;
    let query_str = format!(
        "UPDATE TMTask SET trashed = 1, userModificationDate = ? WHERE uuid IN ({})",
        placeholders
    );
    let mut query = sqlx::query(&query_str).bind(now);
    for uuid in &request.task_uuids {
        query = query.bind(uuid.to_string());
    }
    query
        .execute(&mut *tx)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to bulk delete tasks: {e}")))?;
    tx.commit()
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to commit transaction: {e}")))?;
    info!("Bulk deleted {} task(s)", request.task_uuids.len());
    Ok(crate::models::BulkOperationResult {
        success: true,
        processed_count: request.task_uuids.len(),
        message: format!("Successfully deleted {} task(s)", request.task_uuids.len()),
    })
}
}
/// Aggregate row counts reported by the database layer.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabaseStats {
    // Number of task rows.
    pub task_count: u64,
    // Number of project rows.
    pub project_count: u64,
    // Number of area rows.
    pub area_count: u64,
}
impl DatabaseStats {
    /// Total of tasks, projects, and areas combined.
    #[must_use]
    pub fn total_items(&self) -> u64 {
        [self.task_count, self.project_count, self.area_count]
            .into_iter()
            .sum()
    }
}
/// Builds the default on-disk location of the Things 3 SQLite database
/// inside the macOS group container of the current user.
#[must_use]
pub fn get_default_database_path() -> PathBuf {
    // Fall back to a literal "~" when HOME is unset, matching prior behavior.
    let home = match std::env::var("HOME") {
        Ok(dir) => dir,
        Err(_) => "~".to_string(),
    };
    let container = "Library/Group Containers/JLMPQHK86H.com.culturedcode.ThingsMac/ThingsData-0Z0Z2/Things Database.thingsdatabase/main.sqlite";
    PathBuf::from(format!("{home}/{container}"))
}
#[cfg(test)]
mod tests {
use super::*;
use tempfile::{NamedTempFile, TempDir};
#[tokio::test]
async fn test_database_connection() {
    // Opening a path with no database file must fail rather than silently
    // creating an empty database.
    let temp_dir = TempDir::new().unwrap();
    let db_path = temp_dir.path().join("test.db");
    let result = super::ThingsDatabase::new(&db_path).await;
    assert!(result.is_err());
}
#[tokio::test]
async fn test_connection_string() {
    // An in-memory SQLite connection string should always be accepted.
    let result = super::ThingsDatabase::from_connection_string("sqlite::memory:").await;
    assert!(result.is_ok());
}
#[test]
fn test_task_status_from_i32() {
    // Valid discriminants map to their variants; anything else is None.
    let cases = [
        (0, Some(TaskStatus::Incomplete)),
        (1, Some(TaskStatus::Completed)),
        (2, Some(TaskStatus::Canceled)),
        (3, Some(TaskStatus::Trashed)),
        (4, None),
        (-1, None),
    ];
    for (raw, want) in cases {
        assert_eq!(TaskStatus::from_i32(raw), want);
    }
}
#[test]
fn test_task_type_from_i32() {
    // Valid discriminants map to their variants; anything else is None.
    let cases = [
        (0, Some(TaskType::Todo)),
        (1, Some(TaskType::Project)),
        (2, Some(TaskType::Heading)),
        (3, Some(TaskType::Area)),
        (4, None),
        (-1, None),
    ];
    for (raw, want) in cases {
        assert_eq!(TaskType::from_i32(raw), want);
    }
}
#[test]
fn test_database_stats_total_items() {
    // Non-zero counters sum across all three categories.
    let populated = DatabaseStats {
        task_count: 10,
        project_count: 5,
        area_count: 3,
    };
    assert_eq!(populated.total_items(), 18);
    // All-zero stats total zero.
    let zeroed = DatabaseStats {
        task_count: 0,
        project_count: 0,
        area_count: 0,
    };
    assert_eq!(zeroed.total_items(), 0);
}
#[test]
fn test_database_pool_config_default() {
    // Pin the Default pool settings so accidental changes are caught here.
    let config = DatabasePoolConfig::default();
    assert_eq!(config.max_connections, 10);
    assert_eq!(config.min_connections, 1);
    assert_eq!(config.connect_timeout, Duration::from_secs(30));
    assert_eq!(config.idle_timeout, Duration::from_secs(600));
    assert_eq!(config.max_lifetime, Duration::from_secs(1800));
    assert!(config.test_before_acquire);
}
#[test]
fn test_sqlite_optimizations_default() {
    // Pin the default SQLite PRAGMA configuration.
    let opts = SqliteOptimizations::default();
    assert!(opts.enable_wal_mode);
    // Negative cache_size presumably means KiB per SQLite convention — TODO confirm.
    assert_eq!(opts.cache_size, -20000);
    assert_eq!(opts.synchronous_mode, "NORMAL".to_string());
    assert_eq!(opts.temp_store, "MEMORY".to_string());
    assert_eq!(opts.journal_mode, "WAL".to_string());
    // 268_435_456 bytes = 256 MiB mmap window.
    assert_eq!(opts.mmap_size, 268_435_456);
    assert!(opts.enable_foreign_keys);
    assert!(opts.enable_query_planner);
}
#[test]
fn test_pool_health_status_creation() {
    // PoolHealthStatus is a plain data holder; verify fields round-trip.
    let status = PoolHealthStatus {
        is_healthy: true,
        pool_size: 8,
        active_connections: 5,
        idle_connections: 3,
        max_connections: 10,
        min_connections: 1,
        connection_timeout: Duration::from_secs(30),
        idle_timeout: Some(Duration::from_secs(600)),
        max_lifetime: Some(Duration::from_secs(1800)),
    };
    assert!(status.is_healthy);
    assert_eq!(status.active_connections, 5);
    assert_eq!(status.idle_connections, 3);
    assert_eq!(status.pool_size, 8);
}
#[test]
fn test_pool_metrics_creation() {
    // PoolMetrics is a plain data holder; verify fields round-trip.
    let metrics = PoolMetrics {
        pool_size: 8,
        active_connections: 5,
        idle_connections: 3,
        max_connections: 10,
        min_connections: 1,
        utilization_percentage: 80.0,
        is_healthy: true,
        response_time_ms: 50,
        connection_timeout: Duration::from_secs(30),
        idle_timeout: Some(Duration::from_secs(600)),
        max_lifetime: Some(Duration::from_secs(1800)),
    };
    assert!(metrics.is_healthy);
    assert_eq!(metrics.pool_size, 8);
    assert_eq!(metrics.active_connections, 5);
    assert_eq!(metrics.idle_connections, 3);
    // Float compared with an epsilon rather than exact equality.
    assert!((metrics.utilization_percentage - 80.0).abs() < f64::EPSILON);
    assert_eq!(metrics.response_time_ms, 50);
}
#[test]
fn test_comprehensive_health_status_creation() {
    // ComprehensiveHealthStatus aggregates pool health, pool metrics, and
    // database stats; verify composition and the total_items sum.
    let pool_health = PoolHealthStatus {
        is_healthy: true,
        pool_size: 8,
        active_connections: 5,
        idle_connections: 3,
        max_connections: 10,
        min_connections: 1,
        connection_timeout: Duration::from_secs(30),
        idle_timeout: Some(Duration::from_secs(600)),
        max_lifetime: Some(Duration::from_secs(1800)),
    };
    let pool_metrics = PoolMetrics {
        pool_size: 8,
        active_connections: 5,
        idle_connections: 3,
        max_connections: 10,
        min_connections: 1,
        utilization_percentage: 80.0,
        is_healthy: true,
        response_time_ms: 50,
        connection_timeout: Duration::from_secs(30),
        idle_timeout: Some(Duration::from_secs(600)),
        max_lifetime: Some(Duration::from_secs(1800)),
    };
    let db_stats = DatabaseStats {
        task_count: 50,
        project_count: 10,
        area_count: 5,
    };
    let health_status = ComprehensiveHealthStatus {
        overall_healthy: true,
        pool_health,
        pool_metrics,
        database_stats: db_stats,
        timestamp: Utc::now(),
    };
    assert!(health_status.overall_healthy);
    // 50 + 10 + 5
    assert_eq!(health_status.database_stats.total_items(), 65);
}
#[test]
fn test_safe_timestamp_convert_edge_cases() {
    // In-range finite values pass through unchanged.
    assert_eq!(safe_timestamp_convert(1_609_459_200.0), 1_609_459_200);
    assert_eq!(safe_timestamp_convert(0.0), 0);
    // Negative, non-finite, and above-cap inputs all collapse to 0.
    for bad in [-1.0, f64::INFINITY, f64::NAN, 5_000_000_000.0] {
        assert_eq!(safe_timestamp_convert(bad), 0);
    }
    // The cap itself is still accepted.
    assert_eq!(safe_timestamp_convert(4_102_444_800_f64), 4_102_444_800);
}
#[test]
fn test_things_uuid_to_uuid_consistency() {
    // The mapping must be deterministic: same input, same UUID.
    let id = "test-id-123";
    assert_eq!(
        things_uuid_to_uuid(id),
        things_uuid_to_uuid(id),
        "UUIDs should be consistent for same input"
    );
    // Distinct inputs must yield distinct UUIDs.
    assert_ne!(
        things_uuid_to_uuid(id),
        things_uuid_to_uuid("different-id"),
        "Different inputs should produce different UUIDs"
    );
    // Degenerate inputs (empty and very long) still render as UUIDs.
    assert!(!things_uuid_to_uuid("").to_string().is_empty());
    let long_input = "a".repeat(1000);
    assert!(!things_uuid_to_uuid(&long_input).to_string().is_empty());
}
#[test]
fn test_task_status_from_i32_all_variants() {
    // Every known variant decodes; out-of-range values yield None.
    let cases = [
        (0, Some(TaskStatus::Incomplete)),
        (1, Some(TaskStatus::Completed)),
        (2, Some(TaskStatus::Canceled)),
        (3, Some(TaskStatus::Trashed)),
        (999, None),
        (-1, None),
    ];
    for (raw, want) in cases {
        assert_eq!(TaskStatus::from_i32(raw), want);
    }
}
#[test]
fn test_task_type_from_i32_all_variants() {
    // Every known variant decodes; out-of-range values yield None.
    let cases = [
        (0, Some(TaskType::Todo)),
        (1, Some(TaskType::Project)),
        (2, Some(TaskType::Heading)),
        (3, Some(TaskType::Area)),
        (999, None),
        (-1, None),
    ];
    for (raw, want) in cases {
        assert_eq!(TaskType::from_i32(raw), want);
    }
}
#[test]
fn test_database_pool_config_default_values() {
    // Duplicate pin of the Default pool settings (see also
    // test_database_pool_config_default).
    let config = DatabasePoolConfig::default();
    assert_eq!(config.max_connections, 10);
    assert_eq!(config.min_connections, 1);
    assert_eq!(config.connect_timeout, Duration::from_secs(30));
    assert_eq!(config.idle_timeout, Duration::from_secs(600));
    assert_eq!(config.max_lifetime, Duration::from_secs(1800));
    assert!(config.test_before_acquire);
}
#[test]
fn test_database_stats_total_items_calculation() {
    // total_items must equal the sum of the three counters.
    let populated = DatabaseStats {
        task_count: 10,
        project_count: 5,
        area_count: 3,
    };
    assert_eq!(populated.total_items(), 10 + 5 + 3);
    // Degenerate case: every counter is zero.
    let zeroed = DatabaseStats {
        task_count: 0,
        project_count: 0,
        area_count: 0,
    };
    assert_eq!(zeroed.total_items(), 0);
}
#[test]
fn test_pool_health_status_creation_comprehensive() {
    // Field round-trip check for PoolHealthStatus with distinct counts.
    let status = PoolHealthStatus {
        is_healthy: true,
        pool_size: 8,
        active_connections: 2,
        idle_connections: 3,
        max_connections: 10,
        min_connections: 1,
        connection_timeout: Duration::from_secs(30),
        idle_timeout: Some(Duration::from_secs(600)),
        max_lifetime: Some(Duration::from_secs(1800)),
    };
    assert!(status.is_healthy);
    assert_eq!(status.pool_size, 8);
    assert_eq!(status.max_connections, 10);
}
#[test]
fn test_pool_metrics_creation_comprehensive() {
    // Field round-trip check for PoolMetrics.
    let metrics = PoolMetrics {
        pool_size: 8,
        active_connections: 5,
        idle_connections: 3,
        max_connections: 10,
        min_connections: 1,
        utilization_percentage: 80.0,
        is_healthy: true,
        response_time_ms: 50,
        connection_timeout: Duration::from_secs(30),
        idle_timeout: Some(Duration::from_secs(600)),
        max_lifetime: Some(Duration::from_secs(1800)),
    };
    assert_eq!(metrics.pool_size, 8);
    assert_eq!(metrics.response_time_ms, 50);
    assert!(metrics.is_healthy);
}
#[test]
fn test_comprehensive_health_status_creation_full() {
    // Full composition check of ComprehensiveHealthStatus with larger stats.
    let pool_health = PoolHealthStatus {
        is_healthy: true,
        pool_size: 8,
        active_connections: 2,
        idle_connections: 3,
        max_connections: 10,
        min_connections: 1,
        connection_timeout: Duration::from_secs(30),
        idle_timeout: Some(Duration::from_secs(600)),
        max_lifetime: Some(Duration::from_secs(1800)),
    };
    let pool_metrics = PoolMetrics {
        pool_size: 8,
        active_connections: 5,
        idle_connections: 3,
        max_connections: 10,
        min_connections: 1,
        utilization_percentage: 80.0,
        is_healthy: true,
        response_time_ms: 50,
        connection_timeout: Duration::from_secs(30),
        idle_timeout: Some(Duration::from_secs(600)),
        max_lifetime: Some(Duration::from_secs(1800)),
    };
    let database_stats = DatabaseStats {
        task_count: 100,
        project_count: 20,
        area_count: 5,
    };
    let status = ComprehensiveHealthStatus {
        overall_healthy: true,
        pool_health,
        pool_metrics,
        database_stats,
        timestamp: Utc::now(),
    };
    assert!(status.overall_healthy);
    // 100 + 20 + 5
    assert_eq!(status.database_stats.total_items(), 125);
}
#[test]
fn test_sqlite_optimizations_default_values() {
    // Duplicate pin of the SQLite optimization defaults (see also
    // test_sqlite_optimizations_default).
    let opts = SqliteOptimizations::default();
    assert!(opts.enable_wal_mode);
    assert!(opts.enable_foreign_keys);
    assert_eq!(opts.cache_size, -20000);
    assert_eq!(opts.temp_store, "MEMORY");
    assert_eq!(opts.mmap_size, 268_435_456);
    assert_eq!(opts.synchronous_mode, "NORMAL");
    assert_eq!(opts.journal_mode, "WAL");
}
#[test]
fn test_get_default_database_path_format() {
    // The default path must point into the Things group container database.
    let rendered = get_default_database_path().to_string_lossy().into_owned();
    for fragment in [
        "Things Database.thingsdatabase",
        "main.sqlite",
        "Library/Group Containers",
    ] {
        assert!(rendered.contains(fragment));
    }
}
#[tokio::test]
async fn test_database_new_with_config() {
    // A custom pool configuration should yield a usable, open pool.
    let temp_file = NamedTempFile::new().unwrap();
    let db_path = temp_file.path();
    crate::test_utils::create_test_database(db_path)
        .await
        .unwrap();
    let config = DatabasePoolConfig {
        max_connections: 5,
        min_connections: 1,
        connect_timeout: Duration::from_secs(10),
        idle_timeout: Duration::from_secs(300),
        max_lifetime: Duration::from_secs(900),
        test_before_acquire: true,
        sqlite_optimizations: SqliteOptimizations::default(),
    };
    let database = ThingsDatabase::new_with_config(db_path, config)
        .await
        .unwrap();
    let pool = database.pool();
    assert!(!pool.is_closed());
}
#[tokio::test]
async fn test_database_error_handling_invalid_path() {
    // A path in a non-existent directory must produce an error, not a panic.
    let result = ThingsDatabase::new(Path::new("/non/existent/path.db")).await;
    assert!(result.is_err(), "Should fail with non-existent path");
}
#[tokio::test]
async fn test_database_get_stats() {
    // get_stats over the seeded test database must report non-zero counts.
    let temp_file = NamedTempFile::new().unwrap();
    let db_path = temp_file.path();
    crate::test_utils::create_test_database(db_path)
        .await
        .unwrap();
    let database = ThingsDatabase::new(db_path).await.unwrap();
    let stats = database.get_stats().await.unwrap();
    assert!(stats.task_count > 0, "Should have test tasks");
    assert!(stats.area_count > 0, "Should have test areas");
    assert!(stats.total_items() > 0, "Should have total items");
}
#[tokio::test]
async fn test_database_comprehensive_health_check() {
    // A freshly-opened seeded database should report healthy on all axes.
    let temp_file = NamedTempFile::new().unwrap();
    let db_path = temp_file.path();
    crate::test_utils::create_test_database(db_path)
        .await
        .unwrap();
    let database = ThingsDatabase::new(db_path).await.unwrap();
    let health = database.comprehensive_health_check().await.unwrap();
    assert!(health.overall_healthy, "Database should be healthy");
    assert!(health.pool_health.is_healthy, "Pool should be healthy");
    assert!(
        health.pool_metrics.is_healthy,
        "Pool metrics should be healthy"
    );
}
#[test]
fn test_things_date_negative_returns_none() {
    // Any negative offset from the 2001 epoch is invalid.
    for bad in [-1, -100, i64::MIN] {
        assert_eq!(things_date_to_naive_date(bad), None);
    }
}
#[test]
fn test_things_date_zero_returns_none() {
    // Zero is treated as "no date", not as the epoch itself.
    assert_eq!(things_date_to_naive_date(0), None);
}
#[test]
fn test_things_date_boundary_2001() {
    use chrono::Datelike;
    // One second past the epoch still lands on 2001-01-01.
    let date = things_date_to_naive_date(1).expect("epoch + 1s should convert");
    assert_eq!((date.year(), date.month(), date.day()), (2001, 1, 1));
}
#[test]
fn test_things_date_one_day() {
    use chrono::Datelike;
    // Exactly 86 400 seconds after the epoch is the next calendar day.
    let one_day = 86_400i64;
    let date = things_date_to_naive_date(one_day).expect("one day should convert");
    assert_eq!((date.year(), date.month(), date.day()), (2001, 1, 2));
}
#[test]
fn test_things_date_one_year() {
    use chrono::Datelike;
    // 365 days after 2001-01-01 falls in 2002 (2001 is not a leap year).
    let one_year = 365 * 86_400i64;
    let date = things_date_to_naive_date(one_year).expect("one year should convert");
    assert_eq!(date.year(), 2002);
}
#[test]
fn test_things_date_current_era() {
    use chrono::Datelike;
    // 8401 whole days past the 2001 epoch lands in 2024.
    let offset = 8401i64 * 86_400;
    let date = things_date_to_naive_date(offset).expect("2024 offset should convert");
    assert_eq!(date.year(), 2024);
}
#[test]
fn test_things_date_leap_year() {
    use chrono::{Datelike, TimeZone, Utc};
    // 2004-02-29 exists; the conversion must honor leap days.
    let epoch = Utc.with_ymd_and_hms(2001, 1, 1, 0, 0, 0).single().unwrap();
    let leap_day = Utc.with_ymd_and_hms(2004, 2, 29, 0, 0, 0).single().unwrap();
    let offset = (leap_day - epoch).num_seconds();
    let date = things_date_to_naive_date(offset).expect("leap day should convert");
    assert_eq!((date.year(), date.month(), date.day()), (2004, 2, 29));
}
#[test]
fn test_uuid_conversion_consistency() {
    // Two conversions of the same key must agree.
    let key = "ABC123";
    assert_eq!(things_uuid_to_uuid(key), things_uuid_to_uuid(key));
}
#[test]
fn test_uuid_conversion_uniqueness() {
    // Even near-identical keys must map to pairwise-distinct UUIDs.
    let a = things_uuid_to_uuid("ABC123");
    let b = things_uuid_to_uuid("ABC124");
    let c = things_uuid_to_uuid("XYZ789");
    assert_ne!(a, b);
    assert_ne!(a, c);
    assert_ne!(b, c);
}
#[test]
fn test_uuid_conversion_empty_string() {
    // The empty key is legal and still yields a renderable UUID.
    assert!(!things_uuid_to_uuid("").to_string().is_empty());
}
#[test]
fn test_uuid_conversion_special_characters() {
    // Punctuation differences alone must change the resulting UUID.
    let dashed = things_uuid_to_uuid("test-with-dashes");
    let underscored = things_uuid_to_uuid("test_with_underscores");
    let dotted = things_uuid_to_uuid("test.with.dots");
    assert_ne!(dashed, underscored);
    assert_ne!(dashed, dotted);
    assert_ne!(underscored, dotted);
}
#[test]
fn test_safe_timestamp_convert_normal_values() {
    // A typical Unix timestamp converts losslessly.
    assert_eq!(safe_timestamp_convert(1_700_000_000.0), 1_700_000_000);
}
#[test]
fn test_safe_timestamp_convert_zero() {
    // Zero is within range and maps to zero.
    assert_eq!(safe_timestamp_convert(0.0), 0);
}
#[test]
fn test_safe_timestamp_convert_negative() {
    // Negative timestamps are rejected and collapse to 0.
    for bad in [-1.0, -1000.0] {
        assert_eq!(safe_timestamp_convert(bad), 0);
    }
}
#[test]
fn test_safe_timestamp_convert_infinity() {
    // Non-finite input in either direction collapses to 0.
    for bad in [f64::INFINITY, f64::NEG_INFINITY] {
        assert_eq!(safe_timestamp_convert(bad), 0);
    }
}
#[test]
fn test_safe_timestamp_convert_nan() {
    // NaN is non-finite and collapses to 0.
    assert_eq!(safe_timestamp_convert(f64::NAN), 0);
}
#[test]
fn test_date_roundtrip_known_dates() {
    use chrono::{Datelike, TimeZone, Utc};
    // Round-trip several known calendar dates through the Things epoch
    // offset and back, including a leap day.
    let test_cases = vec![
        (2001, 1, 2),
        (2010, 6, 15),
        (2020, 12, 31),
        (2024, 2, 29), // leap day
        (2025, 7, 4),
    ];
    for (year, month, day) in test_cases {
        let base_date = Utc.with_ymd_and_hms(2001, 1, 1, 0, 0, 0).single().unwrap();
        let target_date = Utc
            .with_ymd_and_hms(year, month, day, 0, 0, 0)
            .single()
            .unwrap();
        let seconds = (target_date - base_date).num_seconds();
        let converted = things_date_to_naive_date(seconds);
        assert!(
            converted.is_some(),
            "Failed to convert {}-{:02}-{:02}",
            year,
            month,
            day
        );
        let result_date = converted.unwrap();
        assert_eq!(
            result_date.year(),
            year,
            "Year mismatch for {}-{:02}-{:02}",
            year,
            month,
            day
        );
        assert_eq!(
            result_date.month(),
            month,
            "Month mismatch for {}-{:02}-{:02}",
            year,
            month,
            day
        );
        assert_eq!(
            result_date.day(),
            day,
            "Day mismatch for {}-{:02}-{:02}",
            year,
            month,
            day
        );
    }
}
#[cfg(feature = "advanced-queries")]
mod query_tasks_tests {
use super::*;
use crate::models::TaskFilters;
use crate::query::TaskQueryBuilder;
use tempfile::NamedTempFile;
// Helper: seeds a temp database and returns it together with the temp file;
// the NamedTempFile must stay alive for as long as the database is used.
async fn open_test_db() -> (ThingsDatabase, NamedTempFile) {
    let f = NamedTempFile::new().unwrap();
    crate::test_utils::create_test_database(f.path())
        .await
        .unwrap();
    let db = ThingsDatabase::new(f.path()).await.unwrap();
    (db, f)
}
#[tokio::test]
async fn test_query_tasks_no_filters() {
    // Default (empty) filters must query successfully.
    let (db, _f) = open_test_db().await;
    let result = db.query_tasks(&TaskFilters::default()).await;
    assert!(result.is_ok());
}
#[tokio::test]
async fn test_query_tasks_status_filter() {
    // A status filter must only return tasks with that status.
    let (db, _f) = open_test_db().await;
    let filters = TaskFilters {
        status: Some(TaskStatus::Completed),
        ..TaskFilters::default()
    };
    let tasks = db.query_tasks(&filters).await.unwrap();
    assert!(tasks.iter().all(|t| t.status == TaskStatus::Completed));
}
#[tokio::test]
async fn test_query_tasks_limit() {
    // A limit of 1 must cap the result set at one row.
    let (db, _f) = open_test_db().await;
    let filters = TaskFilters {
        limit: Some(1),
        ..TaskFilters::default()
    };
    let tasks = db.query_tasks(&filters).await.unwrap();
    assert!(tasks.len() <= 1);
}
#[tokio::test]
async fn test_query_tasks_tag_filter_and_semantics() {
    // Requiring a tag that no row carries must produce an empty result.
    let (db, _file) = open_test_db().await;
    let filters = TaskFilters {
        tags: Some(vec!["nonexistent-tag-xyz".to_string()]),
        ..Default::default()
    };
    assert!(db.query_tasks(&filters).await.unwrap().is_empty());
}
#[tokio::test]
async fn test_query_tasks_search_query() {
    // A search term matching no row yields no tasks.
    let (db, _file) = open_test_db().await;
    let filters = TaskFilters {
        search_query: Some("zzznomatch".to_string()),
        ..Default::default()
    };
    assert!(db.query_tasks(&filters).await.unwrap().is_empty());
}
// Trashed rows live behind a separate `trashed` column rather than `status`;
// this test seeds one directly via SQL and checks both sides of the filter.
#[tokio::test]
async fn test_query_tasks_trashed_status() {
use sqlx::SqlitePool;
use uuid::Uuid;
let f = NamedTempFile::new().unwrap();
crate::test_utils::create_test_database(f.path())
.await
.unwrap();
// Open a raw side-channel pool to insert a row the public API would not create.
let pool = SqlitePool::connect(&format!("sqlite:{}", f.path().display()))
.await
.unwrap();
let trashed_uuid = Uuid::new_v4().to_string();
// status = 0 (incomplete) but trashed = 1: only the flag marks it as trashed.
sqlx::query(
"INSERT INTO TMTask \
(uuid, title, type, status, trashed, creationDate, userModificationDate) \
VALUES (?, ?, 0, 0, 1, 0, 0)",
)
.bind(&trashed_uuid)
.bind("Trashed Task")
.execute(&pool)
.await
.unwrap();
// Close the raw pool before the database under test opens the same file.
pool.close().await;
let db = ThingsDatabase::new(f.path()).await.unwrap();
// Default filters must hide trashed rows...
let active = db.query_tasks(&TaskFilters::default()).await.unwrap();
assert!(active.iter().all(|t| t.uuid.to_string() != trashed_uuid));
// ...while an explicit Trashed status filter must surface them.
let trashed = db
.query_tasks(&TaskFilters {
status: Some(TaskStatus::Trashed),
..TaskFilters::default()
})
.await
.unwrap();
assert!(
trashed.iter().any(|t| t.uuid.to_string() == trashed_uuid),
"expected trashed row to be returned by TaskStatus::Trashed filter"
);
}
#[tokio::test]
async fn test_query_tasks_offset_without_limit() {
    // An offset with no limit should drop exactly `offset` rows from the front.
    let (db, _file) = open_test_db().await;
    let everything = db.query_tasks(&TaskFilters::default()).await.unwrap();
    if everything.len() < 2 {
        // Not enough seeded rows to observe the offset; nothing to check.
        return;
    }
    let filters = TaskFilters {
        offset: Some(1),
        ..Default::default()
    };
    let shifted = db.query_tasks(&filters).await.unwrap();
    assert_eq!(shifted.len(), everything.len() - 1);
    assert_eq!(shifted[0].uuid, everything[1].uuid);
}
#[tokio::test]
async fn test_query_tasks_pagination_with_post_filter() {
    // limit/offset must still paginate correctly when a post-filter
    // (an empty search string, which matches every row) is active.
    let (db, _file) = open_test_db().await;
    let matching = TaskFilters {
        search_query: Some(String::new()),
        ..Default::default()
    };
    let everything = db.query_tasks(&matching).await.unwrap();
    if everything.len() < 2 {
        return;
    }
    // One filter struct, mutated between calls to request successive pages.
    let mut paged = TaskFilters {
        search_query: Some(String::new()),
        limit: Some(1),
        offset: Some(0),
        ..Default::default()
    };
    let first = db.query_tasks(&paged).await.unwrap();
    paged.offset = Some(1);
    let second = db.query_tasks(&paged).await.unwrap();
    assert_eq!(first.len(), 1);
    assert_eq!(second.len(), 1);
    assert_ne!(first[0].uuid, second[0].uuid);
}
/// Inserts a minimal TMTask row (incomplete todo, not trashed) with the given
/// title, optional notes, and tag list; returns the new row's UUID.
async fn insert_task(
    db: &ThingsDatabase,
    title: &str,
    notes: Option<&str>,
    tags: &[&str],
) -> Uuid {
    let uuid = Uuid::new_v4();
    let tag_list: Vec<String> = tags.iter().map(ToString::to_string).collect();
    // Tags are stored as a serialized blob in the cachedTags column.
    let cached_tags = serialize_tags_to_blob(&tag_list).unwrap();
    sqlx::query(
        "INSERT INTO TMTask \
(uuid, title, notes, type, status, trashed, creationDate, userModificationDate, cachedTags) \
VALUES (?, ?, ?, 0, 0, 0, 0, 0, ?)",
    )
    .bind(uuid.to_string())
    .bind(title)
    .bind(notes)
    .bind(cached_tags)
    .execute(&db.pool)
    .await
    .unwrap();
    uuid
}
/// Convenience wrapper over `insert_task` for rows that need only a tag list.
async fn insert_task_with_tags(db: &ThingsDatabase, title: &str, tags: &[&str]) -> Uuid {
insert_task(db, title, None, tags).await
}
/// Seeds three tasks tagged "a", "b", and "c" and returns their UUIDs
/// alongside the database and its backing temp file.
async fn open_db_with_tagged_rows() -> (ThingsDatabase, NamedTempFile, Uuid, Uuid, Uuid) {
    let (db, file) = open_test_db().await;
    let tagged_a = insert_task_with_tags(&db, "task-a", &["a"]).await;
    let tagged_b = insert_task_with_tags(&db, "task-b", &["b"]).await;
    let tagged_c = insert_task_with_tags(&db, "task-c", &["c"]).await;
    (db, file, tagged_a, tagged_b, tagged_c)
}
#[tokio::test]
async fn test_query_tasks_any_tags_or_semantics() {
    // any_tags is an OR filter: a row matches if it carries at least one listed tag.
    let (db, _file, tagged_a, tagged_b, tagged_c) = open_db_with_tagged_rows().await;
    let matched = TaskQueryBuilder::new()
        .any_tags(vec!["a".to_string(), "b".to_string()])
        .execute(&db)
        .await
        .unwrap();
    let found: std::collections::HashSet<_> = matched.iter().map(|t| t.uuid).collect();
    assert!(found.contains(&tagged_a));
    assert!(found.contains(&tagged_b));
    assert!(!found.contains(&tagged_c));
}
#[tokio::test]
async fn test_query_tasks_exclude_tags() {
    // exclude_tags drops every row carrying any of the listed tags.
    let (db, _file, tagged_a, tagged_b, tagged_c) = open_db_with_tagged_rows().await;
    let matched = TaskQueryBuilder::new()
        .exclude_tags(vec!["b".to_string()])
        .execute(&db)
        .await
        .unwrap();
    let found: std::collections::HashSet<_> = matched.iter().map(|t| t.uuid).collect();
    assert!(found.contains(&tagged_a));
    assert!(!found.contains(&tagged_b));
    assert!(found.contains(&tagged_c));
}
#[tokio::test]
async fn test_query_tasks_tag_count_min() {
    // tag_count(2) should keep only rows with at least two tags.
    let (db, _file) = open_test_db().await;
    insert_task_with_tags(&db, "zero-tags", &[]).await;
    insert_task_with_tags(&db, "one-tag", &["x"]).await;
    let double_tagged = insert_task_with_tags(&db, "two-tags", &["x", "y"]).await;
    let matched = TaskQueryBuilder::new()
        .tag_count(2)
        .execute(&db)
        .await
        .unwrap();
    let found: Vec<Uuid> = matched.iter().map(|t| t.uuid).collect();
    assert_eq!(found, vec![double_tagged]);
}
#[tokio::test]
async fn test_query_tasks_combined_tag_filters() {
    // tags (AND), any_tags (OR), and exclude_tags must all apply together.
    let (db, _file) = open_test_db().await;
    let expected = insert_task_with_tags(&db, "target", &["a", "x"]).await;
    insert_task_with_tags(&db, "no-a", &["x"]).await; // fails the required tag
    insert_task_with_tags(&db, "has-z", &["a", "x", "z"]).await; // hits the exclusion
    insert_task_with_tags(&db, "no-x", &["a"]).await; // fails the any_tags set
    let matched = TaskQueryBuilder::new()
        .tags(vec!["a".to_string()])
        .any_tags(vec!["x".to_string(), "y".to_string()])
        .exclude_tags(vec!["z".to_string()])
        .execute(&db)
        .await
        .unwrap();
    let found: Vec<Uuid> = matched.iter().map(|t| t.uuid).collect();
    assert_eq!(found, vec![expected]);
}
#[tokio::test]
async fn test_query_tasks_pagination_with_any_tags() {
    // limit/offset must produce disjoint pages even with a tag post-filter.
    let (db, _file) = open_test_db().await;
    for title in ["a1", "a2", "a3"] {
        insert_task_with_tags(&db, title, &["a"]).await;
    }
    let first = TaskQueryBuilder::new()
        .any_tags(vec!["a".to_string()])
        .limit(1)
        .offset(0)
        .execute(&db)
        .await
        .unwrap();
    let second = TaskQueryBuilder::new()
        .any_tags(vec!["a".to_string()])
        .limit(1)
        .offset(1)
        .execute(&db)
        .await
        .unwrap();
    assert_eq!(first.len(), 1);
    assert_eq!(second.len(), 1);
    assert_ne!(first[0].uuid, second[0].uuid);
}
#[tokio::test]
async fn test_execute_fuzzy_typo_match() {
    // Fuzzy search should tolerate a small typo in the query string.
    let (db, _file) = open_test_db().await;
    let expected = insert_task(&db, "Buy groceries", None, &[]).await;
    let matched = TaskQueryBuilder::new()
        .fuzzy_search("grocries")
        .execute(&db)
        .await
        .unwrap();
    let found: Vec<Uuid> = matched.iter().map(|t| t.uuid).collect();
    assert!(
        found.contains(&expected),
        "typo 'grocries' should match 'Buy groceries'"
    );
}
#[tokio::test]
async fn test_execute_fuzzy_below_threshold_excluded() {
    // A strict similarity threshold should reject unrelated queries entirely.
    let (db, _file) = open_test_db().await;
    insert_task(&db, "Buy groceries", None, &[]).await;
    let matched = TaskQueryBuilder::new()
        .fuzzy_search("xyz")
        .fuzzy_threshold(0.95)
        .execute(&db)
        .await
        .unwrap();
    assert!(
        matched.is_empty(),
        "completely unrelated query should return nothing at 0.95 threshold"
    );
}
#[tokio::test]
async fn test_execute_ranked_score_ordering() {
    // Ranked results must come back sorted by similarity score, descending.
    let (db, _file) = open_test_db().await;
    insert_task(&db, "urgent task", None, &[]).await;
    insert_task(&db, "urgntt task", None, &[]).await;
    insert_task(&db, "completely unrelated xyz abc", None, &[]).await;
    let ranked = TaskQueryBuilder::new()
        .fuzzy_search("urgent")
        .fuzzy_threshold(0.5)
        .execute_ranked(&db)
        .await
        .unwrap();
    // Each adjacent pair must be non-increasing in score.
    for pair in ranked.windows(2) {
        assert!(
            pair[0].score >= pair[1].score,
            "results must be sorted by score desc: {} < {}",
            pair[0].score,
            pair[1].score
        );
    }
    assert!(!ranked.is_empty(), "at least 'urgent task' should match");
}
#[tokio::test]
async fn test_execute_ranked_pagination() {
    // limit/offset on ranked results must slice the full ranking, in order.
    let (db, _file) = open_test_db().await;
    for i in 0..5 {
        insert_task(&db, &format!("meeting agenda item {i}"), None, &[]).await;
    }
    let full = TaskQueryBuilder::new()
        .fuzzy_search("agenda")
        .execute_ranked(&db)
        .await
        .unwrap();
    let window = TaskQueryBuilder::new()
        .fuzzy_search("agenda")
        .limit(2)
        .offset(1)
        .execute_ranked(&db)
        .await
        .unwrap();
    assert_eq!(window.len(), 2);
    assert_eq!(window[0].task.uuid, full[1].task.uuid);
    assert_eq!(window[1].task.uuid, full[2].task.uuid);
}
#[tokio::test]
async fn test_execute_fuzzy_with_search_collision() {
    // When both substring search and fuzzy search are set, fuzzy takes precedence.
    let (db, _file) = open_test_db().await;
    let expected = insert_task(&db, "meeting agenda", None, &[]).await;
    let matched = TaskQueryBuilder::new()
        .search("zzznomatch")
        .fuzzy_search("agenda")
        .execute(&db)
        .await
        .unwrap();
    assert_eq!(
        matched.len(),
        1,
        "only the 'meeting agenda' row should match; substring filter must be suppressed"
    );
    assert_eq!(
        matched[0].uuid, expected,
        "fuzzy should win over substring search"
    );
}
#[tokio::test]
async fn test_execute_ranked_errors_without_fuzzy_query() {
    // Ranked execution is meaningless without a fuzzy query; it must error.
    let (db, _file) = open_test_db().await;
    let outcome = TaskQueryBuilder::new().execute_ranked(&db).await;
    assert!(
        outcome.is_err(),
        "execute_ranked without fuzzy_search should error"
    );
}
#[tokio::test]
async fn test_execute_fuzzy_searches_notes() {
    // Fuzzy matching must consider the notes field, not just the title.
    let (db, _file) = open_test_db().await;
    let expected = insert_task(&db, "Weekly sync", Some("meeting agenda for Q2"), &[]).await;
    let matched = TaskQueryBuilder::new()
        .fuzzy_search("agenda")
        .execute(&db)
        .await
        .unwrap();
    let found: Vec<Uuid> = matched.iter().map(|t| t.uuid).collect();
    assert!(found.contains(&expected), "fuzzy should match text in notes");
}
/// Inserts a bare TMTask row carrying the given [`TaskStatus`], returning its UUID.
///
/// In the Things schema, trashing is modeled by the separate `trashed` column,
/// not by the `status` column (see test_query_tasks_trashed_status, which seeds
/// a trashed row as status = 0, trashed = 1).
async fn insert_task_with_status(
    db: &ThingsDatabase,
    title: &str,
    status: TaskStatus,
) -> Uuid {
    let uuid = Uuid::new_v4();
    let blob = serialize_tags_to_blob(&Vec::<String>::new()).unwrap();
    // Map the logical status to the (status, trashed) column pair.
    // Bug fix: Trashed previously produced (0, 0) — indistinguishable from a
    // plain Incomplete row — because the trashed flag was hard-coded to 0.
    let (status_n, trashed_n): (i64, i64) = match status {
        TaskStatus::Incomplete => (0, 0),
        TaskStatus::Completed => (1, 0),
        TaskStatus::Canceled => (2, 0),
        TaskStatus::Trashed => (0, 1),
    };
    sqlx::query(
        "INSERT INTO TMTask \
(uuid, title, notes, type, status, trashed, creationDate, userModificationDate, cachedTags) \
VALUES (?, ?, NULL, 0, ?, ?, 0, 0, ?)",
    )
    .bind(uuid.to_string())
    .bind(title)
    .bind(status_n)
    .bind(trashed_n)
    .bind(blob)
    .execute(&db.pool)
    .await
    .unwrap();
    uuid
}
/// Inserts a bare TMTask row whose `type` column encodes the given task type,
/// returning the new row's UUID.
async fn insert_task_with_type(
    db: &ThingsDatabase,
    title: &str,
    task_type: crate::models::TaskType,
) -> Uuid {
    let uuid = Uuid::new_v4();
    let empty_tags = serialize_tags_to_blob(&Vec::<String>::new()).unwrap();
    // Numeric encoding of the `type` column used by the Things schema.
    let type_code: i64 = match task_type {
        crate::models::TaskType::Todo => 0,
        crate::models::TaskType::Project => 1,
        crate::models::TaskType::Heading => 2,
        crate::models::TaskType::Area => 3,
    };
    sqlx::query(
        "INSERT INTO TMTask \
(uuid, title, notes, type, status, trashed, creationDate, userModificationDate, cachedTags) \
VALUES (?, ?, NULL, ?, 0, 0, 0, 0, ?)",
    )
    .bind(uuid.to_string())
    .bind(title)
    .bind(type_code)
    .bind(empty_tags)
    .execute(&db.pool)
    .await
    .unwrap();
    uuid
}
#[tokio::test]
async fn test_execute_with_where_expr_or_status() {
    use crate::filter_expr::FilterExpr;
    // An OR of two status predicates should admit both statuses and nothing else.
    let (db, _file) = open_test_db().await;
    let incomplete = insert_task_with_status(&db, "inc", TaskStatus::Incomplete).await;
    let completed = insert_task_with_status(&db, "comp", TaskStatus::Completed).await;
    let canceled = insert_task_with_status(&db, "canc", TaskStatus::Canceled).await;
    let matched = TaskQueryBuilder::new()
        .where_expr(
            FilterExpr::status(TaskStatus::Incomplete)
                .or(FilterExpr::status(TaskStatus::Completed)),
        )
        .execute(&db)
        .await
        .unwrap();
    let found: std::collections::HashSet<_> = matched.iter().map(|t| t.uuid).collect();
    assert!(found.contains(&incomplete));
    assert!(found.contains(&completed));
    assert!(!found.contains(&canceled));
}
#[tokio::test]
async fn test_execute_with_where_expr_not_type() {
    use crate::filter_expr::FilterExpr;
    use crate::models::TaskType;
    // Negating a type predicate should exclude only that type.
    let (db, _file) = open_test_db().await;
    let todo_uuid = insert_task_with_type(&db, "todo", TaskType::Todo).await;
    let project_uuid = insert_task_with_type(&db, "project", TaskType::Project).await;
    let matched = TaskQueryBuilder::new()
        .where_expr(FilterExpr::task_type(TaskType::Project).not())
        .execute(&db)
        .await
        .unwrap();
    let found: std::collections::HashSet<_> = matched.iter().map(|t| t.uuid).collect();
    assert!(found.contains(&todo_uuid));
    assert!(!found.contains(&project_uuid));
}
#[tokio::test]
async fn test_execute_pagination_defers_to_rust_when_where_expr_set() {
    use crate::filter_expr::FilterExpr;
    // With a where-expr, pagination happens after Rust-side filtering, so
    // pages must still be non-overlapping and respect the predicate.
    let (db, _file) = open_test_db().await;
    for title in ["inc-1", "inc-2", "inc-3"] {
        insert_task_with_status(&db, title, TaskStatus::Incomplete).await;
    }
    insert_task_with_status(&db, "comp", TaskStatus::Completed).await;
    let first = TaskQueryBuilder::new()
        .where_expr(FilterExpr::status(TaskStatus::Incomplete))
        .limit(1)
        .offset(0)
        .execute(&db)
        .await
        .unwrap();
    let second = TaskQueryBuilder::new()
        .where_expr(FilterExpr::status(TaskStatus::Incomplete))
        .limit(1)
        .offset(1)
        .execute(&db)
        .await
        .unwrap();
    assert_eq!(first.len(), 1);
    assert_eq!(second.len(), 1);
    assert_ne!(first[0].uuid, second[0].uuid);
    assert_eq!(first[0].status, TaskStatus::Incomplete);
    assert_eq!(second[0].status, TaskStatus::Incomplete);
}
#[tokio::test]
async fn test_execute_combines_where_expr_with_filters_status() {
    use crate::filter_expr::FilterExpr;
    // A builder-level status filter and a where-expr must intersect.
    let (db, _file) = open_test_db().await;
    let expected = insert_task(&db, "needle", None, &["work"]).await;
    insert_task(&db, "decoy", None, &["work"]).await;
    let matched = TaskQueryBuilder::new()
        .status(TaskStatus::Incomplete)
        .where_expr(FilterExpr::title_contains("needle"))
        .execute(&db)
        .await
        .unwrap();
    assert_eq!(matched.len(), 1);
    assert!(matched.iter().any(|t| t.uuid == expected));
}
#[tokio::test]
async fn test_execute_combines_where_expr_with_any_tags() {
    use crate::filter_expr::FilterExpr;
    // A tag OR-filter and a where-expr must both hold for a row to match.
    let (db, _file) = open_test_db().await;
    let expected = insert_task(&db, "needle-task", None, &["work"]).await;
    insert_task(&db, "decoy-task", None, &["work"]).await;
    insert_task(&db, "needle-but-wrong-tag", None, &["personal"]).await;
    let matched = TaskQueryBuilder::new()
        .any_tags(vec!["work".to_string()])
        .where_expr(FilterExpr::title_contains("needle"))
        .execute(&db)
        .await
        .unwrap();
    assert_eq!(matched.len(), 1);
    assert!(matched.iter().any(|t| t.uuid == expected));
}
#[cfg(feature = "batch-operations")]
mod cursor_pagination_tests {
use super::*;
// Walks the cursor API page by page and checks full coverage with no
// duplicates across pages.
#[tokio::test]
async fn test_execute_paged_walks_through_all_tasks() {
let (db, _f) = open_test_db().await;
let mut inserted = vec![];
for i in 0..5 {
inserted.push(insert_task(&db, &format!("task-{i}"), None, &[]).await);
}
let mut all_collected: Vec<Uuid> = vec![];
let mut cursor = None;
let mut page_count = 0;
loop {
let mut builder = TaskQueryBuilder::new().limit(2);
// Resume from the previous page's cursor, if any.
if let Some(c) = cursor.take() {
builder = builder.after(c);
}
let page = builder.execute_paged(&db).await.unwrap();
page_count += 1;
all_collected.extend(page.items.iter().map(|t| t.uuid));
if let Some(next) = page.next_cursor {
cursor = Some(next);
} else {
break;
}
// Safety valve: a broken cursor implementation must not hang the suite.
assert!(page_count < 10, "runaway pagination loop");
}
// Every inserted row must have appeared on some page...
let inserted_set: std::collections::HashSet<_> = inserted.iter().copied().collect();
let collected_set: std::collections::HashSet<_> =
all_collected.iter().copied().collect();
for uuid in &inserted_set {
assert!(collected_set.contains(uuid), "missing inserted uuid {uuid}");
}
// ...and no row may appear on more than one page.
let mut sorted = all_collected.clone();
sorted.sort();
sorted.dedup();
assert_eq!(sorted.len(), all_collected.len(), "duplicates in pages");
}
#[tokio::test]
async fn test_execute_paged_last_page_has_no_next_cursor() {
    // A page that is not full signals the end of the result set.
    let (db, _file) = open_test_db().await;
    insert_task(&db, "only-task", None, &[]).await;
    let page = TaskQueryBuilder::new()
        .status(TaskStatus::Incomplete)
        .limit(100)
        .execute_paged(&db)
        .await
        .unwrap();
    assert!(
        page.next_cursor.is_none(),
        "non-full page should not have a next cursor"
    );
}
#[tokio::test]
async fn test_execute_paged_with_status_filter() {
    // Cursor pagination must honor a status filter on every returned item.
    let (db, _file) = open_test_db().await;
    let expected = insert_task(&db, "incomplete-task", None, &[]).await;
    let page = TaskQueryBuilder::new()
        .status(TaskStatus::Incomplete)
        .limit(50)
        .execute_paged(&db)
        .await
        .unwrap();
    assert!(page.items.iter().any(|t| t.uuid == expected));
    assert!(page.items.iter().all(|t| t.status == TaskStatus::Incomplete));
}
// Cursor pagination combined with a Rust-side tag post-filter must still
// return exactly the tagged rows, across however many pages it takes.
#[tokio::test]
async fn test_execute_paged_with_post_filter_any_tags() {
    let (db, _f) = open_test_db().await;
    let a1 = insert_task_with_tags(&db, "a1", &["a"]).await;
    let a2 = insert_task_with_tags(&db, "a2", &["a"]).await;
    let a3 = insert_task_with_tags(&db, "a3", &["a"]).await;
    let _b = insert_task_with_tags(&db, "b1", &["b"]).await;
    let mut all: Vec<Uuid> = vec![];
    let mut cursor = None;
    let mut page_count = 0;
    loop {
        let mut builder = TaskQueryBuilder::new()
            .any_tags(vec!["a".to_string()])
            .limit(2);
        if let Some(c) = cursor.take() {
            builder = builder.after(c);
        }
        let page = builder.execute_paged(&db).await.unwrap();
        page_count += 1;
        all.extend(page.items.iter().map(|t| t.uuid));
        if let Some(n) = page.next_cursor {
            cursor = Some(n);
        } else {
            break;
        }
        // Fix: this loop previously had no termination guard, so a broken
        // next_cursor would hang the suite forever. Mirrors the guard in
        // test_execute_paged_walks_through_all_tasks.
        assert!(page_count < 10, "runaway pagination loop");
    }
    let collected: std::collections::HashSet<_> = all.iter().copied().collect();
    assert!(collected.contains(&a1));
    assert!(collected.contains(&a2));
    assert!(collected.contains(&a3));
    assert_eq!(collected.len(), 3, "should contain only a-tagged tasks");
}
#[tokio::test]
async fn test_execute_paged_default_page_size_when_no_limit() {
    // Without an explicit limit the page is capped at the default size (100).
    let (db, _file) = open_test_db().await;
    let page = TaskQueryBuilder::new()
        .status(TaskStatus::Incomplete)
        .execute_paged(&db)
        .await
        .unwrap();
    assert!(page.items.len() <= 100);
    assert!(page.next_cursor.is_none());
}
#[tokio::test]
async fn test_query_tasks_order_is_deterministic_by_uuid_tiebreak() {
    // Rows sharing a creationDate must still come back in a stable order.
    let (db, _file) = open_test_db().await;
    for i in 0..3 {
        insert_task(&db, &format!("dup-time-{i}"), None, &[]).await;
    }
    let first_pass = db.query_tasks(&TaskFilters::default()).await.unwrap();
    let second_pass = db.query_tasks(&TaskFilters::default()).await.unwrap();
    let first_uuids: Vec<_> = first_pass.iter().map(|t| t.uuid).collect();
    let second_uuids: Vec<_> = second_pass.iter().map(|t| t.uuid).collect();
    assert_eq!(
        first_uuids, second_uuids,
        "tied-creationDate ordering should be deterministic"
    );
}
}
#[cfg(feature = "batch-operations")]
mod cursor_streaming_tests {
use super::*;
use futures_util::{StreamExt, TryStreamExt};
#[tokio::test]
async fn test_execute_stream_yields_all_tasks() {
    // Streaming with a small page size must still yield every row exactly once.
    let (db, _file) = open_test_db().await;
    let mut inserted = vec![];
    for i in 0..5 {
        inserted.push(insert_task(&db, &format!("task-{i}"), None, &[]).await);
    }
    let streamed: Vec<_> = TaskQueryBuilder::new()
        .limit(2)
        .execute_stream(&db)
        .try_collect::<Vec<_>>()
        .await
        .unwrap();
    let streamed_uuids: std::collections::HashSet<_> =
        streamed.iter().map(|t| t.uuid).collect();
    for uuid in &inserted {
        assert!(
            streamed_uuids.contains(uuid),
            "stream missing inserted uuid {uuid}"
        );
    }
    // Equal lengths between the list and its set means no duplicates.
    assert_eq!(
        streamed.len(),
        streamed_uuids.len(),
        "stream yielded duplicates"
    );
}
#[tokio::test]
async fn test_execute_stream_with_status_filter() {
    // Every streamed item must satisfy the status filter.
    let (db, _file) = open_test_db().await;
    let expected = insert_task(&db, "incomplete-task", None, &[]).await;
    let streamed = TaskQueryBuilder::new()
        .status(TaskStatus::Incomplete)
        .limit(50)
        .execute_stream(&db)
        .try_collect::<Vec<_>>()
        .await
        .unwrap();
    assert!(streamed.iter().any(|t| t.uuid == expected));
    assert!(streamed.iter().all(|t| t.status == TaskStatus::Incomplete));
}
#[tokio::test]
async fn test_execute_stream_with_any_tags_post_filter() {
    // The tag OR post-filter must apply across streamed pages.
    let (db, _file) = open_test_db().await;
    let a1 = insert_task_with_tags(&db, "a1", &["a"]).await;
    let a2 = insert_task_with_tags(&db, "a2", &["a"]).await;
    let a3 = insert_task_with_tags(&db, "a3", &["a"]).await;
    insert_task_with_tags(&db, "b1", &["b"]).await;
    let streamed = TaskQueryBuilder::new()
        .any_tags(vec!["a".to_string()])
        .limit(2)
        .execute_stream(&db)
        .try_collect::<Vec<_>>()
        .await
        .unwrap();
    let found: std::collections::HashSet<_> = streamed.iter().map(|t| t.uuid).collect();
    for uuid in [a1, a2, a3] {
        assert!(found.contains(&uuid));
    }
    assert_eq!(found.len(), 3, "should yield only a-tagged tasks");
}
#[tokio::test]
async fn test_execute_stream_empty_result() {
    // Filtering by a project UUID that matches nothing yields an empty stream.
    let (db, _file) = open_test_db().await;
    let streamed = TaskQueryBuilder::new()
        .project_uuid(Uuid::new_v4())
        .execute_stream(&db)
        .try_collect::<Vec<_>>()
        .await
        .unwrap();
    assert!(streamed.is_empty());
}
#[tokio::test]
async fn test_execute_stream_rejects_fuzzy_search() {
    // Fuzzy search is incompatible with streaming: the first item must be an
    // InvalidCursor error, after which the stream must be exhausted.
    let (db, _file) = open_test_db().await;
    let mut stream = TaskQueryBuilder::new()
        .fuzzy_search("anything")
        .execute_stream(&db);
    let first = stream.next().await;
    match first {
        Some(Err(crate::error::ThingsError::InvalidCursor(msg))) => {
            assert!(msg.contains("fuzzy"), "msg: {msg}");
        }
        other => panic!("expected first item to be InvalidCursor, got {other:?}"),
    }
    assert!(stream.next().await.is_none());
}
#[tokio::test]
async fn test_execute_stream_cross_page_ordering() {
    // Items streamed across page boundaries must keep the same relative order
    // as one full non-streaming query.
    let (db, _file) = open_test_db().await;
    for i in 0..5 {
        insert_task(&db, &format!("task-{i}"), None, &[]).await;
    }
    let streamed: Vec<Uuid> = TaskQueryBuilder::new()
        .limit(2)
        .execute_stream(&db)
        .try_collect::<Vec<_>>()
        .await
        .unwrap()
        .iter()
        .map(|t| t.uuid)
        .collect();
    let full: Vec<Uuid> = db
        .query_tasks(&TaskFilters::default())
        .await
        .unwrap()
        .iter()
        .map(|t| t.uuid)
        .collect();
    // Restrict the full ordering to streamed rows, then compare sequences.
    let streamed_set: std::collections::HashSet<_> = streamed.iter().copied().collect();
    let full_restricted: Vec<Uuid> = full
        .into_iter()
        .filter(|u| streamed_set.contains(u))
        .collect();
    assert_eq!(
        streamed, full_restricted,
        "stream ordering should agree with full-query (creationDate DESC, uuid DESC)"
    );
}
}
}
}