use crate::{
database::{mappers::map_task_row, query_builders::TaskUpdateBuilder, validators},
error::{Result as ThingsResult, ThingsError},
models::{
Area, CreateTaskRequest, DeleteChildHandling, Project, Task, TaskStatus, TaskType,
UpdateTaskRequest,
},
};
use chrono::{DateTime, NaiveDate, Utc};
use serde::{Deserialize, Serialize};
use sqlx::{pool::PoolOptions, Row, SqlitePool};
use std::path::{Path, PathBuf};
use std::time::Duration;
use tracing::{debug, error, info, instrument};
use uuid::Uuid;
/// Converts a floating-point timestamp to `i64`, clamping invalid input to 0.
///
/// Returns 0 for non-finite values, negative values, and anything past
/// 2100-01-01T00:00:00Z (4_102_444_800 seconds since the Unix epoch), which
/// is treated as corrupt data.
pub(crate) fn safe_timestamp_convert(ts_f64: f64) -> i64 {
    // Upper bound: 2100-01-01T00:00:00Z.
    const MAX_TIMESTAMP: f64 = 4_102_444_800_f64;
    if ts_f64.is_finite() && (0.0..=MAX_TIMESTAMP).contains(&ts_f64) {
        // The range check above guarantees the truncated value fits in i64,
        // so a direct cast is exact — no string round-trip needed.
        ts_f64.trunc() as i64
    } else {
        0
    }
}
/// Converts a Things date value (seconds since 2001-01-01 UTC) to a calendar date.
///
/// Returns `None` for zero or negative input, which represents "no date".
pub(crate) fn things_date_to_naive_date(seconds_since_2001: i64) -> Option<chrono::NaiveDate> {
    use chrono::{TimeZone, Utc};
    (seconds_since_2001 > 0).then(|| {
        // The Things reference epoch: 2001-01-01T00:00:00Z (always valid).
        let epoch_2001 = Utc.with_ymd_and_hms(2001, 1, 1, 0, 0, 0).single().unwrap();
        (epoch_2001 + chrono::Duration::seconds(seconds_since_2001)).date_naive()
    })
}
/// Converts a calendar date to a Things timestamp: seconds from 2001-01-01 UTC
/// to midnight UTC of `date`.
pub fn naive_date_to_things_timestamp(date: NaiveDate) -> i64 {
    use chrono::{NaiveTime, TimeZone, Utc};
    // Things reference epoch (always constructible, hence the unwraps).
    let epoch_2001 = Utc.with_ymd_and_hms(2001, 1, 1, 0, 0, 0).single().unwrap();
    let midnight = NaiveTime::from_hms_opt(0, 0, 0).unwrap();
    let moment = date
        .and_time(midnight)
        .and_local_timezone(Utc)
        .single()
        .unwrap();
    moment.timestamp() - epoch_2001.timestamp()
}
/// Serializes a tag list to a JSON byte blob (the `cachedTags` storage format).
pub fn serialize_tags_to_blob(tags: &[String]) -> ThingsResult<Vec<u8>> {
    match serde_json::to_vec(tags) {
        Ok(bytes) => Ok(bytes),
        Err(e) => Err(ThingsError::unknown(format!("Failed to serialize tags: {e}"))),
    }
}
/// Deserializes a JSON byte blob back into a tag list; inverse of
/// `serialize_tags_to_blob`.
pub fn deserialize_tags_from_blob(blob: &[u8]) -> ThingsResult<Vec<String>> {
    match serde_json::from_slice(blob) {
        Ok(tags) => Ok(tags),
        Err(e) => Err(ThingsError::unknown(format!("Failed to deserialize tags: {e}"))),
    }
}
/// Derives a deterministic `Uuid` from a Things identifier string.
///
/// Layout: bytes 0-7 are the big-endian `DefaultHasher` digest of the input,
/// byte 8 is the input length (capped at 255), bytes 9-15 are the first seven
/// characters (padded with '0').
///
/// NOTE(review): `DefaultHasher`'s algorithm is not guaranteed stable across
/// Rust releases, so these UUIDs may change after a toolchain upgrade —
/// confirm they are never persisted.
pub(crate) fn things_uuid_to_uuid(things_uuid: &str) -> Uuid {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    let mut hasher = DefaultHasher::new();
    things_uuid.hash(&mut hasher);
    // Big-endian bytes are exactly the shift-extracted bytes of the u64 digest.
    let digest = hasher.finish().to_be_bytes();

    let mut bytes = [0u8; 16];
    bytes[..8].copy_from_slice(&digest);
    bytes[8] = u8::try_from(things_uuid.len().min(255)).unwrap_or(255);
    // Fill the tail with the first seven characters, '0'-padded.
    let mut chars = things_uuid.chars();
    for slot in bytes[9..].iter_mut() {
        *slot = chars.next().unwrap_or('0') as u8;
    }
    Uuid::from_bytes(bytes)
}
impl TaskStatus {
fn from_i32(value: i32) -> Option<Self> {
match value {
0 => Some(TaskStatus::Incomplete),
1 => Some(TaskStatus::Completed),
2 => Some(TaskStatus::Canceled),
3 => Some(TaskStatus::Trashed),
_ => None,
}
}
}
impl TaskType {
fn from_i32(value: i32) -> Option<Self> {
match value {
0 => Some(TaskType::Todo),
1 => Some(TaskType::Project),
2 => Some(TaskType::Heading),
3 => Some(TaskType::Area),
_ => None,
}
}
}
/// Tuning knobs for the SQLite connection pool.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabasePoolConfig {
    /// Maximum number of connections the pool may open.
    pub max_connections: u32,
    /// Minimum number of connections the pool keeps open.
    pub min_connections: u32,
    /// How long to wait when acquiring a connection.
    pub connect_timeout: Duration,
    /// How long an idle connection may linger before being closed.
    pub idle_timeout: Duration,
    /// Maximum lifetime of any single connection.
    pub max_lifetime: Duration,
    /// Whether to test connections before handing them out.
    pub test_before_acquire: bool,
    /// SQLite PRAGMA settings applied after connecting.
    pub sqlite_optimizations: SqliteOptimizations,
}
/// SQLite PRAGMA settings applied when the pool is created.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SqliteOptimizations {
    /// NOTE(review): not consulted by `apply_sqlite_optimizations` — WAL is
    /// driven by `journal_mode`; confirm whether this flag is still needed.
    pub enable_wal_mode: bool,
    /// Value passed to `PRAGMA synchronous` (e.g. "NORMAL").
    pub synchronous_mode: String,
    /// Value passed verbatim to `PRAGMA cache_size`.
    pub cache_size: i32,
    /// Whether `PRAGMA foreign_keys` is set to ON.
    pub enable_foreign_keys: bool,
    /// Value passed to `PRAGMA journal_mode` (e.g. "WAL").
    pub journal_mode: String,
    /// Value passed to `PRAGMA temp_store` (e.g. "MEMORY").
    pub temp_store: String,
    /// Value passed to `PRAGMA mmap_size`, in bytes.
    pub mmap_size: i64,
    /// Whether to run `PRAGMA optimize` after setup.
    pub enable_query_planner: bool,
}
impl Default for DatabasePoolConfig {
fn default() -> Self {
Self {
max_connections: 10,
min_connections: 1,
connect_timeout: Duration::from_secs(30),
idle_timeout: Duration::from_secs(600), max_lifetime: Duration::from_secs(1800), test_before_acquire: true,
sqlite_optimizations: SqliteOptimizations::default(),
}
}
}
impl Default for SqliteOptimizations {
fn default() -> Self {
Self {
enable_wal_mode: true,
synchronous_mode: "NORMAL".to_string(),
cache_size: -20000, enable_foreign_keys: true,
journal_mode: "WAL".to_string(),
temp_store: "MEMORY".to_string(),
mmap_size: 268_435_456, enable_query_planner: true,
}
}
}
/// Point-in-time snapshot of connection-pool health.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PoolHealthStatus {
    /// Whether a `SELECT 1` liveness probe succeeded.
    pub is_healthy: bool,
    /// Total connections currently managed by the pool.
    pub pool_size: u32,
    /// Connections currently checked out (size minus idle).
    pub active_connections: u32,
    /// Connections sitting idle in the pool.
    pub idle_connections: u32,
    /// Configured upper bound on pool size.
    pub max_connections: u32,
    /// Configured lower bound on pool size.
    pub min_connections: u32,
    /// Configured connection-acquire timeout.
    pub connection_timeout: Duration,
    /// Configured idle timeout, if any.
    pub idle_timeout: Option<Duration>,
    /// Configured maximum connection lifetime, if any.
    pub max_lifetime: Option<Duration>,
}
/// Pool utilization metrics, including a timed liveness probe.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PoolMetrics {
    /// Total connections currently managed by the pool.
    pub pool_size: u32,
    /// Connections currently checked out.
    pub active_connections: u32,
    /// Connections sitting idle in the pool.
    pub idle_connections: u32,
    /// Configured upper bound on pool size.
    pub max_connections: u32,
    /// Configured lower bound on pool size.
    pub min_connections: u32,
    /// active / max, as a percentage (0 when max is 0).
    pub utilization_percentage: f64,
    /// Whether the `SELECT 1` probe succeeded.
    pub is_healthy: bool,
    /// Wall-clock duration of the liveness probe, in milliseconds.
    pub response_time_ms: u64,
    /// Configured connection-acquire timeout.
    pub connection_timeout: Duration,
    /// Configured idle timeout, if any.
    pub idle_timeout: Option<Duration>,
    /// Configured maximum connection lifetime, if any.
    pub max_lifetime: Option<Duration>,
}
/// Combined report: pool health + pool metrics + database row counts.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComprehensiveHealthStatus {
    /// True only when both the health and metrics probes reported healthy.
    pub overall_healthy: bool,
    /// Pool health snapshot.
    pub pool_health: PoolHealthStatus,
    /// Pool utilization metrics.
    pub pool_metrics: PoolMetrics,
    /// Row counts for tasks, projects, and areas.
    pub database_stats: DatabaseStats,
    /// When this report was generated (UTC).
    pub timestamp: DateTime<Utc>,
}
/// Handle to a Things SQLite database: a connection pool plus the
/// configuration it was built with (kept for health/metrics reporting).
#[derive(Debug, Clone)]
pub struct ThingsDatabase {
    // Shared sqlx pool; cloning the handle clones the pool cheaply.
    pool: SqlitePool,
    // Retained so health/metrics endpoints can report configured limits.
    config: DatabasePoolConfig,
}
impl ThingsDatabase {
/// Opens the database at `database_path` with default pool settings.
#[instrument]
pub async fn new(database_path: &Path) -> ThingsResult<Self> {
    // Delegate to the configurable constructor.
    let config = DatabasePoolConfig::default();
    Self::new_with_config(database_path, config).await
}
/// Opens the database at `database_path` with the supplied pool settings,
/// then applies the configured SQLite PRAGMA optimizations.
///
/// # Errors
/// Fails if the connection cannot be established or a PRAGMA fails.
#[instrument]
pub async fn new_with_config(
    database_path: &Path,
    config: DatabasePoolConfig,
) -> ThingsResult<Self> {
    let database_url = format!("sqlite:{}", database_path.display());
    info!(
        "Connecting to SQLite database at: {} with optimized pool",
        database_url
    );
    // Build the pool options first, then connect.
    let pool_options = PoolOptions::new()
        .max_connections(config.max_connections)
        .min_connections(config.min_connections)
        .acquire_timeout(config.connect_timeout)
        .idle_timeout(Some(config.idle_timeout))
        .max_lifetime(Some(config.max_lifetime))
        .test_before_acquire(config.test_before_acquire);
    let pool = pool_options
        .connect(&database_url)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to connect to database: {e}")))?;
    Self::apply_sqlite_optimizations(&pool, &config.sqlite_optimizations).await?;
    info!(
        "Database connection pool established successfully with {} max connections",
        config.max_connections
    );
    Ok(Self { pool, config })
}
/// Applies the configured PRAGMA settings to the freshly created pool.
///
/// NOTE(review): `enable_wal_mode` is never consulted here — WAL is driven
/// by `journal_mode` instead; confirm the flag is intentionally unused.
///
/// # Errors
/// Returns an error naming the first PRAGMA that failed.
async fn apply_sqlite_optimizations(
    pool: &SqlitePool,
    optimizations: &SqliteOptimizations,
) -> ThingsResult<()> {
    // Shared execute/error-mapping boilerplate for every PRAGMA below.
    async fn run_pragma(pool: &SqlitePool, sql: String, action: &str) -> ThingsResult<()> {
        sqlx::query(&sql)
            .execute(pool)
            .await
            .map(|_| ())
            .map_err(|e| ThingsError::unknown(format!("Failed to {action}: {e}")))
    }

    run_pragma(
        pool,
        format!("PRAGMA journal_mode = {}", optimizations.journal_mode),
        "set journal mode",
    )
    .await?;
    run_pragma(
        pool,
        format!("PRAGMA synchronous = {}", optimizations.synchronous_mode),
        "set synchronous mode",
    )
    .await?;
    run_pragma(
        pool,
        format!("PRAGMA cache_size = {}", optimizations.cache_size),
        "set cache size",
    )
    .await?;
    let fk_setting = if optimizations.enable_foreign_keys {
        "ON"
    } else {
        "OFF"
    };
    run_pragma(
        pool,
        format!("PRAGMA foreign_keys = {fk_setting}"),
        "set foreign keys",
    )
    .await?;
    run_pragma(
        pool,
        format!("PRAGMA temp_store = {}", optimizations.temp_store),
        "set temp store",
    )
    .await?;
    run_pragma(
        pool,
        format!("PRAGMA mmap_size = {}", optimizations.mmap_size),
        "set mmap size",
    )
    .await?;
    if optimizations.enable_query_planner {
        run_pragma(pool, "PRAGMA optimize".to_string(), "optimize database").await?;
    }
    debug!(
        "Applied SQLite optimizations: WAL={}, sync={}, cache={}KB, fk={}, temp={}, mmap={}MB",
        optimizations.enable_wal_mode,
        optimizations.synchronous_mode,
        optimizations.cache_size.abs() / 1024,
        optimizations.enable_foreign_keys,
        optimizations.temp_store,
        optimizations.mmap_size / 1024 / 1024
    );
    Ok(())
}
/// Opens a database from a raw connection string using default pool settings.
#[instrument]
pub async fn from_connection_string(database_url: &str) -> ThingsResult<Self> {
    let config = DatabasePoolConfig::default();
    Self::from_connection_string_with_config(database_url, config).await
}
/// Opens a database from a raw connection string with the supplied pool
/// settings, then applies the configured SQLite PRAGMA optimizations.
///
/// # Errors
/// Fails if the connection cannot be established or a PRAGMA fails.
#[instrument]
pub async fn from_connection_string_with_config(
    database_url: &str,
    config: DatabasePoolConfig,
) -> ThingsResult<Self> {
    info!(
        "Connecting to SQLite database: {} with optimized pool",
        database_url
    );
    let pool_options = PoolOptions::new()
        .max_connections(config.max_connections)
        .min_connections(config.min_connections)
        .acquire_timeout(config.connect_timeout)
        .idle_timeout(Some(config.idle_timeout))
        .max_lifetime(Some(config.max_lifetime))
        .test_before_acquire(config.test_before_acquire);
    let pool = pool_options
        .connect(database_url)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to connect to database: {e}")))?;
    Self::apply_sqlite_optimizations(&pool, &config.sqlite_optimizations).await?;
    info!(
        "Database connection pool established successfully with {} max connections",
        config.max_connections
    );
    Ok(Self { pool, config })
}
/// Returns a reference to the underlying SQLite connection pool.
#[must_use]
pub fn pool(&self) -> &SqlitePool {
    &self.pool
}
/// Probes the database with `SELECT 1`; logs and returns the outcome.
#[instrument]
pub async fn is_connected(&self) -> bool {
    let probe = sqlx::query("SELECT 1").fetch_one(&self.pool).await;
    if let Err(e) = probe {
        error!("Database connection check failed: {}", e);
        return false;
    }
    debug!("Database connection is healthy");
    true
}
/// Reports the pool's current health: connection counts plus a liveness probe.
///
/// # Errors
/// Returns `ThingsResult` for API symmetry; the body itself cannot fail.
#[instrument]
pub async fn get_pool_health(&self) -> ThingsResult<PoolHealthStatus> {
    let pool_size = self.pool.size();
    let idle_connections = u32::try_from(self.pool.num_idle()).unwrap_or(0);
    // `size()` and `num_idle()` are sampled at different instants, so the idle
    // count can transiently exceed the size; saturate instead of panicking on
    // underflow in debug builds.
    let active_connections = pool_size.saturating_sub(idle_connections);
    let is_healthy = self.is_connected().await;
    Ok(PoolHealthStatus {
        is_healthy,
        pool_size,
        active_connections,
        idle_connections,
        max_connections: self.config.max_connections,
        min_connections: self.config.min_connections,
        connection_timeout: self.config.connect_timeout,
        idle_timeout: Some(self.config.idle_timeout),
        max_lifetime: Some(self.config.max_lifetime),
    })
}
/// Collects pool utilization metrics plus a timed liveness probe.
///
/// # Errors
/// Returns `ThingsResult` for API symmetry; the body itself cannot fail.
#[instrument]
pub async fn get_pool_metrics(&self) -> ThingsResult<PoolMetrics> {
    let pool_size = self.pool.size();
    let idle_connections = u32::try_from(self.pool.num_idle()).unwrap_or(0);
    // `size()` and `num_idle()` are sampled at different instants, so the idle
    // count can transiently exceed the size; saturate instead of panicking on
    // underflow in debug builds.
    let active_connections = pool_size.saturating_sub(idle_connections);
    let max_connections = self.config.max_connections;
    let utilization_percentage = if max_connections > 0 {
        (f64::from(active_connections) / f64::from(max_connections)) * 100.0
    } else {
        0.0
    };
    // Time the liveness probe to report a rough round-trip latency.
    let start_time = std::time::Instant::now();
    let is_connected = self.is_connected().await;
    let response_time_ms = u64::try_from(start_time.elapsed().as_millis()).unwrap_or(0);
    Ok(PoolMetrics {
        pool_size,
        active_connections,
        idle_connections,
        max_connections,
        min_connections: self.config.min_connections,
        utilization_percentage,
        is_healthy: is_connected,
        response_time_ms,
        connection_timeout: self.config.connect_timeout,
        idle_timeout: Some(self.config.idle_timeout),
        max_lifetime: Some(self.config.max_lifetime),
    })
}
/// Aggregates pool health, pool metrics, and database row counts into a
/// single timestamped report.
///
/// # Errors
/// Propagates failures from any of the three underlying checks.
#[instrument]
pub async fn comprehensive_health_check(&self) -> ThingsResult<ComprehensiveHealthStatus> {
    let pool_health = self.get_pool_health().await?;
    let pool_metrics = self.get_pool_metrics().await?;
    let database_stats = self.get_stats().await?;
    // Overall health requires both probes to have succeeded.
    let overall_healthy = pool_health.is_healthy && pool_metrics.is_healthy;
    Ok(ComprehensiveHealthStatus {
        overall_healthy,
        pool_health,
        pool_metrics,
        database_stats,
        timestamp: Utc::now(),
    })
}
/// Returns row counts: all TMTask rows, project rows (type = 1), and areas.
///
/// # Errors
/// Fails if any of the three COUNT queries fails.
#[instrument]
pub async fn get_stats(&self) -> ThingsResult<DatabaseStats> {
    let task_total: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM TMTask")
        .fetch_one(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to get task count: {e}")))?;
    let project_total: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM TMTask WHERE type = 1")
        .fetch_one(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to get project count: {e}")))?;
    let area_total: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM TMArea")
        .fetch_one(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to get area count: {e}")))?;
    // Negative counts are impossible; unwrap_or(0) only guards the conversion.
    Ok(DatabaseStats {
        task_count: task_total.try_into().unwrap_or(0),
        project_count: project_total.try_into().unwrap_or(0),
        area_count: area_total.try_into().unwrap_or(0),
    })
}
/// Fetches every row in TMTask, newest first, mapped into `Task` values.
///
/// NOTE(review): this query uses snake_case columns (start_date, due_date,
/// project_uuid, created, ...) while the rest of this file queries Things'
/// camelCase schema (startDate, deadline, project, creationDate, ...) —
/// confirm which schema this method is meant to target.
///
/// # Errors
/// Fails if the query fails or a row's `uuid` column is not a parseable UUID.
#[instrument]
pub async fn get_all_tasks(&self) -> ThingsResult<Vec<Task>> {
let rows = sqlx::query(
r"
SELECT
uuid, title, status, type,
start_date, due_date,
project_uuid, area_uuid,
notes, tags,
created, modified
FROM TMTask
ORDER BY created DESC
",
)
.fetch_all(&self.pool)
.await
.map_err(|e| ThingsError::unknown(format!("Failed to fetch tasks: {e}")))?;
let mut tasks = Vec::new();
for row in rows {
let task = Task {
// Strict UUID parsing here (other methods fall back to things_uuid_to_uuid).
uuid: Uuid::parse_str(&row.get::<String, _>("uuid"))
.map_err(|e| ThingsError::unknown(format!("Invalid task UUID: {e}")))?,
title: row.get("title"),
// Unknown status/type values degrade to Incomplete/Todo rather than erroring.
status: TaskStatus::from_i32(row.get("status")).unwrap_or(TaskStatus::Incomplete),
task_type: TaskType::from_i32(row.get("type")).unwrap_or(TaskType::Todo),
// Dates are stored as "YYYY-MM-DD" strings in this schema; unparseable → None.
start_date: row
.get::<Option<String>, _>("start_date")
.and_then(|s| NaiveDate::parse_from_str(&s, "%Y-%m-%d").ok()),
deadline: row
.get::<Option<String>, _>("due_date")
.and_then(|s| NaiveDate::parse_from_str(&s, "%Y-%m-%d").ok()),
// Invalid foreign-key UUIDs are silently dropped to None.
project_uuid: row
.get::<Option<String>, _>("project_uuid")
.and_then(|s| Uuid::parse_str(&s).ok()),
area_uuid: row
.get::<Option<String>, _>("area_uuid")
.and_then(|s| Uuid::parse_str(&s).ok()),
// Parent/children relationships are not populated by this query.
parent_uuid: None,
notes: row.get("notes"),
// Tags are a comma-separated string in this schema (not the JSON blob).
tags: row
.get::<Option<String>, _>("tags")
.map(|s| s.split(',').map(|s| s.trim().to_string()).collect())
.unwrap_or_default(),
children: Vec::new(),
// Unparseable timestamps fall back to "now" rather than failing the row.
created: DateTime::parse_from_rfc3339(&row.get::<String, _>("created"))
.ok()
.map_or_else(Utc::now, |dt| dt.with_timezone(&Utc)),
modified: DateTime::parse_from_rfc3339(&row.get::<String, _>("modified"))
.ok()
.map_or_else(Utc::now, |dt| dt.with_timezone(&Utc)),
stop_date: None,
};
tasks.push(task);
}
debug!("Fetched {} tasks", tasks.len());
Ok(tasks)
}
/// Fetches all non-trashed projects (TMTask rows with type = 1), newest first.
///
/// NOTE(review): `startDate`/`deadline` are fed to `DateTime::from_timestamp`
/// (Unix epoch), while `things_date_to_naive_date` elsewhere treats Things
/// dates as seconds since 2001-01-01 — confirm which epoch these columns use.
///
/// # Errors
/// Fails if the query itself fails; individual field parse failures degrade
/// to defaults instead of erroring.
#[instrument]
pub async fn get_all_projects(&self) -> ThingsResult<Vec<Project>> {
let rows = sqlx::query(
r"
SELECT
uuid, title, status,
area, notes,
creationDate, userModificationDate,
startDate, deadline
FROM TMTask
WHERE type = 1 AND trashed = 0
ORDER BY creationDate DESC
",
)
.fetch_all(&self.pool)
.await
.map_err(|e| ThingsError::unknown(format!("Failed to fetch projects: {e}")))?;
let mut projects = Vec::new();
for row in rows {
let project = Project {
// Things IDs are not RFC-4122 UUIDs; derive a deterministic one.
uuid: things_uuid_to_uuid(&row.get::<String, _>("uuid")),
title: row.get("title"),
status: TaskStatus::from_i32(row.get("status")).unwrap_or(TaskStatus::Incomplete),
area_uuid: row
.get::<Option<String>, _>("area")
.map(|s| things_uuid_to_uuid(&s)),
notes: row.get("notes"),
deadline: row
.get::<Option<i64>, _>("deadline")
.and_then(|ts| DateTime::from_timestamp(ts, 0))
.map(|dt| dt.date_naive()),
start_date: row
.get::<Option<i64>, _>("startDate")
.and_then(|ts| DateTime::from_timestamp(ts, 0))
.map(|dt| dt.date_naive()),
// Tags and contained tasks are not populated by this query.
tags: Vec::new(),
tasks: Vec::new(),
// Creation/modification times are REAL columns; invalid values → "now".
created: {
let ts_f64 = row.get::<f64, _>("creationDate");
let ts = safe_timestamp_convert(ts_f64);
DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
},
modified: {
let ts_f64 = row.get::<f64, _>("userModificationDate");
let ts = safe_timestamp_convert(ts_f64);
DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
},
};
projects.push(project);
}
debug!("Fetched {} projects", projects.len());
Ok(projects)
}
/// Fetches all areas ordered by their manual sort index.
///
/// Only `uuid` and `title` come from the database; notes, projects, tags,
/// and timestamps are filled with placeholder defaults.
///
/// # Errors
/// Fails if the query fails.
#[instrument]
pub async fn get_all_areas(&self) -> ThingsResult<Vec<Area>> {
    let rows = sqlx::query(
        r"
SELECT
uuid, title, visible, `index`
FROM TMArea
ORDER BY `index` ASC
",
    )
    .fetch_all(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to fetch areas: {e}")))?;
    let areas: Vec<Area> = rows
        .into_iter()
        .map(|row| {
            let raw_uuid: String = row.get("uuid");
            Area {
                // Prefer a real UUID; fall back to the deterministic hash mapping.
                uuid: Uuid::parse_str(&raw_uuid)
                    .unwrap_or_else(|_| things_uuid_to_uuid(&raw_uuid)),
                title: row.get("title"),
                notes: None,
                projects: Vec::new(),
                tags: Vec::new(),
                created: Utc::now(),
                modified: Utc::now(),
            }
        })
        .collect();
    debug!("Fetched {} areas", areas.len());
    Ok(areas)
}
/// Fetches all tasks with the given status, newest first.
///
/// NOTE(review): like `get_all_tasks`, this queries snake_case columns
/// (start_date, due_date, created, ...) that differ from the camelCase
/// schema used elsewhere in this file — confirm the intended schema.
///
/// # Errors
/// Fails if the query fails or a row's `uuid` column is not a parseable UUID.
#[instrument]
pub async fn get_tasks_by_status(&self, status: TaskStatus) -> ThingsResult<Vec<Task>> {
// The enum discriminant matches the raw status column values (0..=3).
let status_value = status as i32;
let rows = sqlx::query(
r"
SELECT
uuid, title, status, type,
start_date, due_date,
project_uuid, area_uuid,
notes, tags,
created, modified
FROM TMTask
WHERE status = ?
ORDER BY created DESC
",
)
.bind(status_value)
.fetch_all(&self.pool)
.await
.map_err(|e| ThingsError::unknown(format!("Failed to fetch tasks by status: {e}")))?;
let mut tasks = Vec::new();
for row in rows {
let task = Task {
uuid: Uuid::parse_str(&row.get::<String, _>("uuid"))
.map_err(|e| ThingsError::unknown(format!("Invalid task UUID: {e}")))?,
title: row.get("title"),
// Unknown status/type values degrade to Incomplete/Todo rather than erroring.
status: TaskStatus::from_i32(row.get("status")).unwrap_or(TaskStatus::Incomplete),
task_type: TaskType::from_i32(row.get("type")).unwrap_or(TaskType::Todo),
// Dates stored as "YYYY-MM-DD" strings; unparseable → None.
start_date: row
.get::<Option<String>, _>("start_date")
.and_then(|s| NaiveDate::parse_from_str(&s, "%Y-%m-%d").ok()),
deadline: row
.get::<Option<String>, _>("due_date")
.and_then(|s| NaiveDate::parse_from_str(&s, "%Y-%m-%d").ok()),
project_uuid: row
.get::<Option<String>, _>("project_uuid")
.and_then(|s| Uuid::parse_str(&s).ok()),
area_uuid: row
.get::<Option<String>, _>("area_uuid")
.and_then(|s| Uuid::parse_str(&s).ok()),
// Parent/children relationships are not populated by this query.
parent_uuid: None,
notes: row.get("notes"),
// Tags are a comma-separated string in this schema.
tags: row
.get::<Option<String>, _>("tags")
.map(|s| s.split(',').map(|s| s.trim().to_string()).collect())
.unwrap_or_default(),
children: Vec::new(),
// Unparseable timestamps fall back to "now" rather than failing the row.
created: DateTime::parse_from_rfc3339(&row.get::<String, _>("created"))
.ok()
.map_or_else(Utc::now, |dt| dt.with_timezone(&Utc)),
modified: DateTime::parse_from_rfc3339(&row.get::<String, _>("modified"))
.ok()
.map_or_else(Utc::now, |dt| dt.with_timezone(&Utc)),
stop_date: None,
};
tasks.push(task);
}
debug!("Fetched {} tasks with status {:?}", tasks.len(), status);
Ok(tasks)
}
/// Searches non-trashed to-dos (type = 0) whose title or notes contain
/// `query` (case-insensitive per SQLite LIKE), newest first.
///
/// # Errors
/// Fails if the query fails or a row cannot be mapped to a `Task`.
#[instrument]
pub async fn search_tasks(&self, query: &str) -> ThingsResult<Vec<Task>> {
    // LIKE pattern: substring match anywhere in title or notes.
    let like_pattern = format!("%{query}%");
    let rows = sqlx::query(
        r"
SELECT
uuid, title, status, type,
startDate, deadline, stopDate,
project, area, heading,
notes, cachedTags,
creationDate, userModificationDate
FROM TMTask
WHERE (title LIKE ? OR notes LIKE ?) AND trashed = 0 AND type = 0
ORDER BY creationDate DESC
",
    )
    .bind(&like_pattern)
    .bind(&like_pattern)
    .fetch_all(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to search tasks: {e}")))?;
    let tasks: Vec<Task> = rows
        .iter()
        .map(map_task_row)
        .collect::<ThingsResult<_>>()?;
    debug!("Found {} tasks matching query: {}", tasks.len(), query);
    Ok(tasks)
}
/// Searches completed, non-trashed to-dos (the "logbook"), newest completion
/// first, with optional text, date-range, project, area, and tag filters.
///
/// The result limit defaults to 50 and is capped at 500. Tag filtering is
/// applied in memory after the query.
///
/// # Errors
/// Fails if the query fails or a row cannot be mapped to a `Task`.
#[allow(clippy::too_many_arguments)]
#[instrument(skip(self))]
pub async fn search_logbook(
    &self,
    search_text: Option<String>,
    from_date: Option<NaiveDate>,
    to_date: Option<NaiveDate>,
    project_uuid: Option<Uuid>,
    area_uuid: Option<Uuid>,
    tags: Option<Vec<String>>,
    limit: Option<u32>,
) -> ThingsResult<Vec<Task>> {
    let result_limit = limit.unwrap_or(50).min(500);
    // The previous version duplicated this entire query build across two
    // branches that differed only in the LIKE clause; build it once instead.
    let mut sql = String::from(
        "SELECT uuid, title, status, type, startDate, deadline, stopDate, project, area, heading, notes, cachedTags, creationDate, userModificationDate FROM TMTask WHERE status = 1 AND trashed = 0 AND type = 0",
    );
    if search_text.is_some() {
        sql.push_str(" AND (title LIKE ? OR notes LIKE ?)");
    }
    if let Some(date) = from_date {
        // Inclusive lower bound: midnight UTC of the start date.
        let timestamp = date.and_hms_opt(0, 0, 0).unwrap().and_utc().timestamp() as f64;
        sql.push_str(&format!(" AND stopDate >= {}", timestamp));
    }
    if let Some(date) = to_date {
        // Exclusive upper bound: midnight UTC of the day after the end date.
        let end_date = date + chrono::Duration::days(1);
        let timestamp = end_date.and_hms_opt(0, 0, 0).unwrap().and_utc().timestamp() as f64;
        sql.push_str(&format!(" AND stopDate < {}", timestamp));
    }
    if let Some(uuid) = project_uuid {
        // Uuid's Display renders only hex digits and dashes, so direct
        // interpolation cannot inject SQL.
        sql.push_str(&format!(" AND project = '{}'", uuid));
    }
    if let Some(uuid) = area_uuid {
        sql.push_str(&format!(" AND area = '{}'", uuid));
    }
    sql.push_str(&format!(" ORDER BY stopDate DESC LIMIT {result_limit}"));

    let mut query = sqlx::query(&sql);
    if let Some(ref text) = search_text {
        let pattern = format!("%{text}%");
        query = query.bind(pattern.clone()).bind(pattern);
    }
    let rows = query
        .fetch_all(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to search logbook: {e}")))?;

    let mut tasks = rows
        .iter()
        .map(map_task_row)
        .collect::<ThingsResult<Vec<Task>>>()?;
    // Tag filtering happens in memory because cachedTags is a serialized blob.
    if let Some(ref filter_tags) = tags {
        if !filter_tags.is_empty() {
            tasks.retain(|task| {
                filter_tags
                    .iter()
                    .all(|filter_tag| task.tags.contains(filter_tag))
            });
        }
    }
    debug!("Found {} completed tasks in logbook", tasks.len());
    Ok(tasks)
}
/// Fetches inbox tasks: incomplete, non-trashed to-dos with no project,
/// newest first, optionally limited.
///
/// # Errors
/// Fails if the query fails or a row cannot be mapped to a `Task`.
#[instrument(skip(self))]
pub async fn get_inbox(&self, limit: Option<usize>) -> ThingsResult<Vec<Task>> {
    const BASE_QUERY: &str = "SELECT uuid, title, type, status, notes, startDate, deadline, stopDate, creationDate, userModificationDate, project, area, heading, cachedTags FROM TMTask WHERE type = 0 AND status = 0 AND project IS NULL AND trashed = 0 ORDER BY creationDate DESC";
    let query = match limit {
        Some(limit) => format!("{BASE_QUERY} LIMIT {limit}"),
        None => BASE_QUERY.to_string(),
    };
    let rows = sqlx::query(&query)
        .fetch_all(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to fetch inbox tasks: {e}")))?;
    rows.iter().map(map_task_row).collect::<ThingsResult<Vec<Task>>>()
}
/// Fetches today's tasks: incomplete, non-trashed rows with a non-zero
/// `todayIndex`, in today-list order, optionally limited.
///
/// # Errors
/// Fails if the query fails or a row cannot be mapped to a `Task`.
#[instrument(skip(self))]
pub async fn get_today(&self, limit: Option<usize>) -> ThingsResult<Vec<Task>> {
    const BASE_QUERY: &str = "SELECT uuid, title, type, status, notes, startDate, deadline, stopDate, creationDate, userModificationDate, project, area, heading, cachedTags FROM TMTask WHERE status = 0 AND todayIndex IS NOT NULL AND todayIndex != 0 AND trashed = 0 ORDER BY todayIndex ASC";
    let query = match limit {
        Some(limit) => format!("{BASE_QUERY} LIMIT {limit}"),
        None => BASE_QUERY.to_string(),
    };
    let rows = sqlx::query(&query)
        .fetch_all(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to fetch today's tasks: {e}")))?;
    rows.iter().map(map_task_row).collect::<ThingsResult<Vec<Task>>>()
}
/// Returns all projects, honoring the optional `limit`.
///
/// The previous implementation silently discarded `limit`; it is now applied
/// by truncating the full result set (projects are already sorted newest
/// first by `get_all_projects`).
///
/// # Errors
/// Propagates any failure from `get_all_projects`.
#[instrument(skip(self))]
pub async fn get_projects(&self, limit: Option<usize>) -> ThingsResult<Vec<Project>> {
    let mut projects = self.get_all_projects().await?;
    if let Some(max) = limit {
        projects.truncate(max);
    }
    Ok(projects)
}
/// Returns all areas; thin wrapper over [`Self::get_all_areas`].
#[instrument(skip(self))]
pub async fn get_areas(&self) -> ThingsResult<Vec<Area>> {
    self.get_all_areas().await
}
/// Inserts a new task row into TMTask and returns its generated UUID.
///
/// Validates the start/deadline range and the existence of any referenced
/// project, area, or parent task before writing.
///
/// # Errors
/// Fails if validation fails, tag serialization fails, or the INSERT fails.
#[instrument(skip(self))]
pub async fn create_task(&self, request: CreateTaskRequest) -> ThingsResult<Uuid> {
crate::database::validate_date_range(request.start_date, request.deadline)?;
let uuid = Uuid::new_v4();
let uuid_str = uuid.to_string();
// Referential checks: fail fast before writing anything.
if let Some(project_uuid) = &request.project_uuid {
validators::validate_project_exists(&self.pool, project_uuid).await?;
}
if let Some(area_uuid) = &request.area_uuid {
validators::validate_area_exists(&self.pool, area_uuid).await?;
}
if let Some(parent_uuid) = &request.parent_uuid {
validators::validate_task_exists(&self.pool, parent_uuid).await?;
}
// startDate/deadline use the Things epoch (seconds since 2001-01-01 UTC).
let start_date_ts = request.start_date.map(naive_date_to_things_timestamp);
let deadline_ts = request.deadline.map(naive_date_to_things_timestamp);
// NOTE(review): creationDate/userModificationDate use Unix-epoch seconds here
// while start/deadline use the Things epoch — confirm this matches the schema.
let now = Utc::now().timestamp() as f64;
// Tags are persisted as a JSON byte blob in cachedTags.
let cached_tags = request
.tags
.as_ref()
.map(|tags| serialize_tags_to_blob(tags))
.transpose()?;
sqlx::query(
r"
INSERT INTO TMTask (
uuid, title, type, status, notes,
startDate, deadline, project, area, heading,
cachedTags, creationDate, userModificationDate,
trashed
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
",
)
// Bind order must match the column order in the INSERT statement above.
.bind(&uuid_str)
.bind(&request.title)
.bind(request.task_type.unwrap_or(TaskType::Todo) as i32)
.bind(request.status.unwrap_or(TaskStatus::Incomplete) as i32)
.bind(request.notes.as_ref())
.bind(start_date_ts)
.bind(deadline_ts)
.bind(request.project_uuid.map(|u| u.to_string()))
.bind(request.area_uuid.map(|u| u.to_string()))
.bind(request.parent_uuid.map(|u| u.to_string()))
.bind(cached_tags)
.bind(now)
.bind(now)
// trashed = 0: new tasks are live.
.bind(0)
.execute(&self.pool)
.await
.map_err(|e| ThingsError::unknown(format!("Failed to create task: {e}")))?;
info!("Created task with UUID: {}", uuid);
Ok(uuid)
}
/// Inserts a new project row (type = 1, status = 0) and returns its UUID.
///
/// Validates the start/deadline range and the referenced area (if any)
/// before writing.
///
/// # Errors
/// Fails if validation fails, tag serialization fails, or the INSERT fails.
#[instrument(skip(self))]
pub async fn create_project(
&self,
request: crate::models::CreateProjectRequest,
) -> ThingsResult<Uuid> {
crate::database::validate_date_range(request.start_date, request.deadline)?;
let uuid = Uuid::new_v4();
let uuid_str = uuid.to_string();
if let Some(area_uuid) = &request.area_uuid {
validators::validate_area_exists(&self.pool, area_uuid).await?;
}
// startDate/deadline use the Things epoch (seconds since 2001-01-01 UTC).
let start_date_ts = request.start_date.map(naive_date_to_things_timestamp);
let deadline_ts = request.deadline.map(naive_date_to_things_timestamp);
// NOTE(review): creation/modification dates are Unix-epoch seconds — confirm
// this matches the schema's expectations.
let now = Utc::now().timestamp() as f64;
let cached_tags = request
.tags
.as_ref()
.map(|tags| serialize_tags_to_blob(tags))
.transpose()?;
// type = 1 (project), status = 0, project/heading NULL, trashed = 0 are
// inlined in the VALUES clause; the placeholders cover the remaining columns.
sqlx::query(
r"
INSERT INTO TMTask (
uuid, title, type, status, notes,
startDate, deadline, project, area, heading,
cachedTags, creationDate, userModificationDate,
trashed
) VALUES (?, ?, 1, 0, ?, ?, ?, NULL, ?, NULL, ?, ?, ?, 0)
",
)
// Bind order must match the placeholder order above.
.bind(&uuid_str)
.bind(&request.title)
.bind(request.notes.as_ref())
.bind(start_date_ts)
.bind(deadline_ts)
.bind(request.area_uuid.map(|u| u.to_string()))
.bind(cached_tags)
.bind(now)
.bind(now)
.execute(&self.pool)
.await
.map_err(|e| ThingsError::unknown(format!("Failed to create project: {e}")))?;
info!("Created project with UUID: {}", uuid);
Ok(uuid)
}
/// Applies a partial update to an existing task; unset fields are untouched.
///
/// Re-validates the combined date range (incoming values merged with the
/// stored ones) and any referenced project/area before writing.
///
/// # Errors
/// Fails if validation fails, tag serialization fails, or the UPDATE fails.
#[instrument(skip(self))]
pub async fn update_task(&self, request: UpdateTaskRequest) -> ThingsResult<()> {
validators::validate_task_exists(&self.pool, &request.uuid).await?;
if request.start_date.is_some() || request.deadline.is_some() {
if let Some(current_task) = self.get_task_by_uuid(&request.uuid).await? {
// Merge request values over stored values so a partial update cannot
// create an invalid start/deadline combination.
let final_start = request.start_date.or(current_task.start_date);
let final_deadline = request.deadline.or(current_task.deadline);
crate::database::validate_date_range(final_start, final_deadline)?;
}
}
if let Some(project_uuid) = &request.project_uuid {
validators::validate_project_exists(&self.pool, project_uuid).await?;
}
if let Some(area_uuid) = &request.area_uuid {
validators::validate_area_exists(&self.pool, area_uuid).await?;
}
let builder = TaskUpdateBuilder::from_request(&request);
// Nothing to update: succeed without touching the database.
if builder.is_empty() {
return Ok(());
}
let query_string = builder.build_query_string();
let mut q = sqlx::query(&query_string);
// IMPORTANT: the bind order below must mirror the field order produced by
// TaskUpdateBuilder::from_request — keep the two in sync.
if let Some(title) = &request.title {
q = q.bind(title);
}
if let Some(notes) = &request.notes {
q = q.bind(notes);
}
if let Some(start_date) = request.start_date {
q = q.bind(naive_date_to_things_timestamp(start_date));
}
if let Some(deadline) = request.deadline {
q = q.bind(naive_date_to_things_timestamp(deadline));
}
if let Some(status) = request.status {
q = q.bind(status as i32);
}
if let Some(project_uuid) = request.project_uuid {
q = q.bind(project_uuid.to_string());
}
if let Some(area_uuid) = request.area_uuid {
q = q.bind(area_uuid.to_string());
}
if let Some(tags) = &request.tags {
let cached_tags = serialize_tags_to_blob(tags)?;
q = q.bind(cached_tags);
}
// Trailing binds: userModificationDate, then the WHERE-clause uuid.
let now = Utc::now().timestamp() as f64;
q = q.bind(now).bind(request.uuid.to_string());
q.execute(&self.pool)
.await
.map_err(|e| ThingsError::unknown(format!("Failed to update task: {e}")))?;
info!("Updated task with UUID: {}", request.uuid);
Ok(())
}
/// Fetches a single project (type = 1) by UUID; trashed projects are
/// reported as absent.
///
/// NOTE(review): the lookup binds `uuid.to_string()` while the returned
/// `uuid` field is re-derived via `things_uuid_to_uuid` — confirm that rows
/// created by this crate (RFC-4122 uuids) and native Things rows both
/// round-trip correctly.
///
/// # Errors
/// Fails if the query fails.
#[instrument(skip(self))]
pub async fn get_project_by_uuid(&self, uuid: &Uuid) -> ThingsResult<Option<Project>> {
let row = sqlx::query(
r"
SELECT
uuid, title, status,
area, notes,
creationDate, userModificationDate,
startDate, deadline,
trashed, type
FROM TMTask
WHERE uuid = ? AND type = 1
",
)
.bind(uuid.to_string())
.fetch_optional(&self.pool)
.await
.map_err(|e| ThingsError::unknown(format!("Failed to fetch project: {e}")))?;
if let Some(row) = row {
// Trashed rows exist in the table but are hidden from the API.
let trashed: i64 = row.get("trashed");
if trashed == 1 {
return Ok(None);
}
let project = Project {
uuid: things_uuid_to_uuid(&row.get::<String, _>("uuid")),
title: row.get("title"),
status: TaskStatus::from_i32(row.get("status")).unwrap_or(TaskStatus::Incomplete),
area_uuid: row
.get::<Option<String>, _>("area")
.map(|s| things_uuid_to_uuid(&s)),
notes: row.get("notes"),
// NOTE(review): interpreted as Unix-epoch seconds here; see the epoch
// note on get_all_projects.
deadline: row
.get::<Option<i64>, _>("deadline")
.and_then(|ts| DateTime::from_timestamp(ts, 0))
.map(|dt| dt.date_naive()),
start_date: row
.get::<Option<i64>, _>("startDate")
.and_then(|ts| DateTime::from_timestamp(ts, 0))
.map(|dt| dt.date_naive()),
// Tags and contained tasks are not populated by this query.
tags: Vec::new(),
tasks: Vec::new(),
created: {
let ts_f64 = row.get::<f64, _>("creationDate");
let ts = safe_timestamp_convert(ts_f64);
DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
},
modified: {
let ts_f64 = row.get::<f64, _>("userModificationDate");
let ts = safe_timestamp_convert(ts_f64);
DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
},
};
Ok(Some(project))
} else {
Ok(None)
}
}
/// Applies a partial update to an existing project; unset fields are untouched.
///
/// Re-validates the combined date range (incoming values merged with the
/// stored ones) and the referenced area (if any) before writing.
///
/// # Errors
/// Fails if validation fails, tag serialization fails, or the UPDATE fails.
#[instrument(skip(self))]
pub async fn update_project(
&self,
request: crate::models::UpdateProjectRequest,
) -> ThingsResult<()> {
validators::validate_project_exists(&self.pool, &request.uuid).await?;
if request.start_date.is_some() || request.deadline.is_some() {
if let Some(current_project) = self.get_project_by_uuid(&request.uuid).await? {
// Merge request values over stored values so a partial update cannot
// create an invalid start/deadline combination.
let final_start = request.start_date.or(current_project.start_date);
let final_deadline = request.deadline.or(current_project.deadline);
crate::database::validate_date_range(final_start, final_deadline)?;
}
}
if let Some(area_uuid) = &request.area_uuid {
validators::validate_area_exists(&self.pool, area_uuid).await?;
}
// Build the SET clause from the fields that are actually present.
let mut builder = TaskUpdateBuilder::new();
if request.title.is_some() {
builder = builder.add_field("title");
}
if request.notes.is_some() {
builder = builder.add_field("notes");
}
if request.start_date.is_some() {
builder = builder.add_field("startDate");
}
if request.deadline.is_some() {
builder = builder.add_field("deadline");
}
if request.area_uuid.is_some() {
builder = builder.add_field("area");
}
if request.tags.is_some() {
builder = builder.add_field("cachedTags");
}
// Nothing to update: succeed without touching the database.
if builder.is_empty() {
return Ok(());
}
let query_str = builder.build_query_string();
let mut q = sqlx::query(&query_str);
// IMPORTANT: the bind order below must mirror the add_field order above.
if let Some(ref title) = request.title {
q = q.bind(title);
}
if let Some(ref notes) = request.notes {
q = q.bind(notes);
}
if let Some(start_date) = request.start_date {
q = q.bind(naive_date_to_things_timestamp(start_date));
}
if let Some(deadline) = request.deadline {
q = q.bind(naive_date_to_things_timestamp(deadline));
}
if let Some(area_uuid) = request.area_uuid {
q = q.bind(area_uuid.to_string());
}
if let Some(tags) = &request.tags {
let cached_tags = serialize_tags_to_blob(tags)?;
q = q.bind(cached_tags);
}
// Trailing binds: userModificationDate, then the WHERE-clause uuid.
let now = Utc::now().timestamp() as f64;
q = q.bind(now).bind(request.uuid.to_string());
q.execute(&self.pool)
.await
.map_err(|e| ThingsError::unknown(format!("Failed to update project: {e}")))?;
info!("Updated project with UUID: {}", request.uuid);
Ok(())
}
/// Fetches a single task by UUID.
///
/// Returns `Ok(None)` both when no row matches and when the matching row is
/// trashed (soft-deleted), so callers never observe deleted tasks.
///
/// # Errors
/// Returns `ThingsError` if the query or row mapping fails.
#[instrument(skip(self))]
pub async fn get_task_by_uuid(&self, uuid: &Uuid) -> ThingsResult<Option<Task>> {
    let row = sqlx::query(
        r"
SELECT
uuid, title, status, type,
startDate, deadline, stopDate,
project, area, heading,
notes, cachedTags,
creationDate, userModificationDate,
trashed
FROM TMTask
WHERE uuid = ?
",
    )
    .bind(uuid.to_string())
    .fetch_optional(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to fetch task: {e}")))?;
    if let Some(row) = row {
        // Trashed rows are filtered here rather than in SQL so the same query
        // shape is reused; treat them as absent.
        let trashed: i64 = row.get("trashed");
        if trashed == 1 {
            return Ok(None);
        }
        let task = map_task_row(&row)?;
        Ok(Some(task))
    } else {
        Ok(None)
    }
}
/// Marks the task identified by `uuid` as completed (status = 1), stamping
/// both `stopDate` and `userModificationDate` with the current Unix time.
///
/// # Errors
/// Returns `ThingsError` if the task does not exist or the update fails.
#[instrument(skip(self))]
pub async fn complete_task(&self, uuid: &Uuid) -> ThingsResult<()> {
    // Fail early with a clear error if the task is unknown.
    validators::validate_task_exists(&self.pool, uuid).await?;
    let timestamp = Utc::now().timestamp() as f64;
    let update = sqlx::query(
        "UPDATE TMTask SET status = 1, stopDate = ?, userModificationDate = ? WHERE uuid = ?",
    )
    .bind(timestamp)
    .bind(timestamp)
    .bind(uuid.to_string());
    update
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to complete task: {e}")))?;
    info!("Completed task with UUID: {}", uuid);
    Ok(())
}
/// Reverts a completed task back to open (status = 0), clearing `stopDate`
/// and refreshing `userModificationDate`.
///
/// # Errors
/// Returns `ThingsError` if the task does not exist or the update fails.
#[instrument(skip(self))]
pub async fn uncomplete_task(&self, uuid: &Uuid) -> ThingsResult<()> {
    // Fail early with a clear error if the task is unknown.
    validators::validate_task_exists(&self.pool, uuid).await?;
    let timestamp = Utc::now().timestamp() as f64;
    let update = sqlx::query(
        "UPDATE TMTask SET status = 0, stopDate = NULL, userModificationDate = ? WHERE uuid = ?",
    )
    .bind(timestamp)
    .bind(uuid.to_string());
    update
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to uncomplete task: {e}")))?;
    info!("Uncompleted task with UUID: {}", uuid);
    Ok(())
}
/// Marks a project as completed, first dealing with its non-trashed child
/// tasks according to `child_handling`:
/// - `Error`:   refuse if any live child exists;
/// - `Cascade`: complete all live children too;
/// - `Orphan`:  detach children (`project = NULL`) so they survive standalone.
///
/// # Errors
/// Returns `ThingsError` if the project does not exist, if `Error` mode finds
/// children, or if any query fails.
#[instrument(skip(self))]
pub async fn complete_project(
    &self,
    uuid: &Uuid,
    child_handling: crate::models::ProjectChildHandling,
) -> ThingsResult<()> {
    validators::validate_project_exists(&self.pool, uuid).await?;
    // One timestamp reused for all rows touched by this operation.
    let now = Utc::now().timestamp() as f64;
    match child_handling {
        crate::models::ProjectChildHandling::Error => {
            let child_count: i64 = sqlx::query_scalar(
                "SELECT COUNT(*) FROM TMTask WHERE project = ? AND trashed = 0",
            )
            .bind(uuid.to_string())
            .fetch_one(&self.pool)
            .await
            .map_err(|e| {
                ThingsError::unknown(format!("Failed to check for child tasks: {e}"))
            })?;
            if child_count > 0 {
                return Err(ThingsError::unknown(format!(
                    "Project {} has {} child task(s). Use cascade or orphan mode to complete.",
                    uuid, child_count
                )));
            }
        }
        crate::models::ProjectChildHandling::Cascade => {
            // Complete every live child in a single statement.
            sqlx::query(
                "UPDATE TMTask SET status = 1, stopDate = ?, userModificationDate = ? WHERE project = ? AND trashed = 0",
            )
            .bind(now)
            .bind(now)
            .bind(uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to complete child tasks: {e}")))?;
        }
        crate::models::ProjectChildHandling::Orphan => {
            // Detach children from the project without completing them.
            sqlx::query(
                "UPDATE TMTask SET project = NULL, userModificationDate = ? WHERE project = ? AND trashed = 0",
            )
            .bind(now)
            .bind(uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to orphan child tasks: {e}")))?;
        }
    }
    // Children handled; now complete the project row itself.
    sqlx::query(
        "UPDATE TMTask SET status = 1, stopDate = ?, userModificationDate = ? WHERE uuid = ?",
    )
    .bind(now)
    .bind(now)
    .bind(uuid.to_string())
    .execute(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to complete project: {e}")))?;
    info!("Completed project with UUID: {}", uuid);
    Ok(())
}
/// Soft-deletes a task (sets `trashed = 1`), first handling tasks whose
/// `heading` points at it — presumably checklist/heading children; confirm
/// against the Things schema — according to `child_handling`:
/// - `Error`:   refuse while live children exist;
/// - `Cascade`: trash each child as well;
/// - `Orphan`:  clear each child's `heading` so it survives standalone.
///
/// # Errors
/// Returns `ThingsError` if the task does not exist, if `Error` mode finds
/// children, or if any query fails.
#[instrument(skip(self))]
pub async fn delete_task(
    &self,
    uuid: &Uuid,
    child_handling: DeleteChildHandling,
) -> ThingsResult<()> {
    validators::validate_task_exists(&self.pool, uuid).await?;
    // Live children are tasks whose `heading` references this task's uuid.
    let children = sqlx::query("SELECT uuid FROM TMTask WHERE heading = ? AND trashed = 0")
        .bind(uuid.to_string())
        .fetch_all(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to query child tasks: {e}")))?;
    let has_children = !children.is_empty();
    if has_children {
        match child_handling {
            DeleteChildHandling::Error => {
                return Err(ThingsError::unknown(format!(
                    "Task {} has {} child task(s). Use cascade or orphan mode to delete.",
                    uuid,
                    children.len()
                )));
            }
            DeleteChildHandling::Cascade => {
                let now = Utc::now().timestamp() as f64;
                // Trash each child individually (one UPDATE per child).
                for child_row in &children {
                    let child_uuid: String = child_row.get("uuid");
                    sqlx::query(
                        "UPDATE TMTask SET trashed = 1, userModificationDate = ? WHERE uuid = ?",
                    )
                    .bind(now)
                    .bind(&child_uuid)
                    .execute(&self.pool)
                    .await
                    .map_err(|e| {
                        ThingsError::unknown(format!("Failed to delete child task: {e}"))
                    })?;
                }
                info!("Cascade deleted {} child task(s)", children.len());
            }
            DeleteChildHandling::Orphan => {
                let now = Utc::now().timestamp() as f64;
                // Detach each child from the heading instead of deleting it.
                for child_row in &children {
                    let child_uuid: String = child_row.get("uuid");
                    sqlx::query(
                        "UPDATE TMTask SET heading = NULL, userModificationDate = ? WHERE uuid = ?",
                    )
                    .bind(now)
                    .bind(&child_uuid)
                    .execute(&self.pool)
                    .await
                    .map_err(|e| {
                        ThingsError::unknown(format!("Failed to orphan child task: {e}"))
                    })?;
                }
                info!("Orphaned {} child task(s)", children.len());
            }
        }
    }
    // Children handled; soft-delete the task itself.
    let now = Utc::now().timestamp() as f64;
    sqlx::query("UPDATE TMTask SET trashed = 1, userModificationDate = ? WHERE uuid = ?")
        .bind(now)
        .bind(uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to delete task: {e}")))?;
    info!("Deleted task with UUID: {}", uuid);
    Ok(())
}
/// Soft-deletes a project (sets `trashed = 1`), first handling its live
/// child tasks according to `child_handling`:
/// - `Error`:   refuse while live children exist;
/// - `Cascade`: trash all live children in one statement;
/// - `Orphan`:  detach children (`project = NULL`) so they survive.
///
/// # Errors
/// Returns `ThingsError` if the project does not exist, if `Error` mode finds
/// children, or if any query fails.
#[instrument(skip(self))]
pub async fn delete_project(
    &self,
    uuid: &Uuid,
    child_handling: crate::models::ProjectChildHandling,
) -> ThingsResult<()> {
    validators::validate_project_exists(&self.pool, uuid).await?;
    // One timestamp reused for all rows touched by this operation.
    let now = Utc::now().timestamp() as f64;
    match child_handling {
        crate::models::ProjectChildHandling::Error => {
            let child_count: i64 = sqlx::query_scalar(
                "SELECT COUNT(*) FROM TMTask WHERE project = ? AND trashed = 0",
            )
            .bind(uuid.to_string())
            .fetch_one(&self.pool)
            .await
            .map_err(|e| {
                ThingsError::unknown(format!("Failed to check for child tasks: {e}"))
            })?;
            if child_count > 0 {
                return Err(ThingsError::unknown(format!(
                    "Project {} has {} child task(s). Use cascade or orphan mode to delete.",
                    uuid, child_count
                )));
            }
        }
        crate::models::ProjectChildHandling::Cascade => {
            // Trash every live child with a single statement.
            sqlx::query(
                "UPDATE TMTask SET trashed = 1, userModificationDate = ? WHERE project = ? AND trashed = 0",
            )
            .bind(now)
            .bind(uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to delete child tasks: {e}")))?;
        }
        crate::models::ProjectChildHandling::Orphan => {
            // Detach children from the project without deleting them.
            sqlx::query(
                "UPDATE TMTask SET project = NULL, userModificationDate = ? WHERE project = ? AND trashed = 0",
            )
            .bind(now)
            .bind(uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to orphan child tasks: {e}")))?;
        }
    }
    // Children handled; soft-delete the project row itself.
    sqlx::query("UPDATE TMTask SET trashed = 1, userModificationDate = ? WHERE uuid = ?")
        .bind(now)
        .bind(uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to delete project: {e}")))?;
    info!("Deleted project with UUID: {}", uuid);
    Ok(())
}
/// Creates a new area with a freshly generated UUIDv4, placing it after the
/// current highest `index` so it sorts last, and returns the new UUID.
///
/// # Errors
/// Returns `ThingsError` if either query fails.
#[instrument(skip(self))]
pub async fn create_area(
    &self,
    request: crate::models::CreateAreaRequest,
) -> ThingsResult<Uuid> {
    let uuid = Uuid::new_v4();
    let uuid_str = uuid.to_string();
    let now = Utc::now().timestamp() as f64;
    // MAX(`index`) is NULL on an empty table; unwrap_or(-1) makes the first
    // area land at index 0.
    let max_index: Option<i64> = sqlx::query_scalar("SELECT MAX(`index`) FROM TMArea")
        .fetch_one(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to get max area index: {e}")))?;
    let next_index = max_index.unwrap_or(-1) + 1;
    // New areas are created visible (visible = 1).
    sqlx::query(
        r"
INSERT INTO TMArea (
uuid, title, visible, `index`,
creationDate, userModificationDate
) VALUES (?, ?, 1, ?, ?, ?)
",
    )
    .bind(&uuid_str)
    .bind(&request.title)
    .bind(next_index)
    .bind(now)
    .bind(now)
    .execute(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to create area: {e}")))?;
    info!("Created area with UUID: {}", uuid);
    Ok(uuid)
}
/// Renames an existing area and refreshes its `userModificationDate`.
///
/// # Errors
/// Returns `ThingsError` if the area does not exist or the update fails.
#[instrument(skip(self))]
pub async fn update_area(&self, request: crate::models::UpdateAreaRequest) -> ThingsResult<()> {
    // Reject unknown areas up front.
    validators::validate_area_exists(&self.pool, &request.uuid).await?;
    let modified_at = Utc::now().timestamp() as f64;
    let update = sqlx::query("UPDATE TMArea SET title = ?, userModificationDate = ? WHERE uuid = ?")
        .bind(&request.title)
        .bind(modified_at)
        .bind(request.uuid.to_string());
    update
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to update area: {e}")))?;
    info!("Updated area with UUID: {}", request.uuid);
    Ok(())
}
/// Deletes an area: first detaches every live project assigned to it
/// (rows with type = 1), then hard-deletes the TMArea row.
///
/// # Errors
/// Returns `ThingsError` if the area does not exist or a query fails.
#[instrument(skip(self))]
pub async fn delete_area(&self, uuid: &Uuid) -> ThingsResult<()> {
    validators::validate_area_exists(&self.pool, uuid).await?;
    let timestamp = Utc::now().timestamp() as f64;
    let area_id = uuid.to_string();
    // Detach projects first so no row references the area after deletion.
    let orphan = sqlx::query(
        "UPDATE TMTask SET area = NULL, userModificationDate = ? WHERE area = ? AND type = 1 AND trashed = 0",
    )
    .bind(timestamp)
    .bind(&area_id);
    orphan
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to orphan projects in area: {e}")))?;
    // Areas are hard-deleted (no trashed flag on TMArea).
    let remove = sqlx::query("DELETE FROM TMArea WHERE uuid = ?").bind(&area_id);
    remove
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to delete area: {e}")))?;
    info!("Deleted area with UUID: {}", uuid);
    Ok(())
}
/// Looks up a tag whose title matches `normalized` case-insensitively
/// (`LOWER(title) = LOWER(?)`) and hydrates a full `Tag` model.
///
/// Non-RFC-4122 identifiers stored by Things are mapped to stable `Uuid`s
/// via `things_uuid_to_uuid` (hashing). `usage_count` is computed with a
/// substring LIKE over each task's JSON-serialized `cachedTags` blob —
/// NOTE(review): this may overcount when one tag title is a substring of
/// another; confirm acceptable.
///
/// # Errors
/// Returns `ThingsError` if a query fails (the usage-count query itself
/// degrades to 0 on error rather than failing the lookup).
#[instrument(skip(self))]
pub async fn find_tag_by_normalized_title(
    &self,
    normalized: &str,
) -> ThingsResult<Option<crate::models::Tag>> {
    let row = sqlx::query(
        "SELECT uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate
FROM TMTag
WHERE LOWER(title) = LOWER(?)",
    )
    .bind(normalized)
    .fetch_optional(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to find tag by title: {e}")))?;
    if let Some(row) = row {
        let uuid_str: String = row.get("uuid");
        // Fall back to a hash-derived UUID for non-standard identifiers.
        let uuid =
            Uuid::parse_str(&uuid_str).unwrap_or_else(|_| things_uuid_to_uuid(&uuid_str));
        let title: String = row.get("title");
        let shortcut: Option<String> = row.get("shortcut");
        let parent_str: Option<String> = row.get("parent");
        let parent_uuid =
            parent_str.map(|s| Uuid::parse_str(&s).unwrap_or_else(|_| things_uuid_to_uuid(&s)));
        // Timestamps are REAL seconds; invalid values collapse to "now".
        let creation_ts: f64 = row.get("creationDate");
        let created = {
            let ts = safe_timestamp_convert(creation_ts);
            DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
        };
        let modification_ts: f64 = row.get("userModificationDate");
        let modified = {
            let ts = safe_timestamp_convert(modification_ts);
            DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
        };
        let used_ts: Option<f64> = row.get("usedDate");
        let last_used = used_ts.and_then(|ts| {
            let ts_i64 = safe_timestamp_convert(ts);
            DateTime::from_timestamp(ts_i64, 0)
        });
        // Count live tasks whose cachedTags JSON contains this title.
        let usage_count: i64 = sqlx::query_scalar(
            "SELECT COUNT(*) FROM TMTask
WHERE cachedTags IS NOT NULL
AND json_extract(cachedTags, '$') LIKE ?
AND trashed = 0",
        )
        .bind(format!("%\"{}\"%", title))
        .fetch_one(&self.pool)
        .await
        .unwrap_or(0);
        Ok(Some(crate::models::Tag {
            uuid,
            title,
            shortcut,
            parent_uuid,
            created,
            modified,
            usage_count: usage_count as u32,
            last_used,
        }))
    } else {
        Ok(None)
    }
}
/// Returns every stored tag whose title similarity to `title` meets
/// `min_similarity`, sorted by descending similarity score.
///
/// # Errors
/// Returns `ThingsError` if loading the tag list fails.
#[instrument(skip(self))]
pub async fn find_similar_tags(
    &self,
    title: &str,
    min_similarity: f32,
) -> ThingsResult<Vec<crate::models::TagMatch>> {
    use crate::database::tag_utils::{calculate_similarity, get_match_type};
    let candidates = self.get_all_tags().await?;
    let mut results = Vec::new();
    for candidate in candidates {
        let score = calculate_similarity(title, &candidate.title);
        if score < min_similarity {
            continue;
        }
        let match_type = get_match_type(title, &candidate.title, min_similarity);
        results.push(crate::models::TagMatch {
            tag: candidate,
            similarity_score: score,
            match_type,
        });
    }
    // Best matches first; ties keep input order (stable sort). NaN scores
    // compare as equal rather than panicking.
    results.sort_by(|lhs, rhs| {
        rhs.similarity_score
            .partial_cmp(&lhs.similarity_score)
            .unwrap_or(std::cmp::Ordering::Equal)
    });
    Ok(results)
}
/// Searches tags whose title contains `query` (SQL LIKE, `%query%`),
/// ordered by title, hydrating each into a full `Tag` model.
///
/// NOTE(review): usage_count is fetched with one extra query per row (N+1);
/// acceptable for small tag tables, worth batching if this becomes hot.
///
/// # Errors
/// Returns `ThingsError` if the search query fails (per-tag usage counts
/// degrade to 0 on error).
#[instrument(skip(self))]
pub async fn search_tags(&self, query: &str) -> ThingsResult<Vec<crate::models::Tag>> {
    let rows = sqlx::query(
        "SELECT uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate
FROM TMTag
WHERE title LIKE ?
ORDER BY title",
    )
    .bind(format!("%{}%", query))
    .fetch_all(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to search tags: {e}")))?;
    let mut tags = Vec::new();
    for row in rows {
        let uuid_str: String = row.get("uuid");
        // Fall back to a hash-derived UUID for non-standard identifiers.
        let uuid =
            Uuid::parse_str(&uuid_str).unwrap_or_else(|_| things_uuid_to_uuid(&uuid_str));
        let title: String = row.get("title");
        let shortcut: Option<String> = row.get("shortcut");
        let parent_str: Option<String> = row.get("parent");
        let parent_uuid =
            parent_str.map(|s| Uuid::parse_str(&s).unwrap_or_else(|_| things_uuid_to_uuid(&s)));
        // Timestamps are REAL seconds; invalid values collapse to "now".
        let creation_ts: f64 = row.get("creationDate");
        let created = {
            let ts = safe_timestamp_convert(creation_ts);
            DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
        };
        let modification_ts: f64 = row.get("userModificationDate");
        let modified = {
            let ts = safe_timestamp_convert(modification_ts);
            DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
        };
        let used_ts: Option<f64> = row.get("usedDate");
        let last_used = used_ts.and_then(|ts| {
            let ts_i64 = safe_timestamp_convert(ts);
            DateTime::from_timestamp(ts_i64, 0)
        });
        // Count live tasks whose cachedTags JSON contains this title.
        let usage_count: i64 = sqlx::query_scalar(
            "SELECT COUNT(*) FROM TMTask
WHERE cachedTags IS NOT NULL
AND json_extract(cachedTags, '$') LIKE ?
AND trashed = 0",
        )
        .bind(format!("%\"{}\"%", title))
        .fetch_one(&self.pool)
        .await
        .unwrap_or(0);
        tags.push(crate::models::Tag {
            uuid,
            title,
            shortcut,
            parent_uuid,
            created,
            modified,
            usage_count: usage_count as u32,
            last_used,
        });
    }
    Ok(tags)
}
/// Loads every tag in TMTag, ordered by title, hydrating each into a full
/// `Tag` model (same row-mapping conventions as `search_tags`).
///
/// NOTE(review): usage_count is fetched with one extra query per tag (N+1);
/// acceptable for small tag tables, worth batching if this becomes hot.
///
/// # Errors
/// Returns `ThingsError` if the main query fails (per-tag usage counts
/// degrade to 0 on error).
#[instrument(skip(self))]
pub async fn get_all_tags(&self) -> ThingsResult<Vec<crate::models::Tag>> {
    let rows = sqlx::query(
        "SELECT uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate
FROM TMTag
ORDER BY title",
    )
    .fetch_all(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to get all tags: {e}")))?;
    let mut tags = Vec::new();
    for row in rows {
        let uuid_str: String = row.get("uuid");
        // Fall back to a hash-derived UUID for non-standard identifiers.
        let uuid =
            Uuid::parse_str(&uuid_str).unwrap_or_else(|_| things_uuid_to_uuid(&uuid_str));
        let title: String = row.get("title");
        let shortcut: Option<String> = row.get("shortcut");
        let parent_str: Option<String> = row.get("parent");
        let parent_uuid =
            parent_str.map(|s| Uuid::parse_str(&s).unwrap_or_else(|_| things_uuid_to_uuid(&s)));
        // Timestamps are REAL seconds; invalid values collapse to "now".
        let creation_ts: f64 = row.get("creationDate");
        let created = {
            let ts = safe_timestamp_convert(creation_ts);
            DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
        };
        let modification_ts: f64 = row.get("userModificationDate");
        let modified = {
            let ts = safe_timestamp_convert(modification_ts);
            DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
        };
        let used_ts: Option<f64> = row.get("usedDate");
        let last_used = used_ts.and_then(|ts| {
            let ts_i64 = safe_timestamp_convert(ts);
            DateTime::from_timestamp(ts_i64, 0)
        });
        // Count live tasks whose cachedTags JSON contains this title.
        let usage_count: i64 = sqlx::query_scalar(
            "SELECT COUNT(*) FROM TMTask
WHERE cachedTags IS NOT NULL
AND json_extract(cachedTags, '$') LIKE ?
AND trashed = 0",
        )
        .bind(format!("%\"{}\"%", title))
        .fetch_one(&self.pool)
        .await
        .unwrap_or(0);
        tags.push(crate::models::Tag {
            uuid,
            title,
            shortcut,
            parent_uuid,
            created,
            modified,
            usage_count: usage_count as u32,
            last_used,
        });
    }
    Ok(tags)
}
/// Returns up to `limit` tags, most-used first (ties keep alphabetical
/// order from `get_all_tags`, since the sort is stable).
#[instrument(skip(self))]
pub async fn get_popular_tags(&self, limit: usize) -> ThingsResult<Vec<crate::models::Tag>> {
    let mut tags = self.get_all_tags().await?;
    tags.sort_by(|a, b| b.usage_count.cmp(&a.usage_count));
    tags.truncate(limit);
    Ok(tags)
}
/// Returns up to `limit` tags that have been used at least once, most
/// recently used first (`ORDER BY usedDate DESC`), hydrated into full
/// `Tag` models (same row-mapping conventions as `search_tags`).
///
/// NOTE(review): usage_count is fetched with one extra query per tag (N+1);
/// acceptable for small limits.
///
/// # Errors
/// Returns `ThingsError` if the main query fails (per-tag usage counts
/// degrade to 0 on error).
#[instrument(skip(self))]
pub async fn get_recent_tags(&self, limit: usize) -> ThingsResult<Vec<crate::models::Tag>> {
    let rows = sqlx::query(
        "SELECT uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate
FROM TMTag
WHERE usedDate IS NOT NULL
ORDER BY usedDate DESC
LIMIT ?",
    )
    .bind(limit as i64)
    .fetch_all(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to get recent tags: {e}")))?;
    let mut tags = Vec::new();
    for row in rows {
        let uuid_str: String = row.get("uuid");
        // Fall back to a hash-derived UUID for non-standard identifiers.
        let uuid =
            Uuid::parse_str(&uuid_str).unwrap_or_else(|_| things_uuid_to_uuid(&uuid_str));
        let title: String = row.get("title");
        let shortcut: Option<String> = row.get("shortcut");
        let parent_str: Option<String> = row.get("parent");
        let parent_uuid =
            parent_str.map(|s| Uuid::parse_str(&s).unwrap_or_else(|_| things_uuid_to_uuid(&s)));
        // Timestamps are REAL seconds; invalid values collapse to "now".
        let creation_ts: f64 = row.get("creationDate");
        let created = {
            let ts = safe_timestamp_convert(creation_ts);
            DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
        };
        let modification_ts: f64 = row.get("userModificationDate");
        let modified = {
            let ts = safe_timestamp_convert(modification_ts);
            DateTime::from_timestamp(ts, 0).unwrap_or_else(Utc::now)
        };
        let used_ts: Option<f64> = row.get("usedDate");
        let last_used = used_ts.and_then(|ts| {
            let ts_i64 = safe_timestamp_convert(ts);
            DateTime::from_timestamp(ts_i64, 0)
        });
        // Count live tasks whose cachedTags JSON contains this title.
        let usage_count: i64 = sqlx::query_scalar(
            "SELECT COUNT(*) FROM TMTask
WHERE cachedTags IS NOT NULL
AND json_extract(cachedTags, '$') LIKE ?
AND trashed = 0",
        )
        .bind(format!("%\"{}\"%", title))
        .fetch_one(&self.pool)
        .await
        .unwrap_or(0);
        tags.push(crate::models::Tag {
            uuid,
            title,
            shortcut,
            parent_uuid,
            created,
            modified,
            usage_count: usage_count as u32,
            last_used,
        });
    }
    Ok(tags)
}
/// Creates a tag with duplicate protection. Resolution order:
/// 1. a tag with the same normalized title already exists -> `Existing`;
/// 2. similar tags (similarity >= 0.8) exist -> `SimilarFound`, nothing
///    is created and the caller decides;
/// 3. otherwise insert a new row -> `Created`.
///
/// # Errors
/// Returns `ThingsError` if any lookup or the insert fails.
#[instrument(skip(self))]
pub async fn create_tag_smart(
    &self,
    request: crate::models::CreateTagRequest,
) -> ThingsResult<crate::models::TagCreationResult> {
    use crate::database::tag_utils::normalize_tag_title;
    use crate::models::TagCreationResult;
    let normalized = normalize_tag_title(&request.title);
    if let Some(existing) = self.find_tag_by_normalized_title(&normalized).await? {
        // Exact (normalized) match: reuse instead of duplicating.
        return Ok(TagCreationResult::Existing {
            tag: existing,
            is_new: false,
        });
    }
    let similar_tags = self.find_similar_tags(&normalized, 0.8).await?;
    if !similar_tags.is_empty() {
        // Near-duplicates found: surface them rather than creating.
        return Ok(TagCreationResult::SimilarFound {
            similar_tags,
            requested_title: request.title,
        });
    }
    let uuid = Uuid::new_v4();
    let now = Utc::now().timestamp() as f64;
    // New tags start unused (usedDate NULL) at index 0.
    sqlx::query(
        "INSERT INTO TMTag (uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate, `index`)
VALUES (?, ?, ?, ?, ?, ?, NULL, 0)"
    )
    .bind(uuid.to_string())
    .bind(&request.title)
    .bind(request.shortcut.as_ref())
    .bind(request.parent_uuid.map(|u| u.to_string()))
    .bind(now)
    .bind(now)
    .execute(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to create tag: {e}")))?;
    info!("Created tag with UUID: {}", uuid);
    Ok(TagCreationResult::Created { uuid, is_new: true })
}
/// Inserts a tag unconditionally — no duplicate or similarity checks
/// (contrast with `create_tag_smart`). Returns the new UUID.
///
/// # Errors
/// Returns `ThingsError` if the insert fails.
#[instrument(skip(self))]
pub async fn create_tag_force(
    &self,
    request: crate::models::CreateTagRequest,
) -> ThingsResult<Uuid> {
    let uuid = Uuid::new_v4();
    let now = Utc::now().timestamp() as f64;
    // New tags start unused (usedDate NULL) at index 0.
    sqlx::query(
        "INSERT INTO TMTag (uuid, title, shortcut, parent, creationDate, userModificationDate, usedDate, `index`)
VALUES (?, ?, ?, ?, ?, ?, NULL, 0)"
    )
    .bind(uuid.to_string())
    .bind(&request.title)
    .bind(request.shortcut.as_ref())
    .bind(request.parent_uuid.map(|u| u.to_string()))
    .bind(now)
    .bind(now)
    .execute(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to create tag: {e}")))?;
    info!("Forcefully created tag with UUID: {}", uuid);
    Ok(uuid)
}
/// Updates a tag's title, shortcut, and/or parent.
///
/// Verifies the tag exists, and — when the title changes — that no *other*
/// tag already uses the same normalized title. The UPDATE statement is
/// assembled dynamically from the fields present in `request`; if none are
/// set, returns `Ok(())` without touching the database.
///
/// # Errors
/// Returns `ThingsError` if the tag does not exist, the new title collides
/// with another tag, or a query fails.
#[instrument(skip(self))]
pub async fn update_tag(&self, request: crate::models::UpdateTagRequest) -> ThingsResult<()> {
    use crate::database::tag_utils::normalize_tag_title;
    // Existence check directly by uuid. (The previous implementation first
    // searched for a tag whose *title* equalled the UUID string — a lookup
    // that could never match and only cost an extra round-trip.)
    let row = sqlx::query("SELECT 1 FROM TMTag WHERE uuid = ?")
        .bind(request.uuid.to_string())
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to validate tag: {e}")))?;
    if row.is_none() {
        return Err(ThingsError::unknown(format!(
            "Tag not found: {}",
            request.uuid
        )));
    }
    // Reject a rename that would collide with a different existing tag.
    if let Some(new_title) = &request.title {
        let normalized = normalize_tag_title(new_title);
        if let Some(duplicate) = self.find_tag_by_normalized_title(&normalized).await? {
            if duplicate.uuid != request.uuid {
                return Err(ThingsError::unknown(format!(
                    "Tag with title '{}' already exists",
                    new_title
                )));
            }
        }
    }
    let now = Utc::now().timestamp() as f64;
    // Collect SET clauses and their parameters in lock-step so the bind
    // order always matches the placeholder order.
    let mut updates = Vec::new();
    let mut params: Vec<String> = Vec::new();
    if let Some(title) = &request.title {
        updates.push("title = ?");
        params.push(title.clone());
    }
    if let Some(shortcut) = &request.shortcut {
        updates.push("shortcut = ?");
        params.push(shortcut.clone());
    }
    if let Some(parent_uuid) = request.parent_uuid {
        updates.push("parent = ?");
        params.push(parent_uuid.to_string());
    }
    if updates.is_empty() {
        // Nothing to change; skip the write entirely.
        return Ok(());
    }
    updates.push("userModificationDate = ?");
    // Bound as text; SQLite's REAL column affinity converts numeric text.
    params.push(now.to_string());
    let sql = format!("UPDATE TMTag SET {} WHERE uuid = ?", updates.join(", "));
    params.push(request.uuid.to_string());
    let mut query = sqlx::query(&sql);
    for param in params {
        query = query.bind(param);
    }
    query
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to update tag: {e}")))?;
    info!("Updated tag with UUID: {}", request.uuid);
    Ok(())
}
/// Deletes a tag row. When `remove_from_tasks` is set, the tag should also
/// be stripped from every task's cachedTags blob — that part is not yet
/// implemented and is only logged.
///
/// # Errors
/// Returns `ThingsError` if the tag does not exist or a query fails.
#[instrument(skip(self))]
pub async fn delete_tag(&self, uuid: &Uuid, remove_from_tasks: bool) -> ThingsResult<()> {
    // Existence check directly by uuid. (The previous implementation first
    // searched for a tag whose *title* equalled the UUID string — a lookup
    // that could never match and only cost an extra round-trip.)
    let row = sqlx::query("SELECT title FROM TMTag WHERE uuid = ?")
        .bind(uuid.to_string())
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to find tag: {e}")))?;
    if row.is_none() {
        return Err(ThingsError::unknown(format!("Tag not found: {}", uuid)));
    }
    if remove_from_tasks {
        // TODO: rewrite each referencing task's cachedTags blob without this tag.
        info!("Removing tag {} from all tasks (not yet implemented)", uuid);
    }
    sqlx::query("DELETE FROM TMTag WHERE uuid = ?")
        .bind(uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to delete tag: {e}")))?;
    info!("Deleted tag with UUID: {}", uuid);
    Ok(())
}
/// Merges `source_uuid` into `target_uuid`: verifies both tags exist,
/// refreshes the target's modification/used timestamps, then deletes the
/// source row. Rewriting task tag references is not yet implemented (logged).
///
/// # Errors
/// Returns `ThingsError` if either tag is missing or a query fails.
#[instrument(skip(self))]
pub async fn merge_tags(&self, source_uuid: &Uuid, target_uuid: &Uuid) -> ThingsResult<()> {
    // Both endpoints of the merge must exist before anything is mutated.
    let source_exists = sqlx::query("SELECT title FROM TMTag WHERE uuid = ?")
        .bind(source_uuid.to_string())
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to find source tag: {e}")))?
        .is_some();
    if !source_exists {
        return Err(ThingsError::unknown(format!(
            "Source tag not found: {}",
            source_uuid
        )));
    }
    let target_exists = sqlx::query("SELECT title FROM TMTag WHERE uuid = ?")
        .bind(target_uuid.to_string())
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to find target tag: {e}")))?
        .is_some();
    if !target_exists {
        return Err(ThingsError::unknown(format!(
            "Target tag not found: {}",
            target_uuid
        )));
    }
    info!(
        "Merging tag {} into {} (tag replacement in tasks not yet fully implemented)",
        source_uuid, target_uuid
    );
    let now = Utc::now().timestamp() as f64;
    // Touch the surviving tag so the merge registers as recent activity.
    sqlx::query("UPDATE TMTag SET userModificationDate = ?, usedDate = ? WHERE uuid = ?")
        .bind(now)
        .bind(now)
        .bind(target_uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to update target tag: {e}")))?;
    sqlx::query("DELETE FROM TMTag WHERE uuid = ?")
        .bind(source_uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to delete source tag: {e}")))?;
    info!("Merged tag {} into {}", source_uuid, target_uuid);
    Ok(())
}
/// Adds a tag (by title) to a task's cachedTags blob.
///
/// Resolution: an existing tag with the same normalized title is reused;
/// otherwise, if similar tags exist (similarity >= 0.8) they are returned
/// as `Suggestions` without assigning anything; otherwise the tag is
/// force-created and then assigned. Assignment is idempotent — an already
/// present title is not duplicated. On a real assignment the tag's
/// `usedDate` is refreshed.
///
/// # Errors
/// Returns `ThingsError` if the task does not exist or any query /
/// (de)serialization step fails.
#[instrument(skip(self))]
pub async fn add_tag_to_task(
    &self,
    task_uuid: &Uuid,
    tag_title: &str,
) -> ThingsResult<crate::models::TagAssignmentResult> {
    use crate::database::tag_utils::normalize_tag_title;
    use crate::models::TagAssignmentResult;
    validators::validate_task_exists(&self.pool, task_uuid).await?;
    let normalized = normalize_tag_title(tag_title);
    let tag = if let Some(existing_tag) = self.find_tag_by_normalized_title(&normalized).await?
    {
        existing_tag
    } else {
        // No exact match: offer near-duplicates before creating a new tag.
        let similar_tags = self.find_similar_tags(&normalized, 0.8).await?;
        if !similar_tags.is_empty() {
            return Ok(TagAssignmentResult::Suggestions { similar_tags });
        }
        let request = crate::models::CreateTagRequest {
            title: tag_title.to_string(),
            shortcut: None,
            parent_uuid: None,
        };
        let _uuid = self.create_tag_force(request).await?;
        // Re-fetch so we return the fully hydrated Tag model.
        self.find_tag_by_normalized_title(&normalized)
            .await?
            .ok_or_else(|| ThingsError::unknown("Failed to retrieve newly created tag"))?
    };
    // Read-modify-write of the task's JSON tag list.
    let row = sqlx::query("SELECT cachedTags FROM TMTask WHERE uuid = ?")
        .bind(task_uuid.to_string())
        .fetch_one(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to fetch task tags: {e}")))?;
    let cached_tags_blob: Option<Vec<u8>> = row.get("cachedTags");
    let mut tags: Vec<String> = if let Some(blob) = cached_tags_blob {
        deserialize_tags_from_blob(&blob)?
    } else {
        Vec::new()
    };
    if !tags.contains(&tag.title) {
        tags.push(tag.title.clone());
        let cached_tags = serialize_tags_to_blob(&tags)?;
        let now = Utc::now().timestamp() as f64;
        sqlx::query(
            "UPDATE TMTask SET cachedTags = ?, userModificationDate = ? WHERE uuid = ?",
        )
        .bind(cached_tags)
        .bind(now)
        .bind(task_uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to update task tags: {e}")))?;
        // Record the assignment as the tag's most recent use.
        sqlx::query("UPDATE TMTag SET usedDate = ?, userModificationDate = ? WHERE uuid = ?")
            .bind(now)
            .bind(now)
            .bind(tag.uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to update tag usedDate: {e}")))?;
        info!("Added tag '{}' to task {}", tag.title, task_uuid);
    }
    Ok(TagAssignmentResult::Assigned { tag_uuid: tag.uuid })
}
/// Removes a tag (matched by normalized title) from a task's cachedTags
/// blob. No-op if the task has no tags or the title is not present. When
/// the last tag is removed, cachedTags is set to NULL rather than an
/// empty JSON array.
///
/// # Errors
/// Returns `ThingsError` if the task does not exist or any query /
/// (de)serialization step fails.
#[instrument(skip(self))]
pub async fn remove_tag_from_task(
    &self,
    task_uuid: &Uuid,
    tag_title: &str,
) -> ThingsResult<()> {
    use crate::database::tag_utils::normalize_tag_title;
    validators::validate_task_exists(&self.pool, task_uuid).await?;
    let row = sqlx::query("SELECT cachedTags FROM TMTask WHERE uuid = ?")
        .bind(task_uuid.to_string())
        .fetch_one(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to fetch task tags: {e}")))?;
    let cached_tags_blob: Option<Vec<u8>> = row.get("cachedTags");
    let mut tags: Vec<String> = if let Some(blob) = cached_tags_blob {
        deserialize_tags_from_blob(&blob)?
    } else {
        // Task has no tags at all; nothing to remove.
        return Ok(());
    };
    let normalized = normalize_tag_title(tag_title);
    let original_len = tags.len();
    // Drop every entry matching the normalized title.
    tags.retain(|t| normalize_tag_title(t) != normalized);
    if tags.len() < original_len {
        // Represent "no tags" as NULL instead of an empty JSON array.
        let cached_tags = if tags.is_empty() {
            None
        } else {
            Some(serialize_tags_to_blob(&tags)?)
        };
        let now = Utc::now().timestamp() as f64;
        if let Some(cached_tags_val) = cached_tags {
            sqlx::query(
                "UPDATE TMTask SET cachedTags = ?, userModificationDate = ? WHERE uuid = ?",
            )
            .bind(cached_tags_val)
            .bind(now)
            .bind(task_uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to update task tags: {e}")))?;
        } else {
            sqlx::query(
                "UPDATE TMTask SET cachedTags = NULL, userModificationDate = ? WHERE uuid = ?",
            )
            .bind(now)
            .bind(task_uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to update task tags: {e}")))?;
        }
        info!("Removed tag '{}' from task {}", tag_title, task_uuid);
    }
    Ok(())
}
/// Replaces a task's entire tag list with `tag_titles`.
///
/// Each title is resolved against existing tags by normalized title; an
/// existing tag contributes its canonical title, otherwise the requested
/// title is kept (and similar existing tags are collected as suggestions).
/// Missing tags are then force-created, the task's cachedTags blob is
/// rewritten (NULL when the list is empty), and each resolved tag's
/// `usedDate` is refreshed. Returns the accumulated similarity suggestions.
///
/// # Errors
/// Returns `ThingsError` if the task does not exist or any query /
/// serialization step fails.
#[instrument(skip(self))]
pub async fn set_task_tags(
    &self,
    task_uuid: &Uuid,
    tag_titles: Vec<String>,
) -> ThingsResult<Vec<crate::models::TagMatch>> {
    use crate::database::tag_utils::normalize_tag_title;
    validators::validate_task_exists(&self.pool, task_uuid).await?;
    // Pass 1: resolve each requested title to a canonical one, gathering
    // near-duplicate suggestions for titles that don't exist yet.
    let mut resolved_tags = Vec::new();
    let mut suggestions = Vec::new();
    for title in tag_titles {
        let normalized = normalize_tag_title(&title);
        if let Some(existing_tag) = self.find_tag_by_normalized_title(&normalized).await? {
            resolved_tags.push(existing_tag.title);
        } else {
            let similar_tags = self.find_similar_tags(&normalized, 0.8).await?;
            if !similar_tags.is_empty() {
                suggestions.extend(similar_tags);
            }
            // Keep the requested title; it will be created below.
            resolved_tags.push(title);
        }
    }
    // Pass 2: create any tags that still don't exist.
    for title in &resolved_tags {
        let normalized = normalize_tag_title(title);
        if self
            .find_tag_by_normalized_title(&normalized)
            .await?
            .is_none()
        {
            let request = crate::models::CreateTagRequest {
                title: title.clone(),
                shortcut: None,
                parent_uuid: None,
            };
            self.create_tag_force(request).await?;
        }
    }
    // Pass 3: rewrite the task's blob (NULL represents "no tags").
    let cached_tags = if resolved_tags.is_empty() {
        None
    } else {
        Some(serialize_tags_to_blob(&resolved_tags)?)
    };
    let now = Utc::now().timestamp() as f64;
    if let Some(cached_tags_val) = cached_tags {
        sqlx::query(
            "UPDATE TMTask SET cachedTags = ?, userModificationDate = ? WHERE uuid = ?",
        )
        .bind(cached_tags_val)
        .bind(now)
        .bind(task_uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to update task tags: {e}")))?;
    } else {
        sqlx::query(
            "UPDATE TMTask SET cachedTags = NULL, userModificationDate = ? WHERE uuid = ?",
        )
        .bind(now)
        .bind(task_uuid.to_string())
        .execute(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to update task tags: {e}")))?;
    }
    // Pass 4: record the assignment as each tag's most recent use.
    for title in &resolved_tags {
        let normalized = normalize_tag_title(title);
        if let Some(tag) = self.find_tag_by_normalized_title(&normalized).await? {
            sqlx::query(
                "UPDATE TMTag SET usedDate = ?, userModificationDate = ? WHERE uuid = ?",
            )
            .bind(now)
            .bind(now)
            .bind(tag.uuid.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| ThingsError::unknown(format!("Failed to update tag usedDate: {e}")))?;
        }
    }
    info!("Set tags on task {} to: {:?}", task_uuid, resolved_tags);
    Ok(suggestions)
}
/// Ranks stored tags as completions for `partial_input`.
///
/// Scoring (higher first): normalized prefix match = 3.0, substring match
/// = 2.0 — each boosted by usage_count / 100 — otherwise fuzzy similarity
/// >= 0.6 scores similarity + usage_count / 1000; everything else is
/// dropped. Results are sorted by score descending, truncated to `limit`.
#[instrument(skip(self))]
pub async fn get_tag_completions(
    &self,
    partial_input: &str,
    limit: usize,
) -> ThingsResult<Vec<crate::models::TagCompletion>> {
    use crate::database::tag_utils::{calculate_similarity, normalize_tag_title};
    let needle = normalize_tag_title(partial_input);
    let mut ranked = Vec::new();
    for tag in self.get_all_tags().await? {
        let haystack = normalize_tag_title(&tag.title);
        let score = if haystack.starts_with(&needle) {
            3.0 + (tag.usage_count as f32 / 100.0)
        } else if haystack.contains(&needle) {
            2.0 + (tag.usage_count as f32 / 100.0)
        } else {
            // Fuzzy fallback uses the raw (un-normalized) strings.
            let similarity = calculate_similarity(partial_input, &tag.title);
            if similarity < 0.6 {
                continue;
            }
            similarity + (tag.usage_count as f32 / 1000.0)
        };
        ranked.push(crate::models::TagCompletion { tag, score });
    }
    // Highest score first; NaN compares as equal rather than panicking.
    ranked.sort_by(|a, b| {
        b.score
            .partial_cmp(&a.score)
            .unwrap_or(std::cmp::Ordering::Equal)
    });
    ranked.truncate(limit);
    Ok(ranked)
}
/// Computes usage statistics for the tag identified by `uuid`.
///
/// Scans every non-trashed task carrying a cachedTags blob once, collecting
/// the UUIDs of tasks whose tag list contains this tag's title
/// (case-insensitive) and — from the same in-hand rows — counting
/// co-occurring tags. This removes the N+1 pattern of the previous
/// implementation, which re-fetched each matching task's blob one query at
/// a time after the initial scan.
///
/// # Errors
/// Returns `ThingsError` if the tag does not exist or a query fails.
#[instrument(skip(self))]
pub async fn get_tag_statistics(
    &self,
    uuid: &Uuid,
) -> ThingsResult<crate::models::TagStatistics> {
    let tag_row = sqlx::query("SELECT title FROM TMTag WHERE uuid = ?")
        .bind(uuid.to_string())
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to find tag: {e}")))?;
    let title: String = tag_row
        .ok_or_else(|| ThingsError::unknown(format!("Tag not found: {}", uuid)))?
        .get("title");
    let task_rows = sqlx::query(
        "SELECT uuid, cachedTags FROM TMTask
WHERE cachedTags IS NOT NULL
AND trashed = 0",
    )
    .fetch_all(&self.pool)
    .await
    .map_err(|e| ThingsError::unknown(format!("Failed to query tasks with tag: {e}")))?;
    let mut task_uuids = Vec::new();
    let mut related_tags: std::collections::HashMap<String, u32> =
        std::collections::HashMap::new();
    for row in task_rows {
        let uuid_str: String = row.get("uuid");
        let cached_tags_blob: Option<Vec<u8>> = row.get("cachedTags");
        let blob = match cached_tags_blob {
            Some(blob) => blob,
            None => continue,
        };
        // Undeserializable blobs are skipped, matching the prior behavior
        // of the membership scan.
        let tags = match deserialize_tags_from_blob(&blob) {
            Ok(tags) => tags,
            Err(_) => continue,
        };
        if !tags.iter().any(|t| t.eq_ignore_ascii_case(&title)) {
            continue;
        }
        let task_uuid =
            Uuid::parse_str(&uuid_str).unwrap_or_else(|_| things_uuid_to_uuid(&uuid_str));
        task_uuids.push(task_uuid);
        // Count co-occurring tags from the row we already hold — no
        // per-task re-fetch. Exact (case-sensitive) inequality preserved
        // from the original related-tag filter.
        for tag in tags {
            if tag != title {
                *related_tags.entry(tag).or_insert(0) += 1;
            }
        }
    }
    let usage_count = task_uuids.len() as u32;
    let mut related_vec: Vec<(String, u32)> = related_tags.into_iter().collect();
    related_vec.sort_by_key(|r| std::cmp::Reverse(r.1));
    Ok(crate::models::TagStatistics {
        uuid: *uuid,
        title,
        usage_count,
        task_uuids,
        related_tags: related_vec,
    })
}
/// Finds pairs of tags whose titles are at least `min_similarity` similar.
///
/// Every unordered pair of tags is compared exactly once; matching pairs
/// are returned sorted with the most similar pair first.
#[instrument(skip(self))]
pub async fn find_duplicate_tags(
    &self,
    min_similarity: f32,
) -> ThingsResult<Vec<crate::models::TagPair>> {
    use crate::database::tag_utils::calculate_similarity;
    let all_tags = self.get_all_tags().await?;
    let mut pairs = Vec::new();
    for (i, first) in all_tags.iter().enumerate() {
        for second in &all_tags[i + 1..] {
            let score = calculate_similarity(&first.title, &second.title);
            if score >= min_similarity {
                pairs.push(crate::models::TagPair {
                    tag1: first.clone(),
                    tag2: second.clone(),
                    similarity: score,
                });
            }
        }
    }
    // Descending by similarity; incomparable (NaN) scores are treated as equal.
    pairs.sort_by(|left, right| {
        right
            .similarity
            .partial_cmp(&left.similarity)
            .unwrap_or(std::cmp::Ordering::Equal)
    });
    Ok(pairs)
}
const MAX_BULK_BATCH_SIZE: usize = 1000;
/// Moves every task in `request.task_uuids` to the given project and/or
/// area, all-or-nothing inside a single transaction.
///
/// A `None` destination clears the corresponding column, so moving into a
/// project with no area also detaches the tasks from any area (and vice
/// versa).
///
/// # Errors
/// * validation — empty UUID list, batch larger than
///   `MAX_BULK_BATCH_SIZE`, or neither `project_uuid` nor `area_uuid` set
/// * `TaskNotFound` — a requested task does not exist or is trashed
/// * unknown — any underlying database failure
#[instrument(skip(self))]
pub async fn bulk_move(
    &self,
    request: crate::models::BulkMoveRequest,
) -> ThingsResult<crate::models::BulkOperationResult> {
    // Request-shape validation before touching the database.
    if request.task_uuids.is_empty() {
        return Err(ThingsError::validation("Task UUIDs cannot be empty"));
    }
    if request.task_uuids.len() > Self::MAX_BULK_BATCH_SIZE {
        return Err(ThingsError::validation(format!(
            "Batch size {} exceeds maximum of {}",
            request.task_uuids.len(),
            Self::MAX_BULK_BATCH_SIZE
        )));
    }
    if request.project_uuid.is_none() && request.area_uuid.is_none() {
        return Err(ThingsError::validation(
            "Must specify either project_uuid or area_uuid",
        ));
    }
    // Ensure the destination(s) actually exist before starting the move.
    if let Some(project_uuid) = &request.project_uuid {
        validators::validate_project_exists(&self.pool, project_uuid).await?;
    }
    if let Some(area_uuid) = &request.area_uuid {
        validators::validate_area_exists(&self.pool, area_uuid).await?;
    }
    let mut tx = self
        .pool
        .begin()
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to begin transaction: {e}")))?;
    // One "?" placeholder per requested UUID for the IN (...) clause.
    let placeholders = request
        .task_uuids
        .iter()
        .map(|_| "?")
        .collect::<Vec<_>>()
        .join(",");
    let query_str = format!(
        "SELECT uuid FROM TMTask WHERE uuid IN ({}) AND trashed = 0",
        placeholders
    );
    let mut query = sqlx::query(&query_str);
    for uuid in &request.task_uuids {
        query = query.bind(uuid.to_string());
    }
    // Confirm every requested task exists and is not trashed.
    let found_uuids: Vec<String> = query
        .fetch_all(&mut *tx)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to validate tasks: {e}")))?
        .iter()
        .map(|row| row.get("uuid"))
        .collect();
    if found_uuids.len() != request.task_uuids.len() {
        // Report the first missing UUID. NOTE(review): a count mismatch
        // caused purely by duplicate UUIDs in the request finds no missing
        // entry and falls through to the update without an error.
        for uuid in &request.task_uuids {
            if !found_uuids.contains(&uuid.to_string()) {
                tx.rollback().await.ok();
                return Err(ThingsError::TaskNotFound {
                    uuid: uuid.to_string(),
                });
            }
        }
    }
    let now = Utc::now().timestamp() as f64;
    let placeholders = request
        .task_uuids
        .iter()
        .map(|_| "?")
        .collect::<Vec<_>>()
        .join(",");
    let query_str = format!(
        "UPDATE TMTask SET project = ?, area = ?, userModificationDate = ? WHERE uuid IN ({})",
        placeholders
    );
    // Bind order must match the SQL: project, area, timestamp, then the
    // UUIDs for the IN clause.
    let mut query = sqlx::query(&query_str)
        .bind(request.project_uuid.map(|u| u.to_string()))
        .bind(request.area_uuid.map(|u| u.to_string()))
        .bind(now);
    for uuid in &request.task_uuids {
        query = query.bind(uuid.to_string());
    }
    query
        .execute(&mut *tx)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to bulk move tasks: {e}")))?;
    tx.commit()
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to commit transaction: {e}")))?;
    info!("Bulk moved {} task(s)", request.task_uuids.len());
    Ok(crate::models::BulkOperationResult {
        success: true,
        processed_count: request.task_uuids.len(),
        message: format!("Successfully moved {} task(s)", request.task_uuids.len()),
    })
}
/// Rewrites start dates and deadlines for a batch of tasks in one
/// transaction.
///
/// For each field, a `clear_*` flag wins over a new value. Every task's
/// prospective (start, deadline) pair is range-validated before any row is
/// written.
///
/// # Errors
/// * validation — empty UUID list, oversized batch, or an invalid
///   date range (requested or resulting per task)
/// * `TaskNotFound` — a requested task does not exist or is trashed
/// * unknown — any underlying database failure
#[instrument(skip(self))]
pub async fn bulk_update_dates(
    &self,
    request: crate::models::BulkUpdateDatesRequest,
) -> ThingsResult<crate::models::BulkOperationResult> {
    use crate::database::{safe_things_date_to_naive_date, validate_date_range};
    // Request-shape validation before touching the database.
    if request.task_uuids.is_empty() {
        return Err(ThingsError::validation("Task UUIDs cannot be empty"));
    }
    if request.task_uuids.len() > Self::MAX_BULK_BATCH_SIZE {
        return Err(ThingsError::validation(format!(
            "Batch size {} exceeds maximum of {}",
            request.task_uuids.len(),
            Self::MAX_BULK_BATCH_SIZE
        )));
    }
    // Fast-fail when the requested pair is itself inconsistent.
    if let (Some(start), Some(deadline)) = (request.start_date, request.deadline) {
        validate_date_range(Some(start), Some(deadline))?;
    }
    let mut tx = self
        .pool
        .begin()
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to begin transaction: {e}")))?;
    // One "?" placeholder per requested UUID for the IN (...) clause.
    let placeholders = request
        .task_uuids
        .iter()
        .map(|_| "?")
        .collect::<Vec<_>>()
        .join(",");
    let query_str = format!(
        "SELECT uuid, startDate, deadline FROM TMTask WHERE uuid IN ({}) AND trashed = 0",
        placeholders
    );
    let mut query = sqlx::query(&query_str);
    for uuid in &request.task_uuids {
        query = query.bind(uuid.to_string());
    }
    let rows = query
        .fetch_all(&mut *tx)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to validate tasks: {e}")))?;
    if rows.len() != request.task_uuids.len() {
        // Report the first missing UUID. NOTE(review): a count mismatch
        // caused purely by duplicate UUIDs in the request finds no missing
        // entry and falls through without an error.
        let found_uuids: Vec<String> = rows.iter().map(|row| row.get("uuid")).collect();
        for uuid in &request.task_uuids {
            if !found_uuids.contains(&uuid.to_string()) {
                tx.rollback().await.ok();
                return Err(ThingsError::TaskNotFound {
                    uuid: uuid.to_string(),
                });
            }
        }
    }
    // Validate the date pair each task would end up with: clear flag beats
    // new value beats stored value.
    for row in &rows {
        let current_start: Option<i64> = row.get("startDate");
        let current_deadline: Option<i64> = row.get("deadline");
        let final_start = if request.clear_start_date {
            None
        } else if let Some(new_start) = request.start_date {
            Some(new_start)
        } else {
            current_start.and_then(|ts| safe_things_date_to_naive_date(ts).ok())
        };
        let final_deadline = if request.clear_deadline {
            None
        } else if let Some(new_deadline) = request.deadline {
            Some(new_deadline)
        } else {
            current_deadline.and_then(|ts| safe_things_date_to_naive_date(ts).ok())
        };
        validate_date_range(final_start, final_deadline)?;
    }
    let now = Utc::now().timestamp() as f64;
    let placeholders = request
        .task_uuids
        .iter()
        .map(|_| "?")
        .collect::<Vec<_>>()
        .join(",");
    // NOTE(review): when a field has neither a new value nor a clear flag,
    // the value bound below is still NULL, so the UPDATE wipes the stored
    // date even though the validation loop above treated it as kept —
    // confirm whether "unspecified means unchanged" was intended.
    let start_date_value = if request.clear_start_date {
        None
    } else {
        request.start_date.map(naive_date_to_things_timestamp)
    };
    let deadline_value = if request.clear_deadline {
        None
    } else {
        request.deadline.map(naive_date_to_things_timestamp)
    };
    let query_str = format!(
        "UPDATE TMTask SET startDate = ?, deadline = ?, userModificationDate = ? WHERE uuid IN ({})",
        placeholders
    );
    // Bind order must match the SQL: startDate, deadline, timestamp, then
    // the UUIDs for the IN clause.
    let mut query = sqlx::query(&query_str)
        .bind(start_date_value)
        .bind(deadline_value)
        .bind(now);
    for uuid in &request.task_uuids {
        query = query.bind(uuid.to_string());
    }
    query
        .execute(&mut *tx)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to bulk update dates: {e}")))?;
    tx.commit()
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to commit transaction: {e}")))?;
    info!(
        "Bulk updated dates for {} task(s)",
        request.task_uuids.len()
    );
    Ok(crate::models::BulkOperationResult {
        success: true,
        processed_count: request.task_uuids.len(),
        message: format!(
            "Successfully updated dates for {} task(s)",
            request.task_uuids.len()
        ),
    })
}
/// Marks a batch of tasks completed in a single transaction.
///
/// Sets `status = 1` (completed, per `TaskStatus::from_i32`) and stamps
/// both `stopDate` and `userModificationDate` with the current time.
///
/// # Errors
/// * validation — empty UUID list or batch larger than
///   `MAX_BULK_BATCH_SIZE`
/// * `TaskNotFound` — a requested task does not exist or is trashed
/// * unknown — any underlying database failure
#[instrument(skip(self))]
pub async fn bulk_complete(
    &self,
    request: crate::models::BulkCompleteRequest,
) -> ThingsResult<crate::models::BulkOperationResult> {
    // Request-shape validation before touching the database.
    if request.task_uuids.is_empty() {
        return Err(ThingsError::validation("Task UUIDs cannot be empty"));
    }
    if request.task_uuids.len() > Self::MAX_BULK_BATCH_SIZE {
        return Err(ThingsError::validation(format!(
            "Batch size {} exceeds maximum of {}",
            request.task_uuids.len(),
            Self::MAX_BULK_BATCH_SIZE
        )));
    }
    let mut tx = self
        .pool
        .begin()
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to begin transaction: {e}")))?;
    // One "?" placeholder per requested UUID for the IN (...) clause.
    let placeholders = request
        .task_uuids
        .iter()
        .map(|_| "?")
        .collect::<Vec<_>>()
        .join(",");
    let query_str = format!(
        "SELECT uuid FROM TMTask WHERE uuid IN ({}) AND trashed = 0",
        placeholders
    );
    let mut query = sqlx::query(&query_str);
    for uuid in &request.task_uuids {
        query = query.bind(uuid.to_string());
    }
    // Confirm every requested task exists and is not trashed.
    let found_uuids: Vec<String> = query
        .fetch_all(&mut *tx)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to validate tasks: {e}")))?
        .iter()
        .map(|row| row.get("uuid"))
        .collect();
    if found_uuids.len() != request.task_uuids.len() {
        // Report the first missing UUID. NOTE(review): a count mismatch
        // caused purely by duplicate UUIDs in the request finds no missing
        // entry and falls through without an error.
        for uuid in &request.task_uuids {
            if !found_uuids.contains(&uuid.to_string()) {
                tx.rollback().await.ok();
                return Err(ThingsError::TaskNotFound {
                    uuid: uuid.to_string(),
                });
            }
        }
    }
    let now = Utc::now().timestamp() as f64;
    let placeholders = request
        .task_uuids
        .iter()
        .map(|_| "?")
        .collect::<Vec<_>>()
        .join(",");
    let query_str = format!(
        "UPDATE TMTask SET status = 1, stopDate = ?, userModificationDate = ? WHERE uuid IN ({})",
        placeholders
    );
    // Same timestamp is used for both stopDate and userModificationDate.
    let mut query = sqlx::query(&query_str).bind(now).bind(now);
    for uuid in &request.task_uuids {
        query = query.bind(uuid.to_string());
    }
    query
        .execute(&mut *tx)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to bulk complete tasks: {e}")))?;
    tx.commit()
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to commit transaction: {e}")))?;
    info!("Bulk completed {} task(s)", request.task_uuids.len());
    Ok(crate::models::BulkOperationResult {
        success: true,
        processed_count: request.task_uuids.len(),
        message: format!(
            "Successfully completed {} task(s)",
            request.task_uuids.len()
        ),
    })
}
/// Soft-deletes a batch of tasks (sets `trashed = 1`) in one transaction,
/// stamping `userModificationDate`; either every task is trashed or none.
///
/// # Errors
/// * validation — empty UUID list or batch larger than
///   `MAX_BULK_BATCH_SIZE`
/// * `TaskNotFound` — a requested task does not exist or is already trashed
/// * unknown — any underlying database failure
#[instrument(skip(self))]
pub async fn bulk_delete(
    &self,
    request: crate::models::BulkDeleteRequest,
) -> ThingsResult<crate::models::BulkOperationResult> {
    // Reject malformed requests before any database work.
    if request.task_uuids.is_empty() {
        return Err(ThingsError::validation("Task UUIDs cannot be empty"));
    }
    if request.task_uuids.len() > Self::MAX_BULK_BATCH_SIZE {
        return Err(ThingsError::validation(format!(
            "Batch size {} exceeds maximum of {}",
            request.task_uuids.len(),
            Self::MAX_BULK_BATCH_SIZE
        )));
    }
    let mut tx = self
        .pool
        .begin()
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to begin transaction: {e}")))?;
    // One "?" per UUID; the same IN (...) clause serves both statements.
    let in_clause = vec!["?"; request.task_uuids.len()].join(",");
    let select_sql = format!(
        "SELECT uuid FROM TMTask WHERE uuid IN ({}) AND trashed = 0",
        in_clause
    );
    let mut select = sqlx::query(&select_sql);
    for id in &request.task_uuids {
        select = select.bind(id.to_string());
    }
    // Every requested task must exist and be live before anything is trashed.
    let existing: Vec<String> = select
        .fetch_all(&mut *tx)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to validate tasks: {e}")))?
        .iter()
        .map(|row| row.get("uuid"))
        .collect();
    if existing.len() != request.task_uuids.len() {
        let missing = request
            .task_uuids
            .iter()
            .find(|id| !existing.contains(&id.to_string()));
        if let Some(id) = missing {
            tx.rollback().await.ok();
            return Err(ThingsError::TaskNotFound {
                uuid: id.to_string(),
            });
        }
    }
    let modified_at = Utc::now().timestamp() as f64;
    let update_sql = format!(
        "UPDATE TMTask SET trashed = 1, userModificationDate = ? WHERE uuid IN ({})",
        in_clause
    );
    // Bind order matches the SQL: timestamp first, then the IN-clause UUIDs.
    let mut update = sqlx::query(&update_sql).bind(modified_at);
    for id in &request.task_uuids {
        update = update.bind(id.to_string());
    }
    update
        .execute(&mut *tx)
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to bulk delete tasks: {e}")))?;
    tx.commit()
        .await
        .map_err(|e| ThingsError::unknown(format!("Failed to commit transaction: {e}")))?;
    info!("Bulk deleted {} task(s)", request.task_uuids.len());
    Ok(crate::models::BulkOperationResult {
        success: true,
        processed_count: request.task_uuids.len(),
        message: format!("Successfully deleted {} task(s)", request.task_uuids.len()),
    })
}
}
/// Aggregate counts of tasks, projects, and areas in the database.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabaseStats {
    /// Count of task rows.
    pub task_count: u64,
    /// Count of project rows.
    pub project_count: u64,
    /// Count of area rows.
    pub area_count: u64,
}
impl DatabaseStats {
    /// Sum of all three counts.
    #[must_use]
    pub fn total_items(&self) -> u64 {
        [self.task_count, self.project_count, self.area_count]
            .iter()
            .sum()
    }
}
/// Default on-disk location of the Things 3 database for the current user.
///
/// When `HOME` is unset the path starts with a literal `~`, which is not
/// expanded further.
#[must_use]
pub fn get_default_database_path() -> PathBuf {
    const DB_SUFFIX: &str = "Library/Group Containers/JLMPQHK86H.com.culturedcode.ThingsMac/ThingsData-0Z0Z2/Things Database.thingsdatabase/main.sqlite";
    let home = std::env::var("HOME").unwrap_or_else(|_| String::from("~"));
    PathBuf::from(format!("{}/{}", home, DB_SUFFIX))
}
#[cfg(test)]
mod tests {
    // Unit tests for the database helpers: connection setup, enum decoding,
    // pool configuration/health/metrics types, Things-epoch (2001-01-01)
    // date conversions, timestamp sanitizing, and UUID derivation.
    use super::*;
    use tempfile::{NamedTempFile, TempDir};
    // --- Connection / construction ---
    #[tokio::test]
    async fn test_database_connection() {
        let temp_dir = TempDir::new().unwrap();
        let db_path = temp_dir.path().join("test.db");
        // Opening a path with no database file present must fail.
        let result = super::ThingsDatabase::new(&db_path).await;
        assert!(result.is_err());
    }
    #[tokio::test]
    async fn test_connection_string() {
        let result = super::ThingsDatabase::from_connection_string("sqlite::memory:").await;
        assert!(result.is_ok());
    }
    // --- Enum decoding from raw integer columns ---
    #[test]
    fn test_task_status_from_i32() {
        assert_eq!(TaskStatus::from_i32(0), Some(TaskStatus::Incomplete));
        assert_eq!(TaskStatus::from_i32(1), Some(TaskStatus::Completed));
        assert_eq!(TaskStatus::from_i32(2), Some(TaskStatus::Canceled));
        assert_eq!(TaskStatus::from_i32(3), Some(TaskStatus::Trashed));
        assert_eq!(TaskStatus::from_i32(4), None);
        assert_eq!(TaskStatus::from_i32(-1), None);
    }
    #[test]
    fn test_task_type_from_i32() {
        assert_eq!(TaskType::from_i32(0), Some(TaskType::Todo));
        assert_eq!(TaskType::from_i32(1), Some(TaskType::Project));
        assert_eq!(TaskType::from_i32(2), Some(TaskType::Heading));
        assert_eq!(TaskType::from_i32(3), Some(TaskType::Area));
        assert_eq!(TaskType::from_i32(4), None);
        assert_eq!(TaskType::from_i32(-1), None);
    }
    #[test]
    fn test_database_stats_total_items() {
        let stats = DatabaseStats {
            task_count: 10,
            project_count: 5,
            area_count: 3,
        };
        assert_eq!(stats.total_items(), 18);
        let empty_stats = DatabaseStats {
            task_count: 0,
            project_count: 0,
            area_count: 0,
        };
        assert_eq!(empty_stats.total_items(), 0);
    }
    // --- Pool configuration and SQLite pragma defaults ---
    #[test]
    fn test_database_pool_config_default() {
        let config = DatabasePoolConfig::default();
        assert_eq!(config.max_connections, 10);
        assert_eq!(config.min_connections, 1);
        assert_eq!(config.connect_timeout, Duration::from_secs(30));
        assert_eq!(config.idle_timeout, Duration::from_secs(600));
        assert_eq!(config.max_lifetime, Duration::from_secs(1800));
        assert!(config.test_before_acquire);
    }
    #[test]
    fn test_sqlite_optimizations_default() {
        let opts = SqliteOptimizations::default();
        assert!(opts.enable_wal_mode);
        // Negative cache_size is the SQLite convention for a KiB budget.
        assert_eq!(opts.cache_size, -20000);
        assert_eq!(opts.synchronous_mode, "NORMAL".to_string());
        assert_eq!(opts.temp_store, "MEMORY".to_string());
        assert_eq!(opts.journal_mode, "WAL".to_string());
        assert_eq!(opts.mmap_size, 268_435_456);
        assert!(opts.enable_foreign_keys);
        assert!(opts.enable_query_planner);
    }
    // --- Plain-struct construction of health/metrics types ---
    #[test]
    fn test_pool_health_status_creation() {
        let status = PoolHealthStatus {
            is_healthy: true,
            pool_size: 8,
            active_connections: 5,
            idle_connections: 3,
            max_connections: 10,
            min_connections: 1,
            connection_timeout: Duration::from_secs(30),
            idle_timeout: Some(Duration::from_secs(600)),
            max_lifetime: Some(Duration::from_secs(1800)),
        };
        assert!(status.is_healthy);
        assert_eq!(status.active_connections, 5);
        assert_eq!(status.idle_connections, 3);
        assert_eq!(status.pool_size, 8);
    }
    #[test]
    fn test_pool_metrics_creation() {
        let metrics = PoolMetrics {
            pool_size: 8,
            active_connections: 5,
            idle_connections: 3,
            max_connections: 10,
            min_connections: 1,
            utilization_percentage: 80.0,
            is_healthy: true,
            response_time_ms: 50,
            connection_timeout: Duration::from_secs(30),
            idle_timeout: Some(Duration::from_secs(600)),
            max_lifetime: Some(Duration::from_secs(1800)),
        };
        assert!(metrics.is_healthy);
        assert_eq!(metrics.pool_size, 8);
        assert_eq!(metrics.active_connections, 5);
        assert_eq!(metrics.idle_connections, 3);
        // Float compared via epsilon rather than ==.
        assert!((metrics.utilization_percentage - 80.0).abs() < f64::EPSILON);
        assert_eq!(metrics.response_time_ms, 50);
    }
    #[test]
    fn test_comprehensive_health_status_creation() {
        let pool_health = PoolHealthStatus {
            is_healthy: true,
            pool_size: 8,
            active_connections: 5,
            idle_connections: 3,
            max_connections: 10,
            min_connections: 1,
            connection_timeout: Duration::from_secs(30),
            idle_timeout: Some(Duration::from_secs(600)),
            max_lifetime: Some(Duration::from_secs(1800)),
        };
        let pool_metrics = PoolMetrics {
            pool_size: 8,
            active_connections: 5,
            idle_connections: 3,
            max_connections: 10,
            min_connections: 1,
            utilization_percentage: 80.0,
            is_healthy: true,
            response_time_ms: 50,
            connection_timeout: Duration::from_secs(30),
            idle_timeout: Some(Duration::from_secs(600)),
            max_lifetime: Some(Duration::from_secs(1800)),
        };
        let db_stats = DatabaseStats {
            task_count: 50,
            project_count: 10,
            area_count: 5,
        };
        let health_status = ComprehensiveHealthStatus {
            overall_healthy: true,
            pool_health,
            pool_metrics,
            database_stats: db_stats,
            timestamp: Utc::now(),
        };
        assert!(health_status.overall_healthy);
        assert_eq!(health_status.database_stats.total_items(), 65);
    }
    // --- safe_timestamp_convert: clamps non-finite / negative / oversized ---
    #[test]
    fn test_safe_timestamp_convert_edge_cases() {
        assert_eq!(safe_timestamp_convert(1_609_459_200.0), 1_609_459_200);
        assert_eq!(safe_timestamp_convert(0.0), 0);
        assert_eq!(safe_timestamp_convert(-1.0), 0);
        assert_eq!(safe_timestamp_convert(f64::INFINITY), 0);
        assert_eq!(safe_timestamp_convert(f64::NAN), 0);
        // Beyond the year-2100 ceiling collapses to 0.
        assert_eq!(safe_timestamp_convert(5_000_000_000.0), 0);
        let max_timestamp = 4_102_444_800_f64; assert_eq!(safe_timestamp_convert(max_timestamp), 4_102_444_800);
    }
    // --- things_uuid_to_uuid: deterministic hash-derived UUIDs ---
    #[test]
    fn test_things_uuid_to_uuid_consistency() {
        let things_id = "test-id-123";
        let uuid1 = things_uuid_to_uuid(things_id);
        let uuid2 = things_uuid_to_uuid(things_id);
        assert_eq!(uuid1, uuid2, "UUIDs should be consistent for same input");
        let uuid3 = things_uuid_to_uuid("different-id");
        assert_ne!(
            uuid1, uuid3,
            "Different inputs should produce different UUIDs"
        );
        let uuid_empty = things_uuid_to_uuid("");
        assert!(!uuid_empty.to_string().is_empty());
        let long_string = "a".repeat(1000);
        let uuid_long = things_uuid_to_uuid(&long_string);
        assert!(!uuid_long.to_string().is_empty());
    }
    #[test]
    fn test_task_status_from_i32_all_variants() {
        assert_eq!(TaskStatus::from_i32(0), Some(TaskStatus::Incomplete));
        assert_eq!(TaskStatus::from_i32(1), Some(TaskStatus::Completed));
        assert_eq!(TaskStatus::from_i32(2), Some(TaskStatus::Canceled));
        assert_eq!(TaskStatus::from_i32(3), Some(TaskStatus::Trashed));
        assert_eq!(TaskStatus::from_i32(999), None);
        assert_eq!(TaskStatus::from_i32(-1), None);
    }
    #[test]
    fn test_task_type_from_i32_all_variants() {
        assert_eq!(TaskType::from_i32(0), Some(TaskType::Todo));
        assert_eq!(TaskType::from_i32(1), Some(TaskType::Project));
        assert_eq!(TaskType::from_i32(2), Some(TaskType::Heading));
        assert_eq!(TaskType::from_i32(3), Some(TaskType::Area));
        assert_eq!(TaskType::from_i32(999), None);
        assert_eq!(TaskType::from_i32(-1), None);
    }
    #[test]
    fn test_database_pool_config_default_values() {
        let config = DatabasePoolConfig::default();
        assert_eq!(config.max_connections, 10);
        assert_eq!(config.min_connections, 1);
        assert_eq!(config.connect_timeout, Duration::from_secs(30));
        assert_eq!(config.idle_timeout, Duration::from_secs(600));
        assert_eq!(config.max_lifetime, Duration::from_secs(1800));
        assert!(config.test_before_acquire);
    }
    #[test]
    fn test_database_stats_total_items_calculation() {
        let stats = DatabaseStats {
            task_count: 10,
            project_count: 5,
            area_count: 3,
        };
        assert_eq!(stats.total_items(), 18);
        let empty_stats = DatabaseStats {
            task_count: 0,
            project_count: 0,
            area_count: 0,
        };
        assert_eq!(empty_stats.total_items(), 0);
    }
    #[test]
    fn test_pool_health_status_creation_comprehensive() {
        let status = PoolHealthStatus {
            is_healthy: true,
            pool_size: 8,
            active_connections: 2,
            idle_connections: 3,
            max_connections: 10,
            min_connections: 1,
            connection_timeout: Duration::from_secs(30),
            idle_timeout: Some(Duration::from_secs(600)),
            max_lifetime: Some(Duration::from_secs(1800)),
        };
        assert!(status.is_healthy);
        assert_eq!(status.pool_size, 8);
        assert_eq!(status.max_connections, 10);
    }
    #[test]
    fn test_pool_metrics_creation_comprehensive() {
        let metrics = PoolMetrics {
            pool_size: 8,
            active_connections: 5,
            idle_connections: 3,
            max_connections: 10,
            min_connections: 1,
            utilization_percentage: 80.0,
            is_healthy: true,
            response_time_ms: 50,
            connection_timeout: Duration::from_secs(30),
            idle_timeout: Some(Duration::from_secs(600)),
            max_lifetime: Some(Duration::from_secs(1800)),
        };
        assert_eq!(metrics.pool_size, 8);
        assert_eq!(metrics.response_time_ms, 50);
        assert!(metrics.is_healthy);
    }
    #[test]
    fn test_comprehensive_health_status_creation_full() {
        let pool_health = PoolHealthStatus {
            is_healthy: true,
            pool_size: 8,
            active_connections: 2,
            idle_connections: 3,
            max_connections: 10,
            min_connections: 1,
            connection_timeout: Duration::from_secs(30),
            idle_timeout: Some(Duration::from_secs(600)),
            max_lifetime: Some(Duration::from_secs(1800)),
        };
        let pool_metrics = PoolMetrics {
            pool_size: 8,
            active_connections: 5,
            idle_connections: 3,
            max_connections: 10,
            min_connections: 1,
            utilization_percentage: 80.0,
            is_healthy: true,
            response_time_ms: 50,
            connection_timeout: Duration::from_secs(30),
            idle_timeout: Some(Duration::from_secs(600)),
            max_lifetime: Some(Duration::from_secs(1800)),
        };
        let database_stats = DatabaseStats {
            task_count: 100,
            project_count: 20,
            area_count: 5,
        };
        let status = ComprehensiveHealthStatus {
            overall_healthy: true,
            pool_health,
            pool_metrics,
            database_stats,
            timestamp: Utc::now(),
        };
        assert!(status.overall_healthy);
        assert_eq!(status.database_stats.total_items(), 125);
    }
    #[test]
    fn test_sqlite_optimizations_default_values() {
        let opts = SqliteOptimizations::default();
        assert!(opts.enable_wal_mode);
        assert!(opts.enable_foreign_keys);
        assert_eq!(opts.cache_size, -20000);
        assert_eq!(opts.temp_store, "MEMORY");
        assert_eq!(opts.mmap_size, 268_435_456);
        assert_eq!(opts.synchronous_mode, "NORMAL");
        assert_eq!(opts.journal_mode, "WAL");
    }
    #[test]
    fn test_get_default_database_path_format() {
        let path = get_default_database_path();
        let path_str = path.to_string_lossy();
        assert!(path_str.contains("Things Database.thingsdatabase"));
        assert!(path_str.contains("main.sqlite"));
        assert!(path_str.contains("Library/Group Containers"));
    }
    // --- End-to-end against a seeded temp database ---
    #[tokio::test]
    async fn test_database_new_with_config() {
        let temp_file = NamedTempFile::new().unwrap();
        let db_path = temp_file.path();
        crate::test_utils::create_test_database(db_path)
            .await
            .unwrap();
        let config = DatabasePoolConfig {
            max_connections: 5,
            min_connections: 1,
            connect_timeout: Duration::from_secs(10),
            idle_timeout: Duration::from_secs(300),
            max_lifetime: Duration::from_secs(900),
            test_before_acquire: true,
            sqlite_optimizations: SqliteOptimizations::default(),
        };
        let database = ThingsDatabase::new_with_config(db_path, config)
            .await
            .unwrap();
        let pool = database.pool();
        assert!(!pool.is_closed());
    }
    #[tokio::test]
    async fn test_database_error_handling_invalid_path() {
        let result = ThingsDatabase::new(Path::new("/non/existent/path.db")).await;
        assert!(result.is_err(), "Should fail with non-existent path");
    }
    #[tokio::test]
    async fn test_database_get_stats() {
        let temp_file = NamedTempFile::new().unwrap();
        let db_path = temp_file.path();
        crate::test_utils::create_test_database(db_path)
            .await
            .unwrap();
        let database = ThingsDatabase::new(db_path).await.unwrap();
        let stats = database.get_stats().await.unwrap();
        assert!(stats.task_count > 0, "Should have test tasks");
        assert!(stats.area_count > 0, "Should have test areas");
        assert!(stats.total_items() > 0, "Should have total items");
    }
    #[tokio::test]
    async fn test_database_comprehensive_health_check() {
        let temp_file = NamedTempFile::new().unwrap();
        let db_path = temp_file.path();
        crate::test_utils::create_test_database(db_path)
            .await
            .unwrap();
        let database = ThingsDatabase::new(db_path).await.unwrap();
        let health = database.comprehensive_health_check().await.unwrap();
        assert!(health.overall_healthy, "Database should be healthy");
        assert!(health.pool_health.is_healthy, "Pool should be healthy");
        assert!(
            health.pool_metrics.is_healthy,
            "Pool metrics should be healthy"
        );
    }
    // --- things_date_to_naive_date: Things epoch is 2001-01-01 UTC ---
    #[test]
    fn test_things_date_negative_returns_none() {
        assert_eq!(things_date_to_naive_date(-1), None);
        assert_eq!(things_date_to_naive_date(-100), None);
        assert_eq!(things_date_to_naive_date(i64::MIN), None);
    }
    #[test]
    fn test_things_date_zero_returns_none() {
        assert_eq!(things_date_to_naive_date(0), None);
    }
    #[test]
    fn test_things_date_boundary_2001() {
        use chrono::Datelike;
        let result = things_date_to_naive_date(1);
        assert!(result.is_some());
        let date = result.unwrap();
        assert_eq!(date.year(), 2001);
        assert_eq!(date.month(), 1);
        assert_eq!(date.day(), 1);
    }
    #[test]
    fn test_things_date_one_day() {
        use chrono::Datelike;
        let seconds_per_day = 86400i64;
        let result = things_date_to_naive_date(seconds_per_day);
        assert!(result.is_some());
        let date = result.unwrap();
        assert_eq!(date.year(), 2001);
        assert_eq!(date.month(), 1);
        assert_eq!(date.day(), 2);
    }
    #[test]
    fn test_things_date_one_year() {
        use chrono::Datelike;
        let seconds_per_year = 365 * 86400i64;
        let result = things_date_to_naive_date(seconds_per_year);
        assert!(result.is_some());
        let date = result.unwrap();
        assert_eq!(date.year(), 2002);
    }
    #[test]
    fn test_things_date_current_era() {
        use chrono::Datelike;
        // 8401 days after 2001-01-01 lands in early 2024.
        let days_to_2024 = 8401i64;
        let seconds_to_2024 = days_to_2024 * 86400;
        let result = things_date_to_naive_date(seconds_to_2024);
        assert!(result.is_some());
        let date = result.unwrap();
        assert_eq!(date.year(), 2024);
    }
    #[test]
    fn test_things_date_leap_year() {
        use chrono::{Datelike, TimeZone, Utc};
        let base_date = Utc.with_ymd_and_hms(2001, 1, 1, 0, 0, 0).single().unwrap();
        let target_date = Utc.with_ymd_and_hms(2004, 2, 29, 0, 0, 0).single().unwrap();
        let seconds_diff = (target_date - base_date).num_seconds();
        let result = things_date_to_naive_date(seconds_diff);
        assert!(result.is_some());
        let date = result.unwrap();
        assert_eq!(date.year(), 2004);
        assert_eq!(date.month(), 2);
        assert_eq!(date.day(), 29);
    }
    #[test]
    fn test_uuid_conversion_consistency() {
        let input = "ABC123";
        let uuid1 = things_uuid_to_uuid(input);
        let uuid2 = things_uuid_to_uuid(input);
        assert_eq!(uuid1, uuid2);
    }
    #[test]
    fn test_uuid_conversion_uniqueness() {
        let uuid1 = things_uuid_to_uuid("ABC123");
        let uuid2 = things_uuid_to_uuid("ABC124");
        let uuid3 = things_uuid_to_uuid("XYZ789");
        assert_ne!(uuid1, uuid2);
        assert_ne!(uuid1, uuid3);
        assert_ne!(uuid2, uuid3);
    }
    #[test]
    fn test_uuid_conversion_empty_string() {
        let uuid = things_uuid_to_uuid("");
        assert!(!uuid.to_string().is_empty());
    }
    #[test]
    fn test_uuid_conversion_special_characters() {
        let uuid1 = things_uuid_to_uuid("test-with-dashes");
        let uuid2 = things_uuid_to_uuid("test_with_underscores");
        let uuid3 = things_uuid_to_uuid("test.with.dots");
        assert_ne!(uuid1, uuid2);
        assert_ne!(uuid1, uuid3);
        assert_ne!(uuid2, uuid3);
    }
    #[test]
    fn test_safe_timestamp_convert_normal_values() {
        let ts = 1_700_000_000.0; let result = safe_timestamp_convert(ts);
        assert_eq!(result, 1_700_000_000);
    }
    #[test]
    fn test_safe_timestamp_convert_zero() {
        assert_eq!(safe_timestamp_convert(0.0), 0);
    }
    #[test]
    fn test_safe_timestamp_convert_negative() {
        assert_eq!(safe_timestamp_convert(-1.0), 0);
        assert_eq!(safe_timestamp_convert(-1000.0), 0);
    }
    #[test]
    fn test_safe_timestamp_convert_infinity() {
        assert_eq!(safe_timestamp_convert(f64::INFINITY), 0);
        assert_eq!(safe_timestamp_convert(f64::NEG_INFINITY), 0);
    }
    #[test]
    fn test_safe_timestamp_convert_nan() {
        assert_eq!(safe_timestamp_convert(f64::NAN), 0);
    }
    // Round-trip known calendar dates through the Things-epoch conversion.
    #[test]
    fn test_date_roundtrip_known_dates() {
        use chrono::{Datelike, TimeZone, Utc};
        let test_cases = vec![
            (2001, 1, 2), (2010, 6, 15),
            (2020, 12, 31),
            (2024, 2, 29), (2025, 7, 4),
        ];
        for (year, month, day) in test_cases {
            let base_date = Utc.with_ymd_and_hms(2001, 1, 1, 0, 0, 0).single().unwrap();
            let target_date = Utc
                .with_ymd_and_hms(year, month, day, 0, 0, 0)
                .single()
                .unwrap();
            let seconds = (target_date - base_date).num_seconds();
            let converted = things_date_to_naive_date(seconds);
            assert!(
                converted.is_some(),
                "Failed to convert {}-{:02}-{:02}",
                year,
                month,
                day
            );
            let result_date = converted.unwrap();
            assert_eq!(
                result_date.year(),
                year,
                "Year mismatch for {}-{:02}-{:02}",
                year,
                month,
                day
            );
            assert_eq!(
                result_date.month(),
                month,
                "Month mismatch for {}-{:02}-{:02}",
                year,
                month,
                day
            );
            assert_eq!(
                result_date.day(),
                day,
                "Day mismatch for {}-{:02}-{:02}",
                year,
                month,
                day
            );
        }
    }
}