// ceres_core/traits.rs
1//! Trait definitions for external dependencies.
2//!
3//! This module defines traits that abstract over external dependencies
4//! (embedding providers, portal clients, data stores), enabling:
5//!
6//! - **Testability**: Mock implementations for unit testing
7//! - **Flexibility**: Different backend implementations (e.g., different embedding APIs)
8//! - **Decoupling**: Core business logic doesn't depend on specific implementations
9//!
//! # Example
//!
//! ```
//! use ceres_core::traits::{EmbeddingProvider, DatasetStore};
//!
//! // Business logic uses traits, not concrete types
//! async fn search_datasets<E, S>(
//!     embedding: &E,
//!     store: &S,
//!     query: &str,
//! ) -> Result<Vec<ceres_core::SearchResult>, ceres_core::AppError>
//! where
//!     E: EmbeddingProvider,
//!     S: DatasetStore,
//! {
//!     let vector: Vec<f32> = embedding.generate(query).await?;
//!     store.search(vector, 10).await
//! }
//! ```
29
30use std::collections::HashMap;
31use std::future::Future;
32
33use chrono::{DateTime, Utc};
34use futures::stream::BoxStream;
35use uuid::Uuid;
36
37use crate::config::PortalType;
38use crate::{AppError, Dataset, NewDataset, SearchResult};
39
40/// Provider for generating text embeddings.
41///
42/// Implementations convert text into vector representations for semantic search.
43/// Different providers may produce vectors of different dimensions:
44/// - Gemini text-embedding-004: 768 dimensions
45/// - OpenAI text-embedding-3-small: 1536 dimensions
46/// - OpenAI text-embedding-3-large: 3072 dimensions
47pub trait EmbeddingProvider: Send + Sync + Clone {
48 /// Returns the provider identifier for logging and configuration.
49 ///
50 /// # Examples
51 ///
52 /// - `"gemini"` for Google Gemini
53 /// - `"openai"` for OpenAI
54 fn name(&self) -> &'static str;
55
56 /// Returns the embedding dimension this provider generates.
57 ///
58 /// This value must match the database column dimension for vector storage.
59 /// Mismatched dimensions will cause insertion failures.
60 fn dimension(&self) -> usize;
61
62 /// Generates an embedding vector for the given text.
63 ///
64 /// # Arguments
65 ///
66 /// * `text` - The text to embed
67 ///
68 /// # Returns
69 ///
70 /// A vector of floating-point values representing the text embedding.
71 /// The vector length must equal `self.dimension()`.
72 fn generate(&self, text: &str) -> impl Future<Output = Result<Vec<f32>, AppError>> + Send;
73
74 /// Maximum number of texts supported per batch API call.
75 ///
76 /// The harvest pipeline uses `min(config.embedding_batch_size, max_batch_size())`
77 /// to ensure batches never exceed provider limits.
78 ///
79 /// # Defaults
80 ///
81 /// Returns `1` (single-item batches). Providers with native batch support
82 /// should override to enable efficient batching.
83 fn max_batch_size(&self) -> usize {
84 1
85 }
86
87 /// Generates embeddings for multiple texts in a batch.
88 ///
89 /// The default implementation calls `generate()` sequentially.
90 /// Providers with native batch API support should override for efficiency.
91 ///
92 /// # Arguments
93 ///
94 /// * `texts` - Slice of texts to embed
95 ///
96 /// # Returns
97 ///
98 /// A vector of embedding vectors, one per input text.
99 fn generate_batch(
100 &self,
101 texts: &[String],
102 ) -> impl Future<Output = Result<Vec<Vec<f32>>, AppError>> + Send {
103 let texts_owned: Vec<String> = texts.to_vec();
104 async move {
105 let mut results = Vec::with_capacity(texts_owned.len());
106 for text in &texts_owned {
107 results.push(self.generate(text).await?);
108 }
109 Ok(results)
110 }
111 }
112}
113
/// Client for accessing open data portals (CKAN, Socrata, etc.).
///
/// Implementations fetch dataset metadata from portal APIs.
pub trait PortalClient: Send + Sync + Clone {
    /// Type representing raw portal data before transformation.
    type PortalData: Send;

    /// Returns the portal type identifier (e.g., "ckan", "socrata", "dcat").
    fn portal_type(&self) -> &'static str;

    /// Returns the base URL of the portal.
    fn base_url(&self) -> &str;

    /// Lists all dataset IDs available on the portal.
    fn list_dataset_ids(&self) -> impl Future<Output = Result<Vec<String>, AppError>> + Send;

    /// Fetches detailed metadata for a specific dataset.
    ///
    /// # Arguments
    ///
    /// * `id` - The dataset identifier
    fn get_dataset(
        &self,
        id: &str,
    ) -> impl Future<Output = Result<Self::PortalData, AppError>> + Send;

    /// Converts portal-specific data into a normalized NewDataset.
    ///
    /// Note: this is an associated function (no `self`), called as
    /// `Client::into_new_dataset(...)`.
    ///
    /// # Arguments
    ///
    /// * `data` - The raw portal data
    /// * `portal_url` - The portal URL for source tracking
    /// * `url_template` - Optional URL template with `{id}` and `{name}` placeholders
    /// * `language` - Preferred language for resolving multilingual fields
    fn into_new_dataset(
        data: Self::PortalData,
        portal_url: &str,
        url_template: Option<&str>,
        language: &str,
    ) -> NewDataset;

    /// Searches for datasets modified since the given timestamp.
    ///
    /// Used for incremental harvesting to fetch only recently modified datasets.
    /// Returns full dataset objects, eliminating the need for separate get_dataset calls.
    ///
    /// # Arguments
    ///
    /// * `since` - Only return datasets modified after this timestamp
    ///
    /// # Returns
    ///
    /// A vector of portal-specific dataset objects modified since the given time.
    /// Returns an error if the portal doesn't support incremental search.
    fn search_modified_since(
        &self,
        since: DateTime<Utc>,
    ) -> impl Future<Output = Result<Vec<Self::PortalData>, AppError>> + Send;

    /// Fetches all datasets from the portal in bulk using paginated search.
    ///
    /// This is far more efficient than `list_dataset_ids()` + individual
    /// `get_dataset()` calls for large portals (e.g., HDX with ~40k datasets),
    /// as it avoids per-dataset HTTP requests and rate limiting.
    ///
    /// Returns full dataset objects ready for processing.
    ///
    /// # Errors
    ///
    /// The default implementation does NOT fall back to anything: it
    /// unconditionally returns `AppError::Generic`. Callers that receive this
    /// error are expected to fall back to `list_dataset_ids()` + `get_dataset()`
    /// themselves. Portals with bulk search support should override this method.
    fn search_all_datasets(
        &self,
    ) -> impl Future<Output = Result<Vec<Self::PortalData>, AppError>> + Send {
        async {
            Err(AppError::Generic(
                "search_all_datasets not supported".to_string(),
            ))
        }
    }
}
192
/// Factory for creating portal clients.
///
/// Separate from PortalClient to avoid issues with async trait constructors.
pub trait PortalClientFactory: Send + Sync + Clone {
    /// The type of portal client this factory creates.
    type Client: PortalClient;

    /// Creates a new portal client for the given URL and portal type.
    ///
    /// # Arguments
    ///
    /// * `portal_url` - The portal API base URL
    /// * `portal_type` - The type of portal to create a client for
    ///
    /// # Errors
    ///
    /// Returns an [`AppError`] if a client cannot be constructed for the
    /// given URL / portal-type combination.
    fn create(&self, portal_url: &str, portal_type: PortalType) -> Result<Self::Client, AppError>;
}
208
/// Store for dataset persistence and retrieval.
///
/// Implementations handle database operations for datasets.
pub trait DatasetStore: Send + Sync + Clone {
    /// Retrieves a dataset by its unique ID.
    ///
    /// # Arguments
    ///
    /// * `id` - The dataset's UUID
    ///
    /// # Returns
    ///
    /// `Some(dataset)` if found, or `None` if no row has that ID.
    fn get_by_id(&self, id: Uuid)
        -> impl Future<Output = Result<Option<Dataset>, AppError>> + Send;

    /// Retrieves content hashes for all datasets from a specific portal.
    ///
    /// Used for delta detection to determine which datasets need reprocessing.
    ///
    /// # Arguments
    ///
    /// * `portal_url` - The source portal URL
    ///
    /// # Returns
    ///
    /// A map from original_id to optional content_hash (None when the stored
    /// row has no hash recorded).
    fn get_hashes_for_portal(
        &self,
        portal_url: &str,
    ) -> impl Future<Output = Result<HashMap<String, Option<String>>, AppError>> + Send;

    /// Updates only the timestamp for an unchanged dataset.
    ///
    /// Used when content hash matches but we want to track "last seen" time.
    ///
    /// # Arguments
    ///
    /// * `portal_url` - The source portal URL
    /// * `original_id` - The dataset's original ID from the portal
    fn update_timestamp_only(
        &self,
        portal_url: &str,
        original_id: &str,
    ) -> impl Future<Output = Result<(), AppError>> + Send;

    /// Batch updates timestamps for multiple unchanged datasets.
    ///
    /// More efficient than calling `update_timestamp_only` for each dataset.
    ///
    /// # Arguments
    ///
    /// * `portal_url` - The source portal URL
    /// * `original_ids` - Slice of dataset original IDs to update
    ///
    /// # Returns
    ///
    /// The number of rows actually updated (may be less than
    /// `original_ids.len()` if some IDs are absent).
    fn batch_update_timestamps(
        &self,
        portal_url: &str,
        original_ids: &[String],
    ) -> impl Future<Output = Result<u64, AppError>> + Send;

    /// Inserts or updates a dataset.
    ///
    /// # Arguments
    ///
    /// * `dataset` - The dataset to upsert
    ///
    /// # Returns
    ///
    /// The UUID of the affected row.
    fn upsert(&self, dataset: &NewDataset) -> impl Future<Output = Result<Uuid, AppError>> + Send;

    /// Batch upserts multiple datasets in a single operation.
    ///
    /// Much faster than calling `upsert` in a loop because it reduces
    /// database round-trips and amortizes index update costs.
    ///
    /// # Arguments
    ///
    /// * `datasets` - Slice of datasets to upsert
    ///
    /// # Returns
    ///
    /// The UUIDs of all affected rows.
    fn batch_upsert(
        &self,
        datasets: &[NewDataset],
    ) -> impl Future<Output = Result<Vec<Uuid>, AppError>> + Send;

    /// Performs vector similarity search.
    ///
    /// The query vector's dimension must match the store's embedding column
    /// (see `EmbeddingProvider::dimension`).
    ///
    /// # Arguments
    ///
    /// * `query_vector` - The embedding vector to search for
    /// * `limit` - Maximum number of results
    ///
    /// # Returns
    ///
    /// Datasets ranked by similarity score (highest first).
    fn search(
        &self,
        query_vector: Vec<f32>,
        limit: usize,
    ) -> impl Future<Output = Result<Vec<SearchResult>, AppError>> + Send;

    /// Lists datasets as a stream with optional filtering.
    ///
    /// This method returns a stream of datasets for memory-efficient
    /// processing of large result sets. Unlike batch methods, it streams
    /// results directly from the database without loading everything into memory.
    ///
    /// # Arguments
    ///
    /// * `portal_filter` - Optional portal URL to filter by
    /// * `limit` - Optional maximum number of records
    fn list_stream<'a>(
        &'a self,
        portal_filter: Option<&'a str>,
        limit: Option<usize>,
    ) -> BoxStream<'a, Result<Dataset, AppError>>;

    /// Retrieves the last successful sync timestamp for a portal.
    ///
    /// Used for incremental harvesting to determine which datasets
    /// have been modified since the last sync.
    ///
    /// # Arguments
    ///
    /// * `portal_url` - The source portal URL
    ///
    /// # Returns
    ///
    /// The timestamp of the last successful sync, or None if never synced.
    fn get_last_sync_time(
        &self,
        portal_url: &str,
    ) -> impl Future<Output = Result<Option<DateTime<Utc>>, AppError>> + Send;

    /// Records a sync status for a portal.
    ///
    /// Called after a harvest operation to update the sync status.
    /// The `sync_status` parameter indicates the outcome: "completed" or "cancelled".
    ///
    /// # Arguments
    ///
    /// * `portal_url` - The source portal URL
    /// * `sync_time` - The timestamp of this sync
    /// * `sync_mode` - Either "full" or "incremental"
    /// TODO(design): sync_mode/sync_status should be typed enums, not &str
    /// * `sync_status` - The outcome: "completed" or "cancelled"
    /// * `datasets_synced` - Number of datasets processed
    fn record_sync_status(
        &self,
        portal_url: &str,
        sync_time: DateTime<Utc>,
        sync_mode: &str,
        sync_status: &str,
        datasets_synced: i32,
    ) -> impl Future<Output = Result<(), AppError>> + Send;

    /// Returns lowercased titles that appear across multiple portals.
    ///
    /// Used for cross-portal duplicate detection in Parquet exports.
    /// Typically returns ~21k titles (~2MB) for the full dataset.
    fn get_duplicate_titles(
        &self,
    ) -> impl Future<Output = Result<std::collections::HashSet<String>, AppError>> + Send;

    /// Checks database connectivity.
    ///
    /// Performs a simple query to verify the database is reachable and responsive.
    /// Used by health check endpoints.
    fn health_check(&self) -> impl Future<Output = Result<(), AppError>> + Send;
}
385}