adaptive_pipeline_domain/repositories/pipeline_repository.rs

// /////////////////////////////////////////////////////////////////////////////
// Adaptive Pipeline
// Copyright (c) 2025 Michael Gardner, A Bit of Help, Inc.
// SPDX-License-Identifier: BSD-3-Clause
// See LICENSE file in the project root.
// /////////////////////////////////////////////////////////////////////////////

//! # Pipeline Repository Interface
//!
//! This module defines the repository pattern interface for pipeline
//! persistence, providing an abstraction layer between the domain and
//! infrastructure layers.
//!
//! ## Overview
//!
//! The `PipelineRepository` trait defines the contract for pipeline data
//! persistence operations. This abstraction enables:
//!
//! - **Separation of Concerns**: Domain logic independent of storage technology
//! - **Testability**: Easy mocking and testing with in-memory implementations
//! - **Flexibility**: Support for different storage backends (SQL, NoSQL, etc.)
//! - **Consistency**: Standardized data access patterns across the application
//!
//! ## Repository Pattern Benefits
//!
//! ### Domain Independence
//! The repository pattern keeps domain logic free from infrastructure concerns:
//! - Domain entities don't know about database schemas
//! - Business rules are not coupled to persistence technology
//! - Clean separation enables better testing and maintenance
//!
//! ### Implementation Flexibility
//! Different storage technologies can be used:
//! - SQL databases (PostgreSQL, MySQL, SQLite)
//! - NoSQL databases (MongoDB, DynamoDB)
//! - In-memory storage for testing
//! - File-based storage for simple deployments
//!
//! ## Usage Examples
//!
//! ### Basic CRUD Operations
//!
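//! A minimal sketch of the create/read/update/delete cycle against any
//! `PipelineRepository` implementation. The helper function and the assumption
//! that `PipelineId` is `Clone` are illustrative, not part of this module's
//! contract:
//!
//! ```ignore
//! async fn crud_round_trip(
//!     repo: &dyn PipelineRepository,
//!     pipeline: Pipeline,
//! ) -> Result<(), PipelineError> {
//!     let id = pipeline.id().clone();
//!
//!     // Create, then read back by ID.
//!     repo.save(&pipeline).await?;
//!     assert!(repo.find_by_id(id.clone()).await?.is_some());
//!
//!     // Update the stored copy, then delete it.
//!     repo.update(&pipeline).await?;
//!     assert!(repo.delete(id).await?);
//!     Ok(())
//! }
//! ```
//!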
//! ### Querying and Listing
//!
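//! A sketch of the lookup and listing methods; the pipeline name and the
//! configuration key/value pair are placeholders:
//!
//! ```ignore
//! async fn browse(repo: &dyn PipelineRepository) -> Result<(), PipelineError> {
//!     // Point lookups by name or by a configuration parameter.
//!     let _by_name = repo.find_by_name("image-compression").await?;
//!     let _by_config = repo.find_by_config("compression", "zstd").await?;
//!
//!     // Page through every pipeline, 50 rows at a time.
//!     let mut offset = 0;
//!     loop {
//!         let page = repo.list_paginated(offset, 50).await?;
//!         if page.is_empty() {
//!             break;
//!         }
//!         offset += page.len();
//!         // ... render or process `page` here ...
//!     }
//!     Ok(())
//! }
//! ```
//!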
//! ### Archive Management
//!
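//! A sketch of the soft-delete lifecycle: `archive` removes a pipeline from
//! normal listings without destroying it, and `restore` brings it back. The
//! `Clone` bound on `PipelineId` is assumed:
//!
//! ```ignore
//! async fn retire_and_recover(
//!     repo: &dyn PipelineRepository,
//!     id: PipelineId,
//! ) -> Result<(), PipelineError> {
//!     // Soft delete: the pipeline no longer appears in list_all().
//!     repo.archive(id.clone()).await?;
//!
//!     // Archived pipelines stay enumerable and can be restored.
//!     assert!(!repo.list_archived().await?.is_empty());
//!     repo.restore(id).await?;
//!     Ok(())
//! }
//! ```
//!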
//! ## Implementation Guidelines
//!
//! ### Error Handling
//! Repository implementations should (see the error-mapping sketch below):
//! - Return `PipelineError` for all error conditions
//! - Handle database connection failures gracefully
//! - Provide meaningful error messages for debugging
//! - Log errors appropriately for monitoring
//!
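//! As a sketch, a SQL-backed implementation might convert driver errors into
//! domain errors at the repository boundary. Both the `sqlx::Error` input and
//! the `PipelineError::database_error` constructor are assumptions here, not
//! guarantees of this module:
//!
//! ```ignore
//! // Hypothetical helper inside a SQL-backed implementation: every storage
//! // failure becomes a domain-level PipelineError before it crosses the
//! // repository boundary.
//! fn map_db_error(e: sqlx::Error) -> PipelineError {
//!     // `database_error` is an assumed constructor; use whatever variant
//!     // the domain error type actually provides.
//!     PipelineError::database_error(format!("pipeline repository query failed: {e}"))
//! }
//! ```
//!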
//! ### Transaction Support
//! Implementations should consider (see the transaction sketch below):
//! - Atomic operations for data consistency
//! - Transaction rollback on failures
//! - Isolation levels for concurrent access
//! - Deadlock detection and retry logic
//!
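//! For SQL-backed implementations, wrapping multi-statement writes in a
//! transaction keeps saves atomic. A sketch using `sqlx`; the table layout and
//! the `id()`/`name()` accessors on `Pipeline` are assumptions, and conversion
//! into `PipelineError` is omitted:
//!
//! ```ignore
//! async fn save_atomically(
//!     pool: &sqlx::SqlitePool,
//!     pipeline: &Pipeline,
//! ) -> Result<(), sqlx::Error> {
//!     let mut tx = pool.begin().await?;
//!
//!     // Any error before commit() rolls the whole write back.
//!     sqlx::query("INSERT INTO pipelines (id, name) VALUES (?, ?)")
//!         .bind(pipeline.id().to_string())
//!         .bind(pipeline.name())
//!         .execute(&mut *tx)
//!         .await?;
//!
//!     tx.commit().await?;
//!     Ok(())
//! }
//! ```
//!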
//! ### Performance Considerations
//! - **Indexing**: Ensure proper database indexes for queries
//! - **Caching**: Implement caching for frequently accessed data
//! - **Connection Pooling**: Use connection pools for database efficiency (see
//!   the pooling sketch below)
//! - **Batch Operations**: Support batch saves/updates when possible
//!
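//! A sketch of building one shared connection pool with `sqlx` at startup and
//! handing it to the repository, rather than opening a connection per call.
//! The pool size and database URL are placeholders:
//!
//! ```ignore
//! use sqlx::sqlite::SqlitePoolOptions;
//!
//! async fn make_pool() -> Result<sqlx::SqlitePool, sqlx::Error> {
//!     SqlitePoolOptions::new()
//!         .max_connections(8)
//!         .connect("sqlite://pipelines.db")
//!         .await
//! }
//! ```
//!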
//! ### Security
//! Repository implementations must (see the parameterized-query sketch below):
//! - Validate all input parameters
//! - Use parameterized queries to prevent SQL injection
//! - Implement proper access controls
//! - Audit sensitive operations
//!
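//! A sketch of a parameterized lookup with `sqlx`: the user-supplied `name` is
//! bound as a parameter instead of being spliced into the SQL string. Table
//! and column names are illustrative:
//!
//! ```ignore
//! async fn find_row_by_name(
//!     pool: &sqlx::SqlitePool,
//!     name: &str,
//! ) -> Result<Option<(String, String)>, sqlx::Error> {
//!     // The `?` placeholder keeps untrusted input out of the SQL text.
//!     sqlx::query_as("SELECT id, name FROM pipelines WHERE name = ?")
//!         .bind(name)
//!         .fetch_optional(pool)
//!         .await
//! }
//! ```
//!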
//! ## Testing Strategies
//!
//! ### Unit Testing
//!
//! An in-memory implementation keeps unit tests fast and hermetic. A minimal
//! sketch, assuming `Pipeline: Clone` and `PipelineId: Eq + Hash + Clone`:
//!
//! ```ignore
//! use std::collections::HashMap;
//! use std::sync::Mutex;
//!
//! use async_trait::async_trait;
//!
//! /// In-memory implementation for testing.
//! struct InMemoryPipelineRepository {
//!     pipelines: Mutex<HashMap<PipelineId, Pipeline>>,
//! }
//!
//! #[async_trait]
//! impl PipelineRepository for InMemoryPipelineRepository {
//!     async fn save(&self, pipeline: &Pipeline) -> Result<(), PipelineError> {
//!         let mut pipelines = self.pipelines.lock().unwrap();
//!         pipelines.insert(pipeline.id().clone(), pipeline.clone());
//!         Ok(())
//!     }
//!
//!     async fn find_by_id(&self, id: PipelineId) -> Result<Option<Pipeline>, PipelineError> {
//!         let pipelines = self.pipelines.lock().unwrap();
//!         Ok(pipelines.get(&id).cloned())
//!     }
//!
//!     // ... implement the remaining trait methods the same way ...
//! }
//! ```
//!
//! ### Integration Testing
//! Test with real database implementations:
//! - Verify data persistence across application restarts
//! - Test concurrent access scenarios
//! - Validate transaction behavior
//! - Run performance tests with large datasets
//!
//! ## Concrete Implementations
//!
//! The infrastructure layer provides concrete implementations:
//! - `SqlitePipelineRepository`: SQLite-based implementation
//! - `PostgresPipelineRepository`: PostgreSQL implementation
//! - `InMemoryPipelineRepository`: Testing implementation
//!
//! Each implementation handles storage-specific concerns while
//! maintaining the same interface contract.

use crate::entities::Pipeline;
use crate::value_objects::PipelineId;
use crate::PipelineError;
use async_trait::async_trait;

/// Repository interface for pipeline persistence operations
///
/// This trait defines the contract for pipeline data access operations,
/// providing an abstraction layer between domain logic and storage technology.
/// All methods are asynchronous to support non-blocking I/O operations.
///
/// # Design Principles
///
/// - **Async-First**: All operations are asynchronous for scalability
/// - **Error Handling**: Comprehensive error handling with `PipelineError`
/// - **Type Safety**: Strong typing with `PipelineId` and `Pipeline` entities
/// - **Flexibility**: Support for different storage implementations
///
/// # Thread Safety
///
/// Implementations must be thread-safe (`Send + Sync`) to support
/// concurrent access in multi-threaded environments.
#[async_trait]
pub trait PipelineRepository: Send + Sync {
    /// Saves a pipeline
    async fn save(&self, pipeline: &Pipeline) -> Result<(), PipelineError>;

    /// Finds a pipeline by ID
    async fn find_by_id(&self, id: PipelineId) -> Result<Option<Pipeline>, PipelineError>;

    /// Finds a pipeline by name
    async fn find_by_name(&self, name: &str) -> Result<Option<Pipeline>, PipelineError>;

    /// Lists all pipelines
    async fn list_all(&self) -> Result<Vec<Pipeline>, PipelineError>;

    /// Finds all pipelines (alias for `list_all`)
    async fn find_all(&self) -> Result<Vec<Pipeline>, PipelineError>;

    /// Lists pipelines with pagination
    async fn list_paginated(&self, offset: usize, limit: usize) -> Result<Vec<Pipeline>, PipelineError>;

    /// Updates a pipeline
    async fn update(&self, pipeline: &Pipeline) -> Result<(), PipelineError>;

    /// Deletes a pipeline by ID
    async fn delete(&self, id: PipelineId) -> Result<bool, PipelineError>;

    /// Checks if a pipeline exists
    async fn exists(&self, id: PipelineId) -> Result<bool, PipelineError>;

    /// Counts total pipelines
    async fn count(&self) -> Result<usize, PipelineError>;

    /// Finds pipelines by configuration parameter
    async fn find_by_config(&self, key: &str, value: &str) -> Result<Vec<Pipeline>, PipelineError>;

    /// Archives a pipeline (soft delete)
    async fn archive(&self, id: PipelineId) -> Result<bool, PipelineError>;

    /// Restores an archived pipeline
    async fn restore(&self, id: PipelineId) -> Result<bool, PipelineError>;

    /// Lists archived pipelines
    async fn list_archived(&self) -> Result<Vec<Pipeline>, PipelineError>;
}