dataflow_rs/engine/
mod.rs

/*!
# Engine Module

This module implements the core workflow engine for dataflow-rs. The engine processes
messages through workflows composed of tasks, providing a flexible and extensible
data processing pipeline.

## Key Components

- **Engine**: The main engine that processes messages through workflows
- **Workflow**: A collection of tasks with conditions that determine when they should be applied
- **Task**: An individual processing unit that performs a specific function on a message
- **AsyncFunctionHandler**: A trait implemented by task handlers to define custom async processing logic
- **Message**: The data structure that flows through the engine, with data, metadata, and processing results
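
## Example

A minimal end-to-end sketch. It is fenced as `ignore` because the
`Workflow::from_json` and `Message::new` constructors shown here are
assumptions about the sibling `workflow` and `message` modules; adapt the
calls to their actual APIs.

```ignore
use dataflow_rs::Engine;
use dataflow_rs::engine::{Message, Workflow};
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut engine = Engine::new();

    // A workflow without a condition applies to every message.
    let workflow = Workflow::from_json(r#"{"id": "example_workflow", "tasks": []}"#)?;
    engine.add_workflow(&workflow);

    let mut message = Message::new(&json!({"hello": "world"}));
    engine.process_message(&mut message).await?;
    Ok(())
}
```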
*/

pub mod error;
pub mod functions;
pub mod message;
pub mod task;
pub mod workflow;

// Re-export key types for easier access
pub use error::{DataflowError, ErrorInfo, Result};
pub use functions::AsyncFunctionHandler;
pub use message::Message;
pub use task::Task;
pub use workflow::Workflow;

// Re-export the jsonlogic library under our namespace
pub use datalogic_rs as jsonlogic;

use chrono::Utc;
use datalogic_rs::DataLogic;
use log::{debug, error, info, warn};
use message::AuditTrail;
use serde_json::{json, Map, Number, Value};
use std::{cell::RefCell, collections::HashMap};
use tokio::time::sleep;

// Thread-local DataLogic instance to avoid mutex contention
thread_local! {
    static THREAD_LOCAL_DATA_LOGIC: RefCell<DataLogic> = RefCell::new(DataLogic::new());
}

/// Configuration for retry behavior
#[derive(Debug, Clone)]
pub struct RetryConfig {
    /// Maximum number of retries
    pub max_retries: u32,
    /// Delay between retries in milliseconds
    pub retry_delay_ms: u64,
    /// Whether to use exponential backoff
    pub use_backoff: bool,
}

impl Default for RetryConfig {
    fn default() -> Self {
        Self {
            max_retries: 3,
            retry_delay_ms: 1000,
            use_backoff: true,
        }
    }
}

/// Engine that processes messages through workflows using non-blocking async IO.
///
/// This engine is optimized for IO-bound workloads like HTTP requests, database access,
/// and file operations. It uses Tokio for efficient async task execution.
///
/// Workflows are processed sequentially to ensure that later workflows can depend
/// on the results of earlier workflows.
pub struct Engine {
    /// Registry of available workflows
    workflows: HashMap<String, Workflow>,
    /// Registry of function handlers that can be executed by tasks
    task_functions: HashMap<String, Box<dyn AsyncFunctionHandler + Send + Sync>>,
    /// Configuration for retry behavior
    retry_config: RetryConfig,
}

impl Default for Engine {
    fn default() -> Self {
        Self::new()
    }
}

impl Engine {
    /// Creates a new Engine instance with built-in function handlers pre-registered.
    ///
    /// # Example
    ///
    /// ```
    /// use dataflow_rs::Engine;
    ///
    /// let engine = Engine::new();
    /// ```
    pub fn new() -> Self {
        let mut engine = Self {
            workflows: HashMap::new(),
            task_functions: HashMap::new(),
            retry_config: RetryConfig::default(),
        };

        // Register built-in function handlers
        for (name, handler) in functions::builtins::get_all_functions() {
            engine.register_task_function(name, handler);
        }

        engine
    }

    /// Create a new engine instance without any pre-registered functions
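    ///
    /// # Example
    ///
    /// ```
    /// use dataflow_rs::Engine;
    ///
    /// let engine = Engine::new_empty();
    /// // Nothing is registered yet; the function name below is arbitrary.
    /// assert!(!engine.has_function("http"));
    /// ```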
    pub fn new_empty() -> Self {
        Self {
            task_functions: HashMap::new(),
            workflows: HashMap::new(),
            retry_config: RetryConfig::default(),
        }
    }

    /// Configure retry behavior
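    ///
    /// # Example
    ///
    /// A sketch of overriding the defaults; the path assumes `RetryConfig`
    /// is reachable through the public `engine` module:
    ///
    /// ```ignore
    /// use dataflow_rs::Engine;
    /// use dataflow_rs::engine::RetryConfig;
    ///
    /// let engine = Engine::new().with_retry_config(RetryConfig {
    ///     max_retries: 5,
    ///     retry_delay_ms: 250,
    ///     use_backoff: false,
    /// });
    /// ```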
    pub fn with_retry_config(mut self, config: RetryConfig) -> Self {
        self.retry_config = config;
        self
    }

    /// Adds a workflow to the engine.
    ///
    /// Workflows that fail validation are logged and not added.
    ///
    /// # Arguments
    ///
    /// * `workflow` - The workflow to add
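    ///
    /// # Example
    ///
    /// A sketch, assuming the `Workflow::from_json` constructor from the
    /// module-level example:
    ///
    /// ```ignore
    /// let workflow = Workflow::from_json(r#"{"id": "wf1", "tasks": []}"#)?;
    /// engine.add_workflow(&workflow);
    /// ```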
    pub fn add_workflow(&mut self, workflow: &Workflow) {
        match workflow.validate() {
            Ok(()) => {
                self.workflows.insert(workflow.id.clone(), workflow.clone());
            }
            Err(e) => error!("Invalid workflow {}: {:?}", workflow.id, e),
        }
    }

    /// Registers a custom function handler with the engine.
    ///
    /// # Arguments
    ///
    /// * `name` - The name of the function handler
    /// * `handler` - The function handler implementation
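    ///
    /// # Example
    ///
    /// A sketch with a hypothetical `UppercaseHandler` type that implements
    /// `AsyncFunctionHandler`:
    ///
    /// ```ignore
    /// engine.register_task_function("uppercase".to_string(), Box::new(UppercaseHandler));
    /// assert!(engine.has_function("uppercase"));
    /// ```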
    pub fn register_task_function(
        &mut self,
        name: String,
        handler: Box<dyn AsyncFunctionHandler + Send + Sync>,
    ) {
        self.task_functions.insert(name, handler);
    }

    /// Check if a function with the given name is registered
    pub fn has_function(&self, name: &str) -> bool {
        self.task_functions.contains_key(name)
    }

    /// Processes a message through workflows that match their conditions.
    ///
    /// This async method:
    /// 1. Iterates through workflows sequentially in deterministic order (sorted by ID)
    /// 2. Evaluates conditions for each workflow right before execution
    /// 3. Executes matching workflows one after another (not concurrently)
    /// 4. Updates the message with processing results and audit trail
    ///
    /// Workflows are executed sequentially because later workflows may depend
    /// on the results of earlier workflows, and their conditions may change
    /// based on modifications made by previous workflows.
    ///
    /// # Arguments
    ///
    /// * `message` - The message to process
    ///
    /// # Returns
    ///
    /// * `Result<()>` - Success or an error if processing failed
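    ///
    /// # Example
    ///
    /// A sketch, assuming the `Message::new` constructor from the
    /// module-level example:
    ///
    /// ```ignore
    /// let mut message = Message::new(&serde_json::json!({"temperature": 21}));
    /// engine.process_message(&mut message).await?;
    /// println!("audit entries: {}", message.audit_trail.len());
    /// ```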
    pub async fn process_message(&self, message: &mut Message) -> Result<()> {
        debug!(
            "Processing message {} sequentially through workflows",
            message.id
        );

        // Collect and sort workflows by ID to ensure deterministic execution order
        // This prevents non-deterministic behavior caused by HashMap iteration order
        let mut sorted_workflows: Vec<_> = self.workflows.iter().collect();
        sorted_workflows.sort_by_key(|(id, _)| id.as_str());

        // Process workflows sequentially in sorted order, evaluating conditions just before execution
        for (_, workflow) in sorted_workflows {
            // Evaluate workflow condition using current message state
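            // Conditions are JSONLogic expressions evaluated against
            // `message.metadata`, e.g. {"==": [{"var": "source"}, "api"]}
            // (illustrative); a workflow without a condition defaults to true.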
            let condition = workflow.condition.clone().unwrap_or(Value::Bool(true));

            if !self
                .evaluate_condition(&condition, &message.metadata)
                .await?
            {
                debug!("Workflow {} skipped - condition not met", workflow.id);
                continue;
            }

            info!("Processing workflow {}", workflow.id);

            // Execute this workflow and merge results back into the message
            let (workflow_id, workflow_message) = Self::process_workflow(
                workflow.clone(),
                message.clone(),
                &self.task_functions,
                &self.retry_config,
            )
            .await;

            // Merge workflow results back into the original message
            message.data = workflow_message.data;
            message.metadata = workflow_message.metadata;
            message.temp_data = workflow_message.temp_data;
            message.audit_trail.extend(workflow_message.audit_trail);
            message.errors.extend(workflow_message.errors);

            info!("Completed processing workflow {}", workflow_id);

            // Errors from a failed workflow are recorded on the message;
            // processing deliberately continues with the remaining workflows.
        }

        debug!(
            "Completed processing all workflows for message {}",
            message.id
        );
        Ok(())
    }

    /// Process a single workflow with sequential task execution
    async fn process_workflow(
        workflow: Workflow,
        mut message: Message,
        task_functions: &HashMap<String, Box<dyn AsyncFunctionHandler + Send + Sync>>,
        retry_config: &RetryConfig,
    ) -> (String, Message) {
        let workflow_id = workflow.id.clone();
        let mut workflow_errors = Vec::new();

        // Process tasks SEQUENTIALLY within this workflow
        // IMPORTANT: Task order matters! Results from previous tasks are used by subsequent tasks.
        // We intentionally process tasks one after another rather than concurrently.
        for task in &workflow.tasks {
            let task_condition = task.condition.clone().unwrap_or(Value::Bool(true));

            // Evaluate task condition using thread-local DataLogic
            let should_execute = THREAD_LOCAL_DATA_LOGIC.with(|data_logic_cell| {
                let data_logic = data_logic_cell.borrow_mut();
                data_logic
                    .evaluate_json(&task_condition, &message.metadata, None)
                    .map_err(|e| {
                        DataflowError::LogicEvaluation(format!("Error evaluating condition: {}", e))
                    })
                    .map(|result| result.as_bool().unwrap_or(false))
            });

            // Handle condition evaluation result
            let should_execute = match should_execute {
                Ok(result) => result,
                Err(e) => {
                    workflow_errors.push(ErrorInfo::new(
                        Some(workflow_id.clone()),
                        Some(task.id.clone()),
                        e.clone(),
                    ));
                    false
                }
            };

            if !should_execute {
                debug!("Task {} skipped - condition not met", task.id);
                continue;
            }

            // Execute task if we have a handler
            if let Some(function) = task_functions.get(&task.function.name) {
                let task_id = task.id.clone();
                let function_input = task.function.input.clone();

                // Execute this task (with retries)
                match Self::execute_task_static(
                    &task_id,
                    &workflow_id,
                    &mut message,
                    &function_input,
                    function.as_ref(),
                    retry_config,
                )
                .await
                {
                    Ok(_) => {
                        debug!("Task {} completed successfully", task_id);
                    }
                    Err(error) => {
                        workflow_errors.push(ErrorInfo::new(
                            Some(workflow_id.clone()),
                            Some(task_id.clone()),
                            error.clone(),
                        ));

                        // Break the task sequence if a task fails
                        break;
                    }
                }
            } else {
                let error =
                    DataflowError::Workflow(format!("Function '{}' not found", task.function.name));

                workflow_errors.push(ErrorInfo::new(
                    Some(workflow_id.clone()),
                    Some(task.id.clone()),
                    error,
                ));

                // Break the task sequence if a function is not found
                break;
            }
        }

        // Add any errors encountered to the message
        message.errors.extend(workflow_errors);

        // Return the processed message for this workflow
        (workflow_id, message)
    }

    /// Static helper method that executes a task, retrying according to the
    /// engine's `RetryConfig`
    async fn execute_task_static(
        task_id: &str,
        workflow_id: &str,
        message: &mut Message,
        input_json: &Value,
        function: &dyn AsyncFunctionHandler,
        retry_config: &RetryConfig,
    ) -> Result<()> {
        info!("Executing task {} in workflow {}", task_id, workflow_id);

        let mut last_error = None;
        let mut retry_count = 0;
        let max_retries = retry_config.max_retries;
        let retry_delay_ms = retry_config.retry_delay_ms;
        let use_backoff = retry_config.use_backoff;

        // Try executing the task with retries
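        // Note: `retry_count <= max_retries` allows 1 + max_retries attempts in total.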
        while retry_count <= max_retries {
            match function.execute(message, input_json).await {
                Ok((status_code, changes)) => {
                    // Success! Record audit trail and return
                    message.audit_trail.push(AuditTrail {
                        workflow_id: workflow_id.to_string(),
                        task_id: task_id.to_string(),
                        timestamp: Utc::now().to_rfc3339(),
                        changes,
                        status_code,
                    });

                    info!("Task {} completed with status {}", task_id, status_code);

                    // Add progress metadata
                    let mut progress = Map::new();
                    progress.insert("task_id".to_string(), Value::String(task_id.to_string()));
                    progress.insert(
                        "workflow_id".to_string(),
                        Value::String(workflow_id.to_string()),
                    );
                    progress.insert(
                        "status_code".to_string(),
                        Value::Number(Number::from(status_code)),
                    );
                    progress.insert(
                        "timestamp".to_string(),
                        Value::String(Utc::now().to_rfc3339()),
                    );

                    if retry_count > 0 {
                        progress.insert(
                            "retries".to_string(),
                            Value::Number(Number::from(retry_count)),
                        );
                    }

                    message.metadata["progress"] = json!(progress);

                    return Ok(());
                }
                Err(e) => {
                    last_error = Some(e.clone());

                    if retry_count < max_retries {
                        warn!(
                            "Task {} execution failed, retry {}/{}: {:?}",
                            task_id,
                            retry_count + 1,
                            max_retries,
                            e
                        );

                        // Calculate delay with optional exponential backoff
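                        // e.g. with a 1000 ms base delay, waits are 1 s, 2 s, 4 s, ...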
                        let delay = if use_backoff {
                            retry_delay_ms * (2_u64.pow(retry_count))
                        } else {
                            retry_delay_ms
                        };

                        // Use tokio's non-blocking sleep
                        sleep(std::time::Duration::from_millis(delay)).await;

                        retry_count += 1;
                    } else {
                        break;
                    }
                }
            }
        }

        // If we're here, all retries failed
        let error = last_error.unwrap_or_else(|| {
            DataflowError::Unknown("Unknown error during task execution".to_string())
        });

        error!(
            "Task {} in workflow {} failed after {} retries: {:?}",
            task_id, workflow_id, retry_count, error
        );

        Err(error)
    }

    /// Evaluates a condition using DataLogic
    async fn evaluate_condition(&self, condition: &Value, data: &Value) -> Result<bool> {
        // For simple boolean conditions, short-circuit
        if let Value::Bool(b) = condition {
            return Ok(*b);
        }

        // Use the thread-local DataLogic instance instead of a mutex-protected one
        THREAD_LOCAL_DATA_LOGIC.with(|data_logic_cell| {
            let data_logic = data_logic_cell.borrow_mut();
            data_logic
                .evaluate_json(condition, data, None)
                .map_err(|e| {
                    DataflowError::LogicEvaluation(format!("Error evaluating condition: {}", e))
                })
                .map(|result| result.as_bool().unwrap_or(false))
        })
    }
}