oar_ocr/pipeline/oarocr/orchestration.rs

//! Orchestration utilities for reducing duplication in OCR pipeline processing.
//!
//! This module provides abstractions to eliminate the duplicated orchestration logic
//! found across `process_single_image`, `process_single_image_from_memory`,
//! `process_images_individually`, and the dynamic batching paths.
//!
//! # Architecture Overview
//!
//! The orchestration system consists of several key components:
//!
//! ## Core Components
//!
//! - **[`ImageProcessingOrchestrator`]**: Main orchestrator that handles batch processing
//!   with configurable parallel/sequential strategies and unified result management.
//!
//! - **[`PipelineExecutor`]**: Executes individual pipeline stages with configurable
//!   entry points, supporting both full pipeline execution and partial execution
//!   for dynamic batching scenarios.
//!
//! - **[`ImageInputSource`]**: Abstraction over different image input sources
//!   (file paths, in-memory images, pre-loaded images with paths).
//!
//! - **[`ProcessingStrategy`]**: Configurable strategy for parallel vs. sequential
//!   processing decisions based on item count thresholds.
//!
//! - **[`PipelineStageConfig`]**: Configuration for pipeline execution, allowing
//!   stages to be skipped or execution to start from a specific stage.
//!
//! ## Benefits
//!
//! ### Eliminated Duplication
//!
//! Before this refactoring, the following patterns were duplicated across multiple methods:
//!
//! - **Parallel Processing Logic**: Threshold-based decisions between `map()` and `par_iter()`
//! - **Index Management**: `enumerate()`, result collection as `(index, result)` tuples
//! - **Result Sorting**: `sort_by_key(|(index, _)| *index)` and extraction
//! - **Pipeline Stage Execution**: Identical sequences of orientation → rectification → detection → cropping → text line orientation → recognition
//! - **Error Handling**: Similar error propagation and logging patterns
//!
//! ### Unified Abstractions
//!
//! The new orchestration system provides:
//!
//! - **Single Source of Truth**: All orchestration logic centralized in one place
//! - **Configurable Execution**: Support for different processing strategies and stage configurations
//! - **Type Safety**: Enums and traits prevent invalid configurations at compile time
//! - **Maintainability**: Changes to orchestration logic only need to be made once
//! - **Testability**: Each component can be tested independently
//!
//! # Usage Examples
//!
//! ## Basic Single Image Processing
//!
//! ```rust,ignore
//! use crate::pipeline::oarocr::{ImageProcessingOrchestrator, ImageInputSource, PipelineStageConfig};
//! use std::path::Path;
//!
//! let orchestrator = ImageProcessingOrchestrator::new(&oarocr);
//! let input_source = ImageInputSource::Path(Path::new("image.jpg"));
//! let config = PipelineStageConfig::default(); // Full pipeline
//!
//! let result = orchestrator.process_single(input_source, 0, config)?;
//! ```
//!
//! ## Batch Processing with Auto Strategy
//!
//! ```rust,ignore
//! use crate::pipeline::oarocr::{ProcessingStrategy, ImageInputSource};
//!
//! let orchestrator = ImageProcessingOrchestrator::new(&oarocr);
//! let inputs: Vec<(usize, &Path)> = image_paths.iter().enumerate().collect();
//! let strategy = ProcessingStrategy::Auto(5); // Parallel if more than 5 images
//! let config = PipelineStageConfig::default();
//!
//! let results = orchestrator.process_batch(inputs, strategy, config)?;
//! ```
//!
//! ## Custom Pipeline Configuration
//!
//! ```rust,ignore
//! use crate::pipeline::oarocr::{PipelineStageConfig, PipelineStage};
//!
//! let mut config = PipelineStageConfig::default();
//! config.skip_stages.insert(PipelineStage::Recognition); // Skip text recognition
//! config.start_from = PipelineStage::Detection; // Start from the detection stage
//!
//! let result = orchestrator.process_single(input_source, 0, config)?;
//! ```
//!
//! # Migration Guide
//!
//! The refactoring maintains backward compatibility at the public API level.
//! Internal methods have been simplified:
//!
//! ## Before (Duplicated Logic)
//!
//! ```rust,ignore
//! // Each method had its own parallel processing logic.
//! let results: Result<Vec<_>, OCRError> = if images.len() <= threshold {
//!     images.iter().enumerate().map(|(index, img)| {
//!         let mut result = self.process_single_image_from_memory(img, index)?;
//!         result.index = index;
//!         Ok((index, result))
//!     }).collect()
//! } else {
//!     images.par_iter().enumerate().map(|(index, img)| {
//!         // Same logic, but in parallel
//!     }).collect()
//! };
//! ```
//!
//! ## After (Unified Orchestration)
//!
//! ```rust,ignore
//! // A few lines using the orchestration abstraction.
//! let orchestrator = ImageProcessingOrchestrator::new(self);
//! let inputs: Vec<(usize, &RgbImage)> = images.iter().enumerate().collect();
//! let strategy = ProcessingStrategy::Auto(threshold);
//! orchestrator.process_batch(inputs, strategy, PipelineStageConfig::default())
//! ```

use crate::core::{OCRError, traits::StandardPredictor};
use crate::pipeline::oarocr::{OAROCRResult, SingleImageProcessingParams};
use image::RgbImage;
use rayon::prelude::*;
use std::collections::HashSet;
use std::path::Path;
use std::sync::Arc;
use tracing::{debug, info};

/// Type alias for recognition stage result: (recognized_texts, recognition_scores, failed_recognitions)
type RecognitionStageResult = Result<(Vec<Arc<str>>, Vec<f32>, usize), OCRError>;

/// Represents different sources of image input for processing.
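///
/// A minimal usage sketch (assuming an `img: RgbImage` is in scope):
///
/// ```rust,ignore
/// // Borrow an image that is already in memory; no path is associated.
/// let source = ImageInputSource::Memory(&img);
/// assert!(source.path().is_none());
///
/// // `load_image` clones the borrowed image into an `Arc<RgbImage>`.
/// let img_arc = source.load_image()?;
/// ```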
#[derive(Debug)]
pub enum ImageInputSource<'a> {
    /// Load image from file path
    Path(&'a Path),
    /// Use image already in memory
    Memory(&'a RgbImage),
    /// Pre-loaded image with associated path (for dynamic batching)
    LoadedWithPath(Arc<RgbImage>, &'a Path),
}

impl<'a> ImageInputSource<'a> {
    /// Load the image into an Arc<RgbImage>, handling different input sources
    pub fn load_image(&self) -> Result<Arc<RgbImage>, OCRError> {
        match self {
            ImageInputSource::Path(path) => {
                let img = crate::utils::load_image(path)?;
                Ok(Arc::new(img))
            }
            ImageInputSource::Memory(img) => Ok(Arc::new((*img).clone())),
            ImageInputSource::LoadedWithPath(img_arc, _) => Ok(Arc::clone(img_arc)),
        }
    }

    /// Get the associated path if available
    pub fn path(&self) -> Option<&Path> {
        match self {
            ImageInputSource::Path(path) => Some(path),
            ImageInputSource::Memory(_) => None,
            ImageInputSource::LoadedWithPath(_, path) => Some(path),
        }
    }
}

/// Strategy for processing multiple images
#[derive(Debug, Clone)]
pub enum ProcessingStrategy {
    /// Always process sequentially
    Sequential,
    /// Always process in parallel
    Parallel,
    /// Automatically decide based on threshold
    Auto(usize),
}

impl ProcessingStrategy {
    /// Determine if parallel processing should be used for the given item count
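    ///
    /// A minimal sketch of the threshold semantics: `Auto(n)` chooses parallel
    /// only for item counts strictly greater than `n`.
    ///
    /// ```rust,ignore
    /// let strategy = ProcessingStrategy::Auto(5);
    /// assert!(!strategy.should_use_parallel(5)); // at the threshold: sequential
    /// assert!(strategy.should_use_parallel(6));  // above it: parallel
    /// ```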
    pub fn should_use_parallel(&self, item_count: usize) -> bool {
        match self {
            ProcessingStrategy::Sequential => false,
            ProcessingStrategy::Parallel => true,
            ProcessingStrategy::Auto(threshold) => item_count > *threshold,
        }
    }
}

/// Configuration for pipeline stage execution
#[derive(Debug, Clone)]
pub struct PipelineStageConfig<'a> {
    /// Which stage to start processing from
    pub start_from: PipelineStage,
    /// Stages to skip during processing
    pub skip_stages: HashSet<PipelineStage>,
    /// Custom parameters for continuing from detection stage
    pub custom_params: Option<SingleImageProcessingParams<'a>>,
}

impl<'a> Default for PipelineStageConfig<'a> {
    fn default() -> Self {
        Self {
            start_from: PipelineStage::Orientation,
            skip_stages: HashSet::new(),
            custom_params: None,
        }
    }
}

/// Represents different stages in the OCR pipeline
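///
/// The derived `Ord` follows declaration order, which is what the executor's
/// `start_from` comparisons rely on: earlier stages compare as less than later
/// ones.
///
/// ```rust,ignore
/// assert!(PipelineStage::Orientation < PipelineStage::Detection);
/// assert!(PipelineStage::Detection < PipelineStage::Recognition);
/// ```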
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum PipelineStage {
    Orientation,
    Rectification,
    Detection,
    Cropping,
    TextLineOrientation,
    Recognition,
}

/// Main orchestrator for image processing operations.
///
/// This struct encapsulates the common patterns found across different
/// processing methods, eliminating duplication while maintaining flexibility.
pub struct ImageProcessingOrchestrator<'a> {
    /// Reference to the main OAROCR instance
    oar_ocr: &'a super::OAROCR,
}

/// Parameters for executing remaining pipeline stages
struct RemainingStagesParams<'a> {
    input_img_arc: Arc<RgbImage>,
    current_img: RgbImage,
    text_boxes: Vec<crate::processors::BoundingBox>,
    orientation_angle: Option<f32>,
    rectified_img: Option<Arc<RgbImage>>,
    image_path: Option<&'a Path>,
    index: usize,
    config: &'a PipelineStageConfig<'a>,
}

/// Parameters for building the final OCR result
struct FinalResultParams<'a> {
    input_img_arc: Arc<RgbImage>,
    _current_img: RgbImage,
    text_boxes: Vec<crate::processors::BoundingBox>,
    _cropped_images: Vec<Option<RgbImage>>,
    recognized_texts: Vec<Arc<str>>,
    recognition_scores: Vec<f32>,
    text_line_orientations: Vec<Option<f32>>,
    orientation_angle: Option<f32>,
    rectified_img: Option<Arc<RgbImage>>,
    image_path: Option<&'a Path>,
    index: usize,
    failed_crops: usize,
    failed_recognitions: usize,
}

impl<'a> ImageProcessingOrchestrator<'a> {
    /// Create a new orchestrator with a reference to the OAROCR instance
    pub fn new(oar_ocr: &'a super::OAROCR) -> Self {
        Self { oar_ocr }
    }

    /// Process a batch of images with the specified strategy and configuration.
    ///
    /// This method handles the common orchestration patterns:
    /// - Parallel vs sequential processing decisions
    /// - Index management and result sorting
    /// - Progress logging and error handling
    ///
    /// # Arguments
    ///
    /// * `inputs` - Vector of (index, input_source) pairs to process
    /// * `strategy` - Processing strategy (sequential, parallel, or auto)
    /// * `stage_config` - Configuration for pipeline stage execution
    ///
    /// # Returns
    ///
    /// A Result containing a vector of OAROCRResult ordered by original index
    pub fn process_batch<I>(
        &self,
        inputs: Vec<(usize, I)>,
        strategy: ProcessingStrategy,
        stage_config: PipelineStageConfig<'a>,
    ) -> Result<Vec<OAROCRResult>, OCRError>
    where
        I: Into<ImageInputSource<'a>> + Send + Sync,
    {
        debug!("Processing {} images with orchestrator", inputs.len());

        let use_parallel = strategy.should_use_parallel(inputs.len());

        let results: Result<Vec<_>, OCRError> = if use_parallel {
            debug!("Using parallel processing for {} images", inputs.len());
            inputs
                .into_par_iter()
                .map(|(index, input)| {
                    let input_source = input.into();
                    debug!(
                        "Processing image {} in parallel: {:?}",
                        index + 1,
                        input_source.path().unwrap_or_else(|| Path::new("memory"))
                    );

                    let mut result =
                        self.process_single(input_source, index, stage_config.clone())?;
                    result.index = index;
                    Ok((index, result))
                })
                .collect()
        } else {
            debug!("Using sequential processing for {} images", inputs.len());
            inputs
                .into_iter()
                .map(|(index, input)| {
                    let input_source = input.into();
                    debug!(
                        "Processing image {} sequentially: {:?}",
                        index + 1,
                        input_source.path().unwrap_or_else(|| Path::new("memory"))
                    );

                    let mut result =
                        self.process_single(input_source, index, stage_config.clone())?;
                    result.index = index;
                    Ok((index, result))
                })
                .collect()
        };

        // Sort results by original index and extract final results
        let mut indexed_results = results?;
        indexed_results.sort_by_key(|(index, _)| *index);
        let final_results: Vec<OAROCRResult> = indexed_results
            .into_iter()
            .map(|(_, result)| result)
            .collect();

        info!(
            "OCR pipeline completed for {} images using orchestrator",
            final_results.len()
        );
        Ok(final_results)
    }

    /// Process a single image with the specified configuration.
    ///
    /// This method provides a unified entry point for single image processing
    /// that can handle different input sources and pipeline configurations.
    ///
    /// # Arguments
    ///
    /// * `input` - The image input source (path, memory, or pre-loaded)
    /// * `index` - Index of this image in the batch (for logging and results)
    /// * `stage_config` - Configuration for pipeline stage execution
    ///
    /// # Returns
    ///
    /// A Result containing the OAROCRResult for this image
    pub fn process_single(
        &self,
        input: ImageInputSource<'a>,
        index: usize,
        stage_config: PipelineStageConfig<'a>,
    ) -> Result<OAROCRResult, OCRError> {
        // Load the image based on input source
        let input_img_arc = input.load_image()?;
        let image_path = input.path();

        // Delegate to the pipeline executor
        let executor = PipelineExecutor::new(self.oar_ocr);
        executor.execute_pipeline(input_img_arc, image_path, index, stage_config)
    }
}

/// Executor for OCR pipeline stages with configurable entry points.
///
/// This struct handles the actual pipeline execution logic, supporting
/// different entry points for dynamic batching scenarios.
pub struct PipelineExecutor<'a> {
    /// Reference to the main OAROCR instance
    oar_ocr: &'a super::OAROCR,
}

impl<'a> PipelineExecutor<'a> {
    /// Create a new pipeline executor
    pub fn new(oar_ocr: &'a super::OAROCR) -> Self {
        Self { oar_ocr }
    }

    /// Execute the full pipeline or a subset based on configuration.
    ///
    /// This method consolidates the duplicated pipeline execution logic
    /// from process_single_image and process_single_image_from_memory.
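    ///
    /// A stage runs only if it is not listed in `skip_stages` and is not
    /// earlier than `start_from`. A minimal sketch that runs detection onwards
    /// but skips recognition:
    ///
    /// ```rust,ignore
    /// let mut config = PipelineStageConfig::default();
    /// config.start_from = PipelineStage::Detection;
    /// config.skip_stages.insert(PipelineStage::Recognition);
    /// let result = executor.execute_pipeline(img_arc, None, 0, config)?;
    /// ```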
    pub fn execute_pipeline(
        &self,
        input_img_arc: Arc<RgbImage>,
        image_path: Option<&Path>,
        index: usize,
        config: PipelineStageConfig<'a>,
    ) -> Result<OAROCRResult, OCRError> {
        // Handle custom parameters for detection-onwards processing
        if let Some(params) = config.custom_params {
            return self.execute_from_detection(params);
        }

        // Stage 1: Document orientation classification
        let (orientation_angle, mut current_img) =
            if config.skip_stages.contains(&PipelineStage::Orientation)
                || config.start_from > PipelineStage::Orientation
            {
                (None, input_img_arc.as_ref().clone())
            } else {
                self.execute_orientation_stage(input_img_arc.clone())?
            };

        // Stage 2: Document rectification
        let rectified_img = if config.skip_stages.contains(&PipelineStage::Rectification)
            || config.start_from > PipelineStage::Rectification
        {
            None
        } else {
            self.execute_rectification_stage(&mut current_img)?
        };

        // Stage 3: Text detection
        let text_boxes = if config.skip_stages.contains(&PipelineStage::Detection)
            || config.start_from > PipelineStage::Detection
        {
            Vec::new()
        } else {
            self.execute_detection_stage(&current_img)?
        };

        // Continue with remaining stages using the existing logic pattern
        let params = RemainingStagesParams {
            input_img_arc,
            current_img,
            text_boxes,
            orientation_angle,
            rectified_img,
            image_path,
            index,
            config: &config,
        };
        self.execute_remaining_stages(params)
    }

    /// Execute pipeline from the detection stage onwards.
    ///
    /// This method handles the case where detection has already been performed
    /// in dynamic batching scenarios.
    pub fn execute_from_detection(
        &self,
        params: SingleImageProcessingParams,
    ) -> Result<OAROCRResult, OCRError> {
        // Delegate to the existing method for now
        // This maintains compatibility while we refactor
        self.oar_ocr.process_single_image_from_detection(params)
    }

    /// Execute the document orientation classification stage
    fn execute_orientation_stage(
        &self,
        input_img_arc: Arc<RgbImage>,
    ) -> Result<(Option<f32>, RgbImage), OCRError> {
        use crate::pipeline::stages::OrientationStageProcessor;

        let orientation_config = self.oar_ocr.config.orientation_stage.as_ref().cloned();

        let orientation_stage_result = OrientationStageProcessor::process_single(
            input_img_arc,
            self.oar_ocr.doc_orientation_classifier.as_ref(),
            orientation_config.as_ref(),
        )?;

        let orientation_angle = orientation_stage_result.data.orientation_angle;
        let current_img = orientation_stage_result.data.corrected_image;

        Ok((orientation_angle, current_img))
    }

    /// Execute the document rectification stage
    fn execute_rectification_stage(
        &self,
        current_img: &mut RgbImage,
    ) -> Result<Option<Arc<RgbImage>>, OCRError> {
        if let Some(ref rectifier) = self.oar_ocr.doc_rectifier {
            let result = rectifier.predict(vec![current_img.clone()], None)?;
            if let Some(rectified) = result.rectified_img.first() {
                *current_img = (**rectified).clone();
                Ok(Some(Arc::clone(rectified)))
            } else {
                Ok(Some(Arc::new(current_img.clone())))
            }
        } else {
            Ok(None)
        }
    }

    /// Execute the text detection stage
    fn execute_detection_stage(
        &self,
        current_img: &RgbImage,
    ) -> Result<Vec<crate::processors::BoundingBox>, OCRError> {
        let result = self
            .oar_ocr
            .text_detector
            .predict(vec![current_img.clone()], None)?;
        let text_boxes: Vec<crate::processors::BoundingBox> =
            result.dt_polys.into_iter().flatten().collect();
        Ok(text_boxes)
    }

    /// Execute the remaining pipeline stages (cropping, text line orientation, recognition)
    fn execute_remaining_stages(
        &self,
        params: RemainingStagesParams<'_>,
    ) -> Result<OAROCRResult, OCRError> {
        // Stage 4: Text box cropping
        let (cropped_images, failed_crops) =
            if params.config.skip_stages.contains(&PipelineStage::Cropping)
                || params.config.start_from > PipelineStage::Cropping
            {
                (Vec::new(), 0)
            } else {
                self.execute_cropping_stage(&params.current_img, &params.text_boxes)?
            };

        // Stage 5: Text line orientation classification
        let text_line_orientations = if params
            .config
            .skip_stages
            .contains(&PipelineStage::TextLineOrientation)
            || params.config.start_from > PipelineStage::TextLineOrientation
        {
            Vec::new()
        } else {
            self.execute_text_line_orientation_stage(&cropped_images, &params.text_boxes)?
        };

        // Stage 6: Text recognition
        let (recognized_texts, recognition_scores, failed_recognitions) = if params
            .config
            .skip_stages
            .contains(&PipelineStage::Recognition)
            || params.config.start_from > PipelineStage::Recognition
        {
            (Vec::new(), Vec::new(), 0)
        } else {
            self.execute_recognition_stage(&cropped_images, &text_line_orientations)?
        };

        // Build the final result
        let final_params = FinalResultParams {
            input_img_arc: params.input_img_arc,
            _current_img: params.current_img,
            text_boxes: params.text_boxes,
            _cropped_images: cropped_images,
            recognized_texts,
            recognition_scores,
            text_line_orientations,
            orientation_angle: params.orientation_angle,
            rectified_img: params.rectified_img,
            image_path: params.image_path,
            index: params.index,
            failed_crops,
            failed_recognitions,
        };
        self.build_final_result(final_params)
    }

    /// Execute the text box cropping stage
    fn execute_cropping_stage(
        &self,
        current_img: &RgbImage,
        text_boxes: &[crate::processors::BoundingBox],
    ) -> Result<(Vec<Option<RgbImage>>, usize), OCRError> {
        use crate::pipeline::stages::{CroppingConfig, CroppingStageProcessor};

        let cropping_config = CroppingConfig::default();
        let cropping_stage_result = CroppingStageProcessor::process_single(
            current_img,
            text_boxes,
            Some(&cropping_config),
        )?;

        let cropped_images = cropping_stage_result.data.cropped_images;
        let failed_crops = cropping_stage_result.data.failed_crops;

        Ok((cropped_images, failed_crops))
    }

    /// Execute the text line orientation classification stage
    fn execute_text_line_orientation_stage(
        &self,
        cropped_images: &[Option<RgbImage>],
        text_boxes: &[crate::processors::BoundingBox],
    ) -> Result<Vec<Option<f32>>, OCRError> {
        // When the stage cannot run at all, every crop gets an unknown orientation.
        let all_none = || vec![None; cropped_images.len()];

        if !self.oar_ocr.config.use_textline_orientation || text_boxes.is_empty() {
            return Ok(all_none());
        }
        let classifier = match self.oar_ocr.text_line_classifier {
            Some(ref classifier) => classifier,
            None => return Ok(all_none()),
        };

        let valid_images: Vec<RgbImage> = cropped_images
            .iter()
            .filter_map(|o| o.as_ref().cloned())
            .collect();
        if valid_images.is_empty() {
            return Ok(all_none());
        }

        let result = match classifier.predict(valid_images, None) {
            Ok(result) => result,
            // Fill with None values for a failed classification
            Err(_) => return Ok(all_none()),
        };

        // Walk the crops in order, consuming one prediction per successful crop.
        let mut text_line_orientations: Vec<Option<f32>> = Vec::new();
        let mut result_idx = 0usize;
        for cropped_img_opt in cropped_images {
            if cropped_img_opt.is_none() {
                text_line_orientations.push(None);
                continue;
            }

            let orientation = match (
                result.label_names.get(result_idx),
                result.scores.get(result_idx),
            ) {
                (Some(labels), Some(score_list)) => {
                    match (labels.first(), score_list.first()) {
                        (Some(label), Some(&score)) => {
                            let confidence_threshold = self
                                .oar_ocr
                                .config
                                .text_line_orientation_stage
                                .as_ref()
                                .and_then(|config| config.confidence_threshold);

                            let orientation_result =
                                crate::core::parse_text_line_orientation(
                                    label.as_ref(),
                                    score,
                                    confidence_threshold,
                                );

                            if orientation_result.is_confident {
                                Some(orientation_result.angle)
                            } else {
                                None
                            }
                        }
                        _ => None,
                    }
                }
                _ => None,
            };
            text_line_orientations.push(orientation);
            result_idx += 1;
        }

        Ok(text_line_orientations)
    }

    /// Execute the text recognition stage
    fn execute_recognition_stage(
        &self,
        cropped_images: &[Option<RgbImage>],
        text_line_orientations: &[Option<f32>],
    ) -> RecognitionStageResult {
        use crate::pipeline::stages::{RecognitionConfig, RecognitionStageProcessor};

        let recognition_config = RecognitionConfig::from_legacy_config(
            self.oar_ocr.config.use_textline_orientation,
            self.oar_ocr.config.aspect_ratio_bucketing.clone(),
        );

        let recognition_stage_result = RecognitionStageProcessor::process_single(
            cropped_images.to_vec(),
            Some(text_line_orientations),
            Some(&self.oar_ocr.text_recognizer),
            Some(&recognition_config),
        )?;

        let recognized_texts = recognition_stage_result.data.rec_texts;
        let recognition_scores = recognition_stage_result.data.rec_scores;
        let failed_recognitions = recognition_stage_result.data.failed_recognitions;

        Ok((recognized_texts, recognition_scores, failed_recognitions))
    }

    /// Build the final OAROCRResult from all pipeline stage results
    fn build_final_result(&self, params: FinalResultParams<'_>) -> Result<OAROCRResult, OCRError> {
        use crate::pipeline::oarocr::ErrorMetrics;

        // Align recognition results with the detected boxes, in the format
        // expected by OAROCRResult: boxes beyond the recognized range get None.
        let final_texts: Vec<Option<Arc<str>>> = (0..params.text_boxes.len())
            .map(|i| params.recognized_texts.get(i).map(Arc::clone))
            .collect();
        let final_scores: Vec<Option<f32>> = (0..params.text_boxes.len())
            .map(|i| params.recognition_scores.get(i).copied())
            .collect();

        // Build text regions using the helper method
        let text_regions = OAROCRResult::create_text_regions_from_vectors(
            &params.text_boxes,
            &final_texts,
            &final_scores,
            &params.text_line_orientations,
        );

        // Calculate error metrics
        let error_metrics = ErrorMetrics {
            failed_crops: params.failed_crops,
            failed_recognitions: params.failed_recognitions,
            failed_orientations: 0, // This would need to be tracked if we implement it
            total_text_boxes: params.text_boxes.len(),
        };

        // Build the final result
        let input_path_str = params
            .image_path
            .map(|p| p.to_string_lossy().to_string())
            .unwrap_or_else(|| "memory".to_string());

        Ok(OAROCRResult {
            input_path: Arc::from(input_path_str),
            index: params.index,
            input_img: params.input_img_arc,
            text_regions,
            orientation_angle: params.orientation_angle,
            rectified_img: params.rectified_img,
            error_metrics,
        })
    }
}

// Implement conversion traits for convenience
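//
// These let a caller hand `&Path` or `&RgbImage` values straight to the generic
// `process_batch` bound (`I: Into<ImageInputSource>`). A minimal sketch:
//
//     let source: ImageInputSource = Path::new("image.jpg").into();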
impl<'a> From<&'a Path> for ImageInputSource<'a> {
    fn from(path: &'a Path) -> Self {
        ImageInputSource::Path(path)
    }
}

impl<'a> From<&'a RgbImage> for ImageInputSource<'a> {
    fn from(image: &'a RgbImage) -> Self {
        ImageInputSource::Memory(image)
    }
}