locus_core/lib.rs
1//! High-performance AprilTag and ArUco detection engine.
2//!
3//! Locus is a research-oriented, memory-safe fiducial marker detector targeting low
4//! latency. It provides a performance-focused pipeline for robotics and computer vision,
5//! with strict zero-heap allocation in the detection hot-path.
6//!
7//! # Key Features
8//!
9//! - **Performance**: SIMD-accelerated adaptive thresholding and connected components.
10//! - **Accuracy**: Advanced sub-pixel refinement and probabilistic pose estimation.
11//! - **Flexibility**: Pluggable tag families (AprilTag 36h11, 16h5, ArUco).
12//! - **Memory Safety**: Arena-based memory management ([`bumpalo`]).
13//!
14//! # Architecture
15//!
16//! The pipeline is designed around Data-Oriented Design (DOD) principles:
17//!
18//! 1. **Preprocessing**: [`threshold::ThresholdEngine`] computes local tile statistics and performs adaptive binarization.
19//! 2. **Segmentation**: [`segmentation::label_components_threshold_model`] identifies quad candidates using Union-Find.
20//! 3. **Extraction**: [`quad::extract_quads_with_config`] traces contours and fits initial polygon candidates.
21//! 4. **Decoding**: [`Detector`] samples bit grids and performs Hamming error correction via [`strategy::DecodingStrategy`].
22//! 5. **Pose Estimation**: [`pose::estimate_tag_pose`] recovers 6-DOF transforms using IPPE or weighted LM.
23//!
24//! # Examples
25//!
26//! ```
27//! use locus_core::{Detector, DetectorConfig, DetectOptions, TagFamily, ImageView};
28//!
29//! // Create a detector with default settings
30//! let mut detector = Detector::new();
31//!
32//! // Create a view into your image data (zero-copy)
33//! let pixels = vec![0u8; 640 * 480];
34//! let img = ImageView::new(&pixels, 640, 480, 640).unwrap();
35//!
36//! // Detect tags (default family: AprilTag 36h11)
37//! let detections = detector.detect(&img);
38//!
//! for detection in detections {
//!     println!("Detected tag {} at {:?}", detection.id, detection.center);
//! }
42//! ```
43
44/// Configuration types for the detector pipeline.
45pub mod config;
46/// Tag decoding traits and implementations.
47pub mod decoder;
48/// Tag family dictionaries (AprilTag, ArUco).
49pub mod dictionaries;
50/// Edge-preserving filtering for small tag detection.
51pub mod filter;
52/// Gradient computation for edge refinement.
53pub mod gradient;
54/// Image buffer abstractions.
55pub mod image;
56/// 3D Pose Estimation (PnP).
57pub mod pose;
58/// Weighted pose estimation logic.
59pub mod pose_weighted;
60/// Quad extraction and geometric primitives.
61pub mod quad;
62/// Connected components labeling using Union-Find.
63pub mod segmentation;
64/// Decoding strategies (Hard vs Soft).
65pub mod strategy;
66/// Utilities for testing and synthetic data generation.
67pub mod test_utils;
68/// Adaptive thresholding implementation.
69pub mod threshold;
70
71pub use crate::config::{DetectOptions, DetectorConfig, TagFamily};
72use crate::decoder::TagDecoder;
73pub use crate::decoder::family_to_decoder;
74pub use crate::image::ImageView;
75use bumpalo::Bump;
76use rayon::prelude::*;
77use std::sync::atomic::{AtomicUsize, Ordering};
78
/// Result of a single decoded tag detection.
#[derive(Clone, Debug, Default)]
pub struct Detection {
    /// The decoded ID of the tag within its family.
    pub id: u32,
    /// The center coordinates of the tag in image pixels (x, y).
    pub center: [f64; 2],
    /// The 4 corners of the tag in image pixels, reordered to match the decoded
    /// rotation (see the rotation-correction step in the decoder).
    pub corners: [[f64; 2]; 4],
    /// The number of hamming errors corrected during decoding (0 = exact match).
    pub hamming: u32,
    /// The decision margin of the decoding (higher is more confident).
    pub decision_margin: f64,
    /// The raw extracted bit pattern from the tag (for debugging).
    pub bits: u64,
    /// The 3D pose of the tag relative to the camera. Only computed when both
    /// camera intrinsics and tag size are supplied via [`DetectOptions`].
    pub pose: Option<crate::pose::Pose>,
    /// The covariance of the estimated 3D pose (6x6 matrix), if computed.
    /// Order: [tx, ty, tz, rx, ry, rz] (Lie Algebra se3).
    pub pose_covariance: Option<[[f64; 6]; 6]>,
}
100
/// Pipeline-wide statistics for a single detection call, broken down by stage.
#[derive(Clone, Copy, Debug, Default)]
pub struct PipelineStats {
    /// Time taken for adaptive thresholding in milliseconds.
    pub threshold_ms: f64,
    /// Time taken for connected components labeling in milliseconds.
    pub segmentation_ms: f64,
    /// Time taken for quad extraction and fitting in milliseconds.
    pub quad_extraction_ms: f64,
    /// Time taken for decoding in milliseconds.
    pub decoding_ms: f64,
    /// Total pipeline time in milliseconds (includes all stages above).
    pub total_ms: f64,
    /// Number of quad candidates passed to the decoder.
    pub num_candidates: usize,
    /// Number of final detections.
    pub num_detections: usize,
    /// Number of candidates rejected due to low contrast.
    pub num_rejected_by_contrast: usize,
    /// Number of candidates that passed contrast check but failed to match any tag code (hamming distance too high).
    pub num_rejected_by_hamming: usize,
}
124
125/// Full result of a detection including intermediate data for debugging.
126pub struct FullDetectionResult {
127 /// Final detections.
128 pub detections: Vec<Detection>,
129 /// All quad candidates found before decoding.
130 pub candidates: Vec<Detection>,
131 /// The binarized image (thresholded).
132 pub binarized: Option<Vec<u8>>,
133 /// Labeled connected components image.
134 pub labels: Option<Vec<u32>>,
135 /// Pipeline statistics.
136 pub stats: PipelineStats,
137}
138
/// The main entry point for detecting AprilTags.
///
/// The detector holds reusable state (arena allocator, threshold engine)
/// and can be configured at construction time via [`DetectorConfig`].
pub struct Detector {
    // Bump arena reused across detect calls; reset at the start of each run.
    arena: Bump,
    // Active pipeline configuration. Temporarily mutated during a run to
    // auto-scale parameters for decimation/upscaling, then restored.
    config: DetectorConfig,
    // Decoders tried against every quad candidate (one per enabled tag family).
    decoders: Vec<Box<dyn TagDecoder + Send + Sync>>,
    // Reusable scratch buffer for the upscaled image (avoids per-call allocation).
    upscale_buf: Vec<u8>,
}
149
150impl Detector {
151 /// Create a new detector instance with default configuration.
152 #[must_use]
153 pub fn new() -> Self {
154 Self::with_config(DetectorConfig::default())
155 }
156
157 /// Create a detector with custom pipeline configuration.
158 #[must_use]
159 pub fn with_config(config: DetectorConfig) -> Self {
160 Self {
161 arena: Bump::new(),
162 config,
163 decoders: vec![Box::new(crate::decoder::AprilTag36h11)],
164 upscale_buf: Vec::new(),
165 }
166 }
167
    /// Get the current detector configuration.
    ///
    /// Returns a copy; mutating the returned value does not affect the detector.
    /// (Kept as `get_config` for API stability, although Rust convention would
    /// name a getter simply `config`.)
    pub fn get_config(&self) -> DetectorConfig {
        self.config
    }
172
    /// Add a decoder to the pipeline.
    ///
    /// Decoders are tried against each quad candidate in insertion order.
    pub fn add_decoder(&mut self, decoder: Box<dyn TagDecoder + Send + Sync>) {
        self.decoders.push(decoder);
    }
177
178 /// Clear all decoders and set new ones based on tag families.
179 pub fn set_families(&mut self, families: &[config::TagFamily]) {
180 self.decoders.clear();
181 for family in families {
182 self.decoders.push(family_to_decoder(*family));
183 }
184 }
185
    /// Primary detection entry point using detector's configured decoders.
    ///
    /// Runs with default [`DetectOptions`]; use [`Detector::detect_with_options`]
    /// for per-call overrides (families, decimation, pose estimation).
    pub fn detect(&mut self, img: &ImageView) -> Vec<Detection> {
        self.detect_with_options(img, &DetectOptions::default())
    }
190
    /// Detection with custom per-call options (e.g., specific tag families).
    ///
    /// Thin wrapper over [`Detector::detect_with_stats_and_options`] that
    /// discards the timing statistics.
    pub fn detect_with_options(
        &mut self,
        img: &ImageView,
        options: &DetectOptions,
    ) -> Vec<Detection> {
        self.detect_with_stats_and_options(img, options).0
    }
199
    /// Detection with detailed timing statistics, using default options.
    pub fn detect_with_stats(&mut self, img: &ImageView) -> (Vec<Detection>, PipelineStats) {
        self.detect_with_stats_and_options(img, &DetectOptions::default())
    }
204
205 /// Detection with both custom options and timing statistics.
206 ///
207 /// # Panics
208 /// Panics if the upscaled image buffer cannot be created or viewed.
209 #[allow(clippy::too_many_lines, clippy::unwrap_used)]
210 pub fn detect_with_stats_and_options(
211 &mut self,
212 img: &ImageView,
213 options: &DetectOptions,
214 ) -> (Vec<Detection>, PipelineStats) {
215 let res = self.detect_internal(img, options, false);
216 (res.detections, res.stats)
217 }
218
219 /// Debugging: Extract quad candidates without decoding.
220 ///
221 /// This runs the pipeline up to the quad extraction stage and returns all distinct
222 /// quads found. Useful for visualizing what the detector "sees" before the
223 /// decoder rejects invalid tags.
224 #[allow(clippy::too_many_lines, clippy::unwrap_used)]
225 pub fn extract_candidates(
226 &mut self,
227 img: &ImageView,
228 options: &DetectOptions,
229 ) -> (Vec<Detection>, PipelineStats) {
230 // This is now just a shortcut to detect_internal with a flag if we want to keep it,
231 // but it's better to just return the full result if needed.
232 // For now, let's keep the API compatible.
233 let res = self.detect_internal(img, options, true);
234 (res.candidates, res.stats)
235 }
236
    /// Perform full detection and return all intermediate debug data
    /// (binarized image, label map, raw candidates, statistics).
    pub fn detect_full(&mut self, img: &ImageView, options: &DetectOptions) -> FullDetectionResult {
        self.detect_internal(img, options, true)
    }
241
    /// Internal unified detection pipeline shared by every public entry point.
    ///
    /// Stage order: scaling (decimation or upscaling) -> optional bilateral /
    /// sharpening pre-filters -> adaptive thresholding -> connected-components
    /// segmentation -> quad extraction -> decoding -> coordinate rescaling and
    /// optional pose estimation. Per-stage timings are recorded in the returned
    /// [`PipelineStats`]. When `capture_debug` is true, the binarized image,
    /// label map, and all quad candidates are copied into the result.
    #[allow(clippy::too_many_lines, clippy::unwrap_used)]
    fn detect_internal(
        &mut self,
        img: &ImageView,
        options: &DetectOptions,
        capture_debug: bool,
    ) -> FullDetectionResult {
        let mut stats = PipelineStats::default();
        let start_total = std::time::Instant::now();

        // 0. Decimation or Upscaling (both clamped so 0 means "disabled").
        let decimation = options.decimation.max(1);
        let upscale_factor = self.config.upscale_factor.max(1);

        // Reclaim all arena memory from the previous detect call.
        self.arena.reset();

        // We handle decimation and upscaling as mutually exclusive for simplicity in the hot path,
        // though upscaling is usually for tiny tags and decimation for high-res streams.
        // `refinement_img` is the image later stages refine/decode against: the
        // full-resolution source for decimation, the upscaled image otherwise.
        let (detection_img, effective_scale, refinement_img) = if decimation > 1 {
            let new_w = img.width / decimation;
            let new_h = img.height / decimation;
            let decimated_data = self.arena.alloc_slice_fill_copy(new_w * new_h, 0u8);
            let decimated_img = img
                .decimate_to(decimation, decimated_data)
                .expect("decimation failed");
            (decimated_img, 1.0 / decimation as f64, *img)
        } else if upscale_factor > 1 {
            let new_w = img.width * upscale_factor;
            let new_h = img.height * upscale_factor;
            self.upscale_buf.resize(new_w * new_h, 0);

            let upscaled_img = img
                .upscale_to(upscale_factor, &mut self.upscale_buf)
                .expect("valid upscaled view");
            (upscaled_img, upscale_factor as f64, upscaled_img)
        } else {
            (*img, 1.0, *img)
        };

        // From here on, `img` refers to the (possibly rescaled) detection image.
        let img = &detection_img;

        // Backup config for potential restoration (the config is temporarily
        // auto-scaled below and restored before returning).
        let original_tile_size = self.config.threshold_tile_size;
        let original_max_radius = self.config.threshold_max_radius;
        let original_edge_score = self.config.quad_min_edge_score;
        let original_quad_min_area = self.config.quad_min_area;
        let original_quad_min_edge_length = self.config.quad_min_edge_length;

        if upscale_factor > 1 {
            // Auto-scale parameters for upscaling
            self.config.threshold_tile_size *= upscale_factor;
            self.config.threshold_max_radius *= upscale_factor;
            self.config.quad_min_edge_score /= upscale_factor as f64;
        } else if decimation > 1 {
            // Auto-scale parameters for decimation (downscaling)
            self.config.threshold_tile_size = (self.config.threshold_tile_size / decimation).max(1);
            self.config.threshold_max_radius =
                (self.config.threshold_max_radius / decimation).max(1);
            // Area shrinks quadratically with the decimation factor.
            let factor_sq = (decimation * decimation) as f64;
            #[allow(clippy::cast_sign_loss)]
            {
                self.config.quad_min_area =
                    (f64::from(self.config.quad_min_area) / factor_sq).max(1.0) as u32;
            }
            self.config.quad_min_edge_length =
                (self.config.quad_min_edge_length / decimation as f64).max(1.0);
        }

        // 1a. Optional bilateral pre-filtering (edge-preserving smoothing).
        let filtered_img = if self.config.enable_bilateral {
            let filtered = self
                .arena
                .alloc_slice_fill_copy(img.width * img.height, 0u8);
            crate::filter::bilateral_filter(
                &self.arena,
                img,
                filtered,
                3, // spatial radius
                self.config.bilateral_sigma_space,
                self.config.bilateral_sigma_color,
            );
            ImageView::new(filtered, img.width, img.height, img.width).expect("valid filtered view")
        } else {
            *img
        };

        // 1b. Optional Laplacian sharpening
        let sharpened_img = if self.config.enable_sharpening {
            let sharpened = self
                .arena
                .alloc_slice_fill_copy(filtered_img.width * filtered_img.height, 0u8);
            crate::filter::laplacian_sharpen(&filtered_img, sharpened);

            ImageView::new(
                sharpened,
                filtered_img.width,
                filtered_img.height,
                filtered_img.width,
            )
            .expect("valid sharpened view")
        } else {
            filtered_img
        };

        // Output buffers for the thresholding stage (arena-backed).
        let binarized = self
            .arena
            .alloc_slice_fill_copy(img.width * img.height, 0u8);
        let threshold_map = self
            .arena
            .alloc_slice_fill_copy(img.width * img.height, 0u8);

        let start_thresh = std::time::Instant::now();
        {
            let _span = tracing::info_span!("threshold_binarize").entered();
            if self.config.enable_adaptive_window {
                // Adaptive window is robust for tiny tags but slow (O(N) per pixel)
                let stride = sharpened_img.width + 1;
                let integral = self
                    .arena
                    .alloc_slice_fill_copy(stride * (sharpened_img.height + 1), 0u64);
                crate::threshold::compute_integral_image(&sharpened_img, integral);

                let gradient = self
                    .arena
                    .alloc_slice_fill_copy(img.width * img.height, 0u8);
                crate::filter::compute_gradient_map(&sharpened_img, gradient);

                crate::threshold::adaptive_threshold_gradient_window(
                    &sharpened_img,
                    gradient,
                    integral,
                    binarized,
                    self.config.threshold_min_radius,
                    self.config.threshold_max_radius,
                    self.config.adaptive_threshold_gradient_threshold,
                    self.config.adaptive_threshold_constant,
                );
                threshold_map.fill(128); // Not implemented for adaptive
            } else {
                // Tile-based thresholding is fast (sub-2ms)
                let engine = crate::threshold::ThresholdEngine::from_config(&self.config);
                // NOTE: this `stats` intentionally shadows the outer PipelineStats
                // only inside this block (it holds per-tile min/max statistics).
                let stats = engine.compute_tile_stats(&self.arena, &sharpened_img);
                engine.apply_threshold_with_map(
                    &self.arena,
                    &sharpened_img,
                    &stats,
                    binarized,
                    threshold_map,
                );
            }
        }
        stats.threshold_ms = start_thresh.elapsed().as_secs_f64() * 1000.0;

        // 2. Segmentation - Standard binary CCL
        let start_seg = std::time::Instant::now();
        let label_result = {
            let _span = tracing::info_span!("segmentation").entered();
            crate::segmentation::label_components_threshold_model(
                &self.arena,
                sharpened_img.data,
                sharpened_img.stride,
                threshold_map,
                img.width,
                img.height,
                self.config.segmentation_connectivity == config::SegmentationConnectivity::Eight,
                self.config.quad_min_area, // Keep the noise suppression
                self.config.segmentation_margin,
            )
        };
        stats.segmentation_ms = start_seg.elapsed().as_secs_f64() * 1000.0;

        // 3. Quad Extraction (corners are refined against `refinement_img`).
        let start_quad = std::time::Instant::now();
        let candidates = {
            let _span = tracing::info_span!("quad_extraction").entered();
            crate::quad::extract_quads_with_config(
                &self.arena,
                &sharpened_img,
                &label_result,
                &self.config,
                decimation,
                &refinement_img,
            )
        };
        stats.quad_extraction_ms = start_quad.elapsed().as_secs_f64() * 1000.0;
        stats.num_candidates = candidates.len();

        // 4. Decoding. Per-call families (if any) override the detector's decoders;
        // `temp_decoders` keeps the temporary boxes alive for the borrow below.
        let start_decode = std::time::Instant::now();
        let temp_decoders: Vec<Box<dyn TagDecoder + Send + Sync>>;
        let active_decoders: &[Box<dyn TagDecoder + Send + Sync>] = if options.families.is_empty() {
            &self.decoders
        } else {
            temp_decoders = options
                .families
                .iter()
                .map(|f| family_to_decoder(*f))
                .collect();
            &temp_decoders
        };

        // Dispatch on decode mode so the strategy is a compile-time generic.
        let (mut final_detections, mut processed_candidates, rejected_contrast, rejected_hamming) =
            match self.config.decode_mode {
                config::DecodeMode::Hard => {
                    Self::decode_candidates::<crate::strategy::HardStrategy>(
                        &self.config,
                        candidates,
                        &refinement_img,
                        active_decoders,
                        capture_debug,
                        options,
                    )
                },
                config::DecodeMode::Soft => {
                    Self::decode_candidates::<crate::strategy::SoftStrategy>(
                        &self.config,
                        candidates,
                        &refinement_img,
                        active_decoders,
                        capture_debug,
                        options,
                    )
                },
            };

        stats.num_rejected_by_contrast = rejected_contrast;
        stats.num_rejected_by_hamming = rejected_hamming;

        stats.decoding_ms = start_decode.elapsed().as_secs_f64() * 1000.0;
        stats.num_detections = final_detections.len();

        // Final coordinate adjustment and scaling.
        // Only upscaled runs are mapped back here; decimated runs were already
        // refined against the full-resolution image during quad extraction.
        // NOTE(review): the +0.5 below looks like a pixel-center convention
        // shift — confirm against the quad extraction convention.
        let inv_scale = if upscale_factor > 1 {
            1.0 / effective_scale
        } else {
            1.0
        };

        for d in &mut final_detections {
            for corner in &mut d.corners {
                corner[0] = (corner[0] + 0.5) * inv_scale;
                corner[1] = (corner[1] + 0.5) * inv_scale;
            }
            d.center[0] = (d.center[0] + 0.5) * inv_scale;
            d.center[1] = (d.center[1] + 0.5) * inv_scale;

            // Pose estimation only when calibration and physical size are known.
            if let (Some(intrinsics), Some(tag_size)) = (options.intrinsics, options.tag_size) {
                let (pose, covariance) = crate::pose::estimate_tag_pose(
                    &intrinsics,
                    &d.corners,
                    tag_size,
                    Some(img),
                    options.pose_estimation_mode,
                );
                d.pose = pose;
                d.pose_covariance = covariance;
            }
        }

        // Apply the same coordinate convention to the debug candidates.
        if capture_debug {
            for d in &mut processed_candidates {
                for corner in &mut d.corners {
                    corner[0] = (corner[0] + 0.5) * inv_scale;
                    corner[1] = (corner[1] + 0.5) * inv_scale;
                }
                d.center[0] = (d.center[0] + 0.5) * inv_scale;
                d.center[1] = (d.center[1] + 0.5) * inv_scale;
            }
        }

        stats.total_ms = start_total.elapsed().as_secs_f64() * 1000.0;

        // Restore config mutated by the auto-scaling above.
        self.config.threshold_tile_size = original_tile_size;
        self.config.threshold_max_radius = original_max_radius;
        self.config.quad_min_edge_score = original_edge_score;
        self.config.quad_min_area = original_quad_min_area;
        self.config.quad_min_edge_length = original_quad_min_edge_length;

        FullDetectionResult {
            detections: final_detections,
            candidates: processed_candidates,
            binarized: if capture_debug {
                Some(binarized.to_vec())
            } else {
                None
            },
            labels: if capture_debug {
                Some(label_result.labels.to_vec())
            } else {
                None
            },
            stats,
        }
    }
538}
539
540impl Detector {
541 fn decode_candidates<S: crate::strategy::DecodingStrategy>(
542 config: &crate::config::DetectorConfig,
543 candidates: Vec<Detection>,
544 refinement_img: &ImageView,
545 active_decoders: &[Box<dyn TagDecoder + Send + Sync>],
546 capture_debug: bool,
547 options: &DetectOptions,
548 ) -> (Vec<Detection>, Vec<Detection>, usize, usize) {
549 let rejected_contrast = AtomicUsize::new(0);
550 let rejected_hamming = AtomicUsize::new(0);
551
552 let results: Vec<_> = candidates
553 .into_par_iter()
554 .map(|cand| {
555 let cand_copy = if capture_debug {
556 Some(cand.clone())
557 } else {
558 None
559 };
560 let (det, failed_contrast, failed_hamming, best_h, bits) =
561 Self::decode_single_candidate::<S>(
562 config,
563 cand,
564 refinement_img,
565 active_decoders,
566 options,
567 );
568 (
569 det,
570 failed_contrast,
571 failed_hamming,
572 best_h,
573 bits,
574 cand_copy.unwrap_or(Detection::default()),
575 )
576 })
577 .collect();
578
579 let mut final_detections = Vec::new();
580 let mut processed_candidates = Vec::new();
581
582 for (det, failed_contrast, failed_hamming, best_h, bits, mut cand) in results {
583 if failed_contrast {
584 rejected_contrast.fetch_add(1, Ordering::Relaxed);
585 }
586 if failed_hamming {
587 rejected_hamming.fetch_add(1, Ordering::Relaxed);
588 }
589 if let Some(d) = det {
590 final_detections.push(d);
591 }
592 if capture_debug {
593 cand.hamming = best_h;
594 cand.bits = bits;
595 processed_candidates.push(cand);
596 }
597 }
598
599 (
600 final_detections,
601 processed_candidates,
602 rejected_contrast.load(Ordering::Relaxed),
603 rejected_hamming.load(Ordering::Relaxed),
604 )
605 }
606
    /// Attempt to decode a single quad candidate with strategy `S`.
    ///
    /// Stage 1 samples the bit grid at corner scales 1.0, 0.9 and 1.1 against
    /// every active decoder. Stage 2 optionally runs the configured corner
    /// refinement when the best Hamming distance is above the acceptance
    /// threshold but still within reach.
    ///
    /// Returns `(detection, failed_contrast, failed_hamming, best_hamming, bits)`:
    /// - `detection` is `Some` on a successful decode (early return);
    /// - `failed_contrast` is true when no grid sample ever passed the contrast check;
    /// - `failed_hamming` is true when sampling succeeded but no code was accepted;
    /// - `best_hamming`/`bits` describe the best attempt (for debug candidates).
    #[allow(clippy::too_many_lines)]
    fn decode_single_candidate<S: crate::strategy::DecodingStrategy>(
        config: &crate::config::DetectorConfig,
        mut cand: Detection,
        refinement_img: &ImageView,
        active_decoders: &[Box<dyn TagDecoder + Send + Sync>],
        options: &DetectOptions,
    ) -> (Option<Detection>, bool, bool, u32, u64) {
        let scales = [1.0, 0.9, 1.1]; // Try most likely scales

        let mut best_overall_h = u32::MAX;
        let mut best_overall_code: Option<S::Code> = None;
        let mut passed_contrast = false;

        for scale in scales {
            // Scale the corners about the quad center (skip the no-op 1.0 case).
            let center = cand.center;
            let mut scaled_corners = cand.corners;
            if (scale - 1.0f64).abs() > 1e-4 {
                for c in &mut scaled_corners {
                    c[0] = center[0] + (c[0] - center[0]) * scale;
                    c[1] = center[1] + (c[1] - center[1]) * scale;
                }
            }

            let Some(h) = crate::decoder::Homography::square_to_quad(&scaled_corners) else {
                continue;
            };

            for decoder in active_decoders {
                if let Some(code) =
                    crate::decoder::sample_grid_generic::<S>(refinement_img, &h, decoder.as_ref())
                {
                    passed_contrast = true;
                    if let Some((id, hamming, rot)) = S::decode(&code, decoder.as_ref(), 255) {
                        if hamming < best_overall_h {
                            best_overall_h = hamming;
                            best_overall_code = Some(code.clone());
                        }

                        // Acceptance threshold: up to 4 bit errors for 36h11,
                        // 1 for the other families (NOTE(review): presumably
                        // tied to each family's minimum Hamming distance —
                        // confirm for the ArUco dictionaries).
                        if hamming <= if decoder.name() == "36h11" { 4 } else { 1 } {
                            cand.id = id;
                            cand.hamming = hamming;
                            cand.bits = S::to_debug_bits(&code);

                            // Correct rotation on ORIGINAL corners
                            let mut reordered = [[0.0; 2]; 4];
                            for (i, item) in reordered.iter_mut().enumerate() {
                                let src_idx = (i + usize::from(rot)) % 4;
                                *item = cand.corners[src_idx];
                            }
                            cand.corners = reordered;

                            // Always perform ERF refinement for finalists if requested
                            if config.refinement_mode == crate::config::CornerRefinementMode::Erf {
                                let decode_arena = bumpalo::Bump::new();
                                let refined_corners = crate::decoder::refine_corners_erf(
                                    &decode_arena,
                                    refinement_img,
                                    &cand.corners,
                                    config.subpixel_refinement_sigma,
                                );

                                // Verify that refined corners still yield a valid decode
                                if let Some(h_ref) =
                                    crate::decoder::Homography::square_to_quad(&refined_corners)
                                    && let Some(code_ref) = crate::decoder::sample_grid_generic::<S>(
                                        refinement_img,
                                        &h_ref,
                                        decoder.as_ref(),
                                    )
                                    && let Some((id_ref, hamming_ref, _)) =
                                        S::decode(&code_ref, decoder.as_ref(), 255)
                                {
                                    // Only keep if it's the same tag and hamming is not worse
                                    if id_ref == id && hamming_ref <= hamming {
                                        cand.corners = refined_corners;
                                        cand.hamming = hamming_ref;
                                        cand.bits = S::to_debug_bits(&code_ref);
                                    }
                                }
                            }

                            // Pose only when calibration and physical size are known.
                            if let (Some(intrinsics), Some(tag_size)) =
                                (options.intrinsics, options.tag_size)
                            {
                                let (pose, covariance) = crate::pose::estimate_tag_pose(
                                    &intrinsics,
                                    &cand.corners,
                                    tag_size,
                                    Some(refinement_img),
                                    options.pose_estimation_mode,
                                );
                                cand.pose = pose;
                                cand.pose_covariance = covariance;
                            }
                            return (Some(cand), false, false, hamming, S::to_debug_bits(&code));
                        }
                    }
                }
            }

            // If 1.0 works perfectly, don't even try 0.9/1.1
            if best_overall_h == 0 {
                break;
            }
        }

        // Stage 2: Configurable Corner Refinement.
        // Only attempted when the best attempt is above the acceptance threshold
        // but still within refinement reach (<= 10 for 36h11, <= 4 otherwise).
        let max_h_for_refine = if active_decoders.iter().any(|d| d.name() == "36h11") {
            10
        } else {
            4
        };
        if best_overall_h
            > if active_decoders.iter().any(|d| d.name() == "36h11") {
                4
            } else {
                1
            }
            && best_overall_h <= max_h_for_refine
            && best_overall_code.is_some()
        {
            let mut current_corners = cand.corners;
            let best_code = best_overall_code
                .as_ref()
                .expect("best_overall_code is some")
                .clone();

            match config.refinement_mode {
                crate::config::CornerRefinementMode::None => {},
                crate::config::CornerRefinementMode::Edge => {
                    // Current edge-based subpixel refinement (Gradient Hill Climbing):
                    // nudge each corner by +-0.2px and keep moves that lower Hamming.
                    let nudge = 0.2;

                    for _pass in 0..2 {
                        let mut pass_improved = false;
                        for c_idx in 0..4 {
                            for (dx, dy) in
                                [(nudge, 0.0), (-nudge, 0.0), (0.0, nudge), (0.0, -nudge)]
                            {
                                let mut test_corners = current_corners;
                                test_corners[c_idx][0] += dx;
                                test_corners[c_idx][1] += dy;

                                if let Some(h) =
                                    crate::decoder::Homography::square_to_quad(&test_corners)
                                {
                                    for decoder in active_decoders {
                                        if let Some(code) = crate::decoder::sample_grid_generic::<S>(
                                            refinement_img,
                                            &h,
                                            decoder.as_ref(),
                                        ) && let Some((id, hamming, rot)) =
                                            S::decode(&code, decoder.as_ref(), 255)
                                            && hamming < best_overall_h
                                        {
                                            best_overall_h = hamming;
                                            best_overall_code = Some(code.clone());
                                            current_corners = test_corners;
                                            pass_improved = true;
                                            // Success check
                                            if hamming
                                                <= if decoder.name() == "36h11" { 4 } else { 1 }
                                            {
                                                cand.id = id;
                                                cand.hamming = hamming;
                                                cand.bits = S::to_debug_bits(&code);
                                                cand.corners = current_corners;

                                                // Fix rotation
                                                let mut reordered = [[0.0; 2]; 4];
                                                for (i, item) in reordered.iter_mut().enumerate() {
                                                    let src_idx = (i + usize::from(rot)) % 4;
                                                    *item = cand.corners[src_idx];
                                                }
                                                cand.corners = reordered;
                                                return (
                                                    Some(cand),
                                                    false,
                                                    false,
                                                    hamming,
                                                    S::to_debug_bits(&code),
                                                );
                                            }
                                        }
                                    }
                                }
                            }
                        }
                        // Stop early once a full pass yields no improvement.
                        if !pass_improved {
                            break;
                        }
                    }
                },
                crate::config::CornerRefinementMode::GridFit => {
                    // New GridFit optimization
                    for decoder in active_decoders {
                        // Recover ID and rotation from the best bits to construct the ideal target
                        if let Some((id, hamming, rot)) =
                            S::decode(&best_code, decoder.as_ref(), 255)
                        {
                            // Only proceed if this decoder explains the bits as well as our best guess
                            if hamming == best_overall_h {
                                // Fetch canonical code
                                if let Some(mut target_bits) = decoder.get_code(id as u16) {
                                    // Rotate to match observed orientation
                                    for _ in 0..rot {
                                        target_bits = crate::dictionaries::rotate90(
                                            target_bits,
                                            decoder.dimension(),
                                        );
                                    }

                                    // Run GridFit with the CLEAN target pattern
                                    // NOTE: GridFit uses contrast against binary target.
                                    let refined = crate::decoder::refine_corners_gridfit(
                                        refinement_img,
                                        &current_corners,
                                        decoder.as_ref(), // Use the decoder to get sample points
                                        target_bits, // Maximize contrast against GROUND TRUTH CODE
                                    );

                                    // Check if refined corners actually improve Hamming
                                    if let Some(h) =
                                        crate::decoder::Homography::square_to_quad(&refined)
                                        && let Some(code) = crate::decoder::sample_grid_generic::<S>(
                                            refinement_img,
                                            &h,
                                            decoder.as_ref(),
                                        )
                                        && let Some((id, hamming, rot)) =
                                            S::decode(&code, decoder.as_ref(), 255)
                                        && hamming < best_overall_h
                                    {
                                        best_overall_h = hamming;
                                        best_overall_code = Some(code.clone());
                                        current_corners = refined;
                                        // Success check
                                        if hamming <= if decoder.name() == "36h11" { 4 } else { 1 }
                                        {
                                            cand.id = id;
                                            cand.hamming = hamming;
                                            cand.bits = S::to_debug_bits(&code);
                                            cand.corners = current_corners;

                                            // Fix rotation
                                            let mut reordered = [[0.0; 2]; 4];
                                            for (i, item) in reordered.iter_mut().enumerate() {
                                                let src_idx = (i + usize::from(rot)) % 4;
                                                *item = cand.corners[src_idx];
                                            }
                                            cand.corners = reordered;

                                            return (
                                                Some(cand),
                                                false,
                                                false,
                                                hamming,
                                                S::to_debug_bits(&code),
                                            );
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                crate::config::CornerRefinementMode::Erf => {
                    // Same hill-climbing search as Edge, followed by a final ERF
                    // polish on a successful decode.
                    let nudge = 0.2;
                    for _pass in 0..2 {
                        let mut pass_improved = false;
                        for c_idx in 0..4 {
                            for (dx, dy) in
                                [(nudge, 0.0), (-nudge, 0.0), (0.0, nudge), (0.0, -nudge)]
                            {
                                let mut test_corners = current_corners;
                                test_corners[c_idx][0] += dx;
                                test_corners[c_idx][1] += dy;

                                if let Some(h) =
                                    crate::decoder::Homography::square_to_quad(&test_corners)
                                {
                                    for decoder in active_decoders {
                                        if let Some(code) = crate::decoder::sample_grid_generic::<S>(
                                            refinement_img,
                                            &h,
                                            decoder.as_ref(),
                                        ) && let Some((id, hamming, rot)) =
                                            S::decode(&code, decoder.as_ref(), 255)
                                            && hamming < best_overall_h
                                        {
                                            best_overall_h = hamming;
                                            best_overall_code = Some(code.clone());
                                            current_corners = test_corners;
                                            pass_improved = true;

                                            if hamming
                                                <= if decoder.name() == "36h11" { 4 } else { 1 }
                                            {
                                                cand.id = id;
                                                cand.hamming = hamming;
                                                cand.bits = S::to_debug_bits(&code);
                                                cand.corners = current_corners;

                                                // Final ERF polish for maximum precision
                                                let decode_arena = bumpalo::Bump::new();
                                                let refined = crate::decoder::refine_corners_erf(
                                                    &decode_arena,
                                                    refinement_img,
                                                    &cand.corners,
                                                    config.subpixel_refinement_sigma,
                                                );

                                                // Verify refined corners
                                                if let Some(h_ref) =
                                                    crate::decoder::Homography::square_to_quad(
                                                        &refined,
                                                    )
                                                    && let Some(code_ref) =
                                                        crate::decoder::sample_grid_generic::<S>(
                                                            refinement_img,
                                                            &h_ref,
                                                            decoder.as_ref(),
                                                        )
                                                    && let Some((id_ref, hamming_ref, _)) =
                                                        S::decode(&code_ref, decoder.as_ref(), 255)
                                                    && id_ref == id
                                                    && hamming_ref <= hamming
                                                {
                                                    cand.corners = refined;
                                                    cand.hamming = hamming_ref;
                                                    cand.bits = S::to_debug_bits(&code_ref);
                                                }

                                                // Fix rotation (applied after the polish).
                                                let mut reordered = [[0.0; 2]; 4];
                                                for (i, item) in reordered.iter_mut().enumerate() {
                                                    let src_idx = (i + usize::from(rot)) % 4;
                                                    *item = cand.corners[src_idx];
                                                }
                                                cand.corners = reordered;
                                                return (
                                                    Some(cand),
                                                    false,
                                                    false,
                                                    hamming,
                                                    S::to_debug_bits(&code),
                                                );
                                            }
                                        }
                                    }
                                }
                            }
                        }
                        // Stop early once a full pass yields no improvement.
                        if !pass_improved {
                            break;
                        }
                    }
                },
            }
        }

        // No decode accepted: report whether contrast was the blocker, and the
        // best attempt for debug output.
        (
            None,
            !passed_contrast,
            passed_contrast,
            best_overall_h,
            best_overall_code.map_or(0, |c| S::to_debug_bits(&c)),
        )
    }
976}
977
978impl Default for Detector {
979 fn default() -> Self {
980 Self::new()
981 }
982}
983
/// Returns version and build information for the core library.
///
/// The version is read from crate metadata at compile time so the string cannot
/// drift from `Cargo.toml`; falls back to "0.1.0" when built outside cargo.
#[must_use]
pub fn core_info() -> String {
    let version = option_env!("CARGO_PKG_VERSION").unwrap_or("0.1.0");
    format!("Locus Core v{version} Engine")
}