mecha10-nodes-object-detector 0.1.46

Real-time object detection node using YOLO and ONNX
//! Object Detector Node
//!
//! Real-time object detection node that:
//! - Subscribes to camera frames from any source
//! - Runs YOLO inference using ONNX Runtime
//! - Publishes detection results with bounding boxes
//! - Supports enable/disable control commands
//!
//! # Topic Interface
//!
//! - **Input:** `/camera/rgb` (CameraImage)
//! - **Output:** `/vision/object/detections` (DetectionResult)
//! - **Control:** `/vision/object/control` (InferenceCommand)
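//!
//! # Example
//!
//! A minimal, illustrative sketch of another node consuming this node's
//! detections, assuming the crate is used as a library and the same
//! `Context`/`Topic` API that `run()` uses below:
//!
//! ```no_run
//! use mecha10_core::prelude::*;
//! use mecha10_core::topics::Topic;
//! use mecha10_nodes_object_detector::DetectionResult;
//!
//! async fn consume_detections() -> anyhow::Result<()> {
//!     let ctx = Context::new("detection-consumer").await?;
//!     let detections = Topic::<DetectionResult>::new("/vision/object/detections");
//!     let mut rx = ctx.subscribe(detections).await?;
//!     while let Some(result) = rx.recv().await {
//!         println!("{} objects in {:.1} ms", result.detections.len(), result.inference_time_ms);
//!     }
//!     Ok(())
//! }
//! ```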

mod config;

pub use config::ObjectDetectorConfig;

use anyhow::{Context as AnyhowContext, Result};
use mecha10_core::health::HealthReportingExt;
use mecha10_core::messages::{HealthStatus, Message};
use mecha10_core::prelude::*;
use mecha10_core::topics::Topic;
use ort::session::builder::GraphOptimizationLevel;
use ort::session::Session;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::time::Instant;
use tracing::info;

// === Message Types ===

/// Camera image message (reused from simulation-bridge)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CameraImage {
    pub camera_id: String,
    pub width: u32,
    pub height: u32,
    pub timestamp: u64,
    #[serde(with = "serde_bytes")]
    pub image_bytes: Vec<u8>,
    pub format: String,
}

impl Message for CameraImage {}

/// Bounding box in normalized coordinates (0-1)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BoundingBox {
    pub x: f32,
    pub y: f32,
    pub width: f32,
    pub height: f32,
}

impl BoundingBox {
    pub fn new(x: f32, y: f32, width: f32, height: f32) -> Self {
        Self { x, y, width, height }
    }

    /// Calculate Intersection over Union (IoU) with another box
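    ///
    /// A worked example with hypothetical boxes (a doc-test sketch, assuming this
    /// type stays exported from the crate root):
    ///
    /// ```
    /// use mecha10_nodes_object_detector::BoundingBox;
    ///
    /// let a = BoundingBox::new(0.0, 0.0, 0.5, 0.5);
    /// let b = BoundingBox::new(0.25, 0.25, 0.5, 0.5);
    /// // Intersection: 0.25 * 0.25 = 0.0625; union: 0.25 + 0.25 - 0.0625 = 0.4375.
    /// assert!((a.iou(&b) - 0.0625 / 0.4375).abs() < 1e-6);
    /// ```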
    pub fn iou(&self, other: &BoundingBox) -> f32 {
        let x1 = self.x.max(other.x);
        let y1 = self.y.max(other.y);
        let x2 = (self.x + self.width).min(other.x + other.width);
        let y2 = (self.y + self.height).min(other.y + other.height);

        if x2 < x1 || y2 < y1 {
            return 0.0;
        }

        let intersection = (x2 - x1) * (y2 - y1);
        let union = (self.width * self.height) + (other.width * other.height) - intersection;

        intersection / union
    }
}

/// Single object detection
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Detection {
    pub class_id: u32,
    pub class_name: String,
    pub confidence: f32,
    pub bbox: BoundingBox,
}

/// Detection result message (matches dashboard interface)
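///
/// A sketch of building one result by hand (hypothetical values), mainly to show
/// the field layout the dashboard consumes:
///
/// ```
/// use mecha10_nodes_object_detector::{BoundingBox, Detection, DetectionResult};
///
/// let result = DetectionResult {
///     frame_id: 42,
///     timestamp: 1_700_000_000_000_000,
///     detections: vec![Detection {
///         class_id: 0,
///         class_name: "person".to_string(),
///         confidence: 0.91,
///         bbox: BoundingBox::new(0.1, 0.2, 0.3, 0.5),
///     }],
///     inference_time_ms: 18.4,
///     model_name: "yolov8n".to_string(),
/// };
/// assert_eq!(result.detections.len(), 1);
/// ```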
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DetectionResult {
    pub frame_id: u64,
    pub timestamp: u64,
    pub detections: Vec<Detection>,
    pub inference_time_ms: f32,
    pub model_name: String,
}

impl Message for DetectionResult {}

/// Inference control commands
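///
/// An illustrative payload for the `set_threshold` action handled in
/// `handle_control` below (values are hypothetical):
///
/// ```
/// use mecha10_nodes_object_detector::InferenceCommand;
///
/// let cmd = InferenceCommand {
///     action: "set_threshold".to_string(),
///     params: Some(serde_json::json!({ "confidence": 0.5 })),
/// };
/// // Serializes to: {"action":"set_threshold","params":{"confidence":0.5}}
/// let _json = serde_json::to_string(&cmd).expect("command serializes");
/// ```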
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InferenceCommand {
    pub action: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub params: Option<serde_json::Value>,
}

impl Message for InferenceCommand {}

/// Inference state
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum InferenceMode {
    Idle,
    Active,
}

// === Object Detector Node ===

pub struct ObjectDetectorNode {
    config: ObjectDetectorConfig,
    session: Option<Session>,
    frame_count: u64,
    mode: InferenceMode,
    class_names: Vec<String>,
    /// Semaphore to limit concurrent async preprocessing tasks
    preprocessing_semaphore: std::sync::Arc<tokio::sync::Semaphore>,
}

impl ObjectDetectorNode {
    pub fn new(config: ObjectDetectorConfig) -> Self {
        // Limit concurrent preprocessing based on config
        // This prevents memory buildup if frames arrive faster than we can infer
        let max_concurrent = config.max_async_frames.max(1); // At least 1
        let preprocessing_semaphore = std::sync::Arc::new(tokio::sync::Semaphore::new(max_concurrent));

        Self {
            config,
            session: None,
            frame_count: 0,
            mode: InferenceMode::Idle,
            class_names: Self::get_coco_class_names(),
            preprocessing_semaphore,
        }
    }

    /// Handle inference control command
    fn handle_control(&mut self, cmd: InferenceCommand) {
        info!("🎮 Received control command: {:?}", cmd.action);
        match cmd.action.as_str() {
            "enable" => {
                self.mode = InferenceMode::Active;
                info!("✅ Object detection enabled");
            }
            "disable" => {
                self.mode = InferenceMode::Idle;
                info!("⏸️  Object detection disabled");
            }
            "set_threshold" => {
                if let Some(params) = &cmd.params {
                    if let Some(conf) = params.get("confidence").and_then(|v| v.as_f64()) {
                        self.config.confidence_threshold = conf as f32;
                        info!("🎯 Confidence threshold set to {:.2}", conf);
                    }
                }
            }
            _ => {
                tracing::warn!("❌ Unknown command: {}", cmd.action);
            }
        }
    }

    /// Process a camera image and return detections
    async fn detect_objects(&mut self, msg: &CameraImage) -> Result<DetectionResult> {
        let start = Instant::now();

        // OPTIMIZATION: Run CPU-bound preprocessing in separate thread to avoid blocking
        // This allows overlap with GPU inference from previous frame
        // Semaphore limits concurrent preprocessing to prevent memory buildup
        let permit = self.preprocessing_semaphore.clone().acquire_owned().await?;

        let msg_clone = msg.clone();
        let input_size = self.config.input_size;

        let input_tensor = tokio::task::spawn_blocking(move || -> Result<ndarray::Array4<f32>> {
            // 1. Decode image (CPU-bound)
            let image = Self::decode_camera_image_static(&msg_clone)?;

            // 2. Preprocess for YOLO (CPU-bound: resize + normalize)
            let result = Self::preprocess_yolo_image_static(image, input_size);

            // Permit is dropped here, allowing next frame to preprocess
            drop(permit);

            result
        })
        .await??;

        // 3. Run ONNX inference and extract output in a separate scope
        let output_vec: Vec<f32> = {
            let input_value = ort::value::TensorRef::from_array_view(input_tensor.view())?;

            let session = self.session.as_mut().context("ONNX session not initialized")?;
            let outputs = session.run(ort::inputs![input_value])?;

            // Extract output tensor and copy data (YOLOv8 format: [1, 84, 8400])
            let output_array: ndarray::ArrayViewD<f32> = outputs[0]
                .try_extract_array()
                .context("Failed to extract output tensor")?;
            output_array
                .as_slice()
                .context("Failed to get tensor data as slice")?
                .to_vec()
        };
        // outputs and session borrow are dropped here

        // 4. Process YOLO output to detections
        let detections = self.process_yolo_output(&output_vec, msg.width, msg.height)?;

        let inference_time_ms = start.elapsed().as_secs_f32() * 1000.0;

        Ok(DetectionResult {
            frame_id: self.frame_count,
            timestamp: msg.timestamp,
            detections,
            inference_time_ms,
            model_name: self.config.model_name.clone(),
        })
    }

    /// Decode camera image from various formats (static version for async)
    fn decode_camera_image_static(msg: &CameraImage) -> Result<image::DynamicImage> {
        let bytes = &msg.image_bytes;

        let image = match msg.format.as_str() {
            "jpeg" | "jpg" => image::load_from_memory_with_format(bytes, image::ImageFormat::Jpeg)?,
            "png" => image::load_from_memory_with_format(bytes, image::ImageFormat::Png)?,
            "rgb" | "rgb8" => {
                let img = image::RgbImage::from_raw(msg.width, msg.height, bytes.clone())
                    .context("Failed to create RGB image from raw bytes")?;
                image::DynamicImage::ImageRgb8(img)
            }
            _ => anyhow::bail!("Unsupported image format: {}", msg.format),
        };

        Ok(image)
    }

    /// Decode camera image from various formats (instance method for compatibility)
    #[allow(dead_code)]
    fn decode_camera_image(&self, msg: &CameraImage) -> Result<image::DynamicImage> {
        Self::decode_camera_image_static(msg)
    }

    /// Preprocess image for YOLO (static version for async)
    fn preprocess_yolo_image_static(image: image::DynamicImage, size: u32) -> Result<ndarray::Array4<f32>> {
        // Resize to square using Triangle filter (much faster than Lanczos3, minimal quality loss for object detection)
        let resized = image.resize_exact(size, size, image::imageops::FilterType::Triangle);
        let rgb = resized.to_rgb8();

        // Create tensor [1, 3, H, W] with normalization [0, 1]
        let mut array = ndarray::Array4::<f32>::zeros((1, 3, size as usize, size as usize));

        for (x, y, pixel) in rgb.enumerate_pixels() {
            array[[0, 0, y as usize, x as usize]] = pixel[0] as f32 / 255.0;
            array[[0, 1, y as usize, x as usize]] = pixel[1] as f32 / 255.0;
            array[[0, 2, y as usize, x as usize]] = pixel[2] as f32 / 255.0;
        }

        Ok(array)
    }

    /// Preprocess image for YOLO (instance method for compatibility)
    #[allow(dead_code)]
    fn preprocess_yolo_image(&self, image: image::DynamicImage) -> Result<ndarray::Array4<f32>> {
        Self::preprocess_yolo_image_static(image, self.config.input_size)
    }

    /// Process YOLO output tensor to detection list
    fn process_yolo_output(&self, output_slice: &[f32], _img_width: u32, _img_height: u32) -> Result<Vec<Detection>> {
        // YOLOv8 output format: [1, 84, 8400]
        // 84 = 4 bbox coords + 80 class scores
        let num_detections = 8400;
        let num_classes = 80;
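        // The [84, 8400] output arrives flattened row-major, so element (row, anchor)
        // lives at `output_slice[row * num_detections + anchor]`: rows 0..4 hold
        // cx, cy, w, h and rows 4..84 hold the 80 per-class scores read below.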

        let mut raw_detections = Vec::new();
        let mut max_score_seen = 0.0f32;

        for i in 0..num_detections {
            // Get bbox coordinates (center_x, center_y, width, height) - normalized to input size
            let cx = output_slice[i];
            let cy = output_slice[num_detections + i];
            let w = output_slice[2 * num_detections + i];
            let h = output_slice[3 * num_detections + i];

            // Find best class and confidence (optimized with iterator)
            let (max_class_idx, max_score) = (0..num_classes)
                .map(|c| {
                    let score = output_slice[(4 + c) * num_detections + i];
                    (c, score)
                })
                .max_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal))
                .unwrap_or((0, 0.0));

            // Track maximum score seen across all anchors for debugging
            if max_score > max_score_seen {
                max_score_seen = max_score;
            }

            if max_score >= self.config.confidence_threshold {
                // Convert from center format to corner format
                // Normalize to 0-1 based on input size
                let input_size = self.config.input_size as f32;
                let x = (cx - w / 2.0) / input_size;
                let y = (cy - h / 2.0) / input_size;
                let width = w / input_size;
                let height = h / input_size;

                raw_detections.push(Detection {
                    class_id: max_class_idx as u32,
                    class_name: self
                        .class_names
                        .get(max_class_idx)
                        .cloned()
                        .unwrap_or_else(|| format!("class_{}", max_class_idx)),
                    confidence: max_score,
                    bbox: BoundingBox::new(x, y, width, height),
                });
            }
        }

        // Apply Non-Maximum Suppression
        let filtered = self.non_max_suppression(raw_detections);

        // Debug logging
        if filtered.is_empty() {
            tracing::debug!(
                "No detections above threshold {:.2}. Max score seen: {:.4}",
                self.config.confidence_threshold,
                max_score_seen
            );
        }

        Ok(filtered)
    }

    /// Apply Non-Maximum Suppression to filter overlapping detections
    fn non_max_suppression(&self, mut detections: Vec<Detection>) -> Vec<Detection> {
        // Sort by confidence (descending)
        detections.sort_by(|a, b| b.confidence.partial_cmp(&a.confidence).unwrap_or(std::cmp::Ordering::Equal));

        let mut keep = Vec::new();
        while !detections.is_empty() {
            let current = detections.remove(0);
            keep.push(current.clone());

            detections.retain(|det| {
                // Keep if different class or IoU below threshold
                det.class_id != current.class_id || det.bbox.iou(&current.bbox) < self.config.iou_threshold
            });
        }

        keep
    }

    /// Get COCO class names (80 classes for COCO dataset)
    fn get_coco_class_names() -> Vec<String> {
        vec![
            "person",
            "bicycle",
            "car",
            "motorcycle",
            "airplane",
            "bus",
            "train",
            "truck",
            "boat",
            "traffic light",
            "fire hydrant",
            "stop sign",
            "parking meter",
            "bench",
            "bird",
            "cat",
            "dog",
            "horse",
            "sheep",
            "cow",
            "elephant",
            "bear",
            "zebra",
            "giraffe",
            "backpack",
            "umbrella",
            "handbag",
            "tie",
            "suitcase",
            "frisbee",
            "skis",
            "snowboard",
            "sports ball",
            "kite",
            "baseball bat",
            "baseball glove",
            "skateboard",
            "surfboard",
            "tennis racket",
            "bottle",
            "wine glass",
            "cup",
            "fork",
            "knife",
            "spoon",
            "bowl",
            "banana",
            "apple",
            "sandwich",
            "orange",
            "broccoli",
            "carrot",
            "hot dog",
            "pizza",
            "donut",
            "cake",
            "chair",
            "couch",
            "potted plant",
            "bed",
            "dining table",
            "toilet",
            "tv",
            "laptop",
            "mouse",
            "remote",
            "keyboard",
            "cell phone",
            "microwave",
            "oven",
            "toaster",
            "sink",
            "refrigerator",
            "book",
            "clock",
            "vase",
            "scissors",
            "teddy bear",
            "hair drier",
            "toothbrush",
        ]
        .iter()
        .map(|s| s.to_string())
        .collect()
    }
}

// === Node Entry Point ===

pub async fn run() -> Result<()> {
    info!("🤖 Starting Object Detector Node");

    // Create context
    let ctx = Context::new("object-detector").await?;

    // Start automatic health reporting (reports healthy every 5 seconds)
    ctx.start_health_reporting(|| async { HealthStatus::healthy() }).await?;

    // Load config
    let config: ObjectDetectorConfig = ctx.load_node_config("object-detector").await?;
    info!("Configuration: {:?}", config);

    // Load ONNX model
    let model_path = PathBuf::from(&config.model_path);
    if !model_path.exists() {
        anyhow::bail!(
            "Model not found at {}. Please download a YOLOv8 ONNX model.",
            model_path.display()
        );
    }

    info!("📦 Loading model from: {}", model_path.display());

    // INT8 quantization support - use quantized model if enabled
    let final_model_path = if config.use_int8 {
        let int8_path =
            model_path.with_file_name(model_path.file_stem().unwrap().to_string_lossy().to_string() + "-int8.onnx");

        if int8_path.exists() {
            info!("🔢 Using INT8 quantized model for 2x speedup");
            int8_path
        } else {
            info!(
                "⚠️  INT8 enabled but quantized model not found at {}",
                int8_path.display()
            );
            info!("   Falling back to FP32 model. Run conversion script to create INT8 model.");
            model_path
        }
    } else {
        model_path
    };

    // Try to use hardware acceleration (CoreML on macOS, CUDA on Linux with GPU)
    let mut session_builder = Session::builder()?;

    // Configure for INT8 if enabled
    if config.use_int8 {
        session_builder = session_builder
            .with_optimization_level(GraphOptimizationLevel::Level3)?
            .with_intra_threads(4)?; // Parallel INT8 ops
    }

    let session = session_builder
        .with_execution_providers([
            // CoreML for macOS (Apple Silicon or Intel with Neural Engine)
            #[cfg(target_os = "macos")]
            ort::execution_providers::CoreMLExecutionProvider::default().build(),
            // CUDA for NVIDIA GPUs (Linux/Windows)
            #[cfg(not(target_os = "macos"))]
            ort::execution_providers::CUDAExecutionProvider::default().build(),
            // Fallback to CPU if hardware acceleration unavailable
            ort::execution_providers::CPUExecutionProvider::default().build(),
        ])?
        .commit_from_file(&final_model_path)?;

    info!(
        "✅ Model loaded successfully ({}) with hardware acceleration",
        if config.use_int8 { "INT8" } else { "FP32" }
    );

    // Create node
    let mut node = ObjectDetectorNode::new(config.clone());
    node.session = Some(session);

    // Setup topics
    let input_topic = config.input_topic();
    let output_topic = config.output_topic();
    let control_topic = config.control_topic();

    info!("📡 Input: {}", input_topic);
    info!("📡 Control: {}", control_topic);
    info!("📤 Output: {}", output_topic);
    info!(
        "⏸️  Starting in {} mode",
        if config.default_enabled { "ACTIVE" } else { "IDLE" }
    );

    if config.default_enabled {
        node.mode = InferenceMode::Active;
    }

    // Create topic references (leak strings for 'static lifetime)
    let input_topic_str: &'static str = Box::leak(input_topic.into_boxed_str());
    let control_topic_str: &'static str = Box::leak(control_topic.into_boxed_str());
    let output_topic_str: &'static str = Box::leak(output_topic.into_boxed_str());

    let camera_topic = Topic::<mecha10_core::messages::RedisMessage<CameraImage>>::new(input_topic_str);
    let control_topic = Topic::<InferenceCommand>::new(control_topic_str);
    let output_topic = Topic::<DetectionResult>::new(output_topic_str);

    // Subscribe
    let mut camera_receiver = ctx.subscribe(camera_topic).await?;
    let mut control_receiver = ctx.subscribe(control_topic).await?;

    info!("✅ Subscribed to camera and control topics");
    info!("🔄 Entering main processing loop with frame dropping (latest-only)");
    info!("🕐 System time at start: {} μs", now_micros());

    // Main loop
    loop {
        tokio::select! {
            // Handle control commands
            Some(control_msg) = control_receiver.recv() => {
                node.handle_control(control_msg);
            }

            // Handle camera frames - with aggressive frame dropping for low latency
            Some(camera_envelope) = camera_receiver.recv() => {
                // CRITICAL: Drop all queued frames to ensure we process only the latest
                // This prevents processing stale frames during long inference times
                let mut latest_envelope = camera_envelope;
                let mut dropped_count = 0;

                // Drain channel to get the absolute latest frame
                while let Ok(newer_envelope) = camera_receiver.try_recv() {
                    latest_envelope = newer_envelope;
                    dropped_count += 1;
                }

                if dropped_count > 0 {
                    tracing::debug!("📉 Dropped {} old frames, processing latest", dropped_count);
                }

                // Check frame age for diagnostics (but don't reject - timestamps may use different epochs)
                let current_time = now_micros();
                let frame_timestamp = latest_envelope.timestamp;

                // Debug: Log timestamps on first few frames to diagnose epoch issues
                static FRAME_DEBUG_COUNT: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);
                let debug_count = FRAME_DEBUG_COUNT.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                if debug_count < 3 {
                    tracing::info!(
                        "🕐 Timestamp debug: system={} μs, frame={} μs, diff={} μs",
                        current_time,
                        frame_timestamp,
                        current_time.saturating_sub(frame_timestamp)
                    );
                }

                // Only log frame age if it seems reasonable (< 10 seconds old)
                // This handles cases where Godot and system use different time bases
                if current_time > frame_timestamp {
                    let frame_age_us = current_time - frame_timestamp;
                    let frame_age_ms = frame_age_us as f64 / 1000.0;

                    if frame_age_ms > 1000.0 && frame_age_ms < 10_000.0 {
                        // Frame is 1-10 seconds old - log warning but still process
                        tracing::warn!(
                            "⏰ Processing old frame ({:.1}ms old) - possible system lag",
                            frame_age_ms
                        );
                    } else if frame_age_ms > 100.0 && frame_age_ms < 1000.0 {
                        // Frame is 100ms-1s old - debug log only
                        tracing::debug!("⏱️  Frame age: {:.1}ms", frame_age_ms);
                    }
                    // If > 10s, likely timestamp epoch mismatch - ignore
                }

                let camera_msg = latest_envelope.payload;

                if node.mode == InferenceMode::Active {
                    node.frame_count += 1;

                    // Process every Nth frame based on config (treat frame_skip == 0 as 1 to avoid division by zero)
                    if node.frame_count % (config.frame_skip as u64).max(1) == 0 {
                        match node.detect_objects(&camera_msg).await {
                            Ok(result) => {
                                info!(
                                    "🎯 Detected {} objects in {:.1}ms",
                                    result.detections.len(),
                                    result.inference_time_ms
                                );

                                // Log detected classes
                                for det in &result.detections {
                                    info!(
                                        "  - {} ({:.1}%)",
                                        det.class_name,
                                        det.confidence * 100.0
                                    );
                                }

                                // Publish detections
                                ctx.publish_to(output_topic, &result).await?;
                            }
                            Err(e) => {
                                tracing::warn!("❌ Detection failed: {}", e);
                            }
                        }
                    }
                }
            }
        }
    }
}