// feagi_agent/sdk/sensory/video/encoder.rs
1//! Video encoder implementation.
2
3use crate::core::SdkError;
4use crate::sdk::base::TopologyCache;
5use crate::sdk::sensory::traits::SensoryEncoder;
6use crate::sdk::sensory::video::config::{VideoEncoderConfig, VideoEncodingStrategy};
7use crate::sdk::types::{
8    ColorChannelLayout, ColorSpace, CorticalChannelCount, CorticalChannelIndex, CorticalUnitIndex,
9    FrameChangeHandling, GazeProperties, ImageFrame, ImageFrameProperties, ImageXYResolution,
10    SegmentedImageFrameProperties, SensorDeviceCache, SensoryCorticalUnit, WrappedIOData,
11};
12use feagi_sensorimotor::data_pipeline::{PipelineStageProperties, PipelineStagePropertyIndex};
13use feagi_sensorimotor::data_types::descriptors::SegmentedXYImageResolutions;
14use feagi_sensorimotor::data_types::processing::ImageFrameProcessor;
15use feagi_sensorimotor::data_types::Percentage as SensorPercentage;
16use feagi_sensorimotor::data_types::{Percentage, Percentage2D};
17
/// Video encoder backed by a sensor cache.
///
/// Converts `ImageFrame`s into FEAGI neuron byte payloads using either the
/// simple or segmented vision encoding strategy.
pub struct VideoEncoder {
    // Encoding strategy plus tuning knobs (brightness/contrast/diff threshold).
    config: VideoEncoderConfig,
    // Sensor pipeline cache; devices are registered lazily on the first frame.
    cache: SensorDeviceCache,
    // Cortical areas targeted by this encoder, derived from the strategy.
    cortical_ids: Vec<crate::sdk::types::CorticalID>,
    // Channel count taken from the cortical topology.
    channel_count: CorticalChannelCount,
    // Segment resolutions/layouts; Some only for segmented vision.
    segmented_props: Option<SegmentedImageFrameProperties>,
    // Gaze position/modulation consumed by the segmentator pipeline stage.
    gaze_properties: GazeProperties,
    // Properties of the first encoded frame; None until the first encode() call.
    input_properties: Option<ImageFrameProperties>,
}
28
29impl VideoEncoder {
30    /// Create a new video encoder with topology-aware configuration.
31    pub async fn new(
32        config: VideoEncoderConfig,
33        topology_cache: &TopologyCache,
34    ) -> Result<Self, SdkError> {
35        let unit = CorticalUnitIndex::from(config.cortical_unit_id);
36        let frame = FrameChangeHandling::Absolute;
37
38        let cortical_ids = match config.encoding_strategy {
39            VideoEncodingStrategy::SimpleVision => {
40                SensoryCorticalUnit::get_cortical_ids_array_for_vision_with_parameters(frame, unit)
41                    .to_vec()
42            }
43            VideoEncodingStrategy::SegmentedVision => {
44                SensoryCorticalUnit::get_cortical_ids_array_for_segmented_vision_with_parameters(
45                    frame, unit,
46                )
47                .to_vec()
48            }
49        };
50
51        let (channel_count, segmented_props) = if config.encoding_strategy
52            == VideoEncodingStrategy::SegmentedVision
53        {
54            let topologies = topology_cache.get_topologies(&cortical_ids).await?;
55            let channels = topologies.first().map(|topo| topo.channels).unwrap_or(1);
56            let channel_count = CorticalChannelCount::new(channels)
57                .map_err(|e| SdkError::Other(format!("Segmented channel count invalid: {e}")))?;
58            let center_topo = topologies.get(4).ok_or_else(|| {
59                SdkError::Other("Segmented vision center topology missing".to_string())
60            })?;
61            let peripheral_topo = topologies.first().ok_or_else(|| {
62                SdkError::Other("Segmented vision peripheral topology missing".to_string())
63            })?;
64            let center_res = ImageXYResolution::new(center_topo.width, center_topo.height)
65                .map_err(|e| SdkError::Other(format!("Segmented center resolution: {e}")))?;
66            let peripheral_res =
67                ImageXYResolution::new(peripheral_topo.width, peripheral_topo.height).map_err(
68                    |e| SdkError::Other(format!("Segmented peripheral resolution: {e}")),
69                )?;
70            let resolutions = SegmentedXYImageResolutions::create_with_same_sized_peripheral(
71                center_res,
72                peripheral_res,
73            );
74
75            let center_layout = layout_from_depth(center_topo.depth)?;
76            let peripheral_layout = layout_from_depth(peripheral_topo.depth)?;
77            // TODO: allow caller-configurable ColorSpace and layouts.
78            let segmented_props = SegmentedImageFrameProperties::new(
79                resolutions,
80                center_layout,
81                peripheral_layout,
82                ColorSpace::Gamma,
83            );
84            (channel_count, Some(segmented_props))
85        } else {
86            let topo = topology_cache.get_topology(&cortical_ids[0]).await?;
87            let channel_count = CorticalChannelCount::new(topo.channels)
88                .map_err(|e| SdkError::Other(format!("Vision channel count invalid: {e}")))?;
89            (channel_count, None)
90        };
91
92        Ok(Self {
93            config,
94            cache: SensorDeviceCache::new(),
95            cortical_ids,
96            channel_count,
97            segmented_props,
98            gaze_properties: GazeProperties::create_default_centered(),
99            input_properties: None,
100        })
101    }
102
103    /// Set gaze properties for segmented vision encoding.
104    pub fn set_gaze_properties(&mut self, gaze: &GazeProperties) -> Result<(), SdkError> {
105        self.gaze_properties = *gaze;
106        self.update_segmented_gaze_stage()?;
107        Ok(())
108    }
109
110    /// Set gaze position and modulation for segmented vision.
111    pub fn set_gaze(&mut self, x: f32, y: f32, modulation: f32) -> Result<(), SdkError> {
112        let x_pct = Percentage::new_from_0_1(x)
113            .map_err(|e| SdkError::Other(format!("Invalid gaze x: {e}")))?;
114        let y_pct = Percentage::new_from_0_1(y)
115            .map_err(|e| SdkError::Other(format!("Invalid gaze y: {e}")))?;
116        let mod_pct = Percentage::new_from_0_1(modulation)
117            .map_err(|e| SdkError::Other(format!("Invalid gaze modulation: {e}")))?;
118        let pos = Percentage2D::new(x_pct, y_pct);
119        let gaze = GazeProperties::new(pos, mod_pct);
120        self.set_gaze_properties(&gaze)
121    }
122
123    /// Set brightness adjustment applied before encoding.
124    pub fn set_brightness(&mut self, brightness: i32) -> Result<(), SdkError> {
125        self.config.brightness = brightness;
126        self.update_segmented_tuning_stages()?;
127        Ok(())
128    }
129
130    /// Set contrast adjustment applied before encoding.
131    pub fn set_contrast(&mut self, contrast: f32) -> Result<(), SdkError> {
132        self.config.contrast = contrast;
133        self.update_segmented_tuning_stages()?;
134        Ok(())
135    }
136
137    /// Set diff threshold for segmented/simple vision encoders.
138    pub fn set_diff_threshold(&mut self, threshold: u8) -> Result<(), SdkError> {
139        self.config.diff_threshold = threshold;
140        self.update_segmented_tuning_stages()?;
141        Ok(())
142    }
143
144    /// Returns true if this encoder uses segmented vision.
145    pub fn is_segmented_vision(&self) -> bool {
146        self.config.encoding_strategy == VideoEncodingStrategy::SegmentedVision
147    }
148
149    fn update_segmented_tuning_stages(&mut self) -> Result<(), SdkError> {
150        if self.config.encoding_strategy != VideoEncodingStrategy::SegmentedVision {
151            return self.update_simple_vision_tuning_stages();
152        }
153        let Some(props) = self.input_properties else {
154            return Ok(());
155        };
156
157        let mut processor = ImageFrameProcessor::new(props);
158        processor
159            .set_brightness_offset(self.config.brightness)
160            .map_err(|e| SdkError::Other(format!("Segmented processor brightness failed: {e}")))?;
161        processor
162            .set_contrast_change(self.config.contrast)
163            .map_err(|e| SdkError::Other(format!("Segmented processor contrast failed: {e}")))?;
164
165        let processor_stage = PipelineStageProperties::new_image_frame_processor(processor);
166        let per_pixel_min = self.config.diff_threshold.max(1);
167        let per_pixel_range = per_pixel_min..=u8::MAX;
168        let activity_min = SensorPercentage::new_from_0_1(0.0)
169            .map_err(|e| SdkError::Other(format!("Segmented diff activity min failed: {e}")))?;
170        let activity_max = SensorPercentage::new_from_0_1(1.0)
171            .map_err(|e| SdkError::Other(format!("Segmented diff activity max failed: {e}")))?;
172        let diff_stage = PipelineStageProperties::new_image_quick_diff(
173            per_pixel_range,
174            activity_min..=activity_max,
175            props,
176        );
177
178        let processor_index = PipelineStagePropertyIndex::from(0u32);
179        let diff_index = PipelineStagePropertyIndex::from(1u32);
180        let unit = CorticalUnitIndex::from(self.config.cortical_unit_id);
181        for channel in 0..*self.channel_count {
182            let channel_index = CorticalChannelIndex::from(channel);
183            self.cache
184                .segmented_vision_update_single_stage_properties(
185                    unit,
186                    channel_index,
187                    processor_index,
188                    processor_stage.clone(),
189                )
190                .map_err(|e| {
191                    SdkError::Other(format!(
192                        "Segmented vision update processor stage failed: {e}"
193                    ))
194                })?;
195            self.cache
196                .segmented_vision_update_single_stage_properties(
197                    unit,
198                    channel_index,
199                    diff_index,
200                    diff_stage.clone(),
201                )
202                .map_err(|e| {
203                    SdkError::Other(format!("Segmented vision update diff stage failed: {e}"))
204                })?;
205        }
206
207        Ok(())
208    }
209
210    fn update_simple_vision_tuning_stages(&mut self) -> Result<(), SdkError> {
211        if self.config.encoding_strategy != VideoEncodingStrategy::SimpleVision {
212            return Ok(());
213        }
214        let Some(props) = self.input_properties else {
215            return Ok(());
216        };
217
218        let mut processor = ImageFrameProcessor::new(props);
219        processor
220            .set_brightness_offset(self.config.brightness)
221            .map_err(|e| SdkError::Other(format!("Vision processor brightness failed: {e}")))?;
222        processor
223            .set_contrast_change(self.config.contrast)
224            .map_err(|e| SdkError::Other(format!("Vision processor contrast failed: {e}")))?;
225
226        let processor_stage = PipelineStageProperties::new_image_frame_processor(processor);
227        let per_pixel_min = self.config.diff_threshold.max(1);
228        let per_pixel_range = per_pixel_min..=u8::MAX;
229        let activity_min = SensorPercentage::new_from_0_1(0.0)
230            .map_err(|e| SdkError::Other(format!("Vision diff activity min failed: {e}")))?;
231        let activity_max = SensorPercentage::new_from_0_1(1.0)
232            .map_err(|e| SdkError::Other(format!("Vision diff activity max failed: {e}")))?;
233        let diff_stage = PipelineStageProperties::new_image_quick_diff(
234            per_pixel_range,
235            activity_min..=activity_max,
236            props,
237        );
238
239        let processor_index = PipelineStagePropertyIndex::from(0u32);
240        let unit = CorticalUnitIndex::from(self.config.cortical_unit_id);
241        for channel in 0..*self.channel_count {
242            let channel_index = CorticalChannelIndex::from(channel);
243            let segmentator_stage = self
244                .cache
245                .vision_get_single_stage_properties(unit, channel_index, processor_index)
246                .map_err(|e| {
247                    SdkError::Other(format!("Vision fetch segmentator stage failed: {e}"))
248                })?;
249            let new_pipeline = vec![
250                processor_stage.clone(),
251                diff_stage.clone(),
252                segmentator_stage,
253            ];
254            self.cache
255                .vision_replace_all_stages(unit, channel_index, new_pipeline)
256                .map_err(|e| SdkError::Other(format!("Vision replace pipeline failed: {e}")))?;
257        }
258
259        Ok(())
260    }
261
262    fn update_segmented_gaze_stage(&mut self) -> Result<(), SdkError> {
263        if self.config.encoding_strategy != VideoEncodingStrategy::SegmentedVision {
264            return Ok(());
265        }
266        let Some(props) = self.input_properties else {
267            return Ok(());
268        };
269        let Some(segmented_props) = self.segmented_props else {
270            return Ok(());
271        };
272
273        let segmentator_stage = PipelineStageProperties::new_image_frame_segmentator(
274            props,
275            segmented_props,
276            self.gaze_properties,
277        );
278        let segmentator_index = PipelineStagePropertyIndex::from(2u32);
279        let unit = CorticalUnitIndex::from(self.config.cortical_unit_id);
280        for channel in 0..*self.channel_count {
281            let channel_index = CorticalChannelIndex::from(channel);
282            self.cache
283                .segmented_vision_update_single_stage_properties(
284                    unit,
285                    channel_index,
286                    segmentator_index,
287                    segmentator_stage.clone(),
288                )
289                .map_err(|e| {
290                    SdkError::Other(format!("Segmented vision update gaze stage failed: {e}"))
291                })?;
292        }
293
294        Ok(())
295    }
296}
297
298impl SensoryEncoder for VideoEncoder {
299    type Input = ImageFrame;
300
301    fn encode(&mut self, input: &Self::Input) -> Result<Vec<u8>, SdkError> {
302        let unit = CorticalUnitIndex::from(self.config.cortical_unit_id);
303        let channel = CorticalChannelIndex::from(0u32);
304
305        if self.input_properties.is_none() {
306            let props = input.get_image_frame_properties();
307            self.input_properties = Some(props);
308            match self.config.encoding_strategy {
309                VideoEncodingStrategy::SimpleVision => {
310                    self.cache
311                        .vision_register(
312                            unit,
313                            self.channel_count,
314                            FrameChangeHandling::Absolute,
315                            props,
316                        )
317                        .map_err(|e| SdkError::Other(format!("Vision register failed: {e}")))?;
318                }
319                VideoEncodingStrategy::SegmentedVision => {
320                    let segmented_props = self.segmented_props.ok_or_else(|| {
321                        SdkError::Other("Segmented vision properties missing".to_string())
322                    })?;
323                    self.cache
324                        .segmented_vision_register(
325                            unit,
326                            self.channel_count,
327                            FrameChangeHandling::Absolute,
328                            props,
329                            segmented_props,
330                            self.gaze_properties,
331                        )
332                        .map_err(|e| {
333                            SdkError::Other(format!("Segmented vision register failed: {e}"))
334                        })?;
335
336                    let mut processor = ImageFrameProcessor::new(props);
337                    processor
338                        .set_brightness_offset(self.config.brightness)
339                        .map_err(|e| {
340                            SdkError::Other(format!("Segmented processor brightness failed: {e}"))
341                        })?;
342                    processor
343                        .set_contrast_change(self.config.contrast)
344                        .map_err(|e| {
345                            SdkError::Other(format!("Segmented processor contrast failed: {e}"))
346                        })?;
347
348                    let processor_stage =
349                        PipelineStageProperties::new_image_frame_processor(processor);
350                    let per_pixel_min = self.config.diff_threshold.max(1);
351                    let per_pixel_range = per_pixel_min..=u8::MAX;
352                    let activity_min = SensorPercentage::new_from_0_1(0.0).map_err(|e| {
353                        SdkError::Other(format!("Segmented diff activity min failed: {e}"))
354                    })?;
355                    let activity_max = SensorPercentage::new_from_0_1(1.0).map_err(|e| {
356                        SdkError::Other(format!("Segmented diff activity max failed: {e}"))
357                    })?;
358                    let diff_stage = PipelineStageProperties::new_image_quick_diff(
359                        per_pixel_range,
360                        activity_min..=activity_max,
361                        props,
362                    );
363
364                    let stage_index = PipelineStagePropertyIndex::from(0u32);
365                    for channel in 0..*self.channel_count {
366                        let channel_index = CorticalChannelIndex::from(channel);
367                        let segmentor_stage = self
368                            .cache
369                            .segmented_vision_get_single_stage_properties(
370                                unit,
371                                channel_index,
372                                stage_index,
373                            )
374                            .map_err(|e| {
375                                SdkError::Other(format!(
376                                    "Segmented vision fetch segmentor stage failed: {e}"
377                                ))
378                            })?;
379                        let new_pipeline =
380                            vec![processor_stage.clone(), diff_stage.clone(), segmentor_stage];
381                        self.cache
382                            .segmented_vision_replace_all_stages(unit, channel_index, new_pipeline)
383                            .map_err(|e| {
384                                SdkError::Other(format!(
385                                    "Segmented vision replace pipeline failed: {e}"
386                                ))
387                            })?;
388                    }
389                }
390            }
391        }
392
393        match self.config.encoding_strategy {
394            VideoEncodingStrategy::SimpleVision => {
395                // TODO: apply brightness/contrast/diff threshold preprocessing here.
396                self.cache
397                    .vision_write(unit, channel, WrappedIOData::ImageFrame(input.clone()))
398                    .map_err(|e| SdkError::Other(format!("Vision write failed: {e}")))?;
399            }
400            VideoEncodingStrategy::SegmentedVision => {
401                // TODO: apply brightness/contrast/diff threshold preprocessing here.
402                self.cache
403                    .segmented_vision_write(unit, channel, input.clone().into())
404                    .map_err(|e| SdkError::Other(format!("Segmented write failed: {e}")))?;
405            }
406        }
407
408        self.cache
409            .encode_all_sensors_to_neurons(std::time::Instant::now())
410            .map_err(|e| SdkError::Other(format!("Video sensor encode failed: {e}")))?;
411        self.cache
412            .encode_neurons_to_bytes()
413            .map_err(|e| SdkError::Other(format!("Video byte encode failed: {e}")))?;
414
415        Ok(self
416            .cache
417            .get_feagi_byte_container()
418            .get_byte_ref()
419            .to_vec())
420    }
421
422    fn cortical_ids(&self) -> &[crate::sdk::types::CorticalID] {
423        &self.cortical_ids
424    }
425}
426
427fn layout_from_depth(depth: u32) -> Result<ColorChannelLayout, SdkError> {
428    match depth {
429        1 => Ok(ColorChannelLayout::GrayScale),
430        3 => Ok(ColorChannelLayout::RGB),
431        _ => Err(SdkError::Other(format!(
432            "Unsupported channel depth: {depth}"
433        ))),
434    }
435}