// videocall_cli/producers/test_pattern_sender.rs

use crate::producers::camera::since_the_epoch;
use crate::producers::producer::Producer;

use anyhow::{Context, Result};
use image::imageops::FilterType;
use image::{ImageBuffer, ImageReader, Rgb};
use tokio::sync::mpsc::{self, Sender};
use tracing::{debug, error};
use videocall_nokhwa::utils::FrameFormat;

use std::fs::read;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::thread::JoinHandle;
use std::time::Duration;

use super::camera::{CameraConfig, CameraPacket};
use super::encoder_thread::encoder_thread;
21
/// Producer that streams a pre-rendered, looping test pattern in place of a
/// real camera, feeding frames through an encoder thread to a QUIC channel.
pub struct TestPatternSender {
    user_id: String,
    // Receiver half handed to the encoder thread on start(); taken exactly once.
    cam_rx: Option<mpsc::Receiver<Option<CameraPacket>>>,
    // Sender half used by the pattern-generator thread.
    cam_tx: Arc<mpsc::Sender<Option<CameraPacket>>>,
    // Outbound channel carrying encoded frames to the transport layer.
    quic_tx: Arc<Sender<Vec<u8>>>,
    // Shared shutdown flag polled by the worker threads.
    quit: Arc<AtomicBool>,
    // Join handles for the spawned camera and encoder threads.
    handles: Vec<JoinHandle<()>>,
    // Target resolution and framerate for the generated frames.
    config: CameraConfig,
}
31
32impl TestPatternSender {
33    pub fn from_config(config: CameraConfig, user_id: String, quic_tx: Sender<Vec<u8>>) -> Self {
34        let (cam_tx, cam_rx) = mpsc::channel(100);
35        Self {
36            config,
37            user_id,
38            cam_rx: Some(cam_rx),
39            cam_tx: Arc::new(cam_tx),
40            quit: Arc::new(AtomicBool::new(false)),
41            handles: vec![],
42            quic_tx: Arc::new(quic_tx),
43        }
44    }
45
46    fn camera_thread(&self) -> Result<JoinHandle<()>> {
47        let quit = self.quit.clone();
48        let cam_tx = self.cam_tx.clone();
49        let frame_format = FrameFormat::NV12;
50        let interval = Duration::from_millis(1000 / self.config.framerate as u64);
51        let mut frames = vec![];
52
53        for i in 120..125 {
54            let path = format!("assets/images/sample_video_save/output_{}.jpg", i);
55            let img = read(path).unwrap();
56            let img = ImageReader::new(std::io::Cursor::new(img))
57                .with_guessed_format()
58                .unwrap();
59            // Transform the image to NV12 format
60            let img = img.decode().unwrap();
61            // Resize the image to value in config
62            let img = img.resize_exact(self.config.width, self.config.height, FilterType::Nearest);
63            let img = rgb_to_nv12(&img.to_rgb8());
64            frames.push(img);
65        }
66        let mut iterator = frames.into_iter().cycle();
67        // rotate the image
68        Ok(std::thread::spawn(move || {
69            debug!("Camera opened... waiting for frames");
70            loop {
71                // Check if we should quit
72                if quit.load(std::sync::atomic::Ordering::Relaxed) {
73                    return;
74                }
75                // Generate a test pattern
76                // Send the frame to the encoder
77                // Try sending the frame over the channel
78                let next = iterator.next().unwrap();
79                if let Err(e) = cam_tx.try_send(Some(CameraPacket::new(
80                    next,
81                    frame_format,
82                    since_the_epoch().as_millis(),
83                ))) {
84                    error!("Error sending image: {}", e);
85                }
86                // Sleep for the interval
87                std::thread::sleep(interval);
88            }
89        }))
90    }
91}
92
93impl Producer for TestPatternSender {
94    fn start(&mut self) -> anyhow::Result<()> {
95        self.handles.push(self.camera_thread()?);
96        let encoder = encoder_thread(
97            self.cam_rx.take().unwrap(),
98            self.quic_tx.clone(),
99            self.quit.clone(),
100            self.config.clone(),
101            self.user_id.clone(),
102        );
103        self.handles.push(encoder);
104        Ok(())
105    }
106
107    fn stop(&mut self) -> anyhow::Result<()> {
108        todo!()
109    }
110}
111
// Converts an RGB image buffer into a planar YUV 4:2:0 buffer
// (width * height * 3 / 2 bytes).
//
// NOTE(review): despite the name, the helper below writes *I420* (planar U
// then V), not NV12 (interleaved UV) — yet the camera thread labels packets
// FrameFormat::NV12. Verify which layout the encoder actually expects
// before renaming or changing either side.
fn rgb_to_nv12(image: &ImageBuffer<Rgb<u8>, Vec<u8>>) -> Vec<u8> {
    let width = image.width() as usize;
    let height = image.height() as usize;
    // 4:2:0 layout: full-res Y plane plus two quarter-res chroma planes.
    let mut nv12_data = vec![0u8; width * height * 3 / 2];

    rgb_to_i420(image.as_raw(), width, height, &mut nv12_data);
    nv12_data
}
121
/// Converts packed RGB24 pixels into planar I420 (YUV 4:2:0).
///
/// Layout of `i420`: a full-resolution Y plane (`width * height` bytes)
/// followed by quarter-resolution U and V planes (`width * height / 4`
/// bytes each). Chroma is subsampled by taking the top-left pixel of each
/// 2x2 block (no averaging). Uses BT.601 limited-range ("studio swing")
/// coefficients, so Y lands in roughly [16, 235].
///
/// # Panics
/// Panics if `width` or `height` is odd, if `rgb` is shorter than
/// `width * height * 3`, or if `i420` is shorter than
/// `width * height * 3 / 2`.
pub fn rgb_to_i420(rgb: &[u8], width: usize, height: usize, i420: &mut [u8]) {
    // Odd dimensions would drive the chroma-plane indexing below out of
    // bounds; fail early with a clear message instead of a cryptic panic.
    assert!(
        width % 2 == 0 && height % 2 == 0,
        "rgb_to_i420 requires even dimensions, got {width}x{height}"
    );
    assert!(
        rgb.len() >= width * height * 3,
        "Insufficient RGB buffer size"
    );
    assert!(
        i420.len() >= width * height * 3 / 2,
        "Insufficient I420 buffer size"
    );

    let (y_plane, uv_planes) = i420.split_at_mut(width * height);
    let (u_plane, v_plane) = uv_planes.split_at_mut(width * height / 4);

    for y in 0..height {
        for x in 0..width {
            let rgb_index = (y * width + x) * 3;
            let r = rgb[rgb_index] as f32;
            let g = rgb[rgb_index + 1] as f32;
            let b = rgb[rgb_index + 2] as f32;

            // BT.601 limited-range luma; `as u8` saturates on out-of-range.
            let y_value = (0.257 * r + 0.504 * g + 0.098 * b + 16.0).round() as u8;
            y_plane[y * width + x] = y_value;

            // Compute chroma only for the one pixel per 2x2 block that is
            // actually stored (the original computed and discarded the rest).
            if y % 2 == 0 && x % 2 == 0 {
                let u_value = (-0.148 * r - 0.291 * g + 0.439 * b + 128.0).round() as u8;
                let v_value = (0.439 * r - 0.368 * g - 0.071 * b + 128.0).round() as u8;
                let uv_index = (y / 2) * (width / 2) + (x / 2);
                u_plane[uv_index] = u_value;
                v_plane[uv_index] = v_value;
            }
        }
    }
}