1use arrow::buffer::ScalarBuffer;
4use re_types_core::{Archetype as _, ArchetypeName};
5use smallvec::{SmallVec, smallvec};
6
7use crate::{
8 archetypes,
9 datatypes::{Blob, ChannelDatatype, TensorBuffer, TensorData},
10};
11
12#[cfg(feature = "image")]
13use crate::datatypes::ImageFormat;
14
/// The kind of image data, determining how pixel values should be interpreted.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum ImageKind {
    /// A plain color (or grayscale) image. This is the default kind.
    Color,

    /// A depth image; corresponds to the `DepthImage` archetype.
    Depth,

    /// A segmentation image; corresponds to the `SegmentationImage` archetype.
    Segmentation,
}
32
33impl ImageKind {
34 pub fn from_archetype_name(archetype_name: Option<ArchetypeName>) -> Self {
36 if archetype_name == Some(archetypes::SegmentationImage::name()) {
37 Self::Segmentation
38 } else if archetype_name == Some(archetypes::DepthImage::name()) {
39 Self::Depth
40 } else {
41 Self::Color
43 }
44 }
45}
46
impl re_byte_size::SizeBytes for ImageKind {
    /// A field-less `Copy` enum owns no heap memory.
    fn heap_size_bytes(&self) -> u64 {
        0
    }

    /// Plain-old-data: no indirection, trivially copyable.
    fn is_pod() -> bool {
        true
    }
}
56
/// Error returned when converting an image from the `image` crate fails.
#[cfg(feature = "image")]
#[derive(thiserror::Error, Clone, Debug)]
pub enum ImageConversionError {
    /// The image's color type is not one we can represent.
    #[error(
        "Unsupported color type: {0:?}. We support 8-bit, 16-bit, and f32 images, and RGB, RGBA, Luminance, and Luminance-Alpha."
    )]
    UnsupportedImageColorType(image::ColorType),
}
73
/// Error returned when loading an image from bytes or a file.
///
/// Error payloads are wrapped in `Arc` so the enum can stay `Clone`.
#[cfg(feature = "image")]
#[derive(thiserror::Error, Clone, Debug)]
pub enum ImageLoadError {
    /// Decoding failure from the `image` crate.
    #[error(transparent)]
    Image(std::sync::Arc<image::ImageError>),

    /// Decoding failure from the `tiff` crate.
    #[error(transparent)]
    Tiff(std::sync::Arc<tiff::TiffError>),

    /// I/O failure while reading the source file.
    #[error("Failed to load file: {0}")]
    ReadError(std::sync::Arc<std::io::Error>),

    /// The decoded image could not be converted (see [`ImageConversionError`]).
    #[error(transparent)]
    ImageConversionError(#[from] ImageConversionError),

    /// The given MIME type is not one we can decode as an image.
    #[error("MIME type '{0}' is not supported for images")]
    UnsupportedMimeType(String),

    /// No known MIME type could be detected from the image bytes.
    #[error("Could not detect MIME type from the image contents")]
    UnrecognizedMimeType,
}
102
// The `From` impls below wrap the source error in an `Arc` so that
// `ImageLoadError` can remain `Clone` even though the wrapped error types
// are not.

#[cfg(feature = "image")]
impl From<image::ImageError> for ImageLoadError {
    #[inline]
    fn from(err: image::ImageError) -> Self {
        Self::Image(std::sync::Arc::new(err))
    }
}

#[cfg(feature = "image")]
impl From<tiff::TiffError> for ImageLoadError {
    #[inline]
    fn from(err: tiff::TiffError) -> Self {
        Self::Tiff(std::sync::Arc::new(err))
    }
}

#[cfg(feature = "image")]
impl From<std::io::Error> for ImageLoadError {
    #[inline]
    fn from(err: std::io::Error) -> Self {
        Self::ReadError(std::sync::Arc::new(err))
    }
}
126
/// Error returned when constructing an image from some source data `T`
/// that is convertible into a [`TensorData`].
#[derive(thiserror::Error, Clone, Debug)]
pub enum ImageConstructionError<T: TryInto<TensorData>>
where
    T::Error: std::error::Error,
{
    /// The source value failed to convert into a [`TensorData`].
    #[error("Could not convert source to TensorData: {0}")]
    TensorDataConversion(T::Error),

    /// The tensor's shape could not be interpreted as an image.
    #[error("Could not create Image from TensorData with shape {0:?}")]
    BadImageShape(ScalarBuffer<u64>),

    /// Chroma-downsampled pixel formats are only meaningful for color images,
    /// not e.g. depth or segmentation images.
    #[error(
        "Chroma downsampling is not supported for this image type (e.g. DepthImage or SegmentationImage)"
    )]
    ChromaDownsamplingNotSupported,
}
149
150pub fn blob_and_datatype_from_tensor(tensor_buffer: TensorBuffer) -> (Blob, ChannelDatatype) {
152 match tensor_buffer {
153 TensorBuffer::U8(buffer) => (Blob(buffer), ChannelDatatype::U8),
154 TensorBuffer::U16(buffer) => (Blob(cast_to_u8(&buffer)), ChannelDatatype::U16),
155 TensorBuffer::U32(buffer) => (Blob(cast_to_u8(&buffer)), ChannelDatatype::U32),
156 TensorBuffer::U64(buffer) => (Blob(cast_to_u8(&buffer)), ChannelDatatype::U64),
157 TensorBuffer::I8(buffer) => (Blob(cast_to_u8(&buffer)), ChannelDatatype::I8),
158 TensorBuffer::I16(buffer) => (Blob(cast_to_u8(&buffer)), ChannelDatatype::I16),
159 TensorBuffer::I32(buffer) => (Blob(cast_to_u8(&buffer)), ChannelDatatype::I32),
160 TensorBuffer::I64(buffer) => (Blob(cast_to_u8(&buffer)), ChannelDatatype::I64),
161 TensorBuffer::F16(buffer) => (Blob(cast_to_u8(&buffer)), ChannelDatatype::F16),
162 TensorBuffer::F32(buffer) => (Blob(cast_to_u8(&buffer)), ChannelDatatype::F32),
163 TensorBuffer::F64(buffer) => (Blob(cast_to_u8(&buffer)), ChannelDatatype::F64),
164 }
165}
166
167#[inline]
169pub fn cast_to_u8<T: arrow::datatypes::ArrowNativeType>(
170 buffer: &arrow::buffer::ScalarBuffer<T>,
171) -> ScalarBuffer<u8> {
172 arrow::buffer::ScalarBuffer::new(buffer.inner().clone(), 0, buffer.inner().len())
173}
174
/// Types that can be used as the per-channel element type of an image.
///
/// Maps a Rust primitive to its corresponding [`ChannelDatatype`].
pub trait ImageChannelType: bytemuck::Pod {
    /// The [`ChannelDatatype`] describing `Self`.
    const CHANNEL_TYPE: ChannelDatatype;
}
184
185impl ImageChannelType for u8 {
186 const CHANNEL_TYPE: ChannelDatatype = ChannelDatatype::U8;
187}
188
189impl ImageChannelType for u16 {
190 const CHANNEL_TYPE: ChannelDatatype = ChannelDatatype::U16;
191}
192
193impl ImageChannelType for u32 {
194 const CHANNEL_TYPE: ChannelDatatype = ChannelDatatype::U32;
195}
196
197impl ImageChannelType for u64 {
198 const CHANNEL_TYPE: ChannelDatatype = ChannelDatatype::U64;
199}
200
201impl ImageChannelType for i8 {
202 const CHANNEL_TYPE: ChannelDatatype = ChannelDatatype::I8;
203}
204
205impl ImageChannelType for i16 {
206 const CHANNEL_TYPE: ChannelDatatype = ChannelDatatype::I16;
207}
208
209impl ImageChannelType for i32 {
210 const CHANNEL_TYPE: ChannelDatatype = ChannelDatatype::I32;
211}
212
213impl ImageChannelType for i64 {
214 const CHANNEL_TYPE: ChannelDatatype = ChannelDatatype::I64;
215}
216
217impl ImageChannelType for half::f16 {
218 const CHANNEL_TYPE: ChannelDatatype = ChannelDatatype::F16;
219}
220
221impl ImageChannelType for f32 {
222 const CHANNEL_TYPE: ChannelDatatype = ChannelDatatype::F32;
223}
224
225impl ImageChannelType for f64 {
226 const CHANNEL_TYPE: ChannelDatatype = ChannelDatatype::F64;
227}
228
229pub fn find_non_empty_dim_indices(shape: &[u64]) -> SmallVec<[usize; 4]> {
238 match shape.len() {
239 0 => return smallvec![],
240 1 => return smallvec![0],
241 2 => return smallvec![0, 1],
242 _ => {}
243 }
244
245 let mut non_unit_indices = shape
250 .iter()
251 .enumerate()
252 .filter_map(|(ind, &dim)| if dim != 1 { Some(ind) } else { None });
253
254 let mut min = non_unit_indices.next().unwrap_or(0);
256 let mut max = non_unit_indices.next_back().unwrap_or(min);
257
258 while max == min && max + 1 < shape.len() {
264 max += 1;
265 }
266
267 let target_len = match shape[max] {
272 3 | 4 => 3,
273 _ => 2,
274 };
275
276 while max - min + 1 < target_len && 0 < min {
277 min -= 1;
278 }
279
280 (min..=max).collect()
281}
282
#[test]
fn test_find_non_empty_dim_indices() {
    // Each case: input shape → expected kept dimension indices.
    let check = |shape: &[u64], expected: &[usize]| {
        let got = find_non_empty_dim_indices(shape);
        assert!(
            got.as_slice() == expected,
            "Input: {shape:?}, got {got:?}, expected {expected:?}"
        );
    };

    // Ranks 0 and 1 pass through unchanged.
    check(&[], &[]);
    check(&[0], &[0]);
    check(&[1], &[0]);
    check(&[100], &[0]);

    // Higher ranks: surrounding unit dimensions get stripped,
    // trailing 3/4 is treated as a channel dimension.
    check(&[480, 640], &[0, 1]);
    check(&[480, 640, 1], &[0, 1]);
    check(&[480, 640, 1, 1], &[0, 1]);
    check(&[480, 640, 3], &[0, 1, 2]);
    check(&[1, 480, 640], &[1, 2]);
    check(&[1, 480, 640, 3, 1], &[1, 2, 3]);
    check(&[1, 3, 480, 640, 1], &[1, 2, 3]);
    check(&[1, 1, 480, 640], &[2, 3]);
    check(&[1, 1, 480, 640, 1, 1], &[2, 3]);

    // Degenerate shapes where a `3` may or may not be a channel dimension.
    check(&[1, 1, 3], &[0, 1, 2]);
    check(&[1, 1, 3, 1], &[2, 3]);
}
311
/// Which YUV→RGB conversion matrix to use; see [`rgb_from_yuv`].
#[derive(Clone, Copy, Debug)]
pub enum YuvMatrixCoefficients {
    /// BT.601 — typically used for SD content.
    Bt601,

    /// BT.709 — typically used for HD content.
    Bt709,
}
356
357pub fn rgb_from_yuv(
363 y: u8,
364 u: u8,
365 v: u8,
366 limited_range: bool,
367 coefficients: YuvMatrixCoefficients,
368) -> [u8; 3] {
369 let (mut y, mut u, mut v) = (y as f32, u as f32, v as f32);
370
371 if limited_range {
373 y = (y - 16.0) / 219.0;
377 u = (u - 128.0) / 224.0;
378 v = (v - 128.0) / 224.0;
379 } else {
380 y /= 255.0;
381 u = (u - 128.0) / 255.0;
382 v = (v - 128.0) / 255.0;
383 }
384
385 let r;
386 let g;
387 let b;
388
389 match coefficients {
390 YuvMatrixCoefficients::Bt601 => {
391 r = y + 1.402 * v;
393 g = y - 0.344 * u - 0.714 * v;
394 b = y + 1.772 * u;
395 }
396
397 YuvMatrixCoefficients::Bt709 => {
398 r = y + 1.575 * v;
400 g = y - 0.187 * u - 0.468 * v;
401 b = y + 1.856 * u;
402 }
403 }
404
405 [(255.0 * r) as u8, (255.0 * g) as u8, (255.0 * b) as u8]
406}
407
/// Decodes a TIFF image from raw bytes into a [`Blob`] plus its [`ImageFormat`].
///
/// # Errors
/// Returns an [`ImageLoadError`] if the bytes cannot be decoded as TIFF.
#[cfg(feature = "image")]
pub fn blob_and_format_from_tiff(bytes: &[u8]) -> Result<(Blob, ImageFormat), ImageLoadError> {
    use tiff::decoder::DecodingResult;

    let cursor = std::io::Cursor::new(bytes);
    let mut decoder = tiff::decoder::Decoder::new(cursor)?;
    let img = decoder.read_image()?;

    // Reinterpret the decoded pixel data as raw bytes and record the
    // per-channel datatype it was decoded with.
    let (bytes, data_type): (&[u8], ChannelDatatype) = match &img {
        DecodingResult::U8(data) => (bytemuck::cast_slice(data), ChannelDatatype::U8),
        DecodingResult::U16(data) => (bytemuck::cast_slice(data), ChannelDatatype::U16),
        DecodingResult::U32(data) => (bytemuck::cast_slice(data), ChannelDatatype::U32),
        DecodingResult::U64(data) => (bytemuck::cast_slice(data), ChannelDatatype::U64),
        DecodingResult::F32(data) => (bytemuck::cast_slice(data), ChannelDatatype::F32),
        DecodingResult::F64(data) => (bytemuck::cast_slice(data), ChannelDatatype::F64),
        DecodingResult::I8(data) => (bytemuck::cast_slice(data), ChannelDatatype::I8),
        DecodingResult::I16(data) => (bytemuck::cast_slice(data), ChannelDatatype::I16),
        DecodingResult::I32(data) => (bytemuck::cast_slice(data), ChannelDatatype::I32),
        DecodingResult::I64(data) => (bytemuck::cast_slice(data), ChannelDatatype::I64),
    };

    let (width, height) = decoder.dimensions()?;
    // NOTE(review): only `channel_datatype` is filled in; `pixel_format` and
    // `color_model` are left unset — presumably the consumer infers them.
    // Confirm callers handle multi-channel (e.g. RGB) TIFFs correctly.
    let image_format = ImageFormat {
        width,
        height,
        channel_datatype: Some(data_type),
        pixel_format: None,
        color_model: None,
    };

    Ok((Blob::from(bytes), image_format))
}