1use std::{
2 fmt, io, mem, ptr, slice, str,
3 sync::{Arc, Mutex},
4};
5
/// Transparent newtype around the engine's texture format, existing only so
/// that `blade_macros::Flat` (de)serialization can be derived for a type
/// declared in another crate.
#[repr(transparent)]
#[derive(Clone, Copy, Debug, blade_macros::Flat)]
struct TextureFormatWrap(blade_graphics::TextureFormat);
9
/// One mip level of a cooked image: the raw (possibly block-compressed) bytes.
#[derive(blade_macros::Flat)]
struct CookedMip<'a> {
    data: &'a [u8],
}
14
/// Flat-serializable result of cooking a texture asset, consumed by `serve`.
#[derive(blade_macros::Flat)]
pub struct CookedImage<'a> {
    // UTF-8 bytes of the texture's name (validated in `serve`).
    name: &'a [u8],
    // Width, height, depth of the base mip level.
    extent: [u32; 3],
    format: TextureFormatWrap,
    // Mip chain, ordered from the base level downward.
    mips: Vec<CookedMip<'a>>,
}
22
/// Cooking parameters for a texture asset.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct Meta {
    /// Destination GPU format (for LDR sources this selects a BC compressor).
    pub format: blade_graphics::TextureFormat,
    /// Whether to build a full mip chain during cooking.
    pub generate_mips: bool,
    /// Whether to flip the decoded image vertically before cooking.
    pub y_flip: bool,
}
29
30impl fmt::Display for Meta {
31 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
32 fmt::Debug::fmt(&self.format, f)
33 }
34}
35
/// Baked output: a GPU texture, its default view, and the base-level extent.
pub struct Texture {
    pub object: blade_graphics::Texture,
    pub view: blade_graphics::TextureView,
    pub extent: blade_graphics::Extent,
}
41
/// Pending request to initialize a freshly created texture (see `Baker::flush`).
struct Initialization {
    dst: blade_graphics::Texture,
}
45
/// Pending copy of one mip level from a staging buffer into a texture.
struct Transfer {
    /// Upload-memory staging buffer holding the mip's bytes.
    stage: blade_graphics::Buffer,
    bytes_per_row: u32,
    dst: blade_graphics::Texture,
    /// Extent of this mip level (not the base level).
    extent: blade_graphics::Extent,
    mip_level: u32,
}
53
/// Work queued by `create_texture`/`serve`, drained by `Baker::flush`.
#[derive(Default)]
struct PendingOperations {
    initializations: Vec<Initialization>,
    transfers: Vec<Transfer>,
}
60
/// Texture asset baker: cooks source images into `CookedImage`s and serves
/// them as GPU textures, queuing the uploads until `flush` is called.
pub struct Baker {
    gpu_context: Arc<blade_graphics::Context>,
    /// Shared queue of deferred GPU work; `cook`/`serve` may run on worker threads.
    pending_operations: Mutex<PendingOperations>,
}
65
impl Baker {
    /// Creates a baker that records GPU upload work against the given context.
    pub fn new(gpu_context: &Arc<blade_graphics::Context>) -> Self {
        Self {
            gpu_context: Arc::clone(gpu_context),
            pending_operations: Mutex::new(PendingOperations::default()),
        }
    }

    /// Encodes all pending texture initializations and staging-buffer copies,
    /// draining both queues.
    ///
    /// Staging buffers consumed by the transfers are pushed onto
    /// `temp_buffers`; the caller is responsible for destroying them once the
    /// encoded commands have executed.
    pub fn flush(
        &self,
        encoder: &mut blade_graphics::CommandEncoder,
        temp_buffers: &mut Vec<blade_graphics::Buffer>,
    ) {
        let mut pending_ops = self.pending_operations.lock().unwrap();
        // Initialize destination textures first; the copies below target them.
        for init in pending_ops.initializations.drain(..) {
            encoder.init_texture(init.dst);
        }
        if !pending_ops.transfers.is_empty() {
            let mut pass = encoder.transfer("init textures");
            for transfer in pending_ops.transfers.drain(..) {
                let dst = blade_graphics::TexturePiece {
                    texture: transfer.dst,
                    mip_level: transfer.mip_level,
                    array_layer: 0,
                    origin: [0; 3],
                };
                pass.copy_buffer_to_texture(
                    transfer.stage.into(),
                    transfer.bytes_per_row,
                    dst,
                    transfer.extent,
                );
                // Hand the staging buffer to the caller for deferred deletion.
                temp_buffers.push(transfer.stage);
            }
        }
    }
}
103
104impl Baker {
105 pub fn create_texture(&self, name: &str, width: u32, height: u32, data: &[[u8; 4]]) -> Texture {
108 use blade_graphics as gpu;
109
110 assert_eq!(data.len(), (width * height) as usize);
111 let format = gpu::TextureFormat::Rgba8Unorm;
112 let extent = gpu::Extent {
113 width,
114 height,
115 depth: 1,
116 };
117 let texture = self.gpu_context.create_texture(gpu::TextureDesc {
118 name,
119 format,
120 size: extent,
121 array_layer_count: 1,
122 mip_level_count: 1,
123 dimension: gpu::TextureDimension::D2,
124 usage: gpu::TextureUsage::COPY | gpu::TextureUsage::RESOURCE,
125 sample_count: 1,
126 external: None,
127 });
128 let view = self.gpu_context.create_texture_view(
129 texture,
130 gpu::TextureViewDesc {
131 name,
132 format,
133 dimension: gpu::ViewDimension::D2,
134 subresources: &Default::default(),
135 },
136 );
137
138 let byte_data = unsafe {
139 std::slice::from_raw_parts(data.as_ptr() as *const u8, std::mem::size_of_val(data))
140 };
141 let stage = self.gpu_context.create_buffer(gpu::BufferDesc {
142 name: &format!("{name}/stage"),
143 size: byte_data.len() as u64,
144 memory: gpu::Memory::Upload,
145 });
146 unsafe {
147 ptr::copy_nonoverlapping(byte_data.as_ptr(), stage.data(), byte_data.len());
148 }
149
150 let bytes_per_row = width * 4;
151 let mut pending_ops = self.pending_operations.lock().unwrap();
152 pending_ops
153 .initializations
154 .push(Initialization { dst: texture });
155 pending_ops.transfers.push(Transfer {
156 stage,
157 bytes_per_row,
158 dst: texture,
159 extent,
160 mip_level: 0,
161 });
162
163 Texture {
164 object: texture,
165 view,
166 extent,
167 }
168 }
169}
170
impl blade_asset::Baker for Baker {
    type Meta = Meta;
    type Data<'a> = CookedImage<'a>;
    type Output = Texture;
    /// Decodes `source` (dispatched on `extension`), then cooks it:
    /// LDR images are optionally y-flipped, mip-mapped, and BC-compressed on
    /// the task pool; HDR images are expanded to RGBA32F. The result is
    /// handed to `cooker.finish` as a `CookedImage`.
    fn cook(
        &self,
        source: &[u8],
        extension: &str,
        meta: Meta,
        cooker: Arc<blade_asset::Cooker<Self>>,
        exe_context: &choir::ExecutionContext,
    ) {
        use blade_graphics::TextureFormat as Tf;

        type LdrTexel = [u8; 4];
        type HdrTexel = [f32; 3];
        // Decoded pixel payload: 8-bit RGBA or 32-bit float RGB.
        enum PlainData {
            Ldr(Vec<LdrTexel>),
            Hdr(Vec<HdrTexel>),
        }
        struct PlainImage {
            width: usize,
            height: usize,
            data: PlainData,
        }

        let src: PlainImage = match extension {
            #[cfg(feature = "asset")]
            "png" => {
                profiling::scope!("decode png");
                // Force an alpha channel so every texel is exactly 4 bytes.
                let options =
                    zune_core::options::DecoderOptions::default().png_set_add_alpha_channel(true);
                let mut decoder = zune_png::PngDecoder::new_with_options(source, options);
                decoder.decode_headers().unwrap();
                let info = decoder.get_info().unwrap().clone();
                let mut data = vec![[0u8; 4]; info.width * info.height];
                let count = data.len() * data[0].len();
                assert_eq!(count, decoder.output_buffer_size().unwrap());
                // SAFETY: `data` owns `count` contiguous bytes of `[u8; 4]`
                // texels; reinterpreting them as a flat `&mut [u8]` is valid.
                decoder
                    .decode_into(unsafe {
                        slice::from_raw_parts_mut(data.as_mut_ptr() as *mut u8, count)
                    })
                    .unwrap();
                PlainImage {
                    width: info.width,
                    height: info.height,
                    data: PlainData::Ldr(data),
                }
            }
            #[cfg(feature = "asset")]
            "jpg" | "jpeg" => {
                profiling::scope!("decode jpeg");
                // Request RGBA output so the texel layout matches `LdrTexel`.
                let options = zune_core::options::DecoderOptions::default()
                    .jpeg_set_out_colorspace(zune_core::colorspace::ColorSpace::RGBA);
                let mut decoder = zune_jpeg::JpegDecoder::new_with_options(source, options);
                decoder.decode_headers().unwrap();
                let info = decoder.info().unwrap();
                let mut data = vec![[0u8; 4]; info.width as usize * info.height as usize];
                let count = data.len() * data[0].len();
                assert_eq!(count, decoder.output_buffer_size().unwrap());
                // SAFETY: same flat-byte view of the `[u8; 4]` texel buffer
                // as in the PNG branch.
                decoder
                    .decode_into(unsafe {
                        slice::from_raw_parts_mut(data.as_mut_ptr() as *mut u8, count)
                    })
                    .unwrap();
                PlainImage {
                    width: info.width as usize,
                    height: info.height as usize,
                    data: PlainData::Ldr(data),
                }
            }
            #[cfg(feature = "asset")]
            "hdr" => {
                profiling::scope!("decode hdr");
                let options = zune_core::options::DecoderOptions::default();
                let mut decoder = zune_hdr::HdrDecoder::new_with_options(source, options);
                decoder.decode_headers().unwrap();
                let (width, height) = decoder.get_dimensions().unwrap();
                let colorspace = decoder.get_colorspace().unwrap();
                assert_eq!(colorspace, zune_core::colorspace::ColorSpace::RGB);
                let mut data = vec![[0f32; 3]; width * height];
                let count = data.len() * data[0].len();
                assert_eq!(count, decoder.output_buffer_size().unwrap());
                // SAFETY: `data` owns `count` contiguous `f32`s ([f32; 3]
                // texels); viewing them as a flat `&mut [f32]` is valid.
                decoder
                    .decode_into(unsafe {
                        slice::from_raw_parts_mut(data.as_mut_ptr() as *mut f32, count)
                    })
                    .unwrap();
                PlainImage {
                    width,
                    height,
                    data: PlainData::Hdr(data),
                }
            }
            #[cfg(feature = "asset")]
            "exr" => {
                use exr::prelude::{ReadChannels as _, ReadLayers as _};
                profiling::scope!("decode exr");
                // Accumulator for the per-pixel callback below.
                struct RawImage {
                    width: usize,
                    data: Vec<HdrTexel>,
                }
                let image = exr::image::read::read()
                    .no_deep_data()
                    .largest_resolution_level()
                    .rgba_channels(
                        |size, _| RawImage {
                            width: size.width(),
                            data: vec![[0f32; 3]; size.width() * size.height()],
                        },
                        // Alpha is dropped: only RGB is kept per texel.
                        |image, position, (r, g, b, _): (f32, f32, f32, f32)| {
                            image.data[position.y() * image.width + position.x()] = [r, g, b];
                        },
                    )
                    .first_valid_layer()
                    .all_attributes()
                    .from_buffered(io::Cursor::new(source))
                    .unwrap();
                PlainImage {
                    width: image.layer_data.size.width(),
                    height: image.layer_data.size.height(),
                    data: PlainData::Hdr(image.layer_data.channel_data.pixels.data),
                }
            }
            other => panic!("Unknown texture extension: {}", other),
        };

        #[cfg(feature = "asset")]
        match src.data {
            PlainData::Ldr(mut data) => {
                if meta.y_flip {
                    profiling::scope!("y-flip");
                    zune_imageprocs::flip::vertical_flip(&mut data, src.width);
                }

                // LDR data is always block-compressed; map the requested GPU
                // format to the matching texpresso BC encoder.
                let dst_format = match meta.format {
                    Tf::Bc1Unorm | Tf::Bc1UnormSrgb => texpresso::Format::Bc1,
                    Tf::Bc2Unorm | Tf::Bc2UnormSrgb => texpresso::Format::Bc2,
                    Tf::Bc3Unorm | Tf::Bc3UnormSrgb => texpresso::Format::Bc3,
                    Tf::Bc4Unorm | Tf::Bc4Snorm => texpresso::Format::Bc4,
                    Tf::Bc5Unorm | Tf::Bc5Snorm => texpresso::Format::Bc5,
                    other => panic!("Unsupported destination format {:?}", other),
                };

                // `src_mips[i]` holds uncompressed texels for level i;
                // `mips[i]` is the pre-sized compressed output for level i.
                let mut src_mips = vec![data];
                let mut mips = {
                    let compressed_size = dst_format.compressed_size(src.width, src.height);
                    vec![vec![0u8; compressed_size]]
                };
                let base_extent = blade_graphics::Extent {
                    width: src.width as u32,
                    height: src.height as u32,
                    depth: 1,
                };
                if meta.generate_mips {
                    profiling::scope!("generate mipmap");
                    // Each level is produced by bilinear-downscaling the previous one.
                    for i in 1..base_extent.max_mip_levels() {
                        let prev_extent = base_extent.at_mip_level(i - 1);
                        let cur_extent = base_extent.at_mip_level(i);
                        let prev_data = src_mips.last().unwrap();
                        // SAFETY: flat byte view over `[u8; 4]` texels,
                        // length = texel count * 4 bytes.
                        let prev_raw = unsafe {
                            slice::from_raw_parts(
                                prev_data.as_ptr() as *const u8,
                                prev_data.len() * 4,
                            )
                        };
                        let mut cur_data =
                            vec![[0u8; 4]; cur_extent.width as usize * cur_extent.height as usize];
                        // SAFETY: same flat byte view, mutable, over `cur_data`.
                        let cur_raw = unsafe {
                            slice::from_raw_parts_mut(
                                cur_data.as_mut_ptr() as *mut u8,
                                cur_data.len() * 4,
                            )
                        };
                        zune_imageprocs::resize::resize(
                            prev_raw,
                            cur_raw,
                            zune_imageprocs::resize::ResizeMethod::Bilinear,
                            prev_extent.width as _,
                            prev_extent.height as _,
                            cur_extent.width as _,
                            cur_extent.height as _,
                        );
                        src_mips.push(cur_data);
                        let compressed_size = dst_format
                            .compressed_size(cur_extent.width as _, cur_extent.height as _);
                        mips.push(vec![0u8; compressed_size]);
                    }
                }

                // One compression job per mip level; `dst_ptr` points into the
                // corresponding `mips[i]` allocation.
                struct CompressTask {
                    src: Vec<LdrTexel>,
                    dst_ptr: *mut u8,
                }
                // SAFETY: each task's `dst_ptr` targets a distinct `mips[i]`
                // buffer (no two tasks alias), and `mips` outlives the fork:
                // the "finish" task below reads it only after `compress_task`
                // completes via `depend_on`.
                unsafe impl Send for CompressTask {}
                unsafe impl Sync for CompressTask {}

                let compress_task = exe_context
                    .fork("compress")
                    .init_iter(
                        src_mips
                            .into_iter()
                            .zip(mips.iter_mut())
                            .map(|(src, dst)| CompressTask {
                                src,
                                dst_ptr: dst.as_mut_ptr(),
                            })
                            .enumerate(),
                        move |_, (i, task)| {
                            let extent = base_extent.at_mip_level(i as u32);
                            let compressed_size =
                                dst_format.compressed_size(extent.width as _, extent.height as _);
                            let params = texpresso::Params {
                                algorithm: texpresso::Algorithm::RangeFit,
                                ..Default::default()
                            };
                            // SAFETY: `dst_ptr` points at `mips[i]`, which was
                            // sized to exactly `compressed_size` bytes for this
                            // level's extent.
                            let dst =
                                unsafe { slice::from_raw_parts_mut(task.dst_ptr, compressed_size) };
                            // SAFETY: flat byte view over this level's `[u8; 4]` texels.
                            let raw = unsafe {
                                slice::from_raw_parts(
                                    task.src.as_ptr() as *const u8,
                                    task.src.len() * 4,
                                )
                            };
                            dst_format.compress(
                                raw,
                                extent.width as _,
                                extent.height as _,
                                params,
                                dst,
                            );
                        },
                    )
                    .run();

                // Publish the cooked image only after every mip is compressed.
                exe_context
                    .fork("finish")
                    .init(move |_| {
                        cooker.finish(CookedImage {
                            name: &[],
                            extent: [base_extent.width, base_extent.height, base_extent.depth],
                            format: TextureFormatWrap(meta.format),
                            mips: mips.iter().map(|data| CookedMip { data }).collect(),
                        });
                    })
                    .depend_on(&compress_task);
            }
            PlainData::Hdr(data) => {
                assert_eq!(meta.format, blade_graphics::TextureFormat::Rgba32Float);
                // Expand RGB f32 texels to RGBA32F; the alpha slot stays at
                // the zero-initialized value from `buf`.
                let in_texel_elements = data[0].len();
                let out_texel_size = 4 * mem::size_of::<f32>();
                let mut buf = vec![0u8; data.len() * out_texel_size];
                for (slice, texel) in buf.chunks_mut(out_texel_size).zip(data) {
                    // SAFETY: `slice` is `out_texel_size` (16) bytes and
                    // suitably sized for `in_texel_elements` (3) `f32`s; the
                    // source and destination do not overlap.
                    unsafe {
                        ptr::copy_nonoverlapping(
                            texel.as_ptr(),
                            slice.as_mut_ptr() as *mut f32,
                            in_texel_elements,
                        )
                    }
                }
                cooker.finish(CookedImage {
                    name: &[],
                    extent: [src.width as u32, src.height as u32, 1],
                    format: TextureFormatWrap(meta.format),
                    mips: vec![CookedMip { data: &buf }],
                });
            }
        }
    }

    /// Creates the GPU texture and view for a cooked image and queues one
    /// staging-buffer upload per mip level; the copies run at the next
    /// `Baker::flush`.
    fn serve(
        &self,
        image: CookedImage<'_>,
        _exe_context: &choir::ExecutionContext,
    ) -> Self::Output {
        let name = str::from_utf8(image.name).unwrap();
        let base_extent = blade_graphics::Extent {
            width: image.extent[0],
            height: image.extent[1],
            depth: image.extent[2],
        };
        let texture = self
            .gpu_context
            .create_texture(blade_graphics::TextureDesc {
                name,
                format: image.format.0,
                size: base_extent,
                array_layer_count: 1,
                mip_level_count: image.mips.len() as u32,
                dimension: blade_graphics::TextureDimension::D2,
                usage: blade_graphics::TextureUsage::COPY | blade_graphics::TextureUsage::RESOURCE,
                sample_count: 1,
                external: None,
            });
        let view = self.gpu_context.create_texture_view(
            texture,
            blade_graphics::TextureViewDesc {
                name,
                format: image.format.0,
                dimension: blade_graphics::ViewDimension::D2,
                subresources: &Default::default(),
            },
        );
        self.pending_operations
            .lock()
            .unwrap()
            .initializations
            .push(Initialization { dst: texture });

        for (i, mip) in image.mips.iter().enumerate() {
            let stage = self.gpu_context.create_buffer(blade_graphics::BufferDesc {
                name: &format!("{name}[{i}]/stage"),
                size: mip.data.len() as u64,
                memory: blade_graphics::Memory::Upload,
            });
            // SAFETY: the upload buffer was created with exactly
            // `mip.data.len()` bytes of mapped storage.
            unsafe {
                ptr::copy_nonoverlapping(mip.data.as_ptr(), stage.data(), mip.data.len());
            }

            // Row pitch is computed in compression blocks, rounding the mip
            // dimensions up to whole blocks.
            let block_info = image.format.0.block_info();
            let extent = base_extent.at_mip_level(i as u32);
            let bytes_per_row =
                extent.width.div_ceil(block_info.dimensions.0 as u32) * block_info.size as u32;
            let rows_per_image = extent.height.div_ceil(block_info.dimensions.1 as u32);
            assert!(
                mip.data.len() >= rows_per_image as usize * bytes_per_row as usize,
                "Image mip[{i}] data of size {} is insufficient for {bytes_per_row} bytes per {rows_per_image} rows",
                mip.data.len()
            );

            let mut pending_ops = self.pending_operations.lock().unwrap();
            pending_ops.transfers.push(Transfer {
                stage,
                bytes_per_row,
                dst: texture,
                extent,
                mip_level: i as u32,
            });
        }

        Texture {
            object: texture,
            view,
            extent: base_extent,
        }
    }

    /// Destroys the served texture's view and the texture itself.
    fn delete(&self, texture: Self::Output) {
        self.gpu_context.destroy_texture_view(texture.view);
        self.gpu_context.destroy_texture(texture.object);
    }
}