// twgpu_tools/download_texture.rs

1use std::ops;
2use std::sync::{mpsc, Arc};
3use twgpu::{image, wgpu};
4
/// Recycling pool of [`DownloadTexture`]s that all share one size/format.
///
/// Returned textures flow back through the internal channel when their
/// `DownloadTexture` wrapper is dropped, so repeated downloads reuse GPU
/// allocations instead of creating a texture + staging buffer each time.
pub struct DownloadTexturePool {
    // Receives textures handed back by `DownloadTexture::drop`.
    receiver: mpsc::Receiver<DownloadTextureInner>,
    // Cloned into every `DownloadTexture` so it can return itself on drop.
    sender: mpsc::Sender<DownloadTextureInner>,
    device: wgpu::Device,
    queue: wgpu::Queue,
    // Dimensions/format used for every texture created by `fetch`.
    width: u32,
    height: u32,
    format: wgpu::TextureFormat,
}
14
/// Pool-owned handle to a [`DownloadTextureInner`]; derefs to it and returns
/// the inner value to the pool's channel when dropped.
pub struct DownloadTexture {
    /// ALWAYS `Some` until dropped. Wrapped in `Arc` so `map_then_send` can
    /// keep the inner alive across the async map callback.
    inner: Option<Arc<DownloadTextureInner>>,
    /// Send half of the owning pool's channel, used by `Drop`.
    sender: mpsc::Sender<DownloadTextureInner>,
}
20
21impl ops::Deref for DownloadTexture {
22    type Target = DownloadTextureInner;
23
24    fn deref(&self) -> &Self::Target {
25        self.inner.as_ref().unwrap()
26    }
27}
28
29impl ops::Drop for DownloadTexture {
30    fn drop(&mut self) {
31        let inner = self.inner.take().unwrap();
32        self.sender.send(Arc::into_inner(inner).unwrap()).unwrap();
33    }
34}
35
36impl DownloadTexturePool {
37    pub fn new(
38        width: u32,
39        height: u32,
40        format: wgpu::TextureFormat,
41        device: wgpu::Device,
42        queue: wgpu::Queue,
43    ) -> Self {
44        assert_ne!(width, 0);
45        assert_ne!(height, 0);
46        let (sender, receiver) = mpsc::channel();
47        Self {
48            receiver,
49            sender,
50            device,
51            queue,
52            width,
53            height,
54            format,
55        }
56    }
57
58    pub fn fetch(&self) -> DownloadTexture {
59        let inner = self.receiver.try_recv().unwrap_or_else(|_| {
60            println!("!!!!! NEW TEXTURE!!!!");
61            DownloadTextureInner::new(
62                self.width,
63                self.height,
64                self.format,
65                self.device.clone(),
66                self.queue.clone(),
67            )
68        });
69        DownloadTexture {
70            inner: Some(Arc::new(inner)),
71            sender: self.sender.clone(),
72        }
73    }
74}
75
impl DownloadTexture {
    /// Kick off the GPU→CPU copy and, once the download buffer is mapped,
    /// send `(self, payload)` through `sender` so the receiver can read it.
    ///
    /// The `Arc` clone lets us call `map` (which borrows the inner) while
    /// `self` itself is moved into the completion callback. NOTE(review):
    /// the clone briefly raises the strong count to 2; `Drop` expects the
    /// count to be back to 1 by the time the receiver drops the texture —
    /// confirm the callback cannot fire before this function returns.
    pub fn map_then_send<T: Send + 'static>(self, sender: mpsc::Sender<(Self, T)>, payload: T) {
        let inner_clone = self.inner.as_ref().unwrap().clone();
        inner_clone.map(move || sender.send((self, payload)).unwrap())
    }

    /// Same as [`Self::map_then_send`] but for a bounded [`mpsc::SyncSender`];
    /// the `send` inside the GPU callback may block if the channel is full.
    pub fn map_then_send_sync<T: Send + 'static>(
        self,
        sender: mpsc::SyncSender<(Self, T)>,
        payload: T,
    ) {
        let inner_clone = self.inner.as_ref().unwrap().clone();
        inner_clone.map(move || sender.send((self, payload)).unwrap())
    }
}
91
/// A render-target texture paired with a CPU-readable staging buffer for
/// downloading its contents.
///
/// Warning: Don't use for masses of textures to download.
/// Instead, use `DownloadTexturePool`
pub struct DownloadTextureInner {
    device: wgpu::Device,
    queue: wgpu::Queue,
    // The texture rendered into; copied from via COPY_SRC.
    texture: wgpu::Texture,
    // MAP_READ staging buffer the texture is copied into.
    download_buffer: wgpu::Buffer,
    width: u32,
    height: u32,
    // Total staging buffer size: padded_bytes_per_row * height.
    buffer_size: u64,
    // Tightly-packed bytes in one row of image data (width * bytes/pixel).
    bytes_per_row: u32,
    // Row stride in the staging buffer, padded to wgpu's copy alignment.
    padded_bytes_per_row: u32,
}
105
impl DownloadTextureInner {
    /// Allocate the render-target texture and a matching staging buffer
    /// sized for padded row-by-row copies.
    ///
    /// # Panics
    /// Panics if `width`/`height` is zero, or if `format` has no
    /// well-defined copy block size (`block_copy_size` returns `None`,
    /// e.g. for multi-planar formats).
    pub fn new(
        width: u32,
        height: u32,
        format: wgpu::TextureFormat,
        device: wgpu::Device,
        queue: wgpu::Queue,
    ) -> Self {
        assert_ne!(width, 0);
        assert_ne!(height, 0);
        let texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("Render Target Texture"),
            size: wgpu::Extent3d {
                width,
                height,
                depth_or_array_layers: 1,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format,
            // COPY_DST is included alongside RENDER_ATTACHMENT so callers
            // can also upload into this texture, not just render to it.
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT
                | wgpu::TextureUsages::COPY_DST
                | wgpu::TextureUsages::COPY_SRC,
            view_formats: &[],
        });
        // wgpu requires `bytes_per_row` of a texture→buffer copy to be a
        // multiple of COPY_BYTES_PER_ROW_ALIGNMENT, so each row is rounded
        // up to the next aligned stride; the padding is stripped again in
        // `download_mapped`.
        let bytes_per_pixel: u32 = format.block_copy_size(None).unwrap();
        let bytes_per_row: u32 = width * bytes_per_pixel;
        let align: u32 = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
        let padding: u32 = (align - (bytes_per_row % align)) % align;
        let padded_bytes_per_row: u32 = bytes_per_row + padding;
        // u64 math: width * height * 4 can exceed u32 for large targets.
        let buffer_size: u64 = u64::from(padded_bytes_per_row) * u64::from(height);
        let download_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Image Download Buffer"),
            size: buffer_size,
            usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        Self {
            device,
            queue,
            texture,
            download_buffer,
            width,
            height,
            buffer_size,
            bytes_per_row,
            padded_bytes_per_row,
        }
    }

    /// Borrow the underlying texture (e.g. to use as a render attachment).
    pub fn texture(&self) -> &wgpu::Texture {
        &self.texture
    }

    /// Create a default full view of the texture.
    pub fn texture_view(&self) -> wgpu::TextureView {
        self.texture
            .create_view(&wgpu::TextureViewDescriptor::default())
    }

    /// Encode a texture→staging-buffer copy, schedule a read mapping of the
    /// whole buffer on submit, and submit it. `on_map` runs once the mapping
    /// resolves (the map result is `unwrap`ped first, so a failed mapping
    /// panics inside the callback).
    ///
    /// You are responsible for fulfilling the contract of `map_async`.
    pub fn map<T: FnOnce() + Send + 'static>(&self, on_map: T) {
        let mut command_encoder =
            self.device
                .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                    label: Some("Download Texture Copy"),
                });
        command_encoder.copy_texture_to_buffer(
            self.texture.as_image_copy(),
            wgpu::TexelCopyBufferInfo {
                buffer: &self.download_buffer,
                layout: wgpu::TexelCopyBufferLayout {
                    offset: 0,
                    // Stride must be the padded (aligned) value, not the
                    // tight row width.
                    bytes_per_row: Some(self.padded_bytes_per_row),
                    rows_per_image: Some(self.height),
                },
            },
            wgpu::Extent3d {
                width: self.width,
                height: self.height,
                depth_or_array_layers: 1,
            },
        );
        command_encoder.map_buffer_on_submit(&self.download_buffer, wgpu::MapMode::Read, .., |e| {
            e.unwrap();
            on_map()
        });
        self.queue.submit([command_encoder.finish()]);
    }

    /// Read the already-mapped staging buffer, invoking `row_closure` once
    /// per image row with the alignment padding stripped, then unmap.
    ///
    /// Must only be called while the buffer is mapped (i.e. from/after a
    /// `map` completion); `get_mapped_range` panics otherwise.
    pub fn download_mapped<T: FnMut(&[u8])>(&self, row_closure: T) {
        {
            // Inner scope: the mapped range must be dropped before `unmap`.
            let downloaded = self.download_buffer.slice(..).get_mapped_range();
            assert_eq!(downloaded.len(), self.buffer_size as usize);
            downloaded
                .chunks_exact(self.padded_bytes_per_row as usize)
                .map(|padded_row| &padded_row[..self.bytes_per_row as usize])
                .for_each(row_closure);
        }
        self.download_buffer.unmap();
    }

    /// Blocking convenience: copy, wait for the device to finish, and decode
    /// the result into an `RgbaImage`. Requires the texture format to be
    /// exactly `Rgba8Unorm`.
    pub fn download_rgba(&self) -> image::RgbaImage {
        assert_eq!(self.texture.format(), wgpu::TextureFormat::Rgba8Unorm);
        // Empty callback: we synchronously wait via `poll` below instead.
        self.map(|| {});
        self.device
            .poll(wgpu::PollType::Wait {
                submission_index: None,
                timeout: None,
            })
            .unwrap();
        self.download_mapped_rgba()
    }

    /// Decode the already-mapped staging buffer into an `RgbaImage`.
    ///
    /// The `from_vec` unwrap holds because exactly
    /// `bytes_per_row * height` bytes are collected, matching what
    /// `RgbaImage` expects for `width * height` RGBA8 pixels.
    pub fn download_mapped_rgba(&self) -> image::RgbaImage {
        let mut image_buffer =
            Vec::with_capacity(self.bytes_per_row as usize * self.height as usize);
        self.download_mapped(|row| image_buffer.extend(row));
        image::RgbaImage::from_vec(self.width, self.height, image_buffer).unwrap()
    }
}
226}