1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
use anyhow::Result;
use crossbeam_channel::Receiver;
use crate::{hsl, webgpu};
/// Rendering members that need to be recreated when window dimensions change.
/// Rebuilt wholesale via SizedState::new() on window resize or orientation toggle.
struct SizedState {
    /// wgpu-specific state that depends on window dimensions
    wgpu_state: webgpu::WgpuSizedState,
    /// The number of logical rows in analyzer_buf in vertical orientation
    /// (the bottom ~20% of the texture height).
    analyzer_height: usize,
    /// The analyzer value data (one RGBA pixel packed per u32), which is only
    /// updated once per batch of inputs.
    analyzer_buf: Vec<u32>,
    /// The number of logical rows in voiceprint_buf in vertical orientation
    /// (the top ~80% of the texture height).
    voiceprint_height: usize,
    /// The voiceprint value data (one RGBA pixel packed per u32), acts as a ring
    /// buffer where voiceprint_row is the input offset.
    voiceprint_buf: Vec<u32>,
    /// The "active" row to update next in voiceprint_buf.
    /// Wraps back to 0 when it reaches voiceprint_height.
    voiceprint_row: usize,
}
impl SizedState {
    /// (Re)initializes the render state to reflect a change in window size.
    /// This avoids being a method of State to keep it clear what information is needed.
    fn new(
        wgpu_state: &webgpu::WgpuState,
        // The desired orientation to use in rendering. Affects texture size.
        orientation: &webgpu::Orientation,
        // Multiplier for the texture height, reducing render size while speeding up
        // voiceprint scroll.
        scroll_rate: f32,
        // Width of the texture, corresponding to the size of Vecs produced by
        // recv_processed, except with some data discarded.
        texture_width: usize,
    ) -> SizedState {
        // Scale texture height according to the window size in the correct direction.
        // Texture width meanwhile is always locked to buf_sizes.input_channels.
        let window_dims = wgpu_state.window_dims();
        let texture_height = match orientation {
            webgpu::Orientation::Vertical => (window_dims.height as f32 / scroll_rate) as usize,
            webgpu::Orientation::Horizontal => (window_dims.width as f32 / scroll_rate) as usize,
        };
        // Circular voiceprint buffer written to the top 80% of the texture.
        let voiceprint_height = (0.8 * (texture_height as f32)) as usize;
        // Initialize to opaque black rather than transparent. Sizing with vec! and an
        // explicit length matters here: the previous resize(buf.capacity(), ..) pattern
        // is incorrect in general because Vec::with_capacity() only guarantees *at
        // least* the requested capacity, so the buffer length could exceed
        // voiceprint_height * texture_width and fall out of sync with the texture.
        let voiceprint_buf = vec![webgpu::VALUE_BLACK; voiceprint_height * texture_width];
        // Analyzer written to the bottom 20% of the texture.
        let analyzer_height = texture_height - voiceprint_height;
        let analyzer_buf = vec![webgpu::VALUE_BLACK; analyzer_height * texture_width];
        SizedState {
            wgpu_state: webgpu::WgpuSizedState::new(
                wgpu_state,
                webgpu::Dimensions {
                    width: texture_width,
                    height: texture_height,
                },
                orientation,
            ),
            analyzer_height,
            analyzer_buf,
            voiceprint_height,
            voiceprint_buf,
            voiceprint_row: 0,
        }
    }
}
/// The rendering state.
pub struct State {
    /// The wgpu output components. Initialized once on startup.
    wgpu_state: webgpu::WgpuState,
    /// A multiplier for how fast the voiceprint should scroll.
    /// A higher value shrinks the texture height, which should reduce load,
    /// but it also "speeds up" the voiceprint since we're doing 1 texture row per sample.
    /// This value is fixed and kept around to recalculate texture height on window resize.
    scroll_rate: f32,
    /// The width of the texture to render, or the number of audio channels in the spectrum.
    /// This is proportional to recv_processed_len, except without discarded high-pitched data.
    texture_width: usize,
    /// The desired render orientation, used when reinitializing sized_state.
    orientation: webgpu::Orientation,
    /// Various buffers/configs that need to be reinitialized if the window is resized.
    sized_state: SizedState,
    /// Mapping of [0.0, 1.0] audio values to RGBA colors.
    hsl: hsl::HSL,
    /// Input channel for audio data, where each Vec must have length buf_sizes.input_channels.
    recv_processed: Receiver<Vec<f32>>,
}
impl State {
    /// Sets up a visualization rendering pipeline for the provided window.
    ///
    /// - `wgpu_state`: wgpu output components, initialized once on startup
    /// - `hsl`: mapping of [0.0, 1.0] audio values to RGBA colors
    /// - `recv_processed`: input channel of audio spectrum data
    /// - `orientation`: initial render orientation
    /// - `scroll_rate`: voiceprint scroll-speed multiplier (shrinks texture height)
    /// - `texture_width`: texture width / number of spectrum channels to display
    pub async fn new(
        wgpu_state: webgpu::WgpuState,
        hsl: hsl::HSL,
        recv_processed: Receiver<Vec<f32>>,
        orientation: webgpu::Orientation,
        scroll_rate: f32,
        texture_width: usize,
    ) -> Self {
        let sized_state = SizedState::new(&wgpu_state, &orientation, scroll_rate, texture_width);
        State {
            wgpu_state,
            scroll_rate,
            texture_width,
            orientation,
            sized_state,
            hsl,
            recv_processed,
        }
    }

    /// Rebuilds sized_state (size-dependent wgpu state and pixel buffers) from the
    /// current window dimensions, orientation, and fixed settings.
    /// Shared by toggle_orientation() and resize() to keep the call sites in sync.
    fn rebuild_sized_state(&mut self) {
        self.sized_state = SizedState::new(
            &self.wgpu_state,
            &self.orientation,
            self.scroll_rate,
            self.texture_width,
        );
    }

    /// Switches the render orientation between horizontal and vertical.
    pub fn toggle_orientation(&mut self) {
        // Update the desired orientation first so that it's available both for the
        // rebuild below and in future resize() calls.
        self.orientation = match self.orientation {
            webgpu::Orientation::Vertical => webgpu::Orientation::Horizontal,
            webgpu::Orientation::Horizontal => webgpu::Orientation::Vertical,
        };
        // No changes needed to wgpu config, since the surface dimensions aren't changing.
        // Reinitialize sized_state with new buffers to reflect the new orientation.
        self.rebuild_sized_state();
    }

    /// Updates the render size to reflect a change in window size.
    pub fn resize(&mut self, new_size: Option<webgpu::Dimensions>) {
        self.wgpu_state.resize(new_size);
        // Reinitialize sized_state with new buffers to reflect the new dimensions.
        self.rebuild_sized_state();
    }

    /// Returns the underlying surface texture for rendering.
    pub fn surface_texture(&mut self) -> Result<wgpu::SurfaceTexture, wgpu::SurfaceError> {
        self.wgpu_state.surface_texture()
    }

    /// Re-renders the display:
    /// - Collects a batch of audio frequency data from recv_processed
    /// - For each audio entry in the batch, adds it to the voiceprint to be rendered
    /// - For the last audio entry in the batch, updates the analyzer to be rendered
    /// - Writes the resulting buffers to a texture which is then rotated/scaled and
    ///   rendered to the output
    pub fn render(&mut self, output: wgpu::SurfaceTexture) -> Result<()> {
        // Collect a batch of data. The batch is capped at the capacity of the channel.
        let audio_vec: Vec<Vec<f32>> = self.recv_processed.try_iter().collect();
        // Here's how the voiceprint/analyzer buffers are mapped to the texture.
        // - The scrolling of "voiceprint_row" allows us to only need to update one row of the vector at a time.
        // - Keeping texture in this orientation when writing to it enables passing subranges of voiceprint_buf to write_texture().
        //
        //  lowfreq          hifreq
        // |----------------|
        // | old voiceprint |
        // | [row, end]     |
        // |----------------| <- start/end of voiceprint_buf
        // | new voiceprint |
        // | [0, row]       |
        // |----------------| <- current voiceprint_row (increments forward through voiceprint_buf)
        // | analyzer       |
        // |----------------|
        //
        // Horizontal mode:
        // - texture is rotated 90 degrees counter-clockwise
        // - the analyzer is at the right and low frequencies are at the bottom
        // Vertical mode:
        // - texture is mirrored vertically
        // - the analyzer is at the top and low frequencies are at the left
        // Iterate over the batch. Only render the analyzer on the last entry of the batch.
        for (audio_idx, audio) in audio_vec.iter().enumerate() {
            // Set to buffer index for the start of the current row in voiceprint_buf:
            // row index in voiceprint_buf * width of voiceprint_buf.
            let mut voiceprint_idx = self.sized_state.voiceprint_row * self.texture_width;
            if audio_idx == audio_vec.len() - 1 {
                // We're at the last entry of the batch (common case unless we're falling behind).
                // Render both the voiceprint and the analyzer.
                let mut analyzer_idx = 0;
                // analyzer: Reset to opaque black. fill() keeps the length pinned at
                // analyzer_height * texture_width; the previous resize(capacity(), ..)
                // could grow the buffer past that, since Vec::with_capacity() only
                // guarantees *at least* the requested capacity.
                self.sized_state.analyzer_buf.fill(webgpu::VALUE_BLACK);
                for amplitude in audio {
                    self.draw_voiceprint_analyzer(
                        *amplitude,
                        &mut voiceprint_idx,
                        &mut analyzer_idx,
                    );
                    if analyzer_idx >= self.texture_width {
                        // Discard the remaining high-pitched data
                        break;
                    }
                }
            } else {
                // We're not at the last entry of the batch yet. Only update the voiceprint buffer.
                // Skip updating the analyzer since only the last batch entry will be displayed there.
                let mut column = 0;
                for amplitude in audio {
                    self.draw_voiceprint(*amplitude, &mut voiceprint_idx);
                    column += 1;
                    if column >= self.texture_width {
                        // Discard the remaining high-pitched data
                        break;
                    }
                }
            }
            // Increment voiceprint row, with wraparound to row 0 when we reach the height of the voiceprint.
            self.sized_state.voiceprint_row =
                (self.sized_state.voiceprint_row + 1) % self.sized_state.voiceprint_height;
        }
        // Now that we've updated the voiceprint and analyzer buffers, write them to the texture to be displayed.
        // Write the older data below voiceprint_row to the top of the voiceprint region.
        self.wgpu_state.write_texture(
            &self.sized_state.wgpu_state,
            as_u8_slice(&self.sized_state.voiceprint_buf),
            self.sized_state.voiceprint_row * self.texture_width,
            webgpu::Dimensions {
                width: self.texture_width,
                height: self.sized_state.voiceprint_height,
            },
            webgpu::Dimensions {
                width: self.texture_width,
                height: self.sized_state.voiceprint_height - self.sized_state.voiceprint_row,
            },
            0,
        );
        // Write the newer data above voiceprint_row to the bottom of the voiceprint region.
        self.wgpu_state.write_texture(
            &self.sized_state.wgpu_state,
            as_u8_slice(&self.sized_state.voiceprint_buf),
            0,
            webgpu::Dimensions {
                width: self.texture_width,
                height: self.sized_state.voiceprint_row,
            },
            webgpu::Dimensions {
                width: self.texture_width,
                height: self.sized_state.voiceprint_row,
            },
            self.sized_state.voiceprint_height - self.sized_state.voiceprint_row,
        );
        // Write analyzer_buf to the bottom 20% of the texture.
        self.wgpu_state.write_texture(
            &self.sized_state.wgpu_state,
            as_u8_slice(&self.sized_state.analyzer_buf),
            0,
            webgpu::Dimensions {
                width: self.texture_width,
                height: self.sized_state.analyzer_height,
            },
            webgpu::Dimensions {
                width: self.texture_width,
                height: self.sized_state.analyzer_height,
            },
            self.sized_state.voiceprint_height,
        );
        self.wgpu_state.render(&self.sized_state.wgpu_state, output)
    }

    /// Updates a value in voiceprint_buf only.
    /// This is used when we know there's pending frequency data to be written,
    /// in which case writing this data to the analyzer buffer is a waste of time.
    fn draw_voiceprint(&mut self, val: f32, voiceprint_idx: &mut usize) {
        // Convert value to rgba color. val is scaled to [0.0, 1.0] by fourier.rs.
        let pixel = self.hsl.value_to_color(val);
        // voiceprint: draw rgba pixel on row
        self.sized_state.voiceprint_buf[*voiceprint_idx] = pixel;
        (*voiceprint_idx) += 1; // seek to next rgba pixel on row
    }

    /// Updates a value in both voiceprint_buf and analyzer_buf.
    /// Structured this way to share a little work between voiceprint/analyzer.
    fn draw_voiceprint_analyzer(
        &mut self,
        val: f32,
        voiceprint_idx: &mut usize,
        analyzer_idx: &mut usize,
    ) {
        // Convert value to rgba color. val is scaled to [0.0, 1.0] by fourier.rs.
        let pixel = self.hsl.value_to_color(val);
        // analyzer: bar length is proportional to value. Clamp to analyzer_height so
        // a val marginally above 1.0 (e.g. float rounding upstream) can't index past
        // the end of analyzer_buf and panic.
        let bar_len = ((val * self.sized_state.analyzer_height as f32) as usize)
            .min(self.sized_state.analyzer_height);
        // voiceprint: draw rgba pixel on row
        self.sized_state.voiceprint_buf[*voiceprint_idx] = pixel;
        (*voiceprint_idx) += 1; // seek to next rgba pixel on row
        // analyzer: draw vertical rgba line on single column (rotated to horizontal later)
        let mut col_idx = *analyzer_idx; // top of column
        for _pixel_col in 0..bar_len {
            self.sized_state.analyzer_buf[col_idx] = pixel;
            col_idx += self.texture_width; // next row in column
        }
        (*analyzer_idx) += 1; // seek to next col to draw another vertical line
    }
}
/// Recasts a `&[u32]` as a `&[u8]` (native endianness) for passing to gfx.
fn as_u8_slice(v: &[u32]) -> &[u8] {
    // SAFETY: `v` is a valid, initialized slice, so `v.as_ptr()` is non-null and
    // trivially aligned for u8 (alignment 1). `size_of_val(v)` is exactly the byte
    // size of the slice's memory, u32 has no padding bytes or invalid bit patterns
    // when viewed as u8, and the returned slice borrows `v` with the same lifetime,
    // so the underlying memory outlives the result.
    unsafe { std::slice::from_raw_parts(v.as_ptr().cast::<u8>(), std::mem::size_of_val(v)) }
}