use crate::context::{ArcTextureViewSampler, Context, RenderTargetState};
use crate::render_target::RenderTargetId;
use crate::CommonNodeProps;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::num::NonZeroU32;
use std::string::String;
mod preprocess_shader;
use preprocess_shader::preprocess_shader;
const EFFECT_HEADER: &str = include_str!("effect_header.wgsl");
const EFFECT_FOOTER: &str = include_str!("effect_footer.wgsl");
const INTENSITY_INTEGRAL_PERIOD: f32 = 1024.;
/// User-facing, serializable properties of an effect node.
///
/// `None` fields mean "use the default derived from the shader / current state";
/// `update()` writes the resolved values back into these props.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct EffectNodeProps {
    /// Library name of the effect; `"<name>.wgsl"` is fetched from the content library.
    pub name: String,
    /// Effect intensity; treated as 0 when absent.
    pub intensity: Option<f32>,
    /// Animation frequency; falls back to the shader's declared default frequency.
    pub frequency: Option<f32>,
    /// Number of texture inputs; if set, it must match the count declared by the shader.
    pub input_count: Option<u32>,
}
impl From<&EffectNodeProps> for CommonNodeProps {
fn from(props: &EffectNodeProps) -> Self {
CommonNodeProps {
input_count: props.input_count,
}
}
}
/// Lifecycle state of an effect node.
#[allow(clippy::large_enum_variant)]
pub enum EffectNodeState {
    /// No resources allocated yet.
    Uninitialized,
    /// Shader compiled and GPU resources built; ready to paint.
    Ready(EffectNodeStateReady),
    /// Setup or a later update failed; holds the error message.
    Error_(String),
}
/// GPU resources and resolved parameters for a successfully initialized effect node.
pub struct EffectNodeStateReady {
    // Effect name, fixed at construction (changing it later is an error).
    name: String,
    // Current intensity, updated from props each frame.
    intensity: f32,
    // Current animation frequency, updated from props each frame.
    frequency: f32,
    // Number of input textures, fixed at construction (baked into the bind group layout).
    input_count: u32,
    // Running integral of intensity over time, wrapped modulo INTENSITY_INTEGRAL_PERIOD.
    intensity_integral: f32,
    // Number of shader channels (one render pipeline + one channel texture each).
    channel_count: u32,
    // Layout for bind group 0: the uniform buffer.
    bind_group_1_layout: wgpu::BindGroupLayout,
    // Layout for bind group 1: sampler, input textures, noise texture, channel textures.
    bind_group_2_layout: wgpu::BindGroupLayout,
    // Per-frame uniforms (see `Uniforms`), rewritten every paint().
    uniform_buffer: wgpu::Buffer,
    // Mirror-repeat linear sampler shared by all texture bindings.
    sampler: wgpu::Sampler,
    // One pipeline per channel, indexed by channel number.
    render_pipelines: Vec<wgpu::RenderPipeline>,
    // Per-render-target textures, kept in sync with the context's targets.
    paint_states: HashMap<RenderTargetId, EffectNodePaintState>,
}
/// Textures owned by one (node, render target) pair.
struct EffectNodePaintState {
    // One texture per shader channel; each holds that channel's most recent output.
    channel_textures: Vec<ArcTextureViewSampler>,
    // Scratch render target; swapped with a channel texture after each pass in paint().
    output_texture: ArcTextureViewSampler,
}
/// CPU-side mirror of the shader's uniform block, uploaded once per paint().
///
/// NOTE(review): field order and padding must match the uniform struct declared
/// in effect_header.wgsl (not visible here) — confirm when editing.
#[repr(C)]
#[derive(Default, Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct Uniforms {
    // Audio levels: [low, mid, high, overall level].
    audio: [f32; 4],
    // Global time from the context.
    time: f32,
    frequency: f32,
    intensity: f32,
    intensity_integral: f32,
    // Render target size in pixels: [width, height].
    resolution: [f32; 2],
    // Frame delta time reported by the render target.
    dt: f32,
    // Pads the struct to 48 bytes (a 16-byte multiple).
    _padding: [u8; 4],
}
/// Log a wgpu validation error to stderr.
///
/// Currently unused: shader compilation errors are instead captured via
/// `push_error_scope` / `pop_error_scope` in `setup_render_pipeline`.
#[allow(dead_code)]
fn handle_shader_error(error: wgpu::Error) {
    // `eprintln!` already appends a newline; the previous trailing "\n"
    // printed a spurious blank line after every error.
    eprintln!("wgpu error: {error}");
}
#[allow(clippy::single_match)]
impl EffectNodeState {
/// Compile the effect's WGSL shader(s) and build all static GPU resources:
/// bind group layouts, render pipelines (one per channel), the uniform
/// buffer, and the shared sampler.
///
/// Returns an `EffectNodeStateReady` with an empty `paint_states` map;
/// per-render-target textures are allocated later by `update_paint_states`.
///
/// # Errors
/// Returns a human-readable message if the shader file cannot be fetched,
/// preprocessing fails, the graph-declared input count disagrees with the
/// shader, or WGSL compilation reports a validation error.
fn setup_render_pipeline(
    ctx: &Context,
    device: &wgpu::Device,
    _queue: &wgpu::Queue,
    props: &EffectNodeProps,
) -> Result<EffectNodeStateReady, String> {
    let name = &props.name;

    // Fetch the effect's WGSL source from the content library by name.
    let source_name = format!("{name}.wgsl");
    let effect_source = ctx.fetch_library_content(&source_name).map_err(|_| {
        format!("Failed to read effect shader file from library: \"{source_name}\"")
    })?;

    // Preprocessing splits the effect into per-channel sources and reports
    // the input count and default frequency declared in the shader.
    let (effect_sources_processed, shader_input_count, default_frequency) =
        preprocess_shader(&effect_source)?;

    // If the graph declares an input count, it must agree with the shader;
    // otherwise the shader's own count is used.
    let input_count = match props.input_count {
        Some(input_count) => {
            if shader_input_count != input_count {
                return Err(
                    "Shader input count does not match input count declared in graph"
                        .to_string(),
                );
            }
            input_count
        }
        None => shader_input_count,
    };

    // One render pipeline / channel texture per processed source.
    let channel_count: u32 = effect_sources_processed.len() as u32;

    let intensity = props.intensity.unwrap_or(0.);
    let frequency = props.frequency.unwrap_or(default_frequency);

    // Wrap each channel's source with the shared header and footer.
    let shader_sources = effect_sources_processed
        .iter()
        .map(|effect_source_processed| {
            format!(
                "{}\n{}\n{}\n",
                EFFECT_HEADER, effect_source_processed, EFFECT_FOOTER
            )
        });

    // Compile each channel's module inside a validation error scope so a bad
    // shader surfaces as an Err rather than a device-level error callback.
    let shader_modules = shader_sources.enumerate().map(|(i, shader_source)| {
        device.push_error_scope(wgpu::ErrorFilter::Validation);
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some(&format!("EffectNode {} channel {}", name, i)),
            source: wgpu::ShaderSource::Wgsl(shader_source.into()),
        });
        // pop_error_scope is async; block until the (local) validation result is in.
        let result = pollster::block_on(device.pop_error_scope());
        if let Some(error) = result {
            return Err(format!(
                "EffectNode shader compilation error: {} channel {}: {}\n",
                name, i, error
            ));
        }
        Ok(shader_module)
    });
    // Collect into Result, short-circuiting on the first compile error.
    let shader_modules: Result<Vec<wgpu::ShaderModule>, String> = shader_modules.collect();
    let shader_modules = shader_modules?;

    // Bind group 0 layout: a single uniform buffer visible to the fragment stage.
    let bind_group_1_layout =
        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            entries: &[wgpu::BindGroupLayoutEntry {
                binding: 0,
                visibility: wgpu::ShaderStages::FRAGMENT,
                ty: wgpu::BindingType::Buffer {
                    ty: wgpu::BufferBindingType::Uniform,
                    has_dynamic_offset: false,
                    min_binding_size: None,
                },
                count: None,
            }],
            label: Some(&format!(
                "EffectNode {} bind group layout 1 (uniforms)",
                name
            )),
        });

    // Bind group 1 layout: sampler, input textures, noise texture, channel textures.
    let bind_group_2_layout =
        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            entries: &[
                // binding 0: the shared filtering sampler.
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
                // binding 1: array of `input_count` input textures.
                // NonZeroU32::new(0) is None, so a zero-input effect gets a
                // plain (non-array) texture binding here.
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        view_dimension: wgpu::TextureViewDimension::D2,
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                    },
                    count: NonZeroU32::new(input_count),
                },
                // binding 2: the render target's noise texture (bound in paint()).
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        view_dimension: wgpu::TextureViewDimension::D2,
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                    },
                    count: None,
                },
                // binding 3: array of `channel_count` channel textures.
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        view_dimension: wgpu::TextureViewDimension::D2,
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                    },
                    count: NonZeroU32::new(channel_count),
                },
            ],
            label: Some(&format!(
                "EffectNode {} bind group layout 2 (textures)",
                name
            )),
        });

    let render_pipeline_layout =
        device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("Render Pipeline Layout"),
            bind_group_layouts: &[&bind_group_1_layout, &bind_group_2_layout],
            push_constant_ranges: &[],
        });

    // One pipeline per channel. All share the same layout and fixed-function
    // state; only the shader module differs. The fullscreen quad is drawn as
    // a 4-vertex triangle strip with no vertex buffers (see paint()).
    let render_pipelines: Vec<wgpu::RenderPipeline> = shader_modules
        .into_iter()
        .map(|shader_module| {
            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(&format!("EffectNode {} render pipeline", name)),
                layout: Some(&render_pipeline_layout),
                vertex: wgpu::VertexState {
                    module: &shader_module,
                    entry_point: Some("vs_main"),
                    buffers: &[],
                    compilation_options: Default::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader_module,
                    entry_point: Some("fs_main"),
                    targets: &[Some(wgpu::ColorTargetState {
                        format: wgpu::TextureFormat::Rgba16Float,
                        blend: None,
                        write_mask: wgpu::ColorWrites::ALL,
                    })],
                    compilation_options: Default::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology: wgpu::PrimitiveTopology::TriangleStrip,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: Some(wgpu::Face::Back),
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: 1,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview: None,
                cache: None,
            })
        })
        .collect();

    // Per-frame uniforms, rewritten by paint() via queue.write_buffer.
    let uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
        label: Some(&format!("EffectNode {} uniform buffer", name)),
        size: std::mem::size_of::<Uniforms>() as u64,
        usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
        mapped_at_creation: false,
    });

    // Mirror-repeat linear sampler shared by all texture bindings.
    let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
        address_mode_u: wgpu::AddressMode::MirrorRepeat,
        address_mode_v: wgpu::AddressMode::MirrorRepeat,
        address_mode_w: wgpu::AddressMode::MirrorRepeat,
        mag_filter: wgpu::FilterMode::Linear,
        min_filter: wgpu::FilterMode::Linear,
        mipmap_filter: wgpu::FilterMode::Linear,
        ..Default::default()
    });

    Ok(EffectNodeStateReady {
        name: name.clone(),
        intensity,
        frequency,
        input_count,
        intensity_integral: 0.,
        channel_count,
        bind_group_1_layout,
        bind_group_2_layout,
        uniform_buffer,
        sampler,
        render_pipelines,
        paint_states: HashMap::new(),
    })
}
/// Allocate the textures for one (node, render target) pair: one output
/// texture plus one texture per shader channel, all sized to the target.
fn new_paint_state(
    self_ready: &EffectNodeStateReady,
    _ctx: &Context,
    device: &wgpu::Device,
    _queue: &wgpu::Queue,
    render_target_state: &RenderTargetState,
) -> EffectNodePaintState {
    // All textures share one descriptor: RGBA16F, render-target sized,
    // usable as a render attachment, a sampled texture, and a copy source.
    let descriptor = wgpu::TextureDescriptor {
        size: wgpu::Extent3d {
            width: render_target_state.width(),
            height: render_target_state.height(),
            depth_or_array_layers: 1,
        },
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: wgpu::TextureFormat::Rgba16Float,
        usage: wgpu::TextureUsages::COPY_SRC
            | wgpu::TextureUsages::RENDER_ATTACHMENT
            | wgpu::TextureUsages::TEXTURE_BINDING,
        label: None,
        view_formats: &[wgpu::TextureFormat::Rgba16Float],
    };

    // Each texture gets its own clamp-to-edge linear sampler.
    let create_texture = || {
        let tex = device.create_texture(&descriptor);
        let view = tex.create_view(&Default::default());
        let samp = device.create_sampler(&wgpu::SamplerDescriptor {
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            mipmap_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });
        ArcTextureViewSampler::new(tex, view, samp)
    };

    let output_texture = create_texture();

    // One intermediate texture per shader channel.
    let mut channel_textures = Vec::with_capacity(self_ready.channel_count as usize);
    for _ in 0..self_ready.channel_count {
        channel_textures.push(create_texture());
    }

    EffectNodePaintState {
        channel_textures,
        output_texture,
    }
}
/// Synchronize `paint_states` with the context's current render targets:
/// drop states for targets that no longer exist and allocate states for
/// newly appeared ones.
fn update_paint_states(
    self_ready: &mut EffectNodeStateReady,
    ctx: &Context,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
) {
    // Discard paint states whose render target the context no longer tracks.
    self_ready
        .paint_states
        .retain(|id, _| ctx.render_target_states().contains_key(id));
    // Allocate a paint state for any render target we don't yet have one for.
    for (check_render_target_id, render_target_state) in ctx.render_target_states().iter() {
        if !self_ready.paint_states.contains_key(check_render_target_id) {
            self_ready.paint_states.insert(
                *check_render_target_id,
                // Note: the shared borrow of `self_ready` here coexists with
                // the insert's mutable borrow via two-phase borrows.
                Self::new_paint_state(self_ready, ctx, device, queue, render_target_state),
            );
        }
    }
}
/// Construct an effect node from `props`: compile its shader, build GPU
/// resources, and allocate paint states for every current render target.
/// On failure the message is logged to stderr and captured in `Error_`.
pub fn new(
    ctx: &Context,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    props: &EffectNodeProps,
) -> Self {
    let setup = Self::setup_render_pipeline(ctx, device, queue, props);
    match setup {
        Ok(mut ready) => {
            Self::update_paint_states(&mut ready, ctx, device, queue);
            Self::Ready(ready)
        }
        Err(msg) => {
            eprintln!("Unable to configure EffectNode: {}", msg);
            Self::Error_(msg)
        }
    }
}
/// Apply `props` to a `Ready` node, refresh its per-render-target paint
/// states, and advance the intensity integral. Non-`Ready` nodes are inert.
///
/// Immutable properties (`name`, `input_count`) that differ from the values
/// fixed at construction transition the node into the `Error_` state.
/// Resolved values are written back into `props` via `update_props`.
pub fn update(
    &mut self,
    ctx: &Context,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    props: &mut EffectNodeProps,
) {
    // Idiom fix: the catch-all single-arm matches below were the reason for
    // the impl-level #[allow(clippy::single_match)]; `if let` is equivalent.
    if let EffectNodeState::Ready(self_ready) = self {
        // The shader name is fixed at construction; a change is unrecoverable here.
        if props.name != self_ready.name {
            *self = EffectNodeState::Error_(
                "EffectNode name changed after construction".to_string(),
            );
            return;
        }
        // The input count is baked into the bind group layouts at construction.
        if let Some(input_count) = props.input_count {
            if input_count != self_ready.input_count {
                *self = EffectNodeState::Error_(
                    "EffectNode input_count changed after construction".to_string(),
                );
                return;
            }
        }
        // Live-tweakable parameters: absent props keep their current values.
        if let Some(intensity) = props.intensity {
            self_ready.intensity = intensity;
        }
        if let Some(frequency) = props.frequency {
            self_ready.frequency = frequency;
        }
        // Write the resolved state back so props reflect the values in use.
        self_ready.update_props(props);
        // Keep per-render-target textures in sync with the context's targets.
        Self::update_paint_states(self_ready, ctx, device, queue);
        // Accumulate intensity over time, wrapped so the value stays bounded.
        self_ready.intensity_integral = (self_ready.intensity_integral
            + self_ready.intensity * ctx.dt)
            % INTENSITY_INTEGRAL_PERIOD;
    }
}
/// Render one frame of the effect for the given render target and return the
/// texture holding the result.
///
/// Non-`Ready` nodes act as a pass-through: the first input (or the
/// context's blank texture) is returned unchanged.
///
/// # Panics
/// Panics if `render_target_id` is unknown to the context, or unknown to
/// this node (i.e. `update()` was not called first).
pub fn paint(
    &mut self,
    ctx: &Context,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    encoder: &mut wgpu::CommandEncoder,
    render_target_id: RenderTargetId,
    inputs: &[Option<ArcTextureViewSampler>],
) -> ArcTextureViewSampler {
    match self {
        EffectNodeState::Ready(self_ready) => {
            let render_target_state = ctx
                .render_target_state(render_target_id)
                .expect("Call to paint() with a render target ID unknown to the context");
            let paint_state = self_ready.paint_states.get_mut(&render_target_id).expect("Call to paint() with a render target ID unknown to the node (did you call update() first?)");

            // Upload this frame's uniforms (audio levels, time, parameters,
            // resolution, dt) to the uniform buffer.
            {
                let width = render_target_state.width();
                let height = render_target_state.height();
                let uniforms = Uniforms {
                    audio: [
                        ctx.audio.low,
                        ctx.audio.mid,
                        ctx.audio.high,
                        ctx.audio.level,
                    ],
                    time: ctx.time,
                    frequency: self_ready.frequency,
                    intensity: self_ready.intensity,
                    intensity_integral: self_ready.intensity_integral,
                    resolution: [width as f32, height as f32],
                    dt: render_target_state.dt(),
                    ..Default::default()
                };
                queue.write_buffer(
                    &self_ready.uniform_buffer,
                    0,
                    bytemuck::cast_slice(&[uniforms]),
                );
            }

            // Resolve the input texture views; missing or disconnected inputs
            // fall back to the context's blank texture.
            let input_binding: Vec<&wgpu::TextureView> = (0..self_ready.input_count)
                .map(|i| match inputs.get(i as usize) {
                    Some(Some(tex)) => tex.view.as_ref(),
                    _ => ctx.blank_texture().view.as_ref(),
                })
                .collect();

            // Render channels from last to first. Each pass draws into
            // `output_texture`, which is then swapped into
            // `channel_textures[channel]`, so lower-numbered passes can sample
            // the freshly rendered higher channels via binding 3.
            for channel in (0..self_ready.channel_count).rev() {
                // Rebuilt every iteration: the swap below changes which view
                // lives in each channel slot.
                let channels: Vec<&wgpu::TextureView> = paint_state
                    .channel_textures
                    .iter()
                    .map(|t| t.view.as_ref())
                    .collect();
                let bind_group_1 = device.create_bind_group(&wgpu::BindGroupDescriptor {
                    layout: &self_ready.bind_group_1_layout,
                    entries: &[wgpu::BindGroupEntry {
                        binding: 0,
                        resource: self_ready.uniform_buffer.as_entire_binding(),
                    }],
                    label: Some("EffectNode bind group 1 (uniforms)"),
                });
                let bind_group_2 = device.create_bind_group(&wgpu::BindGroupDescriptor {
                    layout: &self_ready.bind_group_2_layout,
                    entries: &[
                        // binding 0: shared sampler.
                        wgpu::BindGroupEntry {
                            binding: 0,
                            resource: wgpu::BindingResource::Sampler(&self_ready.sampler),
                        },
                        // binding 1: the node's input textures.
                        wgpu::BindGroupEntry {
                            binding: 1,
                            resource: wgpu::BindingResource::TextureViewArray(
                                input_binding.as_slice(),
                            ),
                        },
                        // binding 2: the render target's noise texture.
                        wgpu::BindGroupEntry {
                            binding: 2,
                            resource: wgpu::BindingResource::TextureView(
                                &render_target_state.noise_texture().view,
                            ),
                        },
                        // binding 3: all channel textures.
                        wgpu::BindGroupEntry {
                            binding: 3,
                            resource: wgpu::BindingResource::TextureViewArray(
                                channels.as_slice(),
                            ),
                        },
                    ],
                    label: Some("EffectNode bind group 2 (textures)"),
                });
                {
                    let mut render_pass =
                        encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                            label: Some("EffectNode render pass"),
                            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                view: paint_state.output_texture.view.as_ref(),
                                resolve_target: None,
                                ops: wgpu::Operations {
                                    // Load (not Clear): the attachment keeps its
                                    // previous contents — presumably to allow
                                    // feedback from an earlier frame; confirm
                                    // against the shaders before changing.
                                    load: wgpu::LoadOp::Load,
                                    store: wgpu::StoreOp::Store,
                                },
                                depth_slice: None,
                            })],
                            depth_stencil_attachment: None,
                            timestamp_writes: None,
                            occlusion_query_set: None,
                        });
                    render_pass.set_pipeline(&self_ready.render_pipelines[channel as usize]);
                    render_pass.set_bind_group(0, &bind_group_1, &[]);
                    render_pass.set_bind_group(1, &bind_group_2, &[]);
                    // Fullscreen quad: 4-vertex triangle strip, no vertex buffers.
                    render_pass.draw(0..4, 0..1);
                }
                // Publish this pass's result into its channel slot; the old
                // channel texture becomes the scratch target for the next pass.
                std::mem::swap(
                    &mut paint_state.channel_textures[channel as usize],
                    &mut paint_state.output_texture,
                );
            }
            // After the final (channel 0) pass and swap, slot 0 holds the
            // freshly rendered output.
            paint_state.channel_textures[0].clone()
        }
        // Not ready: pass through the first input, or blank if none.
        _ => inputs
            .first()
            .cloned()
            .flatten()
            .unwrap_or_else(|| ctx.blank_texture().clone()),
    }
}
}
impl EffectNodeStateReady {
fn update_props(&self, props: &mut EffectNodeProps) {
props.name.clone_from(&self.name);
props.intensity = Some(self.intensity);
props.frequency = Some(self.frequency);
props.input_count = Some(self.input_count);
}
}