use crate::{Actor, ActorBehavior, Message, Port};
use anyhow::{Error, Result};
use reflow_actor::{message::EncodableValue, ActorContext};
use reflow_actor_macro::actor;
use serde_json::json;
use std::collections::HashMap;
/// CPU-side mirror of the shaders' uniform block.
///
/// `#[repr(C)]` plus the explicit `_pad*` fields keep every `vec3` field on a
/// 16-byte boundary (WGSL std140-style alignment) so the struct can be
/// uploaded verbatim through `bytemuck::bytes_of`.
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct SceneUniforms {
    view_proj: [[f32; 4]; 4], // combined view-projection matrix
    light_dir: [f32; 3],      // directional light vector
    _pad0: f32,               // pads light_dir to 16 bytes
    camera_pos: [f32; 3],     // world-space camera position
    time: f32,                // animation time; render_scene uploads 0.0
    light_count: u32,         // entries in the light storage buffer
    _pad4: [u32; 3],          // pads struct to a 16-byte multiple
}
/// GPU scene-render actor: rasterizes the incoming `scene` description into
/// an RGBA8 pixel buffer.
///
/// Binary side-channel payloads (`meshes`, `terrain_mesh`, `texture`,
/// `lights`) and the compiled `material` are stored in process-wide caches as
/// they arrive and reused on every later render, so a frame can be drawn when
/// only `scene` fires. Without a `scene` payload the actor emits nothing.
///
/// Outputs: `output` — raw RGBA8 bytes (`width * height * 4`) — and
/// `metadata` — JSON `{width, height, format, objectCount}`. The declared
/// `error` port is not written by this body; failures are returned as `Err`.
#[actor(
    SceneRenderActor,
    inports::<10>(scene, meshes, terrain_mesh, texture, material, lights),
    outports::<1>(output, metadata, error),
    state(MemoryState)
)]
pub async fn scene_render_actor(ctx: ActorContext) -> Result<HashMap<String, Message>, Error> {
    let payload = ctx.get_payload();
    let config = ctx.get_config_hashmap();
    // Viewport, camera, and quality settings — all optional, with defaults.
    let width = config.get("width").and_then(|v| v.as_u64()).unwrap_or(512) as u32;
    let height = config.get("height").and_then(|v| v.as_u64()).unwrap_or(512) as u32;
    let fov = config.get("fov").and_then(|v| v.as_f64()).unwrap_or(45.0) as f32;
    let cam_pos = [
        config
            .get("cameraPosX")
            .and_then(|v| v.as_f64())
            .unwrap_or(8.0) as f32,
        config
            .get("cameraPosY")
            .and_then(|v| v.as_f64())
            .unwrap_or(6.0) as f32,
        config
            .get("cameraPosZ")
            .and_then(|v| v.as_f64())
            .unwrap_or(10.0) as f32,
    ];
    let cam_target = [
        config
            .get("cameraTargetX")
            .and_then(|v| v.as_f64())
            .unwrap_or(0.0) as f32,
        config
            .get("cameraTargetY")
            .and_then(|v| v.as_f64())
            .unwrap_or(0.0) as f32,
        config
            .get("cameraTargetZ")
            .and_then(|v| v.as_f64())
            .unwrap_or(0.0) as f32,
    ];
    let msaa_samples = config.get("msaa").and_then(|v| v.as_u64()).unwrap_or(4) as u32;
    let near_plane = config.get("near").and_then(|v| v.as_f64()).unwrap_or(1.0) as f32;
    let far_plane = config
        .get("far")
        .and_then(|v| v.as_f64())
        .unwrap_or(10000.0) as f32;
    let clear_color = [
        config.get("bgR").and_then(|v| v.as_f64()).unwrap_or(0.1),
        config.get("bgG").and_then(|v| v.as_f64()).unwrap_or(0.1),
        config.get("bgB").and_then(|v| v.as_f64()).unwrap_or(0.15),
    ];
    // Stash freshly arrived binary payloads in the process-wide caches.
    if let Some(Message::Bytes(b)) = payload.get("meshes") {
        *get_mesh_cache().lock() = Some(std::sync::Arc::new(b.to_vec()));
    }
    if let Some(Message::Bytes(b)) = payload.get("terrain_mesh") {
        *get_terrain_cache().lock() = Some(std::sync::Arc::new(b.to_vec()));
    }
    if let Some(Message::Bytes(b)) = payload.get("texture") {
        // Decode only once; later texture payloads are ignored while a
        // decoded image is cached.
        if get_texture_cache().lock().is_none() {
            if let Ok(img) = image::load_from_memory(&b) {
                let rgba = img.to_rgba8();
                let w = rgba.width();
                let h = rgba.height();
                // Cache layout: [w: u32 LE][h: u32 LE][RGBA8 pixels].
                let mut buf = Vec::with_capacity(8 + (w * h * 4) as usize);
                buf.extend_from_slice(&w.to_le_bytes());
                buf.extend_from_slice(&h.to_le_bytes());
                buf.extend_from_slice(rgba.as_raw());
                *get_texture_cache().lock() = Some(std::sync::Arc::new(buf));
            }
        }
    }
    if let Some(Message::Bytes(b)) = payload.get("lights") {
        *get_light_cache().lock() = Some(std::sync::Arc::new(b.to_vec()));
    }
    // NOTE(review): `light_count` is consumed here but is not listed in the
    // `inports` declaration above — confirm the actor macro still delivers it.
    if let Some(Message::Integer(n)) = payload.get("light_count") {
        *get_light_count_cache().lock() = Some(*n as u32);
    }
    if let Some(Message::Object(obj)) = payload.get("material") {
        let v: serde_json::Value = obj.as_ref().clone().into();
        set_compiled_material(v);
    }
    // A scene payload is mandatory; without one there is nothing to render.
    let scene_data = match payload.get("scene") {
        Some(Message::Object(obj)) => {
            let v: serde_json::Value = obj.as_ref().clone().into();
            v
        }
        _ => return Ok(HashMap::new()),
    };
    // Snapshot the caches so the blocking task owns cheap Arc clones.
    let prefab_mesh: Option<std::sync::Arc<Vec<u8>>> = get_mesh_cache().lock().clone();
    let terrain_mesh: Option<std::sync::Arc<Vec<u8>>> = get_terrain_cache().lock().clone();
    let texture_rgba: Option<std::sync::Arc<Vec<u8>>> = get_texture_cache().lock().clone();
    // NOTE(review): the pool lookup result is never read; kept (underscore-
    // prefixed) in case `get_pool` has side effects — consider removing.
    let _cache: HashMap<String, serde_json::Value> = ctx.get_pool("_cache").into_iter().collect();
    let objects = scene_data
        .get("objects")
        .and_then(|v| v.as_array())
        .cloned()
        .unwrap_or_default();
    let objects_clone = objects.clone();
    let compiled_mat = get_compiled_material();
    // Rendering blocks on the GPU, so run it off the async executor.
    let pixels = tokio::task::spawn_blocking(move || {
        render_scene(
            width,
            height,
            fov,
            cam_pos,
            cam_target,
            msaa_samples,
            clear_color,
            near_plane,
            far_plane,
            &objects_clone,
            prefab_mesh.as_deref().map(|v| v.as_slice()),
            terrain_mesh.as_deref().map(|v| v.as_slice()),
            texture_rgba.as_deref().map(|v| v.as_slice()),
            compiled_mat.as_ref(),
        )
    })
    .await
    .map_err(|e| anyhow::anyhow!("Spawn failed: {}", e))?
    .map_err(|e| anyhow::anyhow!("{}", e))?;
    let mut results = HashMap::new();
    results.insert("output".to_string(), Message::bytes(pixels));
    results.insert(
        "metadata".to_string(),
        Message::object(EncodableValue::from(json!({
            "width": width,
            "height": height,
            "format": "RGBA8",
            "objectCount": objects.len(),
        }))),
    );
    Ok(results)
}
/// Decodes a packed prefab mesh into `(position, normal)` pairs.
///
/// The buffer is a sequence of 24-byte vertices: six little-endian `f32`s
/// (px, py, pz, nx, ny, nz). Any trailing partial vertex is ignored.
fn parse_prefab_mesh(data: &[u8]) -> Vec<([f32; 3], [f32; 3])> {
    const STRIDE: usize = 24;
    data.chunks_exact(STRIDE)
        .map(|chunk| {
            // Read the little-endian f32 starting at byte `at` of this vertex.
            let field = |at: usize| f32::from_le_bytes(chunk[at..at + 4].try_into().unwrap());
            (
                [field(0), field(4), field(8)],
                [field(12), field(16), field(20)],
            )
        })
        .collect()
}
/// Decodes a terrain buffer into vertices and (optionally) triangle indices.
///
/// Vertices use a 32-byte stride; only the first 24 bytes (position + normal,
/// six LE `f32`s) are read per vertex. The split between the vertex region
/// and the trailing `u32` index region is inferred by searching for a square
/// grid size (2..=512) whose `g*g` vertices plus `(g-1)*(g-1)*6` indices
/// exactly account for the buffer length. If no grid size matches, the whole
/// buffer is treated as non-indexed vertices.
fn parse_terrain_mesh(data: &[u8]) -> (Vec<([f32; 3], [f32; 3])>, Vec<u32>) {
    const STRIDE: usize = 32;
    let total = data.len();
    let (vertex_bytes, index_bytes) = (2..=512usize)
        .map(|g| (g * g * STRIDE, (g - 1) * (g - 1) * 6 * 4))
        .find(|&(vb, ib)| vb + ib == total)
        .unwrap_or((total, 0));
    let read_f32 = |off: usize| f32::from_le_bytes(data[off..off + 4].try_into().unwrap());
    let verts: Vec<([f32; 3], [f32; 3])> = (0..vertex_bytes / STRIDE)
        .map_while(|i| {
            let off = i * STRIDE;
            (off + STRIDE <= vertex_bytes).then(|| {
                (
                    [read_f32(off), read_f32(off + 4), read_f32(off + 8)],
                    [read_f32(off + 12), read_f32(off + 16), read_f32(off + 20)],
                )
            })
        })
        .collect();
    let indices: Vec<u32> = (0..index_bytes / 4)
        .map_while(|i| {
            let off = vertex_bytes + i * 4;
            (off + 4 <= total).then(|| u32::from_le_bytes(data[off..off + 4].try_into().unwrap()))
        })
        .collect();
    (verts, indices)
}
/// Applies a per-axis scale followed by a translation to a position.
fn transform_pos(pos: [f32; 3], translate: [f32; 3], scale: [f32; 3]) -> [f32; 3] {
    std::array::from_fn(|axis| pos[axis] * scale[axis] + translate[axis])
}
/// Builds an interleaved CPU vertex stream — 9 `f32` per vertex (position
/// xyz, normal xyz, color rgb; 36 bytes) — for every object in the scene.
///
/// * `terrain` objects use the parsed `terrain_mesh_data` (indexed or not),
///   tinted by height; with no terrain data a flat 8x8 quad grid is emitted.
/// * Other objects use the prefab mesh: pre-colored vertices when the first
///   object's `meshStride` is >= 36, otherwise position+normal vertices
///   tinted with the material color (or a rotating fallback palette). With no
///   prefab mesh at all, a cube stands in (via `generate_cube`, defined
///   elsewhere in this module).
fn build_vertex_buffer(
    objects: &[serde_json::Value],
    prefab_mesh: Option<&[u8]>,
    terrain_mesh_data: Option<&[u8]>,
) -> Vec<f32> {
    let mut all_vertices: Vec<f32> = Vec::new();
    // The first object's `meshStride` describes the prefab layout for all
    // objects; defaults to the 24-byte position+normal layout.
    let mesh_stride = objects
        .first()
        .and_then(|o| o.get("meshStride"))
        .and_then(|v| v.as_u64())
        .unwrap_or(24) as usize;
    // Stride >= 36 means the prefab already carries per-vertex colors.
    let prefab_is_colored = mesh_stride >= 36;
    let prefab_verts = if prefab_is_colored {
        None
    } else {
        prefab_mesh.as_ref().map(|m| parse_prefab_mesh(m))
    };
    let terrain_parsed = terrain_mesh_data.map(parse_terrain_mesh);
    // Palette cycled through for objects without an explicit material color.
    let fallback_colors: [[f32; 3]; 6] = [
        [0.85, 0.45, 0.20],
        [0.30, 0.55, 0.85],
        [0.80, 0.25, 0.35],
        [0.60, 0.75, 0.30],
        [0.70, 0.40, 0.75],
        [0.90, 0.75, 0.25],
    ];
    let mut color_idx = 0;
    for obj in objects {
        let transform = obj.get("transform").cloned().unwrap_or(json!({}));
        // Per-object translation (default origin) and scale (default 1).
        // Rotation is not read here.
        let pos = transform
            .get("position")
            .and_then(|p| p.as_array())
            .map(|a| {
                [
                    a.first().and_then(|v| v.as_f64()).unwrap_or(0.0) as f32,
                    a.get(1).and_then(|v| v.as_f64()).unwrap_or(0.0) as f32,
                    a.get(2).and_then(|v| v.as_f64()).unwrap_or(0.0) as f32,
                ]
            })
            .unwrap_or([0.0; 3]);
        let scl = transform
            .get("scale")
            .and_then(|s| s.as_array())
            .map(|a| {
                [
                    a.first().and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
                    a.get(1).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
                    a.get(2).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
                ]
            })
            .unwrap_or([1.0; 3]);
        let obj_type = obj
            .get("type")
            .and_then(|v| v.as_str())
            .unwrap_or("instance");
        match obj_type {
            "terrain" => {
                if let Some((ref verts, ref indices)) = terrain_parsed {
                    if !indices.is_empty() {
                        // Indexed terrain: expand indices into a flat
                        // triangle list; out-of-range indices are skipped.
                        for idx in indices {
                            let i = *idx as usize;
                            if i < verts.len() {
                                let (p, n) = verts[i];
                                let tp = transform_pos(p, pos, scl);
                                all_vertices.extend_from_slice(&tp);
                                all_vertices.extend_from_slice(&n);
                                // Green/brown tint from height clamped to [0, 3].
                                let height_factor = (tp[1] + 1.0).max(0.0).min(3.0) / 3.0;
                                let c = [
                                    0.25 + 0.15 * height_factor,
                                    0.55 + 0.20 * height_factor,
                                    0.15 + 0.10 * (1.0 - height_factor),
                                ];
                                all_vertices.extend_from_slice(&c);
                            }
                        }
                    } else {
                        // Non-indexed terrain: vertices already form triangles.
                        for (p, n) in verts {
                            let tp = transform_pos(*p, pos, scl);
                            all_vertices.extend_from_slice(&tp);
                            all_vertices.extend_from_slice(n);
                            let height_factor = (tp[1] + 1.0).max(0.0).min(3.0) / 3.0;
                            let c = [
                                0.25 + 0.15 * height_factor,
                                0.55 + 0.20 * height_factor,
                                0.15 + 0.10 * (1.0 - height_factor),
                            ];
                            all_vertices.extend_from_slice(&c);
                        }
                    }
                } else {
                    // No cached terrain mesh: synthesize a flat 8x8 quad grid
                    // sized from the object's terrain.width/depth.
                    let tw = obj
                        .get("terrain")
                        .and_then(|t| t.get("width"))
                        .and_then(|v| v.as_f64())
                        .unwrap_or(10.0) as f32;
                    let td = obj
                        .get("terrain")
                        .and_then(|t| t.get("depth"))
                        .and_then(|v| v.as_f64())
                        .unwrap_or(10.0) as f32;
                    let segs = 8;
                    for gz in 0..segs {
                        for gx in 0..segs {
                            let x0 = pos[0] + (gx as f32 / segs as f32 - 0.5) * tw;
                            let z0 = pos[2] + (gz as f32 / segs as f32 - 0.5) * td;
                            let x1 = pos[0] + ((gx + 1) as f32 / segs as f32 - 0.5) * tw;
                            let z1 = pos[2] + ((gz + 1) as f32 / segs as f32 - 0.5) * td;
                            let y = pos[1];
                            let n = [0.0f32, 1.0, 0.0];
                            let c = [0.3f32, 0.6, 0.2];
                            // Two triangles per grid cell, all at height y.
                            for v in &[
                                [x0, y, z0],
                                [x1, y, z0],
                                [x1, y, z1],
                                [x0, y, z0],
                                [x1, y, z1],
                                [x0, y, z1],
                            ] {
                                all_vertices.extend_from_slice(v);
                                all_vertices.extend_from_slice(&n);
                                all_vertices.extend_from_slice(&c);
                            }
                        }
                    }
                }
            }
            _ => {
                // Instance object: explicit material color, else palette.
                let mat = obj.get("material");
                let base_color = mat
                    .and_then(|m| m.get("color"))
                    .and_then(|c| c.as_array())
                    .map(|a| {
                        [
                            a.first().and_then(|v| v.as_f64()).unwrap_or(0.8) as f32,
                            a.get(1).and_then(|v| v.as_f64()).unwrap_or(0.8) as f32,
                            a.get(2).and_then(|v| v.as_f64()).unwrap_or(0.8) as f32,
                        ]
                    })
                    .unwrap_or_else(|| {
                        let c = fallback_colors[color_idx % fallback_colors.len()];
                        color_idx += 1;
                        c
                    });
                if prefab_is_colored {
                    // 36-byte prefab: transform positions, copy normals and
                    // embedded colors straight through.
                    if let Some(ref mesh) = prefab_mesh {
                        let stride = 36;
                        let vc = mesh.len() / stride;
                        for vi in 0..vc {
                            let off = vi * stride;
                            if off + stride > mesh.len() {
                                break;
                            }
                            let px = f32::from_le_bytes(mesh[off..off + 4].try_into().unwrap());
                            let py = f32::from_le_bytes(mesh[off + 4..off + 8].try_into().unwrap());
                            let pz =
                                f32::from_le_bytes(mesh[off + 8..off + 12].try_into().unwrap());
                            let tp = transform_pos([px, py, pz], pos, scl);
                            all_vertices.extend_from_slice(&tp);
                            // Normal (bytes 12..24).
                            for k in 0..3 {
                                let f = f32::from_le_bytes(
                                    mesh[off + 12 + k * 4..off + 16 + k * 4].try_into().unwrap(),
                                );
                                all_vertices.push(f);
                            }
                            // Color (bytes 24..36).
                            for k in 0..3 {
                                let f = f32::from_le_bytes(
                                    mesh[off + 24 + k * 4..off + 28 + k * 4].try_into().unwrap(),
                                );
                                all_vertices.push(f);
                            }
                        }
                    }
                } else if let Some(ref verts) = prefab_verts {
                    // 24-byte prefab: position + normal, tinted with base_color.
                    for (p, n) in verts {
                        let tp = transform_pos(*p, pos, scl);
                        all_vertices.extend_from_slice(&tp);
                        all_vertices.extend_from_slice(n);
                        all_vertices.extend_from_slice(&base_color);
                    }
                } else {
                    // No prefab mesh: fall back to a cube scaled by X only.
                    let s = 0.4 * scl[0];
                    let cube = generate_cube(pos, s, base_color);
                    all_vertices.extend_from_slice(&cube);
                }
            }
        }
    }
    all_vertices
}
/// A lazily-built render pipeline cached together with the bind-group layout
/// needed to create compatible bind groups.
#[allow(dead_code)]
struct CachedScenePipeline {
    pipeline: wgpu::RenderPipeline,
    bgl: wgpu::BindGroupLayout,
    // Sample count used at creation. NOTE(review): not re-validated on cache
    // hits (see get_or_create_pipeline), so mixed MSAA requests could
    // mismatch the cached pipeline — confirm callers use one >1 value.
    sample_count: u32,
}
/// A lazily-created, lock-protected slot holding an optional shared byte blob.
type BytesCache = parking_lot::Mutex<Option<std::sync::Arc<Vec<u8>>>>;

// Process-wide caches for the most recently received binary payloads, reused
// across actor invocations.
static MESH_CACHE: std::sync::OnceLock<BytesCache> = std::sync::OnceLock::new();
static TERRAIN_CACHE: std::sync::OnceLock<BytesCache> = std::sync::OnceLock::new();
static TEXTURE_CACHE: std::sync::OnceLock<BytesCache> = std::sync::OnceLock::new();

/// Accessor for the prefab-mesh byte cache.
fn get_mesh_cache() -> &'static BytesCache {
    MESH_CACHE.get_or_init(Default::default)
}

/// Accessor for the terrain-mesh byte cache.
fn get_terrain_cache() -> &'static BytesCache {
    TERRAIN_CACHE.get_or_init(Default::default)
}

/// Accessor for the decoded-texture cache (8-byte w/h header + RGBA8 pixels).
fn get_texture_cache() -> &'static BytesCache {
    TEXTURE_CACHE.get_or_init(Default::default)
}

static LIGHT_CACHE: std::sync::OnceLock<BytesCache> = std::sync::OnceLock::new();
static LIGHT_COUNT_CACHE: std::sync::OnceLock<parking_lot::Mutex<Option<u32>>> =
    std::sync::OnceLock::new();

/// Accessor for the raw light-buffer byte cache.
fn get_light_cache() -> &'static BytesCache {
    LIGHT_CACHE.get_or_init(Default::default)
}

/// Accessor for the cached light count.
fn get_light_count_cache() -> &'static parking_lot::Mutex<Option<u32>> {
    LIGHT_COUNT_CACHE.get_or_init(Default::default)
}
/// A diffuse texture uploaded to the GPU, cached for the process lifetime.
struct CachedGpuDiffuse {
    view: wgpu::TextureView,
    sampler: wgpu::Sampler,
    // Held only so the GPU texture backing `view` stays alive.
    _texture: wgpu::Texture,
}
// Created once from the first cached texture payload; never invalidated.
static GPU_DIFFUSE_CACHE: std::sync::OnceLock<CachedGpuDiffuse> = std::sync::OnceLock::new();
use std::sync::OnceLock;
// Immortal pipeline cache slots: one for multisampled, one for single-sample
// rendering (see get_or_create_pipeline).
static SCENE_PIPELINE_4X: OnceLock<CachedScenePipeline> = OnceLock::new();
static SCENE_PIPELINE_1X: OnceLock<CachedScenePipeline> = OnceLock::new();
static COMPILED_MATERIAL: OnceLock<parking_lot::Mutex<Option<serde_json::Value>>> = OnceLock::new();

/// Lazily initializes and returns the mutex guarding the compiled-material JSON.
fn get_compiled_material_cache() -> &'static parking_lot::Mutex<Option<serde_json::Value>> {
    COMPILED_MATERIAL.get_or_init(Default::default)
}

/// Stores a compiled material description, replacing any previous one.
fn set_compiled_material(mat: serde_json::Value) {
    let mut slot = get_compiled_material_cache().lock();
    *slot = Some(mat);
}

/// Returns a clone of the most recently stored compiled material, if any.
fn get_compiled_material() -> Option<serde_json::Value> {
    get_compiled_material_cache().lock().clone()
}
/// A render pipeline compiled from runtime-supplied WGSL, cached by hash.
struct CachedDynamicPipeline {
    pipeline: wgpu::RenderPipeline,
    bgl: wgpu::BindGroupLayout,
}

// Maps a material's `pipelineHash` to its compiled pipeline.
static DYNAMIC_PIPELINE_CACHE: OnceLock<
    parking_lot::Mutex<std::collections::HashMap<u64, CachedDynamicPipeline>>,
> = OnceLock::new();

/// Lazily initializes and returns the hash-to-pipeline cache.
fn get_dynamic_cache(
) -> &'static parking_lot::Mutex<std::collections::HashMap<u64, CachedDynamicPipeline>> {
    DYNAMIC_PIPELINE_CACHE.get_or_init(Default::default)
}
/// Returns the cached flat-shaded scene pipeline for the requested MSAA
/// class, building it on first use.
///
/// Vertex layout (36-byte stride): position `f32x3` @0, normal `f32x3` @12,
/// color `f32x3` @24. Binding 0 is a single uniform buffer visible to both
/// stages. Color target is `Rgba8UnormSrgb`; depth is `Depth32Float`.
///
/// NOTE(review): every `sample_count > 1` shares the `4X` slot and the
/// pipeline is built with whatever count the *first* caller passed; if 2x and
/// 4x were both requested at runtime, the cached pipeline's multisample count
/// would not match later attachments — confirm callers settle on one value.
fn get_or_create_pipeline(
    device: &wgpu::Device,
    sample_count: u32,
) -> &'static CachedScenePipeline {
    // Two immortal cache slots: multisampled vs. single-sampled.
    let lock = if sample_count > 1 {
        &SCENE_PIPELINE_4X
    } else {
        &SCENE_PIPELINE_1X
    };
    lock.get_or_init(|| {
        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("Scene Shader"),
            source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(SCENE_SHADER)),
        });
        // Single uniform buffer shared by vertex and fragment stages.
        let bgl = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: None,
            entries: &[wgpu::BindGroupLayoutEntry {
                binding: 0,
                visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
                ty: wgpu::BindingType::Buffer {
                    ty: wgpu::BufferBindingType::Uniform,
                    has_dynamic_offset: false,
                    min_binding_size: None,
                },
                count: None,
            }],
        });
        let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("Scene Pipeline"),
            layout: Some(
                &device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                    label: None,
                    bind_group_layouts: &[&bgl],
                    push_constant_ranges: &[],
                }),
            ),
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: Some("vs_main"),
                compilation_options: Default::default(),
                // Interleaved position / normal / color: 9 f32 per vertex.
                buffers: &[wgpu::VertexBufferLayout {
                    array_stride: 36,
                    step_mode: wgpu::VertexStepMode::Vertex,
                    attributes: &[
                        wgpu::VertexAttribute {
                            format: wgpu::VertexFormat::Float32x3,
                            offset: 0,
                            shader_location: 0,
                        },
                        wgpu::VertexAttribute {
                            format: wgpu::VertexFormat::Float32x3,
                            offset: 12,
                            shader_location: 1,
                        },
                        wgpu::VertexAttribute {
                            format: wgpu::VertexFormat::Float32x3,
                            offset: 24,
                            shader_location: 2,
                        },
                    ],
                }],
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader,
                entry_point: Some("fs_main"),
                compilation_options: Default::default(),
                targets: &[Some(wgpu::ColorTargetState {
                    format: wgpu::TextureFormat::Rgba8UnormSrgb,
                    blend: Some(wgpu::BlendState::REPLACE),
                    write_mask: wgpu::ColorWrites::ALL,
                })],
            }),
            primitive: wgpu::PrimitiveState {
                topology: wgpu::PrimitiveTopology::TriangleList,
                // Face culling disabled.
                cull_mode: None,
                ..Default::default()
            },
            depth_stencil: Some(wgpu::DepthStencilState {
                format: wgpu::TextureFormat::Depth32Float,
                depth_write_enabled: true,
                depth_compare: wgpu::CompareFunction::Less,
                stencil: wgpu::StencilState::default(),
                bias: wgpu::DepthBiasState::default(),
            }),
            multisample: wgpu::MultisampleState {
                count: sample_count,
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            multiview: None,
            cache: None,
        });
        CachedScenePipeline {
            pipeline,
            bgl,
            sample_count,
        }
    })
}
// Cache slots for the textured pipeline, split by MSAA class.
static TEXTURED_PIPELINE_1X: OnceLock<CachedScenePipeline> = OnceLock::new();
static TEXTURED_PIPELINE_4X: OnceLock<CachedScenePipeline> = OnceLock::new();
/// Returns the cached textured scene pipeline for the requested MSAA class,
/// building it on first use.
///
/// Vertex layout (32-byte stride): position `f32x3` @0, normal `f32x3` @12,
/// uv `f32x2` @24. Bindings: 0 = uniforms, 1 = diffuse texture, 2 = sampler.
///
/// NOTE(review): as in `get_or_create_pipeline`, all `sample_count > 1`
/// requests share one slot created with the first count seen.
fn get_or_create_textured_pipeline(
    device: &wgpu::Device,
    sample_count: u32,
) -> &'static CachedScenePipeline {
    let lock = if sample_count > 1 {
        &TEXTURED_PIPELINE_4X
    } else {
        &TEXTURED_PIPELINE_1X
    };
    lock.get_or_init(|| {
        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("Textured Scene Shader"),
            source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(TEXTURED_SHADER)),
        });
        let bgl = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: None,
            entries: &[
                // 0: uniform buffer (both stages).
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: None,
                    },
                    count: None,
                },
                // 1: diffuse texture (fragment only).
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                // 2: filtering sampler (fragment only).
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });
        let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("Textured Scene Pipeline"),
            layout: Some(
                &device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                    label: None,
                    bind_group_layouts: &[&bgl],
                    push_constant_ranges: &[],
                }),
            ),
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: Some("vs_main"),
                compilation_options: Default::default(),
                // Interleaved position / normal / uv: 8 f32 per vertex.
                buffers: &[wgpu::VertexBufferLayout {
                    array_stride: 32,
                    step_mode: wgpu::VertexStepMode::Vertex,
                    attributes: &[
                        wgpu::VertexAttribute {
                            format: wgpu::VertexFormat::Float32x3,
                            offset: 0,
                            shader_location: 0,
                        },
                        wgpu::VertexAttribute {
                            format: wgpu::VertexFormat::Float32x3,
                            offset: 12,
                            shader_location: 1,
                        },
                        wgpu::VertexAttribute {
                            format: wgpu::VertexFormat::Float32x2,
                            offset: 24,
                            shader_location: 2,
                        },
                    ],
                }],
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader,
                entry_point: Some("fs_main"),
                compilation_options: Default::default(),
                targets: &[Some(wgpu::ColorTargetState {
                    format: wgpu::TextureFormat::Rgba8UnormSrgb,
                    blend: Some(wgpu::BlendState::REPLACE),
                    write_mask: wgpu::ColorWrites::ALL,
                })],
            }),
            primitive: wgpu::PrimitiveState {
                topology: wgpu::PrimitiveTopology::TriangleList,
                // Face culling disabled.
                cull_mode: None,
                ..Default::default()
            },
            depth_stencil: Some(wgpu::DepthStencilState {
                format: wgpu::TextureFormat::Depth32Float,
                depth_write_enabled: true,
                depth_compare: wgpu::CompareFunction::Less,
                stencil: wgpu::StencilState::default(),
                bias: wgpu::DepthBiasState::default(),
            }),
            multisample: wgpu::MultisampleState {
                count: sample_count,
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            multiview: None,
            cache: None,
        });
        CachedScenePipeline {
            pipeline,
            bgl,
            sample_count,
        }
    })
}
/// WGSL for the textured pipeline: positions are transformed by the
/// view-projection matrix; the fragment stage samples the diffuse texture
/// (V coordinate flipped) and applies a 0.2 ambient + 0.8 Lambert
/// directional term.
///
/// The `Uniforms` block here declares only the leading fields of the
/// Rust-side `SceneUniforms`; the bound buffer may carry extra trailing
/// fields the shader does not read.
const TEXTURED_SHADER: &str = r#"
struct Uniforms {
view_proj: mat4x4f,
light_dir: vec3f,
_pad: f32,
camera_pos: vec3f,
time: f32,
};
@group(0) @binding(0) var<uniform> u: Uniforms;
@group(0) @binding(1) var diffuse_tex: texture_2d<f32>;
@group(0) @binding(2) var diffuse_samp: sampler;
struct VertexInput {
@location(0) position: vec3f,
@location(1) normal: vec3f,
@location(2) uv: vec2f,
};
struct VertexOutput {
@builtin(position) clip_pos: vec4f,
@location(0) normal: vec3f,
@location(1) uv: vec2f,
};
@vertex
fn vs_main(in: VertexInput) -> VertexOutput {
var out: VertexOutput;
out.clip_pos = u.view_proj * vec4f(in.position, 1.0);
out.normal = in.normal;
out.uv = in.uv;
return out;
}
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4f {
let uv = vec2f(in.uv.x, 1.0 - in.uv.y); // flip V for OpenGL→Vulkan
let tex_color = textureSample(diffuse_tex, diffuse_samp, uv);
let n = normalize(in.normal);
let diff = max(dot(n, u.light_dir), 0.0);
let light = 0.2 + diff * 0.8;
let col = tex_color.rgb * light;
return vec4f(col, 1.0);
}
"#;
fn render_scene(
width: u32,
height: u32,
fov: f32,
cam_pos: [f32; 3],
cam_target: [f32; 3],
msaa_samples: u32,
clear_color: [f64; 3],
near: f32,
far: f32,
objects: &[serde_json::Value],
prefab_mesh: Option<&[u8]>,
terrain_mesh: Option<&[u8]>,
texture_rgba: Option<&[u8]>,
compiled_material: Option<&serde_json::Value>,
) -> Result<Vec<u8>, String> {
use wgpu::util::DeviceExt;
let ctx = &*crate::gpu::context::GPU_CONTEXT;
let device = ctx.device();
let queue = ctx.queue();
let mesh_stride = objects
.first()
.and_then(|o| o.get("meshStride"))
.and_then(|v| v.as_u64())
.unwrap_or(24) as usize;
let has_compiled_material = compiled_material.is_some() && prefab_mesh.is_some();
let has_texture = !has_compiled_material
&& texture_rgba.is_some()
&& mesh_stride == 32
&& prefab_mesh.is_some();
let render_mode: u8;
let (vertex_data, vertex_count, use_textured_pipeline) = if has_compiled_material {
let mat = compiled_material.unwrap();
let mat_stride = mat
.get("vertexStride")
.and_then(|v| v.as_u64())
.unwrap_or(mesh_stride as u64) as usize;
let mesh = prefab_mesh.unwrap();
let vc = mesh.len() / mat_stride.max(1);
let transform = objects
.first()
.and_then(|o| o.get("transform"))
.cloned()
.unwrap_or(serde_json::json!({}));
let pos = transform
.get("position")
.and_then(|p| p.as_array())
.map(|a| {
[
a.first().and_then(|v| v.as_f64()).unwrap_or(0.0) as f32,
a.get(1).and_then(|v| v.as_f64()).unwrap_or(0.0) as f32,
a.get(2).and_then(|v| v.as_f64()).unwrap_or(0.0) as f32,
]
})
.unwrap_or([0.0; 3]);
let scl = transform
.get("scale")
.and_then(|s| s.as_array())
.map(|a| {
[
a.first().and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
a.get(1).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
a.get(2).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
]
})
.unwrap_or([1.0; 3]);
let mut transformed = Vec::with_capacity(mesh.len());
for i in 0..vc {
let off = i * mat_stride;
let px = f32::from_le_bytes(mesh[off..off + 4].try_into().unwrap());
let py = f32::from_le_bytes(mesh[off + 4..off + 8].try_into().unwrap());
let pz = f32::from_le_bytes(mesh[off + 8..off + 12].try_into().unwrap());
let tp = transform_pos([px, py, pz], pos, scl);
transformed.extend_from_slice(&tp[0].to_le_bytes());
transformed.extend_from_slice(&tp[1].to_le_bytes());
transformed.extend_from_slice(&tp[2].to_le_bytes());
transformed.extend_from_slice(&mesh[off + 12..off + mat_stride]);
}
render_mode = 2;
(transformed, vc, false)
} else if has_texture {
let mesh = prefab_mesh.unwrap();
let vc = mesh.len() / 32;
let transform = objects
.first()
.and_then(|o| o.get("transform"))
.cloned()
.unwrap_or(serde_json::json!({}));
let pos = transform
.get("position")
.and_then(|p| p.as_array())
.map(|a| {
[
a.first().and_then(|v| v.as_f64()).unwrap_or(0.0) as f32,
a.get(1).and_then(|v| v.as_f64()).unwrap_or(0.0) as f32,
a.get(2).and_then(|v| v.as_f64()).unwrap_or(0.0) as f32,
]
})
.unwrap_or([0.0; 3]);
let scl = transform
.get("scale")
.and_then(|s| s.as_array())
.map(|a| {
[
a.first().and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
a.get(1).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
a.get(2).and_then(|v| v.as_f64()).unwrap_or(1.0) as f32,
]
})
.unwrap_or([1.0; 3]);
let mut transformed = Vec::with_capacity(mesh.len());
for i in 0..vc {
let off = i * 32;
let px = f32::from_le_bytes(mesh[off..off + 4].try_into().unwrap());
let py = f32::from_le_bytes(mesh[off + 4..off + 8].try_into().unwrap());
let pz = f32::from_le_bytes(mesh[off + 8..off + 12].try_into().unwrap());
let tp = transform_pos([px, py, pz], pos, scl);
transformed.extend_from_slice(&tp[0].to_le_bytes());
transformed.extend_from_slice(&tp[1].to_le_bytes());
transformed.extend_from_slice(&tp[2].to_le_bytes());
transformed.extend_from_slice(&mesh[off + 12..off + 32]);
}
render_mode = 1;
(transformed, vc, true)
} else {
let all_vertices = build_vertex_buffer(objects, prefab_mesh, terrain_mesh);
if all_vertices.is_empty() {
return Ok(vec![30; (width * height * 4) as usize]);
}
let vc = all_vertices.len() / 9;
render_mode = 0;
(bytemuck::cast_slice(&all_vertices).to_vec(), vc, false)
};
if vertex_count == 0 {
return Ok(vec![30; (width * height * 4) as usize]);
}
let sample_count = match msaa_samples {
1 => 1,
2 => 2,
_ => 4,
};
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertices"),
contents: &vertex_data,
usage: wgpu::BufferUsages::VERTEX,
});
let view_proj = build_view_proj(
cam_pos,
cam_target,
fov,
width as f32 / height as f32,
near,
far,
);
let light_buffer_data: Option<Vec<u8>> = get_light_cache().lock().as_ref().map(|a| a.to_vec());
let light_count = get_light_count_cache().lock().unwrap_or(0);
let uniforms = SceneUniforms {
view_proj,
light_dir: [0.577, 0.577, -0.577],
_pad0: 0.0,
camera_pos: cam_pos,
time: 0.0,
light_count,
_pad4: [0; 3],
};
let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Uniforms"),
contents: bytemuck::bytes_of(&uniforms),
usage: wgpu::BufferUsages::UNIFORM,
});
let resolve_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Resolve"),
size: wgpu::Extent3d {
width,
height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
view_formats: &[],
});
let resolve_view = resolve_texture.create_view(&wgpu::TextureViewDescriptor::default());
let color_usage = if sample_count > 1 {
wgpu::TextureUsages::RENDER_ATTACHMENT
} else {
wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC
};
let color_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Color"),
size: wgpu::Extent3d {
width,
height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: color_usage,
view_formats: &[],
});
let color_view = color_texture.create_view(&wgpu::TextureViewDescriptor::default());
let depth_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Depth"),
size: wgpu::Extent3d {
width,
height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Depth32Float,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
view_formats: &[],
});
let depth_view = depth_texture.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder =
device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
if render_mode == 2 {
let mat = compiled_material.unwrap();
let vert_wgsl = mat.get("vertexWgsl").and_then(|v| v.as_str()).unwrap_or("");
let frag_wgsl = mat
.get("fragmentWgsl")
.and_then(|v| v.as_str())
.unwrap_or("");
let pipeline_hash = mat
.get("pipelineHash")
.and_then(|v| v.as_u64())
.unwrap_or(0);
let mat_stride = mat
.get("vertexStride")
.and_then(|v| v.as_u64())
.unwrap_or(24) as u64;
let has_uv = mat_stride >= 32;
let mut dyn_cache = get_dynamic_cache().lock();
if !dyn_cache.contains_key(&pipeline_hash) {
let vs_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("PBR Vertex"),
source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(vert_wgsl)),
});
let fs_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("PBR Fragment"),
source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(frag_wgsl)),
});
let bgl = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
],
});
let mut attrs = vec![
wgpu::VertexAttribute {
format: wgpu::VertexFormat::Float32x3,
offset: 0,
shader_location: 0,
},
wgpu::VertexAttribute {
format: wgpu::VertexFormat::Float32x3,
offset: 12,
shader_location: 1,
},
];
if has_uv {
attrs.push(wgpu::VertexAttribute {
format: wgpu::VertexFormat::Float32x2,
offset: 24,
shader_location: 2,
});
}
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("PBR Pipeline"),
layout: Some(
&device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: None,
bind_group_layouts: &[&bgl],
push_constant_ranges: &[],
}),
),
vertex: wgpu::VertexState {
module: &vs_module,
entry_point: Some("vs_main"),
compilation_options: Default::default(),
buffers: &[wgpu::VertexBufferLayout {
array_stride: mat_stride,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &attrs,
}],
},
fragment: Some(wgpu::FragmentState {
module: &fs_module,
entry_point: Some("fs_main"),
compilation_options: Default::default(),
targets: &[Some(wgpu::ColorTargetState {
format: wgpu::TextureFormat::Rgba8UnormSrgb,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrites::ALL,
})],
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
cull_mode: None,
..Default::default()
},
depth_stencil: Some(wgpu::DepthStencilState {
format: wgpu::TextureFormat::Depth32Float,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil: wgpu::StencilState::default(),
bias: wgpu::DepthBiasState::default(),
}),
multisample: wgpu::MultisampleState {
count: sample_count,
mask: !0,
alpha_to_coverage_enabled: false,
},
multiview: None,
cache: None,
});
dyn_cache.insert(pipeline_hash, CachedDynamicPipeline { pipeline, bgl });
}
let cached = dyn_cache.get(&pipeline_hash).unwrap();
let light_data = light_buffer_data.as_deref().unwrap_or(&[0u8; 64]); let light_gpu_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Lights"),
contents: light_data,
usage: wgpu::BufferUsages::STORAGE,
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &cached.bgl,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: uniform_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 1,
resource: light_gpu_buffer.as_entire_binding(),
},
],
});
{
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("PBR Material Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &color_view,
resolve_target: if sample_count > 1 {
Some(&resolve_view)
} else {
None
},
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: clear_color[0],
g: clear_color[1],
b: clear_color[2],
a: 1.0,
}),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
view: &depth_view,
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Clear(1.0),
store: wgpu::StoreOp::Store,
}),
stencil_ops: None,
}),
timestamp_writes: None,
occlusion_query_set: None,
});
pass.set_pipeline(&cached.pipeline);
pass.set_bind_group(0, &bind_group, &[]);
pass.set_vertex_buffer(0, vertex_buffer.slice(..));
pass.draw(0..vertex_count as u32, 0..1);
}
drop(dyn_cache);
} else if use_textured_pipeline {
let cached_pipe = get_or_create_textured_pipeline(device, sample_count);
let gpu_diff = GPU_DIFFUSE_CACHE.get_or_init(|| {
let tex_data = texture_rgba.as_ref().unwrap();
let tex_w = u32::from_le_bytes(tex_data[0..4].try_into().unwrap());
let tex_h = u32::from_le_bytes(tex_data[4..8].try_into().unwrap());
let tex_pixels = &tex_data[8..];
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Diffuse"),
size: wgpu::Extent3d {
width: tex_w,
height: tex_h,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
view_formats: &[],
});
queue.write_texture(
wgpu::TexelCopyTextureInfo {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
tex_pixels,
wgpu::TexelCopyBufferLayout {
offset: 0,
bytes_per_row: Some(tex_w * 4),
rows_per_image: Some(tex_h),
},
wgpu::Extent3d {
width: tex_w,
height: tex_h,
depth_or_array_layers: 1,
},
);
CachedGpuDiffuse {
view: texture.create_view(&wgpu::TextureViewDescriptor::default()),
sampler: device.create_sampler(&wgpu::SamplerDescriptor {
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
..Default::default()
}),
_texture: texture,
}
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &cached_pipe.bgl,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: uniform_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureView(&gpu_diff.view),
},
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::Sampler(&gpu_diff.sampler),
},
],
});
{
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Textured Scene Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &color_view,
resolve_target: if sample_count > 1 {
Some(&resolve_view)
} else {
None
},
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: clear_color[0],
g: clear_color[1],
b: clear_color[2],
a: 1.0,
}),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
view: &depth_view,
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Clear(1.0),
store: wgpu::StoreOp::Store,
}),
stencil_ops: None,
}),
timestamp_writes: None,
occlusion_query_set: None,
});
pass.set_pipeline(&cached_pipe.pipeline);
pass.set_bind_group(0, &bind_group, &[]);
pass.set_vertex_buffer(0, vertex_buffer.slice(..));
pass.draw(0..vertex_count as u32, 0..1);
}
} else {
let cached = get_or_create_pipeline(device, sample_count);
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &cached.bgl,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: uniform_buffer.as_entire_binding(),
}],
});
{
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Scene Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &color_view,
resolve_target: if sample_count > 1 {
Some(&resolve_view)
} else {
None
},
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: clear_color[0],
g: clear_color[1],
b: clear_color[2],
a: 1.0,
}),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
view: &depth_view,
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Clear(1.0),
store: wgpu::StoreOp::Store,
}),
stencil_ops: None,
}),
timestamp_writes: None,
occlusion_query_set: None,
});
pass.set_pipeline(&cached.pipeline);
pass.set_bind_group(0, &bind_group, &[]);
pass.set_vertex_buffer(0, vertex_buffer.slice(..));
pass.draw(0..vertex_count as u32, 0..1);
}
}
let padded_row = (width * 4 + 255) & !255;
let readback = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Readback"),
size: (padded_row * height) as u64,
usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
mapped_at_creation: false,
});
let readback_src = if sample_count > 1 {
&resolve_texture
} else {
&color_texture
};
encoder.copy_texture_to_buffer(
wgpu::TexelCopyTextureInfo {
texture: readback_src,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
wgpu::TexelCopyBufferInfo {
buffer: &readback,
layout: wgpu::TexelCopyBufferLayout {
offset: 0,
bytes_per_row: Some(padded_row),
rows_per_image: Some(height),
},
},
wgpu::Extent3d {
width,
height,
depth_or_array_layers: 1,
},
);
queue.submit(std::iter::once(encoder.finish()));
let slice = readback.slice(..);
let (tx, rx) = flume::bounded(1);
slice.map_async(wgpu::MapMode::Read, move |r| {
let _ = tx.send(r);
});
device.poll(wgpu::Maintain::Wait);
rx.recv()
.map_err(|_| "Map failed".to_string())?
.map_err(|e| format!("Map: {:?}", e))?;
let data = slice.get_mapped_range();
let mut pixels = Vec::with_capacity((width * height * 4) as usize);
for y in 0..height {
let start = (y * padded_row) as usize;
let end = start + (width * 4) as usize;
pixels.extend_from_slice(&data[start..end]);
}
drop(data);
readback.unmap();
Ok(pixels)
}
/// Builds an axis-aligned cube as a flat, non-indexed triangle list.
///
/// Every one of the 36 vertices contributes 9 floats in the order
/// position (3) · normal (3) · color (3), so the result always holds
/// 324 elements. Faces are emitted -Z, +Z, +Y, -Y, +X, -X, each quad
/// split into the triangles (0,1,2) and (0,2,3) of its corner list.
fn generate_cube(center: [f32; 3], half: f32, color: [f32; 3]) -> Vec<f32> {
    // Name the six extents once instead of recomputing center±half per corner.
    let (x0, x1) = (center[0] - half, center[0] + half);
    let (y0, y1) = (center[1] - half, center[1] + half);
    let (z0, z1) = (center[2] - half, center[2] + half);
    // (face normal, quad corners) — same order and winding as before.
    let faces: [([f32; 3], [[f32; 3]; 4]); 6] = [
        (
            [0.0, 0.0, -1.0],
            [[x0, y0, z0], [x1, y0, z0], [x1, y1, z0], [x0, y1, z0]],
        ),
        (
            [0.0, 0.0, 1.0],
            [[x1, y0, z1], [x0, y0, z1], [x0, y1, z1], [x1, y1, z1]],
        ),
        (
            [0.0, 1.0, 0.0],
            [[x0, y1, z0], [x1, y1, z0], [x1, y1, z1], [x0, y1, z1]],
        ),
        (
            [0.0, -1.0, 0.0],
            [[x0, y0, z1], [x1, y0, z1], [x1, y0, z0], [x0, y0, z0]],
        ),
        (
            [1.0, 0.0, 0.0],
            [[x1, y0, z0], [x1, y0, z1], [x1, y1, z1], [x1, y1, z0]],
        ),
        (
            [-1.0, 0.0, 0.0],
            [[x0, y0, z1], [x0, y0, z0], [x0, y1, z0], [x0, y1, z1]],
        ),
    ];
    // 6 faces * 2 triangles * 3 vertices * 9 floats = 324.
    let mut out = Vec::with_capacity(324);
    for (normal, corners) in &faces {
        for &i in &[0usize, 1, 2, 0, 2, 3] {
            out.extend_from_slice(&corners[i]);
            out.extend_from_slice(normal);
            out.extend_from_slice(&color);
        }
    }
    out
}
/// Builds a combined view-projection matrix (column-major) for a camera at
/// `eye` looking toward `target`, using a fixed +Y world up and a GL-style
/// perspective projection (`fov_deg` is the vertical field of view).
///
/// NOTE(review): `side` is computed as cross(world_up, forward), which is the
/// mirror of the conventional cross(forward, world_up) basis — presumably an
/// intentional handedness choice for this renderer; confirm before changing.
fn build_view_proj(
    eye: [f32; 3],
    target: [f32; 3],
    fov_deg: f32,
    aspect: f32,
    near: f32,
    far: f32,
) -> [[f32; 4]; 4] {
    // Orthonormal camera basis derived from the look direction.
    let forward = normalize([
        target[0] - eye[0],
        target[1] - eye[1],
        target[2] - eye[2],
    ]);
    let side = normalize(cross([0.0, 1.0, 0.0], forward));
    let cam_up = cross(forward, side);
    // View matrix: transposed rotation in the upper 3x3, camera translation
    // folded into the last column (column-major storage).
    let view = [
        [side[0], cam_up[0], -forward[0], 0.0],
        [side[1], cam_up[1], -forward[1], 0.0],
        [side[2], cam_up[2], -forward[2], 0.0],
        [-dot(side, eye), -dot(cam_up, eye), dot(forward, eye), 1.0],
    ];
    // Perspective terms: focal length from the vertical FOV, depth terms
    // mapping [near, far] the OpenGL way (w = -z_view).
    let focal = 1.0 / (fov_deg.to_radians() / 2.0).tan();
    let inv_depth = 1.0 / (near - far);
    let proj = [
        [focal / aspect, 0.0, 0.0, 0.0],
        [0.0, focal, 0.0, 0.0],
        [0.0, 0.0, (far + near) * inv_depth, -1.0],
        [0.0, 0.0, 2.0 * far * near * inv_depth, 0.0],
    ];
    mat4_mul(proj, view)
}
/// Returns `v` scaled to unit length; near-zero vectors (length <= 1e-6)
/// fall back to the -Z axis so callers always receive a usable direction.
fn normalize(v: [f32; 3]) -> [f32; 3] {
    let len = (v[0] * v[0] + v[1] * v[1] + v[2] * v[2]).sqrt();
    if len <= 1e-6 {
        // Degenerate input: pick a deterministic default direction.
        return [0.0, 0.0, -1.0];
    }
    [v[0] / len, v[1] / len, v[2] / len]
}
/// Right-handed cross product `a × b` of two 3-vectors.
fn cross(a: [f32; 3], b: [f32; 3]) -> [f32; 3] {
    let x = a[1] * b[2] - a[2] * b[1];
    let y = a[2] * b[0] - a[0] * b[2];
    let z = a[0] * b[1] - a[1] * b[0];
    [x, y, z]
}
/// Dot product of two 3-vectors (accumulated left-to-right, matching the
/// original expression's float associativity).
fn dot(a: [f32; 3], b: [f32; 3]) -> f32 {
    let mut acc = a[0] * b[0];
    acc += a[1] * b[1];
    acc += a[2] * b[2];
    acc
}
/// Multiplies two 4x4 column-major matrices: `result = a * b`.
///
/// Each output column is `a` applied to the corresponding column of `b`;
/// the sum order matches the original term-by-term so results are
/// bit-identical.
fn mat4_mul(a: [[f32; 4]; 4], b: [[f32; 4]; 4]) -> [[f32; 4]; 4] {
    let mut out = [[0.0f32; 4]; 4];
    for (col, b_col) in b.iter().enumerate() {
        for row in 0..4 {
            out[col][row] = a[0][row] * b_col[0]
                + a[1][row] * b_col[1]
                + a[2][row] * b_col[2]
                + a[3][row] * b_col[3];
        }
    }
    out
}
#[allow(dead_code)]
fn error_output(msg: &str) -> HashMap<String, Message> {
let mut out = HashMap::new();
out.insert("error".to_string(), Message::Error(msg.to_string().into()));
out
}
/// WGSL shader for the untextured (vertex-color) scene pipeline: one uniform
/// block at group 0 / binding 0, a vertex stage that transforms positions by
/// `view_proj` and passes normal/color/world-position through, and a fragment
/// stage that modulates a procedural scale/stripe pattern by a directional
/// term (0.2 ambient + 0.8 * max(dot(n, light_dir), 0)).
///
/// NOTE(review): this `Uniforms` struct declares only the leading fields of
/// the Rust-side `SceneUniforms` (it omits `light_count`/`_pad4`); binding a
/// buffer larger than the shader struct is presumably relied on here — keep
/// the leading field layout in sync with `SceneUniforms` when editing either.
const SCENE_SHADER: &str = r#"
struct Uniforms {
view_proj: mat4x4f,
light_dir: vec3f,
_pad: f32,
camera_pos: vec3f,
time: f32,
};
@group(0) @binding(0) var<uniform> u: Uniforms;
struct VertexInput {
@location(0) position: vec3f,
@location(1) normal: vec3f,
@location(2) color: vec3f,
};
struct VertexOutput {
@builtin(position) clip_pos: vec4f,
@location(0) normal: vec3f,
@location(1) color: vec3f,
@location(2) world_pos: vec3f,
};
@vertex
fn vs_main(in: VertexInput) -> VertexOutput {
var out: VertexOutput;
out.clip_pos = u.view_proj * vec4f(in.position, 1.0);
out.normal = in.normal;
out.color = in.color;
out.world_pos = in.position;
return out;
}
fn procedural_pattern(p: vec3f, base: vec3f) -> vec3f {
let u_coord = p.x * 1.5;
let v_coord = atan2(p.y, max(abs(p.z), 0.001)) * 3.0;
let diamond = sin(u_coord * 8.0 + v_coord) * sin(u_coord * 8.0 - v_coord);
let scales = smoothstep(-0.1, 0.1, diamond);
let stripe = smoothstep(0.3, 0.7, sin(u_coord * 3.0) * 0.5 + 0.5);
let dorsal = smoothstep(0.85, 0.95, cos(v_coord / 3.0));
let pattern = scales * 0.4 + stripe * 0.3 + 0.3;
let dark = base * 0.5;
var col = mix(dark, base, pattern);
col = mix(col, base * 0.3, dorsal * 0.5);
return col;
}
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4f {
let n = normalize(in.normal);
let diff = max(dot(n, u.light_dir), 0.0);
let light = 0.2 + diff * 0.8;
let patterned = procedural_pattern(in.world_pos, in.color);
let col = patterned * light;
return vec4f(col, 1.0);
}
"#;