1#[macro_use]
2#[allow(unused)]
3extern crate anyhow;
4
5use std::ops::Deref;
6
7use call::{Call, CallType, GpuPath};
8use mesh::Mesh;
9use nvgx::*;
10use pipeline::{PipelineConfig, PipelineManager, PipelineUsage};
11use texture::{texture_type_map, TextureManager};
12use unifroms::{RenderCommand, Unifrom};
13use wgpu::{util::DeviceExt, TextureView};
14
15mod call;
16pub mod fb;
17mod instance;
18mod mesh;
19mod pipeline;
20mod renderer;
21mod texture;
22mod unifroms;
23
/// Options controlling how the NVG renderer is built and draws.
pub struct RenderConfig {
    /// Enable anti-aliased (fringe) rendering.
    pub antialias: bool,
    /// Texture format of the render target, expressed as an `nvgx`
    /// texture type (mapped to a `wgpu::TextureFormat` via
    /// `texture_type_map`).
    pub format: nvgx::TextureType,
}
28
29impl RenderConfig {
30 pub fn antialias(mut self, antialias: bool) -> Self {
31 self.antialias = antialias;
32 self
33 }
34
35 pub fn format(mut self, format: nvgx::TextureType) -> Self {
36 self.format = format;
37 self
38 }
39
40 pub fn format_match(&self, format: &wgpu::TextureFormat) -> bool {
41 format == &texture_type_map(self.format)
42 }
43}
44
45impl Default for RenderConfig {
46 fn default() -> Self {
47 Self {
48 antialias: true,
49 format: nvgx::TextureType::BGRA,
50 }
51 }
52}
53
/// Per-frame GPU state: geometry, uniforms and recorded draw calls
/// that `render` replays into a single wgpu render pass.
pub struct RenderResource {
    /// Renderer options (anti-aliasing, target format).
    config: RenderConfig,
    /// Shared vertex/index buffers for recorded geometry.
    mesh: Mesh,
    /// GPU path metadata, sliced per call via `Call::path_range`.
    paths: Vec<GpuPath>,
    /// Draw calls recorded since the last flush.
    calls: Vec<Call>,
    /// Bind group 0: viewport-size uniform.
    viewsize_uniform: Unifrom<Extent>,
    /// Bind group 1: per-call fragment uniforms, addressed with
    /// dynamic offsets (`Call::uniform_offset`).
    render_unifrom: Unifrom<Vec<RenderCommand>>,
    /// Bind group 2: textures/samplers looked up by `Call::image`.
    texture_manager: TextureManager,
    /// Single identity-transform instance buffer, used by calls that
    /// carry no instance data. (Name keeps the project's existing
    /// "instace" spelling used elsewhere.)
    default_instace: wgpu::Buffer,
}
64
65impl RenderResource {
66 #[inline]
67 fn do_fill(
68 &self,
69 call: &Call,
70 render_pass: &mut wgpu::RenderPass<'_>,
71 pipeline_manager: &PipelineManager,
72 ) {
73 let paths = &self.paths[call.path_range.clone()];
74 let buffer = call
75 .vertex_buffer
76 .as_ref()
77 .map(|v| v.deref())
78 .unwrap_or(&self.mesh.vertex_buffer);
79 let (instance_buffer, instance_slice) = call
80 .instances
81 .as_ref()
82 .map(|i| (i.0.deref(), i.1.clone()))
83 .unwrap_or((&self.default_instace, 0..1));
84 {
85 {
86 render_pass.set_pipeline(pipeline_manager.fill_stencil.pipeline());
88 render_pass.set_stencil_reference(0);
89 render_pass.set_bind_group(0, &self.viewsize_uniform.bind_group, &[]);
90 render_pass.set_bind_group(
91 1,
92 &self.render_unifrom.bind_group,
93 &[call.uniform_offset(0)],
94 );
95 render_pass.set_bind_group(2, self.texture_manager.get_bindgroup(call.image), &[]);
96 render_pass
97 .set_index_buffer(self.mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
98 render_pass.set_vertex_buffer(0, buffer.slice(..));
99 render_pass.set_vertex_buffer(1, instance_buffer.slice(..));
100 for path in paths {
101 let count = path.triangle_fan_count();
102 render_pass.draw_indexed(
103 0..(count * 3),
104 path.triangle_fan_offset(),
105 instance_slice.clone(),
106 );
107 }
108 }
109 {
110 render_pass.set_pipeline(pipeline_manager.fill_stroke.pipeline());
112 render_pass.set_stencil_reference(0);
113 render_pass.set_bind_group(0, &self.viewsize_uniform.bind_group, &[]);
114 render_pass.set_bind_group(
115 1,
116 &self.render_unifrom.bind_group,
117 &[call.uniform_offset(1)],
118 );
119 render_pass.set_bind_group(2, self.texture_manager.get_bindgroup(call.image), &[]);
120 render_pass.set_vertex_buffer(0, buffer.slice(..));
121 render_pass.set_vertex_buffer(1, instance_buffer.slice(..));
122
123 for path in paths {
124 render_pass.draw(path.stroke_vert(), instance_slice.clone());
125 }
126 }
127 {
128 render_pass.set_pipeline(pipeline_manager.fill_inner.pipeline());
130 render_pass.set_stencil_reference(0);
131 render_pass.set_bind_group(0, &self.viewsize_uniform.bind_group, &[]);
132 render_pass.set_bind_group(
133 1,
134 &self.render_unifrom.bind_group,
135 &[call.uniform_offset(1)],
136 );
137 render_pass.set_bind_group(2, self.texture_manager.get_bindgroup(call.image), &[]);
138 render_pass.set_vertex_buffer(0, buffer.slice(..));
139 render_pass.set_vertex_buffer(1, instance_buffer.slice(..));
140
141 render_pass.draw(call.triangle_vert(), instance_slice.clone());
142 }
143 }
144 }
145
146 #[inline]
147 fn do_convex_fill(
148 &self,
149 call: &Call,
150 render_pass: &mut wgpu::RenderPass<'_>,
151 pipeline_manager: &PipelineManager,
152 ) {
153 let paths = &self.paths[call.path_range.clone()];
154 let buffer = call
155 .vertex_buffer
156 .as_ref()
157 .map(|v| v.deref())
158 .unwrap_or(&self.mesh.vertex_buffer);
159 let (instance_buffer, instance_slice) = call
160 .instances
161 .as_ref()
162 .map(|i| (i.0.deref(), i.1.clone()))
163 .unwrap_or((&self.default_instace, 0..1));
164 {
165 render_pass.set_pipeline(pipeline_manager.fill_convex.pipeline());
166 render_pass.set_stencil_reference(0);
167 render_pass.set_bind_group(0, &self.viewsize_uniform.bind_group, &[]);
168 render_pass.set_bind_group(
169 1,
170 &self.render_unifrom.bind_group,
171 &[call.uniform_offset(0)],
172 );
173 render_pass.set_bind_group(2, self.texture_manager.get_bindgroup(call.image), &[]);
174 render_pass
175 .set_index_buffer(self.mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
176 render_pass.set_vertex_buffer(0, buffer.slice(..));
177 render_pass.set_vertex_buffer(1, instance_buffer.slice(..));
178
179 for path in paths {
180 render_pass.draw_indexed(
181 0..path.triangle_fan_count() * 3,
182 path.triangle_fan_offset(),
183 instance_slice.clone(),
184 );
185 }
186 }
187
188 {
189 render_pass.set_pipeline(pipeline_manager.fill_stroke.pipeline());
190 render_pass.set_stencil_reference(0);
191 render_pass.set_bind_group(0, &self.viewsize_uniform.bind_group, &[]);
192 render_pass.set_bind_group(
193 1,
194 &self.render_unifrom.bind_group,
195 &[call.uniform_offset(0)],
196 );
197 render_pass.set_bind_group(2, self.texture_manager.get_bindgroup(call.image), &[]);
198 render_pass.set_vertex_buffer(0, buffer.slice(..));
199 render_pass.set_vertex_buffer(1, instance_buffer.slice(..));
200
201 for path in paths {
202 render_pass.draw(path.stroke_vert(), instance_slice.clone());
203 }
204 }
205 }
206
207 #[inline]
208 fn do_stroke(
209 &self,
210 call: &Call,
211 render_pass: &mut wgpu::RenderPass<'_>,
212 pipeline_manager: &PipelineManager,
213 ) {
214 let paths = &self.paths[call.path_range.clone()];
215 let buffer = call
216 .vertex_buffer
217 .as_ref()
218 .map(|v| v.deref())
219 .unwrap_or(&self.mesh.vertex_buffer);
220 let (instance_buffer, instance_slice) = call
221 .instances
222 .as_ref()
223 .map(|i| (i.0.deref(), i.1.clone()))
224 .unwrap_or((&self.default_instace, 0..1));
225 render_pass.set_pipeline(pipeline_manager.fill_stroke.pipeline());
226 render_pass.set_stencil_reference(0);
227 render_pass.set_bind_group(0, &self.viewsize_uniform.bind_group, &[]);
228 render_pass.set_bind_group(
229 1,
230 &self.render_unifrom.bind_group,
231 &[call.uniform_offset(0)],
232 );
233 render_pass.set_bind_group(2, self.texture_manager.get_bindgroup(call.image), &[]);
234 render_pass.set_vertex_buffer(0, buffer.slice(..));
235 render_pass.set_vertex_buffer(1, instance_buffer.slice(..));
236
237 for path in paths {
238 render_pass.draw(path.stroke_vert(), instance_slice.clone());
239 }
240 }
241
242 #[inline]
243 fn do_triangles(
244 &self,
245 call: &Call,
246 render_pass: &mut wgpu::RenderPass<'_>,
247 pipeline_manager: &PipelineManager,
248 ) {
249 let buffer = call
250 .vertex_buffer
251 .as_ref()
252 .map(|v| v.deref())
253 .unwrap_or(&self.mesh.vertex_buffer);
254 let (instance_buffer, instance_slice) = call
255 .instances
256 .as_ref()
257 .map(|i| (i.0.deref(), i.1.clone()))
258 .unwrap_or((&self.default_instace, 0..1));
259 render_pass.set_pipeline(pipeline_manager.triangles.pipeline());
260 render_pass.set_bind_group(0, &self.viewsize_uniform.bind_group, &[]);
261 render_pass.set_bind_group(
262 1,
263 &self.render_unifrom.bind_group,
264 &[call.uniform_offset(0)],
265 );
266 render_pass.set_bind_group(2, self.texture_manager.get_bindgroup(call.image), &[]);
267 render_pass.set_vertex_buffer(0, buffer.slice(..));
268 render_pass.set_vertex_buffer(1, instance_buffer.slice(..));
269
270 render_pass.draw(call.triangle_vert(), instance_slice.clone());
271 }
272
273 #[inline]
274 #[cfg(feature = "wirelines")]
275 fn do_lines(
276 &self,
277 call: &Call,
278 render_pass: &mut wgpu::RenderPass<'_>,
279 pipeline_manager: &PipelineManager,
280 ) {
281 let paths = &self.paths[call.path_range.clone()];
282 let buffer = call
283 .vertex_buffer
284 .as_ref()
285 .map(|v| v.deref())
286 .unwrap_or(&self.mesh.vertex_buffer);
287 let (instance_buffer, instance_slice) = call
288 .instances
289 .as_ref()
290 .map(|i| (i.0.deref(), i.1.clone()))
291 .unwrap_or((&self.default_instace, 0..1));
292 render_pass.set_pipeline(pipeline_manager.wirelines.pipeline());
293 render_pass.set_bind_group(0, &self.viewsize_uniform.bind_group, &[]);
294 render_pass.set_bind_group(
295 1,
296 &self.render_unifrom.bind_group,
297 &[call.uniform_offset(0)],
298 );
299 render_pass.set_bind_group(2, self.texture_manager.get_bindgroup(call.image), &[]);
300 render_pass.set_vertex_buffer(0, buffer.slice(..));
301 render_pass.set_vertex_buffer(1, instance_buffer.slice(..));
302
303 for path in paths {
304 render_pass.draw(path.stroke_vert(), instance_slice.clone());
305 }
306 }
307
308 fn render(
309 &self,
310 device: &wgpu::Device,
311 queue: &wgpu::Queue,
312 color_view: &TextureView,
313 format: TextureType,
314 stencil_view: &TextureView,
315 pipeline_manager: &mut PipelineManager,
316 clear_cmd: Option<wgpu::Color>,
317 ) {
318 let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
319 label: Some("Nvg Flush Render Encoder"),
320 });
321 {
322 let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
323 label: Some("NVG Render Pass"),
324 depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
325 view: stencil_view,
326 stencil_ops: Some(wgpu::Operations {
327 load: if clear_cmd.is_some() {
328 wgpu::LoadOp::Clear(0)
329 } else {
330 wgpu::LoadOp::Load
331 },
332 store: wgpu::StoreOp::Store,
333 }),
334 depth_ops: None,
335 }),
336 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
337 view: color_view,
338 resolve_target: None,
339 ops: wgpu::Operations {
340 load: if let Some(color) = clear_cmd {
341 wgpu::LoadOp::Clear(color)
342 } else {
343 wgpu::LoadOp::Load
344 },
345 store: wgpu::StoreOp::Store,
346 },
347 })],
348
349 ..Default::default()
350 });
351 for call in &self.calls {
352 match call.call_type {
353 CallType::Fill(t) => {
354 pipeline_manager.update_pipeline(
355 &device,
356 PipelineConfig {
357 format,
358 usage: PipelineUsage::FillStencil(t),
359 },
360 );
361 pipeline_manager.update_pipeline(
362 &device,
363 PipelineConfig {
364 format,
365 usage: PipelineUsage::FillStroke(call.blend_func.clone()),
366 },
367 );
368 pipeline_manager.update_pipeline(
369 &device,
370 PipelineConfig {
371 format,
372 usage: PipelineUsage::FillInner(call.blend_func),
373 },
374 );
375 self.do_fill(call, &mut render_pass, &pipeline_manager);
376 }
377 CallType::ConvexFill => {
378 pipeline_manager.update_pipeline(
379 &device,
380 PipelineConfig {
381 format,
382 usage: PipelineUsage::FillConvex(call.blend_func.clone()),
383 },
384 );
385 pipeline_manager.update_pipeline(
386 &device,
387 PipelineConfig {
388 format,
389 usage: PipelineUsage::FillStroke(call.blend_func),
390 },
391 );
392 self.do_convex_fill(call, &mut render_pass, &pipeline_manager);
393 }
394 CallType::Stroke => {
395 pipeline_manager.update_pipeline(
396 &device,
397 PipelineConfig {
398 format,
399 usage: PipelineUsage::FillStroke(call.blend_func),
400 },
401 );
402 self.do_stroke(call, &mut render_pass, &pipeline_manager);
403 }
404 CallType::Triangles => {
405 pipeline_manager.update_pipeline(
406 &device,
407 PipelineConfig {
408 format,
409 usage: PipelineUsage::Triangles(call.blend_func),
410 },
411 );
412 self.do_triangles(call, &mut render_pass, &pipeline_manager);
413 }
414 #[cfg(feature = "wirelines")]
415 CallType::Lines => {
416 pipeline_manager.update_pipeline(
417 &device,
418 PipelineConfig {
419 format,
420 usage: PipelineUsage::Lines(call.blend_func),
421 },
422 );
423 self.do_lines(call, &mut render_pass, &pipeline_manager);
424 }
425 }
426 }
427 }
428 queue.submit(std::iter::once(encoder.finish()));
429 }
430}
431
/// Top-level wgpu-backed NVG renderer: owns the device/queue/surface,
/// the lazily-built pipeline cache and the per-frame resources.
pub struct Renderer {
    device: wgpu::Device,
    queue: wgpu::Queue,
    surface: wgpu::Surface<'static>,
    surface_config: wgpu::SurfaceConfiguration,
    /// Render pipelines built on demand per `PipelineConfig`.
    pipeline_manager: PipelineManager,
    /// Optional off-screen render target. NOTE(review): set/used
    /// outside this chunk — presumably redirects rendering from the
    /// surface to a framebuffer image; confirm in `renderer`/`fb`.
    target_fb: Option<(ImageId, TextureView)>,
    /// Pending clear color, consumed by the next render pass.
    clear_cmd: Option<wgpu::Color>,
    /// Geometry, uniforms and recorded calls for the current frame.
    resources: RenderResource,
}
442
443impl Renderer {
444 pub fn create(
445 config: RenderConfig,
446 device: wgpu::Device,
447 queue: wgpu::Queue,
448 surface: wgpu::Surface<'static>,
449 surface_config: wgpu::SurfaceConfiguration,
450 ) -> anyhow::Result<Self> {
451 let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
452 label: Some("NVG Shader"),
453 source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
454 });
455 let viewsize_uniform: Unifrom<Extent> =
456 Unifrom::new(&device, 0, wgpu::ShaderStages::VERTEX, None);
457 let render_unifrom: Unifrom<Vec<RenderCommand>> =
458 Unifrom::new(&device, 0, wgpu::ShaderStages::FRAGMENT, Some(64));
459
460 let mesh = Mesh::new(&device, &queue, 10);
461 let texture_manager = TextureManager::new(&device, &surface_config);
462
463 let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
464 label: Some("NVG Render Pipeline Layout"),
465 bind_group_layouts: &[
466 &viewsize_uniform.layout,
467 &render_unifrom.layout,
468 &texture_manager.layout,
469 ],
470 push_constant_ranges: &[],
471 });
472
473 let pipeline_manager =
474 PipelineManager::new(shader, pipeline_layout, &device, config.format);
475
476 let identity_instance = Transform::identity();
477 let default_instace = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
478 label: Some("Default Instace"),
479 contents: bytemuck::bytes_of(&identity_instance),
480 usage: wgpu::BufferUsages::VERTEX,
481 });
482
483 return Ok(Self {
484 device,
485 queue,
486 surface,
487 surface_config,
488 target_fb: None,
489 pipeline_manager,
490 clear_cmd: None,
491 resources: RenderResource {
492 config,
493 mesh,
494 paths: Vec::new(),
495 calls: Vec::new(),
496 viewsize_uniform,
497 render_unifrom,
498 texture_manager,
499 default_instace,
500 },
501 });
502 }
503}