use crate::core::hardware::detect_hardware;
#[cfg(feature = "gpu-rendering")]
use crate::enhanced_ui::terminal::TerminalCell;
use anyhow::Result;
use std::sync::Arc;
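
/// Hardware-accelerated terminal renderer backed by a `wgpu` device and queue.
/// Compiled only when the `gpu-rendering` feature is enabled; a CPU-only
/// fallback with the same API is provided below.
///
/// A minimal usage sketch (the surrounding `terminal` handle and async context
/// are assumptions, so the example is marked `ignore`):
///
/// ```ignore
/// // Assumes `terminal: Arc<KandilTerminal>` already exists in scope.
/// if should_use_gpu() {
///     let mut renderer = GpuRenderer::new()?;
///     renderer.render_frame(&terminal).await?;
///     println!("frames rendered: {}", renderer.frame_count());
/// }
/// ```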
#[cfg(feature = "gpu-rendering")]
pub struct GpuRenderer {
    context: GpuContext,
    glyph_cache: GlyphCache,
    frame_count: u64,
}
#[cfg(feature = "gpu-rendering")]
impl GpuRenderer {
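    /// Detects local hardware and initializes the GPU context and glyph cache.
    /// Fails if no GPU is detected or device creation fails.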
    pub fn new() -> Result<Self> {
        let hardware = detect_hardware();
        if hardware.gpu.is_none() {
            anyhow::bail!("No GPU detected for rendering");
        }
        let context = GpuContext::init()?;
        let glyph_cache = GlyphCache::new(&context)?;
        Ok(Self {
            context,
            glyph_cache,
            frame_count: 0,
        })
    }
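
    /// Captures the current terminal frame, refreshes the glyph cache from the
    /// visible cells, and renders the frame through the GPU context.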
    pub async fn render_frame(
        &mut self,
        terminal: &Arc<crate::enhanced_ui::terminal::KandilTerminal>,
    ) -> Result<()> {
        let frame = terminal.capture_frame().await?;
        self.frame_count += 1;
        if let Some(cells) = terminal.visible_cells() {
            self.glyph_cache.update(&cells);
        }
        self.glyph_cache.rasterize_in_parallel();
        self.context.render(&frame, &self.glyph_cache)?;
        Ok(())
    }

    pub fn frame_count(&self) -> u64 {
        self.frame_count
    }
}
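
/// CPU-only fallback renderer compiled when the `gpu-rendering` feature is
/// disabled; it mirrors the GPU renderer's API but only tracks frame counts.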
#[cfg(not(feature = "gpu-rendering"))]
pub struct GpuRenderer {
    frame_count: u64,
}
#[cfg(not(feature = "gpu-rendering"))]
impl GpuRenderer {
    pub fn new() -> Result<Self> {
        Ok(Self { frame_count: 0 })
    }

    pub async fn render_frame(
        &mut self,
        _terminal: &Arc<crate::enhanced_ui::terminal::KandilTerminal>,
    ) -> Result<()> {
        self.frame_count += 1;
        Ok(())
    }

    pub fn frame_count(&self) -> u64 {
        self.frame_count
    }
}
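
/// Owned `wgpu` device and queue used to create resources and submit work.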
#[cfg(feature = "gpu-rendering")]
struct GpuContext {
    device: wgpu::Device,
    queue: wgpu::Queue,
}
#[cfg(feature = "gpu-rendering")]
impl GpuContext {
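    /// Requests a high-performance adapter across all available backends and
    /// creates the device and queue, blocking on the async `wgpu` calls with
    /// `pollster`.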
    fn init() -> Result<Self> {
        let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
            backends: wgpu::Backends::all(),
            ..Default::default()
        });
        let adapter =
            pollster::block_on(instance.request_adapter(&wgpu::RequestAdapterOptions {
                power_preference: wgpu::PowerPreference::HighPerformance,
                compatible_surface: None,
                force_fallback_adapter: false,
            }))
            .ok_or_else(|| anyhow::anyhow!("No GPU adapter found"))?;
        let (device, queue) = pollster::block_on(adapter.request_device(
            &wgpu::DeviceDescriptor {
                label: Some("Kandil GPU Renderer"),
                required_features: wgpu::Features::empty(),
                required_limits: wgpu::Limits::default(),
            },
            None,
        ))?;
        Ok(Self { device, queue })
    }
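
    /// Encodes and submits one frame: currently allocates a staging buffer,
    /// pre-processes the visible lines on the CPU, and submits an (empty)
    /// command buffer, warning if encoding took longer than 10ms.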
    fn render(
        &self,
        frame: &crate::enhanced_ui::terminal::TerminalFrame,
        // The glyph cache is not consumed yet; kept in the signature for the
        // eventual glyph upload step.
        _glyph_cache: &GlyphCache,
    ) -> Result<()> {
        if frame.lines.is_empty() {
            return Ok(());
        }
        use std::time::Instant;
        let start_time = Instant::now();

        let encoder = self
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Kandil Terminal Encoder"),
            });

        // Staging buffer for the frame contents; created but not yet written
        // to or bound by a render pass.
        let _frame_data_buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Frame Data Buffer"),
            size: (frame.lines.len() * std::mem::size_of::<u8>()) as u64,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Pre-process the visible lines in parallel (spaces become '.');
        // the result is not yet uploaded to the GPU.
        use rayon::prelude::*;
        let _processed_lines: Vec<String> = frame
            .lines
            .par_iter()
            .map(|line| {
                line.chars()
                    .map(|c| if c == ' ' { '.' } else { c })
                    .collect()
            })
            .collect();

        let render_time = start_time.elapsed();
        self.queue.submit(Some(encoder.finish()));

        // Warn when encoding a single frame takes longer than 10ms.
        let render_time_ms = render_time.as_micros() as f64 / 1000.0;
        if render_time_ms > 10.0 {
            eprintln!("⚠️ GPU rendering took {:.2}ms", render_time_ms);
        }
        Ok(())
    }
}
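
/// CPU-side cache of terminal cells pending glyph rasterization, plus an
/// optional GPU glyph texture.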
#[cfg(feature = "gpu-rendering")]
struct GlyphCache {
    cells: Vec<TerminalCell>,
    texture: Option<wgpu::Texture>,
}
#[cfg(feature = "gpu-rendering")]
impl GlyphCache {
    fn new(_context: &GpuContext) -> Result<Self> {
        Ok(Self {
            cells: Vec::new(),
            texture: None,
        })
    }

    fn update(&mut self, cells: &[TerminalCell]) {
        self.cells = cells.to_vec();
    }

    /// Placeholder for parallel glyph rasterization: walks the cells with
    /// rayon but does not produce glyph bitmaps yet.
    fn rasterize_in_parallel(&mut self) {
        use rayon::prelude::*;
        self.cells.par_iter_mut().for_each(|cell| {
            std::hint::black_box(cell);
        });
    }
}
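
/// Aggregate render statistics (frame count, average frame time, GPU
/// utilization, and FPS) for the GPU path.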
#[cfg(feature = "gpu-rendering")]
pub struct GpuRenderMetrics {
    pub frames_rendered: u64,
    pub avg_render_time_ms: f64,
    pub gpu_utilization_percent: f64,
    pub current_fps: u32,
}
#[cfg(feature = "gpu-rendering")]
impl Default for GpuRenderMetrics {
    fn default() -> Self {
        Self {
            frames_rendered: 0,
            avg_render_time_ms: 0.0,
            gpu_utilization_percent: 0.0,
            current_fps: 0,
        }
    }
}
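
/// Returns true when a GPU is detected and the machine has at least 4 GB of RAM.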
pub fn should_use_gpu() -> bool {
    let hardware = detect_hardware();
    hardware.gpu.is_some() && hardware.total_ram_gb >= 4
}
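
/// Recommends a render mode from detected hardware: `HighQuality` for GPUs
/// with at least 2 GB of memory, `Balanced` for any other GPU, and `CpuOnly`
/// when no GPU is present.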
pub fn recommended_render_mode() -> RenderMode {
    let hardware = detect_hardware();
    match hardware.gpu {
        Some(ref gpu) if gpu.memory_gb >= 2 => RenderMode::HighQuality,
        Some(_) => RenderMode::Balanced,
        None => RenderMode::CpuOnly,
    }
}
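
/// Rendering quality tier; `Default` picks `Balanced` when a suitable GPU is
/// present (see `should_use_gpu`) and `CpuOnly` otherwise.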
#[derive(Debug, Clone, PartialEq)]
pub enum RenderMode {
    HighQuality,
    Balanced,
    CpuOnly,
}
impl Default for RenderMode {
    fn default() -> Self {
        if should_use_gpu() {
            RenderMode::Balanced
        } else {
            RenderMode::CpuOnly
        }
    }
}