//! prgpu 0.1.11
//!
//! GPU-accelerated rendering utilities for Adobe Premiere Pro and After Effects plugins.
use std::env;
use std::path::{Path, PathBuf};

// Reuse src/build helpers so prgpu's own build.rs runs the same slangc + bridge pipeline as effects via prgpu::build::compile_shaders.
#[path = "src/build/sdk.rs"]
mod sdk;
#[path = "src/build/reflection.rs"]
mod reflection;
#[path = "src/build/bindings.rs"]
mod bindings;
#[path = "src/build/compile.rs"]
mod compile;
#[path = "src/build/cpu_dispatch.rs"]
mod cpu_dispatch;

/// Build script entry point: selects the GPU backend cfg for the compilation
/// target, emits the cfg flags consumed by the crate, and compiles the
/// built-in Slang shaders when a `shaders/` directory is present.
fn main() {
	let target = env::var("TARGET").expect("TARGET env var missing");

	let is_windows = target.contains("windows");
	let is_apple = target.contains("apple-darwin") || target.contains("apple-ios");

	// Target-derived default: Metal on Apple platforms; on Windows, OpenCL only
	// when the Cargo feature is enabled, otherwise CUDA; "other" elsewhere.
	let default_backend = if is_apple {
		"metal"
	} else if is_windows {
		if env::var_os("CARGO_FEATURE_OPENCL").is_some() { "opencl" } else { "cuda" }
	} else {
		"other"
	};

	// GPU_BACKEND env var overrides the target-derived default. Carrying an
	// owned String here avoids the previous Box::leak of the override value.
	let backend: String = env::var("GPU_BACKEND").unwrap_or_else(|_| default_backend.to_string());

	println!("cargo:rustc-check-cfg=cfg(gpu_backend, values(\"metal\", \"cuda\", \"opencl\", \"other\"))");
	println!("cargo:rustc-cfg=with_premiere");
	println!("cargo:rustc-cfg=gpu_backend=\"{}\"", backend);

	// Shader hot-reload Cargo feature gates the cudarc/nvrtc dep, so it must be checked here.
	if env::var_os("CARGO_FEATURE_SHADER_HOTRELOAD").is_some() {
		println!("cargo:rustc-cfg=shader_hotreload");
	}

	println!("cargo:rerun-if-env-changed=GPU_BACKEND");
	println!("cargo:rerun-if-env-changed=CARGO_FEATURE_OPENCL");
	println!("cargo:rerun-if-changed=build.rs");

	// Compile prgpu's built-in shaders into OUT_DIR; metallib/ptx is picked up by include_shader!, CPU bridge is linked into prgpu_slang_cpu.a.
	let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR"));
	let shader_dir = manifest_dir.join("shaders");
	if shader_dir.is_dir() {
		println!("cargo:rerun-if-changed=shaders");
		println!("cargo:rerun-if-changed=../vekl");
		compile_builtin_shaders(&shader_dir);
	}
}

/// Compile every `*.slang` file in `shader_dir` into `OUT_DIR`.
///
/// For each shader this runs slangc via `compile::compile_shader` (GPU targets
/// plus CPU C++ source), generates the CPU dispatch bridge, and writes a
/// reference-only `<name>_bindings.rs` binding map. All collected CPU C++
/// sources are then compiled into a single static library at the end.
///
/// Panics (failing the build) when the Slang SDK, reflection JSON, or shader
/// directory cannot be read — these are unrecoverable for a build script.
fn compile_builtin_shaders(shader_dir: &Path) {
	let out_dir = PathBuf::from(env::var("OUT_DIR").expect("OUT_DIR"));

	// Vendored vekl ships in the prgpu crate tarball for crates.io consumers; fall back to the workspace sibling during local dev.
	let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR"));
	let vendored = manifest_dir.join("vekl");
	let sibling = manifest_dir.parent().map(|p| p.join("vekl"));

	let mut include_dirs: Vec<PathBuf> = vec![shader_dir.to_path_buf()];
	if vendored.is_dir() {
		include_dirs.push(vendored);
	} else if let Some(sibling) = sibling.filter(|p| p.is_dir()) {
		include_dirs.push(sibling);
	}

	let slang_files: Vec<PathBuf> = std::fs::read_dir(shader_dir)
		.unwrap_or_else(|e| panic!("Failed to read shader dir {}: {e}", shader_dir.display()))
		.filter_map(|e| e.ok())
		.map(|e| e.path())
		.filter(|p| p.extension().and_then(|s| s.to_str()) == Some("slang"))
		.collect();

	if slang_files.is_empty() {
		return;
	}

	let sdk_path = sdk::sdk_dir();
	let slangc = sdk::slangc_bin(&sdk_path);
	if !slangc.exists() {
		panic!(
			"slangc not found at {}. Slang SDK v{} auto-download failed.",
			slangc.display(),
			sdk::SLANG_VERSION
		);
	}

	// Every shader contributes its slangc-emitted CPU C++ plus a generated
	// bridge; all of them are compiled into one archive at the end.
	let mut cpu_cpp_paths: Vec<PathBuf> = Vec::new();

	for slang_file in &slang_files {
		// file_stem is always present here (extension filter above), but the
		// name must also be valid UTF-8 to become a Rust identifier prefix.
		let name = slang_file
			.file_stem()
			.and_then(|s| s.to_str())
			.unwrap_or_else(|| panic!("Non-UTF-8 shader filename: {}", slang_file.display()))
			.to_string();
		let compiled = compile::compile_shader(&sdk_path, slang_file, &name, &out_dir, &include_dirs);

		cpu_cpp_paths.push(compiled.cpp_path.clone());

		let cpu_json = std::fs::read_to_string(&compiled.cpu_reflection_path)
			.unwrap_or_else(|e| panic!("Failed to read CPU reflection for {name}: {e}"));
		let cpu_refl = reflection::parse_reflection(&cpu_json)
			.unwrap_or_else(|e| panic!("Failed to parse CPU reflection JSON for {name}: {e}"));

		let bridge_path = cpu_dispatch::generate_bridge(&name, &cpu_refl, &sdk_path, &out_dir);
		cpu_cpp_paths.push(bridge_path);

		// Reference-only header; the kernel itself wires up via declare_kernel!.
		// GPU-target reflection is optional (the target may not have been built),
		// so missing/unreadable Metal or CUDA JSON is skipped silently.
		let mut all_bindings = String::from("// Auto-generated by prgpu build from slangc -reflection-json\n\n");
		if let Some(metal_ref_path) = &compiled.metal_reflection_path {
			if let Ok(json) = std::fs::read_to_string(metal_ref_path) {
				if let Ok(refl) = reflection::parse_reflection(&json) {
					all_bindings.push_str("// --- Metal target bindings ---\n");
					all_bindings.push_str(&bindings::generate_bindings(&refl, &format!("METAL_{name}")));
					all_bindings.push('\n');
				}
			}
		}
		if let Some(cuda_ref_path) = &compiled.cuda_reflection_path {
			if let Ok(json) = std::fs::read_to_string(cuda_ref_path) {
				if let Ok(refl) = reflection::parse_reflection(&json) {
					all_bindings.push_str("// --- CUDA target bindings ---\n");
					all_bindings.push_str(&bindings::generate_bindings(&refl, &format!("CUDA_{name}")));
					all_bindings.push('\n');
				}
			}
		}
		all_bindings.push_str("// --- CPU target bindings ---\n");
		all_bindings.push_str(&bindings::generate_bindings(&cpu_refl, "CPU"));
		let bindings_path = out_dir.join(format!("{name}_bindings.rs"));
		// Best-effort write (the map is reference-only), but report honestly:
		// previously `.ok()` swallowed the error and the success message was
		// printed unconditionally.
		match std::fs::write(&bindings_path, &all_bindings) {
			Ok(()) => println!("cargo:warning=[slang] Binding map written to: {}", bindings_path.display()),
			Err(e) => println!(
				"cargo:warning=[slang] Failed to write binding map {}: {e}",
				bindings_path.display()
			),
		}
	}

	let cpu_paths_refs: Vec<&Path> = cpu_cpp_paths.iter().map(|p| p.as_path()).collect();
	cpu_dispatch::compile_cpu_all(&cpu_paths_refs, &sdk_path);
}