/// Fallback entry point compiled when the required Cargo features are absent.
///
/// Instead of running the GPU memory-metrics demo, it prints instructions
/// for re-running the example with the `memory_management` and `gpu`
/// features enabled.
#[cfg(not(all(feature = "memory_management", feature = "gpu")))]
#[allow(dead_code)]
fn main() {
    let notices = [
        "This example requires both 'memory_management' and 'gpu' features to be enabled.",
        "Run with: cargo run --example memory_metrics_gpu --features memory_management,gpu",
    ];
    for notice in &notices {
        println!("{}", notice);
    }
}
#[cfg(all(feature = "memory_management", feature = "gpu"))]
use scirs2_core::gpu::GpuBackend;
#[cfg(all(feature = "memory_management", feature = "gpu"))]
use scirs2_core::memory::metrics::{
format_bytes, format_memory_report, generate_memory_report, reset_memory_metrics,
setup_gpu_memory_tracking, TrackedGpuContext,
};
#[cfg(all(feature = "memory_management", feature = "gpu"))]
use std::time::Instant;
/// Demonstrates memory-metrics tracking for GPU operations:
/// buffer allocation and release, host<->device transfer validation,
/// and a simulated matrix-multiplication workload.
///
/// Uses the CPU backend so the example runs on any machine; the tracking
/// machinery is the same for real GPU backends.
#[cfg(all(feature = "memory_management", feature = "gpu"))]
#[allow(dead_code)]
fn main() {
    println!("Memory Metrics with GPU Operations Example");
    println!("==========================================\n");

    // Start from a clean slate so the reports below reflect only this run.
    reset_memory_metrics();
    setup_gpu_memory_tracking();
    println!("GPU memory tracking set up");

    let context = match TrackedGpuContext::with_backend(GpuBackend::Cpu, "GPUDemo") {
        Ok(ctx) => ctx,
        Err(err) => {
            println!("Failed to create GPU context: {}", err);
            return;
        }
    };
    println!(
        "Created GPU context with {} backend",
        context.backend_name()
    );

    println!("\nExample 1: Basic Buffer Operations");
    println!("----------------------------------");
    let buffersizes = [1000, 5000, 10000, 50000];
    let mut buffers = Vec::new();
    for &size in &buffersizes {
        let bytes = size * std::mem::size_of::<f32>();
        println!(
            "Creating buffer with {} elements ({})",
            size,
            format_bytes(bytes)
        );
        let buffer = context.create_buffer::<f32>(size);
        buffers.push(buffer);
        // Report usage after each allocation so growth is visible step by step.
        let report = generate_memory_report();
        println!(
            "  Current GPU memory: {}",
            format_bytes(report.total_current_usage)
        );
    }
    println!("\nMemory Report after buffer allocations:");
    println!("{}", format_memory_report());

    // Dropping tracked buffers should be reflected as freed memory.
    println!("\nReleasing first two buffers");
    buffers.drain(0..2);
    println!("\nMemory Report after releasing buffers:");
    println!("{}", format_memory_report());

    println!("\nExample 2: Buffer Data Transfer");
    println!("-------------------------------");
    let host_data: Vec<f32> = (0..10000).map(|i| i as f32).collect();
    let data_size = host_data.len() * std::mem::size_of::<f32>();
    println!(
        "Creating buffer from {} elements ({})",
        host_data.len(),
        format_bytes(data_size)
    );
    let buffer = context.create_buffer_from_slice(&host_data);
    // Round-trip the data and verify it survived the host -> device -> host copy.
    let device_data = buffer.to_vec();
    let matching = host_data
        .iter()
        .zip(device_data.iter())
        .all(|(a, b)| (a - b).abs() < 1e-10);
    println!(
        "Data transfer validation: {}",
        if matching { "PASSED" } else { "FAILED" }
    );

    println!("\nExample 3: Simulating a GPU Computation");
    println!("--------------------------------------");
    // Run the matrix-multiplication simulation. This helper was previously
    // defined but never called, leaving the example with a
    // "not yet implemented" placeholder despite the implementation existing.
    gpu_operation_example(&context);

    println!("\nReleasing all remaining buffers");
    drop(buffers);
    drop(buffer);
    println!("\nFinal Memory Report:");
    println!("{}", format_memory_report());
}
/// Simulates a 1000x1000 matrix multiplication (C = A * B) on the GPU,
/// reporting the size of each allocated buffer, the aggregate memory usage
/// while the matrices are live, and the usage after cleanup.
///
/// The "kernel" is a sleep stand-in; the point of the example is the
/// memory-tracking output, not the arithmetic.
#[cfg(all(feature = "memory_management", feature = "gpu"))]
#[allow(dead_code)]
fn gpu_operation_example(context: &TrackedGpuContext) {
    // Bytes per f32 element, hoisted so each size computation reads clearly.
    const ELEM: usize = std::mem::size_of::<f32>();

    let timer = Instant::now();
    let (m, n, k) = (1000, 1000, 1000);

    println!("Creating matrices:");
    println!(" Matrix A: {}x{}", m, k);
    println!(" Matrix B: {}x{}", k, n);
    println!(" Matrix C: {}x{}", m, n);

    let a_len = m * k;
    let b_len = k * n;
    let c_len = m * n;

    // Allocate each operand and report its footprint as we go.
    let a_buf = context.create_buffer::<f32>(a_len);
    println!(
        " Allocated buffer for matrix A: {}",
        format_bytes(a_len * ELEM)
    );
    let b_buf = context.create_buffer::<f32>(b_len);
    println!(
        " Allocated buffer for matrix B: {}",
        format_bytes(b_len * ELEM)
    );
    let c_buf = context.create_buffer::<f32>(c_len);
    println!(
        " Allocated buffer for matrix C: {}",
        format_bytes(c_len * ELEM)
    );

    let live_report = generate_memory_report();
    println!(
        "\nGPU memory usage for matrices: {}",
        format_bytes(live_report.total_current_usage)
    );

    // Stage the input matrices on the device.
    let a_host = vec![1.0f32; a_len];
    let b_host = vec![2.0f32; b_len];
    println!("\nUploading matrices to GPU");
    a_buf.copy_from_host(&a_host);
    b_buf.copy_from_host(&b_host);

    println!("Executing matrix multiplication...");
    std::thread::sleep(std::time::Duration::from_millis(100));

    println!("Downloading result matrix");
    let mut c_host = vec![0.0f32; c_len];
    c_buf.copy_to_host(&mut c_host);

    println!("Matrix multiplication completed in {:?}", timer.elapsed());

    // Release all three buffers (dropped in A, B, C order, as before).
    println!("Cleaning up GPU buffers");
    drop((a_buf, b_buf, c_buf));

    let final_report = generate_memory_report();
    println!(
        "GPU memory after cleanup: {}",
        format_bytes(final_report.total_current_usage)
    );
}