//! Multiple-sensor visualization example: spawn a vehicle on autopilot, attach
//! RGB, depth, and semantic segmentation cameras plus a roof-mounted LiDAR,
//! save the camera frames to `_out/sensors/`, and log LiDAR statistics.
use carla::{
client::{Client, Sensor, Vehicle},
geom::{Location, Rotation, Transform},
rpc::AttachmentType,
sensor::data::{Image, LidarMeasurement},
};
use std::{
fs,
sync::{Arc, Mutex},
thread,
time::Duration,
};
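
// Camera resolution and field of view shared by all three cameras, plus the
// number of frames to capture.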
const IMAGE_WIDTH: u32 = 800;
const IMAGE_HEIGHT: u32 = 600;
const FOV: f32 = 90.0;
const NUM_FRAMES: usize = 20;
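
/// Most recent data received from each sensor, filled in by the listen callbacks.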
#[derive(Default)]
struct SensorFrame {
rgb_image: Option<Image>,
depth_image: Option<Image>,
semantic_image: Option<Image>,
lidar_measurement: Option<LidarMeasurement>,
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Multiple Sensor Visualization Example");
println!("=====================================\n");
fs::create_dir_all("_out/sensors")?;
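
// Connect to the CARLA server on the default RPC port.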
let client = Client::connect("localhost", 2000, None)?;
let mut world = client.world()?;
println!("Connected to CARLA simulator");
println!("Map: {}\n", world.map()?.name());
println!("Spawning vehicle...");
let bp_lib = world.blueprint_library()?;
let vehicle_bp = bp_lib
.find("vehicle.tesla.model3")?
.ok_or("Tesla Model 3 blueprint not found")?;
let spawn_points = world.map()?.recommended_spawn_points()?;
if spawn_points.is_empty() {
return Err("No spawn points available".into());
}
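
// A spawn point can be blocked by a previously spawned actor, so try the first
// 10 until one works.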
let mut vehicle = None;
for (i, spawn_point) in spawn_points.iter().take(10).enumerate() {
match world.spawn_actor(&vehicle_bp, spawn_point) {
Ok(vehicle_actor) => match Vehicle::try_from(vehicle_actor) {
Ok(v) => {
println!(" Vehicle spawned at spawn point {} with autopilot", i);
v.set_autopilot(true)?;
vehicle = Some(v);
break;
}
Err(_) => continue,
},
Err(_) => {
continue;
}
}
}
let vehicle = vehicle.ok_or(
"Failed to spawn vehicle at any of the first 10 spawn points. Try running scripts/clean-carla-world.py",
)?;
println!();
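
// All three cameras share one mount point: 2.0 m ahead of the vehicle origin
// and 1.5 m up, facing forward.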
let sensor_transform = Transform {
location: Location::new(2.0, 0.0, 1.5),
rotation: Rotation::new(0.0, 0.0, 0.0),
};
println!("Spawning sensors...");
let mut rgb_bp = bp_lib
.find("sensor.camera.rgb")?
.ok_or("RGB camera not found")?;
configure_camera(&mut rgb_bp)?;
let rgb_sensor = spawn_sensor(&mut world, &rgb_bp, &sensor_transform, &vehicle)?;
println!(" RGB Camera");
let mut depth_bp = bp_lib
.find("sensor.camera.depth")?
.ok_or("Depth camera not found")?;
configure_camera(&mut depth_bp)?;
let depth_sensor = spawn_sensor(&mut world, &depth_bp, &sensor_transform, &vehicle)?;
println!(" Depth Camera");
let mut semantic_bp = bp_lib
.find("sensor.camera.semantic_segmentation")?
.ok_or("Semantic camera not found")?;
configure_camera(&mut semantic_bp)?;
let semantic_sensor = spawn_sensor(&mut world, &semantic_bp, &sensor_transform, &vehicle)?;
println!(" Semantic Segmentation Camera");
let mut lidar_bp = bp_lib
.find("sensor.lidar.ray_cast")?
.ok_or("LiDAR not found")?;
if !lidar_bp.set_attribute("range", "50.0") {
return Err("Failed to set lidar range".into());
}
if !lidar_bp.set_attribute("rotation_frequency", "20.0") {
return Err("Failed to set lidar rotation_frequency".into());
}
if !lidar_bp.set_attribute("channels", "32") {
return Err("Failed to set lidar channels".into());
}
if !lidar_bp.set_attribute("points_per_second", "100000") {
return Err("Failed to set lidar points_per_second".into());
}
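
// The LiDAR gets its own roof-level mount, 2.5 m above the vehicle origin.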
let lidar_transform = Transform {
location: Location::new(0.0, 0.0, 2.5),
rotation: Rotation::new(0.0, 0.0, 0.0),
};
let lidar_sensor = spawn_sensor(&mut world, &lidar_bp, &lidar_transform, &vehicle)?;
println!(" LiDAR\n");
let current_frame: Arc<Mutex<SensorFrame>> = Arc::new(Mutex::new(SensorFrame::default()));
let frame_rgb = current_frame.clone();
rgb_sensor.listen(move |sensor_data| {
if let Ok(image) = Image::try_from(sensor_data) {
let mut frame = frame_rgb.lock().unwrap();
frame.rgb_image = Some(image);
}
})?;
let frame_depth = current_frame.clone();
depth_sensor.listen(move |sensor_data| {
if let Ok(image) = Image::try_from(sensor_data) {
let mut frame = frame_depth.lock().unwrap();
frame.depth_image = Some(image);
}
})?;
let frame_semantic = current_frame.clone();
semantic_sensor.listen(move |sensor_data| {
if let Ok(image) = Image::try_from(sensor_data) {
let mut frame = frame_semantic.lock().unwrap();
frame.semantic_image = Some(image);
}
})?;
let frame_lidar = current_frame.clone();
lidar_sensor.listen(move |sensor_data| {
if let Ok(measurement) = LidarMeasurement::try_from(sensor_data) {
let mut frame = frame_lidar.lock().unwrap();
frame.lidar_measurement = Some(measurement);
}
})?;
println!(" All sensors listening\n");
println!("Capturing {} frames...\n", NUM_FRAMES);
for i in 0..NUM_FRAMES {
thread::sleep(Duration::from_millis(100));
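// The lock is held for the whole save step below, which briefly blocks the
// sensor callbacks; that is acceptable for a short demonstration like this.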
let frame = current_frame.lock().unwrap();
if frame.rgb_image.is_some()
&& frame.depth_image.is_some()
&& frame.semantic_image.is_some()
&& frame.lidar_measurement.is_some()
{
println!("Frame {:02}: Saving all sensor outputs...", i);
if let Some(ref rgb) = frame.rgb_image {
let path = format!("_out/sensors/rgb_{:04}.png", i);
rgb.save_to_disk(&path)?;
}
if let Some(ref depth) = frame.depth_image {
let path = format!("_out/sensors/depth_{:04}.png", i);
depth.save_to_disk(&path)?;
}
if let Some(ref semantic) = frame.semantic_image {
let path = format!("_out/sensors/semantic_{:04}.png", i);
semantic.save_to_disk(&path)?;
}
if let Some(ref lidar) = frame.lidar_measurement {
println!(
" LiDAR: {} points, {} channels",
lidar.len(),
lidar.channel_count()
);
}
} else {
println!("Frame {:02}: Waiting for all sensors...", i);
}
}
println!("\n Capture complete!");
println!(" Sensor data saved to _out/sensors/\n");
println!("Cleaning up...");
rgb_sensor.stop()?;
depth_sensor.stop()?;
semantic_sensor.stop()?;
lidar_sensor.stop()?;
println!(" Cleanup complete\n");
println!("=== Visualization Demo Complete ===");
println!("\nGenerated sensor outputs:");
println!(" RGB images: _out/sensors/rgb_*.png");
println!(" Depth maps: _out/sensors/depth_*.png");
println!(" Semantic segmentation: _out/sensors/semantic_*.png");
println!(" LiDAR statistics logged to console");
Ok(())
}
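
/// Applies the shared image resolution and field-of-view settings to a camera blueprint.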
fn configure_camera(
bp: &mut carla::client::ActorBlueprint,
) -> Result<(), Box<dyn std::error::Error>> {
if !bp.set_attribute("image_size_x", &IMAGE_WIDTH.to_string()) {
return Err("Failed to set image_size_x".into());
}
if !bp.set_attribute("image_size_y", &IMAGE_HEIGHT.to_string()) {
return Err("Failed to set image_size_y".into());
}
if !bp.set_attribute("fov", &FOV.to_string()) {
return Err("Failed to set fov".into());
}
Ok(())
}
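
/// Spawns a sensor from `bp` at `transform`, rigidly attached to `parent`.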
fn spawn_sensor(
world: &mut carla::client::World,
bp: &carla::client::ActorBlueprint,
transform: &Transform,
parent: &Vehicle,
) -> Result<Sensor, Box<dyn std::error::Error>> {
// Attach the sensor rigidly to the vehicle and surface any spawn failure to the caller.
let actor = world
.spawn_actor_opt(bp, transform, Some(parent), AttachmentType::Rigid)
.map_err(|e| format!("Failed to spawn sensor: {}", e))?;
Sensor::try_from(actor).map_err(|_| "Spawned actor is not a sensor".into())
}