use pmat::claude_integration::{
BridgeConfig, ClaudeBridge, FeatureFlagsBuilder, MetricsCollector, RolloutStrategy,
};
use std::time::Duration;
/// Entry point: runs every integration example in sequence.
///
/// Any example returning an error aborts the remaining ones and
/// propagates the failure out of `main`.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Install a global tracing subscriber so example log output is visible.
    tracing_subscriber::fmt::init();

    println!("=== Claude Agent SDK Integration Examples ===\n");

    // The examples build on each other conceptually; keep this order.
    basic_usage().await?;
    feature_flags_example().await?;
    caching_example().await?;
    observability_example().await?;
    progressive_rollout_example().await?;

    println!("\n=== All examples completed successfully ===");
    Ok(())
}
/// Example 1: minimal end-to-end use of the bridge.
///
/// Builds a bridge from the default configuration, analyzes a small
/// code sample, prints the reported metrics, and finishes with a
/// health probe.
async fn basic_usage() -> Result<(), Box<dyn std::error::Error>> {
    println!("📝 Example 1: Basic Usage\n");

    let bridge = ClaudeBridge::new(BridgeConfig::default()).await?;
    println!("✓ Bridge initialized in {:?}", bridge.init_time());

    // Deliberately nested sample so the analyzer has complexity to report.
    let sample = r#"
fn complex_function() {
if condition1 {
for item in items {
while processing {
match state {
State::A => {},
State::B => {},
}
}
}
}
}
"#;

    let analysis = bridge.analyze_code(sample).await?;
    println!("Analysis Results:");
    println!(" Complexity: {}", analysis.complexity);
    println!(" Cognitive Complexity: {}", analysis.cognitive_complexity);
    println!(" SATD Count: {}", analysis.satd_count);

    // health_check() returns a plain bool; render it as a check mark.
    let status = if bridge.health_check().await { "✓" } else { "✗" };
    println!(" Health: {}\n", status);
    Ok(())
}
/// Example 2: feature-flag rollout strategies.
///
/// Walks through allowlist gating, a percentage rollout, and the
/// global kill switch.
async fn feature_flags_example() -> Result<(), Box<dyn std::error::Error>> {
    println!("🚩 Example 2: Feature Flags\n");

    // Start with an explicit allowlist: only the two listed users qualify.
    let flags = FeatureFlagsBuilder::new()
        .enabled(true)
        .strategy(RolloutStrategy::Allowlist)
        .add_to_allowlist("user_123")
        .add_to_allowlist("user_456")
        .build();

    for user in ["user_123", "user_456", "user_789", "user_000"] {
        let gated_in = flags.should_use_claude(user);
        println!(
            " {} {} Claude: {}",
            user,
            if gated_in { "✓ uses" } else { "✗ skips" },
            if gated_in { "enabled" } else { "disabled" }
        );
    }

    // Switch strategies at runtime and count how many of 100 synthetic
    // users land in the enabled bucket.
    println!("\n Switching to 50% rollout...");
    flags.set_strategy(RolloutStrategy::Percentage(50));
    let enabled_count = (0..100)
        .filter(|i| flags.should_use_claude(&format!("user_{}", i)))
        .count();
    println!(" 50% rollout: {}/100 users enabled", enabled_count);

    // Kill switch: disable() must override every strategy.
    println!("\n Testing kill switch...");
    flags.disable();
    println!(" After disable: {}", flags.should_use_claude("any_user"));
    println!();
    Ok(())
}
/// Example 3: analysis-cache performance.
///
/// Runs the same analysis twice and compares wall-clock latency of the
/// cache miss vs. the cache hit, then prints the bridge's cache stats.
async fn caching_example() -> Result<(), Box<dyn std::error::Error>> {
    println!("⚡ Example 3: Caching Performance\n");

    let config = BridgeConfig {
        enable_cache: true,
        ..Default::default()
    };
    let bridge = ClaudeBridge::new(config).await?;

    let test_code = "fn test() { if x { for y in z {} } }";

    // First call: expected cache miss.
    let start = std::time::Instant::now();
    let _ = bridge.analyze_code(test_code).await?;
    let first_duration = start.elapsed();

    // Second call with identical input: expected cache hit.
    let start = std::time::Instant::now();
    let _ = bridge.analyze_code(test_code).await?;
    let second_duration = start.elapsed();

    println!(" First call (cache miss): {:?}", first_duration);
    println!(" Second call (cache hit): {:?}", second_duration);

    // Clamp the divisor to 1µs: a sub-microsecond cache hit would
    // otherwise divide by zero and print an `inf` (or `NaN`) speedup.
    let speedup =
        first_duration.as_micros() as f64 / second_duration.as_micros().max(1) as f64;
    println!(" Speedup: {:.2}x faster", speedup);

    // Rates come back as fractions in [0, 1]; render as percentages.
    let stats = bridge.cache_stats();
    println!("\n Cache Statistics:");
    println!(" L1 hit rate: {:.1}%", stats.l1_hit_rate * 100.0);
    println!(" L2 hit rate: {:.1}%", stats.l2_hit_rate * 100.0);
    println!(
        " Effective hit rate: {:.1}%\n",
        stats.effective_hit_rate * 100.0
    );
    Ok(())
}
/// Example 4: metrics collection and snapshotting.
///
/// Records a synthetic mix of successes, errors, and cache outcomes,
/// then prints a snapshot of the aggregated metrics.
async fn observability_example() -> Result<(), Box<dyn std::error::Error>> {
    println!("📊 Example 4: Observability\n");

    let metrics = MetricsCollector::new();

    // 8 successes followed by 2 application errors, with latencies
    // stepping up by 100µs per request.
    for i in 0u64..10 {
        let latency = Duration::from_micros(1000 + (i * 100));
        if i < 8 {
            metrics.record_success(latency);
        } else {
            metrics.record_error(
                pmat::claude_integration::observability::ErrorType::Application,
                latency,
            );
        }
    }

    // 7 hits and 3 misses -> a 70% cache hit rate.
    (0..7).for_each(|_| metrics.record_cache_hit());
    (0..3).for_each(|_| metrics.record_cache_miss());

    let snapshot = metrics.snapshot();
    println!(" Metrics Summary:");
    println!(" Total requests: {}", snapshot.requests_total);
    println!(" Success rate: {:.1}%", snapshot.success_rate * 100.0);
    println!(" Avg latency: {}μs", snapshot.avg_latency_us);
    println!(" Min latency: {}μs", snapshot.min_latency_us);
    println!(" Max latency: {}μs", snapshot.max_latency_us);
    println!(
        " Cache hit rate: {:.1}%\n",
        snapshot.cache_hit_rate * 100.0
    );
    println!(" Formatted: {}\n", snapshot.format_summary());
    Ok(())
}
/// Example 5: automatic rollback on latency degradation.
///
/// Starts a 10% rollout with a 1000ms latency budget, then feeds the
/// flags one healthy and one degraded latency sample to show the
/// auto-rollback behavior.
async fn progressive_rollout_example() -> Result<(), Box<dyn std::error::Error>> {
    println!("🎯 Example 5: Progressive Rollout\n");

    let flags = FeatureFlagsBuilder::new()
        .enabled(true)
        .strategy(RolloutStrategy::Percentage(10))
        .max_latency(1000)
        .build();

    println!(" Starting with 10% rollout...");
    println!(" Current percentage: {}%", flags.get_percentage());

    // Within budget: no rollback expected.
    println!("\n Simulating good performance (500ms)...");
    let degraded = flags.auto_rollback_on_degradation(500);
    println!(" Rolled back: {}", degraded);
    println!(" Still enabled: {}", flags.is_enabled());

    // Over budget: rollback expected.
    println!("\n Simulating bad performance (2000ms)...");
    let degraded = flags.auto_rollback_on_degradation(2000);
    println!(" Rolled back: {}", degraded);
    println!(" Still enabled: {}", flags.is_enabled());

    // After a rollback, users should no longer be gated in.
    println!("\n Attempting to use after rollback:");
    println!(" User allowed: {}\n", flags.should_use_claude("test_user"));
    Ok(())
}
// Kept for reference; not invoked from main().
#[allow(dead_code)]
/// Example 6: flags + metrics + bridge combined in one loop.
///
/// Gates 100 synthetic users through a 25% rollout, analyzes a trivial
/// snippet for each gated-in user, records latency/error metrics, and
/// stops early if the latency budget triggers an automatic rollback.
async fn complete_workflow_example() -> Result<(), Box<dyn std::error::Error>> {
    println!("🔄 Example 6: Complete Workflow\n");

    let flags = FeatureFlagsBuilder::new()
        .enabled(true)
        .strategy(RolloutStrategy::Percentage(25))
        .max_latency(3000)
        .build();
    let metrics = MetricsCollector::new();

    let config = BridgeConfig {
        pool_size: 5,
        enable_cache: true,
        ..Default::default()
    };
    let bridge = ClaudeBridge::new(config).await?;
    println!(" ✓ Bridge initialized");

    for i in 0..100 {
        let user_id = format!("user_{}", i);
        if flags.should_use_claude(&user_id) {
            let timer = pmat::claude_integration::observability::Timer::new();
            match bridge.analyze_code("fn test() {}").await {
                Ok(_result) => {
                    let latency = timer.elapsed();
                    metrics.record_success(latency);
                    // as_millis() is u128; saturate instead of the previous
                    // silently-truncating `as u32` cast.
                    let elapsed_ms = u32::try_from(latency.as_millis()).unwrap_or(u32::MAX);
                    if flags.auto_rollback_on_degradation(elapsed_ms) {
                        println!(" ⚠️ Automatic rollback triggered!");
                        break;
                    }
                }
                Err(_e) => {
                    let latency = timer.elapsed();
                    metrics.record_error(
                        pmat::claude_integration::observability::ErrorType::Application,
                        latency,
                    );
                }
            }
        }
    }

    let snapshot = metrics.snapshot();
    println!("\n Final Results:");
    println!(" {}", snapshot.format_summary());

    let pool_stats = bridge.pool_stats();
    println!(
        " Pool: {} successes, {} failures",
        pool_stats.successes, pool_stats.failures
    );

    let cache_stats = bridge.cache_stats();
    println!(
        " Cache: {:.1}% effective hit rate\n",
        cache_stats.effective_hit_rate * 100.0
    );
    Ok(())
}