// Example: industry40_autoscale/industry40_autoscale.rs
industry40_autoscale.rs1use avila_async::{Runtime, RuntimeConfig, ScalingConfig, ResourceLimits};
2use std::time::Duration;
3
4fn main() {
5 let scaling_config = ScalingConfig {
6 min_threads: 2,
7 max_threads: 12,
8 target_queue_length: 100,
9 scale_up_threshold: 0.75,
10 scale_down_threshold: 0.25,
11 cooldown_period: Duration::from_secs(3),
12 };
13
14 let resource_limits = ResourceLimits {
15 max_queue_size: Some(500),
16 max_task_duration: Some(Duration::from_secs(60)),
17 ..Default::default()
18 };
19
20 let config = RuntimeConfig {
21 num_threads: Some(4),
22 enable_autoscaling: true,
23 scaling_config,
24 resource_limits,
25 };
26
27 let rt = Runtime::with_config(config);
28
29 println!("āļø Auto-Scaling Demonstration - Industry 4.0");
30 println!("============================================\n");
31
32 rt.block_on(async move {
33 println!("š Monitoring workload and scaling decisions...\n");
34
35 println!("Phase 1: Light Load (10 tasks)");
37 println!("-------------------------------");
38 spawn_tasks(&rt, 10, Duration::from_millis(100));
39 monitor_for_seconds(&rt, 2).await;
40
41 println!("\nPhase 2: Medium Load (50 tasks)");
43 println!("--------------------------------");
44 spawn_tasks(&rt, 50, Duration::from_millis(150));
45 monitor_for_seconds(&rt, 3).await;
46
47 println!("\nPhase 3: Heavy Load (200 tasks)");
49 println!("--------------------------------");
50 spawn_tasks(&rt, 200, Duration::from_millis(100));
51 monitor_for_seconds(&rt, 4).await;
52
53 println!("\nPhase 4: Cool Down");
55 println!("------------------");
56 monitor_for_seconds(&rt, 5).await;
57
58 println!("\nš Final Performance Report");
59 println!("==========================");
60 let final_metrics = rt.metrics().snapshot();
61 println!("Total tasks spawned: {}", final_metrics.tasks_spawned);
62 println!("Total tasks completed: {}", final_metrics.tasks_completed);
63 println!("Tasks failed: {}", final_metrics.tasks_failed);
64 println!("Peak queue length: {}", final_metrics.max_queue_length);
65 println!("Average execution time: {:?}", final_metrics.avg_execution_time);
66 println!("P95 execution time: {:?}", final_metrics.p95_execution_time);
67 println!("P99 execution time: {:?}", final_metrics.p99_execution_time);
68 println!("Final throughput: {} tasks/sec", final_metrics.tasks_per_second);
69 });
70}
71
72fn spawn_tasks(rt: &Runtime, count: usize, delay: Duration) {
73 for i in 0..count {
74 rt.spawn(async move {
75 avila_async::sleep(delay).await;
76 let mut sum = 0u64;
78 for j in 0..1000 {
79 sum = sum.wrapping_add(j);
80 }
81 });
82 }
83}
84
85async fn monitor_for_seconds(rt: &Runtime, seconds: u64) {
86 let end_time = std::time::Instant::now() + Duration::from_secs(seconds);
87
88 while std::time::Instant::now() < end_time {
89 avila_async::sleep(Duration::from_millis(500)).await;
90
91 let snapshot = rt.metrics().snapshot();
92 let active_tasks = snapshot.tasks_spawned - snapshot.tasks_completed;
93
94 println!(
95 " [{}s] Tasks: {} active, {} queued | Threads: {} active, {} idle | TPS: {}",
96 (seconds - (end_time - std::time::Instant::now()).as_secs()),
97 active_tasks,
98 snapshot.queue_length,
99 snapshot.active_threads,
100 snapshot.idle_threads,
101 snapshot.tasks_per_second
102 );
103 }
104}