pub struct Runtime { /* private fields */ }

Implementations§
Source§
impl Runtime
impl Runtime
Source§
pub fn new() -> Self
pub fn new() -> Self
Create a new runtime instance
Examples found in repository?
More examples
examples/timeout_demo.rs (line 15)
14fn main() {
15 let rt = Runtime::new();
16
17 rt.block_on(async {
18 // This will timeout
19 match timeout(Duration::from_secs(1), slow_operation()).await {
20 Ok(val) => println!("Slow operation completed: {}", val),
21 Err(_) => println!("Slow operation timed out!"),
22 }
23
24 // This will succeed
25 match timeout(Duration::from_secs(1), fast_operation()).await {
26 Ok(val) => println!("Fast operation completed: {}", val),
27 Err(_) => println!("Fast operation timed out!"),
28 }
29 });
30}

examples/parallel_tasks.rs (line 5)
4fn main() {
5 let rt = Runtime::new();
6
7 rt.block_on(async move {
8 println!("Spawning 100 concurrent tasks...");
9
10 let mut handles = vec![];
11
12 for i in 0..100 {
13 let handle = rt.spawn_with_handle(async move {
14 avila_async::sleep(Duration::from_millis(10)).await;
15 i * i
16 });
17 handles.push(handle);
18 }
19
20 println!("Waiting for all tasks to complete...");
21
22 let mut sum = 0;
23 for handle in handles {
24 if let Some(result) = handle.await_result().await {
25 sum += result;
26 }
27 }
28
29 println!("Sum of squares from 0 to 99: {}", sum);
30 println!("Active tasks: {}", rt.task_count());
31 });
32}

examples/industry40_tracing.rs (line 41)
40fn main() {
41 let rt = Runtime::new();
42
43 println!("🔍 Distributed Tracing Demo - Industry 4.0");
44 println!("=========================================\n");
45
46 rt.block_on(async move {
47 let ctx = TraceContext::new("order-processing-service");
48
49 println!("Trace ID: {:016x}", ctx.trace_id);
50 println!("Starting order processing...\n");
51
52 // Process multiple batches
53 let batch1 = vec![1001, 1002, 1003];
54 let batch2 = vec![2001, 2002];
55
56 process_batch(&ctx, 1, batch1).await;
57 println!();
58 process_batch(&ctx, 2, batch2).await;
59 println!();
60
61 // Export trace data
62 println!("📤 Jaeger Trace Export");
63 println!("=====================");
64 println!("{}", rt.tracer().to_jaeger_json());
65 });
66}

examples/channel_demo.rs (line 5)
4fn main() {
5 let rt = Runtime::new();
6
7 rt.block_on(async move {
8 let (tx, rx) = channel::bounded::<String>(10);
9
10 // Spawn producer task
11 rt.spawn({
12 let tx = tx.clone();
13 async move {
14 for i in 0..5 {
15 let msg = format!("Message {}", i);
16 println!("Sending: {}", msg);
17 tx.send(msg).await.unwrap();
18 avila_async::sleep(Duration::from_millis(500)).await;
19 }
20 }
21 });
22
23 // Spawn another producer
24 rt.spawn({
25 async move {
26 for i in 0..5 {
27 let msg = format!("Urgent {}", i);
28 println!("Sending: {}", msg);
29 tx.send(msg).await.unwrap();
30 avila_async::sleep(Duration::from_millis(300)).await;
31 }
32 }
33 });
34
35 // Receive messages
36 let mut count = 0;
37 while let Some(msg) = rx.recv().await {
38 println!("Received: {}", msg);
39 count += 1;
40 if count >= 10 {
41 break;
42 }
43 }
44
45 println!("All messages received!");
46 });
47}

Source§
pub fn with_config(config: RuntimeConfig) -> Self
pub fn with_config(config: RuntimeConfig) -> Self
Create runtime with custom configuration
Examples found in repository?
examples/industry40_metrics.rs (line 12)
4fn main() {
5 // Create runtime with Industry 4.0 features
6 let config = RuntimeConfig {
7 num_threads: Some(4),
8 enable_autoscaling: false,
9 ..Default::default()
10 };
11
12 let rt = Runtime::with_config(config);
13
14 println!("🏭 Industry 4.0 Metrics Dashboard");
15 println!("================================\n");
16
17 rt.block_on(async move {
18 // Spawn multiple tasks to generate metrics
19 for i in 0..20 {
20 rt.spawn(async move {
21 avila_async::sleep(Duration::from_millis(50 * (i % 5) as u64)).await;
22 // Simulate work
23 });
24 }
25
26 // Monitor metrics in real-time
27 for iteration in 0..5 {
28 avila_async::sleep(Duration::from_millis(200)).await;
29
30 let snapshot = rt.metrics().snapshot();
31 let health = rt.health().get_report();
32
33 println!("📊 Iteration {}", iteration + 1);
34 println!(" {}", snapshot);
35 println!(" Health: {} | Ready: {} | Alive: {}",
36 health.status, health.ready, health.alive);
37 println!();
38 }
39
40 // Wait for all tasks to complete
41 while rt.task_count() > 0 {
42 avila_async::sleep(Duration::from_millis(50)).await;
43 }
44
45 println!("📈 Final Metrics Report");
46 println!("=====================");
47 let final_snapshot = rt.metrics().snapshot();
48 println!("{}", final_snapshot);
49 println!();
50
51 println!("🏥 Health Check Report");
52 println!("====================");
53 let health_report = rt.health().get_report();
54 println!("{}", health_report);
55 println!();
56
57 println!("📤 Prometheus Export");
58 println!("===================");
59 println!("{}", rt.metrics().to_prometheus());
60 });
61}

More examples
examples/industry40_health.rs (line 21)
4fn main() {
5 let scaling_config = ScalingConfig {
6 min_threads: 2,
7 max_threads: 8,
8 target_queue_length: 50,
9 scale_up_threshold: 0.7,
10 scale_down_threshold: 0.3,
11 cooldown_period: Duration::from_secs(2),
12 };
13
14 let config = RuntimeConfig {
15 num_threads: Some(4),
16 enable_autoscaling: true,
17 scaling_config,
18 ..Default::default()
19 };
20
21 let rt = Runtime::with_config(config);
22
23 println!("🏥 Health Monitoring System - Industry 4.0");
24 println!("=========================================\n");
25
26 rt.block_on(async move {
27 // Simulate various workload conditions
28 println!("📊 Phase 1: Normal Operation");
29 println!("---------------------------");
30
31 for i in 0..10 {
32 rt.spawn(async move {
33 avila_async::sleep(Duration::from_millis(100)).await;
34 });
35 }
36
37 avila_async::sleep(Duration::from_millis(500)).await;
38 print_health_status(&rt);
39
40 println!("\n📊 Phase 2: High Load");
41 println!("--------------------");
42
43 for i in 0..50 {
44 rt.spawn(async move {
45 avila_async::sleep(Duration::from_millis(200)).await;
46 });
47 }
48
49 avila_async::sleep(Duration::from_millis(300)).await;
50 print_health_status(&rt);
51
52 // Simulate a degraded state
53 println!("\n⚠️ Phase 3: Degraded Service");
54 println!("---------------------------");
55
56 rt.health().add_check(
57 "database_connection",
58 HealthStatus::Degraded,
59 "Connection pool at 85% capacity"
60 );
61
62 rt.health().add_check(
63 "cache_latency",
64 HealthStatus::Degraded,
65 "Cache response time > 100ms"
66 );
67
68 print_health_status(&rt);
69
70 // Simulate recovery
71 println!("\n✅ Phase 4: Recovery");
72 println!("------------------");
73
74 rt.health().clear_checks();
75
76 // Wait for queue to drain
77 while rt.task_count() > 0 {
78 avila_async::sleep(Duration::from_millis(100)).await;
79 }
80
81 print_health_status(&rt);
82
83 println!("\n📤 Health Check JSON Export");
84 println!("==========================");
85 let report = rt.health().get_report();
86 println!("{}", report.to_json());
87 });
88}

examples/industry40_autoscale.rs (line 27)
4fn main() {
5 let scaling_config = ScalingConfig {
6 min_threads: 2,
7 max_threads: 12,
8 target_queue_length: 100,
9 scale_up_threshold: 0.75,
10 scale_down_threshold: 0.25,
11 cooldown_period: Duration::from_secs(3),
12 };
13
14 let resource_limits = ResourceLimits {
15 max_queue_size: Some(500),
16 max_task_duration: Some(Duration::from_secs(60)),
17 ..Default::default()
18 };
19
20 let config = RuntimeConfig {
21 num_threads: Some(4),
22 enable_autoscaling: true,
23 scaling_config,
24 resource_limits,
25 };
26
27 let rt = Runtime::with_config(config);
28
29 println!("⚙️ Auto-Scaling Demonstration - Industry 4.0");
30 println!("============================================\n");
31
32 rt.block_on(async move {
33 println!("📊 Monitoring workload and scaling decisions...\n");
34
35 // Phase 1: Light load
36 println!("Phase 1: Light Load (10 tasks)");
37 println!("-------------------------------");
38 spawn_tasks(&rt, 10, Duration::from_millis(100));
39 monitor_for_seconds(&rt, 2).await;
40
41 // Phase 2: Medium load
42 println!("\nPhase 2: Medium Load (50 tasks)");
43 println!("--------------------------------");
44 spawn_tasks(&rt, 50, Duration::from_millis(150));
45 monitor_for_seconds(&rt, 3).await;
46
47 // Phase 3: Heavy load
48 println!("\nPhase 3: Heavy Load (200 tasks)");
49 println!("--------------------------------");
50 spawn_tasks(&rt, 200, Duration::from_millis(100));
51 monitor_for_seconds(&rt, 4).await;
52
53 // Phase 4: Cool down
54 println!("\nPhase 4: Cool Down");
55 println!("------------------");
56 monitor_for_seconds(&rt, 5).await;
57
58 println!("\n📈 Final Performance Report");
59 println!("==========================");
60 let final_metrics = rt.metrics().snapshot();
61 println!("Total tasks spawned: {}", final_metrics.tasks_spawned);
62 println!("Total tasks completed: {}", final_metrics.tasks_completed);
63 println!("Tasks failed: {}", final_metrics.tasks_failed);
64 println!("Peak queue length: {}", final_metrics.max_queue_length);
65 println!("Average execution time: {:?}", final_metrics.avg_execution_time);
66 println!("P95 execution time: {:?}", final_metrics.p95_execution_time);
67 println!("P99 execution time: {:?}", final_metrics.p99_execution_time);
68 println!("Final throughput: {} tasks/sec", final_metrics.tasks_per_second);
69 });
70}

Source§
pub fn metrics(&self) -> &Metrics
pub fn metrics(&self) -> &Metrics
Get metrics collector
Examples found in repository?
examples/industry40_health.rs (line 91)
90fn print_health_status(rt: &Runtime) {
91 let metrics = rt.metrics().snapshot();
92 let health = rt.health().get_report();
93
94 println!("Status: {} | Ready: {} | Alive: {}",
95 health.status, health.ready, health.alive);
96 println!("Tasks: {} active | Queue: {} items",
97 metrics.tasks_spawned - metrics.tasks_completed,
98 metrics.queue_length);
99 println!("Threads: {} active, {} idle",
100 metrics.active_threads, metrics.idle_threads);
101
102 if !health.checks.is_empty() {
103 println!("Health Checks:");
104 for check in &health.checks {
105 println!(" - {} [{}]: {}", check.name, check.status, check.message);
106 }
107 }
108}

More examples
examples/industry40_metrics.rs (line 30)
4fn main() {
5 // Create runtime with Industry 4.0 features
6 let config = RuntimeConfig {
7 num_threads: Some(4),
8 enable_autoscaling: false,
9 ..Default::default()
10 };
11
12 let rt = Runtime::with_config(config);
13
14 println!("🏭 Industry 4.0 Metrics Dashboard");
15 println!("================================\n");
16
17 rt.block_on(async move {
18 // Spawn multiple tasks to generate metrics
19 for i in 0..20 {
20 rt.spawn(async move {
21 avila_async::sleep(Duration::from_millis(50 * (i % 5) as u64)).await;
22 // Simulate work
23 });
24 }
25
26 // Monitor metrics in real-time
27 for iteration in 0..5 {
28 avila_async::sleep(Duration::from_millis(200)).await;
29
30 let snapshot = rt.metrics().snapshot();
31 let health = rt.health().get_report();
32
33 println!("📊 Iteration {}", iteration + 1);
34 println!(" {}", snapshot);
35 println!(" Health: {} | Ready: {} | Alive: {}",
36 health.status, health.ready, health.alive);
37 println!();
38 }
39
40 // Wait for all tasks to complete
41 while rt.task_count() > 0 {
42 avila_async::sleep(Duration::from_millis(50)).await;
43 }
44
45 println!("📈 Final Metrics Report");
46 println!("=====================");
47 let final_snapshot = rt.metrics().snapshot();
48 println!("{}", final_snapshot);
49 println!();
50
51 println!("🏥 Health Check Report");
52 println!("====================");
53 let health_report = rt.health().get_report();
54 println!("{}", health_report);
55 println!();
56
57 println!("📤 Prometheus Export");
58 println!("===================");
59 println!("{}", rt.metrics().to_prometheus());
60 });
61}

examples/industry40_autoscale.rs (line 60)
4fn main() {
5 let scaling_config = ScalingConfig {
6 min_threads: 2,
7 max_threads: 12,
8 target_queue_length: 100,
9 scale_up_threshold: 0.75,
10 scale_down_threshold: 0.25,
11 cooldown_period: Duration::from_secs(3),
12 };
13
14 let resource_limits = ResourceLimits {
15 max_queue_size: Some(500),
16 max_task_duration: Some(Duration::from_secs(60)),
17 ..Default::default()
18 };
19
20 let config = RuntimeConfig {
21 num_threads: Some(4),
22 enable_autoscaling: true,
23 scaling_config,
24 resource_limits,
25 };
26
27 let rt = Runtime::with_config(config);
28
29 println!("⚙️ Auto-Scaling Demonstration - Industry 4.0");
30 println!("============================================\n");
31
32 rt.block_on(async move {
33 println!("📊 Monitoring workload and scaling decisions...\n");
34
35 // Phase 1: Light load
36 println!("Phase 1: Light Load (10 tasks)");
37 println!("-------------------------------");
38 spawn_tasks(&rt, 10, Duration::from_millis(100));
39 monitor_for_seconds(&rt, 2).await;
40
41 // Phase 2: Medium load
42 println!("\nPhase 2: Medium Load (50 tasks)");
43 println!("--------------------------------");
44 spawn_tasks(&rt, 50, Duration::from_millis(150));
45 monitor_for_seconds(&rt, 3).await;
46
47 // Phase 3: Heavy load
48 println!("\nPhase 3: Heavy Load (200 tasks)");
49 println!("--------------------------------");
50 spawn_tasks(&rt, 200, Duration::from_millis(100));
51 monitor_for_seconds(&rt, 4).await;
52
53 // Phase 4: Cool down
54 println!("\nPhase 4: Cool Down");
55 println!("------------------");
56 monitor_for_seconds(&rt, 5).await;
57
58 println!("\n📈 Final Performance Report");
59 println!("==========================");
60 let final_metrics = rt.metrics().snapshot();
61 println!("Total tasks spawned: {}", final_metrics.tasks_spawned);
62 println!("Total tasks completed: {}", final_metrics.tasks_completed);
63 println!("Tasks failed: {}", final_metrics.tasks_failed);
64 println!("Peak queue length: {}", final_metrics.max_queue_length);
65 println!("Average execution time: {:?}", final_metrics.avg_execution_time);
66 println!("P95 execution time: {:?}", final_metrics.p95_execution_time);
67 println!("P99 execution time: {:?}", final_metrics.p99_execution_time);
68 println!("Final throughput: {} tasks/sec", final_metrics.tasks_per_second);
69 });
70}
71
72fn spawn_tasks(rt: &Runtime, count: usize, delay: Duration) {
73 for i in 0..count {
74 rt.spawn(async move {
75 avila_async::sleep(delay).await;
76 // Simulate CPU work
77 let mut sum = 0u64;
78 for j in 0..1000 {
79 sum = sum.wrapping_add(j);
80 }
81 });
82 }
83}
84
85async fn monitor_for_seconds(rt: &Runtime, seconds: u64) {
86 let end_time = std::time::Instant::now() + Duration::from_secs(seconds);
87
88 while std::time::Instant::now() < end_time {
89 avila_async::sleep(Duration::from_millis(500)).await;
90
91 let snapshot = rt.metrics().snapshot();
92 let active_tasks = snapshot.tasks_spawned - snapshot.tasks_completed;
93
94 println!(
95 " [{}s] Tasks: {} active, {} queued | Threads: {} active, {} idle | TPS: {}",
96 (seconds - (end_time - std::time::Instant::now()).as_secs()),
97 active_tasks,
98 snapshot.queue_length,
99 snapshot.active_threads,
100 snapshot.idle_threads,
101 snapshot.tasks_per_second
102 );
103 }
104}

Source§
pub fn health(&self) -> &HealthCheck
pub fn health(&self) -> &HealthCheck
Get health checker
Examples found in repository?
examples/industry40_metrics.rs (line 31)
4fn main() {
5 // Create runtime with Industry 4.0 features
6 let config = RuntimeConfig {
7 num_threads: Some(4),
8 enable_autoscaling: false,
9 ..Default::default()
10 };
11
12 let rt = Runtime::with_config(config);
13
14 println!("🏭 Industry 4.0 Metrics Dashboard");
15 println!("================================\n");
16
17 rt.block_on(async move {
18 // Spawn multiple tasks to generate metrics
19 for i in 0..20 {
20 rt.spawn(async move {
21 avila_async::sleep(Duration::from_millis(50 * (i % 5) as u64)).await;
22 // Simulate work
23 });
24 }
25
26 // Monitor metrics in real-time
27 for iteration in 0..5 {
28 avila_async::sleep(Duration::from_millis(200)).await;
29
30 let snapshot = rt.metrics().snapshot();
31 let health = rt.health().get_report();
32
33 println!("📊 Iteration {}", iteration + 1);
34 println!(" {}", snapshot);
35 println!(" Health: {} | Ready: {} | Alive: {}",
36 health.status, health.ready, health.alive);
37 println!();
38 }
39
40 // Wait for all tasks to complete
41 while rt.task_count() > 0 {
42 avila_async::sleep(Duration::from_millis(50)).await;
43 }
44
45 println!("📈 Final Metrics Report");
46 println!("=====================");
47 let final_snapshot = rt.metrics().snapshot();
48 println!("{}", final_snapshot);
49 println!();
50
51 println!("🏥 Health Check Report");
52 println!("====================");
53 let health_report = rt.health().get_report();
54 println!("{}", health_report);
55 println!();
56
57 println!("📤 Prometheus Export");
58 println!("===================");
59 println!("{}", rt.metrics().to_prometheus());
60 });
61}

More examples
examples/industry40_health.rs (line 56)
4fn main() {
5 let scaling_config = ScalingConfig {
6 min_threads: 2,
7 max_threads: 8,
8 target_queue_length: 50,
9 scale_up_threshold: 0.7,
10 scale_down_threshold: 0.3,
11 cooldown_period: Duration::from_secs(2),
12 };
13
14 let config = RuntimeConfig {
15 num_threads: Some(4),
16 enable_autoscaling: true,
17 scaling_config,
18 ..Default::default()
19 };
20
21 let rt = Runtime::with_config(config);
22
23 println!("🏥 Health Monitoring System - Industry 4.0");
24 println!("=========================================\n");
25
26 rt.block_on(async move {
27 // Simulate various workload conditions
28 println!("📊 Phase 1: Normal Operation");
29 println!("---------------------------");
30
31 for i in 0..10 {
32 rt.spawn(async move {
33 avila_async::sleep(Duration::from_millis(100)).await;
34 });
35 }
36
37 avila_async::sleep(Duration::from_millis(500)).await;
38 print_health_status(&rt);
39
40 println!("\n📊 Phase 2: High Load");
41 println!("--------------------");
42
43 for i in 0..50 {
44 rt.spawn(async move {
45 avila_async::sleep(Duration::from_millis(200)).await;
46 });
47 }
48
49 avila_async::sleep(Duration::from_millis(300)).await;
50 print_health_status(&rt);
51
52 // Simulate a degraded state
53 println!("\n⚠️ Phase 3: Degraded Service");
54 println!("---------------------------");
55
56 rt.health().add_check(
57 "database_connection",
58 HealthStatus::Degraded,
59 "Connection pool at 85% capacity"
60 );
61
62 rt.health().add_check(
63 "cache_latency",
64 HealthStatus::Degraded,
65 "Cache response time > 100ms"
66 );
67
68 print_health_status(&rt);
69
70 // Simulate recovery
71 println!("\n✅ Phase 4: Recovery");
72 println!("------------------");
73
74 rt.health().clear_checks();
75
76 // Wait for queue to drain
77 while rt.task_count() > 0 {
78 avila_async::sleep(Duration::from_millis(100)).await;
79 }
80
81 print_health_status(&rt);
82
83 println!("\n📤 Health Check JSON Export");
84 println!("==========================");
85 let report = rt.health().get_report();
86 println!("{}", report.to_json());
87 });
88}
89
90fn print_health_status(rt: &Runtime) {
91 let metrics = rt.metrics().snapshot();
92 let health = rt.health().get_report();
93
94 println!("Status: {} | Ready: {} | Alive: {}",
95 health.status, health.ready, health.alive);
96 println!("Tasks: {} active | Queue: {} items",
97 metrics.tasks_spawned - metrics.tasks_completed,
98 metrics.queue_length);
99 println!("Threads: {} active, {} idle",
100 metrics.active_threads, metrics.idle_threads);
101
102 if !health.checks.is_empty() {
103 println!("Health Checks:");
104 for check in &health.checks {
105 println!(" - {} [{}]: {}", check.name, check.status, check.message);
106 }
107 }
108}

Source§
pub fn tracer(&self) -> &Tracer
pub fn tracer(&self) -> &Tracer
Get tracer
Examples found in repository?
examples/industry40_tracing.rs (line 64)
40fn main() {
41 let rt = Runtime::new();
42
43 println!("🔍 Distributed Tracing Demo - Industry 4.0");
44 println!("=========================================\n");
45
46 rt.block_on(async move {
47 let ctx = TraceContext::new("order-processing-service");
48
49 println!("Trace ID: {:016x}", ctx.trace_id);
50 println!("Starting order processing...\n");
51
52 // Process multiple batches
53 let batch1 = vec![1001, 1002, 1003];
54 let batch2 = vec![2001, 2002];
55
56 process_batch(&ctx, 1, batch1).await;
57 println!();
58 process_batch(&ctx, 2, batch2).await;
59 println!();
60
61 // Export trace data
62 println!("📤 Jaeger Trace Export");
63 println!("=====================");
64 println!("{}", rt.tracer().to_jaeger_json());
65 });
66}

Source§
pub fn task_count(&self) -> usize
pub fn task_count(&self) -> usize
Get the number of active tasks
Examples found in repository?
examples/parallel_tasks.rs (line 30)
4fn main() {
5 let rt = Runtime::new();
6
7 rt.block_on(async move {
8 println!("Spawning 100 concurrent tasks...");
9
10 let mut handles = vec![];
11
12 for i in 0..100 {
13 let handle = rt.spawn_with_handle(async move {
14 avila_async::sleep(Duration::from_millis(10)).await;
15 i * i
16 });
17 handles.push(handle);
18 }
19
20 println!("Waiting for all tasks to complete...");
21
22 let mut sum = 0;
23 for handle in handles {
24 if let Some(result) = handle.await_result().await {
25 sum += result;
26 }
27 }
28
29 println!("Sum of squares from 0 to 99: {}", sum);
30 println!("Active tasks: {}", rt.task_count());
31 });
32}

More examples
examples/industry40_metrics.rs (line 41)
4fn main() {
5 // Create runtime with Industry 4.0 features
6 let config = RuntimeConfig {
7 num_threads: Some(4),
8 enable_autoscaling: false,
9 ..Default::default()
10 };
11
12 let rt = Runtime::with_config(config);
13
14 println!("🏭 Industry 4.0 Metrics Dashboard");
15 println!("================================\n");
16
17 rt.block_on(async move {
18 // Spawn multiple tasks to generate metrics
19 for i in 0..20 {
20 rt.spawn(async move {
21 avila_async::sleep(Duration::from_millis(50 * (i % 5) as u64)).await;
22 // Simulate work
23 });
24 }
25
26 // Monitor metrics in real-time
27 for iteration in 0..5 {
28 avila_async::sleep(Duration::from_millis(200)).await;
29
30 let snapshot = rt.metrics().snapshot();
31 let health = rt.health().get_report();
32
33 println!("📊 Iteration {}", iteration + 1);
34 println!(" {}", snapshot);
35 println!(" Health: {} | Ready: {} | Alive: {}",
36 health.status, health.ready, health.alive);
37 println!();
38 }
39
40 // Wait for all tasks to complete
41 while rt.task_count() > 0 {
42 avila_async::sleep(Duration::from_millis(50)).await;
43 }
44
45 println!("📈 Final Metrics Report");
46 println!("=====================");
47 let final_snapshot = rt.metrics().snapshot();
48 println!("{}", final_snapshot);
49 println!();
50
51 println!("🏥 Health Check Report");
52 println!("====================");
53 let health_report = rt.health().get_report();
54 println!("{}", health_report);
55 println!();
56
57 println!("📤 Prometheus Export");
58 println!("===================");
59 println!("{}", rt.metrics().to_prometheus());
60 });
61}

examples/industry40_health.rs (line 77)
4fn main() {
5 let scaling_config = ScalingConfig {
6 min_threads: 2,
7 max_threads: 8,
8 target_queue_length: 50,
9 scale_up_threshold: 0.7,
10 scale_down_threshold: 0.3,
11 cooldown_period: Duration::from_secs(2),
12 };
13
14 let config = RuntimeConfig {
15 num_threads: Some(4),
16 enable_autoscaling: true,
17 scaling_config,
18 ..Default::default()
19 };
20
21 let rt = Runtime::with_config(config);
22
23 println!("🏥 Health Monitoring System - Industry 4.0");
24 println!("=========================================\n");
25
26 rt.block_on(async move {
27 // Simulate various workload conditions
28 println!("📊 Phase 1: Normal Operation");
29 println!("---------------------------");
30
31 for i in 0..10 {
32 rt.spawn(async move {
33 avila_async::sleep(Duration::from_millis(100)).await;
34 });
35 }
36
37 avila_async::sleep(Duration::from_millis(500)).await;
38 print_health_status(&rt);
39
40 println!("\n📊 Phase 2: High Load");
41 println!("--------------------");
42
43 for i in 0..50 {
44 rt.spawn(async move {
45 avila_async::sleep(Duration::from_millis(200)).await;
46 });
47 }
48
49 avila_async::sleep(Duration::from_millis(300)).await;
50 print_health_status(&rt);
51
52 // Simulate a degraded state
53 println!("\n⚠️ Phase 3: Degraded Service");
54 println!("---------------------------");
55
56 rt.health().add_check(
57 "database_connection",
58 HealthStatus::Degraded,
59 "Connection pool at 85% capacity"
60 );
61
62 rt.health().add_check(
63 "cache_latency",
64 HealthStatus::Degraded,
65 "Cache response time > 100ms"
66 );
67
68 print_health_status(&rt);
69
70 // Simulate recovery
71 println!("\n✅ Phase 4: Recovery");
72 println!("------------------");
73
74 rt.health().clear_checks();
75
76 // Wait for queue to drain
77 while rt.task_count() > 0 {
78 avila_async::sleep(Duration::from_millis(100)).await;
79 }
80
81 print_health_status(&rt);
82
83 println!("\n📤 Health Check JSON Export");
84 println!("==========================");
85 let report = rt.health().get_report();
86 println!("{}", report.to_json());
87 });
88}

Source§
pub fn spawn<F>(&self, future: F)
pub fn spawn<F>(&self, future: F)
Spawn a future onto the runtime
Examples found in repository?
examples/industry40_autoscale.rs (lines 74-81)
72fn spawn_tasks(rt: &Runtime, count: usize, delay: Duration) {
73 for i in 0..count {
74 rt.spawn(async move {
75 avila_async::sleep(delay).await;
76 // Simulate CPU work
77 let mut sum = 0u64;
78 for j in 0..1000 {
79 sum = sum.wrapping_add(j);
80 }
81 });
82 }
83}

More examples
examples/channel_demo.rs (lines 11-21)
4fn main() {
5 let rt = Runtime::new();
6
7 rt.block_on(async move {
8 let (tx, rx) = channel::bounded::<String>(10);
9
10 // Spawn producer task
11 rt.spawn({
12 let tx = tx.clone();
13 async move {
14 for i in 0..5 {
15 let msg = format!("Message {}", i);
16 println!("Sending: {}", msg);
17 tx.send(msg).await.unwrap();
18 avila_async::sleep(Duration::from_millis(500)).await;
19 }
20 }
21 });
22
23 // Spawn another producer
24 rt.spawn({
25 async move {
26 for i in 0..5 {
27 let msg = format!("Urgent {}", i);
28 println!("Sending: {}", msg);
29 tx.send(msg).await.unwrap();
30 avila_async::sleep(Duration::from_millis(300)).await;
31 }
32 }
33 });
34
35 // Receive messages
36 let mut count = 0;
37 while let Some(msg) = rx.recv().await {
38 println!("Received: {}", msg);
39 count += 1;
40 if count >= 10 {
41 break;
42 }
43 }
44
45 println!("All messages received!");
46 });
47}

examples/industry40_metrics.rs (lines 20-23)
4fn main() {
5 // Create runtime with Industry 4.0 features
6 let config = RuntimeConfig {
7 num_threads: Some(4),
8 enable_autoscaling: false,
9 ..Default::default()
10 };
11
12 let rt = Runtime::with_config(config);
13
14 println!("🏭 Industry 4.0 Metrics Dashboard");
15 println!("================================\n");
16
17 rt.block_on(async move {
18 // Spawn multiple tasks to generate metrics
19 for i in 0..20 {
20 rt.spawn(async move {
21 avila_async::sleep(Duration::from_millis(50 * (i % 5) as u64)).await;
22 // Simulate work
23 });
24 }
25
26 // Monitor metrics in real-time
27 for iteration in 0..5 {
28 avila_async::sleep(Duration::from_millis(200)).await;
29
30 let snapshot = rt.metrics().snapshot();
31 let health = rt.health().get_report();
32
33 println!("📊 Iteration {}", iteration + 1);
34 println!(" {}", snapshot);
35 println!(" Health: {} | Ready: {} | Alive: {}",
36 health.status, health.ready, health.alive);
37 println!();
38 }
39
40 // Wait for all tasks to complete
41 while rt.task_count() > 0 {
42 avila_async::sleep(Duration::from_millis(50)).await;
43 }
44
45 println!("📈 Final Metrics Report");
46 println!("=====================");
47 let final_snapshot = rt.metrics().snapshot();
48 println!("{}", final_snapshot);
49 println!();
50
51 println!("🏥 Health Check Report");
52 println!("====================");
53 let health_report = rt.health().get_report();
54 println!("{}", health_report);
55 println!();
56
57 println!("📤 Prometheus Export");
58 println!("===================");
59 println!("{}", rt.metrics().to_prometheus());
60 });
61}

examples/industry40_health.rs (lines 32-34)
4fn main() {
5 let scaling_config = ScalingConfig {
6 min_threads: 2,
7 max_threads: 8,
8 target_queue_length: 50,
9 scale_up_threshold: 0.7,
10 scale_down_threshold: 0.3,
11 cooldown_period: Duration::from_secs(2),
12 };
13
14 let config = RuntimeConfig {
15 num_threads: Some(4),
16 enable_autoscaling: true,
17 scaling_config,
18 ..Default::default()
19 };
20
21 let rt = Runtime::with_config(config);
22
23 println!("🏥 Health Monitoring System - Industry 4.0");
24 println!("=========================================\n");
25
26 rt.block_on(async move {
27 // Simulate various workload conditions
28 println!("📊 Phase 1: Normal Operation");
29 println!("---------------------------");
30
31 for i in 0..10 {
32 rt.spawn(async move {
33 avila_async::sleep(Duration::from_millis(100)).await;
34 });
35 }
36
37 avila_async::sleep(Duration::from_millis(500)).await;
38 print_health_status(&rt);
39
40 println!("\n📊 Phase 2: High Load");
41 println!("--------------------");
42
43 for i in 0..50 {
44 rt.spawn(async move {
45 avila_async::sleep(Duration::from_millis(200)).await;
46 });
47 }
48
49 avila_async::sleep(Duration::from_millis(300)).await;
50 print_health_status(&rt);
51
52 // Simulate a degraded state
53 println!("\n⚠️ Phase 3: Degraded Service");
54 println!("---------------------------");
55
56 rt.health().add_check(
57 "database_connection",
58 HealthStatus::Degraded,
59 "Connection pool at 85% capacity"
60 );
61
62 rt.health().add_check(
63 "cache_latency",
64 HealthStatus::Degraded,
65 "Cache response time > 100ms"
66 );
67
68 print_health_status(&rt);
69
70 // Simulate recovery
71 println!("\n✅ Phase 4: Recovery");
72 println!("------------------");
73
74 rt.health().clear_checks();
75
76 // Wait for queue to drain
77 while rt.task_count() > 0 {
78 avila_async::sleep(Duration::from_millis(100)).await;
79 }
80
81 print_health_status(&rt);
82
83 println!("\n📤 Health Check JSON Export");
84 println!("==========================");
85 let report = rt.health().get_report();
86 println!("{}", report.to_json());
87 });
88}

Source§
pub fn spawn_with_handle<F, T>(&self, future: F) -> JoinHandle<T>
pub fn spawn_with_handle<F, T>(&self, future: F) -> JoinHandle<T>
Spawn a future and return a handle to await its result
Examples found in repository?
examples/parallel_tasks.rs (lines 13-16)
4fn main() {
5 let rt = Runtime::new();
6
7 rt.block_on(async move {
8 println!("Spawning 100 concurrent tasks...");
9
10 let mut handles = vec![];
11
12 for i in 0..100 {
13 let handle = rt.spawn_with_handle(async move {
14 avila_async::sleep(Duration::from_millis(10)).await;
15 i * i
16 });
17 handles.push(handle);
18 }
19
20 println!("Waiting for all tasks to complete...");
21
22 let mut sum = 0;
23 for handle in handles {
24 if let Some(result) = handle.await_result().await {
25 sum += result;
26 }
27 }
28
29 println!("Sum of squares from 0 to 99: {}", sum);
30 println!("Active tasks: {}", rt.task_count());
31 });
32}

Source§
pub fn block_on<F, T>(&self, future: F) -> T
pub fn block_on<F, T>(&self, future: F) -> T
Examples found in repository?
More examples
examples/timeout_demo.rs (lines 17-29)
14fn main() {
15 let rt = Runtime::new();
16
17 rt.block_on(async {
18 // This will timeout
19 match timeout(Duration::from_secs(1), slow_operation()).await {
20 Ok(val) => println!("Slow operation completed: {}", val),
21 Err(_) => println!("Slow operation timed out!"),
22 }
23
24 // This will succeed
25 match timeout(Duration::from_secs(1), fast_operation()).await {
26 Ok(val) => println!("Fast operation completed: {}", val),
27 Err(_) => println!("Fast operation timed out!"),
28 }
29 });
30}

examples/parallel_tasks.rs (lines 7-31)
4fn main() {
5 let rt = Runtime::new();
6
7 rt.block_on(async move {
8 println!("Spawning 100 concurrent tasks...");
9
10 let mut handles = vec![];
11
12 for i in 0..100 {
13 let handle = rt.spawn_with_handle(async move {
14 avila_async::sleep(Duration::from_millis(10)).await;
15 i * i
16 });
17 handles.push(handle);
18 }
19
20 println!("Waiting for all tasks to complete...");
21
22 let mut sum = 0;
23 for handle in handles {
24 if let Some(result) = handle.await_result().await {
25 sum += result;
26 }
27 }
28
29 println!("Sum of squares from 0 to 99: {}", sum);
30 println!("Active tasks: {}", rt.task_count());
31 });
32}

examples/industry40_tracing.rs (lines 46-65)
40fn main() {
41 let rt = Runtime::new();
42
43 println!("🔍 Distributed Tracing Demo - Industry 4.0");
44 println!("=========================================\n");
45
46 rt.block_on(async move {
47 let ctx = TraceContext::new("order-processing-service");
48
49 println!("Trace ID: {:016x}", ctx.trace_id);
50 println!("Starting order processing...\n");
51
52 // Process multiple batches
53 let batch1 = vec![1001, 1002, 1003];
54 let batch2 = vec![2001, 2002];
55
56 process_batch(&ctx, 1, batch1).await;
57 println!();
58 process_batch(&ctx, 2, batch2).await;
59 println!();
60
61 // Export trace data
62 println!("📤 Jaeger Trace Export");
63 println!("=====================");
64 println!("{}", rt.tracer().to_jaeger_json());
65 });
66}

examples/channel_demo.rs (lines 7-46)
4fn main() {
5 let rt = Runtime::new();
6
7 rt.block_on(async move {
8 let (tx, rx) = channel::bounded::<String>(10);
9
10 // Spawn producer task
11 rt.spawn({
12 let tx = tx.clone();
13 async move {
14 for i in 0..5 {
15 let msg = format!("Message {}", i);
16 println!("Sending: {}", msg);
17 tx.send(msg).await.unwrap();
18 avila_async::sleep(Duration::from_millis(500)).await;
19 }
20 }
21 });
22
23 // Spawn another producer
24 rt.spawn({
25 async move {
26 for i in 0..5 {
27 let msg = format!("Urgent {}", i);
28 println!("Sending: {}", msg);
29 tx.send(msg).await.unwrap();
30 avila_async::sleep(Duration::from_millis(300)).await;
31 }
32 }
33 });
34
35 // Receive messages
36 let mut count = 0;
37 while let Some(msg) = rx.recv().await {
38 println!("Received: {}", msg);
39 count += 1;
40 if count >= 10 {
41 break;
42 }
43 }
44
45 println!("All messages received!");
46 });
47}

examples/industry40_metrics.rs (lines 17-60)
4fn main() {
5 // Create runtime with Industry 4.0 features
6 let config = RuntimeConfig {
7 num_threads: Some(4),
8 enable_autoscaling: false,
9 ..Default::default()
10 };
11
12 let rt = Runtime::with_config(config);
13
14 println!("🏭 Industry 4.0 Metrics Dashboard");
15 println!("================================\n");
16
17 rt.block_on(async move {
18 // Spawn multiple tasks to generate metrics
19 for i in 0..20 {
20 rt.spawn(async move {
21 avila_async::sleep(Duration::from_millis(50 * (i % 5) as u64)).await;
22 // Simulate work
23 });
24 }
25
26 // Monitor metrics in real-time
27 for iteration in 0..5 {
28 avila_async::sleep(Duration::from_millis(200)).await;
29
30 let snapshot = rt.metrics().snapshot();
31 let health = rt.health().get_report();
32
33 println!("📊 Iteration {}", iteration + 1);
34 println!(" {}", snapshot);
35 println!(" Health: {} | Ready: {} | Alive: {}",
36 health.status, health.ready, health.alive);
37 println!();
38 }
39
40 // Wait for all tasks to complete
41 while rt.task_count() > 0 {
42 avila_async::sleep(Duration::from_millis(50)).await;
43 }
44
45 println!("📈 Final Metrics Report");
46 println!("=====================");
47 let final_snapshot = rt.metrics().snapshot();
48 println!("{}", final_snapshot);
49 println!();
50
51 println!("🏥 Health Check Report");
52 println!("====================");
53 let health_report = rt.health().get_report();
54 println!("{}", health_report);
55 println!();
56
57 println!("📤 Prometheus Export");
58 println!("===================");
59 println!("{}", rt.metrics().to_prometheus());
60 });
61}

Additional examples can be found in:
Trait Implementations§
Auto Trait Implementations§
impl Freeze for Runtime
impl RefUnwindSafe for Runtime
impl Send for Runtime
impl Sync for Runtime
impl Unpin for Runtime
impl UnwindSafe for Runtime
Blanket Implementations§
Source§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§
fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more