print_timing_summary

Function print_timing_summary 

Source
pub fn print_timing_summary()
Expand description

Print a summary of collected internal timing data.

Examples found in repository
examples/basic.rs (line 150)
28fn main() {
29    let args: Vec<String> = std::env::args().collect();
30    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic");
31    
32    let mut scenario_filter: Option<String> = None;
33    let mut enable_comparison = false;
34    let mut show_timing = false;
35    let mut i = 1;
36    
37    // Parse arguments
38    while i < args.len() {
39        let arg = &args[i];
40        
41        if arg == "-h" || arg == "--help" {
42            print_help(program_name);
43            return;
44        } else if arg == "--compare" {
45            enable_comparison = true;
46        } else if arg == "--timing" {
47            show_timing = true;
48        } else if !arg.starts_with('-') {
49            scenario_filter = Some(arg.clone());
50        } else {
51            eprintln!("Error: unknown option '{}'", arg);
52            print_help(program_name);
53            return;
54        }
55        
56        i += 1;
57    }
58    
59    println!("\nšŸš€ JSON Evaluation - Basic Example (JSON Schema)\n");
60    
61    if enable_comparison {
62        println!("šŸ” Comparison: enabled");
63    }
64    if show_timing {
65        println!("ā±ļø  Internal timing: enabled");
66    }
67    if enable_comparison || show_timing {
68        println!();
69    }
70    
71    let samples_dir = Path::new("samples");
72    let mut scenarios = common::discover_scenarios(samples_dir);
73    
74    // Filter out MessagePack scenarios - only use JSON
75    scenarios.retain(|s| !s.is_msgpack);
76    
77    // Filter scenarios if a filter is provided
78    if let Some(ref filter) = scenario_filter {
79        scenarios.retain(|s| s.name.contains(filter));
80        println!("šŸ“‹ Filtering scenarios matching: '{}'\n", filter);
81    }
82
83    if scenarios.is_empty() {
84        if let Some(filter) = scenario_filter {
85            println!(
86                "ā„¹ļø  No scenarios found matching '{}' in `{}`.",
87                filter,
88                samples_dir.display()
89            );
90        } else {
91            println!(
92                "ā„¹ļø  No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
93                samples_dir.display()
94            );
95        }
96        return;
97    }
98    
99    println!("šŸ“Š Found {} scenario(s)\n", scenarios.len());
100
101    let mut total_parse_time = std::time::Duration::ZERO;
102    let mut total_eval_time = std::time::Duration::ZERO;
103    let mut successful_scenarios = 0;
104    let mut comparison_failures = 0;
105
106    for scenario in &scenarios {
107        println!("==============================");
108        println!("Scenario: {}", scenario.name);
109        println!("Schema: {} ({})", 
110            scenario.schema_path.display(),
111            if scenario.is_msgpack { "MessagePack" } else { "JSON" }
112        );
113        println!("Data: {}\n", scenario.data_path.display());
114
115        // Clear timing data from previous scenarios
116        if show_timing {
117            json_eval_rs::enable_timing();
118            json_eval_rs::clear_timing_data();
119        }
120
121        let data_str = fs::read_to_string(&scenario.data_path)
122            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
123
124        // Step 1: Parse schema (JSONEval::new)
125        let parse_start = Instant::now();
126        
127        let schema_str = fs::read_to_string(&scenario.schema_path)
128            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
129        
130        let mut eval = JSONEval::new(&schema_str, None, Some(&data_str))
131            .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e));
132        
133        let parse_time = parse_start.elapsed();
134        println!("  šŸ“ Parse (new): {:?}", parse_time);
135        
136        // Step 2: Evaluate
137        let eval_start = Instant::now();
138        
139        eval.evaluate(&data_str, Some("{}"))
140            .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
141        
142        let evaluated_schema = eval.get_evaluated_schema(false);
143        let eval_time = eval_start.elapsed();
144        
145        println!("  ⚔ Eval: {:?}", eval_time);
146        println!("  ā±ļø  Total: {:?}\n", parse_time + eval_time);
147        
148        // Print detailed timing breakdown if --timing flag is set
149        if show_timing {
150            json_eval_rs::print_timing_summary();
151        }
152        
153        total_parse_time += parse_time;
154        total_eval_time += eval_time;
155        successful_scenarios += 1;
156
157        // Save results
158        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
159        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
160
161        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
162            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
163
164        let mut metadata_obj = Map::new();
165        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
166        metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
167        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
168
169        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
170            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
171
172        println!("āœ… Results saved:");
173        println!("  - {}", evaluated_path.display());
174        println!("  - {}\n", parsed_path.display());
175
176        // Optional comparison
177        if enable_comparison {
178            if let Some(comp_path) = &scenario.comparison_path {
179                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
180                    comparison_failures += 1;
181                }
182                println!();
183            }
184        }
185    }
186    
187    // Print summary
188    println!("{}", "=".repeat(50));
189    println!("šŸ“Š Summary");
190    println!("{}", "=".repeat(50));
191    println!("Total scenarios run: {}", successful_scenarios);
192    println!("Total parse time: {:?}", total_parse_time);
193    println!("Total eval time: {:?}", total_eval_time);
194    println!("Total time: {:?}", total_parse_time + total_eval_time);
195    
196    if successful_scenarios > 1 {
197        println!("\nAverage per scenario:");
198        println!("  Parse: {:?}", total_parse_time / successful_scenarios as u32);
199        println!("  Eval: {:?}", total_eval_time / successful_scenarios as u32);
200    }
201    
202    if enable_comparison {
203        println!("Comparison failures: {}", comparison_failures);
204    }
205    
206    println!("\nāœ… All scenarios completed!\n");
207}
More examples
Hide additional examples
examples/basic_msgpack.rs (line 149)
28fn main() {
29    let args: Vec<String> = std::env::args().collect();
30    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic_msgpack");
31    
32    let mut scenario_filter: Option<String> = None;
33    let mut enable_comparison = false;
34    let mut show_timing = false;
35    let mut i = 1;
36    
37    // Parse arguments
38    while i < args.len() {
39        let arg = &args[i];
40        
41        if arg == "-h" || arg == "--help" {
42            print_help(program_name);
43            return;
44        } else if arg == "--compare" {
45            enable_comparison = true;
46        } else if arg == "--timing" {
47            show_timing = true;
48        } else if !arg.starts_with('-') {
49            scenario_filter = Some(arg.clone());
50        } else {
51            eprintln!("Error: unknown option '{}'", arg);
52            print_help(program_name);
53            return;
54        }
55        
56        i += 1;
57    }
58    
59    println!("\nšŸš€ JSON Evaluation - Basic Example (MessagePack Schema)\n");
60    
61    if enable_comparison {
62        println!("šŸ” Comparison: enabled");
63    }
64    if show_timing {
65        println!("ā±ļø  Internal timing: enabled");
66    }
67    if enable_comparison || show_timing {
68        println!();
69    }
70    
71    let samples_dir = Path::new("samples");
72    let mut scenarios = common::discover_scenarios(samples_dir);
73    
74    // Filter to only MessagePack scenarios
75    scenarios.retain(|s| s.is_msgpack);
76    
77    // Filter scenarios if a filter is provided
78    if let Some(ref filter) = scenario_filter {
79        scenarios.retain(|s| s.name.contains(filter));
80        println!("šŸ“‹ Filtering scenarios matching: '{}'\n", filter);
81    }
82
83    if scenarios.is_empty() {
84        if let Some(filter) = scenario_filter {
85            println!(
86                "ā„¹ļø  No MessagePack scenarios found matching '{}' in `{}`.",
87                filter,
88                samples_dir.display()
89            );
90        } else {
91            println!(
92                "ā„¹ļø  No MessagePack scenarios discovered in `{}`. Add files like `name.bform` and `name-data.json`.",
93                samples_dir.display()
94            );
95        }
96        return;
97    }
98    
99    println!("šŸ“Š Found {} MessagePack scenario(s)\n", scenarios.len());
100
101    let mut total_parse_time = std::time::Duration::ZERO;
102    let mut total_eval_time = std::time::Duration::ZERO;
103    let mut successful_scenarios = 0;
104    let mut comparison_failures = 0;
105
106    for scenario in &scenarios {
107        println!("==============================");
108        println!("Scenario: {}", scenario.name);
109        println!("Schema: {} (MessagePack)", scenario.schema_path.display());
110        println!("Data: {}\n", scenario.data_path.display());
111
112        // Clear timing data from previous scenarios
113        if show_timing {
114            json_eval_rs::enable_timing();
115            json_eval_rs::clear_timing_data();
116        }
117
118        let data_str = fs::read_to_string(&scenario.data_path)
119            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
120
121        // Step 1: Parse schema (new_from_msgpack)
122        let parse_start = Instant::now();
123        
124        let schema_msgpack = fs::read(&scenario.schema_path)
125            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
126        
127        println!("  šŸ“¦ MessagePack schema size: {} bytes", schema_msgpack.len());
128        
129        let mut eval = JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
130            .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e));
131        
132        let parse_time = parse_start.elapsed();
133        println!("  šŸ“ Parse (msgpack): {:?}", parse_time);
134        
135        // Step 2: Evaluate
136        let eval_start = Instant::now();
137        
138        eval.evaluate(&data_str, Some("{}"))
139            .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
140        
141        let evaluated_schema = eval.get_evaluated_schema(false);
142        let eval_time = eval_start.elapsed();
143        
144        println!("  ⚔ Eval: {:?}", eval_time);
145        println!("  ā±ļø  Total: {:?}\n", parse_time + eval_time);
146        
147        // Print detailed timing breakdown if --timing flag is set
148        if show_timing {
149            json_eval_rs::print_timing_summary();
150        }
151        
152        total_parse_time += parse_time;
153        total_eval_time += eval_time;
154        successful_scenarios += 1;
155
156        // Save results
157        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
158        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
159
160        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
161            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
162
163        let mut metadata_obj = Map::new();
164        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
165        metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
166        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
167
168        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
169            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
170
171        println!("āœ… Results saved:");
172        println!("  - {}", evaluated_path.display());
173        println!("  - {}\n", parsed_path.display());
174
175        // Optional comparison
176        if enable_comparison {
177            if let Some(comp_path) = &scenario.comparison_path {
178                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
179                    comparison_failures += 1;
180                }
181                println!();
182            }
183        }
184    }
185    
186    // Print summary
187    println!("{}", "=".repeat(50));
188    println!("šŸ“Š Summary");
189    println!("{}", "=".repeat(50));
190    println!("Total scenarios run: {}", successful_scenarios);
191    println!("Total parse time: {:?}", total_parse_time);
192    println!("Total eval time: {:?}", total_eval_time);
193    println!("Total time: {:?}", total_parse_time + total_eval_time);
194    
195    if successful_scenarios > 1 {
196        println!("\nAverage per scenario:");
197        println!("  Parse: {:?}", total_parse_time / successful_scenarios as u32);
198        println!("  Eval: {:?}", total_eval_time / successful_scenarios as u32);
199    }
200    
201    if enable_comparison {
202        println!("Comparison failures: {}", comparison_failures);
203    }
204    
205    println!("\nāœ… All scenarios completed!\n");
206}
examples/basic_parsed.rs (line 160)
30fn main() {
31    let args: Vec<String> = std::env::args().collect();
32    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic_parsed");
33    
34    let mut scenario_filter: Option<String> = None;
35    let mut enable_comparison = false;
36    let mut show_timing = false;
37    let mut i = 1;
38    
39    // Parse arguments
40    while i < args.len() {
41        let arg = &args[i];
42        
43        if arg == "-h" || arg == "--help" {
44            print_help(program_name);
45            return;
46        } else if arg == "--compare" {
47            enable_comparison = true;
48        } else if arg == "--timing" {
49            show_timing = true;
50        } else if !arg.starts_with('-') {
51            scenario_filter = Some(arg.clone());
52        } else {
53            eprintln!("Error: unknown option '{}'", arg);
54            print_help(program_name);
55            return;
56        }
57        
58        i += 1;
59    }
60    
61    println!("\nšŸš€ JSON Evaluation - Basic Example (ParsedSchema)\n");
62    println!("šŸ“¦ Using Arc<ParsedSchema> for efficient caching\n");
63    
64    if enable_comparison {
65        println!("šŸ” Comparison: enabled");
66    }
67    if show_timing {
68        println!("ā±ļø  Internal timing: enabled");
69    }
70    if enable_comparison || show_timing {
71        println!();
72    }
73    
74    let samples_dir = Path::new("samples");
75    let mut scenarios = common::discover_scenarios(samples_dir);
76    
77    // Filter scenarios if a filter is provided
78    if let Some(ref filter) = scenario_filter {
79        scenarios.retain(|s| s.name.contains(filter));
80        println!("šŸ“‹ Filtering scenarios matching: '{}'\n", filter);
81    }
82
83    if scenarios.is_empty() {
84        if let Some(filter) = scenario_filter {
85            println!(
86                "ā„¹ļø  No scenarios found matching '{}' in `{}`.",
87                filter,
88                samples_dir.display()
89            );
90        } else {
91            println!(
92                "ā„¹ļø  No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
93                samples_dir.display()
94            );
95        }
96        return;
97    }
98    
99    println!("šŸ“Š Found {} scenario(s)\n", scenarios.len());
100
101    let mut total_parse_time = std::time::Duration::ZERO;
102    let mut total_eval_time = std::time::Duration::ZERO;
103    let mut successful_scenarios = 0;
104    let mut comparison_failures = 0;
105
106    for scenario in &scenarios {
107        println!("==============================");
108        println!("Scenario: {}", scenario.name);
109        println!("Schema: {} ({})", 
110            scenario.schema_path.display(),
111            if scenario.is_msgpack { "MessagePack" } else { "JSON" }
112        );
113        println!("Data: {}\n", scenario.data_path.display());
114
115        // Clear timing data from previous scenarios
116        if show_timing {
117            json_eval_rs::enable_timing();
118            json_eval_rs::clear_timing_data();
119        }
120
121        let data_str = fs::read_to_string(&scenario.data_path)
122            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
123
124        // Step 1: Parse schema once
125        let parse_start = Instant::now();
126        let parsed_schema = if scenario.is_msgpack {
127            let schema_msgpack = fs::read(&scenario.schema_path)
128                .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
129            println!("  šŸ“¦ MessagePack schema size: {} bytes", schema_msgpack.len());
130            Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
131                .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
132        } else {
133            let schema_str = fs::read_to_string(&scenario.schema_path)
134                .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
135            Arc::new(ParsedSchema::parse(&schema_str)
136                .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
137        };
138        let parse_time = parse_start.elapsed();
139        println!("  šŸ“ Schema parsing: {:?}", parse_time);
140        
141        // Step 2: Create JSONEval from ParsedSchema (reuses compiled logic)
142        let eval_start = Instant::now();
143        let mut eval = JSONEval::with_parsed_schema(
144            parsed_schema.clone(),  // Arc::clone is cheap!
145            Some("{}"),
146            Some(&data_str)
147        ).unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e));
148
149        eval.evaluate(&data_str, Some("{}"))
150            .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
151        
152        let evaluated_schema = eval.get_evaluated_schema(false);
153        let eval_time = eval_start.elapsed();
154        
155        println!("  ⚔ Eval: {:?}", eval_time);
156        println!("  ā±ļø  Total: {:?}\n", parse_time + eval_time);
157        
158        // Print detailed timing breakdown if --timing flag is set
159        if show_timing {
160            json_eval_rs::print_timing_summary();
161        }
162        
163        total_parse_time += parse_time;
164        total_eval_time += eval_time;
165        successful_scenarios += 1;
166
167        // Save results
168        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
169        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
170
171        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
172            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
173
174        let mut metadata_obj = Map::new();
175        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
176        metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
177        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
178
179        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
180            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
181
182        println!("āœ… Results saved:");
183        println!("  - {}", evaluated_path.display());
184        println!("  - {}\n", parsed_path.display());
185
186        // Optional comparison
187        if enable_comparison {
188            if let Some(comp_path) = &scenario.comparison_path {
189                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
190                    comparison_failures += 1;
191                }
192                println!();
193            }
194        }
195    }
196    
197    // Print summary
198    println!("{}", "=".repeat(50));
199    println!("šŸ“Š Summary");
200    println!("{}", "=".repeat(50));
201    println!("Total scenarios run: {}", successful_scenarios);
202    println!("Total parsing time: {:?}", total_parse_time);
203    println!("Total evaluation time: {:?}", total_eval_time);
204    println!("Total time: {:?}", total_parse_time + total_eval_time);
205    
206    if successful_scenarios > 1 {
207        println!("\nAverage per scenario:");
208        println!("  Parsing: {:?}", total_parse_time / successful_scenarios as u32);
209        println!("  Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
210    }
211    
212    if enable_comparison {
213        println!("\nComparison failures: {}", comparison_failures);
214    }
215    
216    println!("\nāœ… All scenarios completed!\n");
217}
examples/benchmark.rs (line 358)
31fn main() {
32    let args: Vec<String> = std::env::args().collect();
33    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("benchmark");
34    
35    let mut iterations = 1usize;
36    let mut scenario_filter: Option<String> = None;
37    let mut show_cpu_info = false;
38    let mut use_parsed_schema = false;
39    let mut concurrent_count: Option<usize> = None;
40    let mut enable_comparison = false;
41    let mut show_timing = false;
42    let mut i = 1;
43    
44    // Parse arguments
45    while i < args.len() {
46        let arg = &args[i];
47        
48        if arg == "-h" || arg == "--help" {
49            print_help(program_name);
50            return;
51        } else if arg == "--cpu-info" {
52            show_cpu_info = true;
53        } else if arg == "--parsed" {
54            use_parsed_schema = true;
55        } else if arg == "--compare" {
56            enable_comparison = true;
57        } else if arg == "--timing" {
58            show_timing = true;
59        } else if arg == "--concurrent" {
60            if i + 1 >= args.len() {
61                eprintln!("Error: {} requires a value", arg);
62                print_help(program_name);
63                return;
64            }
65            i += 1;
66            match args[i].parse::<usize>() {
67                Ok(n) if n > 0 => concurrent_count = Some(n),
68                _ => {
69                    eprintln!("Error: concurrent count must be a positive integer, got '{}'", args[i]);
70                    return;
71                }
72            }
73        } else if arg == "-i" || arg == "--iterations" {
74            if i + 1 >= args.len() {
75                eprintln!("Error: {} requires a value", arg);
76                print_help(program_name);
77                return;
78            }
79            i += 1;
80            match args[i].parse::<usize>() {
81                Ok(n) if n > 0 => iterations = n,
82                _ => {
83                    eprintln!("Error: iterations must be a positive integer, got '{}'", args[i]);
84                    return;
85                }
86            }
87        } else if !arg.starts_with('-') {
88            scenario_filter = Some(arg.clone());
89        } else {
90            eprintln!("Error: unknown option '{}'", arg);
91            print_help(program_name);
92            return;
93        }
94        
95        i += 1;
96    }
97    
98    println!("\nšŸš€ JSON Evaluation - Benchmark\n");
99    
100    // Show CPU info if requested or if running benchmarks
101    if show_cpu_info || iterations > 1 || concurrent_count.is_some() {
102        common::print_cpu_info();
103    }
104    
105    if use_parsed_schema {
106        println!("šŸ“¦ Mode: ParsedSchema (parse once, reuse for all iterations)\n");
107    }
108    
109    if let Some(count) = concurrent_count {
110        println!("šŸ”€ Concurrent evaluations: {} threads\n", count);
111    } else if iterations > 1 {
112        println!("šŸ”„ Iterations per scenario: {}\n", iterations);
113    }
114    
115    if enable_comparison {
116        println!("šŸ” Comparison: enabled");
117    }
118    if show_timing {
119        println!("ā±ļø  Internal timing: enabled");
120    }
121    if enable_comparison || show_timing {
122        println!();
123    }
124
125    let samples_dir = Path::new("samples");
126    let mut scenarios = common::discover_scenarios(samples_dir);
127    
128    // Filter scenarios if a filter is provided
129    if let Some(ref filter) = scenario_filter {
130        scenarios.retain(|s| s.name.contains(filter));
131        println!("šŸ“‹ Filtering scenarios matching: '{}'\n", filter);
132    }
133
134    if scenarios.is_empty() {
135        if let Some(filter) = scenario_filter {
136            println!(
137                "ā„¹ļø  No scenarios found matching '{}' in `{}`.",
138                filter,
139                samples_dir.display()
140            );
141        } else {
142            println!(
143                "ā„¹ļø  No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
144                samples_dir.display()
145            );
146        }
147        return;
148    }
149    
150    println!("šŸ“Š Found {} scenario(s)\n", scenarios.len());
151
152    let mut total_parse_time = std::time::Duration::ZERO;
153    let mut total_eval_time = std::time::Duration::ZERO;
154    let mut successful_scenarios = 0;
155    let mut comparison_failures = 0;
156
157    for scenario in &scenarios {
158        println!("==============================");
159        println!("Scenario: {}", scenario.name);
160        println!("Schema: {} ({})", 
161            scenario.schema_path.display(),
162            if scenario.is_msgpack { "MessagePack" } else { "JSON" }
163        );
164        println!("Data: {}\n", scenario.data_path.display());
165
166        // Clear timing data from previous scenarios
167        if show_timing {
168            json_eval_rs::enable_timing();
169            json_eval_rs::clear_timing_data();
170        }
171
172        let data_str = fs::read_to_string(&scenario.data_path)
173            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
174
175        println!("Running evaluation...\n");
176
177        let (parse_time, eval_time, evaluated_schema, eval, iteration_times) = if use_parsed_schema {
178            // ParsedSchema mode: parse once, reuse for all iterations/threads
179            let start_time = Instant::now();
180            
181            let parsed_schema = if scenario.is_msgpack {
182                let schema_msgpack = fs::read(&scenario.schema_path)
183                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
184                println!("  šŸ“¦ MessagePack schema size: {} bytes", schema_msgpack.len());
185                Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
186                    .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
187            } else {
188                let schema_str = fs::read_to_string(&scenario.schema_path)
189                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
190                Arc::new(ParsedSchema::parse(&schema_str)
191                    .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
192            };
193            
194            let parse_time = start_time.elapsed();
195            println!("  Schema parsing & compilation: {:?}", parse_time);
196            
197            // Concurrent mode with ParsedSchema
198            if let Some(thread_count) = concurrent_count {
199                use std::thread;
200                
201                let eval_start = Instant::now();
202                let mut handles = vec![];
203                
204                for thread_id in 0..thread_count {
205                    let parsed_clone = parsed_schema.clone();
206                    let data_str_clone = data_str.clone();
207                    let iter_count = iterations;
208                    
209                    let handle = thread::spawn(move || {
210                        let mut thread_times = Vec::with_capacity(iter_count);
211                        let mut last_schema = Value::Null;
212                        
213                        for _ in 0..iter_count {
214                            let iter_start = Instant::now();
215                            let mut eval_instance = JSONEval::with_parsed_schema(
216                                parsed_clone.clone(),
217                                Some("{}"),
218                                Some(&data_str_clone)
219                            ).unwrap();
220                            
221                            eval_instance.evaluate(&data_str_clone, Some("{}")).unwrap();
222                            last_schema = eval_instance.get_evaluated_schema(false);
223                            thread_times.push(iter_start.elapsed());
224                        }
225                        
226                        (thread_times, last_schema, thread_id)
227                    });
228                    handles.push(handle);
229                }
230                
231                let mut all_iteration_times = Vec::new();
232                let mut evaluated_schema = Value::Null;
233                
234                for handle in handles {
235                    let (thread_times, thread_schema, thread_id) = handle.join().unwrap();
236                    println!("  Thread {} completed {} iterations", thread_id, thread_times.len());
237                    all_iteration_times.extend(thread_times);
238                    evaluated_schema = thread_schema; // Use last thread's result
239                }
240                
241                let eval_time = eval_start.elapsed();
242                
243                // Create a temp eval for metadata export
244                let temp_eval = JSONEval::with_parsed_schema(
245                    parsed_schema.clone(),
246                    Some("{}"),
247                    Some(&data_str)
248                ).unwrap();
249                
250                (parse_time, eval_time, evaluated_schema, temp_eval, all_iteration_times)
251            } else {
252                // Sequential iterations with ParsedSchema
253                let eval_start = Instant::now();
254                let mut evaluated_schema = Value::Null;
255                let mut iteration_times = Vec::with_capacity(iterations);
256                let mut eval_instance = JSONEval::with_parsed_schema(
257                    parsed_schema.clone(),
258                    Some("{}"),
259                    Some(&data_str)
260                ).unwrap();
261                
262                for iter in 0..iterations {
263                    let iter_start = Instant::now();
264                    eval_instance.evaluate(&data_str, Some("{}"))
265                        .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
266                    evaluated_schema = eval_instance.get_evaluated_schema(false);
267                    iteration_times.push(iter_start.elapsed());
268                    
269                    if iterations > 1 && (iter + 1) % 10 == 0 {
270                        print!(".");
271                        if (iter + 1) % 50 == 0 {
272                            println!(" {}/{}", iter + 1, iterations);
273                        }
274                    }
275                }
276                
277                if iterations > 1 && iterations % 50 != 0 {
278                    println!(" {}/{}", iterations, iterations);
279                }
280                
281                let eval_time = eval_start.elapsed();
282                (parse_time, eval_time, evaluated_schema, eval_instance, iteration_times)
283            }
284        } else {
285            // Traditional mode: parse and create JSONEval each time
286            let start_time = Instant::now();
287            let mut eval = if scenario.is_msgpack {
288                let schema_msgpack = fs::read(&scenario.schema_path)
289                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
290                println!("  šŸ“¦ MessagePack schema size: {} bytes", schema_msgpack.len());
291                JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
292                    .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e))
293            } else {
294                let schema_str = fs::read_to_string(&scenario.schema_path)
295                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
296                JSONEval::new(&schema_str, None, Some(&data_str))
297                    .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
298            };
299            let parse_time = start_time.elapsed();
300            println!("  Schema parsing & compilation: {:?}", parse_time);
301            
302            let eval_start = Instant::now();
303            let mut evaluated_schema = Value::Null;
304            let mut iteration_times = Vec::with_capacity(iterations);
305            
306            for iter in 0..iterations {
307                let iter_start = Instant::now();
308                eval.evaluate(&data_str, Some("{}"))
309                    .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
310                evaluated_schema = eval.get_evaluated_schema(false);
311                iteration_times.push(iter_start.elapsed());
312                
313                if iterations > 1 && (iter + 1) % 10 == 0 {
314                    print!(".");
315                    if (iter + 1) % 50 == 0 {
316                        println!(" {}/{}", iter + 1, iterations);
317                    }
318                }
319            }
320            
321            if iterations > 1 && iterations % 50 != 0 {
322                println!(" {}/{}", iterations, iterations);
323            }
324            
325            let eval_time = eval_start.elapsed();
326            (parse_time, eval_time, evaluated_schema, eval, iteration_times)
327        };
328        
329        // Calculate statistics
330        let total_iterations = iteration_times.len();
331        if total_iterations == 1 {
332            println!("  Evaluation: {:?}", eval_time);
333        } else {
334            let avg_time = eval_time / total_iterations as u32;
335            let min_time = iteration_times.iter().min().unwrap();
336            let max_time = iteration_times.iter().max().unwrap();
337            
338            println!("  Total evaluation time: {:?}", eval_time);
339            println!("  Total iterations: {}", total_iterations);
340            println!("  Average per iteration: {:?}", avg_time);
341            println!("  Min: {:?} | Max: {:?}", min_time, max_time);
342            
343            // Show cache statistics
344            let cache_stats = eval.cache_stats();
345            println!("  Cache: {} entries, {} hits, {} misses ({:.1}% hit rate)",
346                cache_stats.entries,
347                cache_stats.hits,
348                cache_stats.misses,
349                cache_stats.hit_rate * 100.0
350            );
351        }
352
353        let total_time = parse_time + eval_time;
354        println!("ā±ļø  Execution time: {:?}\n", total_time);
355        
356        // Print detailed timing breakdown if --timing flag is set
357        if show_timing {
358            json_eval_rs::print_timing_summary();
359        }
360        
361        // Track statistics
362        total_parse_time += parse_time;
363        total_eval_time += eval_time;
364        successful_scenarios += 1;
365
366        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
367        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
368
369        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
370            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
371
372        let mut metadata_obj = Map::new();
373        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
374        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
375
376        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
377            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
378
379        println!("āœ… Results saved:");
380        println!("  - {}", evaluated_path.display());
381        println!("  - {}\n", parsed_path.display());
382
383        // Optional comparison
384        if enable_comparison {
385            if let Some(comp_path) = &scenario.comparison_path {
386                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
387                    comparison_failures += 1;
388                }
389                println!();
390            }
391        }
392    }
393    
394    // Print summary statistics
395    if successful_scenarios > 0 {
396        println!("\n{}", "=".repeat(50));
397        println!("šŸ“Š Summary Statistics");
398        println!("{}", "=".repeat(50));
399        println!("Total scenarios run: {}", successful_scenarios);
400        println!("Total parsing time: {:?}", total_parse_time);
401        println!("Total evaluation time: {:?}", total_eval_time);
402        println!("Total time: {:?}", total_parse_time + total_eval_time);
403        
404        if successful_scenarios > 1 {
405            println!("\nAverage per scenario:");
406            println!("  Parsing: {:?}", total_parse_time / successful_scenarios as u32);
407            println!("  Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
408        }
409        
410        if enable_comparison {
411            println!("\nComparison failures: {}", comparison_failures);
412        }
413        
414        println!("\nāœ… All scenarios completed successfully!\n");
415    }
416}