mod common;

use std::fs;
use std::path::Path;
use std::sync::Arc;
use std::time::Instant;
use json_eval_rs::{JSONEval, ParsedSchema};
use serde_json::{Map, Value};

fn print_help(program_name: &str) {
    println!("\n🚀 JSON Evaluation - Benchmark Example\n");
    println!("USAGE:");
    println!("  {} [OPTIONS] [FILTER]\n", program_name);
    println!("OPTIONS:");
    println!("  -h, --help                Show this help message");
    println!("  -i, --iterations <COUNT>  Number of evaluation iterations (default: 1)");
    println!("  --parsed                  Use ParsedSchema for caching (parse once, reuse)");
    println!("  --concurrent <COUNT>      Test concurrent evaluations with N threads");
    println!("  --compare                 Enable comparison with expected results");
    println!("  --timing                  Show detailed internal timing breakdown");
    println!("  --cpu-info                Show CPU feature information\n");
    println!("ARGUMENTS:");
    println!("  [FILTER]                  Optional filter to match scenario names\n");
    println!("EXAMPLES:");
    println!("  {} -i 100 zlw              # Run 'zlw' scenario 100 times", program_name);
    println!("  {} --parsed -i 100         # Use ParsedSchema, 100 iterations", program_name);
    println!("  {} --parsed --concurrent 4 # Test 4 concurrent evaluations", program_name);
    println!("  {} --compare               # Run with comparison enabled", program_name);
}

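// Entry point: parse the command-line flags, discover benchmark scenarios
// under `samples/`, run each one (optionally with ParsedSchema caching and/or
// concurrent threads), and print per-scenario and aggregate timings.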
fn main() {
    let args: Vec<String> = std::env::args().collect();
    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("benchmark");

    let mut iterations = 1usize;
    let mut scenario_filter: Option<String> = None;
    let mut show_cpu_info = false;
    let mut use_parsed_schema = false;
    let mut concurrent_count: Option<usize> = None;
    let mut enable_comparison = false;
    let mut show_timing = false;
    let mut i = 1;

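    // Hand-rolled argument parsing: flags may appear in any order, and the
    // first bare (non-dash) argument is taken as the scenario name filter.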
    while i < args.len() {
        let arg = &args[i];

        if arg == "-h" || arg == "--help" {
            print_help(program_name);
            return;
        } else if arg == "--cpu-info" {
            show_cpu_info = true;
        } else if arg == "--parsed" {
            use_parsed_schema = true;
        } else if arg == "--compare" {
            enable_comparison = true;
        } else if arg == "--timing" {
            show_timing = true;
        } else if arg == "--concurrent" {
            if i + 1 >= args.len() {
                eprintln!("Error: {} requires a value", arg);
                print_help(program_name);
                return;
            }
            i += 1;
            match args[i].parse::<usize>() {
                Ok(n) if n > 0 => concurrent_count = Some(n),
                _ => {
                    eprintln!("Error: concurrent count must be a positive integer, got '{}'", args[i]);
                    return;
                }
            }
        } else if arg == "-i" || arg == "--iterations" {
            if i + 1 >= args.len() {
                eprintln!("Error: {} requires a value", arg);
                print_help(program_name);
                return;
            }
            i += 1;
            match args[i].parse::<usize>() {
                Ok(n) if n > 0 => iterations = n,
                _ => {
                    eprintln!("Error: iterations must be a positive integer, got '{}'", args[i]);
                    return;
                }
            }
        } else if !arg.starts_with('-') {
            scenario_filter = Some(arg.clone());
        } else {
            eprintln!("Error: unknown option '{}'", arg);
            print_help(program_name);
            return;
        }

        i += 1;
    }

    println!("\n🚀 JSON Evaluation - Benchmark\n");

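    // CPU feature info is printed when explicitly requested, or whenever the
    // run is a real benchmark (multiple iterations or concurrent threads).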
    if show_cpu_info || iterations > 1 || concurrent_count.is_some() {
        common::print_cpu_info();
    }

    if use_parsed_schema {
        println!("📦 Mode: ParsedSchema (parse once, reuse for all iterations)\n");
    }

    if let Some(count) = concurrent_count {
        println!("🔄 Concurrent evaluations: {} threads\n", count);
    } else if iterations > 1 {
        println!("🔄 Iterations per scenario: {}\n", iterations);
    }

    if enable_comparison {
        println!("🔍 Comparison: enabled");
    }
    if show_timing {
        println!("⏱️  Internal timing: enabled");
    }
    if enable_comparison || show_timing {
        println!();
    }

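    // Scenarios are discovered by file-name convention inside `samples/`:
    // a schema file paired with a `name-data.json` data file (see the hint
    // printed below when nothing is found).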
    let samples_dir = Path::new("samples");
    let mut scenarios = common::discover_scenarios(samples_dir);

    if let Some(ref filter) = scenario_filter {
        scenarios.retain(|s| s.name.contains(filter));
        println!("🔍 Filtering scenarios matching: '{}'\n", filter);
    }

    if scenarios.is_empty() {
        if let Some(filter) = scenario_filter {
            println!(
                "ℹ️  No scenarios found matching '{}' in `{}`.",
                filter,
                samples_dir.display()
            );
        } else {
            println!(
                "ℹ️  No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
                samples_dir.display()
            );
        }
        return;
    }

    println!("📋 Found {} scenario(s)\n", scenarios.len());

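    // Aggregate counters across all scenarios, reported in the summary at the end.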
    let mut total_parse_time = std::time::Duration::ZERO;
    let mut total_eval_time = std::time::Duration::ZERO;
    let mut successful_scenarios = 0;
    let mut comparison_failures = 0;

    for scenario in &scenarios {
        println!("==============================");
        println!("Scenario: {}", scenario.name);
        println!("Schema: {} ({})",
            scenario.schema_path.display(),
            if scenario.is_msgpack { "MessagePack" } else { "JSON" }
        );
        println!("Data: {}\n", scenario.data_path.display());

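        // Turn on the library's internal timing instrumentation and clear any
        // data left over from a previous scenario.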
        if show_timing {
            json_eval_rs::enable_timing();
            json_eval_rs::clear_timing_data();
        }

        let data_str = fs::read_to_string(&scenario.data_path)
            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));

        println!("Running evaluation...\n");

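        // Three execution paths, all producing the same result tuple
        // (parse_time, eval_time, evaluated_schema, eval, iteration_times):
        //   1. --parsed --concurrent N : one shared Arc<ParsedSchema>, N worker threads
        //   2. --parsed                : one ParsedSchema, a single reused JSONEval
        //   3. default                 : schema parsed into a fresh JSONEval per scenario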
        let (parse_time, eval_time, evaluated_schema, eval, iteration_times) = if use_parsed_schema {
            let start_time = Instant::now();

            let parsed_schema = if scenario.is_msgpack {
                let schema_msgpack = fs::read(&scenario.schema_path)
                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
                println!("  📦 MessagePack schema size: {} bytes", schema_msgpack.len());
                Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
                    .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
            } else {
                let schema_str = fs::read_to_string(&scenario.schema_path)
                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
                Arc::new(ParsedSchema::parse(&schema_str)
                    .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
            };

            let parse_time = start_time.elapsed();
            println!("  Schema parsing & compilation: {:?}", parse_time);

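            // Concurrent path: each worker thread clones the Arc<ParsedSchema>
            // and runs the full iteration count independently.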
            if let Some(thread_count) = concurrent_count {
                use std::thread;

                let eval_start = Instant::now();
                let mut handles = vec![];

                for thread_id in 0..thread_count {
                    let parsed_clone = parsed_schema.clone();
                    let data_str_clone = data_str.clone();
                    let iter_count = iterations;

                    let handle = thread::spawn(move || {
                        let mut thread_times = Vec::with_capacity(iter_count);
                        let mut last_schema = Value::Null;

                        for _ in 0..iter_count {
                            let iter_start = Instant::now();
                            let mut eval_instance = JSONEval::with_parsed_schema(
                                parsed_clone.clone(),
                                Some("{}"),
                                Some(&data_str_clone)
                            ).unwrap();

                            eval_instance.evaluate(&data_str_clone, Some("{}")).unwrap();
                            last_schema = eval_instance.get_evaluated_schema(false);
                            thread_times.push(iter_start.elapsed());
                        }

                        (thread_times, last_schema, thread_id)
                    });
                    handles.push(handle);
                }

                let mut all_iteration_times = Vec::new();
                let mut evaluated_schema = Value::Null;

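                // Join the workers and merge their per-iteration timings; only
                // the last evaluated schema is kept for the output files.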
                for handle in handles {
                    let (thread_times, thread_schema, thread_id) = handle.join().unwrap();
                    println!("  Thread {} completed {} iterations", thread_id, thread_times.len());
                    all_iteration_times.extend(thread_times);
                    evaluated_schema = thread_schema;
                }

                let eval_time = eval_start.elapsed();

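                // The per-thread instances are dropped when the threads finish,
                // so build one more JSONEval from the shared schema for the
                // reporting code below (cache stats, dependency metadata).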
                let temp_eval = JSONEval::with_parsed_schema(
                    parsed_schema.clone(),
                    Some("{}"),
                    Some(&data_str)
                ).unwrap();

                (parse_time, eval_time, evaluated_schema, temp_eval, all_iteration_times)
            } else {
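                // Sequential ParsedSchema path: build a single JSONEval from the
                // shared schema and reuse it for every iteration.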
                let eval_start = Instant::now();
                let mut evaluated_schema = Value::Null;
                let mut iteration_times = Vec::with_capacity(iterations);
                let mut eval_instance = JSONEval::with_parsed_schema(
                    parsed_schema.clone(),
                    Some("{}"),
                    Some(&data_str)
                ).unwrap();

                for iter in 0..iterations {
                    let iter_start = Instant::now();
                    eval_instance.evaluate(&data_str, Some("{}"))
                        .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
                    evaluated_schema = eval_instance.get_evaluated_schema(false);
                    iteration_times.push(iter_start.elapsed());

                    if iterations > 1 && (iter + 1) % 10 == 0 {
                        print!(".");
                        if (iter + 1) % 50 == 0 {
                            println!(" {}/{}", iter + 1, iterations);
                        }
                    }
                }

                if iterations > 1 && iterations % 50 != 0 {
                    println!(" {}/{}", iterations, iterations);
                }

                let eval_time = eval_start.elapsed();
                (parse_time, eval_time, evaluated_schema, eval_instance, iteration_times)
            }
        } else {
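            // Default path: parse the schema into a fresh JSONEval for this
            // scenario (no ParsedSchema reuse), then time the evaluations.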
            let start_time = Instant::now();
            let mut eval = if scenario.is_msgpack {
                let schema_msgpack = fs::read(&scenario.schema_path)
                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
                println!("  📦 MessagePack schema size: {} bytes", schema_msgpack.len());
                JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
                    .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e))
            } else {
                let schema_str = fs::read_to_string(&scenario.schema_path)
                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
                JSONEval::new(&schema_str, None, Some(&data_str))
                    .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
            };
            let parse_time = start_time.elapsed();
            println!("  Schema parsing & compilation: {:?}", parse_time);

            let eval_start = Instant::now();
            let mut evaluated_schema = Value::Null;
            let mut iteration_times = Vec::with_capacity(iterations);

            for iter in 0..iterations {
                let iter_start = Instant::now();
                eval.evaluate(&data_str, Some("{}"))
                    .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
                evaluated_schema = eval.get_evaluated_schema(false);
                iteration_times.push(iter_start.elapsed());

                if iterations > 1 && (iter + 1) % 10 == 0 {
                    print!(".");
                    if (iter + 1) % 50 == 0 {
                        println!(" {}/{}", iter + 1, iterations);
                    }
                }
            }

            if iterations > 1 && iterations % 50 != 0 {
                println!(" {}/{}", iterations, iterations);
            }

            let eval_time = eval_start.elapsed();
            (parse_time, eval_time, evaluated_schema, eval, iteration_times)
        };

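        // Per-scenario report: a single figure for one iteration, otherwise
        // total / average / min / max plus cache statistics.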
        let total_iterations = iteration_times.len();
        if total_iterations == 1 {
            println!("  Evaluation: {:?}", eval_time);
        } else {
            let avg_time = eval_time / total_iterations as u32;
            let min_time = iteration_times.iter().min().unwrap();
            let max_time = iteration_times.iter().max().unwrap();

            println!("  Total evaluation time: {:?}", eval_time);
            println!("  Total iterations: {}", total_iterations);
            println!("  Average per iteration: {:?}", avg_time);
            println!("  Min: {:?} | Max: {:?}", min_time, max_time);

            let cache_stats = eval.cache_stats();
            println!("  Cache: {} entries, {} hits, {} misses ({:.1}% hit rate)",
                cache_stats.entries,
                cache_stats.hits,
                cache_stats.misses,
                cache_stats.hit_rate * 100.0
            );
        }

        let total_time = parse_time + eval_time;
        println!("⏱️  Execution time: {:?}\n", total_time);

        if show_timing {
            json_eval_rs::print_timing_summary();
        }

        total_parse_time += parse_time;
        total_eval_time += eval_time;
        successful_scenarios += 1;

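        // Persist the results next to the inputs: the fully evaluated schema and
        // a metadata file containing `dependencies` and `sorted_evaluations`.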
        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));

        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));

        let mut metadata_obj = Map::new();
        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());

        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));

        println!("✅ Results saved:");
        println!("   - {}", evaluated_path.display());
        println!("   - {}\n", parsed_path.display());

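        // Optional regression check: compare the evaluated schema against the
        // scenario's expected-result file, if one exists.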
        if enable_comparison {
            if let Some(comp_path) = &scenario.comparison_path {
                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
                    comparison_failures += 1;
                }
                println!();
            }
        }
    }

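    // Aggregate summary across everything that ran.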
    if successful_scenarios > 0 {
        println!("\n{}", "=".repeat(50));
        println!("📊 Summary Statistics");
        println!("{}", "=".repeat(50));
        println!("Total scenarios run: {}", successful_scenarios);
        println!("Total parsing time: {:?}", total_parse_time);
        println!("Total evaluation time: {:?}", total_eval_time);
        println!("Total time: {:?}", total_parse_time + total_eval_time);

        if successful_scenarios > 1 {
            println!("\nAverage per scenario:");
            println!("  Parsing: {:?}", total_parse_time / successful_scenarios as u32);
            println!("  Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
        }

        if enable_comparison {
            println!("\nComparison failures: {}", comparison_failures);
        }

        println!("\n✅ All scenarios completed successfully!\n");
    }
}