1mod common;
2
3use json_eval_rs::{JSONEval, ParsedSchema};
4use serde_json::{Map, Value};
5use std::fs;
6use std::path::Path;
7use std::sync::Arc;
8use std::time::Instant;
9
/// Print the usage banner, option descriptions, and example invocations
/// for this benchmark binary to stdout.
fn print_help(program_name: &str) {
    println!("\nš JSON Evaluation - Benchmark Example\n");

    println!("USAGE:");
    println!(" {} [OPTIONS] [FILTER]\n", program_name);

    // Static help text, emitted verbatim in order.
    const STATIC_LINES: &[&str] = &[
        "OPTIONS:",
        " -h, --help Show this help message",
        " -i, --iterations <COUNT> Number of evaluation iterations (default: 1)",
        " --parsed Use ParsedSchema for caching (parse once, reuse)",
        " --cache Reuse JSONEval instance across iterations",
        " --concurrent <COUNT> Test concurrent evaluations with N threads",
        " --compare Enable comparison with expected results",
        " --timing Show detailed internal timing breakdown",
        " --cpu-info Show CPU feature information\n",
        "ARGUMENTS:",
        " [FILTER] Optional filter to match scenario names\n",
        "EXAMPLES:",
    ];
    for line in STATIC_LINES {
        println!("{}", line);
    }

    // Example command lines; the single `{}` placeholder in each template
    // is substituted with the program name before printing.
    let examples = [
        " {} -i 100 zlw # Run 'zlw' scenario 100 times",
        " {} --parsed -i 100 # Use ParsedSchema, 100 iterations",
        " {} --parsed --concurrent 4 # Test 4 concurrent evaluations",
        " {} --compare # Run with comparison enabled",
    ];
    for template in examples {
        println!("{}", template.replacen("{}", program_name, 1));
    }
}
43
/// Benchmark driver: parses command-line flags, discovers benchmark
/// scenarios under `samples/`, evaluates each one for the requested number
/// of iterations (optionally on multiple threads and/or reusing a cached
/// `ParsedSchema` / `JSONEval` instance), writes the evaluated schema and
/// parse metadata back into `samples/`, and prints timing statistics.
fn main() {
    let args: Vec<String> = std::env::args().collect();
    // argv[0] is the invoked binary path; used only in help/usage text.
    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("benchmark");

    // CLI option state, with defaults.
    let mut iterations = 1usize;
    let mut scenario_filter: Option<String> = None;
    let mut show_cpu_info = false;
    let mut use_parsed_schema = false; // --parsed: compile the schema once, then reuse it
    let mut use_cache = false; // --cache: reuse the JSONEval instance across iterations
    let mut concurrent_count: Option<usize> = None; // --concurrent <N> threads
    let mut enable_comparison = false;
    let mut show_timing = false;
    let mut i = 1;

    // Hand-rolled flag parsing; options that take a value consume the
    // following argument by advancing `i` an extra step.
    while i < args.len() {
        let arg = &args[i];

        if arg == "-h" || arg == "--help" {
            print_help(program_name);
            return;
        } else if arg == "--cpu-info" {
            show_cpu_info = true;
        } else if arg == "--parsed" {
            use_parsed_schema = true;
        } else if arg == "--cache" {
            use_cache = true;
        } else if arg == "--compare" {
            enable_comparison = true;
        } else if arg == "--timing" {
            show_timing = true;
        } else if arg == "--concurrent" {
            if i + 1 >= args.len() {
                eprintln!("Error: {} requires a value", arg);
                print_help(program_name);
                return;
            }
            i += 1;
            // Zero threads is rejected along with non-numeric input.
            match args[i].parse::<usize>() {
                Ok(n) if n > 0 => concurrent_count = Some(n),
                _ => {
                    eprintln!(
                        "Error: concurrent count must be a positive integer, got '{}'",
                        args[i]
                    );
                    return;
                }
            }
        } else if arg == "-i" || arg == "--iterations" {
            if i + 1 >= args.len() {
                eprintln!("Error: {} requires a value", arg);
                print_help(program_name);
                return;
            }
            i += 1;
            match args[i].parse::<usize>() {
                Ok(n) if n > 0 => iterations = n,
                _ => {
                    eprintln!(
                        "Error: iterations must be a positive integer, got '{}'",
                        args[i]
                    );
                    return;
                }
            }
        } else if !arg.starts_with('-') {
            // A bare (non-flag) argument is the scenario name filter.
            // NOTE(review): a later bare argument silently overwrites an
            // earlier one — confirm that is intended.
            scenario_filter = Some(arg.clone());
        } else {
            eprintln!("Error: unknown option '{}'", arg);
            print_help(program_name);
            return;
        }

        i += 1;
    }

    println!("\nš JSON Evaluation - Benchmark\n");

    // CPU info is shown whenever more than a trivial single run is requested.
    if show_cpu_info || iterations > 1 || concurrent_count.is_some() {
        common::print_cpu_info();
    }

    if use_parsed_schema {
        println!("š¦ Mode: ParsedSchema (parse once, reuse for all iterations)\n");
    }

    if use_cache {
        println!("ā»ļø Mode: Cache (reuse JSONEval instance across iterations)\n");
    }

    if let Some(count) = concurrent_count {
        println!("š Concurrent evaluations: {} threads\n", count);
    } else if iterations > 1 {
        println!("š Iterations per scenario: {}\n", iterations);
    }

    if enable_comparison {
        println!("š Comparison: enabled");
    }
    if show_timing {
        println!("ā±ļø Internal timing: enabled");
    }
    if enable_comparison || show_timing {
        println!();
    }

    // Scenarios are discovered from paired schema/data files in `samples/`
    // (see `common::discover_scenarios` for the exact naming convention).
    let samples_dir = Path::new("samples");
    let mut scenarios = common::discover_scenarios(samples_dir);

    if let Some(ref filter) = scenario_filter {
        // Substring match on the scenario name.
        scenarios.retain(|s| s.name.contains(filter));
        println!("š Filtering scenarios matching: '{}'\n", filter);
    }

    if scenarios.is_empty() {
        if let Some(filter) = scenario_filter {
            println!(
                "ā¹ļø No scenarios found matching '{}' in `{}`.",
                filter,
                samples_dir.display()
            );
        } else {
            println!(
                "ā¹ļø No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
                samples_dir.display()
            );
        }
        return;
    }

    println!("š Found {} scenario(s)\n", scenarios.len());

    // Aggregated statistics across all scenarios for the final summary.
    let mut total_parse_time = std::time::Duration::ZERO;
    let mut total_eval_time = std::time::Duration::ZERO;
    let mut successful_scenarios = 0;
    let mut comparison_failures = 0;

    for scenario in &scenarios {
        println!("==============================");
        println!("Scenario: {}", scenario.name);
        println!(
            "Schema: {} ({})",
            scenario.schema_path.display(),
            if scenario.is_msgpack {
                "MessagePack"
            } else {
                "JSON"
            }
        );
        println!("Data: {}\n", scenario.data_path.display());

        // Reset the library's internal timing collectors per scenario so
        // the printed breakdown covers only this scenario's work.
        if show_timing {
            json_eval_rs::enable_timing();
            json_eval_rs::clear_timing_data();
        }

        let data_str = fs::read_to_string(&scenario.data_path)
            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));

        println!("Running evaluation...\n");

        // Each strategy branch yields the same tuple:
        // (parse_time, eval_time, final evaluated schema, a JSONEval
        // instance used later for metadata export, per-iteration wall times).
        let (parse_time, eval_time, evaluated_schema, eval, iteration_times) = if use_parsed_schema
        {
            // --parsed: compile the schema once up front and share the
            // resulting Arc<ParsedSchema> across iterations/threads.
            let start_time = Instant::now();

            let parsed_schema = if scenario.is_msgpack {
                let schema_msgpack = fs::read(&scenario.schema_path).unwrap_or_else(|e| {
                    panic!("failed to read {}: {}", scenario.schema_path.display(), e)
                });
                println!(
                    " š¦ MessagePack schema size: {} bytes",
                    schema_msgpack.len()
                );
                Arc::new(
                    ParsedSchema::parse_msgpack(&schema_msgpack)
                        .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)),
                )
            } else {
                let schema_str = fs::read_to_string(&scenario.schema_path).unwrap_or_else(|e| {
                    panic!("failed to read {}: {}", scenario.schema_path.display(), e)
                });
                Arc::new(
                    ParsedSchema::parse(&schema_str)
                        .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)),
                )
            };

            let parse_time = start_time.elapsed();
            println!(" Schema parsing & compilation: {:?}", parse_time);

            if let Some(thread_count) = concurrent_count {
                // Concurrent mode: every thread runs the full iteration
                // count against its own clone of the shared parsed schema.
                use std::thread;

                let eval_start = Instant::now();
                let mut handles = vec![];

                for thread_id in 0..thread_count {
                    // Per-thread owned copies so the move closure is 'static.
                    let parsed_clone = parsed_schema.clone();
                    let data_str_clone = data_str.clone();
                    let iter_count = iterations;
                    let thread_use_cache = use_cache;

                    let handle = thread::spawn(move || {
                        let mut thread_times = Vec::with_capacity(iter_count);
                        let mut last_schema = Value::Null;

                        let mut eval_instance = JSONEval::with_parsed_schema(
                            parsed_clone.clone(),
                            Some("{}"),
                            Some(&data_str_clone),
                        )
                        .unwrap();

                        for iter in 0..iter_count {
                            let iter_start = Instant::now();

                            // Without --cache, rebuild the JSONEval on every
                            // iteration after the first so instance
                            // construction cost is included in the timing.
                            if !thread_use_cache && iter > 0 {
                                eval_instance = JSONEval::with_parsed_schema(
                                    parsed_clone.clone(),
                                    Some("{}"),
                                    Some(&data_str_clone),
                                )
                                .unwrap();
                            }

                            eval_instance
                                .evaluate(&data_str_clone, Some("{}"), None, None)
                                .unwrap();
                            last_schema = eval_instance.get_evaluated_schema(false);
                            thread_times.push(iter_start.elapsed());
                        }

                        // Return this thread's timings, its final schema,
                        // and its id for the completion report.
                        (thread_times, last_schema, thread_id)
                    });
                    handles.push(handle);
                }

                let mut all_iteration_times = Vec::new();
                let mut evaluated_schema = Value::Null;

                for handle in handles {
                    let (thread_times, thread_schema, thread_id) = handle.join().unwrap();
                    println!(
                        " Thread {} completed {} iterations",
                        thread_id,
                        thread_times.len()
                    );
                    all_iteration_times.extend(thread_times);
                    // The last-joined thread's schema is kept as the
                    // representative result (all threads use identical input).
                    evaluated_schema = thread_schema;
                }

                let eval_time = eval_start.elapsed();

                // Fresh instance used only to export dependency metadata
                // below — the per-thread instances were moved into closures.
                let temp_eval = JSONEval::with_parsed_schema(
                    parsed_schema.clone(),
                    Some("{}"),
                    Some(&data_str),
                )
                .unwrap();

                (
                    parse_time,
                    eval_time,
                    evaluated_schema,
                    temp_eval,
                    all_iteration_times,
                )
            } else {
                // Sequential --parsed mode.
                let eval_start = Instant::now();
                let mut evaluated_schema = Value::Null;
                let mut iteration_times = Vec::with_capacity(iterations);
                let mut eval_instance = JSONEval::with_parsed_schema(
                    parsed_schema.clone(),
                    Some("{}"),
                    Some(&data_str),
                )
                .unwrap();

                for iter in 0..iterations {
                    let iter_start = Instant::now();

                    // Same rebuild-per-iteration policy as the concurrent path.
                    if !use_cache && iter > 0 {
                        eval_instance = JSONEval::with_parsed_schema(
                            parsed_schema.clone(),
                            Some("{}"),
                            Some(&data_str),
                        )
                        .unwrap();
                    }

                    eval_instance
                        .evaluate(&data_str, Some("{}"), None, None)
                        .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
                    evaluated_schema = eval_instance.get_evaluated_schema(false);
                    iteration_times.push(iter_start.elapsed());

                    // Progress: one dot every 10 iterations, a count every 50.
                    if iterations > 1 && (iter + 1) % 10 == 0 {
                        print!(".");
                        if (iter + 1) % 50 == 0 {
                            println!(" {}/{}", iter + 1, iterations);
                        }
                    }
                }

                // Final progress line if the loop didn't end on a multiple of 50.
                if iterations > 1 && iterations % 50 != 0 {
                    println!(" {}/{}", iterations, iterations);
                }

                let eval_time = eval_start.elapsed();
                (
                    parse_time,
                    eval_time,
                    evaluated_schema,
                    eval_instance,
                    iteration_times,
                )
            }
        } else {
            // Legacy (non --parsed) mode: JSONEval parses the raw schema
            // itself; the raw bytes/string are kept for per-iteration rebuilds.
            let schema_msgpack = if scenario.is_msgpack {
                let bytes = fs::read(&scenario.schema_path).unwrap_or_else(|e| {
                    panic!("failed to read {}: {}", scenario.schema_path.display(), e)
                });
                println!(" š¦ MessagePack schema size: {} bytes", bytes.len());
                Some(bytes)
            } else {
                None
            };

            let schema_str = if !scenario.is_msgpack {
                Some(
                    fs::read_to_string(&scenario.schema_path).unwrap_or_else(|e| {
                        panic!("failed to read {}: {}", scenario.schema_path.display(), e)
                    }),
                )
            } else {
                None
            };

            // Here "parse time" includes full JSONEval construction.
            let start_time = Instant::now();
            let mut eval = if scenario.is_msgpack {
                JSONEval::new_from_msgpack(schema_msgpack.as_ref().unwrap(), None, Some(&data_str))
                    .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e))
            } else {
                JSONEval::new(schema_str.as_ref().unwrap(), None, Some(&data_str))
                    .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
            };
            let parse_time = start_time.elapsed();
            println!(" Schema parsing & compilation: {:?}", parse_time);

            let eval_start = Instant::now();
            let mut evaluated_schema = Value::Null;
            let mut iteration_times = Vec::with_capacity(iterations);

            for iter in 0..iterations {
                let iter_start = Instant::now();

                // Without --cache, re-create JSONEval (full schema re-parse)
                // each iteration after the first.
                if !use_cache && iter > 0 {
                    eval = if scenario.is_msgpack {
                        JSONEval::new_from_msgpack(
                            schema_msgpack.as_ref().unwrap(),
                            None,
                            Some(&data_str),
                        )
                        .unwrap_or_else(|e| {
                            panic!("failed to create JSONEval from MessagePack: {}", e)
                        })
                    } else {
                        JSONEval::new(schema_str.as_ref().unwrap(), None, Some(&data_str))
                            .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
                    };
                }

                eval.evaluate(&data_str, Some("{}"), None, None)
                    .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
                evaluated_schema = eval.get_evaluated_schema(false);
                iteration_times.push(iter_start.elapsed());

                // Progress: one dot every 10 iterations, a count every 50.
                if iterations > 1 && (iter + 1) % 10 == 0 {
                    print!(".");
                    if (iter + 1) % 50 == 0 {
                        println!(" {}/{}", iter + 1, iterations);
                    }
                }
            }

            // Final progress line if the loop didn't end on a multiple of 50.
            if iterations > 1 && iterations % 50 != 0 {
                println!(" {}/{}", iterations, iterations);
            }

            let eval_time = eval_start.elapsed();
            (
                parse_time,
                eval_time,
                evaluated_schema,
                eval,
                iteration_times,
            )
        };

        // Per-scenario timing report. In concurrent mode, `iteration_times`
        // holds threads × iterations samples.
        let total_iterations = iteration_times.len();
        if total_iterations == 1 {
            println!(" Evaluation: {:?}", eval_time);
        } else {
            let avg_time = eval_time / total_iterations as u32;
            let min_time = iteration_times.iter().min().unwrap();
            let max_time = iteration_times.iter().max().unwrap();

            println!(" Total evaluation time: {:?}", eval_time);
            println!(" Total iterations: {}", total_iterations);
            println!(" Average per iteration: {:?}", avg_time);
            println!(" Min: {:?} | Max: {:?}", min_time, max_time);
        }

        let total_time = parse_time + eval_time;
        println!("ā±ļø Execution time: {:?}\n", total_time);

        if show_timing {
            json_eval_rs::print_timing_summary();
        }

        total_parse_time += parse_time;
        total_eval_time += eval_time;
        successful_scenarios += 1;

        // Persist artifacts next to the inputs: the fully evaluated schema,
        // plus the evaluation metadata (dependency graph + ordering).
        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));

        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));

        let mut metadata_obj = Map::new();
        metadata_obj.insert(
            "dependencies".to_string(),
            serde_json::to_value(&*eval.dependencies).unwrap(),
        );
        metadata_obj.insert(
            "sorted_evaluations".to_string(),
            serde_json::to_value(&*eval.sorted_evaluations).unwrap(),
        );

        fs::write(
            &parsed_path,
            common::pretty_json(&Value::Object(metadata_obj)),
        )
        .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));

        println!("ā
Results saved:");
        println!(" - {}", evaluated_path.display());
        println!(" - {}\n", parsed_path.display());

        // Optional diff against a pre-recorded expected result; failures
        // are counted but do not abort the run.
        if enable_comparison {
            if let Some(comp_path) = &scenario.comparison_path {
                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
                    comparison_failures += 1;
                }
                println!();
            }
        }
    }

    // Cross-scenario summary (skipped when nothing ran — though the empty
    // case returns earlier above).
    if successful_scenarios > 0 {
        println!("\n{}", "=".repeat(50));
        println!("š Summary Statistics");
        println!("{}", "=".repeat(50));
        println!("Total scenarios run: {}", successful_scenarios);
        println!("Total parsing time: {:?}", total_parse_time);
        println!("Total evaluation time: {:?}", total_eval_time);
        println!("Total time: {:?}", total_parse_time + total_eval_time);

        if successful_scenarios > 1 {
            println!("\nAverage per scenario:");
            println!(
                " Parsing: {:?}",
                total_parse_time / successful_scenarios as u32
            );
            println!(
                " Evaluation: {:?}",
                total_eval_time / successful_scenarios as u32
            );
        }

        if enable_comparison {
            println!("\nComparison failures: {}", comparison_failures);
        }

        println!("\nā
All scenarios completed successfully!\n");
    }
}