1mod common;
2
3use std::fs;
4use std::path::Path;
5use std::sync::Arc;
6use std::time::Instant;
7use json_eval_rs::{JSONEval, ParsedSchema};
8use serde_json::{Map, Value};
9
/// Print usage, options, and example invocations for this benchmark binary.
///
/// `program_name` (normally `argv[0]`) is substituted into the USAGE and
/// EXAMPLES sections so the text matches how the binary was invoked.
fn print_help(program_name: &str) {
    // Assemble the entire help text as one literal via `concat!` and emit it
    // with a single write; `{prog}` is a named format argument so the program
    // name can be reused on several lines without repeating positional args.
    print!(
        concat!(
            "\nš JSON Evaluation - Benchmark Example\n",
            "\n",
            "USAGE:\n",
            " {prog} [OPTIONS] [FILTER]\n",
            "\n",
            "OPTIONS:\n",
            " -h, --help Show this help message\n",
            " -i, --iterations <COUNT> Number of evaluation iterations (default: 1)\n",
            " --parsed Use ParsedSchema for caching (parse once, reuse)\n",
            " --cache Reuse JSONEval instance across iterations\n",
            " --concurrent <COUNT> Test concurrent evaluations with N threads\n",
            " --compare Enable comparison with expected results\n",
            " --timing Show detailed internal timing breakdown\n",
            " --cpu-info Show CPU feature information\n",
            "\n",
            "ARGUMENTS:\n",
            " [FILTER] Optional filter to match scenario names\n",
            "\n",
            "EXAMPLES:\n",
            " {prog} -i 100 zlw # Run 'zlw' scenario 100 times\n",
            " {prog} --parsed -i 100 # Use ParsedSchema, 100 iterations\n",
            " {prog} --parsed --concurrent 4 # Test 4 concurrent evaluations\n",
            " {prog} --compare # Run with comparison enabled\n",
        ),
        prog = program_name
    );
}
31
32fn main() {
33 let args: Vec<String> = std::env::args().collect();
34 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("benchmark");
35
36 let mut iterations = 1usize;
37 let mut scenario_filter: Option<String> = None;
38 let mut show_cpu_info = false;
39 let mut use_parsed_schema = false;
40 let mut use_cache = false;
41 let mut concurrent_count: Option<usize> = None;
42 let mut enable_comparison = false;
43 let mut show_timing = false;
44 let mut i = 1;
45
46 while i < args.len() {
48 let arg = &args[i];
49
50 if arg == "-h" || arg == "--help" {
51 print_help(program_name);
52 return;
53 } else if arg == "--cpu-info" {
54 show_cpu_info = true;
55 } else if arg == "--parsed" {
56 use_parsed_schema = true;
57 } else if arg == "--cache" {
58 use_cache = true;
59 } else if arg == "--compare" {
60 enable_comparison = true;
61 } else if arg == "--timing" {
62 show_timing = true;
63 } else if arg == "--concurrent" {
64 if i + 1 >= args.len() {
65 eprintln!("Error: {} requires a value", arg);
66 print_help(program_name);
67 return;
68 }
69 i += 1;
70 match args[i].parse::<usize>() {
71 Ok(n) if n > 0 => concurrent_count = Some(n),
72 _ => {
73 eprintln!("Error: concurrent count must be a positive integer, got '{}'", args[i]);
74 return;
75 }
76 }
77 } else if arg == "-i" || arg == "--iterations" {
78 if i + 1 >= args.len() {
79 eprintln!("Error: {} requires a value", arg);
80 print_help(program_name);
81 return;
82 }
83 i += 1;
84 match args[i].parse::<usize>() {
85 Ok(n) if n > 0 => iterations = n,
86 _ => {
87 eprintln!("Error: iterations must be a positive integer, got '{}'", args[i]);
88 return;
89 }
90 }
91 } else if !arg.starts_with('-') {
92 scenario_filter = Some(arg.clone());
93 } else {
94 eprintln!("Error: unknown option '{}'", arg);
95 print_help(program_name);
96 return;
97 }
98
99 i += 1;
100 }
101
102 println!("\nš JSON Evaluation - Benchmark\n");
103
104 if show_cpu_info || iterations > 1 || concurrent_count.is_some() {
106 common::print_cpu_info();
107 }
108
109 if use_parsed_schema {
110 println!("š¦ Mode: ParsedSchema (parse once, reuse for all iterations)\n");
111 }
112
113 if use_cache {
114 println!("ā»ļø Mode: Cache (reuse JSONEval instance across iterations)\n");
115 }
116
117 if let Some(count) = concurrent_count {
118 println!("š Concurrent evaluations: {} threads\n", count);
119 } else if iterations > 1 {
120 println!("š Iterations per scenario: {}\n", iterations);
121 }
122
123 if enable_comparison {
124 println!("š Comparison: enabled");
125 }
126 if show_timing {
127 println!("ā±ļø Internal timing: enabled");
128 }
129 if enable_comparison || show_timing {
130 println!();
131 }
132
133 let samples_dir = Path::new("samples");
134 let mut scenarios = common::discover_scenarios(samples_dir);
135
136 if let Some(ref filter) = scenario_filter {
138 scenarios.retain(|s| s.name.contains(filter));
139 println!("š Filtering scenarios matching: '{}'\n", filter);
140 }
141
142 if scenarios.is_empty() {
143 if let Some(filter) = scenario_filter {
144 println!(
145 "ā¹ļø No scenarios found matching '{}' in `{}`.",
146 filter,
147 samples_dir.display()
148 );
149 } else {
150 println!(
151 "ā¹ļø No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
152 samples_dir.display()
153 );
154 }
155 return;
156 }
157
158 println!("š Found {} scenario(s)\n", scenarios.len());
159
160 let mut total_parse_time = std::time::Duration::ZERO;
161 let mut total_eval_time = std::time::Duration::ZERO;
162 let mut successful_scenarios = 0;
163 let mut comparison_failures = 0;
164
165 for scenario in &scenarios {
166 println!("==============================");
167 println!("Scenario: {}", scenario.name);
168 println!("Schema: {} ({})",
169 scenario.schema_path.display(),
170 if scenario.is_msgpack { "MessagePack" } else { "JSON" }
171 );
172 println!("Data: {}\n", scenario.data_path.display());
173
174 if show_timing {
176 json_eval_rs::enable_timing();
177 json_eval_rs::clear_timing_data();
178 }
179
180 let data_str = fs::read_to_string(&scenario.data_path)
181 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
182
183 println!("Running evaluation...\n");
184
185 let (parse_time, eval_time, evaluated_schema, eval, iteration_times) = if use_parsed_schema {
186 let start_time = Instant::now();
188
189 let parsed_schema = if scenario.is_msgpack {
190 let schema_msgpack = fs::read(&scenario.schema_path)
191 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
192 println!(" š¦ MessagePack schema size: {} bytes", schema_msgpack.len());
193 Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
194 .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
195 } else {
196 let schema_str = fs::read_to_string(&scenario.schema_path)
197 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
198 Arc::new(ParsedSchema::parse(&schema_str)
199 .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
200 };
201
202 let parse_time = start_time.elapsed();
203 println!(" Schema parsing & compilation: {:?}", parse_time);
204
205 if let Some(thread_count) = concurrent_count {
207 use std::thread;
208
209 let eval_start = Instant::now();
210 let mut handles = vec![];
211
212 for thread_id in 0..thread_count {
213 let parsed_clone = parsed_schema.clone();
214 let data_str_clone = data_str.clone();
215 let iter_count = iterations;
216 let thread_use_cache = use_cache;
217
218 let handle = thread::spawn(move || {
219 let mut thread_times = Vec::with_capacity(iter_count);
220 let mut last_schema = Value::Null;
221
222 let mut eval_instance = JSONEval::with_parsed_schema(
223 parsed_clone.clone(),
224 Some("{}"),
225 Some(&data_str_clone)
226 ).unwrap();
227
228 for iter in 0..iter_count {
229 let iter_start = Instant::now();
230
231 if !thread_use_cache && iter > 0 {
232 eval_instance = JSONEval::with_parsed_schema(
233 parsed_clone.clone(),
234 Some("{}"),
235 Some(&data_str_clone)
236 ).unwrap();
237 }
238
239 eval_instance.evaluate(&data_str_clone, Some("{}"), None, None).unwrap();
240 last_schema = eval_instance.get_evaluated_schema(false);
241 thread_times.push(iter_start.elapsed());
242 }
243
244 (thread_times, last_schema, thread_id)
245 });
246 handles.push(handle);
247 }
248
249 let mut all_iteration_times = Vec::new();
250 let mut evaluated_schema = Value::Null;
251
252 for handle in handles {
253 let (thread_times, thread_schema, thread_id) = handle.join().unwrap();
254 println!(" Thread {} completed {} iterations", thread_id, thread_times.len());
255 all_iteration_times.extend(thread_times);
256 evaluated_schema = thread_schema; }
258
259 let eval_time = eval_start.elapsed();
260
261 let temp_eval = JSONEval::with_parsed_schema(
263 parsed_schema.clone(),
264 Some("{}"),
265 Some(&data_str)
266 ).unwrap();
267
268 (parse_time, eval_time, evaluated_schema, temp_eval, all_iteration_times)
269 } else {
270 let eval_start = Instant::now();
272 let mut evaluated_schema = Value::Null;
273 let mut iteration_times = Vec::with_capacity(iterations);
274 let mut eval_instance = JSONEval::with_parsed_schema(
275 parsed_schema.clone(),
276 Some("{}"),
277 Some(&data_str)
278 ).unwrap();
279
280 for iter in 0..iterations {
281 let iter_start = Instant::now();
282
283 if !use_cache && iter > 0 {
284 eval_instance = JSONEval::with_parsed_schema(
285 parsed_schema.clone(),
286 Some("{}"),
287 Some(&data_str)
288 ).unwrap();
289 }
290
291 eval_instance.evaluate(&data_str, Some("{}"), None, None)
292 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
293 evaluated_schema = eval_instance.get_evaluated_schema(false);
294 iteration_times.push(iter_start.elapsed());
295
296 if iterations > 1 && (iter + 1) % 10 == 0 {
297 print!(".");
298 if (iter + 1) % 50 == 0 {
299 println!(" {}/{}", iter + 1, iterations);
300 }
301 }
302 }
303
304 if iterations > 1 && iterations % 50 != 0 {
305 println!(" {}/{}", iterations, iterations);
306 }
307
308 let eval_time = eval_start.elapsed();
309 (parse_time, eval_time, evaluated_schema, eval_instance, iteration_times)
310 }
311 } else {
312 let schema_msgpack = if scenario.is_msgpack {
314 let bytes = fs::read(&scenario.schema_path)
315 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
316 println!(" š¦ MessagePack schema size: {} bytes", bytes.len());
317 Some(bytes)
318 } else {
319 None
320 };
321
322 let schema_str = if !scenario.is_msgpack {
323 Some(fs::read_to_string(&scenario.schema_path)
324 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e)))
325 } else {
326 None
327 };
328
329 let start_time = Instant::now();
330 let mut eval = if scenario.is_msgpack {
331 JSONEval::new_from_msgpack(schema_msgpack.as_ref().unwrap(), None, Some(&data_str))
332 .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e))
333 } else {
334 JSONEval::new(schema_str.as_ref().unwrap(), None, Some(&data_str))
335 .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
336 };
337 let parse_time = start_time.elapsed();
338 println!(" Schema parsing & compilation: {:?}", parse_time);
339
340 let eval_start = Instant::now();
341 let mut evaluated_schema = Value::Null;
342 let mut iteration_times = Vec::with_capacity(iterations);
343
344 for iter in 0..iterations {
345 let iter_start = Instant::now();
346
347 if !use_cache && iter > 0 {
348 eval = if scenario.is_msgpack {
349 JSONEval::new_from_msgpack(schema_msgpack.as_ref().unwrap(), None, Some(&data_str))
350 .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e))
351 } else {
352 JSONEval::new(schema_str.as_ref().unwrap(), None, Some(&data_str))
353 .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
354 };
355 }
356
357 eval.evaluate(&data_str, Some("{}"), None, None)
358 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
359 evaluated_schema = eval.get_evaluated_schema(false);
360 iteration_times.push(iter_start.elapsed());
361
362 if iterations > 1 && (iter + 1) % 10 == 0 {
363 print!(".");
364 if (iter + 1) % 50 == 0 {
365 println!(" {}/{}", iter + 1, iterations);
366 }
367 }
368 }
369
370 if iterations > 1 && iterations % 50 != 0 {
371 println!(" {}/{}", iterations, iterations);
372 }
373
374 let eval_time = eval_start.elapsed();
375 (parse_time, eval_time, evaluated_schema, eval, iteration_times)
376 };
377
378 let total_iterations = iteration_times.len();
380 if total_iterations == 1 {
381 println!(" Evaluation: {:?}", eval_time);
382 } else {
383 let avg_time = eval_time / total_iterations as u32;
384 let min_time = iteration_times.iter().min().unwrap();
385 let max_time = iteration_times.iter().max().unwrap();
386
387 println!(" Total evaluation time: {:?}", eval_time);
388 println!(" Total iterations: {}", total_iterations);
389 println!(" Average per iteration: {:?}", avg_time);
390 println!(" Min: {:?} | Max: {:?}", min_time, max_time);
391 }
392
393 let total_time = parse_time + eval_time;
394 println!("ā±ļø Execution time: {:?}\n", total_time);
395
396 if show_timing {
398 json_eval_rs::print_timing_summary();
399 }
400
401 total_parse_time += parse_time;
403 total_eval_time += eval_time;
404 successful_scenarios += 1;
405
406 let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
407 let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
408
409 fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
410 .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
411
412 let mut metadata_obj = Map::new();
413 metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
414 metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
415
416 fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
417 .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
418
419 println!("ā
Results saved:");
420 println!(" - {}", evaluated_path.display());
421 println!(" - {}\n", parsed_path.display());
422
423 if enable_comparison {
425 if let Some(comp_path) = &scenario.comparison_path {
426 if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
427 comparison_failures += 1;
428 }
429 println!();
430 }
431 }
432 }
433
434 if successful_scenarios > 0 {
436 println!("\n{}", "=".repeat(50));
437 println!("š Summary Statistics");
438 println!("{}", "=".repeat(50));
439 println!("Total scenarios run: {}", successful_scenarios);
440 println!("Total parsing time: {:?}", total_parse_time);
441 println!("Total evaluation time: {:?}", total_eval_time);
442 println!("Total time: {:?}", total_parse_time + total_eval_time);
443
444 if successful_scenarios > 1 {
445 println!("\nAverage per scenario:");
446 println!(" Parsing: {:?}", total_parse_time / successful_scenarios as u32);
447 println!(" Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
448 }
449
450 if enable_comparison {
451 println!("\nComparison failures: {}", comparison_failures);
452 }
453
454 println!("\nā
All scenarios completed successfully!\n");
455 }
456}