pub fn clear_timing_data()

Expand description

Clear timing data.
Examples found in repository
examples/basic_parsed.rs (line 135)
42fn main() {
43 let args: Vec<String> = std::env::args().collect();
44 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic_parsed");
45
46 let mut scenario_filter: Option<String> = None;
47 let mut enable_comparison = false;
48 let mut show_timing = false;
49 let mut i = 1;
50
51 // Parse arguments
52 while i < args.len() {
53 let arg = &args[i];
54
55 if arg == "-h" || arg == "--help" {
56 print_help(program_name);
57 return;
58 } else if arg == "--compare" {
59 enable_comparison = true;
60 } else if arg == "--timing" {
61 show_timing = true;
62 } else if !arg.starts_with('-') {
63 scenario_filter = Some(arg.clone());
64 } else {
65 eprintln!("Error: unknown option '{}'", arg);
66 print_help(program_name);
67 return;
68 }
69
70 i += 1;
71 }
72
73 println!("\nš JSON Evaluation - Basic Example (Parsed / JSON & MsgPack)\n");
74 println!("š¦ Using Arc<ParsedSchema> for efficient caching\n");
75
76 if enable_comparison {
77 println!("š Comparison: enabled");
78 }
79 if show_timing {
80 println!("ā±ļø Internal timing: enabled");
81 }
82 if enable_comparison || show_timing {
83 println!();
84 }
85
86 let samples_dir = Path::new("samples");
87 let mut scenarios = common::discover_scenarios(samples_dir);
88
89 // Filter scenarios if a filter is provided
90 if let Some(ref filter) = scenario_filter {
91 scenarios.retain(|s| s.name.contains(filter));
92 println!("š Filtering scenarios matching: '{}'\n", filter);
93 }
94
95 if scenarios.is_empty() {
96 if let Some(filter) = scenario_filter {
97 println!(
98 "ā¹ļø No scenarios found matching '{}' in `{}`.",
99 filter,
100 samples_dir.display()
101 );
102 } else {
103 println!(
104 "ā¹ļø No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
105 samples_dir.display()
106 );
107 }
108 return;
109 }
110
111 println!("š Found {} scenario(s)\n", scenarios.len());
112
113 let mut total_parse_time = std::time::Duration::ZERO;
114 let mut total_eval_time = std::time::Duration::ZERO;
115 let mut successful_scenarios = 0;
116 let mut comparison_failures = 0;
117
118 for scenario in &scenarios {
119 println!("==============================");
120 println!("Scenario: {}", scenario.name);
121 println!(
122 "Schema: {} ({})",
123 scenario.schema_path.display(),
124 if scenario.is_msgpack {
125 "MessagePack"
126 } else {
127 "JSON"
128 }
129 );
130 println!("Data: {}\n", scenario.data_path.display());
131
132 // Clear timing data from previous scenarios
133 if show_timing {
134 json_eval_rs::enable_timing();
135 json_eval_rs::clear_timing_data();
136 }
137
138 let data_str = fs::read_to_string(&scenario.data_path)
139 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
140
141 // Step 1: Parse schema once
142 let parse_start = Instant::now();
143 let parsed_schema = if scenario.is_msgpack {
144 let schema_msgpack = fs::read(&scenario.schema_path).unwrap_or_else(|e| {
145 panic!("failed to read {}: {}", scenario.schema_path.display(), e)
146 });
147 println!(
148 " š¦ MessagePack schema size: {} bytes",
149 schema_msgpack.len()
150 );
151 Arc::new(
152 ParsedSchema::parse_msgpack(&schema_msgpack)
153 .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)),
154 )
155 } else {
156 let schema_str = fs::read_to_string(&scenario.schema_path).unwrap_or_else(|e| {
157 panic!("failed to read {}: {}", scenario.schema_path.display(), e)
158 });
159 Arc::new(
160 ParsedSchema::parse(&schema_str)
161 .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)),
162 )
163 };
164 let parse_time = parse_start.elapsed();
165 println!(" š Schema parsing: {:?}", parse_time);
166
167 // Step 2: Create JSONEval from ParsedSchema (reuses compiled logic)
168 let eval_start = Instant::now();
169 let mut eval = JSONEval::with_parsed_schema(
170 parsed_schema.clone(), // Arc::clone is cheap!
171 Some("{}"),
172 Some(&data_str),
173 )
174 .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e));
175
176 eval.evaluate(&data_str, Some("{}"), None, None)
177 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
178
179 let evaluated_schema = eval.get_evaluated_schema(false);
180 let eval_time = eval_start.elapsed();
181
182 println!(" ā” Eval: {:?}", eval_time);
183 println!(" ā±ļø Total: {:?}\n", parse_time + eval_time);
184
185 // Print detailed timing breakdown if --timing flag is set
186 if show_timing {
187 json_eval_rs::print_timing_summary();
188 }
189
190 total_parse_time += parse_time;
191 total_eval_time += eval_time;
192 successful_scenarios += 1;
193
194 // Save results
195 let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
196 let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
197
198 fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
199 .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
200
201 let mut metadata_obj = Map::new();
202 metadata_obj.insert(
203 "dependencies".to_string(),
204 serde_json::to_value(&*eval.dependencies).unwrap(),
205 );
206 metadata_obj.insert(
207 "evaluations".to_string(),
208 serde_json::to_value(&*eval.evaluations).unwrap(),
209 );
210 metadata_obj.insert(
211 "sorted_evaluations".to_string(),
212 serde_json::to_value(&*eval.sorted_evaluations).unwrap(),
213 );
214
215 fs::write(
216 &parsed_path,
217 common::pretty_json(&Value::Object(metadata_obj)),
218 )
219 .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
220
221 println!("✅ Results saved:");
222 println!(" - {}", evaluated_path.display());
223 println!(" - {}\n", parsed_path.display());
224
225 // Optional comparison
226 if enable_comparison {
227 if let Some(comp_path) = &scenario.comparison_path {
228 if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
229 comparison_failures += 1;
230 }
231 println!();
232 }
233 }
234 }
235
236 // Print summary
237 println!("{}", "=".repeat(50));
238 println!("š Summary");
239 println!("{}", "=".repeat(50));
240 println!("Total scenarios run: {}", successful_scenarios);
241 println!("Total parsing time: {:?}", total_parse_time);
242 println!("Total evaluation time: {:?}", total_eval_time);
243 println!("Total time: {:?}", total_parse_time + total_eval_time);
244
245 if successful_scenarios > 1 {
246 println!("\nAverage per scenario:");
247 println!(
248 " Parsing: {:?}",
249 total_parse_time / successful_scenarios as u32
250 );
251 println!(
252 " Evaluation: {:?}",
253 total_eval_time / successful_scenarios as u32
254 );
255 }
256
257 if enable_comparison {
258 println!("\nComparison failures: {}", comparison_failures);
259 }
260
261 println!("\n✅ All scenarios completed!\n");
262}

More examples
examples/basic.rs (line 129)
37fn main() {
38 let args: Vec<String> = std::env::args().collect();
39 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic");
40
41 let mut scenario_filter: Option<String> = None;
42 let mut enable_comparison = false;
43 let mut show_timing = false;
44 let mut i = 1;
45
46 // Parse arguments
47 while i < args.len() {
48 let arg = &args[i];
49
50 if arg == "-h" || arg == "--help" {
51 print_help(program_name);
52 return;
53 } else if arg == "--compare" {
54 enable_comparison = true;
55 } else if arg == "--timing" {
56 show_timing = true;
57 } else if !arg.starts_with('-') {
58 scenario_filter = Some(arg.clone());
59 } else {
60 eprintln!("Error: unknown option '{}'", arg);
61 print_help(program_name);
62 return;
63 }
64
65 i += 1;
66 }
67
68 println!("\nš JSON Evaluation - Basic Example (JSON/MsgPack Schema)\n");
69
70 if enable_comparison {
71 println!("š Comparison: enabled");
72 }
73 if show_timing {
74 println!("ā±ļø Internal timing: enabled");
75 }
76 if enable_comparison || show_timing {
77 println!();
78 }
79
80 let samples_dir = Path::new("samples");
81 let mut scenarios = common::discover_scenarios(samples_dir);
82
83 // Filter scenarios if a filter is provided
84 if let Some(ref filter) = scenario_filter {
85 scenarios.retain(|s| s.name.contains(filter));
86 println!("š Filtering scenarios matching: '{}'\n", filter);
87 }
88
89 if scenarios.is_empty() {
90 if let Some(filter) = scenario_filter {
91 println!(
92 "ā¹ļø No scenarios found matching '{}' in `{}`.",
93 filter,
94 samples_dir.display()
95 );
96 } else {
97 println!(
98 "ā¹ļø No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
99 samples_dir.display()
100 );
101 }
102 return;
103 }
104
105 println!("š Found {} scenario(s)\n", scenarios.len());
106
107 let mut total_parse_time = std::time::Duration::ZERO;
108 let mut total_eval_time = std::time::Duration::ZERO;
109 let mut successful_scenarios = 0;
110 let mut comparison_failures = 0;
111
112 for scenario in &scenarios {
113 println!("==============================");
114 println!("Scenario: {}", scenario.name);
115 println!(
116 "Schema: {} ({})",
117 scenario.schema_path.display(),
118 if scenario.is_msgpack {
119 "MessagePack"
120 } else {
121 "JSON"
122 }
123 );
124 println!("Data: {}\n", scenario.data_path.display());
125
126 // Clear timing data from previous scenarios
127 if show_timing {
128 json_eval_rs::enable_timing();
129 json_eval_rs::clear_timing_data();
130 }
131
132 let data_str = fs::read_to_string(&scenario.data_path)
133 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
134
135 // Step 1: Parse schema
136 let parse_start = Instant::now();
137
138 let mut eval = if scenario.is_msgpack {
139 let schema_msgpack = fs::read(&scenario.schema_path).unwrap_or_else(|e| {
140 panic!("failed to read {}: {}", scenario.schema_path.display(), e)
141 });
142 println!(
143 " š¦ MessagePack schema size: {} bytes",
144 schema_msgpack.len()
145 );
146 JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
147 .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e))
148 } else {
149 let schema_str = fs::read_to_string(&scenario.schema_path).unwrap_or_else(|e| {
150 panic!("failed to read {}: {}", scenario.schema_path.display(), e)
151 });
152 JSONEval::new(&schema_str, None, Some(&data_str))
153 .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
154 };
155
156 let parse_time = parse_start.elapsed();
157 println!(" š Parse (new): {:?}", parse_time);
158
159 // Step 2: Evaluate
160 let eval_start = Instant::now();
161
162 eval.evaluate(&data_str, Some("{}"), None, None)
163 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
164
165 // Step 3: Validate
166 let validation_start = Instant::now();
167 let validation_result = eval
168 .validate(&data_str, None, None, None)
169 .unwrap_or_else(|e| panic!("validation failed: {}", e));
170 let validation_time = validation_start.elapsed();
171 println!(" š”ļø Validate: {:?}", validation_time);
172
173 // Legacy behavior: get_evaluated_schema takes skip_layout: bool
174 // We pass false to ensure layout IS resolved
175 let evaluated_schema = eval.get_evaluated_schema(false);
176 let schema_value = eval.get_schema_value();
177 let eval_time = eval_start.elapsed();
178
179 println!(" ā” Eval: {:?}", eval_time);
180 println!(" ā±ļø Total: {:?}\n", parse_time + eval_time);
181
182 // Print detailed timing breakdown if --timing flag is set
183 if show_timing {
184 json_eval_rs::print_timing_summary();
185 }
186
187 total_parse_time += parse_time;
188 total_eval_time += eval_time;
189 successful_scenarios += 1;
190
191 // Save results
192 let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
193 let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
194 let value_path = samples_dir.join(format!("{}-schema-value.json", scenario.name));
195 let validation_path = samples_dir.join(format!("{}-validation.json", scenario.name));
196
197 fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
198 .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
199
200 let mut metadata_obj = Map::new();
201 metadata_obj.insert(
202 "dependencies".to_string(),
203 serde_json::to_value(&*eval.dependencies).unwrap(),
204 );
205 metadata_obj.insert(
206 "evaluations".to_string(),
207 serde_json::to_value(&*eval.evaluations).unwrap(),
208 );
209 metadata_obj.insert(
210 "sorted_evaluations".to_string(),
211 serde_json::to_value(&*eval.sorted_evaluations).unwrap(),
212 );
213
214 fs::write(
215 &parsed_path,
216 common::pretty_json(&Value::Object(metadata_obj)),
217 )
218 .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
219
220 fs::write(&value_path, common::pretty_json(&schema_value))
221 .unwrap_or_else(|e| panic!("failed to write {}: {}", value_path.display(), e));
222
223 let validation_value = serde_json::to_value(&validation_result)
224 .unwrap_or_else(|e| panic!("failed to serialize validation result: {}", e));
225 fs::write(&validation_path, common::pretty_json(&validation_value))
226 .unwrap_or_else(|e| panic!("failed to write {}: {}", validation_path.display(), e));
227
228 println!("✅ Results saved:");
229 println!(" - {}", evaluated_path.display());
230 println!(" - {}", parsed_path.display());
231 println!(" - {}", value_path.display());
232 println!(" - {}\n", validation_path.display());
233
234 // Optional comparison
235 if enable_comparison {
236 if let Some(comp_path) = &scenario.comparison_path {
237 if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
238 comparison_failures += 1;
239 }
240 println!();
241 }
242 }
243 }
244
245 // Print summary
246 println!("{}", "=".repeat(50));
247 println!("š Summary");
248 println!("{}", "=".repeat(50));
249 println!("Total scenarios run: {}", successful_scenarios);
250 println!("Total parse time: {:?}", total_parse_time);
251 println!("Total eval time: {:?}", total_eval_time);
252 println!("Total time: {:?}", total_parse_time + total_eval_time);
253
254 if successful_scenarios > 1 {
255 println!("\nAverage per scenario:");
256 println!(
257 " Parse: {:?}",
258 total_parse_time / successful_scenarios as u32
259 );
260 println!(
261 " Eval: {:?}",
262 total_eval_time / successful_scenarios as u32
263 );
264 }
265
266 if enable_comparison {
267 println!("Comparison failures: {}", comparison_failures);
268 }
269
270 println!("\n✅ All scenarios completed!\n");
271}

examples/benchmark.rs (line 200)
44fn main() {
45 let args: Vec<String> = std::env::args().collect();
46 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("benchmark");
47
48 let mut iterations = 1usize;
49 let mut scenario_filter: Option<String> = None;
50 let mut show_cpu_info = false;
51 let mut use_parsed_schema = false;
52 let mut use_cache = false;
53 let mut concurrent_count: Option<usize> = None;
54 let mut enable_comparison = false;
55 let mut show_timing = false;
56 let mut i = 1;
57
58 // Parse arguments
59 while i < args.len() {
60 let arg = &args[i];
61
62 if arg == "-h" || arg == "--help" {
63 print_help(program_name);
64 return;
65 } else if arg == "--cpu-info" {
66 show_cpu_info = true;
67 } else if arg == "--parsed" {
68 use_parsed_schema = true;
69 } else if arg == "--cache" {
70 use_cache = true;
71 } else if arg == "--compare" {
72 enable_comparison = true;
73 } else if arg == "--timing" {
74 show_timing = true;
75 } else if arg == "--concurrent" {
76 if i + 1 >= args.len() {
77 eprintln!("Error: {} requires a value", arg);
78 print_help(program_name);
79 return;
80 }
81 i += 1;
82 match args[i].parse::<usize>() {
83 Ok(n) if n > 0 => concurrent_count = Some(n),
84 _ => {
85 eprintln!(
86 "Error: concurrent count must be a positive integer, got '{}'",
87 args[i]
88 );
89 return;
90 }
91 }
92 } else if arg == "-i" || arg == "--iterations" {
93 if i + 1 >= args.len() {
94 eprintln!("Error: {} requires a value", arg);
95 print_help(program_name);
96 return;
97 }
98 i += 1;
99 match args[i].parse::<usize>() {
100 Ok(n) if n > 0 => iterations = n,
101 _ => {
102 eprintln!(
103 "Error: iterations must be a positive integer, got '{}'",
104 args[i]
105 );
106 return;
107 }
108 }
109 } else if !arg.starts_with('-') {
110 scenario_filter = Some(arg.clone());
111 } else {
112 eprintln!("Error: unknown option '{}'", arg);
113 print_help(program_name);
114 return;
115 }
116
117 i += 1;
118 }
119
120 println!("\nš JSON Evaluation - Benchmark\n");
121
122 // Show CPU info if requested or if running benchmarks
123 if show_cpu_info || iterations > 1 || concurrent_count.is_some() {
124 common::print_cpu_info();
125 }
126
127 if use_parsed_schema {
128 println!("š¦ Mode: ParsedSchema (parse once, reuse for all iterations)\n");
129 }
130
131 if use_cache {
132 println!("ā»ļø Mode: Cache (reuse JSONEval instance across iterations)\n");
133 }
134
135 if let Some(count) = concurrent_count {
136 println!("š Concurrent evaluations: {} threads\n", count);
137 } else if iterations > 1 {
138 println!("š Iterations per scenario: {}\n", iterations);
139 }
140
141 if enable_comparison {
142 println!("š Comparison: enabled");
143 }
144 if show_timing {
145 println!("ā±ļø Internal timing: enabled");
146 }
147 if enable_comparison || show_timing {
148 println!();
149 }
150
151 let samples_dir = Path::new("samples");
152 let mut scenarios = common::discover_scenarios(samples_dir);
153
154 // Filter scenarios if a filter is provided
155 if let Some(ref filter) = scenario_filter {
156 scenarios.retain(|s| s.name.contains(filter));
157 println!("š Filtering scenarios matching: '{}'\n", filter);
158 }
159
160 if scenarios.is_empty() {
161 if let Some(filter) = scenario_filter {
162 println!(
163 "ā¹ļø No scenarios found matching '{}' in `{}`.",
164 filter,
165 samples_dir.display()
166 );
167 } else {
168 println!(
169 "ā¹ļø No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
170 samples_dir.display()
171 );
172 }
173 return;
174 }
175
176 println!("š Found {} scenario(s)\n", scenarios.len());
177
178 let mut total_parse_time = std::time::Duration::ZERO;
179 let mut total_eval_time = std::time::Duration::ZERO;
180 let mut successful_scenarios = 0;
181 let mut comparison_failures = 0;
182
183 for scenario in &scenarios {
184 println!("==============================");
185 println!("Scenario: {}", scenario.name);
186 println!(
187 "Schema: {} ({})",
188 scenario.schema_path.display(),
189 if scenario.is_msgpack {
190 "MessagePack"
191 } else {
192 "JSON"
193 }
194 );
195 println!("Data: {}\n", scenario.data_path.display());
196
197 // Clear timing data from previous scenarios
198 if show_timing {
199 json_eval_rs::enable_timing();
200 json_eval_rs::clear_timing_data();
201 }
202
203 let data_str = fs::read_to_string(&scenario.data_path)
204 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
205
206 println!("Running evaluation...\n");
207
208 let (parse_time, eval_time, evaluated_schema, eval, iteration_times) = if use_parsed_schema
209 {
210 // ParsedSchema mode: parse once, reuse for all iterations/threads
211 let start_time = Instant::now();
212
213 let parsed_schema = if scenario.is_msgpack {
214 let schema_msgpack = fs::read(&scenario.schema_path).unwrap_or_else(|e| {
215 panic!("failed to read {}: {}", scenario.schema_path.display(), e)
216 });
217 println!(
218 " š¦ MessagePack schema size: {} bytes",
219 schema_msgpack.len()
220 );
221 Arc::new(
222 ParsedSchema::parse_msgpack(&schema_msgpack)
223 .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)),
224 )
225 } else {
226 let schema_str = fs::read_to_string(&scenario.schema_path).unwrap_or_else(|e| {
227 panic!("failed to read {}: {}", scenario.schema_path.display(), e)
228 });
229 Arc::new(
230 ParsedSchema::parse(&schema_str)
231 .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)),
232 )
233 };
234
235 let parse_time = start_time.elapsed();
236 println!(" Schema parsing & compilation: {:?}", parse_time);
237
238 // Concurrent mode with ParsedSchema
239 if let Some(thread_count) = concurrent_count {
240 use std::thread;
241
242 let eval_start = Instant::now();
243 let mut handles = vec![];
244
245 for thread_id in 0..thread_count {
246 let parsed_clone = parsed_schema.clone();
247 let data_str_clone = data_str.clone();
248 let iter_count = iterations;
249 let thread_use_cache = use_cache;
250
251 let handle = thread::spawn(move || {
252 let mut thread_times = Vec::with_capacity(iter_count);
253 let mut last_schema = Value::Null;
254
255 let mut eval_instance = JSONEval::with_parsed_schema(
256 parsed_clone.clone(),
257 Some("{}"),
258 Some(&data_str_clone),
259 )
260 .unwrap();
261
262 for iter in 0..iter_count {
263 let iter_start = Instant::now();
264
265 if !thread_use_cache && iter > 0 {
266 eval_instance = JSONEval::with_parsed_schema(
267 parsed_clone.clone(),
268 Some("{}"),
269 Some(&data_str_clone),
270 )
271 .unwrap();
272 }
273
274 eval_instance
275 .evaluate(&data_str_clone, Some("{}"), None, None)
276 .unwrap();
277 last_schema = eval_instance.get_evaluated_schema(false);
278 thread_times.push(iter_start.elapsed());
279 }
280
281 (thread_times, last_schema, thread_id)
282 });
283 handles.push(handle);
284 }
285
286 let mut all_iteration_times = Vec::new();
287 let mut evaluated_schema = Value::Null;
288
289 for handle in handles {
290 let (thread_times, thread_schema, thread_id) = handle.join().unwrap();
291 println!(
292 " Thread {} completed {} iterations",
293 thread_id,
294 thread_times.len()
295 );
296 all_iteration_times.extend(thread_times);
297 evaluated_schema = thread_schema; // Use last thread's result
298 }
299
300 let eval_time = eval_start.elapsed();
301
302 // Create a temp eval for metadata export
303 let temp_eval = JSONEval::with_parsed_schema(
304 parsed_schema.clone(),
305 Some("{}"),
306 Some(&data_str),
307 )
308 .unwrap();
309
310 (
311 parse_time,
312 eval_time,
313 evaluated_schema,
314 temp_eval,
315 all_iteration_times,
316 )
317 } else {
318 // Sequential iterations with ParsedSchema
319 let eval_start = Instant::now();
320 let mut evaluated_schema = Value::Null;
321 let mut iteration_times = Vec::with_capacity(iterations);
322 let mut eval_instance = JSONEval::with_parsed_schema(
323 parsed_schema.clone(),
324 Some("{}"),
325 Some(&data_str),
326 )
327 .unwrap();
328
329 for iter in 0..iterations {
330 let iter_start = Instant::now();
331
332 if !use_cache && iter > 0 {
333 eval_instance = JSONEval::with_parsed_schema(
334 parsed_schema.clone(),
335 Some("{}"),
336 Some(&data_str),
337 )
338 .unwrap();
339 }
340
341 eval_instance
342 .evaluate(&data_str, Some("{}"), None, None)
343 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
344 evaluated_schema = eval_instance.get_evaluated_schema(false);
345 iteration_times.push(iter_start.elapsed());
346
347 if iterations > 1 && (iter + 1) % 10 == 0 {
348 print!(".");
349 if (iter + 1) % 50 == 0 {
350 println!(" {}/{}", iter + 1, iterations);
351 }
352 }
353 }
354
355 if iterations > 1 && iterations % 50 != 0 {
356 println!(" {}/{}", iterations, iterations);
357 }
358
359 let eval_time = eval_start.elapsed();
360 (
361 parse_time,
362 eval_time,
363 evaluated_schema,
364 eval_instance,
365 iteration_times,
366 )
367 }
368 } else {
369 // Traditional mode: parse and create JSONEval each time
370 let schema_msgpack = if scenario.is_msgpack {
371 let bytes = fs::read(&scenario.schema_path).unwrap_or_else(|e| {
372 panic!("failed to read {}: {}", scenario.schema_path.display(), e)
373 });
374 println!(" š¦ MessagePack schema size: {} bytes", bytes.len());
375 Some(bytes)
376 } else {
377 None
378 };
379
380 let schema_str = if !scenario.is_msgpack {
381 Some(
382 fs::read_to_string(&scenario.schema_path).unwrap_or_else(|e| {
383 panic!("failed to read {}: {}", scenario.schema_path.display(), e)
384 }),
385 )
386 } else {
387 None
388 };
389
390 let start_time = Instant::now();
391 let mut eval = if scenario.is_msgpack {
392 JSONEval::new_from_msgpack(schema_msgpack.as_ref().unwrap(), None, Some(&data_str))
393 .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e))
394 } else {
395 JSONEval::new(schema_str.as_ref().unwrap(), None, Some(&data_str))
396 .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
397 };
398 let parse_time = start_time.elapsed();
399 println!(" Schema parsing & compilation: {:?}", parse_time);
400
401 let eval_start = Instant::now();
402 let mut evaluated_schema = Value::Null;
403 let mut iteration_times = Vec::with_capacity(iterations);
404
405 for iter in 0..iterations {
406 let iter_start = Instant::now();
407
408 if !use_cache && iter > 0 {
409 eval = if scenario.is_msgpack {
410 JSONEval::new_from_msgpack(
411 schema_msgpack.as_ref().unwrap(),
412 None,
413 Some(&data_str),
414 )
415 .unwrap_or_else(|e| {
416 panic!("failed to create JSONEval from MessagePack: {}", e)
417 })
418 } else {
419 JSONEval::new(schema_str.as_ref().unwrap(), None, Some(&data_str))
420 .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
421 };
422 }
423
424 eval.evaluate(&data_str, Some("{}"), None, None)
425 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
426 evaluated_schema = eval.get_evaluated_schema(false);
427 iteration_times.push(iter_start.elapsed());
428
429 if iterations > 1 && (iter + 1) % 10 == 0 {
430 print!(".");
431 if (iter + 1) % 50 == 0 {
432 println!(" {}/{}", iter + 1, iterations);
433 }
434 }
435 }
436
437 if iterations > 1 && iterations % 50 != 0 {
438 println!(" {}/{}", iterations, iterations);
439 }
440
441 let eval_time = eval_start.elapsed();
442 (
443 parse_time,
444 eval_time,
445 evaluated_schema,
446 eval,
447 iteration_times,
448 )
449 };
450
451 // Calculate statistics
452 let total_iterations = iteration_times.len();
453 if total_iterations == 1 {
454 println!(" Evaluation: {:?}", eval_time);
455 } else {
456 let avg_time = eval_time / total_iterations as u32;
457 let min_time = iteration_times.iter().min().unwrap();
458 let max_time = iteration_times.iter().max().unwrap();
459
460 println!(" Total evaluation time: {:?}", eval_time);
461 println!(" Total iterations: {}", total_iterations);
462 println!(" Average per iteration: {:?}", avg_time);
463 println!(" Min: {:?} | Max: {:?}", min_time, max_time);
464 }
465
466 let total_time = parse_time + eval_time;
467 println!("ā±ļø Execution time: {:?}\n", total_time);
468
469 // Print detailed timing breakdown if --timing flag is set
470 if show_timing {
471 json_eval_rs::print_timing_summary();
472 }
473
474 // Track statistics
475 total_parse_time += parse_time;
476 total_eval_time += eval_time;
477 successful_scenarios += 1;
478
479 let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
480 let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
481
482 fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
483 .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
484
485 let mut metadata_obj = Map::new();
486 metadata_obj.insert(
487 "dependencies".to_string(),
488 serde_json::to_value(&*eval.dependencies).unwrap(),
489 );
490 metadata_obj.insert(
491 "sorted_evaluations".to_string(),
492 serde_json::to_value(&*eval.sorted_evaluations).unwrap(),
493 );
494
495 fs::write(
496 &parsed_path,
497 common::pretty_json(&Value::Object(metadata_obj)),
498 )
499 .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
500
501 println!("✅ Results saved:");
502 println!(" - {}", evaluated_path.display());
503 println!(" - {}\n", parsed_path.display());
504
505 // Optional comparison
506 if enable_comparison {
507 if let Some(comp_path) = &scenario.comparison_path {
508 if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
509 comparison_failures += 1;
510 }
511 println!();
512 }
513 }
514 }
515
516 // Print summary statistics
517 if successful_scenarios > 0 {
518 println!("\n{}", "=".repeat(50));
519 println!("š Summary Statistics");
520 println!("{}", "=".repeat(50));
521 println!("Total scenarios run: {}", successful_scenarios);
522 println!("Total parsing time: {:?}", total_parse_time);
523 println!("Total evaluation time: {:?}", total_eval_time);
524 println!("Total time: {:?}", total_parse_time + total_eval_time);
525
526 if successful_scenarios > 1 {
527 println!("\nAverage per scenario:");
528 println!(
529 " Parsing: {:?}",
530 total_parse_time / successful_scenarios as u32
531 );
532 println!(
533 " Evaluation: {:?}",
534 total_eval_time / successful_scenarios as u32
535 );
536 }
537
538 if enable_comparison {
539 println!("\nComparison failures: {}", comparison_failures);
540 }
541
542 println!("\n✅ All scenarios completed successfully!\n");
543 }
544}