pub struct ParsedSchema {
pub schema: Arc<Value>,
pub engine: Arc<RLogic>,
pub evaluations: Arc<IndexMap<String, LogicId>>,
pub tables: Arc<IndexMap<String, Value>>,
pub table_metadata: Arc<IndexMap<String, TableMetadata>>,
pub dependencies: Arc<IndexMap<String, IndexSet<String>>>,
pub sorted_evaluations: Arc<Vec<Vec<String>>>,
pub dependents_evaluations: Arc<IndexMap<String, Vec<DependentItem>>>,
pub rules_evaluations: Arc<Vec<String>>,
pub fields_with_rules: Arc<Vec<String>>,
pub others_evaluations: Arc<Vec<String>>,
pub value_evaluations: Arc<Vec<String>>,
pub layout_paths: Arc<Vec<String>>,
pub options_templates: Arc<Vec<(String, String, String)>>,
pub subforms: IndexMap<String, Arc<ParsedSchema>>,
}
Parsed schema containing all pre-compiled evaluation metadata. This structure is separate from JSONEval to enable caching and reuse.
Caching Strategy
Wrap ParsedSchema in Arc for sharing across threads and caching:
use std::sync::Arc;
// Parse once and wrap in Arc for caching
let parsed = Arc::new(ParsedSchema::parse(schema_str)?);
cache.insert(schema_key, parsed.clone());
// Reuse across multiple evaluations (Arc::clone is cheap)
let eval1 = JSONEval::with_parsed_schema(parsed.clone(), Some(context1), Some(data1))?;
let eval2 = JSONEval::with_parsed_schema(parsed.clone(), Some(context2), Some(data2))?;

Fields
schema: Arc<Value>
The original schema Value (wrapped in Arc for efficient sharing).

engine: Arc<RLogic>
RLogic engine with all compiled logic expressions (wrapped in Arc for sharing). Multiple JSONEval instances created from the same ParsedSchema share this engine.

evaluations: Arc<IndexMap<String, LogicId>>
Map of evaluation keys to compiled logic IDs (wrapped in Arc for zero-copy sharing).

tables: Arc<IndexMap<String, Value>>
Table definitions (rows, datas, skip, clear), wrapped in Arc for zero-copy sharing.

table_metadata: Arc<IndexMap<String, TableMetadata>>
Pre-compiled table metadata, computed at parse time for zero-copy evaluation.

dependencies: Arc<IndexMap<String, IndexSet<String>>>
Dependencies map (evaluation key -> set of dependency paths), wrapped in Arc for zero-copy sharing.

sorted_evaluations: Arc<Vec<Vec<String>>>
Evaluations grouped into parallel-executable batches (wrapped in Arc for zero-copy sharing). Each inner Vec contains evaluations that can run concurrently (a usage sketch follows the field list).

dependents_evaluations: Arc<IndexMap<String, Vec<DependentItem>>>
Evaluations categorized for result handling (wrapped in Arc for zero-copy sharing). Dependents: map from source field to list of dependent items.

rules_evaluations: Arc<Vec<String>>
Rules: evaluations with "/rules/" in their path (wrapped in Arc for zero-copy sharing).

fields_with_rules: Arc<Vec<String>>
Fields with rules: dotted paths of all fields that have rules (wrapped in Arc for zero-copy sharing).

others_evaluations: Arc<Vec<String>>
Others: all other evaluations not in sorted_evaluations (wrapped in Arc for zero-copy sharing).

value_evaluations: Arc<Vec<String>>
Value: evaluations whose path ends with ".value" (wrapped in Arc for zero-copy sharing).

layout_paths: Arc<Vec<String>>
Cached layout paths, collected at parse time (wrapped in Arc for zero-copy sharing).

options_templates: Arc<Vec<(String, String, String)>>
Options URL templates (url_path, template_str, params_path), wrapped in Arc for zero-copy sharing.

subforms: IndexMap<String, Arc<ParsedSchema>>
Subforms: cached ParsedSchema instances for array fields with items. Key is the schema path (e.g., "#/riders"), value is Arc<ParsedSchema>.
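As a minimal sketch of how this pre-compiled metadata might be inspected after parsing (assuming ParsedSchema is re-exported at the json_eval_rs crate root; the helper function name is illustrative):

use std::sync::Arc;
use json_eval_rs::ParsedSchema;

// Illustrative helper: walk the precompiled metadata of a parsed schema.
fn inspect_metadata(parsed: &Arc<ParsedSchema>) {
    // Batches in sorted_evaluations must run in order; entries inside a
    // single batch have no dependencies on each other and may run concurrently.
    for (batch_index, batch) in parsed.sorted_evaluations.iter().enumerate() {
        println!("batch {}: {} evaluation(s)", batch_index, batch.len());
    }

    // dependencies maps each evaluation key to the set of paths it reads.
    for (key, deps) in parsed.dependencies.iter() {
        println!("{} depends on {} path(s)", key, deps.len());
    }

    // Subforms are themselves ParsedSchema values, keyed by schema path.
    for path in parsed.subforms.keys() {
        println!("subform at {}", path);
    }
}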
Implementations

impl ParsedSchema
pub fn parse(schema: &str) -> Result<Self, String>
Parse a schema string into a ParsedSchema structure
Arguments
schema - JSON schema string
Returns
A Result containing the ParsedSchema or an error
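A minimal sketch of direct usage (the schema literal is illustrative, and the import path assumes ParsedSchema is exported at the json_eval_rs crate root):

use json_eval_rs::ParsedSchema;

fn load_schema() -> Result<ParsedSchema, String> {
    // Illustrative schema: one $params entry and one evaluated field.
    let schema_str = r#"{
        "$params": { "rate": { "type": "number" } },
        "total": {
            "type": "number",
            "$evaluation": { "*": [{"var": "$rate"}, 100] }
        }
    }"#;

    // parse compiles all $evaluation expressions up front and reports
    // any failure as an Err(String).
    ParsedSchema::parse(schema_str)
}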
Examples found in repository
fn demo_local_cache() -> Result<(), Box<dyn std::error::Error>> {
    println!("📦 Example 1: Local Cache Instance");
    println!("Creating a dedicated cache for this application...\n");

    let cache = ParsedSchemaCache::new();

    // Simple schema
    let schema_json = r#"{
        "$params": {
            "rate": { "type": "number" }
        },
        "result": {
            "type": "number",
            "title": "Calculated Result",
            "$evaluation": {
                "logic": { "*": [{"var": "$rate"}, 100] }
            }
        }
    }"#;

    // Parse and cache with a custom key
    println!("📝 Parsing schema and caching with key 'calculation-v1'...");
    let parsed = ParsedSchema::parse(schema_json)?;
    cache.insert("calculation-v1".to_string(), Arc::new(parsed));

    println!("✅ Schema cached successfully");
    println!(" Cache size: {} entries", cache.len());
    println!(" Keys: {:?}\n", cache.keys());

    // Retrieve and use cached schema
    println!("🔍 Retrieving cached schema...");
    if let Some(cached_schema) = cache.get("calculation-v1") {
        println!("✅ Retrieved from cache");

        // Create JSONEval from cached ParsedSchema
        let mut eval = JSONEval::with_parsed_schema(cached_schema, Some(r#"{"rate": 1.5}"#), None)?;
        eval.evaluate("{}", None)?;

        let evaluated = eval.get_evaluated_schema(false);
        let result = evaluated.pointer("/result")
            .and_then(|v| v.as_f64())
            .unwrap_or(0.0);
        println!(" Evaluation result: {}\n", result);
    }

    // Check cache stats
    let stats = cache.stats();
    println!("📊 Cache Statistics: {}", stats);

    // Remove entry
    println!("\n🗑️ Removing 'calculation-v1' from cache...");
    cache.remove("calculation-v1");
    println!(" Cache size after removal: {}", cache.len());

    Ok(())
}

fn demo_global_cache() -> Result<(), Box<dyn std::error::Error>> {
    println!("🌍 Example 2: Global Cache Instance");
    println!("Using the built-in PARSED_SCHEMA_CACHE...\n");

    let schema_json = r#"{
        "$params": {
            "x": { "type": "number" },
            "y": { "type": "number" }
        },
        "sum": {
            "type": "number",
            "$evaluation": { "+": [{"var": "$x"}, {"var": "$y"}] }
        }
    }"#;

    // Use global cache
    println!("📝 Caching schema globally with key 'math-operations'...");
    let parsed = ParsedSchema::parse(schema_json)?;
    PARSED_SCHEMA_CACHE.insert("math-operations".to_string(), Arc::new(parsed));

    println!("✅ Schema cached globally");
    println!(" Global cache size: {}\n", PARSED_SCHEMA_CACHE.len());

    // Access from anywhere in the application
    simulate_another_function()?;

    // Clean up
    println!("\n🧹 Clearing global cache...");
    PARSED_SCHEMA_CACHE.clear();
    println!(" Global cache size: {}", PARSED_SCHEMA_CACHE.len());

    Ok(())
}

fn simulate_another_function() -> Result<(), Box<dyn std::error::Error>> {
    println!("🔄 In another function, accessing global cache...");

    if let Some(cached) = PARSED_SCHEMA_CACHE.get("math-operations") {
        println!("✅ Retrieved schema from global cache");

        let mut eval = JSONEval::with_parsed_schema(cached, Some(r#"{"x": 10, "y": 20}"#), None)?;
        eval.evaluate("{}", None)?;

        let evaluated = eval.get_evaluated_schema(false);
        let sum = evaluated.pointer("/sum")
            .and_then(|v| v.as_f64())
            .unwrap_or(0.0);
        println!(" Result: {}", sum);
    }

    Ok(())
}

fn demo_performance_comparison() -> Result<(), Box<dyn std::error::Error>> {
    println!("⚡ Example 3: Performance Comparison");
    println!("Comparing cached vs non-cached schema usage...\n");

    let schema_json = r#"{
        "$params": {
            "value": { "type": "number" }
        },
        "doubled": {
            "type": "number",
            "$evaluation": { "*": [{"var": "$value"}, 2] }
        },
        "tripled": {
            "type": "number",
            "$evaluation": { "*": [{"var": "$value"}, 3] }
        }
    }"#;

    let iterations = 100;

    // WITHOUT CACHE: Parse schema every time
    println!("🐌 Without cache (parse + evaluate each time):");
    let start = Instant::now();
    for i in 0..iterations {
        let context = format!(r#"{{"value": {}}}"#, i);
        let mut eval = JSONEval::new(schema_json, Some(&context), None)?;
        eval.evaluate("{}", None)?;
    }
    let without_cache = start.elapsed();
    println!(" Time: {:?}", without_cache);
    println!(" Avg per iteration: {:?}\n", without_cache / iterations);

    // WITH CACHE: Parse once, evaluate many times
    println!("🚀 With cache (parse once, reuse for all evaluations):");
    let cache = ParsedSchemaCache::new();

    // Parse once
    let parse_start = Instant::now();
    let parsed = ParsedSchema::parse(schema_json)?;
    cache.insert("perf-test".to_string(), Arc::new(parsed));
    let parse_time = parse_start.elapsed();

    // Evaluate many times
    let eval_start = Instant::now();
    for i in 0..iterations {
        if let Some(cached) = cache.get("perf-test") {
            let context = format!(r#"{{"value": {}}}"#, i);
            let mut eval = JSONEval::with_parsed_schema(cached.clone(), Some(&context), None)?;
            eval.evaluate("{}", None)?;
        }
    }
    let eval_time = eval_start.elapsed();
    let with_cache = parse_time + eval_time;

    println!(" Parse time: {:?}", parse_time);
    println!(" Eval time: {:?}", eval_time);
    println!(" Total time: {:?}", with_cache);
    println!(" Avg per iteration: {:?}\n", eval_time / iterations);

    let speedup = without_cache.as_secs_f64() / with_cache.as_secs_f64();
    println!("📈 Speedup: {:.2}x faster", speedup);

    Ok(())
}

fn demo_lazy_insertion() -> Result<(), Box<dyn std::error::Error>> {
    println!("🔄 Example 4: Lazy Insertion with get_or_insert_with");
    println!("Parse only if not already cached...\n");

    let cache = ParsedSchemaCache::new();

    let schema_json = r#"{
        "$params": {
            "name": { "type": "string" }
        },
        "greeting": {
            "type": "string",
            "$evaluation": {
                "cat": ["Hello, ", {"var": "$name"}, "!"]
            }
        }
    }"#;

    // First access: will parse
    println!("📝 First access (will parse)...");
    let start = Instant::now();
    let schema1 = cache.get_or_insert_with("greeting-schema", || {
        println!(" ⚙️ Parsing schema...");
        Arc::new(ParsedSchema::parse(schema_json).unwrap())
    });
    println!(" Time: {:?}\n", start.elapsed());

    // Second access: will use cached
    println!("🔍 Second access (will use cache)...");
    let start = Instant::now();
    let schema2 = cache.get_or_insert_with("greeting-schema", || {
        println!(" ⚙️ Parsing schema...");
        Arc::new(ParsedSchema::parse(schema_json).unwrap())
    });
    println!(" Time: {:?}", start.elapsed());

    // Verify they're the same Arc (pointer equality)
    println!("\n✅ Both accesses returned the same cached instance");
    println!(" Same pointer: {}", Arc::ptr_eq(&schema1, &schema2));

    Ok(())
}

More examples
fn main() {
    let args: Vec<String> = std::env::args().collect();
    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic_parsed");

    let mut scenario_filter: Option<String> = None;
    let mut enable_comparison = false;
    let mut show_timing = false;
    let mut i = 1;

    // Parse arguments
    while i < args.len() {
        let arg = &args[i];

        if arg == "-h" || arg == "--help" {
            print_help(program_name);
            return;
        } else if arg == "--compare" {
            enable_comparison = true;
        } else if arg == "--timing" {
            show_timing = true;
        } else if !arg.starts_with('-') {
            scenario_filter = Some(arg.clone());
        } else {
            eprintln!("Error: unknown option '{}'", arg);
            print_help(program_name);
            return;
        }

        i += 1;
    }

    println!("\n🚀 JSON Evaluation - Basic Example (ParsedSchema)\n");
    println!("📦 Using Arc<ParsedSchema> for efficient caching\n");

    if enable_comparison {
        println!("🔍 Comparison: enabled");
    }
    if show_timing {
        println!("⏱️ Internal timing: enabled");
    }
    if enable_comparison || show_timing {
        println!();
    }

    let samples_dir = Path::new("samples");
    let mut scenarios = common::discover_scenarios(samples_dir);

    // Filter scenarios if a filter is provided
    if let Some(ref filter) = scenario_filter {
        scenarios.retain(|s| s.name.contains(filter));
        println!("📋 Filtering scenarios matching: '{}'\n", filter);
    }

    if scenarios.is_empty() {
        if let Some(filter) = scenario_filter {
            println!(
                "ℹ️ No scenarios found matching '{}' in `{}`.",
                filter,
                samples_dir.display()
            );
        } else {
            println!(
                "ℹ️ No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
                samples_dir.display()
            );
        }
        return;
    }

    println!("📊 Found {} scenario(s)\n", scenarios.len());

    let mut total_parse_time = std::time::Duration::ZERO;
    let mut total_eval_time = std::time::Duration::ZERO;
    let mut successful_scenarios = 0;
    let mut comparison_failures = 0;

    for scenario in &scenarios {
        println!("==============================");
        println!("Scenario: {}", scenario.name);
        println!("Schema: {} ({})",
            scenario.schema_path.display(),
            if scenario.is_msgpack { "MessagePack" } else { "JSON" }
        );
        println!("Data: {}\n", scenario.data_path.display());

        // Clear timing data from previous scenarios
        if show_timing {
            json_eval_rs::enable_timing();
            json_eval_rs::clear_timing_data();
        }

        let data_str = fs::read_to_string(&scenario.data_path)
            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));

        // Step 1: Parse schema once
        let parse_start = Instant::now();
        let parsed_schema = if scenario.is_msgpack {
            let schema_msgpack = fs::read(&scenario.schema_path)
                .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
            println!(" 📦 MessagePack schema size: {} bytes", schema_msgpack.len());
            Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
                .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
        } else {
            let schema_str = fs::read_to_string(&scenario.schema_path)
                .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
            Arc::new(ParsedSchema::parse(&schema_str)
                .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
        };
        let parse_time = parse_start.elapsed();
        println!(" 📝 Schema parsing: {:?}", parse_time);

        // Step 2: Create JSONEval from ParsedSchema (reuses compiled logic)
        let eval_start = Instant::now();
        let mut eval = JSONEval::with_parsed_schema(
            parsed_schema.clone(), // Arc::clone is cheap!
            Some("{}"),
            Some(&data_str)
        ).unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e));

        eval.evaluate(&data_str, Some("{}"))
            .unwrap_or_else(|e| panic!("evaluation failed: {}", e));

        let evaluated_schema = eval.get_evaluated_schema(false);
        let eval_time = eval_start.elapsed();

        println!(" ⚡ Eval: {:?}", eval_time);
        println!(" ⏱️ Total: {:?}\n", parse_time + eval_time);

        // Print detailed timing breakdown if --timing flag is set
        if show_timing {
            json_eval_rs::print_timing_summary();
        }

        total_parse_time += parse_time;
        total_eval_time += eval_time;
        successful_scenarios += 1;

        // Save results
        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));

        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));

        let mut metadata_obj = Map::new();
        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
        metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());

        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));

        println!("✅ Results saved:");
        println!(" - {}", evaluated_path.display());
        println!(" - {}\n", parsed_path.display());

        // Optional comparison
        if enable_comparison {
            if let Some(comp_path) = &scenario.comparison_path {
                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
                    comparison_failures += 1;
                }
                println!();
            }
        }
    }

    // Print summary
    println!("{}", "=".repeat(50));
    println!("📊 Summary");
    println!("{}", "=".repeat(50));
    println!("Total scenarios run: {}", successful_scenarios);
    println!("Total parsing time: {:?}", total_parse_time);
    println!("Total evaluation time: {:?}", total_eval_time);
    println!("Total time: {:?}", total_parse_time + total_eval_time);

    if successful_scenarios > 1 {
        println!("\nAverage per scenario:");
        println!(" Parsing: {:?}", total_parse_time / successful_scenarios as u32);
        println!(" Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
    }

    if enable_comparison {
        println!("\nComparison failures: {}", comparison_failures);
    }

    println!("\n✅ All scenarios completed!\n");
}

fn main() {
    let args: Vec<String> = std::env::args().collect();
    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("benchmark");

    let mut iterations = 1usize;
    let mut scenario_filter: Option<String> = None;
    let mut show_cpu_info = false;
    let mut use_parsed_schema = false;
    let mut concurrent_count: Option<usize> = None;
    let mut enable_comparison = false;
    let mut show_timing = false;
    let mut i = 1;

    // Parse arguments
    while i < args.len() {
        let arg = &args[i];

        if arg == "-h" || arg == "--help" {
            print_help(program_name);
            return;
        } else if arg == "--cpu-info" {
            show_cpu_info = true;
        } else if arg == "--parsed" {
            use_parsed_schema = true;
        } else if arg == "--compare" {
            enable_comparison = true;
        } else if arg == "--timing" {
            show_timing = true;
        } else if arg == "--concurrent" {
            if i + 1 >= args.len() {
                eprintln!("Error: {} requires a value", arg);
                print_help(program_name);
                return;
            }
            i += 1;
            match args[i].parse::<usize>() {
                Ok(n) if n > 0 => concurrent_count = Some(n),
                _ => {
                    eprintln!("Error: concurrent count must be a positive integer, got '{}'", args[i]);
                    return;
                }
            }
        } else if arg == "-i" || arg == "--iterations" {
            if i + 1 >= args.len() {
                eprintln!("Error: {} requires a value", arg);
                print_help(program_name);
                return;
            }
            i += 1;
            match args[i].parse::<usize>() {
                Ok(n) if n > 0 => iterations = n,
                _ => {
                    eprintln!("Error: iterations must be a positive integer, got '{}'", args[i]);
                    return;
                }
            }
        } else if !arg.starts_with('-') {
            scenario_filter = Some(arg.clone());
        } else {
            eprintln!("Error: unknown option '{}'", arg);
            print_help(program_name);
            return;
        }

        i += 1;
    }

    println!("\n🚀 JSON Evaluation - Benchmark\n");

    // Show CPU info if requested or if running benchmarks
    if show_cpu_info || iterations > 1 || concurrent_count.is_some() {
        common::print_cpu_info();
    }

    if use_parsed_schema {
        println!("📦 Mode: ParsedSchema (parse once, reuse for all iterations)\n");
    }

    if let Some(count) = concurrent_count {
        println!("🔀 Concurrent evaluations: {} threads\n", count);
    } else if iterations > 1 {
        println!("🔄 Iterations per scenario: {}\n", iterations);
    }

    if enable_comparison {
        println!("🔍 Comparison: enabled");
    }
    if show_timing {
        println!("⏱️ Internal timing: enabled");
    }
    if enable_comparison || show_timing {
        println!();
    }

    let samples_dir = Path::new("samples");
    let mut scenarios = common::discover_scenarios(samples_dir);

    // Filter scenarios if a filter is provided
    if let Some(ref filter) = scenario_filter {
        scenarios.retain(|s| s.name.contains(filter));
        println!("📋 Filtering scenarios matching: '{}'\n", filter);
    }

    if scenarios.is_empty() {
        if let Some(filter) = scenario_filter {
            println!(
                "ℹ️ No scenarios found matching '{}' in `{}`.",
                filter,
                samples_dir.display()
            );
        } else {
            println!(
                "ℹ️ No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
                samples_dir.display()
            );
        }
        return;
    }

    println!("📊 Found {} scenario(s)\n", scenarios.len());

    let mut total_parse_time = std::time::Duration::ZERO;
    let mut total_eval_time = std::time::Duration::ZERO;
    let mut successful_scenarios = 0;
    let mut comparison_failures = 0;

    for scenario in &scenarios {
        println!("==============================");
        println!("Scenario: {}", scenario.name);
        println!("Schema: {} ({})",
            scenario.schema_path.display(),
            if scenario.is_msgpack { "MessagePack" } else { "JSON" }
        );
        println!("Data: {}\n", scenario.data_path.display());

        // Clear timing data from previous scenarios
        if show_timing {
            json_eval_rs::enable_timing();
            json_eval_rs::clear_timing_data();
        }

        let data_str = fs::read_to_string(&scenario.data_path)
            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));

        println!("Running evaluation...\n");

        let (parse_time, eval_time, evaluated_schema, eval, iteration_times) = if use_parsed_schema {
            // ParsedSchema mode: parse once, reuse for all iterations/threads
            let start_time = Instant::now();

            let parsed_schema = if scenario.is_msgpack {
                let schema_msgpack = fs::read(&scenario.schema_path)
                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
                println!(" 📦 MessagePack schema size: {} bytes", schema_msgpack.len());
                Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
                    .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
            } else {
                let schema_str = fs::read_to_string(&scenario.schema_path)
                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
                Arc::new(ParsedSchema::parse(&schema_str)
                    .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
            };

            let parse_time = start_time.elapsed();
            println!(" Schema parsing & compilation: {:?}", parse_time);

            // Concurrent mode with ParsedSchema
            if let Some(thread_count) = concurrent_count {
                use std::thread;

                let eval_start = Instant::now();
                let mut handles = vec![];

                for thread_id in 0..thread_count {
                    let parsed_clone = parsed_schema.clone();
                    let data_str_clone = data_str.clone();
                    let iter_count = iterations;

                    let handle = thread::spawn(move || {
                        let mut thread_times = Vec::with_capacity(iter_count);
                        let mut last_schema = Value::Null;

                        for _ in 0..iter_count {
                            let iter_start = Instant::now();
                            let mut eval_instance = JSONEval::with_parsed_schema(
                                parsed_clone.clone(),
                                Some("{}"),
                                Some(&data_str_clone)
                            ).unwrap();

                            eval_instance.evaluate(&data_str_clone, Some("{}")).unwrap();
                            last_schema = eval_instance.get_evaluated_schema(false);
                            thread_times.push(iter_start.elapsed());
                        }

                        (thread_times, last_schema, thread_id)
                    });
                    handles.push(handle);
                }

                let mut all_iteration_times = Vec::new();
                let mut evaluated_schema = Value::Null;

                for handle in handles {
                    let (thread_times, thread_schema, thread_id) = handle.join().unwrap();
                    println!(" Thread {} completed {} iterations", thread_id, thread_times.len());
                    all_iteration_times.extend(thread_times);
                    evaluated_schema = thread_schema; // Use last thread's result
                }

                let eval_time = eval_start.elapsed();

                // Create a temp eval for metadata export
                let temp_eval = JSONEval::with_parsed_schema(
                    parsed_schema.clone(),
                    Some("{}"),
                    Some(&data_str)
                ).unwrap();

                (parse_time, eval_time, evaluated_schema, temp_eval, all_iteration_times)
            } else {
                // Sequential iterations with ParsedSchema
                let eval_start = Instant::now();
                let mut evaluated_schema = Value::Null;
                let mut iteration_times = Vec::with_capacity(iterations);
                let mut eval_instance = JSONEval::with_parsed_schema(
                    parsed_schema.clone(),
                    Some("{}"),
                    Some(&data_str)
                ).unwrap();

                for iter in 0..iterations {
                    let iter_start = Instant::now();
                    eval_instance.evaluate(&data_str, Some("{}"))
                        .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
                    evaluated_schema = eval_instance.get_evaluated_schema(false);
                    iteration_times.push(iter_start.elapsed());

                    if iterations > 1 && (iter + 1) % 10 == 0 {
                        print!(".");
                        if (iter + 1) % 50 == 0 {
                            println!(" {}/{}", iter + 1, iterations);
                        }
                    }
                }

                if iterations > 1 && iterations % 50 != 0 {
                    println!(" {}/{}", iterations, iterations);
                }

                let eval_time = eval_start.elapsed();
                (parse_time, eval_time, evaluated_schema, eval_instance, iteration_times)
            }
        } else {
            // Traditional mode: parse and create JSONEval each time
            let start_time = Instant::now();
            let mut eval = if scenario.is_msgpack {
                let schema_msgpack = fs::read(&scenario.schema_path)
                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
                println!(" 📦 MessagePack schema size: {} bytes", schema_msgpack.len());
                JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
                    .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e))
            } else {
                let schema_str = fs::read_to_string(&scenario.schema_path)
                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
                JSONEval::new(&schema_str, None, Some(&data_str))
                    .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
            };
            let parse_time = start_time.elapsed();
            println!(" Schema parsing & compilation: {:?}", parse_time);

            let eval_start = Instant::now();
            let mut evaluated_schema = Value::Null;
            let mut iteration_times = Vec::with_capacity(iterations);

            for iter in 0..iterations {
                let iter_start = Instant::now();
                eval.evaluate(&data_str, Some("{}"))
                    .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
                evaluated_schema = eval.get_evaluated_schema(false);
                iteration_times.push(iter_start.elapsed());

                if iterations > 1 && (iter + 1) % 10 == 0 {
                    print!(".");
                    if (iter + 1) % 50 == 0 {
                        println!(" {}/{}", iter + 1, iterations);
                    }
                }
            }

            if iterations > 1 && iterations % 50 != 0 {
                println!(" {}/{}", iterations, iterations);
            }

            let eval_time = eval_start.elapsed();
            (parse_time, eval_time, evaluated_schema, eval, iteration_times)
        };

        // Calculate statistics
        let total_iterations = iteration_times.len();
        if total_iterations == 1 {
            println!(" Evaluation: {:?}", eval_time);
        } else {
            let avg_time = eval_time / total_iterations as u32;
            let min_time = iteration_times.iter().min().unwrap();
            let max_time = iteration_times.iter().max().unwrap();

            println!(" Total evaluation time: {:?}", eval_time);
            println!(" Total iterations: {}", total_iterations);
            println!(" Average per iteration: {:?}", avg_time);
            println!(" Min: {:?} | Max: {:?}", min_time, max_time);

            // Show cache statistics
            let cache_stats = eval.cache_stats();
            println!(" Cache: {} entries, {} hits, {} misses ({:.1}% hit rate)",
                cache_stats.entries,
                cache_stats.hits,
                cache_stats.misses,
                cache_stats.hit_rate * 100.0
            );
        }

        let total_time = parse_time + eval_time;
        println!("⏱️ Execution time: {:?}\n", total_time);

        // Print detailed timing breakdown if --timing flag is set
        if show_timing {
            json_eval_rs::print_timing_summary();
        }

        // Track statistics
        total_parse_time += parse_time;
        total_eval_time += eval_time;
        successful_scenarios += 1;

        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));

        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));

        let mut metadata_obj = Map::new();
        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());

        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));

        println!("✅ Results saved:");
        println!(" - {}", evaluated_path.display());
        println!(" - {}\n", parsed_path.display());

        // Optional comparison
        if enable_comparison {
            if let Some(comp_path) = &scenario.comparison_path {
                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
                    comparison_failures += 1;
                }
                println!();
            }
        }
    }

    // Print summary statistics
    if successful_scenarios > 0 {
        println!("\n{}", "=".repeat(50));
        println!("📊 Summary Statistics");
        println!("{}", "=".repeat(50));
        println!("Total scenarios run: {}", successful_scenarios);
        println!("Total parsing time: {:?}", total_parse_time);
        println!("Total evaluation time: {:?}", total_eval_time);
        println!("Total time: {:?}", total_parse_time + total_eval_time);

        if successful_scenarios > 1 {
            println!("\nAverage per scenario:");
            println!(" Parsing: {:?}", total_parse_time / successful_scenarios as u32);
            println!(" Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
        }

        if enable_comparison {
            println!("\nComparison failures: {}", comparison_failures);
        }

        println!("\n✅ All scenarios completed successfully!\n");
    }
}

pub fn parse_value(schema_val: Value) -> Result<Self, String>
Parse an already-deserialized schema Value into a ParsedSchema structure

pub fn parse_msgpack(schema_msgpack: &[u8]) -> Result<Self, String>
Parse a MessagePack-encoded schema into a ParsedSchema structure
Arguments
schema_msgpack - MessagePack-encoded schema bytes
Returns
A Result containing the ParsedSchema or an error
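A minimal sketch of loading and evaluating a MessagePack schema (the file path is illustrative, and the imports assume the types are exported at the json_eval_rs crate root):

use std::fs;
use std::sync::Arc;
use json_eval_rs::{JSONEval, ParsedSchema};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Read raw MessagePack bytes from disk (path is illustrative).
    let schema_msgpack = fs::read("samples/example-schema.msgpack")?;

    // Parse once; wrap in Arc so the compiled schema can be shared and cached.
    let parsed = Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)?);

    // Create an evaluator from the cached ParsedSchema and run it.
    let mut eval = JSONEval::with_parsed_schema(parsed.clone(), Some("{}"), Some("{}"))?;
    eval.evaluate("{}", None)?;
    let evaluated = eval.get_evaluated_schema(false);
    println!("{}", evaluated);

    Ok(())
}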