pub struct JSONEval {
pub schema: Arc<Value>,
pub engine: Arc<RLogic>,
pub evaluations: Arc<IndexMap<String, LogicId>>,
pub tables: Arc<IndexMap<String, Value>>,
pub table_metadata: Arc<IndexMap<String, TableMetadata>>,
pub dependencies: Arc<IndexMap<String, IndexSet<String>>>,
pub sorted_evaluations: Arc<Vec<Vec<String>>>,
pub dependents_evaluations: Arc<IndexMap<String, Vec<DependentItem>>>,
pub rules_evaluations: Arc<Vec<String>>,
pub fields_with_rules: Arc<Vec<String>>,
pub others_evaluations: Arc<Vec<String>>,
pub value_evaluations: Arc<Vec<String>>,
pub layout_paths: Arc<Vec<String>>,
pub options_templates: Arc<Vec<(String, String, String)>>,
pub subforms: IndexMap<String, Box<JSONEval>>,
pub reffed_by: Arc<IndexMap<String, Vec<String>>>,
pub conditional_hidden_fields: Arc<Vec<String>>,
pub conditional_readonly_fields: Arc<Vec<String>>,
pub context: Value,
pub data: Value,
pub evaluated_schema: Value,
pub eval_data: EvalData,
pub eval_cache: EvalCache,
pub cache_enabled: bool,
/* private fields */
}

Fields

schema: Arc<Value>

engine: Arc<RLogic>

evaluations: Arc<IndexMap<String, LogicId>>
Zero-copy Arc-wrapped collections (shared from ParsedSchema).

tables: Arc<IndexMap<String, Value>>

table_metadata: Arc<IndexMap<String, TableMetadata>>
Pre-compiled table metadata (computed at parse time for zero-copy evaluation).

dependencies: Arc<IndexMap<String, IndexSet<String>>>

sorted_evaluations: Arc<Vec<Vec<String>>>
Evaluations grouped into parallel-executable batches. Each inner Vec contains evaluations that can run concurrently.

dependents_evaluations: Arc<IndexMap<String, Vec<DependentItem>>>
Evaluations categorized for result handling. Dependents: map from source field to list of dependent items.

rules_evaluations: Arc<Vec<String>>
Rules: evaluations with "/rules/" in their path.

fields_with_rules: Arc<Vec<String>>
Fields with rules: dotted paths of all fields that have rules (for efficient validation).

others_evaluations: Arc<Vec<String>>
Others: all other evaluations not in sorted_evaluations (for evaluated_schema output).

value_evaluations: Arc<Vec<String>>
Value: evaluations whose path ends with ".value".

layout_paths: Arc<Vec<String>>
Cached layout paths (collected at parse time).

options_templates: Arc<Vec<(String, String, String)>>
Options URL templates (url_path, template_str, params_path) collected at parse time.

subforms: IndexMap<String, Box<JSONEval>>
Subforms: isolated JSONEval instances for array fields with items. The key is the schema path (e.g., "#/riders"); the value is the sub-JSONEval.

reffed_by: Arc<IndexMap<String, Vec<String>>>
Cached reference mappings from the parsed schema (reffed_by).

conditional_hidden_fields: Arc<Vec<String>>
Cached paths of fields that have hidden conditions.

conditional_readonly_fields: Arc<Vec<String>>
Cached paths of fields that have disabled conditions and a value property.

context: Value

data: Value

evaluated_schema: Value

eval_data: EvalData

eval_cache: EvalCache
Evaluation cache with content-based hashing and zero-copy storage.

cache_enabled: bool
Flag to enable/disable evaluation caching. Set to false for web API usage where each request creates a new JSONEval instance.
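Most of these fields are Arc-shared, read-only views of the parse results, so they can be inspected directly after construction. A minimal sketch, assuming JSONEval is exported at the crate root as the repository examples below suggest:

use json_eval_rs::JSONEval;
use serde_json::json;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let schema_str = json!({
        "type": "object",
        "properties": {
            "price": { "type": "number" },
            "tax": {
                "type": "number",
                "value": { "$evaluation": { "*": [ { "$ref": "#/properties/price" }, 0.1 ] } }
            }
        }
    }).to_string();

    let eval = JSONEval::new(&schema_str, None, None)?;

    // Zero-copy metadata shared from ParsedSchema: deref the Arc to inspect it.
    println!("dependency entries: {}", eval.dependencies.len());
    println!("parallel batches:   {}", eval.sorted_evaluations.len());
    Ok(())
}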
Implementations

impl JSONEval
pub fn should_cache_dependency(&self, dep_path: &str) -> bool

Check if a dependency should be part of the cache key.
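A small illustrative helper (hypothetical, not part of the crate) that walks the dependency map shown above and reports which paths would contribute to cache keys:

use json_eval_rs::JSONEval;

// Hypothetical helper: list the recorded dependency paths that the engine
// would include in its cache keys.
fn print_cache_key_deps(eval: &JSONEval) {
    for dep_path in eval.dependencies.keys() {
        if eval.should_cache_dependency(dep_path) {
            println!("cache key includes: {}", dep_path);
        }
    }
}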
pub fn purge_cache_for_changed_data_with_comparison(
    &self,
    changed_paths: &[String],
    old_data: &Value,
    new_data: &Value,
)

Purge cache entries affected by changed data paths, comparing old and new values.
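A hedged sketch of how this might be called after a data update; the "price" path mirrors the simple caching example below, and the exact path format expected here is an assumption:

use json_eval_rs::JSONEval;
use serde_json::Value;

// Sketch: purge only the entries whose inputs actually differ between the
// old and new data snapshots (path format is assumed, not documented here).
fn purge_price_change(eval: &JSONEval, old_data: &Value, new_data: &Value) {
    let changed_paths = vec!["price".to_string()];
    eval.purge_cache_for_changed_data_with_comparison(&changed_paths, old_data, new_data);
}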
pub fn purge_cache_for_changed_data(&self, changed_paths: &[String])

Purge cache entries affected by changed data paths.
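Sketch of the simpler variant, which invalidates by path alone without value comparison (same caveat about the assumed path format):

use json_eval_rs::JSONEval;

// Sketch: drop cache entries that depend on the listed data paths.
fn purge_after_edit(eval: &JSONEval) {
    let changed_paths = vec!["price".to_string()];
    eval.purge_cache_for_changed_data(&changed_paths);
}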
pub fn purge_cache_for_context_change(&self)

Purge cache entries affected by context changes.
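A sketch of one way to combine this with evaluate when the context is swapped out; whether evaluate already purges context-derived entries internally is not stated here:

use json_eval_rs::JSONEval;

// Sketch: drop context-derived cache entries, then re-evaluate with the new context.
fn evaluate_with_new_context(
    eval: &mut JSONEval,
    data: &str,
    new_context: &str,
) -> Result<(), String> {
    eval.purge_cache_for_context_change();
    eval.evaluate(data, Some(new_context), None)
}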
pub fn cache_stats(&self) -> CacheStats

Get cache statistics.
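The returned CacheStats implements Display and, as the benchmark example below shows, exposes entries, hits, misses, and hit_rate; a minimal reporting sketch:

use json_eval_rs::JSONEval;

// Sketch: summarize cache effectiveness after a batch of evaluations.
fn report_cache(eval: &JSONEval) {
    let stats = eval.cache_stats();
    println!(
        "cache: {} entries, {} hits, {} misses ({:.1}% hit rate)",
        stats.entries,
        stats.hits,
        stats.misses,
        stats.hit_rate * 100.0
    );
}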
Examples found in repository:
4fn main() {
5 let schema = json!({
6 "type": "object",
7 "properties": {
8 "price": {
9 "type": "number"
10 },
11 "tax": {
12 "type": "number",
13 "value": {
14 "$evaluation": {
15 "*": [
16 { "$ref": "#/properties/price" },
17 0.1
18 ]
19 }
20 }
21 },
22 "total": {
23 "type": "number",
24 "value": {
25 "$evaluation": {
26 "+": [
27 { "$ref": "#/properties/price" },
28 { "$ref": "#/properties/tax" }
29 ]
30 }
31 }
32 }
33 }
34 });
35
36 let schema_str = serde_json::to_string(&schema).unwrap();
37
38 println!("=== Example 1: With Caching (Default) ===");
39 {
40 let data = json!({ "price": 100 });
41 let data_str = serde_json::to_string(&data).unwrap();
42
43 let mut eval = JSONEval::new(&schema_str, None, Some(&data_str)).unwrap();
44
45 println!("Cache enabled: {}", eval.is_cache_enabled());
46 println!("Initial cache size: {}", eval.cache_len());
47
48 eval.evaluate(&data_str, None, None).unwrap();
49
50 println!("After evaluation cache size: {}", eval.cache_len());
51 let stats = eval.cache_stats();
52 println!("Cache stats: {}", stats);
53 }
54
55 println!("\n=== Example 2: Without Caching (Web API Mode) ===");
56 {
57 let data = json!({ "price": 200 });
58 let data_str = serde_json::to_string(&data).unwrap();
59
60 let mut eval = JSONEval::new(&schema_str, None, Some(&data_str)).unwrap();
61
62 // Disable caching for single-use web API scenario
63 eval.disable_cache();
64
65 println!("Cache enabled: {}", eval.is_cache_enabled());
66 println!("Initial cache size: {}", eval.cache_len());
67
68 eval.evaluate(&data_str, None, None).unwrap();
69
70 println!("After evaluation cache size: {}", eval.cache_len());
71 let stats = eval.cache_stats();
72 println!("Cache stats: {}", stats);
73
74 println!("\n✅ No cache overhead - perfect for web APIs!");
75 }
76
77 println!("\n=== Example 3: Re-enabling Cache ===");
78 {
79 let data = json!({ "price": 300 });
80 let data_str = serde_json::to_string(&data).unwrap();
81
82 let mut eval = JSONEval::new(&schema_str, None, Some(&data_str)).unwrap();
83
84 // Disable then re-enable
85 eval.disable_cache();
86 eval.enable_cache();
87
88 println!("Cache enabled: {}", eval.is_cache_enabled());
89 eval.evaluate(&data_str, None, None).unwrap();
90
91 println!("Cache size after evaluation: {}", eval.cache_len());
92 println!("\n✅ Cache can be toggled as needed!");
93 }
94}

More examples
31fn main() {
32 let args: Vec<String> = std::env::args().collect();
33 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("benchmark");
34
35 let mut iterations = 1usize;
36 let mut scenario_filter: Option<String> = None;
37 let mut show_cpu_info = false;
38 let mut use_parsed_schema = false;
39 let mut concurrent_count: Option<usize> = None;
40 let mut enable_comparison = false;
41 let mut show_timing = false;
42 let mut i = 1;
43
44 // Parse arguments
45 while i < args.len() {
46 let arg = &args[i];
47
48 if arg == "-h" || arg == "--help" {
49 print_help(program_name);
50 return;
51 } else if arg == "--cpu-info" {
52 show_cpu_info = true;
53 } else if arg == "--parsed" {
54 use_parsed_schema = true;
55 } else if arg == "--compare" {
56 enable_comparison = true;
57 } else if arg == "--timing" {
58 show_timing = true;
59 } else if arg == "--concurrent" {
60 if i + 1 >= args.len() {
61 eprintln!("Error: {} requires a value", arg);
62 print_help(program_name);
63 return;
64 }
65 i += 1;
66 match args[i].parse::<usize>() {
67 Ok(n) if n > 0 => concurrent_count = Some(n),
68 _ => {
69 eprintln!("Error: concurrent count must be a positive integer, got '{}'", args[i]);
70 return;
71 }
72 }
73 } else if arg == "-i" || arg == "--iterations" {
74 if i + 1 >= args.len() {
75 eprintln!("Error: {} requires a value", arg);
76 print_help(program_name);
77 return;
78 }
79 i += 1;
80 match args[i].parse::<usize>() {
81 Ok(n) if n > 0 => iterations = n,
82 _ => {
83 eprintln!("Error: iterations must be a positive integer, got '{}'", args[i]);
84 return;
85 }
86 }
87 } else if !arg.starts_with('-') {
88 scenario_filter = Some(arg.clone());
89 } else {
90 eprintln!("Error: unknown option '{}'", arg);
91 print_help(program_name);
92 return;
93 }
94
95 i += 1;
96 }
97
98 println!("\n🚀 JSON Evaluation - Benchmark\n");
99
100 // Show CPU info if requested or if running benchmarks
101 if show_cpu_info || iterations > 1 || concurrent_count.is_some() {
102 common::print_cpu_info();
103 }
104
105 if use_parsed_schema {
106 println!("📦 Mode: ParsedSchema (parse once, reuse for all iterations)\n");
107 }
108
109 if let Some(count) = concurrent_count {
110 println!("🔀 Concurrent evaluations: {} threads\n", count);
111 } else if iterations > 1 {
112 println!("🔄 Iterations per scenario: {}\n", iterations);
113 }
114
115 if enable_comparison {
116 println!("🔍 Comparison: enabled");
117 }
118 if show_timing {
119 println!("⏱️ Internal timing: enabled");
120 }
121 if enable_comparison || show_timing {
122 println!();
123 }
124
125 let samples_dir = Path::new("samples");
126 let mut scenarios = common::discover_scenarios(samples_dir);
127
128 // Filter scenarios if a filter is provided
129 if let Some(ref filter) = scenario_filter {
130 scenarios.retain(|s| s.name.contains(filter));
131 println!("📋 Filtering scenarios matching: '{}'\n", filter);
132 }
133
134 if scenarios.is_empty() {
135 if let Some(filter) = scenario_filter {
136 println!(
137 "ℹ️ No scenarios found matching '{}' in `{}`.",
138 filter,
139 samples_dir.display()
140 );
141 } else {
142 println!(
143 "ℹ️ No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
144 samples_dir.display()
145 );
146 }
147 return;
148 }
149
150 println!("📊 Found {} scenario(s)\n", scenarios.len());
151
152 let mut total_parse_time = std::time::Duration::ZERO;
153 let mut total_eval_time = std::time::Duration::ZERO;
154 let mut successful_scenarios = 0;
155 let mut comparison_failures = 0;
156
157 for scenario in &scenarios {
158 println!("==============================");
159 println!("Scenario: {}", scenario.name);
160 println!("Schema: {} ({})",
161 scenario.schema_path.display(),
162 if scenario.is_msgpack { "MessagePack" } else { "JSON" }
163 );
164 println!("Data: {}\n", scenario.data_path.display());
165
166 // Clear timing data from previous scenarios
167 if show_timing {
168 json_eval_rs::enable_timing();
169 json_eval_rs::clear_timing_data();
170 }
171
172 let data_str = fs::read_to_string(&scenario.data_path)
173 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
174
175 println!("Running evaluation...\n");
176
177 let (parse_time, eval_time, evaluated_schema, eval, iteration_times) = if use_parsed_schema {
178 // ParsedSchema mode: parse once, reuse for all iterations/threads
179 let start_time = Instant::now();
180
181 let parsed_schema = if scenario.is_msgpack {
182 let schema_msgpack = fs::read(&scenario.schema_path)
183 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
184 println!(" 📦 MessagePack schema size: {} bytes", schema_msgpack.len());
185 Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
186 .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
187 } else {
188 let schema_str = fs::read_to_string(&scenario.schema_path)
189 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
190 Arc::new(ParsedSchema::parse(&schema_str)
191 .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
192 };
193
194 let parse_time = start_time.elapsed();
195 println!(" Schema parsing & compilation: {:?}", parse_time);
196
197 // Concurrent mode with ParsedSchema
198 if let Some(thread_count) = concurrent_count {
199 use std::thread;
200
201 let eval_start = Instant::now();
202 let mut handles = vec![];
203
204 for thread_id in 0..thread_count {
205 let parsed_clone = parsed_schema.clone();
206 let data_str_clone = data_str.clone();
207 let iter_count = iterations;
208
209 let handle = thread::spawn(move || {
210 let mut thread_times = Vec::with_capacity(iter_count);
211 let mut last_schema = Value::Null;
212
213 for _ in 0..iter_count {
214 let iter_start = Instant::now();
215 let mut eval_instance = JSONEval::with_parsed_schema(
216 parsed_clone.clone(),
217 Some("{}"),
218 Some(&data_str_clone)
219 ).unwrap();
220
221 eval_instance.evaluate(&data_str_clone, Some("{}"), None).unwrap();
222 last_schema = eval_instance.get_evaluated_schema(false);
223 thread_times.push(iter_start.elapsed());
224 }
225
226 (thread_times, last_schema, thread_id)
227 });
228 handles.push(handle);
229 }
230
231 let mut all_iteration_times = Vec::new();
232 let mut evaluated_schema = Value::Null;
233
234 for handle in handles {
235 let (thread_times, thread_schema, thread_id) = handle.join().unwrap();
236 println!(" Thread {} completed {} iterations", thread_id, thread_times.len());
237 all_iteration_times.extend(thread_times);
238 evaluated_schema = thread_schema; // Use last thread's result
239 }
240
241 let eval_time = eval_start.elapsed();
242
243 // Create a temp eval for metadata export
244 let temp_eval = JSONEval::with_parsed_schema(
245 parsed_schema.clone(),
246 Some("{}"),
247 Some(&data_str)
248 ).unwrap();
249
250 (parse_time, eval_time, evaluated_schema, temp_eval, all_iteration_times)
251 } else {
252 // Sequential iterations with ParsedSchema
253 let eval_start = Instant::now();
254 let mut evaluated_schema = Value::Null;
255 let mut iteration_times = Vec::with_capacity(iterations);
256 let mut eval_instance = JSONEval::with_parsed_schema(
257 parsed_schema.clone(),
258 Some("{}"),
259 Some(&data_str)
260 ).unwrap();
261
262 for iter in 0..iterations {
263 let iter_start = Instant::now();
264 eval_instance.evaluate(&data_str, Some("{}"), None)
265 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
266 evaluated_schema = eval_instance.get_evaluated_schema(false);
267 iteration_times.push(iter_start.elapsed());
268
269 if iterations > 1 && (iter + 1) % 10 == 0 {
270 print!(".");
271 if (iter + 1) % 50 == 0 {
272 println!(" {}/{}", iter + 1, iterations);
273 }
274 }
275 }
276
277 if iterations > 1 && iterations % 50 != 0 {
278 println!(" {}/{}", iterations, iterations);
279 }
280
281 let eval_time = eval_start.elapsed();
282 (parse_time, eval_time, evaluated_schema, eval_instance, iteration_times)
283 }
284 } else {
285 // Traditional mode: parse and create JSONEval each time
286 let start_time = Instant::now();
287 let mut eval = if scenario.is_msgpack {
288 let schema_msgpack = fs::read(&scenario.schema_path)
289 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
290 println!(" 📦 MessagePack schema size: {} bytes", schema_msgpack.len());
291 JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
292 .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e))
293 } else {
294 let schema_str = fs::read_to_string(&scenario.schema_path)
295 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
296 JSONEval::new(&schema_str, None, Some(&data_str))
297 .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
298 };
299 let parse_time = start_time.elapsed();
300 println!(" Schema parsing & compilation: {:?}", parse_time);
301
302 let eval_start = Instant::now();
303 let mut evaluated_schema = Value::Null;
304 let mut iteration_times = Vec::with_capacity(iterations);
305
306 for iter in 0..iterations {
307 let iter_start = Instant::now();
308 eval.evaluate(&data_str, Some("{}"), None)
309 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
310 evaluated_schema = eval.get_evaluated_schema(false);
311 iteration_times.push(iter_start.elapsed());
312
313 if iterations > 1 && (iter + 1) % 10 == 0 {
314 print!(".");
315 if (iter + 1) % 50 == 0 {
316 println!(" {}/{}", iter + 1, iterations);
317 }
318 }
319 }
320
321 if iterations > 1 && iterations % 50 != 0 {
322 println!(" {}/{}", iterations, iterations);
323 }
324
325 let eval_time = eval_start.elapsed();
326 (parse_time, eval_time, evaluated_schema, eval, iteration_times)
327 };
328
329 // Calculate statistics
330 let total_iterations = iteration_times.len();
331 if total_iterations == 1 {
332 println!(" Evaluation: {:?}", eval_time);
333 } else {
334 let avg_time = eval_time / total_iterations as u32;
335 let min_time = iteration_times.iter().min().unwrap();
336 let max_time = iteration_times.iter().max().unwrap();
337
338 println!(" Total evaluation time: {:?}", eval_time);
339 println!(" Total iterations: {}", total_iterations);
340 println!(" Average per iteration: {:?}", avg_time);
341 println!(" Min: {:?} | Max: {:?}", min_time, max_time);
342
343 // Show cache statistics
344 let cache_stats = eval.cache_stats();
345 println!(" Cache: {} entries, {} hits, {} misses ({:.1}% hit rate)",
346 cache_stats.entries,
347 cache_stats.hits,
348 cache_stats.misses,
349 cache_stats.hit_rate * 100.0
350 );
351 }
352
353 let total_time = parse_time + eval_time;
354 println!("⏱️ Execution time: {:?}\n", total_time);
355
356 // Print detailed timing breakdown if --timing flag is set
357 if show_timing {
358 json_eval_rs::print_timing_summary();
359 }
360
361 // Track statistics
362 total_parse_time += parse_time;
363 total_eval_time += eval_time;
364 successful_scenarios += 1;
365
366 let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
367 let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
368
369 fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
370 .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
371
372 let mut metadata_obj = Map::new();
373 metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
374 metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
375
376 fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
377 .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
378
379 println!("✅ Results saved:");
380 println!(" - {}", evaluated_path.display());
381 println!(" - {}\n", parsed_path.display());
382
383 // Optional comparison
384 if enable_comparison {
385 if let Some(comp_path) = &scenario.comparison_path {
386 if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
387 comparison_failures += 1;
388 }
389 println!();
390 }
391 }
392 }
393
394 // Print summary statistics
395 if successful_scenarios > 0 {
396 println!("\n{}", "=".repeat(50));
397 println!("📊 Summary Statistics");
398 println!("{}", "=".repeat(50));
399 println!("Total scenarios run: {}", successful_scenarios);
400 println!("Total parsing time: {:?}", total_parse_time);
401 println!("Total evaluation time: {:?}", total_eval_time);
402 println!("Total time: {:?}", total_parse_time + total_eval_time);
403
404 if successful_scenarios > 1 {
405 println!("\nAverage per scenario:");
406 println!(" Parsing: {:?}", total_parse_time / successful_scenarios as u32);
407 println!(" Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
408 }
409
410 if enable_comparison {
411 println!("\nComparison failures: {}", comparison_failures);
412 }
413
414 println!("\n✅ All scenarios completed successfully!\n");
415 }
416}
pub fn clear_cache(&self)
Clear the cache manually
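A minimal sketch of clearing the cache between unrelated workloads while keeping the compiled schema and logic intact:

use json_eval_rs::JSONEval;

// Sketch: reset the evaluation cache without rebuilding the JSONEval instance.
fn reset_between_workloads(eval: &JSONEval) {
    println!("before clear: {} entries", eval.cache_len());
    eval.clear_cache();
    println!("after clear:  {} entries", eval.cache_len());
}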
pub fn enable_cache(&mut self)
Enable caching
Examples found in repository: see the caching example under cache_stats above.
pub fn disable_cache(&mut self)
Disable caching
Examples found in repository: see the caching example under cache_stats above.
pub fn is_cache_enabled(&self) -> bool
Check if cache is enabled
Examples found in repository: see the caching example under cache_stats above.
pub fn cache_len(&self) -> usize
Get cache size
Examples found in repository: see the caching example under cache_stats above.
impl JSONEval
pub fn evaluate(
    &mut self,
    data: &str,
    context: Option<&str>,
    paths: Option<&[String]>,
) -> Result<(), String>
Evaluate the schema with the given data and context.
Arguments

data - The data to evaluate.
context - The context to evaluate.

Returns
A Result indicating success or an error message.
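A compact end-to-end sketch distilled from the repository examples below (empty JSON object as context, no path filter):

use json_eval_rs::JSONEval;
use serde_json::json;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let schema_str = json!({
        "type": "object",
        "properties": {
            "price": { "type": "number" },
            "tax": {
                "type": "number",
                "value": { "$evaluation": { "*": [ { "$ref": "#/properties/price" }, 0.1 ] } }
            }
        }
    }).to_string();
    let data_str = json!({ "price": 100 }).to_string();

    let mut eval = JSONEval::new(&schema_str, None, Some(&data_str))?;
    // Evaluate all fields with an empty context and no path filter.
    eval.evaluate(&data_str, Some("{}"), None)?;

    let evaluated = eval.get_evaluated_schema(false);
    println!("{}", serde_json::to_string_pretty(&evaluated)?);
    Ok(())
}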
Examples found in repository:
38fn demo_local_cache() -> Result<(), Box<dyn std::error::Error>> {
39 println!("📦 Example 1: Local Cache Instance");
40 println!("Creating a dedicated cache for this application...\n");
41
42 let cache = ParsedSchemaCache::new();
43
44 // Simple schema
45 let schema_json = r#"{
46 "$params": {
47 "rate": { "type": "number" }
48 },
49 "result": {
50 "type": "number",
51 "title": "Calculated Result",
52 "$evaluation": {
53 "logic": { "*": [{"var": "$rate"}, 100] }
54 }
55 }
56 }"#;
57
58 // Parse and cache with a custom key
59 println!("📝 Parsing schema and caching with key 'calculation-v1'...");
60 let parsed = ParsedSchema::parse(schema_json)?;
61 cache.insert("calculation-v1".to_string(), Arc::new(parsed));
62
63 println!("✅ Schema cached successfully");
64 println!(" Cache size: {} entries", cache.len());
65 println!(" Keys: {:?}\n", cache.keys());
66
67 // Retrieve and use cached schema
68 println!("🔍 Retrieving cached schema...");
69 if let Some(cached_schema) = cache.get("calculation-v1") {
70 println!("✅ Retrieved from cache");
71
72 // Create JSONEval from cached ParsedSchema
73 let mut eval = JSONEval::with_parsed_schema(cached_schema, Some(r#"{"rate": 1.5}"#), None)?;
74 eval.evaluate("{}", None, None)?;
75
76 let evaluated = eval.get_evaluated_schema(false);
77 let result = evaluated.pointer("/result")
78 .and_then(|v| v.as_f64())
79 .unwrap_or(0.0);
80 println!(" Evaluation result: {}\n", result);
81 }
82
83 // Check cache stats
84 let stats = cache.stats();
85 println!("📊 Cache Statistics: {}", stats);
86
87 // Remove entry
88 println!("\n🗑️ Removing 'calculation-v1' from cache...");
89 cache.remove("calculation-v1");
90 println!(" Cache size after removal: {}", cache.len());
91
92 Ok(())
93}
94
95fn demo_global_cache() -> Result<(), Box<dyn std::error::Error>> {
96 println!("🌍 Example 2: Global Cache Instance");
97 println!("Using the built-in PARSED_SCHEMA_CACHE...\n");
98
99 let schema_json = r#"{
100 "$params": {
101 "x": { "type": "number" },
102 "y": { "type": "number" }
103 },
104 "sum": {
105 "type": "number",
106 "$evaluation": { "+": [{"var": "$x"}, {"var": "$y"}] }
107 }
108 }"#;
109
110 // Use global cache
111 println!("📝 Caching schema globally with key 'math-operations'...");
112 let parsed = ParsedSchema::parse(schema_json)?;
113 PARSED_SCHEMA_CACHE.insert("math-operations".to_string(), Arc::new(parsed));
114
115 println!("✅ Schema cached globally");
116 println!(" Global cache size: {}\n", PARSED_SCHEMA_CACHE.len());
117
118 // Access from anywhere in the application
119 simulate_another_function()?;
120
121 // Clean up
122 println!("\n🧹 Clearing global cache...");
123 PARSED_SCHEMA_CACHE.clear();
124 println!(" Global cache size: {}", PARSED_SCHEMA_CACHE.len());
125
126 Ok(())
127}
128
129fn simulate_another_function() -> Result<(), Box<dyn std::error::Error>> {
130 println!("🔄 In another function, accessing global cache...");
131
132 if let Some(cached) = PARSED_SCHEMA_CACHE.get("math-operations") {
133 println!("✅ Retrieved schema from global cache");
134
135 let mut eval = JSONEval::with_parsed_schema(cached, Some(r#"{"x": 10, "y": 20}"#), None)?;
136 eval.evaluate("{}", None, None)?;
137
138 let evaluated = eval.get_evaluated_schema(false);
139 let sum = evaluated.pointer("/sum")
140 .and_then(|v| v.as_f64())
141 .unwrap_or(0.0);
142 println!(" Result: {}", sum);
143 }
144
145 Ok(())
146}
147
148fn demo_performance_comparison() -> Result<(), Box<dyn std::error::Error>> {
149 println!("⚡ Example 3: Performance Comparison");
150 println!("Comparing cached vs non-cached schema usage...\n");
151
152 let schema_json = r#"{
153 "$params": {
154 "value": { "type": "number" }
155 },
156 "doubled": {
157 "type": "number",
158 "$evaluation": { "*": [{"var": "$value"}, 2] }
159 },
160 "tripled": {
161 "type": "number",
162 "$evaluation": { "*": [{"var": "$value"}, 3] }
163 }
164 }"#;
165
166 let iterations = 100;
167
168 // WITHOUT CACHE: Parse schema every time
169 println!("🐌 Without cache (parse + evaluate each time):");
170 let start = Instant::now();
171 for i in 0..iterations {
172 let context = format!(r#"{{"value": {}}}"#, i);
173 let mut eval = JSONEval::new(schema_json, Some(&context), None)?;
174 eval.evaluate("{}", None, None)?;
175 }
176 let without_cache = start.elapsed();
177 println!(" Time: {:?}", without_cache);
178 println!(" Avg per iteration: {:?}\n", without_cache / iterations);
179
180 // WITH CACHE: Parse once, evaluate many times
181 println!("🚀 With cache (parse once, reuse for all evaluations):");
182 let cache = ParsedSchemaCache::new();
183
184 // Parse once
185 let parse_start = Instant::now();
186 let parsed = ParsedSchema::parse(schema_json)?;
187 cache.insert("perf-test".to_string(), Arc::new(parsed));
188 let parse_time = parse_start.elapsed();
189
190 // Evaluate many times
191 let eval_start = Instant::now();
192 for i in 0..iterations {
193 if let Some(cached) = cache.get("perf-test") {
194 let context = format!(r#"{{"value": {}}}"#, i);
195 let mut eval = JSONEval::with_parsed_schema(cached.clone(), Some(&context), None)?;
196 eval.evaluate("{}", None, None)?;
197 }
198 }
199 let eval_time = eval_start.elapsed();
200 let with_cache = parse_time + eval_time;
201
202 println!(" Parse time: {:?}", parse_time);
203 println!(" Eval time: {:?}", eval_time);
204 println!(" Total time: {:?}", with_cache);
205 println!(" Avg per iteration: {:?}\n", eval_time / iterations);
206
207 let speedup = without_cache.as_secs_f64() / with_cache.as_secs_f64();
208 println!("📈 Speedup: {:.2}x faster", speedup);
209
210 Ok(())
211}

More examples
6fn main() {
7 println!("\n🚀 JSON Evaluation - SPAJ Toggle Example\n");
8
9 let schema_path = Path::new("samples/spaj.json");
10 let schema_str = fs::read_to_string(schema_path).expect("Failed to read schema");
11
12 // Initial data with minimal context required
13 let context_str = json!({
14 "agentProfile": { "sob": "AG" }
15 }).to_string();
16
17 let initial_data = json!({
18 "illustration": {
19 "basicinformation": {
20 "print_polflag": false
21 }
22 }
23 }).to_string();
24
25 // Initialize logic
26 let mut eval = JSONEval::new(&schema_str, Some(&context_str), Some(&initial_data))
27 .expect("Failed to create JSONEval");
28
29 // Helper to check visibility
30 let check_visibility = |eval: &mut JSONEval, expected_hidden: bool, step: &str| {
31 let result = eval.get_evaluated_schema(true);
32 let hidden = result.pointer("/illustration/properties/basicinformation/properties/print_poladdress/condition/hidden")
33 .and_then(|v| v.as_bool());
34
35 match hidden {
36 Some(val) => {
37 if val == expected_hidden {
38 println!("✅ {}: Hidden = {} (Expected: {})", step, val, expected_hidden);
39 } else {
40 println!("❌ {}: Hidden = {} (Expected: {})", step, val, expected_hidden);
41 }
42 },
43 None => println!("❌ {}: 'hidden' property not found", step),
44 }
45 };
46
47 // Step 1: Initial state (false)
48 println!("Step 1: Initial State (print_polflag: false)");
49 eval.evaluate(&initial_data, Some(&context_str), None).expect("Evaluation failed");
50 check_visibility(&mut eval, true, "Initial check");
51
52 // Step 2: Toggle to true
53 println!("\nStep 2: Toggle True (print_polflag: true)");
54 let data_true = json!({
55 "illustration": {
56 "basicinformation": {
57 "print_polflag": true
58 }
59 }
60 }).to_string();
61 eval.evaluate(&data_true, Some(&context_str), None).expect("Evaluation failed");
62 check_visibility(&mut eval, false, "Toggle ON check");
63
64 // Step 3: Toggle back to false
65 println!("\nStep 3: Toggle False (print_polflag: false)");
66 let data_false = json!({
67 "illustration": {
68 "basicinformation": {
69 "print_polflag": false
70 }
71 }
72 }).to_string();
73 eval.evaluate(&data_false, Some(&context_str), None).expect("Evaluation failed");
74
75 let hidden_path = "#/illustration/properties/basicinformation/properties/print_poladdress/condition/hidden";
76 if let Some(deps) = eval.dependencies.get(hidden_path) {
77 println!("Debug: Dependencies for hidden: {:?}", deps);
78 } else {
79 println!("Debug: No dependencies found for hidden path");
80 }
81
82 // Debug: Print current flag value
83 if let Some(val) = eval.get_evaluated_schema(true).pointer("/illustration/properties/basicinformation/properties/print_polflag/value") {
84 println!("Debug: print_polflag value is: {}", val);
85 }
86
87 check_visibility(&mut eval, true, "Toggle OFF check");
88}

28fn main() {
29 let args: Vec<String> = std::env::args().collect();
30 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic_msgpack");
31
32 let mut scenario_filter: Option<String> = None;
33 let mut enable_comparison = false;
34 let mut show_timing = false;
35 let mut i = 1;
36
37 // Parse arguments
38 while i < args.len() {
39 let arg = &args[i];
40
41 if arg == "-h" || arg == "--help" {
42 print_help(program_name);
43 return;
44 } else if arg == "--compare" {
45 enable_comparison = true;
46 } else if arg == "--timing" {
47 show_timing = true;
48 } else if !arg.starts_with('-') {
49 scenario_filter = Some(arg.clone());
50 } else {
51 eprintln!("Error: unknown option '{}'", arg);
52 print_help(program_name);
53 return;
54 }
55
56 i += 1;
57 }
58
59 println!("\n🚀 JSON Evaluation - Basic Example (MessagePack Schema)\n");
60
61 if enable_comparison {
62 println!("🔍 Comparison: enabled");
63 }
64 if show_timing {
65 println!("⏱️ Internal timing: enabled");
66 }
67 if enable_comparison || show_timing {
68 println!();
69 }
70
71 let samples_dir = Path::new("samples");
72 let mut scenarios = common::discover_scenarios(samples_dir);
73
74 // Filter to only MessagePack scenarios
75 scenarios.retain(|s| s.is_msgpack);
76
77 // Filter scenarios if a filter is provided
78 if let Some(ref filter) = scenario_filter {
79 scenarios.retain(|s| s.name.contains(filter));
80 println!("📋 Filtering scenarios matching: '{}'\n", filter);
81 }
82
83 if scenarios.is_empty() {
84 if let Some(filter) = scenario_filter {
85 println!(
86 "ℹ️ No MessagePack scenarios found matching '{}' in `{}`.",
87 filter,
88 samples_dir.display()
89 );
90 } else {
91 println!(
92 "ℹ️ No MessagePack scenarios discovered in `{}`. Add files like `name.bform` and `name-data.json`.",
93 samples_dir.display()
94 );
95 }
96 return;
97 }
98
99 println!("📊 Found {} MessagePack scenario(s)\n", scenarios.len());
100
101 let mut total_parse_time = std::time::Duration::ZERO;
102 let mut total_eval_time = std::time::Duration::ZERO;
103 let mut successful_scenarios = 0;
104 let mut comparison_failures = 0;
105
106 for scenario in &scenarios {
107 println!("==============================");
108 println!("Scenario: {}", scenario.name);
109 println!("Schema: {} (MessagePack)", scenario.schema_path.display());
110 println!("Data: {}\n", scenario.data_path.display());
111
112 // Clear timing data from previous scenarios
113 if show_timing {
114 json_eval_rs::enable_timing();
115 json_eval_rs::clear_timing_data();
116 }
117
118 let data_str = fs::read_to_string(&scenario.data_path)
119 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
120
121 // Step 1: Parse schema (new_from_msgpack)
122 let parse_start = Instant::now();
123
124 let schema_msgpack = fs::read(&scenario.schema_path)
125 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
126
127 println!(" 📦 MessagePack schema size: {} bytes", schema_msgpack.len());
128
129 let mut eval = JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
130 .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e));
131
132 let parse_time = parse_start.elapsed();
133 println!(" 📝 Parse (msgpack): {:?}", parse_time);
134
135 // Step 2: Evaluate
136 let eval_start = Instant::now();
137
138 eval.evaluate(&data_str, Some("{}"), None)
139 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
140
141 let evaluated_schema = eval.get_evaluated_schema(false);
142 let eval_time = eval_start.elapsed();
143
144 println!(" ⚡ Eval: {:?}", eval_time);
145 println!(" ⏱️ Total: {:?}\n", parse_time + eval_time);
146
147 // Print detailed timing breakdown if --timing flag is set
148 if show_timing {
149 json_eval_rs::print_timing_summary();
150 }
151
152 total_parse_time += parse_time;
153 total_eval_time += eval_time;
154 successful_scenarios += 1;
155
156 // Save results
157 let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
158 let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
159
160 fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
161 .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
162
163 let mut metadata_obj = Map::new();
164 metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
165 metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
166 metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
167
168 fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
169 .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
170
171 println!("✅ Results saved:");
172 println!(" - {}", evaluated_path.display());
173 println!(" - {}\n", parsed_path.display());
174
175 // Optional comparison
176 if enable_comparison {
177 if let Some(comp_path) = &scenario.comparison_path {
178 if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
179 comparison_failures += 1;
180 }
181 println!();
182 }
183 }
184 }
185
186 // Print summary
187 println!("{}", "=".repeat(50));
188 println!("📊 Summary");
189 println!("{}", "=".repeat(50));
190 println!("Total scenarios run: {}", successful_scenarios);
191 println!("Total parse time: {:?}", total_parse_time);
192 println!("Total eval time: {:?}", total_eval_time);
193 println!("Total time: {:?}", total_parse_time + total_eval_time);
194
195 if successful_scenarios > 1 {
196 println!("\nAverage per scenario:");
197 println!(" Parse: {:?}", total_parse_time / successful_scenarios as u32);
198 println!(" Eval: {:?}", total_eval_time / successful_scenarios as u32);
199 }
200
201 if enable_comparison {
202 println!("Comparison failures: {}", comparison_failures);
203 }
204
205 println!("\n✅ All scenarios completed!\n");
206}

28fn main() {
29 let args: Vec<String> = std::env::args().collect();
30 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic");
31
32 let mut scenario_filter: Option<String> = None;
33 let mut enable_comparison = false;
34 let mut show_timing = false;
35 let mut i = 1;
36
37 // Parse arguments
38 while i < args.len() {
39 let arg = &args[i];
40
41 if arg == "-h" || arg == "--help" {
42 print_help(program_name);
43 return;
44 } else if arg == "--compare" {
45 enable_comparison = true;
46 } else if arg == "--timing" {
47 show_timing = true;
48 } else if !arg.starts_with('-') {
49 scenario_filter = Some(arg.clone());
50 } else {
51 eprintln!("Error: unknown option '{}'", arg);
52 print_help(program_name);
53 return;
54 }
55
56 i += 1;
57 }
58
59 println!("\n🚀 JSON Evaluation - Basic Example (JSON Schema)\n");
60
61 if enable_comparison {
62 println!("🔍 Comparison: enabled");
63 }
64 if show_timing {
65 println!("⏱️ Internal timing: enabled");
66 }
67 if enable_comparison || show_timing {
68 println!();
69 }
70
71 let samples_dir = Path::new("samples");
72 let mut scenarios = common::discover_scenarios(samples_dir);
73
74 // Filter out MessagePack scenarios - only use JSON
75 scenarios.retain(|s| !s.is_msgpack);
76
77 // Filter scenarios if a filter is provided
78 if let Some(ref filter) = scenario_filter {
79 scenarios.retain(|s| s.name.contains(filter));
80 println!("📋 Filtering scenarios matching: '{}'\n", filter);
81 }
82
83 if scenarios.is_empty() {
84 if let Some(filter) = scenario_filter {
85 println!(
86 "ℹ️ No scenarios found matching '{}' in `{}`.",
87 filter,
88 samples_dir.display()
89 );
90 } else {
91 println!(
92 "ℹ️ No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
93 samples_dir.display()
94 );
95 }
96 return;
97 }
98
99 println!("📊 Found {} scenario(s)\n", scenarios.len());
100
101 let mut total_parse_time = std::time::Duration::ZERO;
102 let mut total_eval_time = std::time::Duration::ZERO;
103 let mut successful_scenarios = 0;
104 let mut comparison_failures = 0;
105
106 for scenario in &scenarios {
107 println!("==============================");
108 println!("Scenario: {}", scenario.name);
109 println!("Schema: {} ({})",
110 scenario.schema_path.display(),
111 if scenario.is_msgpack { "MessagePack" } else { "JSON" }
112 );
113 println!("Data: {}\n", scenario.data_path.display());
114
115 // Clear timing data from previous scenarios
116 if show_timing {
117 json_eval_rs::enable_timing();
118 json_eval_rs::clear_timing_data();
119 }
120
121 let data_str = fs::read_to_string(&scenario.data_path)
122 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
123
124 // Step 1: Parse schema (JSONEval::new)
125 let parse_start = Instant::now();
126
127 let schema_str = fs::read_to_string(&scenario.schema_path)
128 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
129
130 let mut eval = JSONEval::new(&schema_str, None, Some(&data_str))
131 .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e));
132
133 let parse_time = parse_start.elapsed();
134 println!(" 📝 Parse (new): {:?}", parse_time);
135
136 // Step 2: Evaluate
137 let eval_start = Instant::now();
138
139 eval.evaluate(&data_str, Some("{}"), None)
140 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
141
142 let evaluated_schema = eval.get_evaluated_schema(false);
143 let schema_value = eval.get_schema_value();
144 let eval_time = eval_start.elapsed();
145
146 println!(" ⚡ Eval: {:?}", eval_time);
147 println!(" ⏱️ Total: {:?}\n", parse_time + eval_time);
148
149 // Print detailed timing breakdown if --timing flag is set
150 if show_timing {
151 json_eval_rs::print_timing_summary();
152 }
153
154 total_parse_time += parse_time;
155 total_eval_time += eval_time;
156 successful_scenarios += 1;
157
158 // Save results
159 let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
160 let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
161 let value_path = samples_dir.join(format!("{}-schema-value.json", scenario.name));
162
163 fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
164 .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
165
166 let mut metadata_obj = Map::new();
167 metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
168 metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
169 metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
170
171 fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
172 .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
173
174 fs::write(&value_path, common::pretty_json(&schema_value))
175 .unwrap_or_else(|e| panic!("failed to write {}: {}", value_path.display(), e));
176
177 println!("✅ Results saved:");
178 println!(" - {}", evaluated_path.display());
179 println!(" - {}", parsed_path.display());
180 println!(" - {}\n", value_path.display());
181
182 // Optional comparison
183 if enable_comparison {
184 if let Some(comp_path) = &scenario.comparison_path {
185 if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
186 comparison_failures += 1;
187 }
188 println!();
189 }
190 }
191 }
192
193 // Print summary
194 println!("{}", "=".repeat(50));
195 println!("📊 Summary");
196 println!("{}", "=".repeat(50));
197 println!("Total scenarios run: {}", successful_scenarios);
198 println!("Total parse time: {:?}", total_parse_time);
199 println!("Total eval time: {:?}", total_eval_time);
200 println!("Total time: {:?}", total_parse_time + total_eval_time);
201
202 if successful_scenarios > 1 {
203 println!("\nAverage per scenario:");
204 println!(" Parse: {:?}", total_parse_time / successful_scenarios as u32);
205 println!(" Eval: {:?}", total_eval_time / successful_scenarios as u32);
206 }
207
208 if enable_comparison {
209 println!("Comparison failures: {}", comparison_failures);
210 }
211
212 println!("\n✅ All scenarios completed!\n");
213}

30fn main() {
31 let args: Vec<String> = std::env::args().collect();
32 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic_parsed");
33
34 let mut scenario_filter: Option<String> = None;
35 let mut enable_comparison = false;
36 let mut show_timing = false;
37 let mut i = 1;
38
39 // Parse arguments
40 while i < args.len() {
41 let arg = &args[i];
42
43 if arg == "-h" || arg == "--help" {
44 print_help(program_name);
45 return;
46 } else if arg == "--compare" {
47 enable_comparison = true;
48 } else if arg == "--timing" {
49 show_timing = true;
50 } else if !arg.starts_with('-') {
51 scenario_filter = Some(arg.clone());
52 } else {
53 eprintln!("Error: unknown option '{}'", arg);
54 print_help(program_name);
55 return;
56 }
57
58 i += 1;
59 }
60
61 println!("\n🚀 JSON Evaluation - Basic Example (ParsedSchema)\n");
62 println!("📦 Using Arc<ParsedSchema> for efficient caching\n");
63
64 if enable_comparison {
65 println!("🔍 Comparison: enabled");
66 }
67 if show_timing {
68 println!("⏱️ Internal timing: enabled");
69 }
70 if enable_comparison || show_timing {
71 println!();
72 }
73
74 let samples_dir = Path::new("samples");
75 let mut scenarios = common::discover_scenarios(samples_dir);
76
77 // Filter scenarios if a filter is provided
78 if let Some(ref filter) = scenario_filter {
79 scenarios.retain(|s| s.name.contains(filter));
80 println!("📋 Filtering scenarios matching: '{}'\n", filter);
81 }
82
83 if scenarios.is_empty() {
84 if let Some(filter) = scenario_filter {
85 println!(
86 "ℹ️ No scenarios found matching '{}' in `{}`.",
87 filter,
88 samples_dir.display()
89 );
90 } else {
91 println!(
92 "ℹ️ No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
93 samples_dir.display()
94 );
95 }
96 return;
97 }
98
99 println!("📊 Found {} scenario(s)\n", scenarios.len());
100
101 let mut total_parse_time = std::time::Duration::ZERO;
102 let mut total_eval_time = std::time::Duration::ZERO;
103 let mut successful_scenarios = 0;
104 let mut comparison_failures = 0;
105
106 for scenario in &scenarios {
107 println!("==============================");
108 println!("Scenario: {}", scenario.name);
109 println!("Schema: {} ({})",
110 scenario.schema_path.display(),
111 if scenario.is_msgpack { "MessagePack" } else { "JSON" }
112 );
113 println!("Data: {}\n", scenario.data_path.display());
114
115 // Clear timing data from previous scenarios
116 if show_timing {
117 json_eval_rs::enable_timing();
118 json_eval_rs::clear_timing_data();
119 }
120
121 let data_str = fs::read_to_string(&scenario.data_path)
122 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
123
124 // Step 1: Parse schema once
125 let parse_start = Instant::now();
126 let parsed_schema = if scenario.is_msgpack {
127 let schema_msgpack = fs::read(&scenario.schema_path)
128 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
129 println!(" 📦 MessagePack schema size: {} bytes", schema_msgpack.len());
130 Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
131 .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
132 } else {
133 let schema_str = fs::read_to_string(&scenario.schema_path)
134 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
135 Arc::new(ParsedSchema::parse(&schema_str)
136 .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
137 };
138 let parse_time = parse_start.elapsed();
139 println!(" 📝 Schema parsing: {:?}", parse_time);
140
141 // Step 2: Create JSONEval from ParsedSchema (reuses compiled logic)
142 let eval_start = Instant::now();
143 let mut eval = JSONEval::with_parsed_schema(
144 parsed_schema.clone(), // Arc::clone is cheap!
145 Some("{}"),
146 Some(&data_str)
147 ).unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e));
148
149 eval.evaluate(&data_str, Some("{}"), None)
150 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
151
152 let evaluated_schema = eval.get_evaluated_schema(false);
153 let eval_time = eval_start.elapsed();
154
155 println!(" ⚡ Eval: {:?}", eval_time);
156 println!(" ⏱️ Total: {:?}\n", parse_time + eval_time);
157
158 // Print detailed timing breakdown if --timing flag is set
159 if show_timing {
160 json_eval_rs::print_timing_summary();
161 }
162
163 total_parse_time += parse_time;
164 total_eval_time += eval_time;
165 successful_scenarios += 1;
166
167 // Save results
168 let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
169 let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
170
171 fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
172 .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
173
174 let mut metadata_obj = Map::new();
175 metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
176 metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
177 metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
178
179 fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
180 .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
181
182 println!("✅ Results saved:");
183 println!(" - {}", evaluated_path.display());
184 println!(" - {}\n", parsed_path.display());
185
186 // Optional comparison
187 if enable_comparison {
188 if let Some(comp_path) = &scenario.comparison_path {
189 if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
190 comparison_failures += 1;
191 }
192 println!();
193 }
194 }
195 }
196
197 // Print summary
198 println!("{}", "=".repeat(50));
199 println!("📊 Summary");
200 println!("{}", "=".repeat(50));
201 println!("Total scenarios run: {}", successful_scenarios);
202 println!("Total parsing time: {:?}", total_parse_time);
203 println!("Total evaluation time: {:?}", total_eval_time);
204 println!("Total time: {:?}", total_parse_time + total_eval_time);
205
206 if successful_scenarios > 1 {
207 println!("\nAverage per scenario:");
208 println!(" Parsing: {:?}", total_parse_time / successful_scenarios as u32);
209 println!(" Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
210 }
211
212 if enable_comparison {
213 println!("\nComparison failures: {}", comparison_failures);
214 }
215
216 println!("\n✅ All scenarios completed!\n");
217}
Source§impl JSONEval
impl JSONEval
Sourcepub fn get_evaluated_schema(&self, include_hidden: bool) -> Value
pub fn get_evaluated_schema(&self, include_hidden: bool) -> Value
Get the fully evaluated schema
§Arguments
include_hidden - If true, hidden fields are included (but marked). If false, hidden fields are removed from the output.
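A minimal sketch of the two include_hidden modes (not taken from the repository). It assumes JSONEval is used from the json_eval_rs crate root and that eval has already been evaluated, as in the examples below.

// Sketch: contrast include_hidden = true vs false on an already-evaluated instance.
fn dump_both_views(eval: &json_eval_rs::JSONEval) {
    // true: hidden fields stay in the tree but are marked, handy when debugging conditions
    let with_hidden = eval.get_evaluated_schema(true);
    // false: hidden fields are stripped, the shape a client would normally render
    let without_hidden = eval.get_evaluated_schema(false);
    println!("with hidden:    {with_hidden}");
    println!("without hidden: {without_hidden}");
}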
Examples found in repository
38fn demo_local_cache() -> Result<(), Box<dyn std::error::Error>> {
39 println!("📦 Example 1: Local Cache Instance");
40 println!("Creating a dedicated cache for this application...\n");
41
42 let cache = ParsedSchemaCache::new();
43
44 // Simple schema
45 let schema_json = r#"{
46 "$params": {
47 "rate": { "type": "number" }
48 },
49 "result": {
50 "type": "number",
51 "title": "Calculated Result",
52 "$evaluation": {
53 "logic": { "*": [{"var": "$rate"}, 100] }
54 }
55 }
56 }"#;
57
58 // Parse and cache with a custom key
59 println!("📝 Parsing schema and caching with key 'calculation-v1'...");
60 let parsed = ParsedSchema::parse(schema_json)?;
61 cache.insert("calculation-v1".to_string(), Arc::new(parsed));
62
63 println!("✅ Schema cached successfully");
64 println!(" Cache size: {} entries", cache.len());
65 println!(" Keys: {:?}\n", cache.keys());
66
67 // Retrieve and use cached schema
68 println!("🔍 Retrieving cached schema...");
69 if let Some(cached_schema) = cache.get("calculation-v1") {
70 println!("✅ Retrieved from cache");
71
72 // Create JSONEval from cached ParsedSchema
73 let mut eval = JSONEval::with_parsed_schema(cached_schema, Some(r#"{"rate": 1.5}"#), None)?;
74 eval.evaluate("{}", None, None)?;
75
76 let evaluated = eval.get_evaluated_schema(false);
77 let result = evaluated.pointer("/result")
78 .and_then(|v| v.as_f64())
79 .unwrap_or(0.0);
80 println!(" Evaluation result: {}\n", result);
81 }
82
83 // Check cache stats
84 let stats = cache.stats();
85 println!("📊 Cache Statistics: {}", stats);
86
87 // Remove entry
88 println!("\n🗑️ Removing 'calculation-v1' from cache...");
89 cache.remove("calculation-v1");
90 println!(" Cache size after removal: {}", cache.len());
91
92 Ok(())
93}
94
95fn demo_global_cache() -> Result<(), Box<dyn std::error::Error>> {
96 println!("🌍 Example 2: Global Cache Instance");
97 println!("Using the built-in PARSED_SCHEMA_CACHE...\n");
98
99 let schema_json = r#"{
100 "$params": {
101 "x": { "type": "number" },
102 "y": { "type": "number" }
103 },
104 "sum": {
105 "type": "number",
106 "$evaluation": { "+": [{"var": "$x"}, {"var": "$y"}] }
107 }
108 }"#;
109
110 // Use global cache
111 println!("📝 Caching schema globally with key 'math-operations'...");
112 let parsed = ParsedSchema::parse(schema_json)?;
113 PARSED_SCHEMA_CACHE.insert("math-operations".to_string(), Arc::new(parsed));
114
115 println!("✅ Schema cached globally");
116 println!(" Global cache size: {}\n", PARSED_SCHEMA_CACHE.len());
117
118 // Access from anywhere in the application
119 simulate_another_function()?;
120
121 // Clean up
122 println!("\n🧹 Clearing global cache...");
123 PARSED_SCHEMA_CACHE.clear();
124 println!(" Global cache size: {}", PARSED_SCHEMA_CACHE.len());
125
126 Ok(())
127}
128
129fn simulate_another_function() -> Result<(), Box<dyn std::error::Error>> {
130 println!("🔄 In another function, accessing global cache...");
131
132 if let Some(cached) = PARSED_SCHEMA_CACHE.get("math-operations") {
133 println!("✅ Retrieved schema from global cache");
134
135 let mut eval = JSONEval::with_parsed_schema(cached, Some(r#"{"x": 10, "y": 20}"#), None)?;
136 eval.evaluate("{}", None, None)?;
137
138 let evaluated = eval.get_evaluated_schema(false);
139 let sum = evaluated.pointer("/sum")
140 .and_then(|v| v.as_f64())
141 .unwrap_or(0.0);
142 println!(" Result: {}", sum);
143 }
144
145 Ok(())
146}
More examples
6fn main() {
7 println!("\n🚀 JSON Evaluation - SPAJ Toggle Example\n");
8
9 let schema_path = Path::new("samples/spaj.json");
10 let schema_str = fs::read_to_string(schema_path).expect("Failed to read schema");
11
12 // Initial data with minimal context required
13 let context_str = json!({
14 "agentProfile": { "sob": "AG" }
15 }).to_string();
16
17 let initial_data = json!({
18 "illustration": {
19 "basicinformation": {
20 "print_polflag": false
21 }
22 }
23 }).to_string();
24
25 // Initialize logic
26 let mut eval = JSONEval::new(&schema_str, Some(&context_str), Some(&initial_data))
27 .expect("Failed to create JSONEval");
28
29 // Helper to check visibility
30 let check_visibility = |eval: &mut JSONEval, expected_hidden: bool, step: &str| {
31 let result = eval.get_evaluated_schema(true);
32 let hidden = result.pointer("/illustration/properties/basicinformation/properties/print_poladdress/condition/hidden")
33 .and_then(|v| v.as_bool());
34
35 match hidden {
36 Some(val) => {
37 if val == expected_hidden {
38 println!("✅ {}: Hidden = {} (Expected: {})", step, val, expected_hidden);
39 } else {
40 println!("❌ {}: Hidden = {} (Expected: {})", step, val, expected_hidden);
41 }
42 },
43 None => println!("❌ {}: 'hidden' property not found", step),
44 }
45 };
46
47 // Step 1: Initial state (false)
48 println!("Step 1: Initial State (print_polflag: false)");
49 eval.evaluate(&initial_data, Some(&context_str), None).expect("Evaluation failed");
50 check_visibility(&mut eval, true, "Initial check");
51
52 // Step 2: Toggle to true
53 println!("\nStep 2: Toggle True (print_polflag: true)");
54 let data_true = json!({
55 "illustration": {
56 "basicinformation": {
57 "print_polflag": true
58 }
59 }
60 }).to_string();
61 eval.evaluate(&data_true, Some(&context_str), None).expect("Evaluation failed");
62 check_visibility(&mut eval, false, "Toggle ON check");
63
64 // Step 3: Toggle back to false
65 println!("\nStep 3: Toggle False (print_polflag: false)");
66 let data_false = json!({
67 "illustration": {
68 "basicinformation": {
69 "print_polflag": false
70 }
71 }
72 }).to_string();
73 eval.evaluate(&data_false, Some(&context_str), None).expect("Evaluation failed");
74
75 let hidden_path = "#/illustration/properties/basicinformation/properties/print_poladdress/condition/hidden";
76 if let Some(deps) = eval.dependencies.get(hidden_path) {
77 println!("Debug: Dependencies for hidden: {:?}", deps);
78 } else {
79 println!("Debug: No dependencies found for hidden path");
80 }
81
82 // Debug: Print current flag value
83 if let Some(val) = eval.get_evaluated_schema(true).pointer("/illustration/properties/basicinformation/properties/print_polflag/value") {
84 println!("Debug: print_polflag value is: {}", val);
85 }
86
87 check_visibility(&mut eval, true, "Toggle OFF check");
88}
28fn main() {
29 let args: Vec<String> = std::env::args().collect();
30 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic_msgpack");
31
32 let mut scenario_filter: Option<String> = None;
33 let mut enable_comparison = false;
34 let mut show_timing = false;
35 let mut i = 1;
36
37 // Parse arguments
38 while i < args.len() {
39 let arg = &args[i];
40
41 if arg == "-h" || arg == "--help" {
42 print_help(program_name);
43 return;
44 } else if arg == "--compare" {
45 enable_comparison = true;
46 } else if arg == "--timing" {
47 show_timing = true;
48 } else if !arg.starts_with('-') {
49 scenario_filter = Some(arg.clone());
50 } else {
51 eprintln!("Error: unknown option '{}'", arg);
52 print_help(program_name);
53 return;
54 }
55
56 i += 1;
57 }
58
59 println!("\n🚀 JSON Evaluation - Basic Example (MessagePack Schema)\n");
60
61 if enable_comparison {
62 println!("🔍 Comparison: enabled");
63 }
64 if show_timing {
65 println!("⏱️ Internal timing: enabled");
66 }
67 if enable_comparison || show_timing {
68 println!();
69 }
70
71 let samples_dir = Path::new("samples");
72 let mut scenarios = common::discover_scenarios(samples_dir);
73
74 // Filter to only MessagePack scenarios
75 scenarios.retain(|s| s.is_msgpack);
76
77 // Filter scenarios if a filter is provided
78 if let Some(ref filter) = scenario_filter {
79 scenarios.retain(|s| s.name.contains(filter));
80 println!("📋 Filtering scenarios matching: '{}'\n", filter);
81 }
82
83 if scenarios.is_empty() {
84 if let Some(filter) = scenario_filter {
85 println!(
86 "ℹ️ No MessagePack scenarios found matching '{}' in `{}`.",
87 filter,
88 samples_dir.display()
89 );
90 } else {
91 println!(
92 "ℹ️ No MessagePack scenarios discovered in `{}`. Add files like `name.bform` and `name-data.json`.",
93 samples_dir.display()
94 );
95 }
96 return;
97 }
98
99 println!("📊 Found {} MessagePack scenario(s)\n", scenarios.len());
100
101 let mut total_parse_time = std::time::Duration::ZERO;
102 let mut total_eval_time = std::time::Duration::ZERO;
103 let mut successful_scenarios = 0;
104 let mut comparison_failures = 0;
105
106 for scenario in &scenarios {
107 println!("==============================");
108 println!("Scenario: {}", scenario.name);
109 println!("Schema: {} (MessagePack)", scenario.schema_path.display());
110 println!("Data: {}\n", scenario.data_path.display());
111
112 // Clear timing data from previous scenarios
113 if show_timing {
114 json_eval_rs::enable_timing();
115 json_eval_rs::clear_timing_data();
116 }
117
118 let data_str = fs::read_to_string(&scenario.data_path)
119 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
120
121 // Step 1: Parse schema (new_from_msgpack)
122 let parse_start = Instant::now();
123
124 let schema_msgpack = fs::read(&scenario.schema_path)
125 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
126
127 println!(" 📦 MessagePack schema size: {} bytes", schema_msgpack.len());
128
129 let mut eval = JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
130 .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e));
131
132 let parse_time = parse_start.elapsed();
133 println!(" 📝 Parse (msgpack): {:?}", parse_time);
134
135 // Step 2: Evaluate
136 let eval_start = Instant::now();
137
138 eval.evaluate(&data_str, Some("{}"), None)
139 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
140
141 let evaluated_schema = eval.get_evaluated_schema(false);
142 let eval_time = eval_start.elapsed();
143
144 println!(" ⚡ Eval: {:?}", eval_time);
145 println!(" ⏱️ Total: {:?}\n", parse_time + eval_time);
146
147 // Print detailed timing breakdown if --timing flag is set
148 if show_timing {
149 json_eval_rs::print_timing_summary();
150 }
151
152 total_parse_time += parse_time;
153 total_eval_time += eval_time;
154 successful_scenarios += 1;
155
156 // Save results
157 let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
158 let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
159
160 fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
161 .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
162
163 let mut metadata_obj = Map::new();
164 metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
165 metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
166 metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
167
168 fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
169 .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
170
171 println!("✅ Results saved:");
172 println!(" - {}", evaluated_path.display());
173 println!(" - {}\n", parsed_path.display());
174
175 // Optional comparison
176 if enable_comparison {
177 if let Some(comp_path) = &scenario.comparison_path {
178 if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
179 comparison_failures += 1;
180 }
181 println!();
182 }
183 }
184 }
185
186 // Print summary
187 println!("{}", "=".repeat(50));
188 println!("📊 Summary");
189 println!("{}", "=".repeat(50));
190 println!("Total scenarios run: {}", successful_scenarios);
191 println!("Total parse time: {:?}", total_parse_time);
192 println!("Total eval time: {:?}", total_eval_time);
193 println!("Total time: {:?}", total_parse_time + total_eval_time);
194
195 if successful_scenarios > 1 {
196 println!("\nAverage per scenario:");
197 println!(" Parse: {:?}", total_parse_time / successful_scenarios as u32);
198 println!(" Eval: {:?}", total_eval_time / successful_scenarios as u32);
199 }
200
201 if enable_comparison {
202 println!("Comparison failures: {}", comparison_failures);
203 }
204
205 println!("\n✅ All scenarios completed!\n");
206}
28fn main() {
29 let args: Vec<String> = std::env::args().collect();
30 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic");
31
32 let mut scenario_filter: Option<String> = None;
33 let mut enable_comparison = false;
34 let mut show_timing = false;
35 let mut i = 1;
36
37 // Parse arguments
38 while i < args.len() {
39 let arg = &args[i];
40
41 if arg == "-h" || arg == "--help" {
42 print_help(program_name);
43 return;
44 } else if arg == "--compare" {
45 enable_comparison = true;
46 } else if arg == "--timing" {
47 show_timing = true;
48 } else if !arg.starts_with('-') {
49 scenario_filter = Some(arg.clone());
50 } else {
51 eprintln!("Error: unknown option '{}'", arg);
52 print_help(program_name);
53 return;
54 }
55
56 i += 1;
57 }
58
59 println!("\n🚀 JSON Evaluation - Basic Example (JSON Schema)\n");
60
61 if enable_comparison {
62 println!("🔍 Comparison: enabled");
63 }
64 if show_timing {
65 println!("⏱️ Internal timing: enabled");
66 }
67 if enable_comparison || show_timing {
68 println!();
69 }
70
71 let samples_dir = Path::new("samples");
72 let mut scenarios = common::discover_scenarios(samples_dir);
73
74 // Filter out MessagePack scenarios - only use JSON
75 scenarios.retain(|s| !s.is_msgpack);
76
77 // Filter scenarios if a filter is provided
78 if let Some(ref filter) = scenario_filter {
79 scenarios.retain(|s| s.name.contains(filter));
80 println!("📋 Filtering scenarios matching: '{}'\n", filter);
81 }
82
83 if scenarios.is_empty() {
84 if let Some(filter) = scenario_filter {
85 println!(
86 "ℹ️ No scenarios found matching '{}' in `{}`.",
87 filter,
88 samples_dir.display()
89 );
90 } else {
91 println!(
92 "ℹ️ No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
93 samples_dir.display()
94 );
95 }
96 return;
97 }
98
99 println!("📊 Found {} scenario(s)\n", scenarios.len());
100
101 let mut total_parse_time = std::time::Duration::ZERO;
102 let mut total_eval_time = std::time::Duration::ZERO;
103 let mut successful_scenarios = 0;
104 let mut comparison_failures = 0;
105
106 for scenario in &scenarios {
107 println!("==============================");
108 println!("Scenario: {}", scenario.name);
109 println!("Schema: {} ({})",
110 scenario.schema_path.display(),
111 if scenario.is_msgpack { "MessagePack" } else { "JSON" }
112 );
113 println!("Data: {}\n", scenario.data_path.display());
114
115 // Clear timing data from previous scenarios
116 if show_timing {
117 json_eval_rs::enable_timing();
118 json_eval_rs::clear_timing_data();
119 }
120
121 let data_str = fs::read_to_string(&scenario.data_path)
122 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
123
124 // Step 1: Parse schema (JSONEval::new)
125 let parse_start = Instant::now();
126
127 let schema_str = fs::read_to_string(&scenario.schema_path)
128 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
129
130 let mut eval = JSONEval::new(&schema_str, None, Some(&data_str))
131 .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e));
132
133 let parse_time = parse_start.elapsed();
134 println!(" 📝 Parse (new): {:?}", parse_time);
135
136 // Step 2: Evaluate
137 let eval_start = Instant::now();
138
139 eval.evaluate(&data_str, Some("{}"), None)
140 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
141
142 let evaluated_schema = eval.get_evaluated_schema(false);
143 let schema_value = eval.get_schema_value();
144 let eval_time = eval_start.elapsed();
145
146 println!(" ⚡ Eval: {:?}", eval_time);
147 println!(" ⏱️ Total: {:?}\n", parse_time + eval_time);
148
149 // Print detailed timing breakdown if --timing flag is set
150 if show_timing {
151 json_eval_rs::print_timing_summary();
152 }
153
154 total_parse_time += parse_time;
155 total_eval_time += eval_time;
156 successful_scenarios += 1;
157
158 // Save results
159 let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
160 let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
161 let value_path = samples_dir.join(format!("{}-schema-value.json", scenario.name));
162
163 fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
164 .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
165
166 let mut metadata_obj = Map::new();
167 metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
168 metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
169 metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
170
171 fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
172 .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
173
174 fs::write(&value_path, common::pretty_json(&schema_value))
175 .unwrap_or_else(|e| panic!("failed to write {}: {}", value_path.display(), e));
176
177 println!("✅ Results saved:");
178 println!(" - {}", evaluated_path.display());
179 println!(" - {}", parsed_path.display());
180 println!(" - {}\n", value_path.display());
181
182 // Optional comparison
183 if enable_comparison {
184 if let Some(comp_path) = &scenario.comparison_path {
185 if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
186 comparison_failures += 1;
187 }
188 println!();
189 }
190 }
191 }
192
193 // Print summary
194 println!("{}", "=".repeat(50));
195 println!("📊 Summary");
196 println!("{}", "=".repeat(50));
197 println!("Total scenarios run: {}", successful_scenarios);
198 println!("Total parse time: {:?}", total_parse_time);
199 println!("Total eval time: {:?}", total_eval_time);
200 println!("Total time: {:?}", total_parse_time + total_eval_time);
201
202 if successful_scenarios > 1 {
203 println!("\nAverage per scenario:");
204 println!(" Parse: {:?}", total_parse_time / successful_scenarios as u32);
205 println!(" Eval: {:?}", total_eval_time / successful_scenarios as u32);
206 }
207
208 if enable_comparison {
209 println!("Comparison failures: {}", comparison_failures);
210 }
211
212 println!("\n✅ All scenarios completed!\n");
213}
30fn main() {
31 let args: Vec<String> = std::env::args().collect();
32 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic_parsed");
33
34 let mut scenario_filter: Option<String> = None;
35 let mut enable_comparison = false;
36 let mut show_timing = false;
37 let mut i = 1;
38
39 // Parse arguments
40 while i < args.len() {
41 let arg = &args[i];
42
43 if arg == "-h" || arg == "--help" {
44 print_help(program_name);
45 return;
46 } else if arg == "--compare" {
47 enable_comparison = true;
48 } else if arg == "--timing" {
49 show_timing = true;
50 } else if !arg.starts_with('-') {
51 scenario_filter = Some(arg.clone());
52 } else {
53 eprintln!("Error: unknown option '{}'", arg);
54 print_help(program_name);
55 return;
56 }
57
58 i += 1;
59 }
60
61 println!("\n🚀 JSON Evaluation - Basic Example (ParsedSchema)\n");
62 println!("📦 Using Arc<ParsedSchema> for efficient caching\n");
63
64 if enable_comparison {
65 println!("🔍 Comparison: enabled");
66 }
67 if show_timing {
68 println!("⏱️ Internal timing: enabled");
69 }
70 if enable_comparison || show_timing {
71 println!();
72 }
73
74 let samples_dir = Path::new("samples");
75 let mut scenarios = common::discover_scenarios(samples_dir);
76
77 // Filter scenarios if a filter is provided
78 if let Some(ref filter) = scenario_filter {
79 scenarios.retain(|s| s.name.contains(filter));
80 println!("📋 Filtering scenarios matching: '{}'\n", filter);
81 }
82
83 if scenarios.is_empty() {
84 if let Some(filter) = scenario_filter {
85 println!(
86 "ℹ️ No scenarios found matching '{}' in `{}`.",
87 filter,
88 samples_dir.display()
89 );
90 } else {
91 println!(
92 "ℹ️ No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
93 samples_dir.display()
94 );
95 }
96 return;
97 }
98
99 println!("📊 Found {} scenario(s)\n", scenarios.len());
100
101 let mut total_parse_time = std::time::Duration::ZERO;
102 let mut total_eval_time = std::time::Duration::ZERO;
103 let mut successful_scenarios = 0;
104 let mut comparison_failures = 0;
105
106 for scenario in &scenarios {
107 println!("==============================");
108 println!("Scenario: {}", scenario.name);
109 println!("Schema: {} ({})",
110 scenario.schema_path.display(),
111 if scenario.is_msgpack { "MessagePack" } else { "JSON" }
112 );
113 println!("Data: {}\n", scenario.data_path.display());
114
115 // Clear timing data from previous scenarios
116 if show_timing {
117 json_eval_rs::enable_timing();
118 json_eval_rs::clear_timing_data();
119 }
120
121 let data_str = fs::read_to_string(&scenario.data_path)
122 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
123
124 // Step 1: Parse schema once
125 let parse_start = Instant::now();
126 let parsed_schema = if scenario.is_msgpack {
127 let schema_msgpack = fs::read(&scenario.schema_path)
128 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
129 println!(" 📦 MessagePack schema size: {} bytes", schema_msgpack.len());
130 Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
131 .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
132 } else {
133 let schema_str = fs::read_to_string(&scenario.schema_path)
134 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
135 Arc::new(ParsedSchema::parse(&schema_str)
136 .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
137 };
138 let parse_time = parse_start.elapsed();
139 println!(" 📝 Schema parsing: {:?}", parse_time);
140
141 // Step 2: Create JSONEval from ParsedSchema (reuses compiled logic)
142 let eval_start = Instant::now();
143 let mut eval = JSONEval::with_parsed_schema(
144 parsed_schema.clone(), // Arc::clone is cheap!
145 Some("{}"),
146 Some(&data_str)
147 ).unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e));
148
149 eval.evaluate(&data_str, Some("{}"), None)
150 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
151
152 let evaluated_schema = eval.get_evaluated_schema(false);
153 let eval_time = eval_start.elapsed();
154
155 println!(" ⚡ Eval: {:?}", eval_time);
156 println!(" ⏱️ Total: {:?}\n", parse_time + eval_time);
157
158 // Print detailed timing breakdown if --timing flag is set
159 if show_timing {
160 json_eval_rs::print_timing_summary();
161 }
162
163 total_parse_time += parse_time;
164 total_eval_time += eval_time;
165 successful_scenarios += 1;
166
167 // Save results
168 let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
169 let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
170
171 fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
172 .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
173
174 let mut metadata_obj = Map::new();
175 metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
176 metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
177 metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
178
179 fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
180 .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
181
182 println!("✅ Results saved:");
183 println!(" - {}", evaluated_path.display());
184 println!(" - {}\n", parsed_path.display());
185
186 // Optional comparison
187 if enable_comparison {
188 if let Some(comp_path) = &scenario.comparison_path {
189 if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
190 comparison_failures += 1;
191 }
192 println!();
193 }
194 }
195 }
196
197 // Print summary
198 println!("{}", "=".repeat(50));
199 println!("📊 Summary");
200 println!("{}", "=".repeat(50));
201 println!("Total scenarios run: {}", successful_scenarios);
202 println!("Total parsing time: {:?}", total_parse_time);
203 println!("Total evaluation time: {:?}", total_eval_time);
204 println!("Total time: {:?}", total_parse_time + total_eval_time);
205
206 if successful_scenarios > 1 {
207 println!("\nAverage per scenario:");
208 println!(" Parsing: {:?}", total_parse_time / successful_scenarios as u32);
209 println!(" Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
210 }
211
212 if enable_comparison {
213 println!("\nComparison failures: {}", comparison_failures);
214 }
215
216 println!("\n✅ All scenarios completed!\n");
217}
31fn main() {
32 let args: Vec<String> = std::env::args().collect();
33 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("benchmark");
34
35 let mut iterations = 1usize;
36 let mut scenario_filter: Option<String> = None;
37 let mut show_cpu_info = false;
38 let mut use_parsed_schema = false;
39 let mut concurrent_count: Option<usize> = None;
40 let mut enable_comparison = false;
41 let mut show_timing = false;
42 let mut i = 1;
43
44 // Parse arguments
45 while i < args.len() {
46 let arg = &args[i];
47
48 if arg == "-h" || arg == "--help" {
49 print_help(program_name);
50 return;
51 } else if arg == "--cpu-info" {
52 show_cpu_info = true;
53 } else if arg == "--parsed" {
54 use_parsed_schema = true;
55 } else if arg == "--compare" {
56 enable_comparison = true;
57 } else if arg == "--timing" {
58 show_timing = true;
59 } else if arg == "--concurrent" {
60 if i + 1 >= args.len() {
61 eprintln!("Error: {} requires a value", arg);
62 print_help(program_name);
63 return;
64 }
65 i += 1;
66 match args[i].parse::<usize>() {
67 Ok(n) if n > 0 => concurrent_count = Some(n),
68 _ => {
69 eprintln!("Error: concurrent count must be a positive integer, got '{}'", args[i]);
70 return;
71 }
72 }
73 } else if arg == "-i" || arg == "--iterations" {
74 if i + 1 >= args.len() {
75 eprintln!("Error: {} requires a value", arg);
76 print_help(program_name);
77 return;
78 }
79 i += 1;
80 match args[i].parse::<usize>() {
81 Ok(n) if n > 0 => iterations = n,
82 _ => {
83 eprintln!("Error: iterations must be a positive integer, got '{}'", args[i]);
84 return;
85 }
86 }
87 } else if !arg.starts_with('-') {
88 scenario_filter = Some(arg.clone());
89 } else {
90 eprintln!("Error: unknown option '{}'", arg);
91 print_help(program_name);
92 return;
93 }
94
95 i += 1;
96 }
97
98 println!("\n🚀 JSON Evaluation - Benchmark\n");
99
100 // Show CPU info if requested or if running benchmarks
101 if show_cpu_info || iterations > 1 || concurrent_count.is_some() {
102 common::print_cpu_info();
103 }
104
105 if use_parsed_schema {
106 println!("📦 Mode: ParsedSchema (parse once, reuse for all iterations)\n");
107 }
108
109 if let Some(count) = concurrent_count {
110 println!("🔀 Concurrent evaluations: {} threads\n", count);
111 } else if iterations > 1 {
112 println!("🔄 Iterations per scenario: {}\n", iterations);
113 }
114
115 if enable_comparison {
116 println!("🔍 Comparison: enabled");
117 }
118 if show_timing {
119 println!("⏱️ Internal timing: enabled");
120 }
121 if enable_comparison || show_timing {
122 println!();
123 }
124
125 let samples_dir = Path::new("samples");
126 let mut scenarios = common::discover_scenarios(samples_dir);
127
128 // Filter scenarios if a filter is provided
129 if let Some(ref filter) = scenario_filter {
130 scenarios.retain(|s| s.name.contains(filter));
131 println!("📋 Filtering scenarios matching: '{}'\n", filter);
132 }
133
134 if scenarios.is_empty() {
135 if let Some(filter) = scenario_filter {
136 println!(
137 "ℹ️ No scenarios found matching '{}' in `{}`.",
138 filter,
139 samples_dir.display()
140 );
141 } else {
142 println!(
143 "ℹ️ No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
144 samples_dir.display()
145 );
146 }
147 return;
148 }
149
150 println!("📊 Found {} scenario(s)\n", scenarios.len());
151
152 let mut total_parse_time = std::time::Duration::ZERO;
153 let mut total_eval_time = std::time::Duration::ZERO;
154 let mut successful_scenarios = 0;
155 let mut comparison_failures = 0;
156
157 for scenario in &scenarios {
158 println!("==============================");
159 println!("Scenario: {}", scenario.name);
160 println!("Schema: {} ({})",
161 scenario.schema_path.display(),
162 if scenario.is_msgpack { "MessagePack" } else { "JSON" }
163 );
164 println!("Data: {}\n", scenario.data_path.display());
165
166 // Clear timing data from previous scenarios
167 if show_timing {
168 json_eval_rs::enable_timing();
169 json_eval_rs::clear_timing_data();
170 }
171
172 let data_str = fs::read_to_string(&scenario.data_path)
173 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
174
175 println!("Running evaluation...\n");
176
177 let (parse_time, eval_time, evaluated_schema, eval, iteration_times) = if use_parsed_schema {
178 // ParsedSchema mode: parse once, reuse for all iterations/threads
179 let start_time = Instant::now();
180
181 let parsed_schema = if scenario.is_msgpack {
182 let schema_msgpack = fs::read(&scenario.schema_path)
183 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
184 println!(" 📦 MessagePack schema size: {} bytes", schema_msgpack.len());
185 Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
186 .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
187 } else {
188 let schema_str = fs::read_to_string(&scenario.schema_path)
189 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
190 Arc::new(ParsedSchema::parse(&schema_str)
191 .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
192 };
193
194 let parse_time = start_time.elapsed();
195 println!(" Schema parsing & compilation: {:?}", parse_time);
196
197 // Concurrent mode with ParsedSchema
198 if let Some(thread_count) = concurrent_count {
199 use std::thread;
200
201 let eval_start = Instant::now();
202 let mut handles = vec![];
203
204 for thread_id in 0..thread_count {
205 let parsed_clone = parsed_schema.clone();
206 let data_str_clone = data_str.clone();
207 let iter_count = iterations;
208
209 let handle = thread::spawn(move || {
210 let mut thread_times = Vec::with_capacity(iter_count);
211 let mut last_schema = Value::Null;
212
213 for _ in 0..iter_count {
214 let iter_start = Instant::now();
215 let mut eval_instance = JSONEval::with_parsed_schema(
216 parsed_clone.clone(),
217 Some("{}"),
218 Some(&data_str_clone)
219 ).unwrap();
220
221 eval_instance.evaluate(&data_str_clone, Some("{}"), None).unwrap();
222 last_schema = eval_instance.get_evaluated_schema(false);
223 thread_times.push(iter_start.elapsed());
224 }
225
226 (thread_times, last_schema, thread_id)
227 });
228 handles.push(handle);
229 }
230
231 let mut all_iteration_times = Vec::new();
232 let mut evaluated_schema = Value::Null;
233
234 for handle in handles {
235 let (thread_times, thread_schema, thread_id) = handle.join().unwrap();
236 println!(" Thread {} completed {} iterations", thread_id, thread_times.len());
237 all_iteration_times.extend(thread_times);
238 evaluated_schema = thread_schema; // Use last thread's result
239 }
240
241 let eval_time = eval_start.elapsed();
242
243 // Create a temp eval for metadata export
244 let temp_eval = JSONEval::with_parsed_schema(
245 parsed_schema.clone(),
246 Some("{}"),
247 Some(&data_str)
248 ).unwrap();
249
250 (parse_time, eval_time, evaluated_schema, temp_eval, all_iteration_times)
251 } else {
252 // Sequential iterations with ParsedSchema
253 let eval_start = Instant::now();
254 let mut evaluated_schema = Value::Null;
255 let mut iteration_times = Vec::with_capacity(iterations);
256 let mut eval_instance = JSONEval::with_parsed_schema(
257 parsed_schema.clone(),
258 Some("{}"),
259 Some(&data_str)
260 ).unwrap();
261
262 for iter in 0..iterations {
263 let iter_start = Instant::now();
264 eval_instance.evaluate(&data_str, Some("{}"), None)
265 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
266 evaluated_schema = eval_instance.get_evaluated_schema(false);
267 iteration_times.push(iter_start.elapsed());
268
269 if iterations > 1 && (iter + 1) % 10 == 0 {
270 print!(".");
271 if (iter + 1) % 50 == 0 {
272 println!(" {}/{}", iter + 1, iterations);
273 }
274 }
275 }
276
277 if iterations > 1 && iterations % 50 != 0 {
278 println!(" {}/{}", iterations, iterations);
279 }
280
281 let eval_time = eval_start.elapsed();
282 (parse_time, eval_time, evaluated_schema, eval_instance, iteration_times)
283 }
284 } else {
285 // Traditional mode: parse and create JSONEval each time
286 let start_time = Instant::now();
287 let mut eval = if scenario.is_msgpack {
288 let schema_msgpack = fs::read(&scenario.schema_path)
289 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
290 println!(" 📦 MessagePack schema size: {} bytes", schema_msgpack.len());
291 JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
292 .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e))
293 } else {
294 let schema_str = fs::read_to_string(&scenario.schema_path)
295 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
296 JSONEval::new(&schema_str, None, Some(&data_str))
297 .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
298 };
299 let parse_time = start_time.elapsed();
300 println!(" Schema parsing & compilation: {:?}", parse_time);
301
302 let eval_start = Instant::now();
303 let mut evaluated_schema = Value::Null;
304 let mut iteration_times = Vec::with_capacity(iterations);
305
306 for iter in 0..iterations {
307 let iter_start = Instant::now();
308 eval.evaluate(&data_str, Some("{}"), None)
309 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
310 evaluated_schema = eval.get_evaluated_schema(false);
311 iteration_times.push(iter_start.elapsed());
312
313 if iterations > 1 && (iter + 1) % 10 == 0 {
314 print!(".");
315 if (iter + 1) % 50 == 0 {
316 println!(" {}/{}", iter + 1, iterations);
317 }
318 }
319 }
320
321 if iterations > 1 && iterations % 50 != 0 {
322 println!(" {}/{}", iterations, iterations);
323 }
324
325 let eval_time = eval_start.elapsed();
326 (parse_time, eval_time, evaluated_schema, eval, iteration_times)
327 };
328
329 // Calculate statistics
330 let total_iterations = iteration_times.len();
331 if total_iterations == 1 {
332 println!(" Evaluation: {:?}", eval_time);
333 } else {
334 let avg_time = eval_time / total_iterations as u32;
335 let min_time = iteration_times.iter().min().unwrap();
336 let max_time = iteration_times.iter().max().unwrap();
337
338 println!(" Total evaluation time: {:?}", eval_time);
339 println!(" Total iterations: {}", total_iterations);
340 println!(" Average per iteration: {:?}", avg_time);
341 println!(" Min: {:?} | Max: {:?}", min_time, max_time);
342
343 // Show cache statistics
344 let cache_stats = eval.cache_stats();
345 println!(" Cache: {} entries, {} hits, {} misses ({:.1}% hit rate)",
346 cache_stats.entries,
347 cache_stats.hits,
348 cache_stats.misses,
349 cache_stats.hit_rate * 100.0
350 );
351 }
352
353 let total_time = parse_time + eval_time;
354 println!("⏱️ Execution time: {:?}\n", total_time);
355
356 // Print detailed timing breakdown if --timing flag is set
357 if show_timing {
358 json_eval_rs::print_timing_summary();
359 }
360
361 // Track statistics
362 total_parse_time += parse_time;
363 total_eval_time += eval_time;
364 successful_scenarios += 1;
365
366 let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
367 let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
368
369 fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
370 .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
371
372 let mut metadata_obj = Map::new();
373 metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
374 metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
375
376 fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
377 .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
378
379 println!("✅ Results saved:");
380 println!(" - {}", evaluated_path.display());
381 println!(" - {}\n", parsed_path.display());
382
383 // Optional comparison
384 if enable_comparison {
385 if let Some(comp_path) = &scenario.comparison_path {
386 if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
387 comparison_failures += 1;
388 }
389 println!();
390 }
391 }
392 }
393
394 // Print summary statistics
395 if successful_scenarios > 0 {
396 println!("\n{}", "=".repeat(50));
397 println!("📊 Summary Statistics");
398 println!("{}", "=".repeat(50));
399 println!("Total scenarios run: {}", successful_scenarios);
400 println!("Total parsing time: {:?}", total_parse_time);
401 println!("Total evaluation time: {:?}", total_eval_time);
402 println!("Total time: {:?}", total_parse_time + total_eval_time);
403
404 if successful_scenarios > 1 {
405 println!("\nAverage per scenario:");
406 println!(" Parsing: {:?}", total_parse_time / successful_scenarios as u32);
407 println!(" Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
408 }
409
410 if enable_comparison {
411 println!("\nComparison failures: {}", comparison_failures);
412 }
413
414 println!("\n✅ All scenarios completed successfully!\n");
415 }
416}
Sourcepub fn get_evaluated_schema_with_layout(&self, include_hidden: bool) -> Value
pub fn get_evaluated_schema_with_layout(&self, include_hidden: bool) -> Value
Get evaluated schema with layout resolution
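A hedged usage sketch (not from the repository): the call shape matches get_evaluated_schema, with the cached layout paths resolved into the result. It assumes an already-evaluated eval and that JSONEval is available at the crate root.

fn evaluated_with_layout(eval: &json_eval_rs::JSONEval) -> serde_json::Value {
    // Layout-resolved view of the evaluated schema; hidden fields excluded here.
    eval.get_evaluated_schema_with_layout(false)
}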
Sourcepub fn get_schema_value_by_path(&self, path: &str) -> Option<Value>
pub fn get_schema_value_by_path(&self, path: &str) -> Option<Value>
Get specific schema value by path
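A small illustrative sketch; the path string is hypothetical and only shows the call shape — real paths follow the library's schema-path convention (compare the "#/..." keys used elsewhere in these examples).

fn read_single_value(eval: &json_eval_rs::JSONEval) -> Option<serde_json::Value> {
    // "#/illustration/premium" is an illustrative path, not from a real sample schema.
    eval.get_schema_value_by_path("#/illustration/premium")
}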
Sourcepub fn get_schema_value(&self) -> Value
pub fn get_schema_value(&self) -> Value
Get all schema values (data view). This corresponds to subform.get_schema_value() usage.
Examples found in repository
28fn main() {
29 let args: Vec<String> = std::env::args().collect();
30 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic");
31
32 let mut scenario_filter: Option<String> = None;
33 let mut enable_comparison = false;
34 let mut show_timing = false;
35 let mut i = 1;
36
37 // Parse arguments
38 while i < args.len() {
39 let arg = &args[i];
40
41 if arg == "-h" || arg == "--help" {
42 print_help(program_name);
43 return;
44 } else if arg == "--compare" {
45 enable_comparison = true;
46 } else if arg == "--timing" {
47 show_timing = true;
48 } else if !arg.starts_with('-') {
49 scenario_filter = Some(arg.clone());
50 } else {
51 eprintln!("Error: unknown option '{}'", arg);
52 print_help(program_name);
53 return;
54 }
55
56 i += 1;
57 }
58
59 println!("\n🚀 JSON Evaluation - Basic Example (JSON Schema)\n");
60
61 if enable_comparison {
62 println!("🔍 Comparison: enabled");
63 }
64 if show_timing {
65 println!("⏱️ Internal timing: enabled");
66 }
67 if enable_comparison || show_timing {
68 println!();
69 }
70
71 let samples_dir = Path::new("samples");
72 let mut scenarios = common::discover_scenarios(samples_dir);
73
74 // Filter out MessagePack scenarios - only use JSON
75 scenarios.retain(|s| !s.is_msgpack);
76
77 // Filter scenarios if a filter is provided
78 if let Some(ref filter) = scenario_filter {
79 scenarios.retain(|s| s.name.contains(filter));
80 println!("📋 Filtering scenarios matching: '{}'\n", filter);
81 }
82
83 if scenarios.is_empty() {
84 if let Some(filter) = scenario_filter {
85 println!(
86 "ℹ️ No scenarios found matching '{}' in `{}`.",
87 filter,
88 samples_dir.display()
89 );
90 } else {
91 println!(
92 "ℹ️ No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
93 samples_dir.display()
94 );
95 }
96 return;
97 }
98
99 println!("📊 Found {} scenario(s)\n", scenarios.len());
100
101 let mut total_parse_time = std::time::Duration::ZERO;
102 let mut total_eval_time = std::time::Duration::ZERO;
103 let mut successful_scenarios = 0;
104 let mut comparison_failures = 0;
105
106 for scenario in &scenarios {
107 println!("==============================");
108 println!("Scenario: {}", scenario.name);
109 println!("Schema: {} ({})",
110 scenario.schema_path.display(),
111 if scenario.is_msgpack { "MessagePack" } else { "JSON" }
112 );
113 println!("Data: {}\n", scenario.data_path.display());
114
115 // Clear timing data from previous scenarios
116 if show_timing {
117 json_eval_rs::enable_timing();
118 json_eval_rs::clear_timing_data();
119 }
120
121 let data_str = fs::read_to_string(&scenario.data_path)
122 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
123
124 // Step 1: Parse schema (JSONEval::new)
125 let parse_start = Instant::now();
126
127 let schema_str = fs::read_to_string(&scenario.schema_path)
128 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
129
130 let mut eval = JSONEval::new(&schema_str, None, Some(&data_str))
131 .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e));
132
133 let parse_time = parse_start.elapsed();
134 println!(" 📝 Parse (new): {:?}", parse_time);
135
136 // Step 2: Evaluate
137 let eval_start = Instant::now();
138
139 eval.evaluate(&data_str, Some("{}"), None)
140 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
141
142 let evaluated_schema = eval.get_evaluated_schema(false);
143 let schema_value = eval.get_schema_value();
144 let eval_time = eval_start.elapsed();
145
146 println!(" ⚡ Eval: {:?}", eval_time);
147 println!(" ⏱️ Total: {:?}\n", parse_time + eval_time);
148
149 // Print detailed timing breakdown if --timing flag is set
150 if show_timing {
151 json_eval_rs::print_timing_summary();
152 }
153
154 total_parse_time += parse_time;
155 total_eval_time += eval_time;
156 successful_scenarios += 1;
157
158 // Save results
159 let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
160 let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
161 let value_path = samples_dir.join(format!("{}-schema-value.json", scenario.name));
162
163 fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
164 .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
165
166 let mut metadata_obj = Map::new();
167 metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
168 metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
169 metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
170
171 fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
172 .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
173
174 fs::write(&value_path, common::pretty_json(&schema_value))
175 .unwrap_or_else(|e| panic!("failed to write {}: {}", value_path.display(), e));
176
177 println!("✅ Results saved:");
178 println!(" - {}", evaluated_path.display());
179 println!(" - {}", parsed_path.display());
180 println!(" - {}\n", value_path.display());
181
182 // Optional comparison
183 if enable_comparison {
184 if let Some(comp_path) = &scenario.comparison_path {
185 if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
186 comparison_failures += 1;
187 }
188 println!();
189 }
190 }
191 }
192
193 // Print summary
194 println!("{}", "=".repeat(50));
195 println!("📊 Summary");
196 println!("{}", "=".repeat(50));
197 println!("Total scenarios run: {}", successful_scenarios);
198 println!("Total parse time: {:?}", total_parse_time);
199 println!("Total eval time: {:?}", total_eval_time);
200 println!("Total time: {:?}", total_parse_time + total_eval_time);
201
202 if successful_scenarios > 1 {
203 println!("\nAverage per scenario:");
204 println!(" Parse: {:?}", total_parse_time / successful_scenarios as u32);
205 println!(" Eval: {:?}", total_eval_time / successful_scenarios as u32);
206 }
207
208 if enable_comparison {
209 println!("Comparison failures: {}", comparison_failures);
210 }
211
212 println!("\n✅ All scenarios completed!\n");
213}
Sourcepub fn get_evaluated_schema_without_params(&self, include_hidden: bool) -> Value
pub fn get_evaluated_schema_without_params(&self, include_hidden: bool) -> Value
Get evaluated schema without $params
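A brief sketch (assumed usage, not from the repository), for cases where the $params block should not appear in the payload sent to a client.

fn client_payload(eval: &json_eval_rs::JSONEval) -> serde_json::Value {
    // Evaluated schema with hidden fields stripped and the $params section omitted.
    eval.get_evaluated_schema_without_params(false)
}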
Sourcepub fn get_evaluated_schema_msgpack(
&self,
include_hidden: bool,
) -> Result<Vec<u8>, String>
pub fn get_evaluated_schema_msgpack( &self, include_hidden: bool, ) -> Result<Vec<u8>, String>
Get evaluated schema as MessagePack bytes
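A short hedged sketch: serialize the evaluated schema to MessagePack bytes and persist them. Only the documented signature is used; the file-writing part is ordinary std::fs.

fn write_msgpack_snapshot(eval: &json_eval_rs::JSONEval, out: &std::path::Path) -> Result<(), String> {
    // MessagePack bytes of the evaluated schema (hidden fields excluded).
    let bytes = eval.get_evaluated_schema_msgpack(false)?;
    std::fs::write(out, bytes).map_err(|e| e.to_string())
}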
Sourcepub fn get_evaluated_schema_by_path(
&self,
path: &str,
_skip_layout: bool,
) -> Option<Value>
pub fn get_evaluated_schema_by_path( &self, path: &str, _skip_layout: bool, ) -> Option<Value>
Get value from evaluated schema by path
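An illustrative sketch reading a single condition flag; the path mirrors the style used in the SPAJ example above but is otherwise hypothetical, and the second argument is simply the _skip_layout flag from the signature.

fn hidden_flag(eval: &json_eval_rs::JSONEval) -> Option<bool> {
    eval.get_evaluated_schema_by_path(
        "#/illustration/properties/basicinformation/properties/print_poladdress/condition/hidden",
        true, // _skip_layout flag from the signature above
    )
    .and_then(|v| v.as_bool())
}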
Sourcepub fn get_evaluated_schema_by_paths(
&self,
paths: &[String],
_skip_layout: bool,
format: ReturnFormat,
) -> Value
pub fn get_evaluated_schema_by_paths( &self, paths: &[String], _skip_layout: bool, format: ReturnFormat, ) -> Value
Get evaluated schema parts by multiple paths
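A sketch of fetching several evaluated parts at once. The paths are illustrative, and ReturnFormat::Object is a hypothetical variant name used only to show the call shape — check the ReturnFormat enum in this crate for the actual options.

fn pick_evaluated_parts(eval: &json_eval_rs::JSONEval) -> serde_json::Value {
    let paths = vec![
        "#/illustration/properties/basicinformation".to_string(), // illustrative paths
        "#/illustration/properties/print_poladdress".to_string(),
    ];
    // ReturnFormat::Object is a hypothetical variant name, used for illustration only.
    eval.get_evaluated_schema_by_paths(&paths, true, json_eval_rs::ReturnFormat::Object)
}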
Sourcepub fn get_schema_by_path(&self, path: &str) -> Option<Value>
pub fn get_schema_by_path(&self, path: &str) -> Option<Value>
Get original (unevaluated) schema by path
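A minimal sketch, assuming an illustrative path: reads from the original schema, i.e. before any $evaluation results are applied.

fn original_field(eval: &json_eval_rs::JSONEval) -> Option<serde_json::Value> {
    // Illustrative path; the unevaluated schema node is returned if it exists.
    eval.get_schema_by_path("#/illustration/properties/basicinformation")
}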
Sourcepub fn get_schema_by_paths(
&self,
paths: &[String],
format: ReturnFormat,
) -> Value
pub fn get_schema_by_paths( &self, paths: &[String], format: ReturnFormat, ) -> Value
Get original schema by multiple paths
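The multi-path variant, sketched under the same assumptions as above (illustrative path, hypothetical ReturnFormat::Object variant name).

fn original_parts(eval: &json_eval_rs::JSONEval) -> serde_json::Value {
    let paths = vec!["#/illustration".to_string()]; // illustrative path
    // ReturnFormat::Object is again a hypothetical variant name; see the note above.
    eval.get_schema_by_paths(&paths, json_eval_rs::ReturnFormat::Object)
}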
Sourcepub fn flatten_object(
prefix: &str,
value: &Value,
result: &mut Map<String, Value>,
)
pub fn flatten_object( prefix: &str, value: &Value, result: &mut Map<String, Value>, )
Flatten a nested object key-value pair to dotted keys
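A hedged sketch of the associated function. The empty prefix and the resulting "a.b"-style keys are assumptions about the dotted-key output, not confirmed behavior.

fn flatten_example() -> serde_json::Map<String, serde_json::Value> {
    let nested = serde_json::json!({ "a": { "b": 1, "c": { "d": true } } });
    let mut flat = serde_json::Map::new();
    // An empty prefix is assumed to produce keys like "a.b" and "a.c.d".
    json_eval_rs::JSONEval::flatten_object("", &nested, &mut flat);
    flat
}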
pub fn convert_to_format(value: Value, format: ReturnFormat) -> Value
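A one-line sketch of the conversion helper; ReturnFormat::Object is a hypothetical variant name used purely to illustrate the call.

fn reshape(value: serde_json::Value) -> serde_json::Value {
    // Converts an arbitrary Value into the requested return format.
    json_eval_rs::JSONEval::convert_to_format(value, json_eval_rs::ReturnFormat::Object)
}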
Source§impl JSONEval
impl JSONEval
Sourcepub fn new(
schema: &str,
context: Option<&str>,
data: Option<&str>,
) -> Result<Self, Error>
pub fn new( schema: &str, context: Option<&str>, data: Option<&str>, ) -> Result<Self, Error>
Examples found in repository
148fn demo_performance_comparison() -> Result<(), Box<dyn std::error::Error>> {
149 println!("⚡ Example 3: Performance Comparison");
150 println!("Comparing cached vs non-cached schema usage...\n");
151
152 let schema_json = r#"{
153 "$params": {
154 "value": { "type": "number" }
155 },
156 "doubled": {
157 "type": "number",
158 "$evaluation": { "*": [{"var": "$value"}, 2] }
159 },
160 "tripled": {
161 "type": "number",
162 "$evaluation": { "*": [{"var": "$value"}, 3] }
163 }
164 }"#;
165
166 let iterations = 100;
167
168 // WITHOUT CACHE: Parse schema every time
169 println!("🐌 Without cache (parse + evaluate each time):");
170 let start = Instant::now();
171 for i in 0..iterations {
172 let context = format!(r#"{{"value": {}}}"#, i);
173 let mut eval = JSONEval::new(schema_json, Some(&context), None)?;
174 eval.evaluate("{}", None, None)?;
175 }
176 let without_cache = start.elapsed();
177 println!(" Time: {:?}", without_cache);
178 println!(" Avg per iteration: {:?}\n", without_cache / iterations);
179
180 // WITH CACHE: Parse once, evaluate many times
181 println!("🚀 With cache (parse once, reuse for all evaluations):");
182 let cache = ParsedSchemaCache::new();
183
184 // Parse once
185 let parse_start = Instant::now();
186 let parsed = ParsedSchema::parse(schema_json)?;
187 cache.insert("perf-test".to_string(), Arc::new(parsed));
188 let parse_time = parse_start.elapsed();
189
190 // Evaluate many times
191 let eval_start = Instant::now();
192 for i in 0..iterations {
193 if let Some(cached) = cache.get("perf-test") {
194 let context = format!(r#"{{"value": {}}}"#, i);
195 let mut eval = JSONEval::with_parsed_schema(cached.clone(), Some(&context), None)?;
196 eval.evaluate("{}", None, None)?;
197 }
198 }
199 let eval_time = eval_start.elapsed();
200 let with_cache = parse_time + eval_time;
201
202 println!(" Parse time: {:?}", parse_time);
203 println!(" Eval time: {:?}", eval_time);
204 println!(" Total time: {:?}", with_cache);
205 println!(" Avg per iteration: {:?}\n", eval_time / iterations);
206
207 let speedup = without_cache.as_secs_f64() / with_cache.as_secs_f64();
208 println!("📈 Speedup: {:.2}x faster", speedup);
209
210 Ok(())
211}
More examples
4fn main() {
5 let schema = json!({
6 "type": "object",
7 "properties": {
8 "price": {
9 "type": "number"
10 },
11 "tax": {
12 "type": "number",
13 "value": {
14 "$evaluation": {
15 "*": [
16 { "$ref": "#/properties/price" },
17 0.1
18 ]
19 }
20 }
21 },
22 "total": {
23 "type": "number",
24 "value": {
25 "$evaluation": {
26 "+": [
27 { "$ref": "#/properties/price" },
28 { "$ref": "#/properties/tax" }
29 ]
30 }
31 }
32 }
33 }
34 });
35
36 let schema_str = serde_json::to_string(&schema).unwrap();
37
38 println!("=== Example 1: With Caching (Default) ===");
39 {
40 let data = json!({ "price": 100 });
41 let data_str = serde_json::to_string(&data).unwrap();
42
43 let mut eval = JSONEval::new(&schema_str, None, Some(&data_str)).unwrap();
44
45 println!("Cache enabled: {}", eval.is_cache_enabled());
46 println!("Initial cache size: {}", eval.cache_len());
47
48 eval.evaluate(&data_str, None, None).unwrap();
49
50 println!("After evaluation cache size: {}", eval.cache_len());
51 let stats = eval.cache_stats();
52 println!("Cache stats: {}", stats);
53 }
54
55 println!("\n=== Example 2: Without Caching (Web API Mode) ===");
56 {
57 let data = json!({ "price": 200 });
58 let data_str = serde_json::to_string(&data).unwrap();
59
60 let mut eval = JSONEval::new(&schema_str, None, Some(&data_str)).unwrap();
61
62 // Disable caching for single-use web API scenario
63 eval.disable_cache();
64
65 println!("Cache enabled: {}", eval.is_cache_enabled());
66 println!("Initial cache size: {}", eval.cache_len());
67
68 eval.evaluate(&data_str, None, None).unwrap();
69
70 println!("After evaluation cache size: {}", eval.cache_len());
71 let stats = eval.cache_stats();
72 println!("Cache stats: {}", stats);
73
74 println!("\n✅ No cache overhead - perfect for web APIs!");
75 }
76
77 println!("\n=== Example 3: Re-enabling Cache ===");
78 {
79 let data = json!({ "price": 300 });
80 let data_str = serde_json::to_string(&data).unwrap();
81
82 let mut eval = JSONEval::new(&schema_str, None, Some(&data_str)).unwrap();
83
84 // Disable then re-enable
85 eval.disable_cache();
86 eval.enable_cache();
87
88 println!("Cache enabled: {}", eval.is_cache_enabled());
89 eval.evaluate(&data_str, None, None).unwrap();
90
91 println!("Cache size after evaluation: {}", eval.cache_len());
92 println!("\n✅ Cache can be toggled as needed!");
93 }
94}
6fn main() {
7 println!("\n🚀 JSON Evaluation - SPAJ Toggle Example\n");
8
9 let schema_path = Path::new("samples/spaj.json");
10 let schema_str = fs::read_to_string(schema_path).expect("Failed to read schema");
11
12 // Initial data with minimal context required
13 let context_str = json!({
14 "agentProfile": { "sob": "AG" }
15 }).to_string();
16
17 let initial_data = json!({
18 "illustration": {
19 "basicinformation": {
20 "print_polflag": false
21 }
22 }
23 }).to_string();
24
25 // Initialize logic
26 let mut eval = JSONEval::new(&schema_str, Some(&context_str), Some(&initial_data))
27 .expect("Failed to create JSONEval");
28
29 // Helper to check visibility
30 let check_visibility = |eval: &mut JSONEval, expected_hidden: bool, step: &str| {
31 let result = eval.get_evaluated_schema(true);
32 let hidden = result.pointer("/illustration/properties/basicinformation/properties/print_poladdress/condition/hidden")
33 .and_then(|v| v.as_bool());
34
35 match hidden {
36 Some(val) => {
37 if val == expected_hidden {
38 println!("✅ {}: Hidden = {} (Expected: {})", step, val, expected_hidden);
39 } else {
40 println!("❌ {}: Hidden = {} (Expected: {})", step, val, expected_hidden);
41 }
42 },
43 None => println!("❌ {}: 'hidden' property not found", step),
44 }
45 };
46
47 // Step 1: Initial state (false)
48 println!("Step 1: Initial State (print_polflag: false)");
49 eval.evaluate(&initial_data, Some(&context_str), None).expect("Evaluation failed");
50 check_visibility(&mut eval, true, "Initial check");
51
52 // Step 2: Toggle to true
53 println!("\nStep 2: Toggle True (print_polflag: true)");
54 let data_true = json!({
55 "illustration": {
56 "basicinformation": {
57 "print_polflag": true
58 }
59 }
60 }).to_string();
61 eval.evaluate(&data_true, Some(&context_str), None).expect("Evaluation failed");
62 check_visibility(&mut eval, false, "Toggle ON check");
63
64 // Step 3: Toggle back to false
65 println!("\nStep 3: Toggle False (print_polflag: false)");
66 let data_false = json!({
67 "illustration": {
68 "basicinformation": {
69 "print_polflag": false
70 }
71 }
72 }).to_string();
73 eval.evaluate(&data_false, Some(&context_str), None).expect("Evaluation failed");
74
75 let hidden_path = "#/illustration/properties/basicinformation/properties/print_poladdress/condition/hidden";
76 if let Some(deps) = eval.dependencies.get(hidden_path) {
77 println!("Debug: Dependencies for hidden: {:?}", deps);
78 } else {
79 println!("Debug: No dependencies found for hidden path");
80 }
81
82 // Debug: Print current flag value
83 if let Some(val) = eval.get_evaluated_schema(true).pointer("/illustration/properties/basicinformation/properties/print_polflag/value") {
84 println!("Debug: print_polflag value is: {}", val);
85 }
86
87 check_visibility(&mut eval, true, "Toggle OFF check");
88}
28fn main() {
29 let args: Vec<String> = std::env::args().collect();
30 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic");
31
32 let mut scenario_filter: Option<String> = None;
33 let mut enable_comparison = false;
34 let mut show_timing = false;
35 let mut i = 1;
36
37 // Parse arguments
38 while i < args.len() {
39 let arg = &args[i];
40
41 if arg == "-h" || arg == "--help" {
42 print_help(program_name);
43 return;
44 } else if arg == "--compare" {
45 enable_comparison = true;
46 } else if arg == "--timing" {
47 show_timing = true;
48 } else if !arg.starts_with('-') {
49 scenario_filter = Some(arg.clone());
50 } else {
51 eprintln!("Error: unknown option '{}'", arg);
52 print_help(program_name);
53 return;
54 }
55
56 i += 1;
57 }
58
59 println!("\n🚀 JSON Evaluation - Basic Example (JSON Schema)\n");
60
61 if enable_comparison {
62 println!("🔍 Comparison: enabled");
63 }
64 if show_timing {
65 println!("⏱️ Internal timing: enabled");
66 }
67 if enable_comparison || show_timing {
68 println!();
69 }
70
71 let samples_dir = Path::new("samples");
72 let mut scenarios = common::discover_scenarios(samples_dir);
73
74 // Filter out MessagePack scenarios - only use JSON
75 scenarios.retain(|s| !s.is_msgpack);
76
77 // Filter scenarios if a filter is provided
78 if let Some(ref filter) = scenario_filter {
79 scenarios.retain(|s| s.name.contains(filter));
80 println!("📋 Filtering scenarios matching: '{}'\n", filter);
81 }
82
83 if scenarios.is_empty() {
84 if let Some(filter) = scenario_filter {
85 println!(
86 "ℹ️ No scenarios found matching '{}' in `{}`.",
87 filter,
88 samples_dir.display()
89 );
90 } else {
91 println!(
92 "ℹ️ No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
93 samples_dir.display()
94 );
95 }
96 return;
97 }
98
99 println!("📊 Found {} scenario(s)\n", scenarios.len());
100
101 let mut total_parse_time = std::time::Duration::ZERO;
102 let mut total_eval_time = std::time::Duration::ZERO;
103 let mut successful_scenarios = 0;
104 let mut comparison_failures = 0;
105
106 for scenario in &scenarios {
107 println!("==============================");
108 println!("Scenario: {}", scenario.name);
109 println!("Schema: {} ({})",
110 scenario.schema_path.display(),
111 if scenario.is_msgpack { "MessagePack" } else { "JSON" }
112 );
113 println!("Data: {}\n", scenario.data_path.display());
114
115 // Clear timing data from previous scenarios
116 if show_timing {
117 json_eval_rs::enable_timing();
118 json_eval_rs::clear_timing_data();
119 }
120
121 let data_str = fs::read_to_string(&scenario.data_path)
122 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
123
124 // Step 1: Parse schema (JSONEval::new)
125 let parse_start = Instant::now();
126
127 let schema_str = fs::read_to_string(&scenario.schema_path)
128 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
129
130 let mut eval = JSONEval::new(&schema_str, None, Some(&data_str))
131 .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e));
132
133 let parse_time = parse_start.elapsed();
134 println!(" 📝 Parse (new): {:?}", parse_time);
135
136 // Step 2: Evaluate
137 let eval_start = Instant::now();
138
139 eval.evaluate(&data_str, Some("{}"), None)
140 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
141
142 let evaluated_schema = eval.get_evaluated_schema(false);
143 let schema_value = eval.get_schema_value();
144 let eval_time = eval_start.elapsed();
145
146 println!(" ⚡ Eval: {:?}", eval_time);
147 println!(" ⏱️ Total: {:?}\n", parse_time + eval_time);
148
149 // Print detailed timing breakdown if --timing flag is set
150 if show_timing {
151 json_eval_rs::print_timing_summary();
152 }
153
154 total_parse_time += parse_time;
155 total_eval_time += eval_time;
156 successful_scenarios += 1;
157
158 // Save results
159 let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
160 let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
161 let value_path = samples_dir.join(format!("{}-schema-value.json", scenario.name));
162
163 fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
164 .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
165
166 let mut metadata_obj = Map::new();
167 metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
168 metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
169 metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
170
171 fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
172 .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
173
174 fs::write(&value_path, common::pretty_json(&schema_value))
175 .unwrap_or_else(|e| panic!("failed to write {}: {}", value_path.display(), e));
176
177 println!("✅ Results saved:");
178 println!(" - {}", evaluated_path.display());
179 println!(" - {}", parsed_path.display());
180 println!(" - {}\n", value_path.display());
181
182 // Optional comparison
183 if enable_comparison {
184 if let Some(comp_path) = &scenario.comparison_path {
185 if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
186 comparison_failures += 1;
187 }
188 println!();
189 }
190 }
191 }
192
193 // Print summary
194 println!("{}", "=".repeat(50));
195 println!("📊 Summary");
196 println!("{}", "=".repeat(50));
197 println!("Total scenarios run: {}", successful_scenarios);
198 println!("Total parse time: {:?}", total_parse_time);
199 println!("Total eval time: {:?}", total_eval_time);
200 println!("Total time: {:?}", total_parse_time + total_eval_time);
201
202 if successful_scenarios > 1 {
203 println!("\nAverage per scenario:");
204 println!(" Parse: {:?}", total_parse_time / successful_scenarios as u32);
205 println!(" Eval: {:?}", total_eval_time / successful_scenarios as u32);
206 }
207
208 if enable_comparison {
209 println!("Comparison failures: {}", comparison_failures);
210 }
211
212 println!("\n✅ All scenarios completed!\n");
213}
31fn main() {
32 let args: Vec<String> = std::env::args().collect();
33 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("benchmark");
34
35 let mut iterations = 1usize;
36 let mut scenario_filter: Option<String> = None;
37 let mut show_cpu_info = false;
38 let mut use_parsed_schema = false;
39 let mut concurrent_count: Option<usize> = None;
40 let mut enable_comparison = false;
41 let mut show_timing = false;
42 let mut i = 1;
43
44 // Parse arguments
45 while i < args.len() {
46 let arg = &args[i];
47
48 if arg == "-h" || arg == "--help" {
49 print_help(program_name);
50 return;
51 } else if arg == "--cpu-info" {
52 show_cpu_info = true;
53 } else if arg == "--parsed" {
54 use_parsed_schema = true;
55 } else if arg == "--compare" {
56 enable_comparison = true;
57 } else if arg == "--timing" {
58 show_timing = true;
59 } else if arg == "--concurrent" {
60 if i + 1 >= args.len() {
61 eprintln!("Error: {} requires a value", arg);
62 print_help(program_name);
63 return;
64 }
65 i += 1;
66 match args[i].parse::<usize>() {
67 Ok(n) if n > 0 => concurrent_count = Some(n),
68 _ => {
69 eprintln!("Error: concurrent count must be a positive integer, got '{}'", args[i]);
70 return;
71 }
72 }
73 } else if arg == "-i" || arg == "--iterations" {
74 if i + 1 >= args.len() {
75 eprintln!("Error: {} requires a value", arg);
76 print_help(program_name);
77 return;
78 }
79 i += 1;
80 match args[i].parse::<usize>() {
81 Ok(n) if n > 0 => iterations = n,
82 _ => {
83 eprintln!("Error: iterations must be a positive integer, got '{}'", args[i]);
84 return;
85 }
86 }
87 } else if !arg.starts_with('-') {
88 scenario_filter = Some(arg.clone());
89 } else {
90 eprintln!("Error: unknown option '{}'", arg);
91 print_help(program_name);
92 return;
93 }
94
95 i += 1;
96 }
97
98 println!("\n🚀 JSON Evaluation - Benchmark\n");
99
100 // Show CPU info if requested or if running benchmarks
101 if show_cpu_info || iterations > 1 || concurrent_count.is_some() {
102 common::print_cpu_info();
103 }
104
105 if use_parsed_schema {
106 println!("📦 Mode: ParsedSchema (parse once, reuse for all iterations)\n");
107 }
108
109 if let Some(count) = concurrent_count {
110 println!("🔀 Concurrent evaluations: {} threads\n", count);
111 } else if iterations > 1 {
112 println!("🔄 Iterations per scenario: {}\n", iterations);
113 }
114
115 if enable_comparison {
116 println!("🔍 Comparison: enabled");
117 }
118 if show_timing {
119 println!("⏱️ Internal timing: enabled");
120 }
121 if enable_comparison || show_timing {
122 println!();
123 }
124
125 let samples_dir = Path::new("samples");
126 let mut scenarios = common::discover_scenarios(samples_dir);
127
128 // Filter scenarios if a filter is provided
129 if let Some(ref filter) = scenario_filter {
130 scenarios.retain(|s| s.name.contains(filter));
131 println!("📋 Filtering scenarios matching: '{}'\n", filter);
132 }
133
134 if scenarios.is_empty() {
135 if let Some(filter) = scenario_filter {
136 println!(
137 "ℹ️ No scenarios found matching '{}' in `{}`.",
138 filter,
139 samples_dir.display()
140 );
141 } else {
142 println!(
143 "ℹ️ No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
144 samples_dir.display()
145 );
146 }
147 return;
148 }
149
150 println!("📊 Found {} scenario(s)\n", scenarios.len());
151
152 let mut total_parse_time = std::time::Duration::ZERO;
153 let mut total_eval_time = std::time::Duration::ZERO;
154 let mut successful_scenarios = 0;
155 let mut comparison_failures = 0;
156
157 for scenario in &scenarios {
158 println!("==============================");
159 println!("Scenario: {}", scenario.name);
160 println!("Schema: {} ({})",
161 scenario.schema_path.display(),
162 if scenario.is_msgpack { "MessagePack" } else { "JSON" }
163 );
164 println!("Data: {}\n", scenario.data_path.display());
165
166 // Clear timing data from previous scenarios
167 if show_timing {
168 json_eval_rs::enable_timing();
169 json_eval_rs::clear_timing_data();
170 }
171
172 let data_str = fs::read_to_string(&scenario.data_path)
173 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
174
175 println!("Running evaluation...\n");
176
177 let (parse_time, eval_time, evaluated_schema, eval, iteration_times) = if use_parsed_schema {
178 // ParsedSchema mode: parse once, reuse for all iterations/threads
179 let start_time = Instant::now();
180
181 let parsed_schema = if scenario.is_msgpack {
182 let schema_msgpack = fs::read(&scenario.schema_path)
183 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
184 println!(" 📦 MessagePack schema size: {} bytes", schema_msgpack.len());
185 Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
186 .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
187 } else {
188 let schema_str = fs::read_to_string(&scenario.schema_path)
189 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
190 Arc::new(ParsedSchema::parse(&schema_str)
191 .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
192 };
193
194 let parse_time = start_time.elapsed();
195 println!(" Schema parsing & compilation: {:?}", parse_time);
196
197 // Concurrent mode with ParsedSchema
198 if let Some(thread_count) = concurrent_count {
199 use std::thread;
200
201 let eval_start = Instant::now();
202 let mut handles = vec![];
203
204 for thread_id in 0..thread_count {
205 let parsed_clone = parsed_schema.clone();
206 let data_str_clone = data_str.clone();
207 let iter_count = iterations;
208
209 let handle = thread::spawn(move || {
210 let mut thread_times = Vec::with_capacity(iter_count);
211 let mut last_schema = Value::Null;
212
213 for _ in 0..iter_count {
214 let iter_start = Instant::now();
215 let mut eval_instance = JSONEval::with_parsed_schema(
216 parsed_clone.clone(),
217 Some("{}"),
218 Some(&data_str_clone)
219 ).unwrap();
220
221 eval_instance.evaluate(&data_str_clone, Some("{}"), None).unwrap();
222 last_schema = eval_instance.get_evaluated_schema(false);
223 thread_times.push(iter_start.elapsed());
224 }
225
226 (thread_times, last_schema, thread_id)
227 });
228 handles.push(handle);
229 }
230
231 let mut all_iteration_times = Vec::new();
232 let mut evaluated_schema = Value::Null;
233
234 for handle in handles {
235 let (thread_times, thread_schema, thread_id) = handle.join().unwrap();
236 println!(" Thread {} completed {} iterations", thread_id, thread_times.len());
237 all_iteration_times.extend(thread_times);
238 evaluated_schema = thread_schema; // Use last thread's result
239 }
240
241 let eval_time = eval_start.elapsed();
242
243 // Create a temp eval for metadata export
244 let temp_eval = JSONEval::with_parsed_schema(
245 parsed_schema.clone(),
246 Some("{}"),
247 Some(&data_str)
248 ).unwrap();
249
250 (parse_time, eval_time, evaluated_schema, temp_eval, all_iteration_times)
251 } else {
252 // Sequential iterations with ParsedSchema
253 let eval_start = Instant::now();
254 let mut evaluated_schema = Value::Null;
255 let mut iteration_times = Vec::with_capacity(iterations);
256 let mut eval_instance = JSONEval::with_parsed_schema(
257 parsed_schema.clone(),
258 Some("{}"),
259 Some(&data_str)
260 ).unwrap();
261
262 for iter in 0..iterations {
263 let iter_start = Instant::now();
264 eval_instance.evaluate(&data_str, Some("{}"), None)
265 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
266 evaluated_schema = eval_instance.get_evaluated_schema(false);
267 iteration_times.push(iter_start.elapsed());
268
269 if iterations > 1 && (iter + 1) % 10 == 0 {
270 print!(".");
271 if (iter + 1) % 50 == 0 {
272 println!(" {}/{}", iter + 1, iterations);
273 }
274 }
275 }
276
277 if iterations > 1 && iterations % 50 != 0 {
278 println!(" {}/{}", iterations, iterations);
279 }
280
281 let eval_time = eval_start.elapsed();
282 (parse_time, eval_time, evaluated_schema, eval_instance, iteration_times)
283 }
284 } else {
285 // Traditional mode: parse and create JSONEval each time
286 let start_time = Instant::now();
287 let mut eval = if scenario.is_msgpack {
288 let schema_msgpack = fs::read(&scenario.schema_path)
289 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
290 println!(" 📦 MessagePack schema size: {} bytes", schema_msgpack.len());
291 JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
292 .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e))
293 } else {
294 let schema_str = fs::read_to_string(&scenario.schema_path)
295 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
296 JSONEval::new(&schema_str, None, Some(&data_str))
297 .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
298 };
299 let parse_time = start_time.elapsed();
300 println!(" Schema parsing & compilation: {:?}", parse_time);
301
302 let eval_start = Instant::now();
303 let mut evaluated_schema = Value::Null;
304 let mut iteration_times = Vec::with_capacity(iterations);
305
306 for iter in 0..iterations {
307 let iter_start = Instant::now();
308 eval.evaluate(&data_str, Some("{}"), None)
309 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
310 evaluated_schema = eval.get_evaluated_schema(false);
311 iteration_times.push(iter_start.elapsed());
312
313 if iterations > 1 && (iter + 1) % 10 == 0 {
314 print!(".");
315 if (iter + 1) % 50 == 0 {
316 println!(" {}/{}", iter + 1, iterations);
317 }
318 }
319 }
320
321 if iterations > 1 && iterations % 50 != 0 {
322 println!(" {}/{}", iterations, iterations);
323 }
324
325 let eval_time = eval_start.elapsed();
326 (parse_time, eval_time, evaluated_schema, eval, iteration_times)
327 };
328
329 // Calculate statistics
330 let total_iterations = iteration_times.len();
331 if total_iterations == 1 {
332 println!(" Evaluation: {:?}", eval_time);
333 } else {
334 let avg_time = eval_time / total_iterations as u32;
335 let min_time = iteration_times.iter().min().unwrap();
336 let max_time = iteration_times.iter().max().unwrap();
337
338 println!(" Total evaluation time: {:?}", eval_time);
339 println!(" Total iterations: {}", total_iterations);
340 println!(" Average per iteration: {:?}", avg_time);
341 println!(" Min: {:?} | Max: {:?}", min_time, max_time);
342
343 // Show cache statistics
344 let cache_stats = eval.cache_stats();
345 println!(" Cache: {} entries, {} hits, {} misses ({:.1}% hit rate)",
346 cache_stats.entries,
347 cache_stats.hits,
348 cache_stats.misses,
349 cache_stats.hit_rate * 100.0
350 );
351 }
352
353 let total_time = parse_time + eval_time;
354 println!("⏱️ Execution time: {:?}\n", total_time);
355
356 // Print detailed timing breakdown if --timing flag is set
357 if show_timing {
358 json_eval_rs::print_timing_summary();
359 }
360
361 // Track statistics
362 total_parse_time += parse_time;
363 total_eval_time += eval_time;
364 successful_scenarios += 1;
365
366 let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
367 let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
368
369 fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
370 .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
371
372 let mut metadata_obj = Map::new();
373 metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
374 metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
375
376 fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
377 .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
378
379 println!("✅ Results saved:");
380 println!(" - {}", evaluated_path.display());
381 println!(" - {}\n", parsed_path.display());
382
383 // Optional comparison
384 if enable_comparison {
385 if let Some(comp_path) = &scenario.comparison_path {
386 if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
387 comparison_failures += 1;
388 }
389 println!();
390 }
391 }
392 }
393
394 // Print summary statistics
395 if successful_scenarios > 0 {
396 println!("\n{}", "=".repeat(50));
397 println!("📊 Summary Statistics");
398 println!("{}", "=".repeat(50));
399 println!("Total scenarios run: {}", successful_scenarios);
400 println!("Total parsing time: {:?}", total_parse_time);
401 println!("Total evaluation time: {:?}", total_eval_time);
402 println!("Total time: {:?}", total_parse_time + total_eval_time);
403
404 if successful_scenarios > 1 {
405 println!("\nAverage per scenario:");
406 println!(" Parsing: {:?}", total_parse_time / successful_scenarios as u32);
407 println!(" Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
408 }
409
410 if enable_comparison {
411 println!("\nComparison failures: {}", comparison_failures);
412 }
413
414 println!("\n✅ All scenarios completed successfully!\n");
415 }
416}
Source
pub fn new_from_msgpack(
schema_msgpack: &[u8],
context: Option<&str>,
data: Option<&str>,
) -> Result<Self, String>
pub fn new_from_msgpack( schema_msgpack: &[u8], context: Option<&str>, data: Option<&str>, ) -> Result<Self, String>
Create a new JSONEval instance from MessagePack-encoded schema
§Arguments
schema_msgpack - MessagePack-encoded schema bytes
context - Optional JSON context string
data - Optional JSON data string
§Returns
A Result containing the JSONEval instance or an error
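§Example
A minimal sketch of direct usage. The file path and data literal below are placeholders (not assets shipped with the crate), and the crate-root import path is assumed:
use std::fs;
use json_eval_rs::JSONEval; // crate-root path assumed

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Read a MessagePack-encoded schema (for example a pre-converted `.bform` file)
    let schema_msgpack = fs::read("samples/spaj.bform")?;

    // Build the evaluator, then run one evaluation against JSON data
    let data = r#"{"illustration":{"basicinformation":{"print_polflag":false}}}"#;
    let mut eval = JSONEval::new_from_msgpack(&schema_msgpack, None, Some(data))?;
    eval.evaluate(data, None, None)?;

    // Inspect the evaluated schema
    let evaluated = eval.get_evaluated_schema(false);
    println!("{}", evaluated);
    Ok(())
}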
Examples found in repository?
28fn main() {
29 let args: Vec<String> = std::env::args().collect();
30 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic_msgpack");
31
32 let mut scenario_filter: Option<String> = None;
33 let mut enable_comparison = false;
34 let mut show_timing = false;
35 let mut i = 1;
36
37 // Parse arguments
38 while i < args.len() {
39 let arg = &args[i];
40
41 if arg == "-h" || arg == "--help" {
42 print_help(program_name);
43 return;
44 } else if arg == "--compare" {
45 enable_comparison = true;
46 } else if arg == "--timing" {
47 show_timing = true;
48 } else if !arg.starts_with('-') {
49 scenario_filter = Some(arg.clone());
50 } else {
51 eprintln!("Error: unknown option '{}'", arg);
52 print_help(program_name);
53 return;
54 }
55
56 i += 1;
57 }
58
59 println!("\n🚀 JSON Evaluation - Basic Example (MessagePack Schema)\n");
60
61 if enable_comparison {
62 println!("🔍 Comparison: enabled");
63 }
64 if show_timing {
65 println!("⏱️ Internal timing: enabled");
66 }
67 if enable_comparison || show_timing {
68 println!();
69 }
70
71 let samples_dir = Path::new("samples");
72 let mut scenarios = common::discover_scenarios(samples_dir);
73
74 // Filter to only MessagePack scenarios
75 scenarios.retain(|s| s.is_msgpack);
76
77 // Filter scenarios if a filter is provided
78 if let Some(ref filter) = scenario_filter {
79 scenarios.retain(|s| s.name.contains(filter));
80 println!("📋 Filtering scenarios matching: '{}'\n", filter);
81 }
82
83 if scenarios.is_empty() {
84 if let Some(filter) = scenario_filter {
85 println!(
86 "ℹ️ No MessagePack scenarios found matching '{}' in `{}`.",
87 filter,
88 samples_dir.display()
89 );
90 } else {
91 println!(
92 "ℹ️ No MessagePack scenarios discovered in `{}`. Add files like `name.bform` and `name-data.json`.",
93 samples_dir.display()
94 );
95 }
96 return;
97 }
98
99 println!("📊 Found {} MessagePack scenario(s)\n", scenarios.len());
100
101 let mut total_parse_time = std::time::Duration::ZERO;
102 let mut total_eval_time = std::time::Duration::ZERO;
103 let mut successful_scenarios = 0;
104 let mut comparison_failures = 0;
105
106 for scenario in &scenarios {
107 println!("==============================");
108 println!("Scenario: {}", scenario.name);
109 println!("Schema: {} (MessagePack)", scenario.schema_path.display());
110 println!("Data: {}\n", scenario.data_path.display());
111
112 // Clear timing data from previous scenarios
113 if show_timing {
114 json_eval_rs::enable_timing();
115 json_eval_rs::clear_timing_data();
116 }
117
118 let data_str = fs::read_to_string(&scenario.data_path)
119 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
120
121 // Step 1: Parse schema (new_from_msgpack)
122 let parse_start = Instant::now();
123
124 let schema_msgpack = fs::read(&scenario.schema_path)
125 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
126
127 println!(" 📦 MessagePack schema size: {} bytes", schema_msgpack.len());
128
129 let mut eval = JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
130 .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e));
131
132 let parse_time = parse_start.elapsed();
133 println!(" 📝 Parse (msgpack): {:?}", parse_time);
134
135 // Step 2: Evaluate
136 let eval_start = Instant::now();
137
138 eval.evaluate(&data_str, Some("{}"), None)
139 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
140
141 let evaluated_schema = eval.get_evaluated_schema(false);
142 let eval_time = eval_start.elapsed();
143
144 println!(" ⚡ Eval: {:?}", eval_time);
145 println!(" ⏱️ Total: {:?}\n", parse_time + eval_time);
146
147 // Print detailed timing breakdown if --timing flag is set
148 if show_timing {
149 json_eval_rs::print_timing_summary();
150 }
151
152 total_parse_time += parse_time;
153 total_eval_time += eval_time;
154 successful_scenarios += 1;
155
156 // Save results
157 let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
158 let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
159
160 fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
161 .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
162
163 let mut metadata_obj = Map::new();
164 metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
165 metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
166 metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
167
168 fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
169 .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
170
171 println!("✅ Results saved:");
172 println!(" - {}", evaluated_path.display());
173 println!(" - {}\n", parsed_path.display());
174
175 // Optional comparison
176 if enable_comparison {
177 if let Some(comp_path) = &scenario.comparison_path {
178 if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
179 comparison_failures += 1;
180 }
181 println!();
182 }
183 }
184 }
185
186 // Print summary
187 println!("{}", "=".repeat(50));
188 println!("📊 Summary");
189 println!("{}", "=".repeat(50));
190 println!("Total scenarios run: {}", successful_scenarios);
191 println!("Total parse time: {:?}", total_parse_time);
192 println!("Total eval time: {:?}", total_eval_time);
193 println!("Total time: {:?}", total_parse_time + total_eval_time);
194
195 if successful_scenarios > 1 {
196 println!("\nAverage per scenario:");
197 println!(" Parse: {:?}", total_parse_time / successful_scenarios as u32);
198 println!(" Eval: {:?}", total_eval_time / successful_scenarios as u32);
199 }
200
201 if enable_comparison {
202 println!("Comparison failures: {}", comparison_failures);
203 }
204
205 println!("\n✅ All scenarios completed!\n");
206}
More examples
The benchmark listing shown above also exercises new_from_msgpack.
Source
pub fn with_parsed_schema(
parsed: Arc<ParsedSchema>,
context: Option<&str>,
data: Option<&str>,
) -> Result<Self, String>
pub fn with_parsed_schema( parsed: Arc<ParsedSchema>, context: Option<&str>, data: Option<&str>, ) -> Result<Self, String>
Create a new JSONEval instance from a pre-parsed ParsedSchema
This enables schema caching: parse once, reuse across multiple evaluations with different data/context.
§Arguments
parsed - Arc-wrapped pre-parsed schema (can be cloned and cached)
context - Optional JSON context string
data - Optional JSON data string
§Returns
A Result containing the JSONEval instance or an error
§Example
use std::sync::Arc;
use json_eval_rs::{JSONEval, ParsedSchema}; // crate-root paths assumed

// Parse schema once and wrap in Arc for caching
// (`schema_str`, `cache`, `schema_key`, and the context/data strings are supplied by the caller)
let parsed = Arc::new(ParsedSchema::parse(schema_str)?);
cache.insert(schema_key, parsed.clone());

// Reuse across multiple evaluations (Arc::clone is cheap)
let eval1 = JSONEval::with_parsed_schema(parsed.clone(), Some(context1), Some(data1))?;
let eval2 = JSONEval::with_parsed_schema(parsed.clone(), Some(context2), Some(data2))?;
Examples found in repository?
38fn demo_local_cache() -> Result<(), Box<dyn std::error::Error>> {
39 println!("📦 Example 1: Local Cache Instance");
40 println!("Creating a dedicated cache for this application...\n");
41
42 let cache = ParsedSchemaCache::new();
43
44 // Simple schema
45 let schema_json = r#"{
46 "$params": {
47 "rate": { "type": "number" }
48 },
49 "result": {
50 "type": "number",
51 "title": "Calculated Result",
52 "$evaluation": {
53 "logic": { "*": [{"var": "$rate"}, 100] }
54 }
55 }
56 }"#;
57
58 // Parse and cache with a custom key
59 println!("📝 Parsing schema and caching with key 'calculation-v1'...");
60 let parsed = ParsedSchema::parse(schema_json)?;
61 cache.insert("calculation-v1".to_string(), Arc::new(parsed));
62
63 println!("✅ Schema cached successfully");
64 println!(" Cache size: {} entries", cache.len());
65 println!(" Keys: {:?}\n", cache.keys());
66
67 // Retrieve and use cached schema
68 println!("🔍 Retrieving cached schema...");
69 if let Some(cached_schema) = cache.get("calculation-v1") {
70 println!("✅ Retrieved from cache");
71
72 // Create JSONEval from cached ParsedSchema
73 let mut eval = JSONEval::with_parsed_schema(cached_schema, Some(r#"{"rate": 1.5}"#), None)?;
74 eval.evaluate("{}", None, None)?;
75
76 let evaluated = eval.get_evaluated_schema(false);
77 let result = evaluated.pointer("/result")
78 .and_then(|v| v.as_f64())
79 .unwrap_or(0.0);
80 println!(" Evaluation result: {}\n", result);
81 }
82
83 // Check cache stats
84 let stats = cache.stats();
85 println!("📊 Cache Statistics: {}", stats);
86
87 // Remove entry
88 println!("\n🗑️ Removing 'calculation-v1' from cache...");
89 cache.remove("calculation-v1");
90 println!(" Cache size after removal: {}", cache.len());
91
92 Ok(())
93}
94
95fn demo_global_cache() -> Result<(), Box<dyn std::error::Error>> {
96 println!("🌍 Example 2: Global Cache Instance");
97 println!("Using the built-in PARSED_SCHEMA_CACHE...\n");
98
99 let schema_json = r#"{
100 "$params": {
101 "x": { "type": "number" },
102 "y": { "type": "number" }
103 },
104 "sum": {
105 "type": "number",
106 "$evaluation": { "+": [{"var": "$x"}, {"var": "$y"}] }
107 }
108 }"#;
109
110 // Use global cache
111 println!("📝 Caching schema globally with key 'math-operations'...");
112 let parsed = ParsedSchema::parse(schema_json)?;
113 PARSED_SCHEMA_CACHE.insert("math-operations".to_string(), Arc::new(parsed));
114
115 println!("✅ Schema cached globally");
116 println!(" Global cache size: {}\n", PARSED_SCHEMA_CACHE.len());
117
118 // Access from anywhere in the application
119 simulate_another_function()?;
120
121 // Clean up
122 println!("\n🧹 Clearing global cache...");
123 PARSED_SCHEMA_CACHE.clear();
124 println!(" Global cache size: {}", PARSED_SCHEMA_CACHE.len());
125
126 Ok(())
127}
128
129fn simulate_another_function() -> Result<(), Box<dyn std::error::Error>> {
130 println!("🔄 In another function, accessing global cache...");
131
132 if let Some(cached) = PARSED_SCHEMA_CACHE.get("math-operations") {
133 println!("✅ Retrieved schema from global cache");
134
135 let mut eval = JSONEval::with_parsed_schema(cached, Some(r#"{"x": 10, "y": 20}"#), None)?;
136 eval.evaluate("{}", None, None)?;
137
138 let evaluated = eval.get_evaluated_schema(false);
139 let sum = evaluated.pointer("/sum")
140 .and_then(|v| v.as_f64())
141 .unwrap_or(0.0);
142 println!(" Result: {}", sum);
143 }
144
145 Ok(())
146}
147
148fn demo_performance_comparison() -> Result<(), Box<dyn std::error::Error>> {
149 println!("⚡ Example 3: Performance Comparison");
150 println!("Comparing cached vs non-cached schema usage...\n");
151
152 let schema_json = r#"{
153 "$params": {
154 "value": { "type": "number" }
155 },
156 "doubled": {
157 "type": "number",
158 "$evaluation": { "*": [{"var": "$value"}, 2] }
159 },
160 "tripled": {
161 "type": "number",
162 "$evaluation": { "*": [{"var": "$value"}, 3] }
163 }
164 }"#;
165
166 let iterations = 100;
167
168 // WITHOUT CACHE: Parse schema every time
169 println!("🐌 Without cache (parse + evaluate each time):");
170 let start = Instant::now();
171 for i in 0..iterations {
172 let context = format!(r#"{{"value": {}}}"#, i);
173 let mut eval = JSONEval::new(schema_json, Some(&context), None)?;
174 eval.evaluate("{}", None, None)?;
175 }
176 let without_cache = start.elapsed();
177 println!(" Time: {:?}", without_cache);
178 println!(" Avg per iteration: {:?}\n", without_cache / iterations);
179
180 // WITH CACHE: Parse once, evaluate many times
181 println!("🚀 With cache (parse once, reuse for all evaluations):");
182 let cache = ParsedSchemaCache::new();
183
184 // Parse once
185 let parse_start = Instant::now();
186 let parsed = ParsedSchema::parse(schema_json)?;
187 cache.insert("perf-test".to_string(), Arc::new(parsed));
188 let parse_time = parse_start.elapsed();
189
190 // Evaluate many times
191 let eval_start = Instant::now();
192 for i in 0..iterations {
193 if let Some(cached) = cache.get("perf-test") {
194 let context = format!(r#"{{"value": {}}}"#, i);
195 let mut eval = JSONEval::with_parsed_schema(cached.clone(), Some(&context), None)?;
196 eval.evaluate("{}", None, None)?;
197 }
198 }
199 let eval_time = eval_start.elapsed();
200 let with_cache = parse_time + eval_time;
201
202 println!(" Parse time: {:?}", parse_time);
203 println!(" Eval time: {:?}", eval_time);
204 println!(" Total time: {:?}", with_cache);
205 println!(" Avg per iteration: {:?}\n", eval_time / iterations);
206
207 let speedup = without_cache.as_secs_f64() / with_cache.as_secs_f64();
208 println!("📈 Speedup: {:.2}x faster", speedup);
209
210 Ok(())
211}
More examples
30fn main() {
31 let args: Vec<String> = std::env::args().collect();
32 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic_parsed");
33
34 let mut scenario_filter: Option<String> = None;
35 let mut enable_comparison = false;
36 let mut show_timing = false;
37 let mut i = 1;
38
39 // Parse arguments
40 while i < args.len() {
41 let arg = &args[i];
42
43 if arg == "-h" || arg == "--help" {
44 print_help(program_name);
45 return;
46 } else if arg == "--compare" {
47 enable_comparison = true;
48 } else if arg == "--timing" {
49 show_timing = true;
50 } else if !arg.starts_with('-') {
51 scenario_filter = Some(arg.clone());
52 } else {
53 eprintln!("Error: unknown option '{}'", arg);
54 print_help(program_name);
55 return;
56 }
57
58 i += 1;
59 }
60
61 println!("\n🚀 JSON Evaluation - Basic Example (ParsedSchema)\n");
62 println!("📦 Using Arc<ParsedSchema> for efficient caching\n");
63
64 if enable_comparison {
65 println!("🔍 Comparison: enabled");
66 }
67 if show_timing {
68 println!("⏱️ Internal timing: enabled");
69 }
70 if enable_comparison || show_timing {
71 println!();
72 }
73
74 let samples_dir = Path::new("samples");
75 let mut scenarios = common::discover_scenarios(samples_dir);
76
77 // Filter scenarios if a filter is provided
78 if let Some(ref filter) = scenario_filter {
79 scenarios.retain(|s| s.name.contains(filter));
80 println!("📋 Filtering scenarios matching: '{}'\n", filter);
81 }
82
83 if scenarios.is_empty() {
84 if let Some(filter) = scenario_filter {
85 println!(
86 "ℹ️ No scenarios found matching '{}' in `{}`.",
87 filter,
88 samples_dir.display()
89 );
90 } else {
91 println!(
92 "ℹ️ No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
93 samples_dir.display()
94 );
95 }
96 return;
97 }
98
99 println!("📊 Found {} scenario(s)\n", scenarios.len());
100
101 let mut total_parse_time = std::time::Duration::ZERO;
102 let mut total_eval_time = std::time::Duration::ZERO;
103 let mut successful_scenarios = 0;
104 let mut comparison_failures = 0;
105
106 for scenario in &scenarios {
107 println!("==============================");
108 println!("Scenario: {}", scenario.name);
109 println!("Schema: {} ({})",
110 scenario.schema_path.display(),
111 if scenario.is_msgpack { "MessagePack" } else { "JSON" }
112 );
113 println!("Data: {}\n", scenario.data_path.display());
114
115 // Clear timing data from previous scenarios
116 if show_timing {
117 json_eval_rs::enable_timing();
118 json_eval_rs::clear_timing_data();
119 }
120
121 let data_str = fs::read_to_string(&scenario.data_path)
122 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
123
124 // Step 1: Parse schema once
125 let parse_start = Instant::now();
126 let parsed_schema = if scenario.is_msgpack {
127 let schema_msgpack = fs::read(&scenario.schema_path)
128 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
129 println!(" 📦 MessagePack schema size: {} bytes", schema_msgpack.len());
130 Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
131 .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
132 } else {
133 let schema_str = fs::read_to_string(&scenario.schema_path)
134 .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
135 Arc::new(ParsedSchema::parse(&schema_str)
136 .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
137 };
138 let parse_time = parse_start.elapsed();
139 println!(" 📝 Schema parsing: {:?}", parse_time);
140
141 // Step 2: Create JSONEval from ParsedSchema (reuses compiled logic)
142 let eval_start = Instant::now();
143 let mut eval = JSONEval::with_parsed_schema(
144 parsed_schema.clone(), // Arc::clone is cheap!
145 Some("{}"),
146 Some(&data_str)
147 ).unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e));
148
149 eval.evaluate(&data_str, Some("{}"), None)
150 .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
151
152 let evaluated_schema = eval.get_evaluated_schema(false);
153 let eval_time = eval_start.elapsed();
154
155 println!(" ⚡ Eval: {:?}", eval_time);
156 println!(" ⏱️ Total: {:?}\n", parse_time + eval_time);
157
158 // Print detailed timing breakdown if --timing flag is set
159 if show_timing {
160 json_eval_rs::print_timing_summary();
161 }
162
163 total_parse_time += parse_time;
164 total_eval_time += eval_time;
165 successful_scenarios += 1;
166
167 // Save results
168 let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
169 let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
170
171 fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
172 .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
173
174 let mut metadata_obj = Map::new();
175 metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
176 metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
177 metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
178
179 fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
180 .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
181
182 println!("✅ Results saved:");
183 println!(" - {}", evaluated_path.display());
184 println!(" - {}\n", parsed_path.display());
185
186 // Optional comparison
187 if enable_comparison {
188 if let Some(comp_path) = &scenario.comparison_path {
189 if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
190 comparison_failures += 1;
191 }
192 println!();
193 }
194 }
195 }
196
197 // Print summary
198 println!("{}", "=".repeat(50));
199 println!("📊 Summary");
200 println!("{}", "=".repeat(50));
201 println!("Total scenarios run: {}", successful_scenarios);
202 println!("Total parsing time: {:?}", total_parse_time);
203 println!("Total evaluation time: {:?}", total_eval_time);
204 println!("Total time: {:?}", total_parse_time + total_eval_time);
205
206 if successful_scenarios > 1 {
207 println!("\nAverage per scenario:");
208 println!(" Parsing: {:?}", total_parse_time / successful_scenarios as u32);
209 println!(" Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
210 }
211
212 if enable_comparison {
213 println!("\nComparison failures: {}", comparison_failures);
214 }
215
216 println!("\n✅ All scenarios completed!\n");
217}
31fn main() {
32 let args: Vec<String> = std::env::args().collect();
33 let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("benchmark");
34
35 let mut iterations = 1usize;
36 let mut scenario_filter: Option<String> = None;
37 let mut show_cpu_info = false;
38 let mut use_parsed_schema = false;
39 let mut concurrent_count: Option<usize> = None;
40 let mut enable_comparison = false;
41 let mut show_timing = false;
42 let mut i = 1;
43
44 // Parse arguments
45 while i < args.len() {
46 let arg = &args[i];
47
48 if arg == "-h" || arg == "--help" {
49 print_help(program_name);
50 return;
51 } else if arg == "--cpu-info" {
52 show_cpu_info = true;
53 } else if arg == "--parsed" {
54 use_parsed_schema = true;
55 } else if arg == "--compare" {
56 enable_comparison = true;
57 } else if arg == "--timing" {
58 show_timing = true;
59 } else if arg == "--concurrent" {
60 if i + 1 >= args.len() {
61 eprintln!("Error: {} requires a value", arg);
62 print_help(program_name);
63 return;
64 }
65 i += 1;
66 match args[i].parse::<usize>() {
67 Ok(n) if n > 0 => concurrent_count = Some(n),
68 _ => {
69 eprintln!("Error: concurrent count must be a positive integer, got '{}'", args[i]);
70 return;
71 }
72 }
73 } else if arg == "-i" || arg == "--iterations" {
74 if i + 1 >= args.len() {
75 eprintln!("Error: {} requires a value", arg);
76 print_help(program_name);
77 return;
78 }
79 i += 1;
80 match args[i].parse::<usize>() {
81 Ok(n) if n > 0 => iterations = n,
82 _ => {
83 eprintln!("Error: iterations must be a positive integer, got '{}'", args[i]);
84 return;
85 }
86 }
87 } else if !arg.starts_with('-') {
88 scenario_filter = Some(arg.clone());
89 } else {
90 eprintln!("Error: unknown option '{}'", arg);
            print_help(program_name);
            return;
        }

        i += 1;
    }

    println!("\n🚀 JSON Evaluation - Benchmark\n");

    // Show CPU info if requested or if running benchmarks
    if show_cpu_info || iterations > 1 || concurrent_count.is_some() {
        common::print_cpu_info();
    }

    if use_parsed_schema {
        println!("📦 Mode: ParsedSchema (parse once, reuse for all iterations)\n");
    }

    if let Some(count) = concurrent_count {
        println!("🔀 Concurrent evaluations: {} threads\n", count);
    } else if iterations > 1 {
        println!("🔄 Iterations per scenario: {}\n", iterations);
    }

    if enable_comparison {
        println!("🔍 Comparison: enabled");
    }
    if show_timing {
        println!("⏱️ Internal timing: enabled");
    }
    if enable_comparison || show_timing {
        println!();
    }

    let samples_dir = Path::new("samples");
    let mut scenarios = common::discover_scenarios(samples_dir);

    // Filter scenarios if a filter is provided
    if let Some(ref filter) = scenario_filter {
        scenarios.retain(|s| s.name.contains(filter));
        println!("📋 Filtering scenarios matching: '{}'\n", filter);
    }

    if scenarios.is_empty() {
        if let Some(filter) = scenario_filter {
            println!(
                "ℹ️ No scenarios found matching '{}' in `{}`.",
                filter,
                samples_dir.display()
            );
        } else {
            println!(
                "ℹ️ No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
                samples_dir.display()
            );
        }
        return;
    }

    println!("📊 Found {} scenario(s)\n", scenarios.len());

    let mut total_parse_time = std::time::Duration::ZERO;
    let mut total_eval_time = std::time::Duration::ZERO;
    let mut successful_scenarios = 0;
    let mut comparison_failures = 0;

    for scenario in &scenarios {
        println!("==============================");
        println!("Scenario: {}", scenario.name);
        println!("Schema: {} ({})",
            scenario.schema_path.display(),
            if scenario.is_msgpack { "MessagePack" } else { "JSON" }
        );
        println!("Data: {}\n", scenario.data_path.display());

        // Clear timing data from previous scenarios
        if show_timing {
            json_eval_rs::enable_timing();
            json_eval_rs::clear_timing_data();
        }

        let data_str = fs::read_to_string(&scenario.data_path)
            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));

        println!("Running evaluation...\n");

        let (parse_time, eval_time, evaluated_schema, eval, iteration_times) = if use_parsed_schema {
            // ParsedSchema mode: parse once, reuse for all iterations/threads
            let start_time = Instant::now();

            let parsed_schema = if scenario.is_msgpack {
                let schema_msgpack = fs::read(&scenario.schema_path)
                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
                println!(" 📦 MessagePack schema size: {} bytes", schema_msgpack.len());
                Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
                    .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
            } else {
                let schema_str = fs::read_to_string(&scenario.schema_path)
                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
                Arc::new(ParsedSchema::parse(&schema_str)
                    .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
            };

            let parse_time = start_time.elapsed();
            println!(" Schema parsing & compilation: {:?}", parse_time);

            // Concurrent mode with ParsedSchema
            if let Some(thread_count) = concurrent_count {
                use std::thread;

                let eval_start = Instant::now();
                let mut handles = vec![];

                for thread_id in 0..thread_count {
                    let parsed_clone = parsed_schema.clone();
                    let data_str_clone = data_str.clone();
                    let iter_count = iterations;

                    let handle = thread::spawn(move || {
                        let mut thread_times = Vec::with_capacity(iter_count);
                        let mut last_schema = Value::Null;

                        for _ in 0..iter_count {
                            let iter_start = Instant::now();
                            let mut eval_instance = JSONEval::with_parsed_schema(
                                parsed_clone.clone(),
                                Some("{}"),
                                Some(&data_str_clone)
                            ).unwrap();

                            eval_instance.evaluate(&data_str_clone, Some("{}"), None).unwrap();
                            last_schema = eval_instance.get_evaluated_schema(false);
                            thread_times.push(iter_start.elapsed());
                        }

                        (thread_times, last_schema, thread_id)
                    });
                    handles.push(handle);
                }

                let mut all_iteration_times = Vec::new();
                let mut evaluated_schema = Value::Null;

                for handle in handles {
                    let (thread_times, thread_schema, thread_id) = handle.join().unwrap();
                    println!(" Thread {} completed {} iterations", thread_id, thread_times.len());
                    all_iteration_times.extend(thread_times);
                    evaluated_schema = thread_schema; // Use last thread's result
                }

                let eval_time = eval_start.elapsed();

                // Create a temp eval for metadata export
                let temp_eval = JSONEval::with_parsed_schema(
                    parsed_schema.clone(),
                    Some("{}"),
                    Some(&data_str)
                ).unwrap();

                (parse_time, eval_time, evaluated_schema, temp_eval, all_iteration_times)
            } else {
                // Sequential iterations with ParsedSchema
                let eval_start = Instant::now();
                let mut evaluated_schema = Value::Null;
                let mut iteration_times = Vec::with_capacity(iterations);
                let mut eval_instance = JSONEval::with_parsed_schema(
                    parsed_schema.clone(),
                    Some("{}"),
                    Some(&data_str)
                ).unwrap();

                for iter in 0..iterations {
                    let iter_start = Instant::now();
                    eval_instance.evaluate(&data_str, Some("{}"), None)
                        .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
                    evaluated_schema = eval_instance.get_evaluated_schema(false);
                    iteration_times.push(iter_start.elapsed());

                    if iterations > 1 && (iter + 1) % 10 == 0 {
                        print!(".");
                        if (iter + 1) % 50 == 0 {
                            println!(" {}/{}", iter + 1, iterations);
                        }
                    }
                }

                if iterations > 1 && iterations % 50 != 0 {
                    println!(" {}/{}", iterations, iterations);
                }

                let eval_time = eval_start.elapsed();
                (parse_time, eval_time, evaluated_schema, eval_instance, iteration_times)
            }
        } else {
            // Traditional mode: parse and create JSONEval each time
            let start_time = Instant::now();
            let mut eval = if scenario.is_msgpack {
                let schema_msgpack = fs::read(&scenario.schema_path)
                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
                println!(" 📦 MessagePack schema size: {} bytes", schema_msgpack.len());
                JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
                    .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e))
            } else {
                let schema_str = fs::read_to_string(&scenario.schema_path)
                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
                JSONEval::new(&schema_str, None, Some(&data_str))
                    .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
            };
            let parse_time = start_time.elapsed();
            println!(" Schema parsing & compilation: {:?}", parse_time);

            let eval_start = Instant::now();
            let mut evaluated_schema = Value::Null;
            let mut iteration_times = Vec::with_capacity(iterations);

            for iter in 0..iterations {
                let iter_start = Instant::now();
                eval.evaluate(&data_str, Some("{}"), None)
                    .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
                evaluated_schema = eval.get_evaluated_schema(false);
                iteration_times.push(iter_start.elapsed());

                if iterations > 1 && (iter + 1) % 10 == 0 {
                    print!(".");
                    if (iter + 1) % 50 == 0 {
                        println!(" {}/{}", iter + 1, iterations);
                    }
                }
            }

            if iterations > 1 && iterations % 50 != 0 {
                println!(" {}/{}", iterations, iterations);
            }

            let eval_time = eval_start.elapsed();
            (parse_time, eval_time, evaluated_schema, eval, iteration_times)
        };

        // Calculate statistics
        let total_iterations = iteration_times.len();
        if total_iterations == 1 {
            println!(" Evaluation: {:?}", eval_time);
        } else {
            let avg_time = eval_time / total_iterations as u32;
            let min_time = iteration_times.iter().min().unwrap();
            let max_time = iteration_times.iter().max().unwrap();

            println!(" Total evaluation time: {:?}", eval_time);
            println!(" Total iterations: {}", total_iterations);
            println!(" Average per iteration: {:?}", avg_time);
            println!(" Min: {:?} | Max: {:?}", min_time, max_time);

            // Show cache statistics
            let cache_stats = eval.cache_stats();
            println!(" Cache: {} entries, {} hits, {} misses ({:.1}% hit rate)",
                cache_stats.entries,
                cache_stats.hits,
                cache_stats.misses,
                cache_stats.hit_rate * 100.0
            );
        }

        let total_time = parse_time + eval_time;
        println!("⏱️ Execution time: {:?}\n", total_time);

        // Print detailed timing breakdown if --timing flag is set
        if show_timing {
            json_eval_rs::print_timing_summary();
        }

        // Track statistics
        total_parse_time += parse_time;
        total_eval_time += eval_time;
        successful_scenarios += 1;

        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));

        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));

        let mut metadata_obj = Map::new();
        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());

        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));

        println!("✅ Results saved:");
        println!(" - {}", evaluated_path.display());
        println!(" - {}\n", parsed_path.display());

        // Optional comparison
        if enable_comparison {
            if let Some(comp_path) = &scenario.comparison_path {
                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
                    comparison_failures += 1;
                }
                println!();
            }
        }
    }

    // Print summary statistics
    if successful_scenarios > 0 {
        println!("\n{}", "=".repeat(50));
        println!("📊 Summary Statistics");
        println!("{}", "=".repeat(50));
        println!("Total scenarios run: {}", successful_scenarios);
        println!("Total parsing time: {:?}", total_parse_time);
        println!("Total evaluation time: {:?}", total_eval_time);
        println!("Total time: {:?}", total_parse_time + total_eval_time);

        if successful_scenarios > 1 {
            println!("\nAverage per scenario:");
            println!(" Parsing: {:?}", total_parse_time / successful_scenarios as u32);
            println!(" Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
        }

        if enable_comparison {
            println!("\nComparison failures: {}", comparison_failures);
        }

        println!("\n✅ All scenarios completed successfully!\n");
    }
}

pub fn reload_schema( &mut self, schema: &str, context: Option<&str>, data: Option<&str>, ) -> Result<(), String>
pub fn set_timezone_offset(&mut self, offset_minutes: Option<i32>)
Set the timezone offset for datetime operations (TODAY, NOW)
This method updates the RLogic engine configuration with a new timezone offset. The offset will be applied to all subsequent datetime evaluations.
§Arguments
offset_minutes - Timezone offset in minutes from UTC (e.g., 420 for UTC+7, -300 for UTC-5). Pass None to reset to UTC (no offset).
§Example
let mut eval = JSONEval::new(schema, None, None)?;
// Set to UTC+7 (Jakarta, Bangkok)
eval.set_timezone_offset(Some(420));
// Reset to UTC
eval.set_timezone_offset(None);
pub fn reload_schema_msgpack( &mut self, schema_msgpack: &[u8], context: Option<&str>, data: Option<&str>, ) -> Result<(), String>
pub fn reload_schema_parsed( &mut self, parsed: Arc<ParsedSchema>, context: Option<&str>, data: Option<&str>, ) -> Result<(), String>
Reload schema from a cached ParsedSchema
This is the most efficient way to reload as it reuses pre-parsed schema compilation.
§Arguments
parsed - Arc reference to a cached ParsedSchema
context - Optional context data JSON string
data - Optional initial data JSON string
§Returns
A Result indicating success or an error message
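A minimal usage sketch (not from the crate docs), assuming `schema_str` and `data_str` are JSON strings loaded by the caller; error handling via `?` follows the examples above.
let parsed = Arc::new(ParsedSchema::parse(&schema_str)?);
let mut eval = JSONEval::with_parsed_schema(parsed.clone(), Some("{}"), Some(&data_str))?;
// Later, rebuild this instance's state from the same cached compilation
eval.reload_schema_parsed(parsed.clone(), None, Some(&data_str))?;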
pub fn reload_schema_from_cache( &mut self, cache_key: &str, context: Option<&str>, data: Option<&str>, ) -> Result<(), String>
Reload schema from ParsedSchemaCache using a cache key
This is the recommended way for cross-platform cached schema reloading.
§Arguments
cache_key - Key to look up in the global ParsedSchemaCache
context - Optional context data JSON string
data - Optional initial data JSON string
§Returns
A Result indicating success or an error message
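A minimal sketch; "policy-form-v1" is a hypothetical key assumed to have been registered in the global ParsedSchemaCache beforehand.
// Hypothetical cache key; reuses the compilation stored under that key
eval.reload_schema_from_cache("policy-form-v1", None, Some(&data_str))?;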
impl JSONEval
pub fn resolve_layout(&mut self, evaluate: bool)
Resolve layout references with optional evaluation
impl JSONEval
pub fn run_logic( &mut self, logic_id: CompiledLogicId, data: Option<&Value>, context: Option<&Value>, ) -> Result<Value, String>
Run pre-compiled logic against current data
pub fn compile_logic(&self, logic_str: &str) -> Result<CompiledLogicId, String>
Compile a logic expression from a JSON string and store it globally
pub fn compile_logic_value( &self, logic: &Value, ) -> Result<CompiledLogicId, String>
Compile a logic expression from a Value and store it globally
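A minimal sketch of compiling and then running a standalone expression; the rule shown ({"+": [1, 2]}) is only an illustrative assumption about the engine's JSON logic syntax, and compile_logic_value works the same way from a Value.
// Compile once, then run against the instance's current data and context
let logic_id = eval.compile_logic(r#"{"+": [1, 2]}"#)?;
let result = eval.run_logic(logic_id, None, None)?;
println!("{}", result); // expected to print 3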
impl JSONEval
pub fn evaluate_subform( &mut self, subform_path: &str, data: &str, context: Option<&str>, paths: Option<&[String]>, ) -> Result<(), String>
Evaluate a subform with data and optional selective paths
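A minimal sketch; "#/dependents" is a hypothetical subform path and `rows_json` is assumed to hold the subform's data as a JSON string.
eval.evaluate_subform("#/dependents", &rows_json, None, None)?;
let sub_schema = eval.get_evaluated_schema_subform("#/dependents", false);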
pub fn validate_subform( &mut self, subform_path: &str, data: &str, context: Option<&str>, paths: Option<&[String]>, ) -> Result<ValidationResult, String>
Validate subform data against its schema rules
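A minimal sketch with the same hypothetical path and data; passing None for paths is assumed to validate the whole subform rather than a selected subset.
let validation = eval.validate_subform("#/dependents", &rows_json, None, None)?;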
pub fn evaluate_dependents_subform( &mut self, subform_path: &str, changed_paths: &[String], data: Option<&str>, context: Option<&str>, re_evaluate: bool, ) -> Result<Value, String>
Evaluate dependents in subform when a field changes
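A minimal sketch; the subform path and the changed field path are hypothetical.
let changed = vec!["dependents.0.birth_date".to_string()];
let updates = eval.evaluate_dependents_subform("#/dependents", &changed, None, None, true)?;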
pub fn resolve_layout_subform( &mut self, subform_path: &str, evaluate: bool, ) -> Result<(), String>
Resolve layout for subform
pub fn get_evaluated_schema_subform( &mut self, subform_path: &str, resolve_layout: bool, ) -> Value
Get evaluated schema from subform
pub fn get_schema_value_subform(&mut self, subform_path: &str) -> Value
Get schema value from subform (all .value fields)
pub fn get_evaluated_schema_without_params_subform( &mut self, subform_path: &str, resolve_layout: bool, ) -> Value
Get evaluated schema without $params from subform
pub fn get_evaluated_schema_by_path_subform( &mut self, subform_path: &str, schema_path: &str, skip_layout: bool, ) -> Option<Value>
Get evaluated schema by specific path from subform
pub fn get_evaluated_schema_by_paths_subform( &mut self, subform_path: &str, schema_paths: &[String], skip_layout: bool, format: Option<ReturnFormat>, ) -> Value
Get evaluated schema by multiple paths from subform
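A minimal sketch; the subform path and schema paths are hypothetical, and passing None for format is assumed to fall back to the default ReturnFormat.
let paths = vec!["#/name".to_string(), "#/age".to_string()];
let subset = eval.get_evaluated_schema_by_paths_subform("#/dependents", &paths, false, None);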
pub fn get_schema_by_path_subform( &self, subform_path: &str, schema_path: &str, ) -> Option<Value>
Get schema by specific path from subform
pub fn get_schema_by_paths_subform( &self, subform_path: &str, schema_paths: &[String], format: Option<ReturnFormat>, ) -> Value
Get schema by multiple paths from subform
pub fn get_subform_paths(&self) -> Vec<String>
Get list of available subform paths
pub fn has_subform(&self, subform_path: &str) -> bool
Check if a subform exists at the given path
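A minimal sketch that enumerates the subforms discovered at parse time and then checks a hypothetical path.
for path in eval.get_subform_paths() {
    println!("subform: {}", path);
}
let exists = eval.has_subform("#/dependents");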