JSONEval

Struct JSONEval 

pub struct JSONEval {
    pub schema: Arc<Value>,
    pub engine: Arc<RLogic>,
    pub evaluations: Arc<IndexMap<String, LogicId>>,
    pub tables: Arc<IndexMap<String, Value>>,
    pub table_metadata: Arc<IndexMap<String, TableMetadata>>,
    pub dependencies: Arc<IndexMap<String, IndexSet<String>>>,
    pub sorted_evaluations: Arc<Vec<Vec<String>>>,
    pub dependents_evaluations: Arc<IndexMap<String, Vec<DependentItem>>>,
    pub rules_evaluations: Arc<Vec<String>>,
    pub fields_with_rules: Arc<Vec<String>>,
    pub others_evaluations: Arc<Vec<String>>,
    pub value_evaluations: Arc<Vec<String>>,
    pub layout_paths: Arc<Vec<String>>,
    pub options_templates: Arc<Vec<(String, String, String)>>,
    pub subforms: IndexMap<String, Box<JSONEval>>,
    pub context: Value,
    pub data: Value,
    pub evaluated_schema: Value,
    pub eval_data: EvalData,
    pub eval_cache: EvalCache,
    pub cache_enabled: bool,
    /* private fields */
}

Fields

schema: Arc<Value>
engine: Arc<RLogic>
evaluations: Arc<IndexMap<String, LogicId>>

Zero-copy Arc-wrapped collections (shared from ParsedSchema)

tables: Arc<IndexMap<String, Value>>
table_metadata: Arc<IndexMap<String, TableMetadata>>

Pre-compiled table metadata (computed at parse time for zero-copy evaluation)

dependencies: Arc<IndexMap<String, IndexSet<String>>>
sorted_evaluations: Arc<Vec<Vec<String>>>

Evaluations grouped into parallel-executable batches. Each inner Vec contains evaluations that can run concurrently.
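
A minimal sketch of inspecting these batches after parsing; the schema string is a placeholder, and JSONEval is assumed to be exported at the crate root as in the repository examples below:

use json_eval_rs::JSONEval;

fn inspect_batches(schema_json: &str) -> Result<(), Box<dyn std::error::Error>> {
    // Parsing builds the dependency graph and groups evaluations into batches.
    let eval = JSONEval::new(schema_json, None, None)?;

    // Each inner Vec is a batch whose members do not depend on each other.
    for (i, batch) in eval.sorted_evaluations.iter().enumerate() {
        println!("batch {}: {} evaluation(s)", i, batch.len());
    }

    // The raw dependency map is available alongside the batches.
    println!("{} entries in the dependency map", eval.dependencies.len());
    Ok(())
}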

dependents_evaluations: Arc<IndexMap<String, Vec<DependentItem>>>

Evaluations categorized for result handling. Dependents: map from source field to list of dependent items.

rules_evaluations: Arc<Vec<String>>

Rules: evaluations with “/rules/” in path

fields_with_rules: Arc<Vec<String>>

Fields with rules: dotted paths of all fields that have rules (for efficient validation)

others_evaluations: Arc<Vec<String>>

Others: all other evaluations not in sorted_evaluations (for evaluated_schema output)

value_evaluations: Arc<Vec<String>>

Value: evaluations ending with “.value” in path

layout_paths: Arc<Vec<String>>

Cached layout paths (collected at parse time)

options_templates: Arc<Vec<(String, String, String)>>

Options URL templates (url_path, template_str, params_path) collected at parse time

subforms: IndexMap<String, Box<JSONEval>>

Subforms: isolated JSONEval instances for array fields with items. The key is the schema path (e.g., “#/riders”) and the value is the sub-JSONEval.

context: Value
data: Value
evaluated_schema: Value
eval_data: EvalData
eval_cache: EvalCache

Evaluation cache with content-based hashing and zero-copy storage

cache_enabled: bool

Flag to enable/disable evaluation caching. Set to false for web API usage where each request creates a new JSONEval instance.
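
A short sketch of toggling the cache for a one-off, per-request evaluation (mirroring the cache_disable example below; the schema and data strings are placeholders):

use json_eval_rs::JSONEval;

fn handle_request(schema_json: &str, data_json: &str) -> Result<(), Box<dyn std::error::Error>> {
    let mut eval = JSONEval::new(schema_json, None, Some(data_json))?;

    // Skip cache bookkeeping: this instance is evaluated once and dropped.
    eval.disable_cache();
    println!("cache enabled: {}", eval.is_cache_enabled());

    eval.evaluate(data_json, None)?;
    println!("cache entries after evaluation: {}", eval.cache_len());

    // enable_cache() turns caching back on if the instance will be reused.
    Ok(())
}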

Implementations

impl JSONEval

pub fn evaluate_subform(&mut self, subform_path: &str, data: &str, context: Option<&str>) -> Result<(), String>

Evaluate a subform with data

pub fn validate_subform(&mut self, subform_path: &str, data: &str, context: Option<&str>, paths: Option<&[String]>) -> Result<ValidationResult, String>

Validate subform data against its schema rules
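
A hedged sketch of calling validate_subform; the “#/riders” path and the data are hypothetical, and only the Ok/Err outcome is inspected since ValidationResult's fields are not shown on this page:

use json_eval_rs::JSONEval;

fn validate_rider(eval: &mut JSONEval) {
    let rider_json = r#"{"age": 17}"#;
    // Passing None for paths validates every field that has rules.
    match eval.validate_subform("#/riders", rider_json, None, None) {
        Ok(_result) => println!("validation completed; inspect the ValidationResult for details"),
        Err(e) => eprintln!("validation could not run: {}", e),
    }
}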

pub fn evaluate_dependents_subform(&mut self, subform_path: &str, changed_paths: &[String], data: Option<&str>, context: Option<&str>, re_evaluate: bool) -> Result<Value, String>

Evaluate dependents in subform when a field changes
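
A sketch of re-evaluating dependents after a single field changes; the subform path, changed path, and data are hypothetical:

use json_eval_rs::JSONEval;

fn on_rider_age_changed(eval: &mut JSONEval) -> Result<(), String> {
    let changed = vec!["age".to_string()];
    let rider_json = r#"{"age": 42}"#;

    // Only evaluations that depend on the changed path are re-run.
    let updates = eval.evaluate_dependents_subform("#/riders", &changed, Some(rider_json), None, true)?;
    println!("dependent updates: {}", updates);
    Ok(())
}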

pub fn resolve_layout_subform(&mut self, subform_path: &str, evaluate: bool) -> Result<(), String>

Resolve layout for subform

pub fn get_evaluated_schema_subform(&mut self, subform_path: &str, resolve_layout: bool) -> Value

Get evaluated schema from subform


pub fn get_schema_value_subform(&mut self, subform_path: &str) -> Value

Get schema value from subform (all .value fields)

pub fn get_evaluated_schema_without_params_subform(&mut self, subform_path: &str, resolve_layout: bool) -> Value

Get evaluated schema without $params from subform

pub fn get_evaluated_schema_by_path_subform(&mut self, subform_path: &str, schema_path: &str, skip_layout: bool) -> Option<Value>

Get evaluated schema by specific path from subform

pub fn get_evaluated_schema_by_paths_subform(&mut self, subform_path: &str, schema_paths: &[String], skip_layout: bool, format: Option<ReturnFormat>) -> Value

Get evaluated schema by multiple paths from subform

pub fn get_schema_by_path_subform(&self, subform_path: &str, schema_path: &str) -> Option<Value>

Get schema by specific path from subform

pub fn get_schema_by_paths_subform(&self, subform_path: &str, schema_paths: &[String], format: Option<ReturnFormat>) -> Value

Get schema by multiple paths from subform


pub fn get_subform_paths(&self) -> Vec<String>

Get list of available subform paths


pub fn has_subform(&self, subform_path: &str) -> bool

Check if a subform exists at the given path
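
Putting the subform methods together, a sketch under the assumption that the schema defines a “#/riders” array subform; the data is a placeholder:

use json_eval_rs::JSONEval;

fn evaluate_riders(eval: &mut JSONEval) -> Result<(), String> {
    println!("available subforms: {:?}", eval.get_subform_paths());

    if eval.has_subform("#/riders") {
        // Evaluate the subform with its own data, then read back its evaluated schema.
        eval.evaluate_subform("#/riders", r#"{"age": 30}"#, None)?;
        let schema = eval.get_evaluated_schema_subform("#/riders", false);
        println!("evaluated rider schema: {}", schema);
    }
    Ok(())
}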

impl JSONEval

pub fn new(schema: &str, context: Option<&str>, data: Option<&str>) -> Result<Self, Error>

Examples found in repository
examples/cache_demo.rs (line 173)
148 fn demo_performance_comparison() -> Result<(), Box<dyn std::error::Error>> {
149    println!("⚡ Example 3: Performance Comparison");
150    println!("Comparing cached vs non-cached schema usage...\n");
151    
152    let schema_json = r#"{
153        "$params": {
154            "value": { "type": "number" }
155        },
156        "doubled": {
157            "type": "number",
158            "$evaluation": { "*": [{"var": "$value"}, 2] }
159        },
160        "tripled": {
161            "type": "number",
162            "$evaluation": { "*": [{"var": "$value"}, 3] }
163        }
164    }"#;
165    
166    let iterations = 100;
167    
168    // WITHOUT CACHE: Parse schema every time
169    println!("🐌 Without cache (parse + evaluate each time):");
170    let start = Instant::now();
171    for i in 0..iterations {
172        let context = format!(r#"{{"value": {}}}"#, i);
173        let mut eval = JSONEval::new(schema_json, Some(&context), None)?;
174        eval.evaluate("{}", None)?;
175    }
176    let without_cache = start.elapsed();
177    println!("   Time: {:?}", without_cache);
178    println!("   Avg per iteration: {:?}\n", without_cache / iterations);
179    
180    // WITH CACHE: Parse once, evaluate many times
181    println!("🚀 With cache (parse once, reuse for all evaluations):");
182    let cache = ParsedSchemaCache::new();
183    
184    // Parse once
185    let parse_start = Instant::now();
186    let parsed = ParsedSchema::parse(schema_json)?;
187    cache.insert("perf-test".to_string(), Arc::new(parsed));
188    let parse_time = parse_start.elapsed();
189    
190    // Evaluate many times
191    let eval_start = Instant::now();
192    for i in 0..iterations {
193        if let Some(cached) = cache.get("perf-test") {
194            let context = format!(r#"{{"value": {}}}"#, i);
195            let mut eval = JSONEval::with_parsed_schema(cached.clone(), Some(&context), None)?;
196            eval.evaluate("{}", None)?;
197        }
198    }
199    let eval_time = eval_start.elapsed();
200    let with_cache = parse_time + eval_time;
201    
202    println!("   Parse time: {:?}", parse_time);
203    println!("   Eval time: {:?}", eval_time);
204    println!("   Total time: {:?}", with_cache);
205    println!("   Avg per iteration: {:?}\n", eval_time / iterations);
206    
207    let speedup = without_cache.as_secs_f64() / with_cache.as_secs_f64();
208    println!("📈 Speedup: {:.2}x faster", speedup);
209    
210    Ok(())
211 }
More examples
examples/cache_disable.rs (line 43)
4 fn main() {
5    let schema = json!({
6        "type": "object",
7        "properties": {
8            "price": {
9                "type": "number"
10            },
11            "tax": {
12                "type": "number",
13                "value": {
14                    "$evaluation": {
15                        "*": [
16                            { "$ref": "#/properties/price" },
17                            0.1
18                        ]
19                    }
20                }
21            },
22            "total": {
23                "type": "number",
24                "value": {
25                    "$evaluation": {
26                        "+": [
27                            { "$ref": "#/properties/price" },
28                            { "$ref": "#/properties/tax" }
29                        ]
30                    }
31                }
32            }
33        }
34    });
35
36    let schema_str = serde_json::to_string(&schema).unwrap();
37    
38    println!("=== Example 1: With Caching (Default) ===");
39    {
40        let data = json!({ "price": 100 });
41        let data_str = serde_json::to_string(&data).unwrap();
42        
43        let mut eval = JSONEval::new(&schema_str, None, Some(&data_str)).unwrap();
44        
45        println!("Cache enabled: {}", eval.is_cache_enabled());
46        println!("Initial cache size: {}", eval.cache_len());
47        
48        eval.evaluate(&data_str, None).unwrap();
49        
50        println!("After evaluation cache size: {}", eval.cache_len());
51        let stats = eval.cache_stats();
52        println!("Cache stats: {}", stats);
53    }
54    
55    println!("\n=== Example 2: Without Caching (Web API Mode) ===");
56    {
57        let data = json!({ "price": 200 });
58        let data_str = serde_json::to_string(&data).unwrap();
59        
60        let mut eval = JSONEval::new(&schema_str, None, Some(&data_str)).unwrap();
61        
62        // Disable caching for single-use web API scenario
63        eval.disable_cache();
64        
65        println!("Cache enabled: {}", eval.is_cache_enabled());
66        println!("Initial cache size: {}", eval.cache_len());
67        
68        eval.evaluate(&data_str, None).unwrap();
69        
70        println!("After evaluation cache size: {}", eval.cache_len());
71        let stats = eval.cache_stats();
72        println!("Cache stats: {}", stats);
73        
74        println!("\n✅ No cache overhead - perfect for web APIs!");
75    }
76    
77    println!("\n=== Example 3: Re-enabling Cache ===");
78    {
79        let data = json!({ "price": 300 });
80        let data_str = serde_json::to_string(&data).unwrap();
81        
82        let mut eval = JSONEval::new(&schema_str, None, Some(&data_str)).unwrap();
83        
84        // Disable then re-enable
85        eval.disable_cache();
86        eval.enable_cache();
87        
88        println!("Cache enabled: {}", eval.is_cache_enabled());
89        eval.evaluate(&data_str, None).unwrap();
90        
91        println!("Cache size after evaluation: {}", eval.cache_len());
92        println!("\n✅ Cache can be toggled as needed!");
93    }
94 }
examples/basic.rs (line 130)
28 fn main() {
29    let args: Vec<String> = std::env::args().collect();
30    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic");
31    
32    let mut scenario_filter: Option<String> = None;
33    let mut enable_comparison = false;
34    let mut show_timing = false;
35    let mut i = 1;
36    
37    // Parse arguments
38    while i < args.len() {
39        let arg = &args[i];
40        
41        if arg == "-h" || arg == "--help" {
42            print_help(program_name);
43            return;
44        } else if arg == "--compare" {
45            enable_comparison = true;
46        } else if arg == "--timing" {
47            show_timing = true;
48        } else if !arg.starts_with('-') {
49            scenario_filter = Some(arg.clone());
50        } else {
51            eprintln!("Error: unknown option '{}'", arg);
52            print_help(program_name);
53            return;
54        }
55        
56        i += 1;
57    }
58    
59    println!("\n🚀 JSON Evaluation - Basic Example (JSON Schema)\n");
60    
61    if enable_comparison {
62        println!("🔍 Comparison: enabled");
63    }
64    if show_timing {
65        println!("⏱️  Internal timing: enabled");
66    }
67    if enable_comparison || show_timing {
68        println!();
69    }
70    
71    let samples_dir = Path::new("samples");
72    let mut scenarios = common::discover_scenarios(samples_dir);
73    
74    // Filter out MessagePack scenarios - only use JSON
75    scenarios.retain(|s| !s.is_msgpack);
76    
77    // Filter scenarios if a filter is provided
78    if let Some(ref filter) = scenario_filter {
79        scenarios.retain(|s| s.name.contains(filter));
80        println!("📋 Filtering scenarios matching: '{}'\n", filter);
81    }
82
83    if scenarios.is_empty() {
84        if let Some(filter) = scenario_filter {
85            println!(
86                "ℹ️  No scenarios found matching '{}' in `{}`.",
87                filter,
88                samples_dir.display()
89            );
90        } else {
91            println!(
92                "ℹ️  No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
93                samples_dir.display()
94            );
95        }
96        return;
97    }
98    
99    println!("📊 Found {} scenario(s)\n", scenarios.len());
100
101    let mut total_parse_time = std::time::Duration::ZERO;
102    let mut total_eval_time = std::time::Duration::ZERO;
103    let mut successful_scenarios = 0;
104    let mut comparison_failures = 0;
105
106    for scenario in &scenarios {
107        println!("==============================");
108        println!("Scenario: {}", scenario.name);
109        println!("Schema: {} ({})", 
110            scenario.schema_path.display(),
111            if scenario.is_msgpack { "MessagePack" } else { "JSON" }
112        );
113        println!("Data: {}\n", scenario.data_path.display());
114
115        // Clear timing data from previous scenarios
116        if show_timing {
117            json_eval_rs::enable_timing();
118            json_eval_rs::clear_timing_data();
119        }
120
121        let data_str = fs::read_to_string(&scenario.data_path)
122            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
123
124        // Step 1: Parse schema (JSONEval::new)
125        let parse_start = Instant::now();
126        
127        let schema_str = fs::read_to_string(&scenario.schema_path)
128            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
129        
130        let mut eval = JSONEval::new(&schema_str, None, Some(&data_str))
131            .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e));
132        
133        let parse_time = parse_start.elapsed();
134        println!("  📝 Parse (new): {:?}", parse_time);
135        
136        // Step 2: Evaluate
137        let eval_start = Instant::now();
138        
139        eval.evaluate(&data_str, Some("{}"))
140            .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
141        
142        let evaluated_schema = eval.get_evaluated_schema(false);
143        let eval_time = eval_start.elapsed();
144        
145        println!("  ⚡ Eval: {:?}", eval_time);
146        println!("  ⏱️  Total: {:?}\n", parse_time + eval_time);
147        
148        // Print detailed timing breakdown if --timing flag is set
149        if show_timing {
150            json_eval_rs::print_timing_summary();
151        }
152        
153        total_parse_time += parse_time;
154        total_eval_time += eval_time;
155        successful_scenarios += 1;
156
157        // Save results
158        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
159        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
160
161        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
162            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
163
164        let mut metadata_obj = Map::new();
165        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
166        metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
167        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
168
169        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
170            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
171
172        println!("✅ Results saved:");
173        println!("  - {}", evaluated_path.display());
174        println!("  - {}\n", parsed_path.display());
175
176        // Optional comparison
177        if enable_comparison {
178            if let Some(comp_path) = &scenario.comparison_path {
179                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
180                    comparison_failures += 1;
181                }
182                println!();
183            }
184        }
185    }
186    
187    // Print summary
188    println!("{}", "=".repeat(50));
189    println!("📊 Summary");
190    println!("{}", "=".repeat(50));
191    println!("Total scenarios run: {}", successful_scenarios);
192    println!("Total parse time: {:?}", total_parse_time);
193    println!("Total eval time: {:?}", total_eval_time);
194    println!("Total time: {:?}", total_parse_time + total_eval_time);
195    
196    if successful_scenarios > 1 {
197        println!("\nAverage per scenario:");
198        println!("  Parse: {:?}", total_parse_time / successful_scenarios as u32);
199        println!("  Eval: {:?}", total_eval_time / successful_scenarios as u32);
200    }
201    
202    if enable_comparison {
203        println!("Comparison failures: {}", comparison_failures);
204    }
205    
206    println!("\n✅ All scenarios completed!\n");
207 }
examples/benchmark.rs (line 296)
31 fn main() {
32    let args: Vec<String> = std::env::args().collect();
33    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("benchmark");
34    
35    let mut iterations = 1usize;
36    let mut scenario_filter: Option<String> = None;
37    let mut show_cpu_info = false;
38    let mut use_parsed_schema = false;
39    let mut concurrent_count: Option<usize> = None;
40    let mut enable_comparison = false;
41    let mut show_timing = false;
42    let mut i = 1;
43    
44    // Parse arguments
45    while i < args.len() {
46        let arg = &args[i];
47        
48        if arg == "-h" || arg == "--help" {
49            print_help(program_name);
50            return;
51        } else if arg == "--cpu-info" {
52            show_cpu_info = true;
53        } else if arg == "--parsed" {
54            use_parsed_schema = true;
55        } else if arg == "--compare" {
56            enable_comparison = true;
57        } else if arg == "--timing" {
58            show_timing = true;
59        } else if arg == "--concurrent" {
60            if i + 1 >= args.len() {
61                eprintln!("Error: {} requires a value", arg);
62                print_help(program_name);
63                return;
64            }
65            i += 1;
66            match args[i].parse::<usize>() {
67                Ok(n) if n > 0 => concurrent_count = Some(n),
68                _ => {
69                    eprintln!("Error: concurrent count must be a positive integer, got '{}'", args[i]);
70                    return;
71                }
72            }
73        } else if arg == "-i" || arg == "--iterations" {
74            if i + 1 >= args.len() {
75                eprintln!("Error: {} requires a value", arg);
76                print_help(program_name);
77                return;
78            }
79            i += 1;
80            match args[i].parse::<usize>() {
81                Ok(n) if n > 0 => iterations = n,
82                _ => {
83                    eprintln!("Error: iterations must be a positive integer, got '{}'", args[i]);
84                    return;
85                }
86            }
87        } else if !arg.starts_with('-') {
88            scenario_filter = Some(arg.clone());
89        } else {
90            eprintln!("Error: unknown option '{}'", arg);
91            print_help(program_name);
92            return;
93        }
94        
95        i += 1;
96    }
97    
98    println!("\n🚀 JSON Evaluation - Benchmark\n");
99    
100    // Show CPU info if requested or if running benchmarks
101    if show_cpu_info || iterations > 1 || concurrent_count.is_some() {
102        common::print_cpu_info();
103    }
104    
105    if use_parsed_schema {
106        println!("📦 Mode: ParsedSchema (parse once, reuse for all iterations)\n");
107    }
108    
109    if let Some(count) = concurrent_count {
110        println!("🔀 Concurrent evaluations: {} threads\n", count);
111    } else if iterations > 1 {
112        println!("🔄 Iterations per scenario: {}\n", iterations);
113    }
114    
115    if enable_comparison {
116        println!("🔍 Comparison: enabled");
117    }
118    if show_timing {
119        println!("⏱️  Internal timing: enabled");
120    }
121    if enable_comparison || show_timing {
122        println!();
123    }
124
125    let samples_dir = Path::new("samples");
126    let mut scenarios = common::discover_scenarios(samples_dir);
127    
128    // Filter scenarios if a filter is provided
129    if let Some(ref filter) = scenario_filter {
130        scenarios.retain(|s| s.name.contains(filter));
131        println!("📋 Filtering scenarios matching: '{}'\n", filter);
132    }
133
134    if scenarios.is_empty() {
135        if let Some(filter) = scenario_filter {
136            println!(
137                "ℹ️  No scenarios found matching '{}' in `{}`.",
138                filter,
139                samples_dir.display()
140            );
141        } else {
142            println!(
143                "ℹ️  No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
144                samples_dir.display()
145            );
146        }
147        return;
148    }
149    
150    println!("📊 Found {} scenario(s)\n", scenarios.len());
151
152    let mut total_parse_time = std::time::Duration::ZERO;
153    let mut total_eval_time = std::time::Duration::ZERO;
154    let mut successful_scenarios = 0;
155    let mut comparison_failures = 0;
156
157    for scenario in &scenarios {
158        println!("==============================");
159        println!("Scenario: {}", scenario.name);
160        println!("Schema: {} ({})", 
161            scenario.schema_path.display(),
162            if scenario.is_msgpack { "MessagePack" } else { "JSON" }
163        );
164        println!("Data: {}\n", scenario.data_path.display());
165
166        // Clear timing data from previous scenarios
167        if show_timing {
168            json_eval_rs::enable_timing();
169            json_eval_rs::clear_timing_data();
170        }
171
172        let data_str = fs::read_to_string(&scenario.data_path)
173            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
174
175        println!("Running evaluation...\n");
176
177        let (parse_time, eval_time, evaluated_schema, eval, iteration_times) = if use_parsed_schema {
178            // ParsedSchema mode: parse once, reuse for all iterations/threads
179            let start_time = Instant::now();
180            
181            let parsed_schema = if scenario.is_msgpack {
182                let schema_msgpack = fs::read(&scenario.schema_path)
183                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
184                println!("  📦 MessagePack schema size: {} bytes", schema_msgpack.len());
185                Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
186                    .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
187            } else {
188                let schema_str = fs::read_to_string(&scenario.schema_path)
189                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
190                Arc::new(ParsedSchema::parse(&schema_str)
191                    .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
192            };
193            
194            let parse_time = start_time.elapsed();
195            println!("  Schema parsing & compilation: {:?}", parse_time);
196            
197            // Concurrent mode with ParsedSchema
198            if let Some(thread_count) = concurrent_count {
199                use std::thread;
200                
201                let eval_start = Instant::now();
202                let mut handles = vec![];
203                
204                for thread_id in 0..thread_count {
205                    let parsed_clone = parsed_schema.clone();
206                    let data_str_clone = data_str.clone();
207                    let iter_count = iterations;
208                    
209                    let handle = thread::spawn(move || {
210                        let mut thread_times = Vec::with_capacity(iter_count);
211                        let mut last_schema = Value::Null;
212                        
213                        for _ in 0..iter_count {
214                            let iter_start = Instant::now();
215                            let mut eval_instance = JSONEval::with_parsed_schema(
216                                parsed_clone.clone(),
217                                Some("{}"),
218                                Some(&data_str_clone)
219                            ).unwrap();
220                            
221                            eval_instance.evaluate(&data_str_clone, Some("{}")).unwrap();
222                            last_schema = eval_instance.get_evaluated_schema(false);
223                            thread_times.push(iter_start.elapsed());
224                        }
225                        
226                        (thread_times, last_schema, thread_id)
227                    });
228                    handles.push(handle);
229                }
230                
231                let mut all_iteration_times = Vec::new();
232                let mut evaluated_schema = Value::Null;
233                
234                for handle in handles {
235                    let (thread_times, thread_schema, thread_id) = handle.join().unwrap();
236                    println!("  Thread {} completed {} iterations", thread_id, thread_times.len());
237                    all_iteration_times.extend(thread_times);
238                    evaluated_schema = thread_schema; // Use last thread's result
239                }
240                
241                let eval_time = eval_start.elapsed();
242                
243                // Create a temp eval for metadata export
244                let temp_eval = JSONEval::with_parsed_schema(
245                    parsed_schema.clone(),
246                    Some("{}"),
247                    Some(&data_str)
248                ).unwrap();
249                
250                (parse_time, eval_time, evaluated_schema, temp_eval, all_iteration_times)
251            } else {
252                // Sequential iterations with ParsedSchema
253                let eval_start = Instant::now();
254                let mut evaluated_schema = Value::Null;
255                let mut iteration_times = Vec::with_capacity(iterations);
256                let mut eval_instance = JSONEval::with_parsed_schema(
257                    parsed_schema.clone(),
258                    Some("{}"),
259                    Some(&data_str)
260                ).unwrap();
261                
262                for iter in 0..iterations {
263                    let iter_start = Instant::now();
264                    eval_instance.evaluate(&data_str, Some("{}"))
265                        .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
266                    evaluated_schema = eval_instance.get_evaluated_schema(false);
267                    iteration_times.push(iter_start.elapsed());
268                    
269                    if iterations > 1 && (iter + 1) % 10 == 0 {
270                        print!(".");
271                        if (iter + 1) % 50 == 0 {
272                            println!(" {}/{}", iter + 1, iterations);
273                        }
274                    }
275                }
276                
277                if iterations > 1 && iterations % 50 != 0 {
278                    println!(" {}/{}", iterations, iterations);
279                }
280                
281                let eval_time = eval_start.elapsed();
282                (parse_time, eval_time, evaluated_schema, eval_instance, iteration_times)
283            }
284        } else {
285            // Traditional mode: parse and create JSONEval each time
286            let start_time = Instant::now();
287            let mut eval = if scenario.is_msgpack {
288                let schema_msgpack = fs::read(&scenario.schema_path)
289                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
290                println!("  📦 MessagePack schema size: {} bytes", schema_msgpack.len());
291                JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
292                    .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e))
293            } else {
294                let schema_str = fs::read_to_string(&scenario.schema_path)
295                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
296                JSONEval::new(&schema_str, None, Some(&data_str))
297                    .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
298            };
299            let parse_time = start_time.elapsed();
300            println!("  Schema parsing & compilation: {:?}", parse_time);
301            
302            let eval_start = Instant::now();
303            let mut evaluated_schema = Value::Null;
304            let mut iteration_times = Vec::with_capacity(iterations);
305            
306            for iter in 0..iterations {
307                let iter_start = Instant::now();
308                eval.evaluate(&data_str, Some("{}"))
309                    .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
310                evaluated_schema = eval.get_evaluated_schema(false);
311                iteration_times.push(iter_start.elapsed());
312                
313                if iterations > 1 && (iter + 1) % 10 == 0 {
314                    print!(".");
315                    if (iter + 1) % 50 == 0 {
316                        println!(" {}/{}", iter + 1, iterations);
317                    }
318                }
319            }
320            
321            if iterations > 1 && iterations % 50 != 0 {
322                println!(" {}/{}", iterations, iterations);
323            }
324            
325            let eval_time = eval_start.elapsed();
326            (parse_time, eval_time, evaluated_schema, eval, iteration_times)
327        };
328        
329        // Calculate statistics
330        let total_iterations = iteration_times.len();
331        if total_iterations == 1 {
332            println!("  Evaluation: {:?}", eval_time);
333        } else {
334            let avg_time = eval_time / total_iterations as u32;
335            let min_time = iteration_times.iter().min().unwrap();
336            let max_time = iteration_times.iter().max().unwrap();
337            
338            println!("  Total evaluation time: {:?}", eval_time);
339            println!("  Total iterations: {}", total_iterations);
340            println!("  Average per iteration: {:?}", avg_time);
341            println!("  Min: {:?} | Max: {:?}", min_time, max_time);
342            
343            // Show cache statistics
344            let cache_stats = eval.cache_stats();
345            println!("  Cache: {} entries, {} hits, {} misses ({:.1}% hit rate)",
346                cache_stats.entries,
347                cache_stats.hits,
348                cache_stats.misses,
349                cache_stats.hit_rate * 100.0
350            );
351        }
352
353        let total_time = parse_time + eval_time;
354        println!("⏱️  Execution time: {:?}\n", total_time);
355        
356        // Print detailed timing breakdown if --timing flag is set
357        if show_timing {
358            json_eval_rs::print_timing_summary();
359        }
360        
361        // Track statistics
362        total_parse_time += parse_time;
363        total_eval_time += eval_time;
364        successful_scenarios += 1;
365
366        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
367        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
368
369        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
370            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
371
372        let mut metadata_obj = Map::new();
373        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
374        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
375
376        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
377            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
378
379        println!("✅ Results saved:");
380        println!("  - {}", evaluated_path.display());
381        println!("  - {}\n", parsed_path.display());
382
383        // Optional comparison
384        if enable_comparison {
385            if let Some(comp_path) = &scenario.comparison_path {
386                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
387                    comparison_failures += 1;
388                }
389                println!();
390            }
391        }
392    }
393    
394    // Print summary statistics
395    if successful_scenarios > 0 {
396        println!("\n{}", "=".repeat(50));
397        println!("📊 Summary Statistics");
398        println!("{}", "=".repeat(50));
399        println!("Total scenarios run: {}", successful_scenarios);
400        println!("Total parsing time: {:?}", total_parse_time);
401        println!("Total evaluation time: {:?}", total_eval_time);
402        println!("Total time: {:?}", total_parse_time + total_eval_time);
403        
404        if successful_scenarios > 1 {
405            println!("\nAverage per scenario:");
406            println!("  Parsing: {:?}", total_parse_time / successful_scenarios as u32);
407            println!("  Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
408        }
409        
410        if enable_comparison {
411            println!("\nComparison failures: {}", comparison_failures);
412        }
413        
414        println!("\n✅ All scenarios completed successfully!\n");
415    }
416 }

pub fn new_from_msgpack(schema_msgpack: &[u8], context: Option<&str>, data: Option<&str>) -> Result<Self, String>

Create a new JSONEval instance from MessagePack-encoded schema

Arguments
  • schema_msgpack - MessagePack-encoded schema bytes
  • context - Optional JSON context string
  • data - Optional JSON data string
Returns

A Result containing the JSONEval instance or an error

Examples found in repository
examples/basic_msgpack.rs (line 129)
28 fn main() {
29    let args: Vec<String> = std::env::args().collect();
30    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic_msgpack");
31    
32    let mut scenario_filter: Option<String> = None;
33    let mut enable_comparison = false;
34    let mut show_timing = false;
35    let mut i = 1;
36    
37    // Parse arguments
38    while i < args.len() {
39        let arg = &args[i];
40        
41        if arg == "-h" || arg == "--help" {
42            print_help(program_name);
43            return;
44        } else if arg == "--compare" {
45            enable_comparison = true;
46        } else if arg == "--timing" {
47            show_timing = true;
48        } else if !arg.starts_with('-') {
49            scenario_filter = Some(arg.clone());
50        } else {
51            eprintln!("Error: unknown option '{}'", arg);
52            print_help(program_name);
53            return;
54        }
55        
56        i += 1;
57    }
58    
59    println!("\n🚀 JSON Evaluation - Basic Example (MessagePack Schema)\n");
60    
61    if enable_comparison {
62        println!("🔍 Comparison: enabled");
63    }
64    if show_timing {
65        println!("⏱️  Internal timing: enabled");
66    }
67    if enable_comparison || show_timing {
68        println!();
69    }
70    
71    let samples_dir = Path::new("samples");
72    let mut scenarios = common::discover_scenarios(samples_dir);
73    
74    // Filter to only MessagePack scenarios
75    scenarios.retain(|s| s.is_msgpack);
76    
77    // Filter scenarios if a filter is provided
78    if let Some(ref filter) = scenario_filter {
79        scenarios.retain(|s| s.name.contains(filter));
80        println!("📋 Filtering scenarios matching: '{}'\n", filter);
81    }
82
83    if scenarios.is_empty() {
84        if let Some(filter) = scenario_filter {
85            println!(
86                "ℹ️  No MessagePack scenarios found matching '{}' in `{}`.",
87                filter,
88                samples_dir.display()
89            );
90        } else {
91            println!(
92                "ℹ️  No MessagePack scenarios discovered in `{}`. Add files like `name.bform` and `name-data.json`.",
93                samples_dir.display()
94            );
95        }
96        return;
97    }
98    
99    println!("📊 Found {} MessagePack scenario(s)\n", scenarios.len());
100
101    let mut total_parse_time = std::time::Duration::ZERO;
102    let mut total_eval_time = std::time::Duration::ZERO;
103    let mut successful_scenarios = 0;
104    let mut comparison_failures = 0;
105
106    for scenario in &scenarios {
107        println!("==============================");
108        println!("Scenario: {}", scenario.name);
109        println!("Schema: {} (MessagePack)", scenario.schema_path.display());
110        println!("Data: {}\n", scenario.data_path.display());
111
112        // Clear timing data from previous scenarios
113        if show_timing {
114            json_eval_rs::enable_timing();
115            json_eval_rs::clear_timing_data();
116        }
117
118        let data_str = fs::read_to_string(&scenario.data_path)
119            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
120
121        // Step 1: Parse schema (new_from_msgpack)
122        let parse_start = Instant::now();
123        
124        let schema_msgpack = fs::read(&scenario.schema_path)
125            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
126        
127        println!("  📦 MessagePack schema size: {} bytes", schema_msgpack.len());
128        
129        let mut eval = JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
130            .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e));
131        
132        let parse_time = parse_start.elapsed();
133        println!("  📝 Parse (msgpack): {:?}", parse_time);
134        
135        // Step 2: Evaluate
136        let eval_start = Instant::now();
137        
138        eval.evaluate(&data_str, Some("{}"))
139            .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
140        
141        let evaluated_schema = eval.get_evaluated_schema(false);
142        let eval_time = eval_start.elapsed();
143        
144        println!("  ⚡ Eval: {:?}", eval_time);
145        println!("  ⏱️  Total: {:?}\n", parse_time + eval_time);
146        
147        // Print detailed timing breakdown if --timing flag is set
148        if show_timing {
149            json_eval_rs::print_timing_summary();
150        }
151        
152        total_parse_time += parse_time;
153        total_eval_time += eval_time;
154        successful_scenarios += 1;
155
156        // Save results
157        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
158        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
159
160        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
161            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
162
163        let mut metadata_obj = Map::new();
164        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
165        metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
166        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
167
168        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
169            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
170
171        println!("✅ Results saved:");
172        println!("  - {}", evaluated_path.display());
173        println!("  - {}\n", parsed_path.display());
174
175        // Optional comparison
176        if enable_comparison {
177            if let Some(comp_path) = &scenario.comparison_path {
178                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
179                    comparison_failures += 1;
180                }
181                println!();
182            }
183        }
184    }
185    
186    // Print summary
187    println!("{}", "=".repeat(50));
188    println!("📊 Summary");
189    println!("{}", "=".repeat(50));
190    println!("Total scenarios run: {}", successful_scenarios);
191    println!("Total parse time: {:?}", total_parse_time);
192    println!("Total eval time: {:?}", total_eval_time);
193    println!("Total time: {:?}", total_parse_time + total_eval_time);
194    
195    if successful_scenarios > 1 {
196        println!("\nAverage per scenario:");
197        println!("  Parse: {:?}", total_parse_time / successful_scenarios as u32);
198        println!("  Eval: {:?}", total_eval_time / successful_scenarios as u32);
199    }
200    
201    if enable_comparison {
202        println!("Comparison failures: {}", comparison_failures);
203    }
204    
205    println!("\n✅ All scenarios completed!\n");
206 }
More examples
examples/benchmark.rs (line 291): the same benchmark.rs listing shown above under new.

Source

pub fn with_parsed_schema( parsed: Arc<ParsedSchema>, context: Option<&str>, data: Option<&str>, ) -> Result<Self, String>

Create a new JSONEval instance from a pre-parsed ParsedSchema

This enables schema caching: parse once, reuse across multiple evaluations with different data/context.

§Arguments
  • parsed - Arc-wrapped pre-parsed schema (can be cloned and cached)
  • context - Optional JSON context string
  • data - Optional JSON data string
§Returns

A Result containing the JSONEval instance or an error

§Example
use std::sync::Arc;
use json_eval_rs::{JSONEval, ParsedSchema}; // import path assumed; the original doc example hides its imports

// Parse the schema once and wrap it in Arc for caching
let parsed = Arc::new(ParsedSchema::parse(schema_str)?);
cache.insert(schema_key, parsed.clone()); // e.g. a ParsedSchemaCache keyed by schema version

// Reuse across multiple evaluations (Arc::clone is cheap)
let eval1 = JSONEval::with_parsed_schema(parsed.clone(), Some(context1), Some(data1))?;
let eval2 = JSONEval::with_parsed_schema(parsed.clone(), Some(context2), Some(data2))?;
Examples found in repository?
examples/cache_demo.rs (line 73)
38fn demo_local_cache() -> Result<(), Box<dyn std::error::Error>> {
39    println!("📦 Example 1: Local Cache Instance");
40    println!("Creating a dedicated cache for this application...\n");
41    
42    let cache = ParsedSchemaCache::new();
43    
44    // Simple schema
45    let schema_json = r#"{
46        "$params": {
47            "rate": { "type": "number" }
48        },
49        "result": {
50            "type": "number",
51            "title": "Calculated Result",
52            "$evaluation": {
53                "logic": { "*": [{"var": "$rate"}, 100] }
54            }
55        }
56    }"#;
57    
58    // Parse and cache with a custom key
59    println!("📝 Parsing schema and caching with key 'calculation-v1'...");
60    let parsed = ParsedSchema::parse(schema_json)?;
61    cache.insert("calculation-v1".to_string(), Arc::new(parsed));
62    
63    println!("✅ Schema cached successfully");
64    println!("   Cache size: {} entries", cache.len());
65    println!("   Keys: {:?}\n", cache.keys());
66    
67    // Retrieve and use cached schema
68    println!("🔍 Retrieving cached schema...");
69    if let Some(cached_schema) = cache.get("calculation-v1") {
70        println!("✅ Retrieved from cache");
71        
72        // Create JSONEval from cached ParsedSchema
73        let mut eval = JSONEval::with_parsed_schema(cached_schema, Some(r#"{"rate": 1.5}"#), None)?;
74        eval.evaluate("{}", None)?;
75        
76        let evaluated = eval.get_evaluated_schema(false);
77        let result = evaluated.pointer("/result")
78            .and_then(|v| v.as_f64())
79            .unwrap_or(0.0);
80        println!("   Evaluation result: {}\n", result);
81    }
82    
83    // Check cache stats
84    let stats = cache.stats();
85    println!("📊 Cache Statistics: {}", stats);
86    
87    // Remove entry
88    println!("\n🗑️  Removing 'calculation-v1' from cache...");
89    cache.remove("calculation-v1");
90    println!("   Cache size after removal: {}", cache.len());
91    
92    Ok(())
93}
94
95fn demo_global_cache() -> Result<(), Box<dyn std::error::Error>> {
96    println!("🌍 Example 2: Global Cache Instance");
97    println!("Using the built-in PARSED_SCHEMA_CACHE...\n");
98    
99    let schema_json = r#"{
100        "$params": {
101            "x": { "type": "number" },
102            "y": { "type": "number" }
103        },
104        "sum": {
105            "type": "number",
106            "$evaluation": { "+": [{"var": "$x"}, {"var": "$y"}] }
107        }
108    }"#;
109    
110    // Use global cache
111    println!("📝 Caching schema globally with key 'math-operations'...");
112    let parsed = ParsedSchema::parse(schema_json)?;
113    PARSED_SCHEMA_CACHE.insert("math-operations".to_string(), Arc::new(parsed));
114    
115    println!("✅ Schema cached globally");
116    println!("   Global cache size: {}\n", PARSED_SCHEMA_CACHE.len());
117    
118    // Access from anywhere in the application
119    simulate_another_function()?;
120    
121    // Clean up
122    println!("\n🧹 Clearing global cache...");
123    PARSED_SCHEMA_CACHE.clear();
124    println!("   Global cache size: {}", PARSED_SCHEMA_CACHE.len());
125    
126    Ok(())
127}
128
129fn simulate_another_function() -> Result<(), Box<dyn std::error::Error>> {
130    println!("🔄 In another function, accessing global cache...");
131    
132    if let Some(cached) = PARSED_SCHEMA_CACHE.get("math-operations") {
133        println!("✅ Retrieved schema from global cache");
134        
135        let mut eval = JSONEval::with_parsed_schema(cached, Some(r#"{"x": 10, "y": 20}"#), None)?;
136        eval.evaluate("{}", None)?;
137        
138        let evaluated = eval.get_evaluated_schema(false);
139        let sum = evaluated.pointer("/sum")
140            .and_then(|v| v.as_f64())
141            .unwrap_or(0.0);
142        println!("   Result: {}", sum);
143    }
144    
145    Ok(())
146}
147
148fn demo_performance_comparison() -> Result<(), Box<dyn std::error::Error>> {
149    println!("⚡ Example 3: Performance Comparison");
150    println!("Comparing cached vs non-cached schema usage...\n");
151    
152    let schema_json = r#"{
153        "$params": {
154            "value": { "type": "number" }
155        },
156        "doubled": {
157            "type": "number",
158            "$evaluation": { "*": [{"var": "$value"}, 2] }
159        },
160        "tripled": {
161            "type": "number",
162            "$evaluation": { "*": [{"var": "$value"}, 3] }
163        }
164    }"#;
165    
166    let iterations = 100;
167    
168    // WITHOUT CACHE: Parse schema every time
169    println!("🐌 Without cache (parse + evaluate each time):");
170    let start = Instant::now();
171    for i in 0..iterations {
172        let context = format!(r#"{{"value": {}}}"#, i);
173        let mut eval = JSONEval::new(schema_json, Some(&context), None)?;
174        eval.evaluate("{}", None)?;
175    }
176    let without_cache = start.elapsed();
177    println!("   Time: {:?}", without_cache);
178    println!("   Avg per iteration: {:?}\n", without_cache / iterations);
179    
180    // WITH CACHE: Parse once, evaluate many times
181    println!("🚀 With cache (parse once, reuse for all evaluations):");
182    let cache = ParsedSchemaCache::new();
183    
184    // Parse once
185    let parse_start = Instant::now();
186    let parsed = ParsedSchema::parse(schema_json)?;
187    cache.insert("perf-test".to_string(), Arc::new(parsed));
188    let parse_time = parse_start.elapsed();
189    
190    // Evaluate many times
191    let eval_start = Instant::now();
192    for i in 0..iterations {
193        if let Some(cached) = cache.get("perf-test") {
194            let context = format!(r#"{{"value": {}}}"#, i);
195            let mut eval = JSONEval::with_parsed_schema(cached.clone(), Some(&context), None)?;
196            eval.evaluate("{}", None)?;
197        }
198    }
199    let eval_time = eval_start.elapsed();
200    let with_cache = parse_time + eval_time;
201    
202    println!("   Parse time: {:?}", parse_time);
203    println!("   Eval time: {:?}", eval_time);
204    println!("   Total time: {:?}", with_cache);
205    println!("   Avg per iteration: {:?}\n", eval_time / iterations);
206    
207    let speedup = without_cache.as_secs_f64() / with_cache.as_secs_f64();
208    println!("📈 Speedup: {:.2}x faster", speedup);
209    
210    Ok(())
211}
More examples
examples/basic_parsed.rs (lines 143-147)
30fn main() {
31    let args: Vec<String> = std::env::args().collect();
32    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic_parsed");
33    
34    let mut scenario_filter: Option<String> = None;
35    let mut enable_comparison = false;
36    let mut show_timing = false;
37    let mut i = 1;
38    
39    // Parse arguments
40    while i < args.len() {
41        let arg = &args[i];
42        
43        if arg == "-h" || arg == "--help" {
44            print_help(program_name);
45            return;
46        } else if arg == "--compare" {
47            enable_comparison = true;
48        } else if arg == "--timing" {
49            show_timing = true;
50        } else if !arg.starts_with('-') {
51            scenario_filter = Some(arg.clone());
52        } else {
53            eprintln!("Error: unknown option '{}'", arg);
54            print_help(program_name);
55            return;
56        }
57        
58        i += 1;
59    }
60    
61    println!("\n🚀 JSON Evaluation - Basic Example (ParsedSchema)\n");
62    println!("📦 Using Arc<ParsedSchema> for efficient caching\n");
63    
64    if enable_comparison {
65        println!("🔍 Comparison: enabled");
66    }
67    if show_timing {
68        println!("⏱️  Internal timing: enabled");
69    }
70    if enable_comparison || show_timing {
71        println!();
72    }
73    
74    let samples_dir = Path::new("samples");
75    let mut scenarios = common::discover_scenarios(samples_dir);
76    
77    // Filter scenarios if a filter is provided
78    if let Some(ref filter) = scenario_filter {
79        scenarios.retain(|s| s.name.contains(filter));
80        println!("📋 Filtering scenarios matching: '{}'\n", filter);
81    }
82
83    if scenarios.is_empty() {
84        if let Some(filter) = scenario_filter {
85            println!(
86                "ℹ️  No scenarios found matching '{}' in `{}`.",
87                filter,
88                samples_dir.display()
89            );
90        } else {
91            println!(
92                "ℹ️  No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
93                samples_dir.display()
94            );
95        }
96        return;
97    }
98    
99    println!("📊 Found {} scenario(s)\n", scenarios.len());
100
101    let mut total_parse_time = std::time::Duration::ZERO;
102    let mut total_eval_time = std::time::Duration::ZERO;
103    let mut successful_scenarios = 0;
104    let mut comparison_failures = 0;
105
106    for scenario in &scenarios {
107        println!("==============================");
108        println!("Scenario: {}", scenario.name);
109        println!("Schema: {} ({})", 
110            scenario.schema_path.display(),
111            if scenario.is_msgpack { "MessagePack" } else { "JSON" }
112        );
113        println!("Data: {}\n", scenario.data_path.display());
114
115        // Clear timing data from previous scenarios
116        if show_timing {
117            json_eval_rs::enable_timing();
118            json_eval_rs::clear_timing_data();
119        }
120
121        let data_str = fs::read_to_string(&scenario.data_path)
122            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
123
124        // Step 1: Parse schema once
125        let parse_start = Instant::now();
126        let parsed_schema = if scenario.is_msgpack {
127            let schema_msgpack = fs::read(&scenario.schema_path)
128                .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
129            println!("  📦 MessagePack schema size: {} bytes", schema_msgpack.len());
130            Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
131                .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
132        } else {
133            let schema_str = fs::read_to_string(&scenario.schema_path)
134                .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
135            Arc::new(ParsedSchema::parse(&schema_str)
136                .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
137        };
138        let parse_time = parse_start.elapsed();
139        println!("  📝 Schema parsing: {:?}", parse_time);
140        
141        // Step 2: Create JSONEval from ParsedSchema (reuses compiled logic)
142        let eval_start = Instant::now();
143        let mut eval = JSONEval::with_parsed_schema(
144            parsed_schema.clone(),  // Arc::clone is cheap!
145            Some("{}"),
146            Some(&data_str)
147        ).unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e));
148
149        eval.evaluate(&data_str, Some("{}"))
150            .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
151        
152        let evaluated_schema = eval.get_evaluated_schema(false);
153        let eval_time = eval_start.elapsed();
154        
155        println!("  ⚡ Eval: {:?}", eval_time);
156        println!("  ⏱️  Total: {:?}\n", parse_time + eval_time);
157        
158        // Print detailed timing breakdown if --timing flag is set
159        if show_timing {
160            json_eval_rs::print_timing_summary();
161        }
162        
163        total_parse_time += parse_time;
164        total_eval_time += eval_time;
165        successful_scenarios += 1;
166
167        // Save results
168        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
169        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
170
171        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
172            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
173
174        let mut metadata_obj = Map::new();
175        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
176        metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
177        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
178
179        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
180            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
181
182        println!("✅ Results saved:");
183        println!("  - {}", evaluated_path.display());
184        println!("  - {}\n", parsed_path.display());
185
186        // Optional comparison
187        if enable_comparison {
188            if let Some(comp_path) = &scenario.comparison_path {
189                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
190                    comparison_failures += 1;
191                }
192                println!();
193            }
194        }
195    }
196    
197    // Print summary
198    println!("{}", "=".repeat(50));
199    println!("📊 Summary");
200    println!("{}", "=".repeat(50));
201    println!("Total scenarios run: {}", successful_scenarios);
202    println!("Total parsing time: {:?}", total_parse_time);
203    println!("Total evaluation time: {:?}", total_eval_time);
204    println!("Total time: {:?}", total_parse_time + total_eval_time);
205    
206    if successful_scenarios > 1 {
207        println!("\nAverage per scenario:");
208        println!("  Parsing: {:?}", total_parse_time / successful_scenarios as u32);
209        println!("  Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
210    }
211    
212    if enable_comparison {
213        println!("\nComparison failures: {}", comparison_failures);
214    }
215    
216    println!("\n✅ All scenarios completed!\n");
217}
examples/benchmark.rs (lines 215-219)
31fn main() {
32    let args: Vec<String> = std::env::args().collect();
33    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("benchmark");
34    
35    let mut iterations = 1usize;
36    let mut scenario_filter: Option<String> = None;
37    let mut show_cpu_info = false;
38    let mut use_parsed_schema = false;
39    let mut concurrent_count: Option<usize> = None;
40    let mut enable_comparison = false;
41    let mut show_timing = false;
42    let mut i = 1;
43    
44    // Parse arguments
45    while i < args.len() {
46        let arg = &args[i];
47        
48        if arg == "-h" || arg == "--help" {
49            print_help(program_name);
50            return;
51        } else if arg == "--cpu-info" {
52            show_cpu_info = true;
53        } else if arg == "--parsed" {
54            use_parsed_schema = true;
55        } else if arg == "--compare" {
56            enable_comparison = true;
57        } else if arg == "--timing" {
58            show_timing = true;
59        } else if arg == "--concurrent" {
60            if i + 1 >= args.len() {
61                eprintln!("Error: {} requires a value", arg);
62                print_help(program_name);
63                return;
64            }
65            i += 1;
66            match args[i].parse::<usize>() {
67                Ok(n) if n > 0 => concurrent_count = Some(n),
68                _ => {
69                    eprintln!("Error: concurrent count must be a positive integer, got '{}'", args[i]);
70                    return;
71                }
72            }
73        } else if arg == "-i" || arg == "--iterations" {
74            if i + 1 >= args.len() {
75                eprintln!("Error: {} requires a value", arg);
76                print_help(program_name);
77                return;
78            }
79            i += 1;
80            match args[i].parse::<usize>() {
81                Ok(n) if n > 0 => iterations = n,
82                _ => {
83                    eprintln!("Error: iterations must be a positive integer, got '{}'", args[i]);
84                    return;
85                }
86            }
87        } else if !arg.starts_with('-') {
88            scenario_filter = Some(arg.clone());
89        } else {
90            eprintln!("Error: unknown option '{}'", arg);
91            print_help(program_name);
92            return;
93        }
94        
95        i += 1;
96    }
97    
98    println!("\n🚀 JSON Evaluation - Benchmark\n");
99    
100    // Show CPU info if requested or if running benchmarks
101    if show_cpu_info || iterations > 1 || concurrent_count.is_some() {
102        common::print_cpu_info();
103    }
104    
105    if use_parsed_schema {
106        println!("📦 Mode: ParsedSchema (parse once, reuse for all iterations)\n");
107    }
108    
109    if let Some(count) = concurrent_count {
110        println!("🔀 Concurrent evaluations: {} threads\n", count);
111    } else if iterations > 1 {
112        println!("🔄 Iterations per scenario: {}\n", iterations);
113    }
114    
115    if enable_comparison {
116        println!("🔍 Comparison: enabled");
117    }
118    if show_timing {
119        println!("⏱️  Internal timing: enabled");
120    }
121    if enable_comparison || show_timing {
122        println!();
123    }
124
125    let samples_dir = Path::new("samples");
126    let mut scenarios = common::discover_scenarios(samples_dir);
127    
128    // Filter scenarios if a filter is provided
129    if let Some(ref filter) = scenario_filter {
130        scenarios.retain(|s| s.name.contains(filter));
131        println!("📋 Filtering scenarios matching: '{}'\n", filter);
132    }
133
134    if scenarios.is_empty() {
135        if let Some(filter) = scenario_filter {
136            println!(
137                "ℹ️  No scenarios found matching '{}' in `{}`.",
138                filter,
139                samples_dir.display()
140            );
141        } else {
142            println!(
143                "ℹ️  No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
144                samples_dir.display()
145            );
146        }
147        return;
148    }
149    
150    println!("📊 Found {} scenario(s)\n", scenarios.len());
151
152    let mut total_parse_time = std::time::Duration::ZERO;
153    let mut total_eval_time = std::time::Duration::ZERO;
154    let mut successful_scenarios = 0;
155    let mut comparison_failures = 0;
156
157    for scenario in &scenarios {
158        println!("==============================");
159        println!("Scenario: {}", scenario.name);
160        println!("Schema: {} ({})", 
161            scenario.schema_path.display(),
162            if scenario.is_msgpack { "MessagePack" } else { "JSON" }
163        );
164        println!("Data: {}\n", scenario.data_path.display());
165
166        // Clear timing data from previous scenarios
167        if show_timing {
168            json_eval_rs::enable_timing();
169            json_eval_rs::clear_timing_data();
170        }
171
172        let data_str = fs::read_to_string(&scenario.data_path)
173            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
174
175        println!("Running evaluation...\n");
176
177        let (parse_time, eval_time, evaluated_schema, eval, iteration_times) = if use_parsed_schema {
178            // ParsedSchema mode: parse once, reuse for all iterations/threads
179            let start_time = Instant::now();
180            
181            let parsed_schema = if scenario.is_msgpack {
182                let schema_msgpack = fs::read(&scenario.schema_path)
183                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
184                println!("  📦 MessagePack schema size: {} bytes", schema_msgpack.len());
185                Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
186                    .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
187            } else {
188                let schema_str = fs::read_to_string(&scenario.schema_path)
189                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
190                Arc::new(ParsedSchema::parse(&schema_str)
191                    .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
192            };
193            
194            let parse_time = start_time.elapsed();
195            println!("  Schema parsing & compilation: {:?}", parse_time);
196            
197            // Concurrent mode with ParsedSchema
198            if let Some(thread_count) = concurrent_count {
199                use std::thread;
200                
201                let eval_start = Instant::now();
202                let mut handles = vec![];
203                
204                for thread_id in 0..thread_count {
205                    let parsed_clone = parsed_schema.clone();
206                    let data_str_clone = data_str.clone();
207                    let iter_count = iterations;
208                    
209                    let handle = thread::spawn(move || {
210                        let mut thread_times = Vec::with_capacity(iter_count);
211                        let mut last_schema = Value::Null;
212                        
213                        for _ in 0..iter_count {
214                            let iter_start = Instant::now();
215                            let mut eval_instance = JSONEval::with_parsed_schema(
216                                parsed_clone.clone(),
217                                Some("{}"),
218                                Some(&data_str_clone)
219                            ).unwrap();
220                            
221                            eval_instance.evaluate(&data_str_clone, Some("{}")).unwrap();
222                            last_schema = eval_instance.get_evaluated_schema(false);
223                            thread_times.push(iter_start.elapsed());
224                        }
225                        
226                        (thread_times, last_schema, thread_id)
227                    });
228                    handles.push(handle);
229                }
230                
231                let mut all_iteration_times = Vec::new();
232                let mut evaluated_schema = Value::Null;
233                
234                for handle in handles {
235                    let (thread_times, thread_schema, thread_id) = handle.join().unwrap();
236                    println!("  Thread {} completed {} iterations", thread_id, thread_times.len());
237                    all_iteration_times.extend(thread_times);
238                    evaluated_schema = thread_schema; // Use last thread's result
239                }
240                
241                let eval_time = eval_start.elapsed();
242                
243                // Create a temp eval for metadata export
244                let temp_eval = JSONEval::with_parsed_schema(
245                    parsed_schema.clone(),
246                    Some("{}"),
247                    Some(&data_str)
248                ).unwrap();
249                
250                (parse_time, eval_time, evaluated_schema, temp_eval, all_iteration_times)
251            } else {
252                // Sequential iterations with ParsedSchema
253                let eval_start = Instant::now();
254                let mut evaluated_schema = Value::Null;
255                let mut iteration_times = Vec::with_capacity(iterations);
256                let mut eval_instance = JSONEval::with_parsed_schema(
257                    parsed_schema.clone(),
258                    Some("{}"),
259                    Some(&data_str)
260                ).unwrap();
261                
262                for iter in 0..iterations {
263                    let iter_start = Instant::now();
264                    eval_instance.evaluate(&data_str, Some("{}"))
265                        .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
266                    evaluated_schema = eval_instance.get_evaluated_schema(false);
267                    iteration_times.push(iter_start.elapsed());
268                    
269                    if iterations > 1 && (iter + 1) % 10 == 0 {
270                        print!(".");
271                        if (iter + 1) % 50 == 0 {
272                            println!(" {}/{}", iter + 1, iterations);
273                        }
274                    }
275                }
276                
277                if iterations > 1 && iterations % 50 != 0 {
278                    println!(" {}/{}", iterations, iterations);
279                }
280                
281                let eval_time = eval_start.elapsed();
282                (parse_time, eval_time, evaluated_schema, eval_instance, iteration_times)
283            }
284        } else {
285            // Traditional mode: parse and create JSONEval each time
286            let start_time = Instant::now();
287            let mut eval = if scenario.is_msgpack {
288                let schema_msgpack = fs::read(&scenario.schema_path)
289                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
290                println!("  📦 MessagePack schema size: {} bytes", schema_msgpack.len());
291                JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
292                    .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e))
293            } else {
294                let schema_str = fs::read_to_string(&scenario.schema_path)
295                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
296                JSONEval::new(&schema_str, None, Some(&data_str))
297                    .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
298            };
299            let parse_time = start_time.elapsed();
300            println!("  Schema parsing & compilation: {:?}", parse_time);
301            
302            let eval_start = Instant::now();
303            let mut evaluated_schema = Value::Null;
304            let mut iteration_times = Vec::with_capacity(iterations);
305            
306            for iter in 0..iterations {
307                let iter_start = Instant::now();
308                eval.evaluate(&data_str, Some("{}"))
309                    .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
310                evaluated_schema = eval.get_evaluated_schema(false);
311                iteration_times.push(iter_start.elapsed());
312                
313                if iterations > 1 && (iter + 1) % 10 == 0 {
314                    print!(".");
315                    if (iter + 1) % 50 == 0 {
316                        println!(" {}/{}", iter + 1, iterations);
317                    }
318                }
319            }
320            
321            if iterations > 1 && iterations % 50 != 0 {
322                println!(" {}/{}", iterations, iterations);
323            }
324            
325            let eval_time = eval_start.elapsed();
326            (parse_time, eval_time, evaluated_schema, eval, iteration_times)
327        };
328        
329        // Calculate statistics
330        let total_iterations = iteration_times.len();
331        if total_iterations == 1 {
332            println!("  Evaluation: {:?}", eval_time);
333        } else {
334            let avg_time = eval_time / total_iterations as u32;
335            let min_time = iteration_times.iter().min().unwrap();
336            let max_time = iteration_times.iter().max().unwrap();
337            
338            println!("  Total evaluation time: {:?}", eval_time);
339            println!("  Total iterations: {}", total_iterations);
340            println!("  Average per iteration: {:?}", avg_time);
341            println!("  Min: {:?} | Max: {:?}", min_time, max_time);
342            
343            // Show cache statistics
344            let cache_stats = eval.cache_stats();
345            println!("  Cache: {} entries, {} hits, {} misses ({:.1}% hit rate)",
346                cache_stats.entries,
347                cache_stats.hits,
348                cache_stats.misses,
349                cache_stats.hit_rate * 100.0
350            );
351        }
352
353        let total_time = parse_time + eval_time;
354        println!("⏱️  Execution time: {:?}\n", total_time);
355        
356        // Print detailed timing breakdown if --timing flag is set
357        if show_timing {
358            json_eval_rs::print_timing_summary();
359        }
360        
361        // Track statistics
362        total_parse_time += parse_time;
363        total_eval_time += eval_time;
364        successful_scenarios += 1;
365
366        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
367        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
368
369        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
370            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
371
372        let mut metadata_obj = Map::new();
373        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
374        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
375
376        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
377            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
378
379        println!("✅ Results saved:");
380        println!("  - {}", evaluated_path.display());
381        println!("  - {}\n", parsed_path.display());
382
383        // Optional comparison
384        if enable_comparison {
385            if let Some(comp_path) = &scenario.comparison_path {
386                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
387                    comparison_failures += 1;
388                }
389                println!();
390            }
391        }
392    }
393    
394    // Print summary statistics
395    if successful_scenarios > 0 {
396        println!("\n{}", "=".repeat(50));
397        println!("📊 Summary Statistics");
398        println!("{}", "=".repeat(50));
399        println!("Total scenarios run: {}", successful_scenarios);
400        println!("Total parsing time: {:?}", total_parse_time);
401        println!("Total evaluation time: {:?}", total_eval_time);
402        println!("Total time: {:?}", total_parse_time + total_eval_time);
403        
404        if successful_scenarios > 1 {
405            println!("\nAverage per scenario:");
406            println!("  Parsing: {:?}", total_parse_time / successful_scenarios as u32);
407            println!("  Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
408        }
409        
410        if enable_comparison {
411            println!("\nComparison failures: {}", comparison_failures);
412        }
413        
414        println!("\n✅ All scenarios completed successfully!\n");
415    }
416}
Source

pub fn reload_schema( &mut self, schema: &str, context: Option<&str>, data: Option<&str>, ) -> Result<(), String>

Reload the schema from a JSON schema string, replacing the schema currently loaded in this instance.

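A minimal sketch of reloading a schema in place on an existing instance. The schemas, context, and values below are illustrative placeholders, reusing the $params pattern from the cache examples elsewhere on this page.

use json_eval_rs::JSONEval;

let schema_v1 = r#"{
    "$params": { "value": { "type": "number" } },
    "doubled": { "type": "number", "$evaluation": { "*": [{"var": "$value"}, 2] } }
}"#;
let schema_v2 = r#"{
    "$params": { "value": { "type": "number" } },
    "tripled": { "type": "number", "$evaluation": { "*": [{"var": "$value"}, 3] } }
}"#;

// Build once, evaluate, then swap the schema without constructing a new JSONEval.
let mut eval = JSONEval::new(schema_v1, Some(r#"{"value": 5}"#), None)?;
eval.evaluate("{}", None)?;

eval.reload_schema(schema_v2, Some(r#"{"value": 5}"#), None)?;
eval.evaluate("{}", None)?;
let evaluated = eval.get_evaluated_schema(false);
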
Source

pub fn reload_schema_msgpack( &mut self, schema_msgpack: &[u8], context: Option<&str>, data: Option<&str>, ) -> Result<(), String>

Reload schema from MessagePack-encoded bytes

§Arguments
  • schema_msgpack - MessagePack-encoded schema bytes
  • context - Optional context data JSON string
  • data - Optional initial data JSON string
§Returns

A Result indicating success or an error message
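A minimal sketch, assuming an existing eval: JSONEval and a JSON data_str (a String, as in the examples below); the MessagePack schema is read from a .bform file whose path is illustrative.

use std::fs;

// Read a MessagePack-encoded schema from disk (path is a placeholder).
let schema_msgpack = fs::read("samples/policy.bform").map_err(|e| e.to_string())?;
println!("MessagePack schema size: {} bytes", schema_msgpack.len());

// Replace the schema on the existing instance and re-evaluate.
eval.reload_schema_msgpack(&schema_msgpack, None, Some(&data_str))?;
eval.evaluate(&data_str, None)?;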

Source

pub fn reload_schema_parsed( &mut self, parsed: Arc<ParsedSchema>, context: Option<&str>, data: Option<&str>, ) -> Result<(), String>

Reload schema from a cached ParsedSchema

This is the most efficient way to reload, since it reuses the already-parsed and compiled schema instead of parsing it again.

§Arguments
  • parsed - Arc reference to a cached ParsedSchema
  • context - Optional context data JSON string
  • data - Optional initial data JSON string
§Returns

A Result indicating success or an error message
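A minimal sketch, assuming an existing eval: JSONEval; schema_str, context_str, and data_str stand in for your own JSON strings, and the ParsedSchema import path is assumed.

use std::sync::Arc;
use json_eval_rs::ParsedSchema; // import path assumed

// Parse (or fetch from a cache) once, then reuse the compiled schema.
let parsed = Arc::new(ParsedSchema::parse(schema_str)?);

eval.reload_schema_parsed(parsed.clone(), Some(context_str), Some(data_str))?;
eval.evaluate(data_str, None)?;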

Source

pub fn reload_schema_from_cache( &mut self, cache_key: &str, context: Option<&str>, data: Option<&str>, ) -> Result<(), String>

Reload schema from ParsedSchemaCache using a cache key

This is the recommended way for cross-platform cached schema reloading.

§Arguments
  • cache_key - Key to lookup in the global ParsedSchemaCache
  • context - Optional context data JSON string
  • data - Optional initial data JSON string
§Returns

A Result indicating success or an error message
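A minimal sketch using the global PARSED_SCHEMA_CACHE shown in the cache examples; the cache key and context values are taken from those examples, and the import paths are assumed.

use std::sync::Arc;
use json_eval_rs::{ParsedSchema, PARSED_SCHEMA_CACHE}; // import paths assumed

// Populate the global cache once (e.g. at startup).
let parsed = ParsedSchema::parse(schema_str)?;
PARSED_SCHEMA_CACHE.insert("math-operations".to_string(), Arc::new(parsed));

// Later, reload an existing instance by key instead of passing the schema around.
eval.reload_schema_from_cache("math-operations", Some(r#"{"x": 10, "y": 20}"#), None)?;
eval.evaluate("{}", None)?;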

Source

pub fn evaluate( &mut self, data: &str, context: Option<&str>, ) -> Result<(), String>

Evaluate the schema with the given data and context.

§Arguments
  • data - JSON data string to evaluate against the schema.
  • context - Optional context data JSON string.
§Returns

A Result indicating success or an error message.
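A minimal call sketch on an existing eval instance; the data values are illustrative. Repeated calls reuse the compiled schema, and the evaluation cache when it is enabled.

// Evaluate with fresh data, then read back the evaluated schema.
eval.evaluate(r#"{"price": 100}"#, Some("{}"))?;
let evaluated = eval.get_evaluated_schema(false);

// Re-evaluating the same instance with different data reuses the compiled logic.
eval.evaluate(r#"{"price": 250}"#, Some("{}"))?;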

Examples found in repository?
examples/cache_demo.rs (line 74)
38fn demo_local_cache() -> Result<(), Box<dyn std::error::Error>> {
39    println!("📦 Example 1: Local Cache Instance");
40    println!("Creating a dedicated cache for this application...\n");
41    
42    let cache = ParsedSchemaCache::new();
43    
44    // Simple schema
45    let schema_json = r#"{
46        "$params": {
47            "rate": { "type": "number" }
48        },
49        "result": {
50            "type": "number",
51            "title": "Calculated Result",
52            "$evaluation": {
53                "logic": { "*": [{"var": "$rate"}, 100] }
54            }
55        }
56    }"#;
57    
58    // Parse and cache with a custom key
59    println!("📝 Parsing schema and caching with key 'calculation-v1'...");
60    let parsed = ParsedSchema::parse(schema_json)?;
61    cache.insert("calculation-v1".to_string(), Arc::new(parsed));
62    
63    println!("✅ Schema cached successfully");
64    println!("   Cache size: {} entries", cache.len());
65    println!("   Keys: {:?}\n", cache.keys());
66    
67    // Retrieve and use cached schema
68    println!("🔍 Retrieving cached schema...");
69    if let Some(cached_schema) = cache.get("calculation-v1") {
70        println!("✅ Retrieved from cache");
71        
72        // Create JSONEval from cached ParsedSchema
73        let mut eval = JSONEval::with_parsed_schema(cached_schema, Some(r#"{"rate": 1.5}"#), None)?;
74        eval.evaluate("{}", None)?;
75        
76        let evaluated = eval.get_evaluated_schema(false);
77        let result = evaluated.pointer("/result")
78            .and_then(|v| v.as_f64())
79            .unwrap_or(0.0);
80        println!("   Evaluation result: {}\n", result);
81    }
82    
83    // Check cache stats
84    let stats = cache.stats();
85    println!("📊 Cache Statistics: {}", stats);
86    
87    // Remove entry
88    println!("\n🗑️  Removing 'calculation-v1' from cache...");
89    cache.remove("calculation-v1");
90    println!("   Cache size after removal: {}", cache.len());
91    
92    Ok(())
93}
94
95fn demo_global_cache() -> Result<(), Box<dyn std::error::Error>> {
96    println!("🌍 Example 2: Global Cache Instance");
97    println!("Using the built-in PARSED_SCHEMA_CACHE...\n");
98    
99    let schema_json = r#"{
100        "$params": {
101            "x": { "type": "number" },
102            "y": { "type": "number" }
103        },
104        "sum": {
105            "type": "number",
106            "$evaluation": { "+": [{"var": "$x"}, {"var": "$y"}] }
107        }
108    }"#;
109    
110    // Use global cache
111    println!("📝 Caching schema globally with key 'math-operations'...");
112    let parsed = ParsedSchema::parse(schema_json)?;
113    PARSED_SCHEMA_CACHE.insert("math-operations".to_string(), Arc::new(parsed));
114    
115    println!("✅ Schema cached globally");
116    println!("   Global cache size: {}\n", PARSED_SCHEMA_CACHE.len());
117    
118    // Access from anywhere in the application
119    simulate_another_function()?;
120    
121    // Clean up
122    println!("\n🧹 Clearing global cache...");
123    PARSED_SCHEMA_CACHE.clear();
124    println!("   Global cache size: {}", PARSED_SCHEMA_CACHE.len());
125    
126    Ok(())
127}
128
129fn simulate_another_function() -> Result<(), Box<dyn std::error::Error>> {
130    println!("🔄 In another function, accessing global cache...");
131    
132    if let Some(cached) = PARSED_SCHEMA_CACHE.get("math-operations") {
133        println!("✅ Retrieved schema from global cache");
134        
135        let mut eval = JSONEval::with_parsed_schema(cached, Some(r#"{"x": 10, "y": 20}"#), None)?;
136        eval.evaluate("{}", None)?;
137        
138        let evaluated = eval.get_evaluated_schema(false);
139        let sum = evaluated.pointer("/sum")
140            .and_then(|v| v.as_f64())
141            .unwrap_or(0.0);
142        println!("   Result: {}", sum);
143    }
144    
145    Ok(())
146}
147
148fn demo_performance_comparison() -> Result<(), Box<dyn std::error::Error>> {
149    println!("⚡ Example 3: Performance Comparison");
150    println!("Comparing cached vs non-cached schema usage...\n");
151    
152    let schema_json = r#"{
153        "$params": {
154            "value": { "type": "number" }
155        },
156        "doubled": {
157            "type": "number",
158            "$evaluation": { "*": [{"var": "$value"}, 2] }
159        },
160        "tripled": {
161            "type": "number",
162            "$evaluation": { "*": [{"var": "$value"}, 3] }
163        }
164    }"#;
165    
166    let iterations = 100;
167    
168    // WITHOUT CACHE: Parse schema every time
169    println!("🐌 Without cache (parse + evaluate each time):");
170    let start = Instant::now();
171    for i in 0..iterations {
172        let context = format!(r#"{{"value": {}}}"#, i);
173        let mut eval = JSONEval::new(schema_json, Some(&context), None)?;
174        eval.evaluate("{}", None)?;
175    }
176    let without_cache = start.elapsed();
177    println!("   Time: {:?}", without_cache);
178    println!("   Avg per iteration: {:?}\n", without_cache / iterations);
179    
180    // WITH CACHE: Parse once, evaluate many times
181    println!("🚀 With cache (parse once, reuse for all evaluations):");
182    let cache = ParsedSchemaCache::new();
183    
184    // Parse once
185    let parse_start = Instant::now();
186    let parsed = ParsedSchema::parse(schema_json)?;
187    cache.insert("perf-test".to_string(), Arc::new(parsed));
188    let parse_time = parse_start.elapsed();
189    
190    // Evaluate many times
191    let eval_start = Instant::now();
192    for i in 0..iterations {
193        if let Some(cached) = cache.get("perf-test") {
194            let context = format!(r#"{{"value": {}}}"#, i);
195            let mut eval = JSONEval::with_parsed_schema(cached.clone(), Some(&context), None)?;
196            eval.evaluate("{}", None)?;
197        }
198    }
199    let eval_time = eval_start.elapsed();
200    let with_cache = parse_time + eval_time;
201    
202    println!("   Parse time: {:?}", parse_time);
203    println!("   Eval time: {:?}", eval_time);
204    println!("   Total time: {:?}", with_cache);
205    println!("   Avg per iteration: {:?}\n", eval_time / iterations);
206    
207    let speedup = without_cache.as_secs_f64() / with_cache.as_secs_f64();
208    println!("📈 Speedup: {:.2}x faster", speedup);
209    
210    Ok(())
211}
More examples
examples/cache_disable.rs (line 48)
4fn main() {
5    let schema = json!({
6        "type": "object",
7        "properties": {
8            "price": {
9                "type": "number"
10            },
11            "tax": {
12                "type": "number",
13                "value": {
14                    "$evaluation": {
15                        "*": [
16                            { "$ref": "#/properties/price" },
17                            0.1
18                        ]
19                    }
20                }
21            },
22            "total": {
23                "type": "number",
24                "value": {
25                    "$evaluation": {
26                        "+": [
27                            { "$ref": "#/properties/price" },
28                            { "$ref": "#/properties/tax" }
29                        ]
30                    }
31                }
32            }
33        }
34    });
35
36    let schema_str = serde_json::to_string(&schema).unwrap();
37    
38    println!("=== Example 1: With Caching (Default) ===");
39    {
40        let data = json!({ "price": 100 });
41        let data_str = serde_json::to_string(&data).unwrap();
42        
43        let mut eval = JSONEval::new(&schema_str, None, Some(&data_str)).unwrap();
44        
45        println!("Cache enabled: {}", eval.is_cache_enabled());
46        println!("Initial cache size: {}", eval.cache_len());
47        
48        eval.evaluate(&data_str, None).unwrap();
49        
50        println!("After evaluation cache size: {}", eval.cache_len());
51        let stats = eval.cache_stats();
52        println!("Cache stats: {}", stats);
53    }
54    
55    println!("\n=== Example 2: Without Caching (Web API Mode) ===");
56    {
57        let data = json!({ "price": 200 });
58        let data_str = serde_json::to_string(&data).unwrap();
59        
60        let mut eval = JSONEval::new(&schema_str, None, Some(&data_str)).unwrap();
61        
62        // Disable caching for single-use web API scenario
63        eval.disable_cache();
64        
65        println!("Cache enabled: {}", eval.is_cache_enabled());
66        println!("Initial cache size: {}", eval.cache_len());
67        
68        eval.evaluate(&data_str, None).unwrap();
69        
70        println!("After evaluation cache size: {}", eval.cache_len());
71        let stats = eval.cache_stats();
72        println!("Cache stats: {}", stats);
73        
74        println!("\n✅ No cache overhead - perfect for web APIs!");
75    }
76    
77    println!("\n=== Example 3: Re-enabling Cache ===");
78    {
79        let data = json!({ "price": 300 });
80        let data_str = serde_json::to_string(&data).unwrap();
81        
82        let mut eval = JSONEval::new(&schema_str, None, Some(&data_str)).unwrap();
83        
84        // Disable then re-enable
85        eval.disable_cache();
86        eval.enable_cache();
87        
88        println!("Cache enabled: {}", eval.is_cache_enabled());
89        eval.evaluate(&data_str, None).unwrap();
90        
91        println!("Cache size after evaluation: {}", eval.cache_len());
92        println!("\n✅ Cache can be toggled as needed!");
93    }
94}
examples/basic.rs (line 139)
28fn main() {
29    let args: Vec<String> = std::env::args().collect();
30    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic");
31    
32    let mut scenario_filter: Option<String> = None;
33    let mut enable_comparison = false;
34    let mut show_timing = false;
35    let mut i = 1;
36    
37    // Parse arguments
38    while i < args.len() {
39        let arg = &args[i];
40        
41        if arg == "-h" || arg == "--help" {
42            print_help(program_name);
43            return;
44        } else if arg == "--compare" {
45            enable_comparison = true;
46        } else if arg == "--timing" {
47            show_timing = true;
48        } else if !arg.starts_with('-') {
49            scenario_filter = Some(arg.clone());
50        } else {
51            eprintln!("Error: unknown option '{}'", arg);
52            print_help(program_name);
53            return;
54        }
55        
56        i += 1;
57    }
58    
59    println!("\n🚀 JSON Evaluation - Basic Example (JSON Schema)\n");
60    
61    if enable_comparison {
62        println!("🔍 Comparison: enabled");
63    }
64    if show_timing {
65        println!("⏱️  Internal timing: enabled");
66    }
67    if enable_comparison || show_timing {
68        println!();
69    }
70    
71    let samples_dir = Path::new("samples");
72    let mut scenarios = common::discover_scenarios(samples_dir);
73    
74    // Filter out MessagePack scenarios - only use JSON
75    scenarios.retain(|s| !s.is_msgpack);
76    
77    // Filter scenarios if a filter is provided
78    if let Some(ref filter) = scenario_filter {
79        scenarios.retain(|s| s.name.contains(filter));
80        println!("📋 Filtering scenarios matching: '{}'\n", filter);
81    }
82
83    if scenarios.is_empty() {
84        if let Some(filter) = scenario_filter {
85            println!(
86                "ℹ️  No scenarios found matching '{}' in `{}`.",
87                filter,
88                samples_dir.display()
89            );
90        } else {
91            println!(
92                "ℹ️  No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
93                samples_dir.display()
94            );
95        }
96        return;
97    }
98    
99    println!("📊 Found {} scenario(s)\n", scenarios.len());
100
101    let mut total_parse_time = std::time::Duration::ZERO;
102    let mut total_eval_time = std::time::Duration::ZERO;
103    let mut successful_scenarios = 0;
104    let mut comparison_failures = 0;
105
106    for scenario in &scenarios {
107        println!("==============================");
108        println!("Scenario: {}", scenario.name);
109        println!("Schema: {} ({})", 
110            scenario.schema_path.display(),
111            if scenario.is_msgpack { "MessagePack" } else { "JSON" }
112        );
113        println!("Data: {}\n", scenario.data_path.display());
114
115        // Clear timing data from previous scenarios
116        if show_timing {
117            json_eval_rs::enable_timing();
118            json_eval_rs::clear_timing_data();
119        }
120
121        let data_str = fs::read_to_string(&scenario.data_path)
122            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
123
124        // Step 1: Parse schema (JSONEval::new)
125        let parse_start = Instant::now();
126        
127        let schema_str = fs::read_to_string(&scenario.schema_path)
128            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
129        
130        let mut eval = JSONEval::new(&schema_str, None, Some(&data_str))
131            .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e));
132        
133        let parse_time = parse_start.elapsed();
134        println!("  📝 Parse (new): {:?}", parse_time);
135        
136        // Step 2: Evaluate
137        let eval_start = Instant::now();
138        
139        eval.evaluate(&data_str, Some("{}"))
140            .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
141        
142        let evaluated_schema = eval.get_evaluated_schema(false);
143        let eval_time = eval_start.elapsed();
144        
145        println!("  ⚡ Eval: {:?}", eval_time);
146        println!("  ⏱️  Total: {:?}\n", parse_time + eval_time);
147        
148        // Print detailed timing breakdown if --timing flag is set
149        if show_timing {
150            json_eval_rs::print_timing_summary();
151        }
152        
153        total_parse_time += parse_time;
154        total_eval_time += eval_time;
155        successful_scenarios += 1;
156
157        // Save results
158        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
159        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
160
161        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
162            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
163
164        let mut metadata_obj = Map::new();
165        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
166        metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
167        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
168
169        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
170            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
171
172        println!("✅ Results saved:");
173        println!("  - {}", evaluated_path.display());
174        println!("  - {}\n", parsed_path.display());
175
176        // Optional comparison
177        if enable_comparison {
178            if let Some(comp_path) = &scenario.comparison_path {
179                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
180                    comparison_failures += 1;
181                }
182                println!();
183            }
184        }
185    }
186    
187    // Print summary
188    println!("{}", "=".repeat(50));
189    println!("📊 Summary");
190    println!("{}", "=".repeat(50));
191    println!("Total scenarios run: {}", successful_scenarios);
192    println!("Total parse time: {:?}", total_parse_time);
193    println!("Total eval time: {:?}", total_eval_time);
194    println!("Total time: {:?}", total_parse_time + total_eval_time);
195    
196    if successful_scenarios > 1 {
197        println!("\nAverage per scenario:");
198        println!("  Parse: {:?}", total_parse_time / successful_scenarios as u32);
199        println!("  Eval: {:?}", total_eval_time / successful_scenarios as u32);
200    }
201    
202    if enable_comparison {
203        println!("Comparison failures: {}", comparison_failures);
204    }
205    
206    println!("\n✅ All scenarios completed!\n");
207}
examples/basic_msgpack.rs (line 138)
28fn main() {
29    let args: Vec<String> = std::env::args().collect();
30    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic_msgpack");
31    
32    let mut scenario_filter: Option<String> = None;
33    let mut enable_comparison = false;
34    let mut show_timing = false;
35    let mut i = 1;
36    
37    // Parse arguments
38    while i < args.len() {
39        let arg = &args[i];
40        
41        if arg == "-h" || arg == "--help" {
42            print_help(program_name);
43            return;
44        } else if arg == "--compare" {
45            enable_comparison = true;
46        } else if arg == "--timing" {
47            show_timing = true;
48        } else if !arg.starts_with('-') {
49            scenario_filter = Some(arg.clone());
50        } else {
51            eprintln!("Error: unknown option '{}'", arg);
52            print_help(program_name);
53            return;
54        }
55        
56        i += 1;
57    }
58    
59    println!("\n🚀 JSON Evaluation - Basic Example (MessagePack Schema)\n");
60    
61    if enable_comparison {
62        println!("🔍 Comparison: enabled");
63    }
64    if show_timing {
65        println!("⏱️  Internal timing: enabled");
66    }
67    if enable_comparison || show_timing {
68        println!();
69    }
70    
71    let samples_dir = Path::new("samples");
72    let mut scenarios = common::discover_scenarios(samples_dir);
73    
74    // Filter to only MessagePack scenarios
75    scenarios.retain(|s| s.is_msgpack);
76    
77    // Filter scenarios if a filter is provided
78    if let Some(ref filter) = scenario_filter {
79        scenarios.retain(|s| s.name.contains(filter));
80        println!("📋 Filtering scenarios matching: '{}'\n", filter);
81    }
82
83    if scenarios.is_empty() {
84        if let Some(filter) = scenario_filter {
85            println!(
86                "ℹ️  No MessagePack scenarios found matching '{}' in `{}`.",
87                filter,
88                samples_dir.display()
89            );
90        } else {
91            println!(
92                "ℹ️  No MessagePack scenarios discovered in `{}`. Add files like `name.bform` and `name-data.json`.",
93                samples_dir.display()
94            );
95        }
96        return;
97    }
98    
99    println!("📊 Found {} MessagePack scenario(s)\n", scenarios.len());
100
101    let mut total_parse_time = std::time::Duration::ZERO;
102    let mut total_eval_time = std::time::Duration::ZERO;
103    let mut successful_scenarios = 0;
104    let mut comparison_failures = 0;
105
106    for scenario in &scenarios {
107        println!("==============================");
108        println!("Scenario: {}", scenario.name);
109        println!("Schema: {} (MessagePack)", scenario.schema_path.display());
110        println!("Data: {}\n", scenario.data_path.display());
111
112        // Clear timing data from previous scenarios
113        if show_timing {
114            json_eval_rs::enable_timing();
115            json_eval_rs::clear_timing_data();
116        }
117
118        let data_str = fs::read_to_string(&scenario.data_path)
119            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
120
121        // Step 1: Parse schema (new_from_msgpack)
122        let parse_start = Instant::now();
123        
124        let schema_msgpack = fs::read(&scenario.schema_path)
125            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
126        
127        println!("  📦 MessagePack schema size: {} bytes", schema_msgpack.len());
128        
129        let mut eval = JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
130            .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e));
131        
132        let parse_time = parse_start.elapsed();
133        println!("  📝 Parse (msgpack): {:?}", parse_time);
134        
135        // Step 2: Evaluate
136        let eval_start = Instant::now();
137        
138        eval.evaluate(&data_str, Some("{}"))
139            .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
140        
141        let evaluated_schema = eval.get_evaluated_schema(false);
142        let eval_time = eval_start.elapsed();
143        
144        println!("  ⚡ Eval: {:?}", eval_time);
145        println!("  ⏱️  Total: {:?}\n", parse_time + eval_time);
146        
147        // Print detailed timing breakdown if --timing flag is set
148        if show_timing {
149            json_eval_rs::print_timing_summary();
150        }
151        
152        total_parse_time += parse_time;
153        total_eval_time += eval_time;
154        successful_scenarios += 1;
155
156        // Save results
157        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
158        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
159
160        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
161            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
162
163        let mut metadata_obj = Map::new();
164        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
165        metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
166        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
167
168        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
169            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
170
171        println!("✅ Results saved:");
172        println!("  - {}", evaluated_path.display());
173        println!("  - {}\n", parsed_path.display());
174
175        // Optional comparison
176        if enable_comparison {
177            if let Some(comp_path) = &scenario.comparison_path {
178                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
179                    comparison_failures += 1;
180                }
181                println!();
182            }
183        }
184    }
185    
186    // Print summary
187    println!("{}", "=".repeat(50));
188    println!("📊 Summary");
189    println!("{}", "=".repeat(50));
190    println!("Total scenarios run: {}", successful_scenarios);
191    println!("Total parse time: {:?}", total_parse_time);
192    println!("Total eval time: {:?}", total_eval_time);
193    println!("Total time: {:?}", total_parse_time + total_eval_time);
194    
195    if successful_scenarios > 1 {
196        println!("\nAverage per scenario:");
197        println!("  Parse: {:?}", total_parse_time / successful_scenarios as u32);
198        println!("  Eval: {:?}", total_eval_time / successful_scenarios as u32);
199    }
200    
201    if enable_comparison {
202        println!("Comparison failures: {}", comparison_failures);
203    }
204    
205    println!("\n✅ All scenarios completed!\n");
206}
examples/basic_parsed.rs (line 149)
30fn main() {
31    let args: Vec<String> = std::env::args().collect();
32    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic_parsed");
33    
34    let mut scenario_filter: Option<String> = None;
35    let mut enable_comparison = false;
36    let mut show_timing = false;
37    let mut i = 1;
38    
39    // Parse arguments
40    while i < args.len() {
41        let arg = &args[i];
42        
43        if arg == "-h" || arg == "--help" {
44            print_help(program_name);
45            return;
46        } else if arg == "--compare" {
47            enable_comparison = true;
48        } else if arg == "--timing" {
49            show_timing = true;
50        } else if !arg.starts_with('-') {
51            scenario_filter = Some(arg.clone());
52        } else {
53            eprintln!("Error: unknown option '{}'", arg);
54            print_help(program_name);
55            return;
56        }
57        
58        i += 1;
59    }
60    
61    println!("\n🚀 JSON Evaluation - Basic Example (ParsedSchema)\n");
62    println!("📦 Using Arc<ParsedSchema> for efficient caching\n");
63    
64    if enable_comparison {
65        println!("🔍 Comparison: enabled");
66    }
67    if show_timing {
68        println!("⏱️  Internal timing: enabled");
69    }
70    if enable_comparison || show_timing {
71        println!();
72    }
73    
74    let samples_dir = Path::new("samples");
75    let mut scenarios = common::discover_scenarios(samples_dir);
76    
77    // Filter scenarios if a filter is provided
78    if let Some(ref filter) = scenario_filter {
79        scenarios.retain(|s| s.name.contains(filter));
80        println!("📋 Filtering scenarios matching: '{}'\n", filter);
81    }
82
83    if scenarios.is_empty() {
84        if let Some(filter) = scenario_filter {
85            println!(
86                "ℹ️  No scenarios found matching '{}' in `{}`.",
87                filter,
88                samples_dir.display()
89            );
90        } else {
91            println!(
92                "ℹ️  No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
93                samples_dir.display()
94            );
95        }
96        return;
97    }
98    
99    println!("📊 Found {} scenario(s)\n", scenarios.len());
100
101    let mut total_parse_time = std::time::Duration::ZERO;
102    let mut total_eval_time = std::time::Duration::ZERO;
103    let mut successful_scenarios = 0;
104    let mut comparison_failures = 0;
105
106    for scenario in &scenarios {
107        println!("==============================");
108        println!("Scenario: {}", scenario.name);
109        println!("Schema: {} ({})", 
110            scenario.schema_path.display(),
111            if scenario.is_msgpack { "MessagePack" } else { "JSON" }
112        );
113        println!("Data: {}\n", scenario.data_path.display());
114
115        // Clear timing data from previous scenarios
116        if show_timing {
117            json_eval_rs::enable_timing();
118            json_eval_rs::clear_timing_data();
119        }
120
121        let data_str = fs::read_to_string(&scenario.data_path)
122            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
123
124        // Step 1: Parse schema once
125        let parse_start = Instant::now();
126        let parsed_schema = if scenario.is_msgpack {
127            let schema_msgpack = fs::read(&scenario.schema_path)
128                .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
129            println!("  📦 MessagePack schema size: {} bytes", schema_msgpack.len());
130            Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
131                .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
132        } else {
133            let schema_str = fs::read_to_string(&scenario.schema_path)
134                .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
135            Arc::new(ParsedSchema::parse(&schema_str)
136                .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
137        };
138        let parse_time = parse_start.elapsed();
139        println!("  📝 Schema parsing: {:?}", parse_time);
140        
141        // Step 2: Create JSONEval from ParsedSchema (reuses compiled logic)
142        let eval_start = Instant::now();
143        let mut eval = JSONEval::with_parsed_schema(
144            parsed_schema.clone(),  // Arc::clone is cheap!
145            Some("{}"),
146            Some(&data_str)
147        ).unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e));
148
149        eval.evaluate(&data_str, Some("{}"))
150            .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
151        
152        let evaluated_schema = eval.get_evaluated_schema(false);
153        let eval_time = eval_start.elapsed();
154        
155        println!("  ⚡ Eval: {:?}", eval_time);
156        println!("  ⏱️  Total: {:?}\n", parse_time + eval_time);
157        
158        // Print detailed timing breakdown if --timing flag is set
159        if show_timing {
160            json_eval_rs::print_timing_summary();
161        }
162        
163        total_parse_time += parse_time;
164        total_eval_time += eval_time;
165        successful_scenarios += 1;
166
167        // Save results
168        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
169        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
170
171        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
172            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
173
174        let mut metadata_obj = Map::new();
175        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
176        metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
177        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
178
179        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
180            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
181
182        println!("✅ Results saved:");
183        println!("  - {}", evaluated_path.display());
184        println!("  - {}\n", parsed_path.display());
185
186        // Optional comparison
187        if enable_comparison {
188            if let Some(comp_path) = &scenario.comparison_path {
189                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
190                    comparison_failures += 1;
191                }
192                println!();
193            }
194        }
195    }
196    
197    // Print summary
198    println!("{}", "=".repeat(50));
199    println!("📊 Summary");
200    println!("{}", "=".repeat(50));
201    println!("Total scenarios run: {}", successful_scenarios);
202    println!("Total parsing time: {:?}", total_parse_time);
203    println!("Total evaluation time: {:?}", total_eval_time);
204    println!("Total time: {:?}", total_parse_time + total_eval_time);
205    
206    if successful_scenarios > 1 {
207        println!("\nAverage per scenario:");
208        println!("  Parsing: {:?}", total_parse_time / successful_scenarios as u32);
209        println!("  Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
210    }
211    
212    if enable_comparison {
213        println!("\nComparison failures: {}", comparison_failures);
214    }
215    
216    println!("\n✅ All scenarios completed!\n");
217}
examples/benchmark.rs (line 221)
31fn main() {
32    let args: Vec<String> = std::env::args().collect();
33    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("benchmark");
34    
35    let mut iterations = 1usize;
36    let mut scenario_filter: Option<String> = None;
37    let mut show_cpu_info = false;
38    let mut use_parsed_schema = false;
39    let mut concurrent_count: Option<usize> = None;
40    let mut enable_comparison = false;
41    let mut show_timing = false;
42    let mut i = 1;
43    
44    // Parse arguments
45    while i < args.len() {
46        let arg = &args[i];
47        
48        if arg == "-h" || arg == "--help" {
49            print_help(program_name);
50            return;
51        } else if arg == "--cpu-info" {
52            show_cpu_info = true;
53        } else if arg == "--parsed" {
54            use_parsed_schema = true;
55        } else if arg == "--compare" {
56            enable_comparison = true;
57        } else if arg == "--timing" {
58            show_timing = true;
59        } else if arg == "--concurrent" {
60            if i + 1 >= args.len() {
61                eprintln!("Error: {} requires a value", arg);
62                print_help(program_name);
63                return;
64            }
65            i += 1;
66            match args[i].parse::<usize>() {
67                Ok(n) if n > 0 => concurrent_count = Some(n),
68                _ => {
69                    eprintln!("Error: concurrent count must be a positive integer, got '{}'", args[i]);
70                    return;
71                }
72            }
73        } else if arg == "-i" || arg == "--iterations" {
74            if i + 1 >= args.len() {
75                eprintln!("Error: {} requires a value", arg);
76                print_help(program_name);
77                return;
78            }
79            i += 1;
80            match args[i].parse::<usize>() {
81                Ok(n) if n > 0 => iterations = n,
82                _ => {
83                    eprintln!("Error: iterations must be a positive integer, got '{}'", args[i]);
84                    return;
85                }
86            }
87        } else if !arg.starts_with('-') {
88            scenario_filter = Some(arg.clone());
89        } else {
90            eprintln!("Error: unknown option '{}'", arg);
91            print_help(program_name);
92            return;
93        }
94        
95        i += 1;
96    }
97    
98    println!("\n🚀 JSON Evaluation - Benchmark\n");
99    
100    // Show CPU info if requested or if running benchmarks
101    if show_cpu_info || iterations > 1 || concurrent_count.is_some() {
102        common::print_cpu_info();
103    }
104    
105    if use_parsed_schema {
106        println!("📦 Mode: ParsedSchema (parse once, reuse for all iterations)\n");
107    }
108    
109    if let Some(count) = concurrent_count {
110        println!("🔀 Concurrent evaluations: {} threads\n", count);
111    } else if iterations > 1 {
112        println!("🔄 Iterations per scenario: {}\n", iterations);
113    }
114    
115    if enable_comparison {
116        println!("🔍 Comparison: enabled");
117    }
118    if show_timing {
119        println!("⏱️  Internal timing: enabled");
120    }
121    if enable_comparison || show_timing {
122        println!();
123    }
124
125    let samples_dir = Path::new("samples");
126    let mut scenarios = common::discover_scenarios(samples_dir);
127    
128    // Filter scenarios if a filter is provided
129    if let Some(ref filter) = scenario_filter {
130        scenarios.retain(|s| s.name.contains(filter));
131        println!("📋 Filtering scenarios matching: '{}'\n", filter);
132    }
133
134    if scenarios.is_empty() {
135        if let Some(filter) = scenario_filter {
136            println!(
137                "ℹ️  No scenarios found matching '{}' in `{}`.",
138                filter,
139                samples_dir.display()
140            );
141        } else {
142            println!(
143                "ℹ️  No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
144                samples_dir.display()
145            );
146        }
147        return;
148    }
149    
150    println!("📊 Found {} scenario(s)\n", scenarios.len());
151
152    let mut total_parse_time = std::time::Duration::ZERO;
153    let mut total_eval_time = std::time::Duration::ZERO;
154    let mut successful_scenarios = 0;
155    let mut comparison_failures = 0;
156
157    for scenario in &scenarios {
158        println!("==============================");
159        println!("Scenario: {}", scenario.name);
160        println!("Schema: {} ({})", 
161            scenario.schema_path.display(),
162            if scenario.is_msgpack { "MessagePack" } else { "JSON" }
163        );
164        println!("Data: {}\n", scenario.data_path.display());
165
166        // Clear timing data from previous scenarios
167        if show_timing {
168            json_eval_rs::enable_timing();
169            json_eval_rs::clear_timing_data();
170        }
171
172        let data_str = fs::read_to_string(&scenario.data_path)
173            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
174
175        println!("Running evaluation...\n");
176
177        let (parse_time, eval_time, evaluated_schema, eval, iteration_times) = if use_parsed_schema {
178            // ParsedSchema mode: parse once, reuse for all iterations/threads
179            let start_time = Instant::now();
180            
181            let parsed_schema = if scenario.is_msgpack {
182                let schema_msgpack = fs::read(&scenario.schema_path)
183                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
184                println!("  📦 MessagePack schema size: {} bytes", schema_msgpack.len());
185                Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
186                    .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
187            } else {
188                let schema_str = fs::read_to_string(&scenario.schema_path)
189                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
190                Arc::new(ParsedSchema::parse(&schema_str)
191                    .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
192            };
193            
194            let parse_time = start_time.elapsed();
195            println!("  Schema parsing & compilation: {:?}", parse_time);
196            
197            // Concurrent mode with ParsedSchema
198            if let Some(thread_count) = concurrent_count {
199                use std::thread;
200                
201                let eval_start = Instant::now();
202                let mut handles = vec![];
203                
204                for thread_id in 0..thread_count {
205                    let parsed_clone = parsed_schema.clone();
206                    let data_str_clone = data_str.clone();
207                    let iter_count = iterations;
208                    
209                    let handle = thread::spawn(move || {
210                        let mut thread_times = Vec::with_capacity(iter_count);
211                        let mut last_schema = Value::Null;
212                        
213                        for _ in 0..iter_count {
214                            let iter_start = Instant::now();
215                            let mut eval_instance = JSONEval::with_parsed_schema(
216                                parsed_clone.clone(),
217                                Some("{}"),
218                                Some(&data_str_clone)
219                            ).unwrap();
220                            
221                            eval_instance.evaluate(&data_str_clone, Some("{}")).unwrap();
222                            last_schema = eval_instance.get_evaluated_schema(false);
223                            thread_times.push(iter_start.elapsed());
224                        }
225                        
226                        (thread_times, last_schema, thread_id)
227                    });
228                    handles.push(handle);
229                }
230                
231                let mut all_iteration_times = Vec::new();
232                let mut evaluated_schema = Value::Null;
233                
234                for handle in handles {
235                    let (thread_times, thread_schema, thread_id) = handle.join().unwrap();
236                    println!("  Thread {} completed {} iterations", thread_id, thread_times.len());
237                    all_iteration_times.extend(thread_times);
238                    evaluated_schema = thread_schema; // Use last thread's result
239                }
240                
241                let eval_time = eval_start.elapsed();
242                
243                // Create a temp eval for metadata export
244                let temp_eval = JSONEval::with_parsed_schema(
245                    parsed_schema.clone(),
246                    Some("{}"),
247                    Some(&data_str)
248                ).unwrap();
249                
250                (parse_time, eval_time, evaluated_schema, temp_eval, all_iteration_times)
251            } else {
252                // Sequential iterations with ParsedSchema
253                let eval_start = Instant::now();
254                let mut evaluated_schema = Value::Null;
255                let mut iteration_times = Vec::with_capacity(iterations);
256                let mut eval_instance = JSONEval::with_parsed_schema(
257                    parsed_schema.clone(),
258                    Some("{}"),
259                    Some(&data_str)
260                ).unwrap();
261                
262                for iter in 0..iterations {
263                    let iter_start = Instant::now();
264                    eval_instance.evaluate(&data_str, Some("{}"))
265                        .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
266                    evaluated_schema = eval_instance.get_evaluated_schema(false);
267                    iteration_times.push(iter_start.elapsed());
268                    
269                    if iterations > 1 && (iter + 1) % 10 == 0 {
270                        print!(".");
271                        if (iter + 1) % 50 == 0 {
272                            println!(" {}/{}", iter + 1, iterations);
273                        }
274                    }
275                }
276                
277                if iterations > 1 && iterations % 50 != 0 {
278                    println!(" {}/{}", iterations, iterations);
279                }
280                
281                let eval_time = eval_start.elapsed();
282                (parse_time, eval_time, evaluated_schema, eval_instance, iteration_times)
283            }
284        } else {
285            // Traditional mode: parse and create JSONEval each time
286            let start_time = Instant::now();
287            let mut eval = if scenario.is_msgpack {
288                let schema_msgpack = fs::read(&scenario.schema_path)
289                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
290                println!("  📦 MessagePack schema size: {} bytes", schema_msgpack.len());
291                JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
292                    .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e))
293            } else {
294                let schema_str = fs::read_to_string(&scenario.schema_path)
295                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
296                JSONEval::new(&schema_str, None, Some(&data_str))
297                    .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
298            };
299            let parse_time = start_time.elapsed();
300            println!("  Schema parsing & compilation: {:?}", parse_time);
301            
302            let eval_start = Instant::now();
303            let mut evaluated_schema = Value::Null;
304            let mut iteration_times = Vec::with_capacity(iterations);
305            
306            for iter in 0..iterations {
307                let iter_start = Instant::now();
308                eval.evaluate(&data_str, Some("{}"))
309                    .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
310                evaluated_schema = eval.get_evaluated_schema(false);
311                iteration_times.push(iter_start.elapsed());
312                
313                if iterations > 1 && (iter + 1) % 10 == 0 {
314                    print!(".");
315                    if (iter + 1) % 50 == 0 {
316                        println!(" {}/{}", iter + 1, iterations);
317                    }
318                }
319            }
320            
321            if iterations > 1 && iterations % 50 != 0 {
322                println!(" {}/{}", iterations, iterations);
323            }
324            
325            let eval_time = eval_start.elapsed();
326            (parse_time, eval_time, evaluated_schema, eval, iteration_times)
327        };
328        
329        // Calculate statistics
330        let total_iterations = iteration_times.len();
331        if total_iterations == 1 {
332            println!("  Evaluation: {:?}", eval_time);
333        } else {
334            let avg_time = eval_time / total_iterations as u32;
335            let min_time = iteration_times.iter().min().unwrap();
336            let max_time = iteration_times.iter().max().unwrap();
337            
338            println!("  Total evaluation time: {:?}", eval_time);
339            println!("  Total iterations: {}", total_iterations);
340            println!("  Average per iteration: {:?}", avg_time);
341            println!("  Min: {:?} | Max: {:?}", min_time, max_time);
342            
343            // Show cache statistics
344            let cache_stats = eval.cache_stats();
345            println!("  Cache: {} entries, {} hits, {} misses ({:.1}% hit rate)",
346                cache_stats.entries,
347                cache_stats.hits,
348                cache_stats.misses,
349                cache_stats.hit_rate * 100.0
350            );
351        }
352
353        let total_time = parse_time + eval_time;
354        println!("⏱️  Execution time: {:?}\n", total_time);
355        
356        // Print detailed timing breakdown if --timing flag is set
357        if show_timing {
358            json_eval_rs::print_timing_summary();
359        }
360        
361        // Track statistics
362        total_parse_time += parse_time;
363        total_eval_time += eval_time;
364        successful_scenarios += 1;
365
366        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
367        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
368
369        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
370            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
371
372        let mut metadata_obj = Map::new();
373        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
374        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
375
376        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
377            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
378
379        println!("✅ Results saved:");
380        println!("  - {}", evaluated_path.display());
381        println!("  - {}\n", parsed_path.display());
382
383        // Optional comparison
384        if enable_comparison {
385            if let Some(comp_path) = &scenario.comparison_path {
386                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
387                    comparison_failures += 1;
388                }
389                println!();
390            }
391        }
392    }
393    
394    // Print summary statistics
395    if successful_scenarios > 0 {
396        println!("\n{}", "=".repeat(50));
397        println!("📊 Summary Statistics");
398        println!("{}", "=".repeat(50));
399        println!("Total scenarios run: {}", successful_scenarios);
400        println!("Total parsing time: {:?}", total_parse_time);
401        println!("Total evaluation time: {:?}", total_eval_time);
402        println!("Total time: {:?}", total_parse_time + total_eval_time);
403        
404        if successful_scenarios > 1 {
405            println!("\nAverage per scenario:");
406            println!("  Parsing: {:?}", total_parse_time / successful_scenarios as u32);
407            println!("  Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
408        }
409        
410        if enable_comparison {
411            println!("\nComparison failures: {}", comparison_failures);
412        }
413        
414        println!("\n✅ All scenarios completed successfully!\n");
415    }
416}
Source

pub fn get_evaluated_schema(&mut self, skip_layout: bool) -> Value

Get the evaluated schema with optional layout resolution.

§Arguments
  • skip_layout - If true, layout resolution is skipped; if false, layouts are resolved in the returned schema.
§Returns

The evaluated schema as a JSON value.
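A minimal sketch of the typical call sequence, shown for quick orientation before the full repository listings below; the file paths are placeholders and the argument pattern simply mirrors examples/basic.rs further down:

use std::fs;
use json_eval_rs::JSONEval;

fn main() {
    // Placeholder paths: substitute any schema/data pair, e.g. from `samples/`.
    let schema_str = fs::read_to_string("samples/example.json").expect("failed to read schema");
    let data_str = fs::read_to_string("samples/example-data.json").expect("failed to read data");

    // Parse and compile the schema, then evaluate it against the data with an empty "{}" context.
    let mut eval = JSONEval::new(&schema_str, None, Some(&data_str)).expect("failed to create JSONEval");
    eval.evaluate(&data_str, Some("{}")).expect("evaluation failed");

    // `false` means layout resolution is not skipped.
    let evaluated_schema = eval.get_evaluated_schema(false);
    println!("{:#}", evaluated_schema);
}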

Examples found in repository
examples/cache_demo.rs (line 76)
38fn demo_local_cache() -> Result<(), Box<dyn std::error::Error>> {
39    println!("📦 Example 1: Local Cache Instance");
40    println!("Creating a dedicated cache for this application...\n");
41    
42    let cache = ParsedSchemaCache::new();
43    
44    // Simple schema
45    let schema_json = r#"{
46        "$params": {
47            "rate": { "type": "number" }
48        },
49        "result": {
50            "type": "number",
51            "title": "Calculated Result",
52            "$evaluation": {
53                "logic": { "*": [{"var": "$rate"}, 100] }
54            }
55        }
56    }"#;
57    
58    // Parse and cache with a custom key
59    println!("📝 Parsing schema and caching with key 'calculation-v1'...");
60    let parsed = ParsedSchema::parse(schema_json)?;
61    cache.insert("calculation-v1".to_string(), Arc::new(parsed));
62    
63    println!("✅ Schema cached successfully");
64    println!("   Cache size: {} entries", cache.len());
65    println!("   Keys: {:?}\n", cache.keys());
66    
67    // Retrieve and use cached schema
68    println!("🔍 Retrieving cached schema...");
69    if let Some(cached_schema) = cache.get("calculation-v1") {
70        println!("✅ Retrieved from cache");
71        
72        // Create JSONEval from cached ParsedSchema
73        let mut eval = JSONEval::with_parsed_schema(cached_schema, Some(r#"{"rate": 1.5}"#), None)?;
74        eval.evaluate("{}", None)?;
75        
76        let evaluated = eval.get_evaluated_schema(false);
77        let result = evaluated.pointer("/result")
78            .and_then(|v| v.as_f64())
79            .unwrap_or(0.0);
80        println!("   Evaluation result: {}\n", result);
81    }
82    
83    // Check cache stats
84    let stats = cache.stats();
85    println!("📊 Cache Statistics: {}", stats);
86    
87    // Remove entry
88    println!("\n🗑️  Removing 'calculation-v1' from cache...");
89    cache.remove("calculation-v1");
90    println!("   Cache size after removal: {}", cache.len());
91    
92    Ok(())
93}
94
95fn demo_global_cache() -> Result<(), Box<dyn std::error::Error>> {
96    println!("🌍 Example 2: Global Cache Instance");
97    println!("Using the built-in PARSED_SCHEMA_CACHE...\n");
98    
99    let schema_json = r#"{
100        "$params": {
101            "x": { "type": "number" },
102            "y": { "type": "number" }
103        },
104        "sum": {
105            "type": "number",
106            "$evaluation": { "+": [{"var": "$x"}, {"var": "$y"}] }
107        }
108    }"#;
109    
110    // Use global cache
111    println!("📝 Caching schema globally with key 'math-operations'...");
112    let parsed = ParsedSchema::parse(schema_json)?;
113    PARSED_SCHEMA_CACHE.insert("math-operations".to_string(), Arc::new(parsed));
114    
115    println!("✅ Schema cached globally");
116    println!("   Global cache size: {}\n", PARSED_SCHEMA_CACHE.len());
117    
118    // Access from anywhere in the application
119    simulate_another_function()?;
120    
121    // Clean up
122    println!("\n🧹 Clearing global cache...");
123    PARSED_SCHEMA_CACHE.clear();
124    println!("   Global cache size: {}", PARSED_SCHEMA_CACHE.len());
125    
126    Ok(())
127}
128
129fn simulate_another_function() -> Result<(), Box<dyn std::error::Error>> {
130    println!("🔄 In another function, accessing global cache...");
131    
132    if let Some(cached) = PARSED_SCHEMA_CACHE.get("math-operations") {
133        println!("✅ Retrieved schema from global cache");
134        
135        let mut eval = JSONEval::with_parsed_schema(cached, Some(r#"{"x": 10, "y": 20}"#), None)?;
136        eval.evaluate("{}", None)?;
137        
138        let evaluated = eval.get_evaluated_schema(false);
139        let sum = evaluated.pointer("/sum")
140            .and_then(|v| v.as_f64())
141            .unwrap_or(0.0);
142        println!("   Result: {}", sum);
143    }
144    
145    Ok(())
146}
More examples
examples/basic.rs (line 142)
28fn main() {
29    let args: Vec<String> = std::env::args().collect();
30    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("basic");
31    
32    let mut scenario_filter: Option<String> = None;
33    let mut enable_comparison = false;
34    let mut show_timing = false;
35    let mut i = 1;
36    
37    // Parse arguments
38    while i < args.len() {
39        let arg = &args[i];
40        
41        if arg == "-h" || arg == "--help" {
42            print_help(program_name);
43            return;
44        } else if arg == "--compare" {
45            enable_comparison = true;
46        } else if arg == "--timing" {
47            show_timing = true;
48        } else if !arg.starts_with('-') {
49            scenario_filter = Some(arg.clone());
50        } else {
51            eprintln!("Error: unknown option '{}'", arg);
52            print_help(program_name);
53            return;
54        }
55        
56        i += 1;
57    }
58    
59    println!("\n🚀 JSON Evaluation - Basic Example (JSON Schema)\n");
60    
61    if enable_comparison {
62        println!("🔍 Comparison: enabled");
63    }
64    if show_timing {
65        println!("⏱️  Internal timing: enabled");
66    }
67    if enable_comparison || show_timing {
68        println!();
69    }
70    
71    let samples_dir = Path::new("samples");
72    let mut scenarios = common::discover_scenarios(samples_dir);
73    
74    // Filter out MessagePack scenarios - only use JSON
75    scenarios.retain(|s| !s.is_msgpack);
76    
77    // Filter scenarios if a filter is provided
78    if let Some(ref filter) = scenario_filter {
79        scenarios.retain(|s| s.name.contains(filter));
80        println!("📋 Filtering scenarios matching: '{}'\n", filter);
81    }
82
83    if scenarios.is_empty() {
84        if let Some(filter) = scenario_filter {
85            println!(
86                "ℹ️  No scenarios found matching '{}' in `{}`.",
87                filter,
88                samples_dir.display()
89            );
90        } else {
91            println!(
92                "ℹ️  No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
93                samples_dir.display()
94            );
95        }
96        return;
97    }
98    
99    println!("📊 Found {} scenario(s)\n", scenarios.len());
100
101    let mut total_parse_time = std::time::Duration::ZERO;
102    let mut total_eval_time = std::time::Duration::ZERO;
103    let mut successful_scenarios = 0;
104    let mut comparison_failures = 0;
105
106    for scenario in &scenarios {
107        println!("==============================");
108        println!("Scenario: {}", scenario.name);
109        println!("Schema: {} ({})", 
110            scenario.schema_path.display(),
111            if scenario.is_msgpack { "MessagePack" } else { "JSON" }
112        );
113        println!("Data: {}\n", scenario.data_path.display());
114
115        // Clear timing data from previous scenarios
116        if show_timing {
117            json_eval_rs::enable_timing();
118            json_eval_rs::clear_timing_data();
119        }
120
121        let data_str = fs::read_to_string(&scenario.data_path)
122            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
123
124        // Step 1: Parse schema (JSONEval::new)
125        let parse_start = Instant::now();
126        
127        let schema_str = fs::read_to_string(&scenario.schema_path)
128            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
129        
130        let mut eval = JSONEval::new(&schema_str, None, Some(&data_str))
131            .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e));
132        
133        let parse_time = parse_start.elapsed();
134        println!("  📝 Parse (new): {:?}", parse_time);
135        
136        // Step 2: Evaluate
137        let eval_start = Instant::now();
138        
139        eval.evaluate(&data_str, Some("{}"))
140            .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
141        
142        let evaluated_schema = eval.get_evaluated_schema(false);
143        let eval_time = eval_start.elapsed();
144        
145        println!("  ⚡ Eval: {:?}", eval_time);
146        println!("  ⏱️  Total: {:?}\n", parse_time + eval_time);
147        
148        // Print detailed timing breakdown if --timing flag is set
149        if show_timing {
150            json_eval_rs::print_timing_summary();
151        }
152        
153        total_parse_time += parse_time;
154        total_eval_time += eval_time;
155        successful_scenarios += 1;
156
157        // Save results
158        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
159        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
160
161        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
162            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
163
164        let mut metadata_obj = Map::new();
165        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
166        metadata_obj.insert("evaluations".to_string(), serde_json::to_value(&*eval.evaluations).unwrap());
167        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
168
169        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
170            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
171
172        println!("✅ Results saved:");
173        println!("  - {}", evaluated_path.display());
174        println!("  - {}\n", parsed_path.display());
175
176        // Optional comparison
177        if enable_comparison {
178            if let Some(comp_path) = &scenario.comparison_path {
179                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
180                    comparison_failures += 1;
181                }
182                println!();
183            }
184        }
185    }
186    
187    // Print summary
188    println!("{}", "=".repeat(50));
189    println!("📊 Summary");
190    println!("{}", "=".repeat(50));
191    println!("Total scenarios run: {}", successful_scenarios);
192    println!("Total parse time: {:?}", total_parse_time);
193    println!("Total eval time: {:?}", total_eval_time);
194    println!("Total time: {:?}", total_parse_time + total_eval_time);
195    
196    if successful_scenarios > 1 {
197        println!("\nAverage per scenario:");
198        println!("  Parse: {:?}", total_parse_time / successful_scenarios as u32);
199        println!("  Eval: {:?}", total_eval_time / successful_scenarios as u32);
200    }
201    
202    if enable_comparison {
203        println!("Comparison failures: {}", comparison_failures);
204    }
205    
206    println!("\n✅ All scenarios completed!\n");
207}
examples/basic_msgpack.rs (line 141): code listing identical to the one shown earlier on this page.
examples/basic_parsed.rs (line 152): code listing identical to the one shown earlier on this page.
examples/benchmark.rs (line 222)
31fn main() {
32    let args: Vec<String> = std::env::args().collect();
33    let program_name = args.get(0).map(|s| s.as_str()).unwrap_or("benchmark");
34    
35    let mut iterations = 1usize;
36    let mut scenario_filter: Option<String> = None;
37    let mut show_cpu_info = false;
38    let mut use_parsed_schema = false;
39    let mut concurrent_count: Option<usize> = None;
40    let mut enable_comparison = false;
41    let mut show_timing = false;
42    let mut i = 1;
43    
44    // Parse arguments
45    while i < args.len() {
46        let arg = &args[i];
47        
48        if arg == "-h" || arg == "--help" {
49            print_help(program_name);
50            return;
51        } else if arg == "--cpu-info" {
52            show_cpu_info = true;
53        } else if arg == "--parsed" {
54            use_parsed_schema = true;
55        } else if arg == "--compare" {
56            enable_comparison = true;
57        } else if arg == "--timing" {
58            show_timing = true;
59        } else if arg == "--concurrent" {
60            if i + 1 >= args.len() {
61                eprintln!("Error: {} requires a value", arg);
62                print_help(program_name);
63                return;
64            }
65            i += 1;
66            match args[i].parse::<usize>() {
67                Ok(n) if n > 0 => concurrent_count = Some(n),
68                _ => {
69                    eprintln!("Error: concurrent count must be a positive integer, got '{}'", args[i]);
70                    return;
71                }
72            }
73        } else if arg == "-i" || arg == "--iterations" {
74            if i + 1 >= args.len() {
75                eprintln!("Error: {} requires a value", arg);
76                print_help(program_name);
77                return;
78            }
79            i += 1;
80            match args[i].parse::<usize>() {
81                Ok(n) if n > 0 => iterations = n,
82                _ => {
83                    eprintln!("Error: iterations must be a positive integer, got '{}'", args[i]);
84                    return;
85                }
86            }
87        } else if !arg.starts_with('-') {
88            scenario_filter = Some(arg.clone());
89        } else {
90            eprintln!("Error: unknown option '{}'", arg);
91            print_help(program_name);
92            return;
93        }
94        
95        i += 1;
96    }
97    
98    println!("\n🚀 JSON Evaluation - Benchmark\n");
99    
100    // Show CPU info if requested or if running benchmarks
101    if show_cpu_info || iterations > 1 || concurrent_count.is_some() {
102        common::print_cpu_info();
103    }
104    
105    if use_parsed_schema {
106        println!("📦 Mode: ParsedSchema (parse once, reuse for all iterations)\n");
107    }
108    
109    if let Some(count) = concurrent_count {
110        println!("🔀 Concurrent evaluations: {} threads\n", count);
111    } else if iterations > 1 {
112        println!("🔄 Iterations per scenario: {}\n", iterations);
113    }
114    
115    if enable_comparison {
116        println!("🔍 Comparison: enabled");
117    }
118    if show_timing {
119        println!("⏱️  Internal timing: enabled");
120    }
121    if enable_comparison || show_timing {
122        println!();
123    }
124
125    let samples_dir = Path::new("samples");
126    let mut scenarios = common::discover_scenarios(samples_dir);
127    
128    // Filter scenarios if a filter is provided
129    if let Some(ref filter) = scenario_filter {
130        scenarios.retain(|s| s.name.contains(filter));
131        println!("📋 Filtering scenarios matching: '{}'\n", filter);
132    }
133
134    if scenarios.is_empty() {
135        if let Some(filter) = scenario_filter {
136            println!(
137                "ℹ️  No scenarios found matching '{}' in `{}`.",
138                filter,
139                samples_dir.display()
140            );
141        } else {
142            println!(
143                "ℹ️  No scenarios discovered in `{}`. Add files like `name.json` and `name-data.json`.",
144                samples_dir.display()
145            );
146        }
147        return;
148    }
149    
150    println!("📊 Found {} scenario(s)\n", scenarios.len());
151
152    let mut total_parse_time = std::time::Duration::ZERO;
153    let mut total_eval_time = std::time::Duration::ZERO;
154    let mut successful_scenarios = 0;
155    let mut comparison_failures = 0;
156
157    for scenario in &scenarios {
158        println!("==============================");
159        println!("Scenario: {}", scenario.name);
160        println!("Schema: {} ({})", 
161            scenario.schema_path.display(),
162            if scenario.is_msgpack { "MessagePack" } else { "JSON" }
163        );
164        println!("Data: {}\n", scenario.data_path.display());
165
166        // Clear timing data from previous scenarios
167        if show_timing {
168            json_eval_rs::enable_timing();
169            json_eval_rs::clear_timing_data();
170        }
171
172        let data_str = fs::read_to_string(&scenario.data_path)
173            .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.data_path.display(), e));
174
175        println!("Running evaluation...\n");
176
177        let (parse_time, eval_time, evaluated_schema, eval, iteration_times) = if use_parsed_schema {
178            // ParsedSchema mode: parse once, reuse for all iterations/threads
179            let start_time = Instant::now();
180            
181            let parsed_schema = if scenario.is_msgpack {
182                let schema_msgpack = fs::read(&scenario.schema_path)
183                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
184                println!("  📦 MessagePack schema size: {} bytes", schema_msgpack.len());
185                Arc::new(ParsedSchema::parse_msgpack(&schema_msgpack)
186                    .unwrap_or_else(|e| panic!("failed to parse MessagePack schema: {}", e)))
187            } else {
188                let schema_str = fs::read_to_string(&scenario.schema_path)
189                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
190                Arc::new(ParsedSchema::parse(&schema_str)
191                    .unwrap_or_else(|e| panic!("failed to parse schema: {}", e)))
192            };
193            
194            let parse_time = start_time.elapsed();
195            println!("  Schema parsing & compilation: {:?}", parse_time);
196            
197            // Concurrent mode with ParsedSchema
198            if let Some(thread_count) = concurrent_count {
199                use std::thread;
200                
201                let eval_start = Instant::now();
202                let mut handles = vec![];
203                
204                for thread_id in 0..thread_count {
205                    let parsed_clone = parsed_schema.clone();
206                    let data_str_clone = data_str.clone();
207                    let iter_count = iterations;
208                    
209                    let handle = thread::spawn(move || {
210                        let mut thread_times = Vec::with_capacity(iter_count);
211                        let mut last_schema = Value::Null;
212                        
213                        for _ in 0..iter_count {
214                            let iter_start = Instant::now();
215                            let mut eval_instance = JSONEval::with_parsed_schema(
216                                parsed_clone.clone(),
217                                Some("{}"),
218                                Some(&data_str_clone)
219                            ).unwrap();
220                            
221                            eval_instance.evaluate(&data_str_clone, Some("{}")).unwrap();
222                            last_schema = eval_instance.get_evaluated_schema(false);
223                            thread_times.push(iter_start.elapsed());
224                        }
225                        
226                        (thread_times, last_schema, thread_id)
227                    });
228                    handles.push(handle);
229                }
230                
231                let mut all_iteration_times = Vec::new();
232                let mut evaluated_schema = Value::Null;
233                
234                for handle in handles {
235                    let (thread_times, thread_schema, thread_id) = handle.join().unwrap();
236                    println!("  Thread {} completed {} iterations", thread_id, thread_times.len());
237                    all_iteration_times.extend(thread_times);
238                    evaluated_schema = thread_schema; // Use last thread's result
239                }
240                
241                let eval_time = eval_start.elapsed();
242                
243                // Create a temp eval for metadata export
244                let temp_eval = JSONEval::with_parsed_schema(
245                    parsed_schema.clone(),
246                    Some("{}"),
247                    Some(&data_str)
248                ).unwrap();
249                
250                (parse_time, eval_time, evaluated_schema, temp_eval, all_iteration_times)
251            } else {
252                // Sequential iterations with ParsedSchema
253                let eval_start = Instant::now();
254                let mut evaluated_schema = Value::Null;
255                let mut iteration_times = Vec::with_capacity(iterations);
256                let mut eval_instance = JSONEval::with_parsed_schema(
257                    parsed_schema.clone(),
258                    Some("{}"),
259                    Some(&data_str)
260                ).unwrap();
261                
262                for iter in 0..iterations {
263                    let iter_start = Instant::now();
264                    eval_instance.evaluate(&data_str, Some("{}"))
265                        .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
266                    evaluated_schema = eval_instance.get_evaluated_schema(false);
267                    iteration_times.push(iter_start.elapsed());
268                    
269                    if iterations > 1 && (iter + 1) % 10 == 0 {
270                        print!(".");
271                        if (iter + 1) % 50 == 0 {
272                            println!(" {}/{}", iter + 1, iterations);
273                        }
274                    }
275                }
276                
277                if iterations > 1 && iterations % 50 != 0 {
278                    println!(" {}/{}", iterations, iterations);
279                }
280                
281                let eval_time = eval_start.elapsed();
282                (parse_time, eval_time, evaluated_schema, eval_instance, iteration_times)
283            }
284        } else {
285            // Traditional mode: parse and create JSONEval each time
286            let start_time = Instant::now();
287            let mut eval = if scenario.is_msgpack {
288                let schema_msgpack = fs::read(&scenario.schema_path)
289                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
290                println!("  📦 MessagePack schema size: {} bytes", schema_msgpack.len());
291                JSONEval::new_from_msgpack(&schema_msgpack, None, Some(&data_str))
292                    .unwrap_or_else(|e| panic!("failed to create JSONEval from MessagePack: {}", e))
293            } else {
294                let schema_str = fs::read_to_string(&scenario.schema_path)
295                    .unwrap_or_else(|e| panic!("failed to read {}: {}", scenario.schema_path.display(), e));
296                JSONEval::new(&schema_str, None, Some(&data_str))
297                    .unwrap_or_else(|e| panic!("failed to create JSONEval: {}", e))
298            };
299            let parse_time = start_time.elapsed();
300            println!("  Schema parsing & compilation: {:?}", parse_time);
301            
302            let eval_start = Instant::now();
303            let mut evaluated_schema = Value::Null;
304            let mut iteration_times = Vec::with_capacity(iterations);
305            
306            for iter in 0..iterations {
307                let iter_start = Instant::now();
308                eval.evaluate(&data_str, Some("{}"))
309                    .unwrap_or_else(|e| panic!("evaluation failed: {}", e));
310                evaluated_schema = eval.get_evaluated_schema(false);
311                iteration_times.push(iter_start.elapsed());
312                
313                if iterations > 1 && (iter + 1) % 10 == 0 {
314                    print!(".");
315                    if (iter + 1) % 50 == 0 {
316                        println!(" {}/{}", iter + 1, iterations);
317                    }
318                }
319            }
320            
321            if iterations > 1 && iterations % 50 != 0 {
322                println!(" {}/{}", iterations, iterations);
323            }
324            
325            let eval_time = eval_start.elapsed();
326            (parse_time, eval_time, evaluated_schema, eval, iteration_times)
327        };
328        
329        // Calculate statistics
330        let total_iterations = iteration_times.len();
331        if total_iterations == 1 {
332            println!("  Evaluation: {:?}", eval_time);
333        } else {
334            let avg_time = eval_time / total_iterations as u32;
335            let min_time = iteration_times.iter().min().unwrap();
336            let max_time = iteration_times.iter().max().unwrap();
337            
338            println!("  Total evaluation time: {:?}", eval_time);
339            println!("  Total iterations: {}", total_iterations);
340            println!("  Average per iteration: {:?}", avg_time);
341            println!("  Min: {:?} | Max: {:?}", min_time, max_time);
342            
343            // Show cache statistics
344            let cache_stats = eval.cache_stats();
345            println!("  Cache: {} entries, {} hits, {} misses ({:.1}% hit rate)",
346                cache_stats.entries,
347                cache_stats.hits,
348                cache_stats.misses,
349                cache_stats.hit_rate * 100.0
350            );
351        }
352
353        let total_time = parse_time + eval_time;
354        println!("⏱️  Execution time: {:?}\n", total_time);
355        
356        // Print detailed timing breakdown if --timing flag is set
357        if show_timing {
358            json_eval_rs::print_timing_summary();
359        }
360        
361        // Track statistics
362        total_parse_time += parse_time;
363        total_eval_time += eval_time;
364        successful_scenarios += 1;
365
366        let evaluated_path = samples_dir.join(format!("{}-evaluated-schema.json", scenario.name));
367        let parsed_path = samples_dir.join(format!("{}-parsed-schema.json", scenario.name));
368
369        fs::write(&evaluated_path, common::pretty_json(&evaluated_schema))
370            .unwrap_or_else(|e| panic!("failed to write {}: {}", evaluated_path.display(), e));
371
372        let mut metadata_obj = Map::new();
373        metadata_obj.insert("dependencies".to_string(), serde_json::to_value(&*eval.dependencies).unwrap());
374        metadata_obj.insert("sorted_evaluations".to_string(), serde_json::to_value(&*eval.sorted_evaluations).unwrap());
375
376        fs::write(&parsed_path, common::pretty_json(&Value::Object(metadata_obj)))
377            .unwrap_or_else(|e| panic!("failed to write {}: {}", parsed_path.display(), e));
378
379        println!("✅ Results saved:");
380        println!("  - {}", evaluated_path.display());
381        println!("  - {}\n", parsed_path.display());
382
383        // Optional comparison
384        if enable_comparison {
385            if let Some(comp_path) = &scenario.comparison_path {
386                if common::compare_with_expected(&evaluated_schema, comp_path).is_err() {
387                    comparison_failures += 1;
388                }
389                println!();
390            }
391        }
392    }
393    
394    // Print summary statistics
395    if successful_scenarios > 0 {
396        println!("\n{}", "=".repeat(50));
397        println!("📊 Summary Statistics");
398        println!("{}", "=".repeat(50));
399        println!("Total scenarios run: {}", successful_scenarios);
400        println!("Total parsing time: {:?}", total_parse_time);
401        println!("Total evaluation time: {:?}", total_eval_time);
402        println!("Total time: {:?}", total_parse_time + total_eval_time);
403        
404        if successful_scenarios > 1 {
405            println!("\nAverage per scenario:");
406            println!("  Parsing: {:?}", total_parse_time / successful_scenarios as u32);
407            println!("  Evaluation: {:?}", total_eval_time / successful_scenarios as u32);
408        }
409        
410        if enable_comparison {
411            println!("\nComparison failures: {}", comparison_failures);
412        }
413        
414        println!("\n✅ All scenarios completed successfully!\n");
415    }
416}
Source

pub fn get_evaluated_schema_msgpack( &mut self, skip_layout: bool, ) -> Result<Vec<u8>, String>

Get the evaluated schema as MessagePack binary format

§Arguments
  • skip_layout - Whether to skip layout resolution.
§Returns

The evaluated schema serialized as MessagePack bytes

§Zero-Copy Optimization

This method serializes the evaluated schema to MessagePack. The resulting Vec<u8> is then passed across FFI/WASM boundaries via raw pointers (zero-copy at the boundary). The serialization step itself (Value -> MessagePack) cannot be avoided, but it is highly optimized by rmp-serde.
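
A minimal sketch of the round trip, assuming JSONEval is exported from the crate root and that the rmp-serde crate is available for decoding; the tiny schema and data strings are illustrative only:

use json_eval_rs::JSONEval;
use serde_json::Value;

fn main() {
    // Illustrative schema/data; any valid schema works here.
    let schema = r#"{"type":"object","properties":{"price":{"type":"number"}}}"#;
    let data = r#"{"price": 42}"#;

    let mut eval = JSONEval::new(schema, None, Some(data)).unwrap();
    eval.evaluate(data, None).unwrap();

    // Serialize the evaluated schema to MessagePack (skip_layout = false).
    let bytes: Vec<u8> = eval.get_evaluated_schema_msgpack(false).unwrap();

    // Decoding back into a serde_json::Value is purely illustrative; in
    // FFI/WASM usage the bytes would cross the boundary as-is.
    let decoded: Value = rmp_serde::from_slice(&bytes).expect("valid MessagePack");
    assert!(decoded.is_object());
}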

Source

pub fn get_schema_value(&mut self) -> Value

Get all schema values (evaluations ending with .value). Mutates self.data by overriding it with the results of those value evaluations, then returns the modified data.
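
A minimal sketch, assuming JSONEval is exported from the crate root; the schema mirrors the price/tax example used in examples/cache_disable.rs further down this page:

use json_eval_rs::JSONEval;

fn main() {
    // "tax.value" is computed from "price" by an $evaluation.
    let schema = r#"{
        "type": "object",
        "properties": {
            "price": { "type": "number" },
            "tax": {
                "type": "number",
                "value": { "$evaluation": { "*": [ { "$ref": "#/properties/price" }, 0.1 ] } }
            }
        }
    }"#;
    let data = r#"{"price": 100}"#;

    let mut eval = JSONEval::new(schema, None, Some(data)).unwrap();
    eval.evaluate(data, None).unwrap();

    // Overrides self.data with the ".value" evaluation results and returns
    // the merged data (expected to now carry the computed "tax").
    let merged = eval.get_schema_value();
    println!("{}", merged);
}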

Source

pub fn get_evaluated_schema_without_params( &mut self, skip_layout: bool, ) -> Value

Get the evaluated schema without the $params field. Only $params at the root level is filtered out.

§Arguments
  • skip_layout - Whether to skip layout resolution.
§Returns

The evaluated schema with $params removed.

Source

pub fn get_evaluated_schema_by_path( &mut self, path: &str, skip_layout: bool, ) -> Option<Value>

Get a value from the evaluated schema using dotted path notation. Converts dotted notation (e.g., “properties.field.value”) to JSON pointer format.

§Arguments
  • path - The dotted path to the value (e.g., “properties.field.value”)
  • skip_layout - Whether to skip layout resolution.
§Returns

The value at the specified path, or None if not found.

Source

pub fn get_evaluated_schema_by_paths( &mut self, paths: &[String], skip_layout: bool, format: Option<ReturnFormat>, ) -> Value

Get values from the evaluated schema using multiple dotted path notations. Returns data in the specified format. Skips paths that are not found.

§Arguments
  • paths - Array of dotted paths to retrieve (e.g., [“properties.field1”, “properties.field2”])
  • skip_layout - Whether to skip layout resolution.
  • format - Optional return format (Nested, Flat, or Array). Defaults to Nested.
§Returns

Data in the specified format, or an empty object/array if no paths are found.
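
A minimal sketch covering both single-path and multi-path retrieval, assuming JSONEval and ReturnFormat are exported from the crate root; the schema and paths are illustrative:

use json_eval_rs::{JSONEval, ReturnFormat};

fn main() {
    let schema = r#"{
        "type": "object",
        "properties": {
            "price": { "type": "number" },
            "tax": {
                "type": "number",
                "value": { "$evaluation": { "*": [ { "$ref": "#/properties/price" }, 0.1 ] } }
            }
        }
    }"#;
    let data = r#"{"price": 100}"#;
    let mut eval = JSONEval::new(schema, None, Some(data)).unwrap();
    eval.evaluate(data, None).unwrap();

    // Single dotted path: returns Option<Value>, None if the path is missing.
    if let Some(tax) = eval.get_evaluated_schema_by_path("properties.tax.value", false) {
        println!("tax: {}", tax);
    }

    // Multiple dotted paths returned in Flat format; unknown paths are skipped.
    let paths = vec![
        "properties.price".to_string(),
        "properties.tax.value".to_string(),
    ];
    let flat = eval.get_evaluated_schema_by_paths(&paths, false, Some(ReturnFormat::Flat));
    println!("{}", flat);
}

The non-mutating get_schema_by_path and get_schema_by_paths variants below follow the same pattern against the raw schema.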

Source

pub fn get_schema_by_path(&self, path: &str) -> Option<Value>

Get a value from the schema using dotted path notation. Converts dotted notation (e.g., “properties.field.value”) to JSON pointer format.

§Arguments
  • path - The dotted path to the value (e.g., “properties.field.value”)
§Returns

The value at the specified path, or None if not found.

Source

pub fn get_schema_by_paths( &self, paths: &[String], format: Option<ReturnFormat>, ) -> Value

Get values from the schema using multiple dotted path notations. Returns data in the specified format. Skips paths that are not found.

§Arguments
  • paths - Array of dotted paths to retrieve (e.g., [“properties.field1”, “properties.field2”])
  • format - Optional return format (Nested, Flat, or Array). Defaults to Nested.
§Returns

Data in the specified format, or an empty object/array if no paths are found.

Source

pub fn cache_stats(&self) -> CacheStats

Get cache statistics

Examples found in repository
examples/cache_disable.rs (line 51)
4fn main() {
5    let schema = json!({
6        "type": "object",
7        "properties": {
8            "price": {
9                "type": "number"
10            },
11            "tax": {
12                "type": "number",
13                "value": {
14                    "$evaluation": {
15                        "*": [
16                            { "$ref": "#/properties/price" },
17                            0.1
18                        ]
19                    }
20                }
21            },
22            "total": {
23                "type": "number",
24                "value": {
25                    "$evaluation": {
26                        "+": [
27                            { "$ref": "#/properties/price" },
28                            { "$ref": "#/properties/tax" }
29                        ]
30                    }
31                }
32            }
33        }
34    });
35
36    let schema_str = serde_json::to_string(&schema).unwrap();
37    
38    println!("=== Example 1: With Caching (Default) ===");
39    {
40        let data = json!({ "price": 100 });
41        let data_str = serde_json::to_string(&data).unwrap();
42        
43        let mut eval = JSONEval::new(&schema_str, None, Some(&data_str)).unwrap();
44        
45        println!("Cache enabled: {}", eval.is_cache_enabled());
46        println!("Initial cache size: {}", eval.cache_len());
47        
48        eval.evaluate(&data_str, None).unwrap();
49        
50        println!("After evaluation cache size: {}", eval.cache_len());
51        let stats = eval.cache_stats();
52        println!("Cache stats: {}", stats);
53    }
54    
55    println!("\n=== Example 2: Without Caching (Web API Mode) ===");
56    {
57        let data = json!({ "price": 200 });
58        let data_str = serde_json::to_string(&data).unwrap();
59        
60        let mut eval = JSONEval::new(&schema_str, None, Some(&data_str)).unwrap();
61        
62        // Disable caching for single-use web API scenario
63        eval.disable_cache();
64        
65        println!("Cache enabled: {}", eval.is_cache_enabled());
66        println!("Initial cache size: {}", eval.cache_len());
67        
68        eval.evaluate(&data_str, None).unwrap();
69        
70        println!("After evaluation cache size: {}", eval.cache_len());
71        let stats = eval.cache_stats();
72        println!("Cache stats: {}", stats);
73        
74        println!("\n✅ No cache overhead - perfect for web APIs!");
75    }
76    
77    println!("\n=== Example 3: Re-enabling Cache ===");
78    {
79        let data = json!({ "price": 300 });
80        let data_str = serde_json::to_string(&data).unwrap();
81        
82        let mut eval = JSONEval::new(&schema_str, None, Some(&data_str)).unwrap();
83        
84        // Disable then re-enable
85        eval.disable_cache();
86        eval.enable_cache();
87        
88        println!("Cache enabled: {}", eval.is_cache_enabled());
89        eval.evaluate(&data_str, None).unwrap();
90        
91        println!("Cache size after evaluation: {}", eval.cache_len());
92        println!("\n✅ Cache can be toggled as needed!");
93    }
94}
More examples

examples/benchmark.rs (line 344): same listing as examples/benchmark.rs (line 222) shown earlier on this page.
Source

pub fn clear_cache(&mut self)

Clear evaluation cache

Source

pub fn cache_len(&self) -> usize

Get number of cached entries

Examples found in repository

examples/cache_disable.rs (line 46): see the full listing under cache_stats above.
Source

pub fn enable_cache(&mut self)

Enable evaluation caching. Useful when reusing a JSONEval instance for multiple evaluations with different data.

Examples found in repository

examples/cache_disable.rs (line 86): see the full listing under cache_stats above.
Source

pub fn disable_cache(&mut self)

Disable evaluation caching. Useful for web API usage where each request creates a new JSONEval instance; skipping cache operations that provide no benefit for single-use instances improves performance.

Examples found in repository

examples/cache_disable.rs (line 63): see the full listing under cache_stats above.
Source

pub fn is_cache_enabled(&self) -> bool

Check if caching is enabled

Examples found in repository

examples/cache_disable.rs (line 45): see the full listing under cache_stats above.
Source

pub fn compile_logic(&self, logic_str: &str) -> Result<CompiledLogicId, String>

Compile a logic expression from a JSON string and store it globally

Returns a CompiledLogicId that can be used with run_logic for zero-clone evaluation. The compiled logic is stored in a global thread-safe cache and can be shared across different JSONEval instances. If the same logic was compiled before, returns the existing ID.

For repeated evaluations with different data, compile once and run multiple times.

§Arguments
  • logic_str - JSON logic expression as a string
§Returns

A CompiledLogicId that can be reused for multiple evaluations across instances
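
A minimal compile-once, run-many sketch, assuming JSONEval is exported from the crate root and that the RLogic engine accepts the same constant arithmetic operators ("*", "+") that appear in the $evaluation examples elsewhere on this page; the reference syntax for reading fields from data is engine-specific and not shown here:

use json_eval_rs::JSONEval;
use serde_json::json;

fn main() {
    let mut eval = JSONEval::new(r#"{"type":"object","properties":{}}"#, None, Some("{}")).unwrap();

    // Compile once; the compiled logic lives in a global cache and the id can
    // be reused for later calls and by other JSONEval instances.
    let logic_id = eval.compile_logic(r#"{"*": [2, 21]}"#).unwrap();

    // Zero-clone evaluation: data and context are passed by reference.
    let data = json!({});
    let result = eval.run_logic(logic_id, Some(&data), None).unwrap();
    println!("{}", result); // expected: 42 (exact numeric representation may vary)
}

For expressions that are only evaluated once, compile_and_run_logic (documented below) combines both steps.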

Source

pub fn compile_logic_value( &self, logic: &Value, ) -> Result<CompiledLogicId, String>

Compile a logic expression from a Value and store it globally

This is more efficient than compile_logic when you already have a parsed Value, as it avoids the JSON string serialization/parsing overhead.

Returns a CompiledLogicId that can be used with run_logic for zero-clone evaluation. The compiled logic is stored in a global thread-safe cache and can be shared across different JSONEval instances. If the same logic was compiled before, returns the existing ID.

§Arguments
  • logic - JSON logic expression as a Value
§Returns

A CompiledLogicId that can be reused for multiple evaluations across instances

Source

pub fn run_logic( &mut self, logic_id: CompiledLogicId, data: Option<&Value>, context: Option<&Value>, ) -> Result<Value, String>

Run pre-compiled logic with zero-clone pattern

Uses references to avoid data cloning, similar to the evaluate method. This is the most efficient way to evaluate the same logic multiple times with different data. The CompiledLogicId is retrieved from global storage, allowing the same compiled logic to be used across different JSONEval instances.

§Arguments
  • logic_id - Pre-compiled logic ID from compile_logic
  • data - Optional data to evaluate against (uses existing data if None)
  • context - Optional context to use (uses existing context if None)
§Returns

The result of the evaluation as a Value

Source

pub fn compile_and_run_logic( &mut self, logic_str: &str, data: Option<&str>, context: Option<&str>, ) -> Result<Value, String>

Compile and run JSON logic in one step (convenience method)

This is a convenience wrapper that combines compile_logic and run_logic. For repeated evaluations with different data, use compile_logic once and run_logic multiple times for better performance.

§Arguments
  • logic_str - JSON logic expression as a string
  • data - Optional data JSON string to evaluate against (uses existing data if None)
  • context - Optional context JSON string to use (uses existing context if None)
§Returns

The result of the evaluation as a Value
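
A corresponding one-step sketch under the same assumptions as above:

use json_eval_rs::JSONEval;

fn main() {
    let mut eval = JSONEval::new(r#"{"type":"object","properties":{}}"#, None, Some("{}")).unwrap();

    // Compile and evaluate in a single call; prefer compile_logic + run_logic
    // when the same expression will be evaluated many times.
    let result = eval.compile_and_run_logic(r#"{"+": [1, 2]}"#, None, None).unwrap();
    println!("{}", result); // expected: 3 (exact numeric representation may vary)
}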

Source

pub fn resolve_layout(&mut self, evaluate: bool) -> Result<(), String>

Resolve layout references with optional evaluation

§Arguments
  • evaluate - If true, runs evaluation before resolving layout. If false, only resolves layout.
§Returns

A Result indicating success or an error message.
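
A minimal sketch, assuming JSONEval is exported from the crate root and that resolving layout on a schema with no layout section succeeds as a no-op (an assumption; real schemas would define layout references):

use json_eval_rs::JSONEval;

fn main() {
    let schema = r#"{"type":"object","properties":{"price":{"type":"number"}}}"#;
    let mut eval = JSONEval::new(schema, None, Some(r#"{"price": 1}"#)).unwrap();

    // true: run a full evaluation first, then resolve layout references.
    // false: only resolve layout against the current evaluated schema.
    eval.resolve_layout(true).unwrap();

    // skip_layout = false keeps layout resolution in the returned schema.
    let evaluated = eval.get_evaluated_schema(false);
    println!("{}", evaluated);
}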

Source

pub fn evaluate_dependents( &mut self, changed_paths: &[String], data: Option<&str>, context: Option<&str>, re_evaluate: bool, ) -> Result<Value, String>

Evaluate fields that depend on a changed path. When a source field changes, all of its dependent fields are processed transitively (see the sketch after the argument list below).

§Arguments
  • changed_paths - Array of field paths that changed (supports dot notation or schema pointers)
  • data - Optional JSON data to update before processing
  • context - Optional context data
  • re_evaluate - If true, performs full evaluation after processing dependents
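
A minimal sketch, assuming JSONEval is exported from the crate root; the price/tax schema and the dotted path "price" are illustrative:

use json_eval_rs::JSONEval;

fn main() {
    let schema = r#"{
        "type": "object",
        "properties": {
            "price": { "type": "number" },
            "tax": {
                "type": "number",
                "value": { "$evaluation": { "*": [ { "$ref": "#/properties/price" }, 0.1 ] } }
            }
        }
    }"#;
    let mut eval = JSONEval::new(schema, None, Some(r#"{"price": 100}"#)).unwrap();
    eval.evaluate(r#"{"price": 100}"#, None).unwrap();

    // "price" changed; update the data and recompute its dependents.
    let changed = vec!["price".to_string()];
    let updates = eval
        .evaluate_dependents(&changed, Some(r#"{"price": 200}"#), None, false)
        .unwrap();
    println!("{}", updates); // expected to carry the recomputed dependent values
}
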
Source

pub fn validate( &mut self, data: &str, context: Option<&str>, paths: Option<&[String]>, ) -> Result<ValidationResult, String>

Validate form data against schema rules. Returns validation errors for fields that do not meet their rules (see the sketch below).
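
A minimal sketch, assuming JSONEval is exported from the crate root; the schema below defines no rules, so validation is expected to report no errors, and the ValidationResult value is not inspected because its API is not documented on this page:

use json_eval_rs::JSONEval;

fn main() {
    let schema = r#"{"type":"object","properties":{"price":{"type":"number"}}}"#;
    let data = r#"{"price": 100}"#;
    let mut eval = JSONEval::new(schema, None, Some(data)).unwrap();

    // Validate every field (paths = None); pass Some(&paths) to restrict
    // validation to specific fields.
    match eval.validate(data, None, None) {
        Ok(_result) => println!("validation completed"),
        Err(e) => eprintln!("validation failed: {}", e),
    }
}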

Trait Implementations§

Source§

impl Clone for JSONEval

Source§

fn clone(&self) -> Self

Returns a duplicate of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more

Auto Trait Implementations§

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.