cairo_lang_test_runner/
lib.rs

1use std::path::Path;
2use std::sync::Mutex;
3
4use anyhow::{Context, Result, bail};
5use cairo_lang_compiler::db::RootDatabase;
6use cairo_lang_compiler::diagnostics::DiagnosticsReporter;
7use cairo_lang_compiler::project::setup_project;
8use cairo_lang_filesystem::cfg::{Cfg, CfgSet};
9use cairo_lang_filesystem::ids::CrateId;
10use cairo_lang_runner::casm_run::format_for_panic;
11use cairo_lang_runner::profiling::{
12    ProfilingInfo, ProfilingInfoProcessor, ProfilingInfoProcessorParams,
13};
14use cairo_lang_runner::{
15    ProfilingInfoCollectionConfig, RunResultValue, SierraCasmRunner, StarknetExecutionResources,
16};
17use cairo_lang_sierra::extensions::gas::CostTokenType;
18use cairo_lang_sierra::ids::FunctionId;
19use cairo_lang_sierra::program::{Program, StatementIdx};
20use cairo_lang_sierra_generator::db::SierraGenGroup;
21use cairo_lang_sierra_to_casm::metadata::MetadataComputationConfig;
22use cairo_lang_starknet::contract::ContractInfo;
23use cairo_lang_starknet::starknet_plugin_suite;
24use cairo_lang_test_plugin::test_config::{PanicExpectation, TestExpectation};
25use cairo_lang_test_plugin::{
26    TestCompilation, TestCompilationMetadata, TestConfig, TestsCompilationConfig,
27    compile_test_prepared_db, test_plugin_suite,
28};
29use cairo_lang_utils::casts::IntoOrPanic;
30use cairo_lang_utils::ordered_hash_map::OrderedHashMap;
31use cairo_lang_utils::unordered_hash_map::UnorderedHashMap;
32use colored::Colorize;
33use itertools::Itertools;
34use num_traits::ToPrimitive;
35use rayon::prelude::{IntoParallelIterator, ParallelIterator};
36use starknet_types_core::felt::Felt as Felt252;
37
38#[cfg(test)]
39mod test;
40
/// Compile and run tests.
pub struct TestRunner {
    /// Compiles the test crate into a runnable test compilation.
    compiler: TestCompiler,
    /// Configuration controlling which tests run and how.
    config: TestRunConfig,
}
46
47impl TestRunner {
48    /// Configure a new test runner
49    ///
50    /// # Arguments
51    ///
52    /// * `path` - The path to compile and run its tests
53    /// * `filter` - Run only tests containing the filter string
54    /// * `include_ignored` - Include ignored tests as well
55    /// * `ignored` - Run ignored tests only
56    /// * `starknet` - Add the starknet plugin to run the tests
57    pub fn new(
58        path: &Path,
59        starknet: bool,
60        allow_warnings: bool,
61        config: TestRunConfig,
62    ) -> Result<Self> {
63        let compiler = TestCompiler::try_new(
64            path,
65            allow_warnings,
66            config.gas_enabled,
67            TestsCompilationConfig {
68                starknet,
69                add_statements_functions: config.run_profiler == RunProfilerConfig::Cairo,
70                add_statements_code_locations: false,
71                contract_declarations: None,
72                contract_crate_ids: None,
73                executable_crate_ids: None,
74            },
75        )?;
76        Ok(Self { compiler, config })
77    }
78
79    /// Runs the tests and process the results for a summary.
80    pub fn run(&self) -> Result<Option<TestsSummary>> {
81        let runner = CompiledTestRunner::new(self.compiler.build()?, self.config.clone());
82        runner.run(Some(&self.compiler.db))
83    }
84}
85
/// Runs tests that were already compiled.
pub struct CompiledTestRunner {
    /// The compiled tests to run.
    pub compiled: TestCompilation,
    /// Configuration controlling which tests run and how.
    pub config: TestRunConfig,
}
90
91impl CompiledTestRunner {
92    /// Configure a new compiled test runner
93    ///
94    /// # Arguments
95    ///
96    /// * `compiled` - The compiled tests to run
97    /// * `config` - Test run configuration
98    pub fn new(compiled: TestCompilation, config: TestRunConfig) -> Self {
99        Self { compiled, config }
100    }
101
102    /// Execute preconfigured test execution.
103    pub fn run(self, db: Option<&RootDatabase>) -> Result<Option<TestsSummary>> {
104        let (compiled, filtered_out) = filter_test_cases(
105            self.compiled,
106            self.config.include_ignored,
107            self.config.ignored,
108            &self.config.filter,
109        );
110
111        let TestsSummary { passed, failed, ignored, failed_run_results } = run_tests(
112            if self.config.run_profiler == RunProfilerConfig::Cairo {
113                let db = db.expect("db must be passed when profiling.");
114                let statements_locations = compiled
115                    .metadata
116                    .statements_locations
117                    .expect("statements locations must be present when profiling.");
118                Some(PorfilingAuxData {
119                    db,
120                    statements_functions: statements_locations
121                        .get_statements_functions_map_for_tests(db),
122                })
123            } else {
124                None
125            },
126            compiled.metadata.named_tests,
127            compiled.sierra_program.program,
128            compiled.metadata.function_set_costs,
129            compiled.metadata.contracts_info,
130            &self.config,
131        )?;
132
133        if failed.is_empty() {
134            println!(
135                "test result: {}. {} passed; {} failed; {} ignored; {filtered_out} filtered out;",
136                "ok".bright_green(),
137                passed.len(),
138                failed.len(),
139                ignored.len()
140            );
141            Ok(None)
142        } else {
143            println!("failures:");
144            for (failure, run_result) in failed.iter().zip_eq(failed_run_results) {
145                print!("   {failure} - ");
146                match run_result {
147                    RunResultValue::Success(_) => {
148                        println!("expected panic but finished successfully.");
149                    }
150                    RunResultValue::Panic(values) => {
151                        println!("{}", format_for_panic(values.into_iter()));
152                    }
153                }
154            }
155            println!();
156            bail!(
157                "test result: {}. {} passed; {} failed; {} ignored",
158                "FAILED".bright_red(),
159                passed.len(),
160                failed.len(),
161                ignored.len()
162            );
163        }
164    }
165}
166
/// Whether to run the profiler, and what results to produce.
///
/// With `None`, don't run the profiler.
/// With `Sierra`, run the profiler and produce sierra profiling information.
/// With `Cairo`, run the profiler and additionally produce cairo profiling information (e.g.
///     filtering out generated functions).
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum RunProfilerConfig {
    /// Don't run the profiler.
    None,
    /// Run the profiler and additionally produce cairo-level profiling information.
    Cairo,
    /// Run the profiler and produce sierra-level profiling information.
    Sierra,
}
179
/// Configuration of compiled tests runner.
#[derive(Clone, Debug)]
pub struct TestRunConfig {
    /// Run only tests whose name contains this string.
    pub filter: String,
    /// Include ignored tests as well.
    pub include_ignored: bool,
    /// Run ignored tests only.
    pub ignored: bool,
    /// Whether to run the profiler and how.
    pub run_profiler: RunProfilerConfig,
    /// Whether to enable gas calculation.
    pub gas_enabled: bool,
    /// Whether to print used resources after each test.
    pub print_resource_usage: bool,
}
193
/// The test cases compiler.
pub struct TestCompiler {
    /// Snapshot of the compiler database after project setup.
    pub db: RootDatabase,
    /// The crates of the compiled project.
    pub main_crate_ids: Vec<CrateId>,
    /// The crates to collect tests from (set to the same crates as `main_crate_ids` in
    /// `try_new`).
    pub test_crate_ids: Vec<CrateId>,
    /// Whether compilation warnings are allowed (don't fail the build on them).
    pub allow_warnings: bool,
    /// Tests compilation configuration.
    pub config: TestsCompilationConfig,
}
202
203impl TestCompiler {
204    /// Configure a new test compiler
205    ///
206    /// # Arguments
207    ///
208    /// * `path` - The path to compile and run its tests
209    /// * `starknet` - Add the starknet plugin to run the tests
210    pub fn try_new(
211        path: &Path,
212        allow_warnings: bool,
213        gas_enabled: bool,
214        config: TestsCompilationConfig,
215    ) -> Result<Self> {
216        let db = &mut {
217            let mut b = RootDatabase::builder();
218            let mut cfg = CfgSet::from_iter([Cfg::name("test"), Cfg::kv("target", "test")]);
219            if !gas_enabled {
220                cfg.insert(Cfg::kv("gas", "disabled"));
221                b.skip_auto_withdraw_gas();
222            }
223            b.detect_corelib();
224            b.with_cfg(cfg);
225            b.with_plugin_suite(test_plugin_suite());
226            if config.starknet {
227                b.with_plugin_suite(starknet_plugin_suite());
228            }
229            b.build()?
230        };
231
232        let main_crate_ids = setup_project(db, Path::new(&path))?;
233
234        Ok(Self {
235            db: db.snapshot(),
236            test_crate_ids: main_crate_ids.clone(),
237            main_crate_ids,
238            allow_warnings,
239            config,
240        })
241    }
242
243    /// Build the tests and collect metadata.
244    pub fn build(&self) -> Result<TestCompilation> {
245        let mut diag_reporter =
246            DiagnosticsReporter::stderr().with_crates(&self.main_crate_ids.clone());
247        if self.allow_warnings {
248            diag_reporter = diag_reporter.allow_warnings();
249        }
250
251        compile_test_prepared_db(
252            &self.db,
253            self.config.clone(),
254            self.test_crate_ids.clone(),
255            diag_reporter,
256        )
257    }
258}
259
260/// Filter compiled test cases with user provided arguments.
261///
262/// # Arguments
263/// * `compiled` - Compiled test cases with metadata.
264/// * `include_ignored` - Include ignored tests as well.
265/// * `ignored` - Run ignored tests only.l
266/// * `filter` - Include only tests containing the filter string.
267/// # Returns
268/// * (`TestCompilation`, `usize`) - The filtered test cases and the number of filtered out cases.
269pub fn filter_test_cases(
270    compiled: TestCompilation,
271    include_ignored: bool,
272    ignored: bool,
273    filter: &str,
274) -> (TestCompilation, usize) {
275    let total_tests_count = compiled.metadata.named_tests.len();
276    let named_tests = compiled
277        .metadata
278        .named_tests
279        .into_iter()
280        // Filtering unignored tests in `ignored` mode. Keep all tests in `include-ignored` mode.
281        .filter(|(_, test)| !ignored || test.ignored || include_ignored)
282        .map(|(func, mut test)| {
283            // Un-ignoring all the tests in `include-ignored` and `ignored` mode.
284            if include_ignored || ignored {
285                test.ignored = false;
286            }
287            (func, test)
288        })
289        .filter(|(name, _)| name.contains(filter))
290        .collect_vec();
291    let filtered_out = total_tests_count - named_tests.len();
292    let tests = TestCompilation {
293        sierra_program: compiled.sierra_program,
294        metadata: TestCompilationMetadata { named_tests, ..(compiled.metadata) },
295    };
296    (tests, filtered_out)
297}
298
/// The status of a ran test.
enum TestStatus {
    /// The test's outcome matched its expectation.
    Success,
    /// The test failed; holds the run result value for failure reporting.
    Fail(RunResultValue),
}
304
/// The result of a ran test.
struct TestResult {
    /// The status of the run.
    status: TestStatus,
    /// The gas usage of the run if relevant.
    /// Either the actual consumed gas (when an `available_gas` limit was set) or the
    /// statically computed initial required gas.
    gas_usage: Option<i64>,
    /// The used resources of the run.
    used_resources: StarknetExecutionResources,
    /// The profiling info of the run, if requested.
    profiling_info: Option<ProfilingInfo>,
}
316
/// Summary data of the ran tests.
pub struct TestsSummary {
    /// Names of the tests that passed.
    passed: Vec<String>,
    /// Names of the tests that failed.
    failed: Vec<String>,
    /// Names of the tests that were ignored (not run).
    ignored: Vec<String>,
    /// Run results of the failed tests, parallel to `failed`.
    failed_run_results: Vec<RunResultValue>,
}
324
/// Auxiliary data that is required when running tests with profiling.
// NOTE(review): "Porfiling" is a typo of "Profiling"; renaming the public type would break
// external callers, so it is only flagged here.
pub struct PorfilingAuxData<'a> {
    /// Sierra generation database, passed to the `ProfilingInfoProcessor`.
    pub db: &'a dyn SierraGenGroup,
    /// Map from sierra statement index to a function identifier string — presumably the
    /// containing function's name (produced by `get_statements_functions_map_for_tests`);
    /// verify against that helper.
    pub statements_functions: UnorderedHashMap<StatementIdx, String>,
}
330
331/// Runs the tests and process the results for a summary.
332pub fn run_tests(
333    profiler_data: Option<PorfilingAuxData<'_>>,
334    named_tests: Vec<(String, TestConfig)>,
335    sierra_program: Program,
336    function_set_costs: OrderedHashMap<FunctionId, OrderedHashMap<CostTokenType, i32>>,
337    contracts_info: OrderedHashMap<Felt252, ContractInfo>,
338    config: &TestRunConfig,
339) -> Result<TestsSummary> {
340    let runner = SierraCasmRunner::new(
341        sierra_program.clone(),
342        if config.gas_enabled {
343            Some(MetadataComputationConfig {
344                function_set_costs,
345                linear_gas_solver: true,
346                linear_ap_change_solver: true,
347                skip_non_linear_solver_comparisons: false,
348                compute_runtime_costs: false,
349            })
350        } else {
351            None
352        },
353        contracts_info,
354        match config.run_profiler {
355            RunProfilerConfig::None => None,
356            RunProfilerConfig::Cairo | RunProfilerConfig::Sierra => {
357                Some(ProfilingInfoCollectionConfig::default())
358            }
359        },
360    )
361    .with_context(|| "Failed setting up runner.")?;
362    let suffix = if named_tests.len() != 1 { "s" } else { "" };
363    println!("running {} test{}", named_tests.len(), suffix);
364    let wrapped_summary = Mutex::new(Ok(TestsSummary {
365        passed: vec![],
366        failed: vec![],
367        ignored: vec![],
368        failed_run_results: vec![],
369    }));
370
371    // Run in parallel if possible. If running with db, parallelism is impossible.
372    if profiler_data.is_none() {
373        named_tests
374            .into_par_iter()
375            .map(|(name, test)| run_single_test(test, name, &runner))
376            .for_each(|res| {
377                update_summary(
378                    &wrapped_summary,
379                    res,
380                    &None,
381                    &sierra_program,
382                    &ProfilingInfoProcessorParams {
383                        process_by_original_user_function: false,
384                        process_by_cairo_function: false,
385                        ..ProfilingInfoProcessorParams::default()
386                    },
387                    config.print_resource_usage,
388                );
389            });
390    } else {
391        eprintln!("Note: Tests don't run in parallel when running with profiling.");
392        named_tests
393            .into_iter()
394            .map(move |(name, test)| run_single_test(test, name, &runner))
395            .for_each(|test_result| {
396                update_summary(
397                    &wrapped_summary,
398                    test_result,
399                    &profiler_data,
400                    &sierra_program,
401                    &ProfilingInfoProcessorParams::default(),
402                    config.print_resource_usage,
403                );
404            });
405    }
406
407    wrapped_summary.into_inner().unwrap()
408}
409
/// Runs a single test and returns a tuple of its name and result.
///
/// Returns `(name, None)` for ignored tests (they are not executed at all), and
/// `(name, Some(result))` otherwise. An `Err` is returned only when the test function could
/// not be found or the runner itself failed - not when the test's expectation is unmet.
fn run_single_test(
    test: TestConfig,
    name: String,
    runner: &SierraCasmRunner,
) -> anyhow::Result<(String, Option<TestResult>)> {
    if test.ignored {
        return Ok((name, None));
    }
    let func = runner.find_function(name.as_str())?;
    let result = runner
        .run_function_with_starknet_context(func, vec![], test.available_gas, Default::default())
        .with_context(|| format!("Failed to run the function `{}`.", name.as_str()))?;
    Ok((
        name,
        Some(TestResult {
            // A test passes when the outcome matches its expectation: a test expected to
            // panic must panic, and with `PanicExpectation::Exact` the panic values must
            // match exactly.
            status: match &result.value {
                RunResultValue::Success(_) => match test.expectation {
                    TestExpectation::Success => TestStatus::Success,
                    TestExpectation::Panics(_) => TestStatus::Fail(result.value),
                },
                RunResultValue::Panic(value) => match test.expectation {
                    TestExpectation::Success => TestStatus::Fail(result.value),
                    TestExpectation::Panics(panic_expectation) => match panic_expectation {
                        PanicExpectation::Exact(expected) if value != &expected => {
                            TestStatus::Fail(result.value)
                        }
                        _ => TestStatus::Success,
                    },
                },
            },
            // Gas actually consumed (available - remaining) when a gas limit was set;
            // otherwise fall back to the statically computed initial required gas.
            gas_usage: test
                .available_gas
                .zip(result.gas_counter)
                .map(|(before, after)| {
                    before.into_or_panic::<i64>() - after.to_bigint().to_i64().unwrap()
                })
                .or_else(|| {
                    runner.initial_required_gas(func).map(|gas| gas.into_or_panic::<i64>())
                }),
            used_resources: result.used_resources,
            profiling_info: result.profiling_info,
        }),
    ))
}
455
456/// Updates the test summary with the given test result.
457fn update_summary(
458    wrapped_summary: &Mutex<std::prelude::v1::Result<TestsSummary, anyhow::Error>>,
459    test_result: std::prelude::v1::Result<(String, Option<TestResult>), anyhow::Error>,
460    profiler_data: &Option<PorfilingAuxData<'_>>,
461    sierra_program: &Program,
462    profiling_params: &ProfilingInfoProcessorParams,
463    print_resource_usage: bool,
464) {
465    let mut wrapped_summary = wrapped_summary.lock().unwrap();
466    if wrapped_summary.is_err() {
467        return;
468    }
469    let (name, opt_result) = match test_result {
470        Ok((name, opt_result)) => (name, opt_result),
471        Err(err) => {
472            *wrapped_summary = Err(err);
473            return;
474        }
475    };
476    let summary = wrapped_summary.as_mut().unwrap();
477    let (res_type, status_str, gas_usage, used_resources, profiling_info) =
478        if let Some(result) = opt_result {
479            let (res_type, status_str) = match result.status {
480                TestStatus::Success => (&mut summary.passed, "ok".bright_green()),
481                TestStatus::Fail(run_result) => {
482                    summary.failed_run_results.push(run_result);
483                    (&mut summary.failed, "fail".bright_red())
484                }
485            };
486            (
487                res_type,
488                status_str,
489                result.gas_usage,
490                print_resource_usage.then_some(result.used_resources),
491                result.profiling_info,
492            )
493        } else {
494            (&mut summary.ignored, "ignored".bright_yellow(), None, None, None)
495        };
496    if let Some(gas_usage) = gas_usage {
497        println!("test {name} ... {status_str} (gas usage est.: {gas_usage})");
498    } else {
499        println!("test {name} ... {status_str}");
500    }
501    if let Some(used_resources) = used_resources {
502        let filtered = used_resources.basic_resources.filter_unused_builtins();
503        // Prints the used resources per test. E.g.:
504        // ```ignore
505        // test cairo_level_tests::interoperability::test_contract_not_deployed ... ok (gas usage est.: 77320)
506        //     steps: 42
507        //     memory holes: 20
508        //     builtins: ("range_check_builtin": 3)
509        //     syscalls: ("CallContract": 1)
510        // test cairo_level_tests::events::test_pop_log ... ok (gas usage est.: 55440)
511        //     steps: 306
512        //     memory holes: 35
513        //     builtins: ("range_check_builtin": 24)
514        //     syscalls: ("EmitEvent": 2)
515        // ```
516        println!("    steps: {}", filtered.n_steps);
517        println!("    memory holes: {}", filtered.n_memory_holes);
518
519        print_resource_map(
520            filtered.builtin_instance_counter.into_iter().map(|(k, v)| (k.to_string(), v)),
521            "builtins",
522        );
523        print_resource_map(used_resources.syscalls.into_iter(), "syscalls");
524    }
525    if let Some(profiling_info) = profiling_info {
526        let Some(PorfilingAuxData { db, statements_functions }) = profiler_data else {
527            panic!("profiler_data is None");
528        };
529        let profiling_processor = ProfilingInfoProcessor::new(
530            Some(*db),
531            sierra_program.clone(),
532            statements_functions.clone(),
533            Default::default(),
534        );
535        let processed_profiling_info =
536            profiling_processor.process_ex(&profiling_info, profiling_params);
537        println!("Profiling info:\n{processed_profiling_info}");
538    }
539    res_type.push(name);
540}
541
/// Given an iterator of (String, usize) pairs, prints a single usage line with all entries
/// sorted and joined. E.g.:
///     syscalls: ("CallContract": 1, "EmitEvent": 2)
/// Prints nothing when the iterator is empty.
fn print_resource_map(m: impl ExactSizeIterator<Item = (String, usize)>, resource_type: &str) {
    if m.len() == 0 {
        return;
    }
    // Sort the (name, count) pairs before formatting, matching the previous `sorted()`
    // behavior, for a deterministic report. `m` is already an iterator, so the former
    // `.into_iter()` call was redundant.
    let mut entries: Vec<(String, usize)> = m.collect();
    entries.sort();
    let joined =
        entries.into_iter().map(|(k, v)| format!(r#""{k}": {v}"#)).collect::<Vec<_>>().join(", ");
    println!("    {resource_type}: ({joined})");
}