forest/tool/subcommands/api_cmd/report.rs

// Copyright 2019-2025 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT

use super::ReportMode;
use crate::rpc;
use crate::rpc::{FilterList, Permission};
use crate::tool::subcommands::api_cmd::api_compare_tests::TestSummary;
use ahash::{HashMap, HashMapExt, HashSet, HashSetExt};
use chrono::{DateTime, Utc};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use serde_with::{DisplayFromStr, DurationMilliSeconds, DurationSeconds, serde_as};
use similar::{ChangeTag, TextDiff};
use std::path::Path;
use std::time::{Duration, Instant};
use tabled::{builder::Builder, settings::Style};

/// Tracks the performance metrics for a single RPC method.
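///
/// The `Duration` fields are serialized as integer milliseconds via
/// `serde_with::DurationMilliSeconds<u64>` (e.g. a 1.5 s total serializes as `1500`).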
#[serde_as]
#[derive(Debug, Clone, Serialize, Deserialize)]
struct PerformanceMetrics {
    #[serde_as(as = "DurationMilliSeconds<u64>")]
    total_duration_ms: Duration,

    #[serde_as(as = "DurationMilliSeconds<u64>")]
    average_duration_ms: Duration,

    #[serde_as(as = "DurationMilliSeconds<u64>")]
    min_duration_ms: Duration,

    #[serde_as(as = "DurationMilliSeconds<u64>")]
    max_duration_ms: Duration,
    test_count: usize,
}

impl PerformanceMetrics {
    pub fn from_durations(durations: &[Duration]) -> Option<Self> {
        if durations.is_empty() {
            return None;
        }

        let test_count = durations.len();
        let total_duration_ms: Duration = durations.iter().sum();
        let average_duration_ms = total_duration_ms / test_count as u32;

        Some(Self {
            total_duration_ms,
            average_duration_ms,
            min_duration_ms: *durations.iter().min().expect("durations is not empty"),
            max_duration_ms: *durations.iter().max().expect("durations is not empty"),
            test_count,
        })
    }
}

/// Details about a successful test instance
#[serde_as]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
struct SuccessfulTest {
    /// The parameters used for this test
    request_params: serde_json::Value,

    /// Forest node response
    forest_status: TestSummary,

    /// Lotus node response
    lotus_status: TestSummary,

    /// Individual test execution duration in milliseconds
    #[serde_as(as = "DurationMilliSeconds<u64>")]
    execution_duration_ms: Duration,
}

/// Testing status for a method
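///
/// With `tag = "type"` and `rename_all = "snake_case"` (see the attributes
/// below), a tested method serializes roughly like this sketch (the counts are
/// hypothetical):
///
/// ```json
/// { "type": "tested", "total_count": 3, "success_count": 2, "failure_count": 1 }
/// ```
///
/// The unit variants serialize as `{ "type": "filtered" }` and `{ "type": "not_tested" }`.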
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", tag = "type")]
enum MethodTestStatus {
    /// Method was tested
    Tested {
        total_count: usize,
        success_count: usize,
        failure_count: usize,
    },
    /// Method was filtered out by configuration
    Filtered,
    /// Method exists but was not tested
    NotTested,
}

/// Details about a failed test instance
#[serde_as]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
struct FailedTest {
    /// The parameters used for this test
    pub request_params: serde_json::Value,

    /// Forest node result
    pub forest_status: TestSummary,

    /// Lotus node result
    pub lotus_status: TestSummary,

    /// Diff between Forest and Lotus responses
    #[serde(skip_serializing_if = "Option::is_none")]
    pub response_diff: Option<String>,

    /// Individual test execution duration in milliseconds
    #[serde_as(as = "DurationMilliSeconds<u64>")]
    pub execution_duration_ms: Duration,
}

/// Detailed report for a single RPC method
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
struct MethodReport {
    /// Full RPC method name
    name: String,

    /// Required permission level
    permission: Permission,

    /// Current testing status
    status: MethodTestStatus,

    /// Performance metrics (present once the method has been tested)
    #[serde(skip_serializing_if = "Option::is_none")]
    performance: Option<PerformanceMetrics>,

    /// Details of successful test instances (only in full mode)
    #[serde(skip_serializing_if = "Vec::is_empty")]
    success_test_params: Vec<SuccessfulTest>,

    /// Details of failed test instances
    #[serde(skip_serializing_if = "Vec::is_empty")]
    failed_test_params: Vec<FailedTest>,
}

/// Report of all API comparison test results
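///
/// A serialized report looks roughly like this (a sketch; the values are
/// hypothetical):
///
/// ```json
/// {
///   "execution_datetime_utc": "2025-01-01 00:00:00 UTC",
///   "total_duration_secs": 42,
///   "methods": []
/// }
/// ```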
#[serde_as]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct ApiTestReport {
    /// Timestamp of when the test execution started
    #[serde_as(as = "DisplayFromStr")]
    execution_datetime_utc: DateTime<Utc>,

    /// Total duration of the test run in seconds
    #[serde_as(as = "DurationSeconds<u64>")]
    total_duration_secs: Duration,

    /// Comprehensive report for each RPC method
    methods: Vec<MethodReport>,
}

/// Report builder to encapsulate report generation logic
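///
/// Typical lifecycle (a sketch; the callers live elsewhere in this subcommand):
/// construct with [`ReportBuilder::new`], record each executed test with
/// [`ReportBuilder::track_test_result`], then call
/// [`ReportBuilder::print_summary`] and/or [`ReportBuilder::finalize_and_save`].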
pub struct ReportBuilder {
    method_reports: HashMap<String, MethodReport>,
    method_timings: HashMap<String, Vec<Duration>>,
    report_mode: ReportMode,
    start_time: Instant,
    failed_test_dumps: Vec<super::api_compare_tests::TestDump>,
}

impl ReportBuilder {
    pub fn new(filter_list: &FilterList, report_mode: ReportMode) -> Self {
        let all_methods = rpc::collect_rpc_method_info();

        let method_reports = all_methods
            .into_iter()
            .map(|(method_name, permission)| {
                let report = MethodReport {
                    name: method_name.to_string(),
                    permission,
                    status: if !filter_list.authorize(method_name) {
                        MethodTestStatus::Filtered
                    } else {
                        MethodTestStatus::NotTested
                    },
                    performance: None,
                    success_test_params: vec![],
                    failed_test_params: vec![],
                };
                (method_name.to_string(), report)
            })
            .collect();

        Self {
            method_reports,
            method_timings: HashMap::new(),
            report_mode,
            start_time: Instant::now(),
            failed_test_dumps: vec![],
        }
    }

    pub fn track_test_result(
        &mut self,
        method_name: &str,
        success: bool,
        test_result: &super::api_compare_tests::TestResult,
        test_params: &serde_json::Value,
    ) {
        if let Some(report) = self.method_reports.get_mut(method_name) {
            // Update test status
            match &mut report.status {
                MethodTestStatus::NotTested | MethodTestStatus::Filtered => {
                    report.status = MethodTestStatus::Tested {
                        total_count: 1,
                        success_count: if success { 1 } else { 0 },
                        failure_count: if success { 0 } else { 1 },
                    };
                }
                MethodTestStatus::Tested {
                    total_count,
                    success_count,
                    failure_count,
                } => {
                    *total_count += 1;
                    if success {
                        *success_count += 1;
                    } else {
                        *failure_count += 1;
                    }
                }
            }

            // Track timing
            self.method_timings
                .entry(method_name.to_string())
                .or_default()
                .push(test_result.duration);

            // Without a test dump there is nothing further to record for this test.
            let Some(test_dump) = test_result.test_dump.as_ref() else {
                return;
            };

            if !success {
                self.failed_test_dumps.push(test_dump.clone());
            }

            // Add test details based on the report mode: `Full` records both
            // successes and failures, `FailureOnly` records only failures, and
            // `Summary` records neither.
            if success && matches!(self.report_mode, ReportMode::Full) {
                if let (Ok(_), Ok(_)) = (&test_dump.forest_response, &test_dump.lotus_response) {
                    report.success_test_params.push(SuccessfulTest {
                        request_params: test_params.clone(),
                        forest_status: test_result.forest_status.clone(),
                        lotus_status: test_result.lotus_status.clone(),
                        execution_duration_ms: test_result.duration,
                    });
                }
            } else if !success
                && matches!(self.report_mode, ReportMode::Full | ReportMode::FailureOnly)
            {
                let response_diff = match (&test_dump.forest_response, &test_dump.lotus_response) {
                    (Ok(forest_json), Ok(lotus_json)) => {
                        Some(generate_diff(forest_json, lotus_json))
                    }
                    _ => None,
                };

                report.failed_test_params.push(FailedTest {
                    request_params: test_params.clone(),
                    forest_status: test_result.forest_status.clone(),
                    lotus_status: test_result.lotus_status.clone(),
                    response_diff,
                    execution_duration_ms: test_result.duration,
                });
            }
        }
    }

    /// Check if there were any failures
    pub fn has_failures(&self) -> bool {
        self.method_reports.values().any(|report| {
            matches!(
                report.status,
                MethodTestStatus::Tested { failure_count, .. } if failure_count > 0
            )
        })
    }

    /// Print a summary of test results
    pub fn print_summary(&mut self) {
        // Calculate performance metrics for each method before printing
        for (method_name, timings) in &self.method_timings {
            if let Some(report) = self.method_reports.get_mut(method_name) {
                report.performance = PerformanceMetrics::from_durations(timings);
            }
        }

        let mut builder = Builder::default();
        builder.push_record(["RPC Method", "Forest", "Lotus", "Status"]);

        let mut methods: Vec<&MethodReport> = self.method_reports.values().collect();
        methods.sort_by(|a, b| a.name.cmp(&b.name));

        for report in methods {
            match &report.status {
                MethodTestStatus::Tested {
                    total_count,
                    success_count,
                    failure_count,
                } => {
                    let method_name = if *total_count > 1 {
                        format!("{} ({})", report.name, total_count)
                    } else {
                        report.name.clone()
                    };

                    let status = if *failure_count == 0 {
                        "āœ… All Passed".into()
                    } else {
                        let mut reasons = HashSet::new();
                        for failure in &report.failed_test_params {
                            if failure.forest_status != TestSummary::Valid {
                                reasons.insert(failure.forest_status.to_string());
                            }
                            if failure.lotus_status != TestSummary::Valid {
                                reasons.insert(failure.lotus_status.to_string());
                            }
                        }

                        let reasons_str =
                            reasons.iter().map(|s| s.as_str()).collect_vec().join(", ");

                        if *success_count == 0 {
                            format!("āŒ All Failed ({reasons_str})")
                        } else {
                            format!("āš ļø  Mixed Results ({reasons_str})")
                        }
                    };

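                    // Note: the Forest and Lotus columns show the same combined
                    // pass count, since each test yields a single `success`
                    // flag covering both nodes.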
                    builder.push_record([
                        method_name.as_str(),
                        &format!("{success_count}/{total_count}"),
                        &format!("{success_count}/{total_count}"),
                        &status,
                    ]);
                }
                MethodTestStatus::NotTested | MethodTestStatus::Filtered => {
                    // Skip not tested and filtered methods in summary
                }
            }
        }

        let table = builder.build().with(Style::markdown()).to_string();
        println!("\n{table}");

        // Print overall summary
        let total_methods = self.method_reports.len();
        let tested_methods = self
            .method_reports
            .values()
            .filter(|r| matches!(r.status, MethodTestStatus::Tested { .. }))
            .count();
        let failed_methods = self
            .method_reports
            .values()
            .filter(|r| {
                matches!(
                    r.status,
                    MethodTestStatus::Tested { failure_count, .. } if failure_count > 0
                )
            })
            .count();

        println!("\nšŸ“Š Test Summary:");
        println!("  Total methods: {total_methods}");
        println!("  Tested methods: {tested_methods}");
        println!("  Failed methods: {failed_methods}");
        println!("  Duration: {}s", self.start_time.elapsed().as_secs());
    }

    /// Finalize and save the report in the provided directory
    pub fn finalize_and_save(mut self, report_dir: &Path) -> anyhow::Result<()> {
        // Calculate performance metrics for each method
        for (method_name, timings) in self.method_timings {
            if let Some(report) = self.method_reports.get_mut(&method_name) {
                report.performance = PerformanceMetrics::from_durations(&timings);
            }
        }

        let mut methods: Vec<MethodReport> = self.method_reports.into_values().collect();
        methods.sort_by(|a, b| a.name.cmp(&b.name));

        let report = ApiTestReport {
            execution_datetime_utc: Utc::now(),
            total_duration_secs: self.start_time.elapsed(),
            methods,
        };

        if !report_dir.is_dir() {
            std::fs::create_dir_all(report_dir)?;
        }

        let file_name = match self.report_mode {
            ReportMode::Full => "full_report.json",
            ReportMode::FailureOnly => "failure_report.json",
            ReportMode::Summary => "summary_report.json",
        };

        std::fs::write(
            report_dir.join(file_name),
            serde_json::to_string_pretty(&report)?,
        )?;
        Ok(())
    }
}

/// Generate a line-oriented diff between the Forest and Lotus responses
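///
/// Lines present only in the Forest response are prefixed with `-`, lines
/// present only in the Lotus response with `+`, and common lines with a space.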
pub fn generate_diff(forest_json: &serde_json::Value, lotus_json: &serde_json::Value) -> String {
    let forest_pretty = serde_json::to_string_pretty(forest_json).unwrap_or_default();
    let lotus_pretty = serde_json::to_string_pretty(lotus_json).unwrap_or_default();
    let diff = TextDiff::from_lines(&forest_pretty, &lotus_pretty);

    let mut diff_text = String::new();
    for change in diff.iter_all_changes() {
        let sign = match change.tag() {
            ChangeTag::Delete => "-",
            ChangeTag::Insert => "+",
            ChangeTag::Equal => " ",
        };
        diff_text.push_str(&format!("{sign}{change}"));
    }
    diff_text
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Duration;

    #[test]
    fn test_performance_metrics_calculation() {
        let durations = vec![
            Duration::from_millis(100),
            Duration::from_millis(200),
            Duration::from_millis(300),
            Duration::from_millis(400),
            Duration::from_millis(500),
        ];
        let metrics = PerformanceMetrics::from_durations(&durations).unwrap();

        assert_eq!(metrics.test_count, 5);
        assert_eq!(metrics.total_duration_ms.as_millis(), 1500);
        assert_eq!(metrics.average_duration_ms.as_millis(), 300);
        assert_eq!(metrics.min_duration_ms.as_millis(), 100);
        assert_eq!(metrics.max_duration_ms.as_millis(), 500);
    }

    #[test]
    fn test_performance_metrics_empty() {
        let durations: Vec<Duration> = vec![];
        let metrics = PerformanceMetrics::from_durations(&durations);
        assert!(metrics.is_none());
    }

    #[test]
    fn test_performance_metrics_single_value() {
        let durations = vec![Duration::from_millis(150)];
        let metrics = PerformanceMetrics::from_durations(&durations).unwrap();

        assert_eq!(metrics.test_count, 1);
        assert_eq!(metrics.total_duration_ms.as_millis(), 150);
        assert_eq!(metrics.average_duration_ms.as_millis(), 150);
        assert_eq!(metrics.min_duration_ms.as_millis(), 150);
        assert_eq!(metrics.max_duration_ms.as_millis(), 150);
    }
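
    // A minimal sketch of a test for `generate_diff`; the JSON values are
    // hypothetical and only exercise the `-`/`+` line prefixes.
    #[test]
    fn test_generate_diff_marks_changed_lines() {
        let forest = serde_json::json!({ "value": 1 });
        let lotus = serde_json::json!({ "value": 2 });
        let diff = generate_diff(&forest, &lotus);
        assert!(diff.lines().any(|line| line.starts_with('-')));
        assert!(diff.lines().any(|line| line.starts_with('+')));
    }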
}