// finance_query/backtesting/comparison.rs
1//! Side-by-side comparison of multiple backtest results.
2//!
3//! Use [`BacktestComparison`] to rank several [`BacktestResult`]s by a chosen
4//! metric and inspect every strategy's key numbers in one place.
5//!
6//! # Example
7//!
8//! ```ignore
9//! use finance_query::backtesting::{
10//!     BacktestComparison, BacktestConfig, SmaCrossover, MacdSignal,
11//!     optimizer::OptimizeMetric,
12//! };
13//! use finance_query::{Ticker, Interval, TimeRange};
14//!
15//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
16//! let ticker = Ticker::new("AAPL").await?;
17//! let config  = BacktestConfig::default();
18//!
19//! let result1 = ticker.backtest(SmaCrossover::new(10, 50), Interval::OneDay, TimeRange::OneYear, None).await?;
20//! let result2 = ticker.backtest(MacdSignal::default(),     Interval::OneDay, TimeRange::OneYear, None).await?;
21//!
22//! let report = BacktestComparison::new()
23//!     .add("SMA 10/50", result1)
24//!     .add("MACD Signal", result2)
25//!     .ranked_by(OptimizeMetric::SharpeRatio);
26//!
27//! println!("Winner: {}", report.winner());
28//! for row in report.table() {
29//!     println!("{}: sharpe={:.2} return={:.1}%", row.label, row.sharpe_ratio, row.total_return_pct);
30//! }
31//! # Ok(())
32//! # }
33//! ```
34
35use serde::{Deserialize, Serialize};
36
37use super::optimizer::OptimizeMetric;
38use super::result::BacktestResult;
39
40// ── ComparisonRow ────────────────────────────────────────────────────────────
41
/// A single row in the comparison table — one strategy's key metrics.
///
/// Rows are produced internally by [`BacktestComparison::ranked_by`]; the
/// struct is `#[non_exhaustive]`, so it cannot be constructed outside this
/// crate.
#[non_exhaustive]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComparisonRow {
    /// User-supplied label for the strategy (e.g. `"SMA 10/50"`).
    pub label: String,

    /// Name embedded in the [`BacktestResult`] by the engine.
    pub strategy_name: String,

    /// Symbol that was tested.
    pub symbol: String,

    /// Total return percentage.
    pub total_return_pct: f64,

    /// Annualised return percentage.
    pub annualized_return_pct: f64,

    /// Sharpe ratio.
    pub sharpe_ratio: f64,

    /// Sortino ratio.
    pub sortino_ratio: f64,

    /// Calmar ratio.
    pub calmar_ratio: f64,

    /// Maximum drawdown as a fraction (0.0–1.0).
    ///
    /// Multiply by 100 to get a conventional percentage.
    pub max_drawdown_pct: f64,

    /// Win rate (`winning_trades / total_trades`).
    pub win_rate: f64,

    /// Profit factor (`gross_profit / gross_loss`).
    pub profit_factor: f64,

    /// Total number of completed trades.
    pub total_trades: usize,

    /// Kelly Criterion: optimal capital fraction to risk per trade.
    pub kelly_criterion: f64,

    /// System Quality Number.
    pub sqn: f64,

    /// Expectancy in dollar terms per trade.
    pub expectancy: f64,

    /// Omega Ratio.
    pub omega_ratio: f64,

    /// Time in market as a fraction (0.0–1.0).
    pub time_in_market_pct: f64,

    /// The score on the metric used to rank the comparison.
    pub rank_score: f64,

    /// 1-based rank within the comparison (1 = best).
    pub rank: usize,
}
105
106impl ComparisonRow {
107    fn from_result(label: &str, result: &BacktestResult, metric: OptimizeMetric) -> Self {
108        let m = &result.metrics;
109        let rank_score = metric.score(result);
110        ComparisonRow {
111            label: label.to_owned(),
112            strategy_name: result.strategy_name.clone(),
113            symbol: result.symbol.clone(),
114            total_return_pct: m.total_return_pct,
115            annualized_return_pct: m.annualized_return_pct,
116            sharpe_ratio: m.sharpe_ratio,
117            sortino_ratio: m.sortino_ratio,
118            calmar_ratio: m.calmar_ratio,
119            max_drawdown_pct: m.max_drawdown_pct,
120            win_rate: m.win_rate,
121            profit_factor: m.profit_factor,
122            total_trades: m.total_trades,
123            kelly_criterion: m.kelly_criterion,
124            sqn: m.sqn,
125            expectancy: m.expectancy,
126            omega_ratio: m.omega_ratio,
127            time_in_market_pct: m.time_in_market_pct,
128            rank_score,
129            // placeholder; assigned after sorting
130            rank: 0,
131        }
132    }
133}
134
135// ── BacktestComparison (builder) ──────────────────────────────────────────────
136
/// Builder that accumulates [`BacktestResult`]s and ranks them.
///
/// # Ordering
///
/// Call [`ranked_by`](BacktestComparison::ranked_by) to produce a
/// [`ComparisonReport`] sorted best-first by the chosen [`OptimizeMetric`].
///
/// `Default` yields an empty comparison, so [`BacktestComparison::new`] and
/// `BacktestComparison::default()` are equivalent.
#[derive(Debug, Default)]
pub struct BacktestComparison {
    // Insertion-ordered (label, result) pairs; sorting happens in `ranked_by`.
    entries: Vec<(String, BacktestResult)>,
}
147
148impl BacktestComparison {
149    /// Create an empty comparison.
150    pub fn new() -> Self {
151        Self::default()
152    }
153
154    /// Add a labelled backtest result.
155    ///
156    /// The `label` is an arbitrary human-readable name (e.g. `"SMA 10/50"`).
157    /// It does **not** have to match the strategy's internal name.
158    pub fn add(mut self, label: impl Into<String>, result: BacktestResult) -> Self {
159        self.entries.push((label.into(), result));
160        self
161    }
162
163    /// Rank all added results by `metric` and return a [`ComparisonReport`].
164    ///
165    /// Results are sorted **best-first** (highest score wins for all metrics
166    /// except [`OptimizeMetric::MinDrawdown`], which is already negated
167    /// internally so that a lower drawdown yields a higher score).
168    pub fn ranked_by(self, metric: OptimizeMetric) -> ComparisonReport {
169        let mut rows: Vec<ComparisonRow> = self
170            .entries
171            .iter()
172            .map(|(label, result)| ComparisonRow::from_result(label, result, metric))
173            .collect();
174
175        // Sort best-first; use total_return_pct as a tie-breaker.
176        rows.sort_by(|a, b| {
177            b.rank_score
178                .partial_cmp(&a.rank_score)
179                .unwrap_or(std::cmp::Ordering::Equal)
180                .then_with(|| {
181                    b.total_return_pct
182                        .partial_cmp(&a.total_return_pct)
183                        .unwrap_or(std::cmp::Ordering::Equal)
184                })
185        });
186
187        // Assign 1-based ranks.
188        for (idx, row) in rows.iter_mut().enumerate() {
189            row.rank = idx + 1;
190        }
191
192        ComparisonReport { rows, metric }
193    }
194}
195
196// ── ComparisonReport ──────────────────────────────────────────────────────────
197
/// Ranked comparison of multiple backtest results produced by
/// [`BacktestComparison::ranked_by`].
///
/// `#[non_exhaustive]`: obtain instances via the builder (or deserialization),
/// not by struct literal.
#[non_exhaustive]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComparisonReport {
    /// Rows sorted best-first by the chosen metric.
    pub rows: Vec<ComparisonRow>,
    /// The metric used for ranking.
    pub metric: OptimizeMetric,
}
208
209impl ComparisonReport {
210    /// Label of the best-performing strategy.
211    ///
212    /// Returns `""` when the report contains no entries.
213    pub fn winner(&self) -> &str {
214        self.rows.first().map(|r| r.label.as_str()).unwrap_or("")
215    }
216
217    /// All rows sorted best-first (rank 1 = winner).
218    pub fn table(&self) -> &[ComparisonRow] {
219        &self.rows
220    }
221
222    /// Returns the row for the winning strategy, if any.
223    pub fn winner_row(&self) -> Option<&ComparisonRow> {
224        self.rows.first()
225    }
226
227    /// Returns the number of strategies in the comparison.
228    pub fn len(&self) -> usize {
229        self.rows.len()
230    }
231
232    /// Returns `true` when no results were added.
233    pub fn is_empty(&self) -> bool {
234        self.rows.is_empty()
235    }
236}
237
238// ── Tests ─────────────────────────────────────────────────────────────────────
239
#[cfg(test)]
mod tests {
    use super::*;
    use crate::backtesting::{
        BacktestConfig,
        optimizer::OptimizeMetric,
        result::{BacktestResult, EquityPoint, PerformanceMetrics},
    };

    /// Build a minimal `PerformanceMetrics` with only the fields under test set.
    ///
    /// All other fields are zeroed or set to neutral values so that ranking
    /// depends only on the three inputs.
    fn metrics_with(
        total_return_pct: f64,
        sharpe_ratio: f64,
        max_drawdown_pct: f64,
    ) -> PerformanceMetrics {
        PerformanceMetrics {
            total_return_pct,
            // Mirror total return for simplicity; the tests never rank on it
            // independently.
            annualized_return_pct: total_return_pct,
            sharpe_ratio,
            sortino_ratio: 0.0,
            calmar_ratio: 0.0,
            max_drawdown_pct,
            max_drawdown_duration: 0,
            win_rate: 0.0,
            profit_factor: 1.0,
            avg_trade_return_pct: 0.0,
            avg_win_pct: 0.0,
            avg_loss_pct: 0.0,
            avg_trade_duration: 0.0,
            total_trades: 1,
            winning_trades: 1,
            losing_trades: 0,
            largest_win: 0.0,
            largest_loss: 0.0,
            max_consecutive_wins: 0,
            max_consecutive_losses: 0,
            total_commission: 0.0,
            long_trades: 1,
            short_trades: 0,
            total_signals: 1,
            executed_signals: 1,
            avg_win_duration: 0.0,
            avg_loss_duration: 0.0,
            time_in_market_pct: 0.5,
            max_idle_period: 0,
            total_dividend_income: 0.0,
            kelly_criterion: 0.0,
            sqn: 0.0,
            expectancy: 0.0,
            omega_ratio: 1.0,
            tail_ratio: 1.0,
            recovery_factor: 1.0,
            ulcer_index: 0.0,
            serenity_ratio: 0.0,
        }
    }

    /// Build a bare-bones `BacktestResult` around [`metrics_with`]: no trades
    /// or signals, a single-point equity curve, and `final_equity` consistent
    /// with the requested `total_return`.
    fn make_result(strategy_name: &str, total_return: f64, sharpe: f64, dd: f64) -> BacktestResult {
        BacktestResult {
            symbol: "TEST".to_owned(),
            strategy_name: strategy_name.to_owned(),
            config: BacktestConfig::default(),
            start_timestamp: 0,
            end_timestamp: 1_000_000,
            initial_capital: 10_000.0,
            // Keep final equity consistent with the percentage return.
            final_equity: 10_000.0 * (1.0 + total_return / 100.0),
            metrics: metrics_with(total_return, sharpe, dd),
            trades: vec![],
            equity_curve: vec![EquityPoint {
                timestamp: 0,
                equity: 10_000.0,
                drawdown_pct: 0.0,
            }],
            signals: vec![],
            open_position: None,
            benchmark: None,
            diagnostics: vec![],
        }
    }

    /// An empty comparison must report no winner and an empty table.
    #[test]
    fn empty_comparison() {
        let report = BacktestComparison::new().ranked_by(OptimizeMetric::SharpeRatio);
        assert!(report.is_empty());
        assert_eq!(report.winner(), "");
        assert!(report.winner_row().is_none());
        assert_eq!(report.table().len(), 0);
    }

    /// A lone entry is trivially the winner with rank 1.
    #[test]
    fn single_entry_is_winner() {
        let result = make_result("SMA", 10.0, 1.5, 0.05);
        let report = BacktestComparison::new()
            .add("SMA 10/50", result)
            .ranked_by(OptimizeMetric::SharpeRatio);

        assert_eq!(report.winner(), "SMA 10/50");
        assert_eq!(report.len(), 1);
        assert_eq!(report.table()[0].rank, 1);
    }

    /// Ranking by Sharpe orders rows by sharpe_ratio descending.
    #[test]
    fn ranked_by_sharpe() {
        let r1 = make_result("SMA", 10.0, 0.8, 0.10);
        let r2 = make_result("MACD", 15.0, 1.5, 0.12);
        let r3 = make_result("RSI", 5.0, 1.2, 0.08);

        let report = BacktestComparison::new()
            .add("SMA 10/50", r1)
            .add("MACD Signal", r2)
            .add("RSI Mean Rev", r3)
            .ranked_by(OptimizeMetric::SharpeRatio);

        assert_eq!(report.winner(), "MACD Signal");
        let table = report.table();
        assert_eq!(table[0].label, "MACD Signal");
        assert_eq!(table[1].label, "RSI Mean Rev");
        assert_eq!(table[2].label, "SMA 10/50");
        assert_eq!(table[0].rank, 1);
        assert_eq!(table[1].rank, 2);
        assert_eq!(table[2].rank, 3);
    }

    /// Ranking by total return ignores the (worse) Sharpe of the winner.
    #[test]
    fn ranked_by_total_return() {
        let r1 = make_result("SMA", 10.0, 0.8, 0.10);
        let r2 = make_result("MACD", 25.0, 0.6, 0.20);

        let report = BacktestComparison::new()
            .add("SMA", r1)
            .add("MACD", r2)
            .ranked_by(OptimizeMetric::TotalReturn);

        assert_eq!(report.winner(), "MACD");
    }

    #[test]
    fn ranked_by_min_drawdown() {
        // Lower drawdown should rank higher.
        let r1 = make_result("SMA", 10.0, 0.8, 0.20);
        let r2 = make_result("MACD", 10.0, 0.8, 0.05);

        let report = BacktestComparison::new()
            .add("High DD", r1)
            .add("Low DD", r2)
            .ranked_by(OptimizeMetric::MinDrawdown);

        assert_eq!(report.winner(), "Low DD");
    }

    #[test]
    fn tie_broken_by_total_return() {
        // Both have identical Sharpe; higher return should win.
        let r1 = make_result("A", 20.0, 1.0, 0.10);
        let r2 = make_result("B", 5.0, 1.0, 0.10);

        let report = BacktestComparison::new()
            .add("A", r1)
            .add("B", r2)
            .ranked_by(OptimizeMetric::SharpeRatio);

        assert_eq!(report.winner(), "A");
    }

    /// `table()` and `len()` expose every added entry, regardless of count.
    #[test]
    fn table_returns_all_rows() {
        let n = 5;
        let mut comparison = BacktestComparison::new();
        for i in 0..n {
            comparison = comparison.add(
                format!("Strategy {i}"),
                make_result(&format!("S{i}"), i as f64 * 2.0, i as f64 * 0.5, 0.1),
            );
        }
        let report = comparison.ranked_by(OptimizeMetric::SharpeRatio);
        assert_eq!(report.table().len(), n);
        assert_eq!(report.len(), n);
    }

    /// Every flattened field on the row must match the source result.
    #[test]
    fn row_fields_populated_correctly() {
        let result = make_result("SMA", 12.0, 1.3, 0.07);
        let report = BacktestComparison::new()
            .add("My Strategy", result)
            .ranked_by(OptimizeMetric::SharpeRatio);

        let row = &report.table()[0];
        assert_eq!(row.label, "My Strategy");
        assert_eq!(row.strategy_name, "SMA");
        assert_eq!(row.symbol, "TEST");
        assert!((row.total_return_pct - 12.0).abs() < 1e-10);
        assert!((row.sharpe_ratio - 1.3).abs() < 1e-10);
        assert!((row.max_drawdown_pct - 0.07).abs() < 1e-10);
        assert_eq!(row.rank, 1);
    }
}
435}