irithyll_core/tree/builder.rs
1//! Incremental tree construction with histogram-based splitting.
2//!
3//! [`TreeConfig`] defines all hyperparameters for a single streaming decision tree:
4//! depth limits, regularization, binning granularity, and the Hoeffding bound
5//! confidence parameter that controls when splits are committed.
6
7use alloc::vec::Vec;
8
9use crate::feature::FeatureType;
10use crate::tree::leaf_model::LeafModelType;
11
/// Configuration for a single streaming decision tree.
///
/// These parameters control tree growth, regularization, and the statistical
/// confidence required before committing a split decision.
///
/// # Defaults
///
/// | Parameter                | Default |
/// |--------------------------|---------|
/// | `max_depth`              | 6       |
/// | `n_bins`                 | 64      |
/// | `lambda`                 | 1.0     |
/// | `gamma`                  | 0.0     |
/// | `grace_period`           | 200     |
/// | `delta`                  | 1e-7    |
/// | `feature_subsample_rate` | 1.0     |
/// | `seed`                   | 42      |
///
/// All `Option`-typed fields default to `None`, i.e. the corresponding
/// feature is disabled unless explicitly enabled via the builder methods.
#[derive(Debug, Clone)]
pub struct TreeConfig {
    /// Maximum tree depth. Deeper trees capture more interactions but risk
    /// overfitting on streaming data. Default: 6.
    pub max_depth: usize,

    /// Number of histogram bins per feature. More bins give finer split
    /// resolution at the cost of memory and slower convergence. Default: 64.
    pub n_bins: usize,

    /// L2 regularization parameter (lambda). Penalizes large leaf weights,
    /// helping prevent overfitting. Appears in the denominator of the
    /// leaf weight formula: w = -G / (H + lambda). Default: 1.0.
    pub lambda: f64,

    /// Minimum split gain threshold (gamma). A candidate split must achieve
    /// gain > gamma to be accepted. Higher values produce more conservative
    /// trees. Default: 0.0.
    pub gamma: f64,

    /// Minimum number of samples a leaf must accumulate before evaluating
    /// potential splits. Also controls when bin edges are computed from
    /// observed feature values. Default: 200.
    pub grace_period: usize,

    /// Hoeffding bound confidence parameter (delta). Smaller values require
    /// more statistical evidence before committing a split, producing more
    /// conservative but more reliable trees. The bound guarantees that the
    /// chosen split is within epsilon of optimal with probability 1-delta.
    /// Default: 1e-7.
    pub delta: f64,

    /// Fraction of features to consider at each split evaluation. 1.0 means
    /// all features are evaluated; smaller values introduce randomness
    /// (similar to random forest feature bagging). Default: 1.0.
    pub feature_subsample_rate: f64,

    /// Random seed for feature subsampling. Default: 42.
    ///
    /// Set by the ensemble orchestrator to ensure deterministic, diverse
    /// behavior across trees (typically `config.seed ^ step_index`).
    pub seed: u64,

    /// Per-sample decay factor for leaf statistics.
    ///
    /// Computed from `leaf_half_life` as `exp(-ln(2) / half_life)`.
    /// When `Some(alpha)`, leaf gradient/hessian sums and histogram bins
    /// are decayed by `alpha` before each new accumulation.
    /// `None` (default) means no decay.
    pub leaf_decay_alpha: Option<f64>,

    /// Re-evaluation interval for max-depth leaves.
    ///
    /// When `Some(n)`, leaves at max depth will re-evaluate potential splits
    /// every `n` samples, allowing the tree to adapt its structure over time.
    /// `None` (default) disables re-evaluation.
    pub split_reeval_interval: Option<usize>,

    /// Per-feature type declarations (continuous vs categorical).
    ///
    /// When `Some`, categorical features use one-bin-per-category binning and
    /// Fisher optimal binary partitioning. `None` (default) treats all features
    /// as continuous.
    pub feature_types: Option<Vec<FeatureType>>,

    /// Per-leaf gradient clipping threshold in standard deviations.
    ///
    /// When `Some(sigma)`, leaf-level EWMA gradient statistics are tracked and
    /// incoming gradients are clamped to `mean ± sigma * std_dev`.
    /// `None` (default) disables clipping.
    pub gradient_clip_sigma: Option<f64>,

    /// Per-feature monotonic constraints: +1 = increasing, -1 = decreasing, 0 = free.
    ///
    /// Candidate splits violating monotonicity are rejected.
    /// `None` (default) means no constraints.
    pub monotone_constraints: Option<Vec<i8>>,

    /// Maximum absolute leaf output value.
    ///
    /// When `Some(max)`, leaf predictions are clamped to `[-max, max]`.
    /// Prevents runaway leaf weights from causing prediction explosions
    /// in feedback loops. `None` (default) means no clamping.
    pub max_leaf_output: Option<f64>,

    /// Per-leaf adaptive output bound (sigma multiplier).
    ///
    /// When `Some(k)`, each leaf tracks EWMA of its own output weight and
    /// clamps predictions to `|output_mean| + k * output_std`.
    /// `None` (default) disables adaptive bounds.
    pub adaptive_leaf_bound: Option<f64>,

    /// Per-split information criterion (Lunde-Kleppe-Skaug 2020).
    ///
    /// When `Some(cir_factor)`, replaces `max_depth` with a per-split
    /// generalization test. Each candidate split must have
    /// `gain > cir_factor * sigma^2_g / n * n_features`.
    /// `max_depth * 2` becomes a hard safety ceiling.
    ///
    /// Typical: 7.5 (<=10 features), 9.0 (<=50), 11.0 (<=200).
    /// `None` (default) uses static `max_depth` only.
    pub adaptive_depth: Option<f64>,

    /// Minimum hessian sum before a leaf produces non-zero output.
    ///
    /// When `Some(min_h)`, leaves with `hess_sum < min_h` return 0.0.
    /// Prevents post-replacement spikes from fresh leaves with insufficient
    /// samples. `None` (default) means all leaves contribute immediately.
    pub min_hessian_sum: Option<f64>,

    /// Leaf prediction model type.
    ///
    /// Controls how each leaf computes its prediction:
    /// - [`ClosedForm`](LeafModelType::ClosedForm) (default): constant leaf weight
    ///   `w = -G / (H + lambda)`.
    /// - [`Linear`](LeafModelType::Linear): online ridge regression with AdaGrad
    ///   optimization, learning a local `w . x + b` surface. Optional `decay` for
    ///   concept drift. Recommended for low-depth trees (depth 2--4).
    /// - [`MLP`](LeafModelType::MLP): single hidden layer neural network per leaf.
    ///   Optional `decay` for concept drift.
    /// - [`Adaptive`](LeafModelType::Adaptive): starts as closed-form, auto-promotes
    ///   to a more complex model when the Hoeffding bound (using [`delta`](Self::delta))
    ///   confirms it is statistically superior. No arbitrary thresholds.
    pub leaf_model_type: LeafModelType,
}
153
154impl Default for TreeConfig {
155 fn default() -> Self {
156 Self {
157 max_depth: 6,
158 n_bins: 64,
159 lambda: 1.0,
160 gamma: 0.0,
161 grace_period: 200,
162 delta: 1e-7,
163 feature_subsample_rate: 1.0,
164 seed: 42,
165 leaf_decay_alpha: None,
166 split_reeval_interval: None,
167 feature_types: None,
168 gradient_clip_sigma: None,
169 monotone_constraints: None,
170 max_leaf_output: None,
171 adaptive_leaf_bound: None,
172 adaptive_depth: None,
173 min_hessian_sum: None,
174 leaf_model_type: LeafModelType::default(),
175 }
176 }
177}
178
179impl TreeConfig {
180 /// Create a new `TreeConfig` with default parameters.
181 ///
182 /// Equivalent to `TreeConfig::default()`, but provided as a named
183 /// constructor for clarity in builder chains.
184 pub fn new() -> Self {
185 Self::default()
186 }
187
188 /// Set the maximum tree depth.
189 #[inline]
190 pub fn max_depth(mut self, max_depth: usize) -> Self {
191 self.max_depth = max_depth;
192 self
193 }
194
195 /// Set the number of histogram bins per feature.
196 #[inline]
197 pub fn n_bins(mut self, n_bins: usize) -> Self {
198 self.n_bins = n_bins;
199 self
200 }
201
202 /// Set the L2 regularization parameter (lambda).
203 #[inline]
204 pub fn lambda(mut self, lambda: f64) -> Self {
205 self.lambda = lambda;
206 self
207 }
208
209 /// Set the minimum split gain threshold (gamma).
210 #[inline]
211 pub fn gamma(mut self, gamma: f64) -> Self {
212 self.gamma = gamma;
213 self
214 }
215
216 /// Set the grace period (minimum samples before evaluating splits).
217 #[inline]
218 pub fn grace_period(mut self, grace_period: usize) -> Self {
219 self.grace_period = grace_period;
220 self
221 }
222
223 /// Set the Hoeffding bound confidence parameter (delta).
224 #[inline]
225 pub fn delta(mut self, delta: f64) -> Self {
226 self.delta = delta;
227 self
228 }
229
230 /// Set the feature subsample rate.
231 #[inline]
232 pub fn feature_subsample_rate(mut self, rate: f64) -> Self {
233 self.feature_subsample_rate = rate.clamp(0.0, 1.0);
234 self
235 }
236
237 /// Set the random seed for feature subsampling.
238 #[inline]
239 pub fn seed(mut self, seed: u64) -> Self {
240 self.seed = seed;
241 self
242 }
243
244 /// Set the per-sample decay factor for leaf statistics.
245 #[inline]
246 pub fn leaf_decay_alpha(mut self, alpha: f64) -> Self {
247 self.leaf_decay_alpha = Some(alpha);
248 self
249 }
250
251 /// Optionally set the per-sample decay factor for leaf statistics.
252 #[inline]
253 pub fn leaf_decay_alpha_opt(mut self, alpha: Option<f64>) -> Self {
254 self.leaf_decay_alpha = alpha;
255 self
256 }
257
258 /// Set the re-evaluation interval for max-depth leaves.
259 #[inline]
260 pub fn split_reeval_interval(mut self, interval: usize) -> Self {
261 self.split_reeval_interval = Some(interval);
262 self
263 }
264
265 /// Optionally set the re-evaluation interval for max-depth leaves.
266 #[inline]
267 pub fn split_reeval_interval_opt(mut self, interval: Option<usize>) -> Self {
268 self.split_reeval_interval = interval;
269 self
270 }
271
272 /// Set the per-feature type declarations.
273 #[inline]
274 pub fn feature_types(mut self, types: Vec<FeatureType>) -> Self {
275 self.feature_types = Some(types);
276 self
277 }
278
279 /// Optionally set the per-feature type declarations.
280 #[inline]
281 pub fn feature_types_opt(mut self, types: Option<Vec<FeatureType>>) -> Self {
282 self.feature_types = types;
283 self
284 }
285
286 /// Set the gradient clipping threshold in standard deviations.
287 #[inline]
288 pub fn gradient_clip_sigma(mut self, sigma: f64) -> Self {
289 self.gradient_clip_sigma = Some(sigma);
290 self
291 }
292
293 /// Optionally set the gradient clipping threshold.
294 #[inline]
295 pub fn gradient_clip_sigma_opt(mut self, sigma: Option<f64>) -> Self {
296 self.gradient_clip_sigma = sigma;
297 self
298 }
299
300 /// Set per-feature monotonic constraints.
301 #[inline]
302 pub fn monotone_constraints(mut self, constraints: Vec<i8>) -> Self {
303 self.monotone_constraints = Some(constraints);
304 self
305 }
306
307 /// Optionally set per-feature monotonic constraints.
308 #[inline]
309 pub fn monotone_constraints_opt(mut self, constraints: Option<Vec<i8>>) -> Self {
310 self.monotone_constraints = constraints;
311 self
312 }
313
314 /// Set the maximum absolute leaf output value.
315 #[inline]
316 pub fn max_leaf_output(mut self, max: f64) -> Self {
317 self.max_leaf_output = Some(max);
318 self
319 }
320
321 /// Optionally set the maximum absolute leaf output value.
322 #[inline]
323 pub fn max_leaf_output_opt(mut self, max: Option<f64>) -> Self {
324 self.max_leaf_output = max;
325 self
326 }
327
328 /// Optionally set per-leaf adaptive output bound.
329 #[inline]
330 pub fn adaptive_leaf_bound_opt(mut self, k: Option<f64>) -> Self {
331 self.adaptive_leaf_bound = k;
332 self
333 }
334
335 /// Set the per-split information criterion factor (Lunde-Kleppe-Skaug 2020).
336 ///
337 /// When enabled, each candidate split must pass a generalization test
338 /// before being committed. `max_depth * 2` becomes a hard safety ceiling.
339 /// Typical: 7.5 (<=10 features), 9.0 (<=50), 11.0 (<=200).
340 #[inline]
341 pub fn adaptive_depth(mut self, factor: f64) -> Self {
342 self.adaptive_depth = Some(factor);
343 self
344 }
345
346 /// Optionally set the per-split information criterion factor.
347 #[inline]
348 pub fn adaptive_depth_opt(mut self, factor: Option<f64>) -> Self {
349 self.adaptive_depth = factor;
350 self
351 }
352
353 /// Set the minimum hessian sum for leaf output.
354 #[inline]
355 pub fn min_hessian_sum(mut self, min_h: f64) -> Self {
356 self.min_hessian_sum = Some(min_h);
357 self
358 }
359
360 /// Optionally set the minimum hessian sum for leaf output.
361 #[inline]
362 pub fn min_hessian_sum_opt(mut self, min_h: Option<f64>) -> Self {
363 self.min_hessian_sum = min_h;
364 self
365 }
366
367 /// Set the leaf prediction model type.
368 ///
369 /// [`LeafModelType::Linear`] is recommended for low-depth configurations
370 /// (depth 2--4) where per-leaf linear models significantly reduce
371 /// approximation error compared to constant leaves.
372 ///
373 /// [`LeafModelType::Adaptive`] automatically selects between closed-form and
374 /// a trainable model per leaf, using the Hoeffding bound for promotion.
375 #[inline]
376 pub fn leaf_model_type(mut self, lmt: LeafModelType) -> Self {
377 self.leaf_model_type = lmt;
378 self
379 }
380}
381
#[cfg(test)]
mod tests {
    use super::*;

    /// Epsilon-tolerance float comparison shared by the assertions below.
    fn close(a: f64, b: f64) -> bool {
        (a - b).abs() < f64::EPSILON
    }

    #[test]
    fn default_values() {
        let cfg = TreeConfig::default();
        assert_eq!(cfg.max_depth, 6);
        assert_eq!(cfg.n_bins, 64);
        assert!(close(cfg.lambda, 1.0));
        assert!(close(cfg.gamma, 0.0));
        assert_eq!(cfg.grace_period, 200);
        assert!(close(cfg.delta, 1e-7));
        assert!(close(cfg.feature_subsample_rate, 1.0));
    }

    #[test]
    fn new_equals_default() {
        let via_new = TreeConfig::new();
        let via_default = TreeConfig::default();
        assert_eq!(via_new.max_depth, via_default.max_depth);
        assert_eq!(via_new.n_bins, via_default.n_bins);
        assert!(close(via_new.lambda, via_default.lambda));
        assert!(close(via_new.gamma, via_default.gamma));
        assert_eq!(via_new.grace_period, via_default.grace_period);
        assert!(close(via_new.delta, via_default.delta));
        assert!(close(
            via_new.feature_subsample_rate,
            via_default.feature_subsample_rate
        ));
    }

    #[test]
    fn builder_chain() {
        let cfg = TreeConfig::new()
            .max_depth(10)
            .n_bins(128)
            .lambda(0.5)
            .gamma(0.1)
            .grace_period(500)
            .delta(1e-3)
            .feature_subsample_rate(0.8);

        assert_eq!(cfg.max_depth, 10);
        assert_eq!(cfg.n_bins, 128);
        assert!(close(cfg.lambda, 0.5));
        assert!(close(cfg.gamma, 0.1));
        assert_eq!(cfg.grace_period, 500);
        assert!(close(cfg.delta, 1e-3));
        assert!(close(cfg.feature_subsample_rate, 0.8));
    }

    #[test]
    fn feature_subsample_rate_clamped() {
        // Over-range input saturates at 1.0.
        let high = TreeConfig::new().feature_subsample_rate(1.5);
        assert!(close(high.feature_subsample_rate, 1.0));

        // Negative input saturates at 0.0.
        let low = TreeConfig::new().feature_subsample_rate(-0.3);
        assert!(close(low.feature_subsample_rate, 0.0));
    }

    #[test]
    fn max_leaf_output_builder() {
        let cfg = TreeConfig::new().max_leaf_output(1.5);
        assert_eq!(cfg.max_leaf_output, Some(1.5));
    }

    #[test]
    fn min_hessian_sum_builder() {
        let cfg = TreeConfig::new().min_hessian_sum(10.0);
        assert_eq!(cfg.min_hessian_sum, Some(10.0));
    }

    #[test]
    fn max_leaf_output_default_none() {
        let cfg = TreeConfig::default();
        assert!(cfg.max_leaf_output.is_none());
        assert!(cfg.min_hessian_sum.is_none());
    }
}
458}