embedded-charts 0.3.0

A rich graph framework for embedded systems using embedded-graphics with std/no_std support
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
//! Data aggregation and downsampling for efficient chart rendering.
//!
//! This module provides efficient algorithms for reducing the number of data points
//! while preserving important characteristics of the data. This is crucial for:
//! - Large datasets that exceed display resolution
//! - Real-time streaming data that needs performance optimization
//! - Memory-constrained embedded environments
//! - Maintaining visual fidelity while reducing computational load
//!
//! # Aggregation Strategies
//!
//! Different strategies for combining multiple data points into a single representative point:
//!
//! ## Min-Max Aggregation
//! Preserves extremes in the data, essential for identifying peaks and troughs:
//! ```rust
//! use embedded_charts::prelude::*;
//! use embedded_charts::data::aggregation::*;
//!
//! let data = data_points![(0.0, 10.0), (1.0, 25.0), (2.0, 5.0), (3.0, 20.0)];
//! let config = AggregationConfig {
//!     strategy: AggregationStrategy::MinMax,
//!     target_points: 2,
//!     ..Default::default()
//! };
//! let aggregated: StaticDataSeries<_, 8> = data.aggregate(&config)?;
//! // Result preserves the minimum (5.0) and maximum (25.0) values
//! # Ok::<(), embedded_charts::error::DataError>(())
//! ```
//!
//! ## Statistical Aggregation
//! Uses statistical measures to represent groups of data points:
//! ```rust
//! use embedded_charts::prelude::*;
//! use embedded_charts::data::aggregation::*;
//!
//! let data = data_points![(0.0, 10.0), (1.0, 20.0), (2.0, 30.0), (3.0, 40.0)];
//! let config = AggregationConfig {
//!     strategy: AggregationStrategy::Mean,
//!     target_points: 2,
//!     ..Default::default()
//! };
//! let mean_aggregated: StaticDataSeries<_, 8> = data.aggregate(&config)?;
//! # Ok::<(), embedded_charts::error::DataError>(())
//! ```
//!
//! # Downsampling Algorithms
//!
//! ## Largest Triangle Three Buckets (LTTB)
//! Advanced algorithm that preserves visual characteristics:
//! ```rust
//! use embedded_charts::prelude::*;
//! use embedded_charts::data::aggregation::*;
//!
//! let data = data_points![(0.0, 10.0), (1.0, 25.0), (2.0, 5.0), (3.0, 20.0)];
//! let config = DownsamplingConfig {
//!     max_points: 50,
//!     ..Default::default()
//! };
//! let downsampled: StaticDataSeries<_, 8> = data.downsample_lttb(&config)?;
//! # Ok::<(), embedded_charts::error::DataError>(())
//! ```
//!
//! ## Uniform Downsampling
//! Simple algorithm that takes every Nth point:
//! ```rust
//! use embedded_charts::prelude::*;
//! use embedded_charts::data::aggregation::*;
//!
//! let data = data_points![(0.0, 10.0), (1.0, 25.0), (2.0, 5.0), (3.0, 20.0)];
//! let config = DownsamplingConfig {
//!     max_points: 2,
//!     ..Default::default()
//! };
//! let downsampled: StaticDataSeries<_, 8> = data.downsample_uniform(&config)?;
//! # Ok::<(), embedded_charts::error::DataError>(())
//! ```
//!
//! # Memory Efficiency
//!
//! All aggregation operates with bounded memory usage:
//! - Static allocation for intermediate calculations
//! - Configurable output capacity
//! - No heap allocation in no_std environments

use crate::data::{DataPoint, DataSeries, StaticDataSeries};
use crate::error::{DataError, DataResult};

#[cfg(not(feature = "std"))]
use micromath::F32Ext;

/// Strategy for aggregating multiple data points into a single representative point
///
/// Each variant defines how one group of consecutive source points is
/// collapsed into a single output point during aggregation.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AggregationStrategy {
    /// Take the mean (average) of X and Y coordinates
    Mean,
    /// Take the median of X and Y coordinates (computed independently per axis)
    Median,
    /// Preserve extreme Y values within each group
    ///
    /// NOTE(review): the current implementation selects the single point with
    /// the maximum Y value per group (see `aggregate_group`); it does not emit
    /// both the minimum and maximum, nor average the X coordinates as the
    /// name might suggest — confirm intended behavior.
    MinMax,
    /// Take the first point in each group
    First,
    /// Take the last point in each group
    Last,
    /// Take the point with maximum Y value
    Max,
    /// Take the point with minimum Y value
    Min,
}

/// Configuration for data aggregation operations
#[derive(Debug, Clone)]
pub struct AggregationConfig {
    /// Strategy to use for combining data points
    pub strategy: AggregationStrategy,
    /// Target number of points after aggregation
    ///
    /// When `preserve_endpoints` is set, the exact last point is appended
    /// separately, so the output may contain slightly more points than this.
    pub target_points: usize,
    /// Whether to preserve first and last points exactly
    pub preserve_endpoints: bool,
    /// Minimum number of source points required for aggregation
    ///
    /// Acts as a lower bound on the computed per-group size.
    pub min_group_size: usize,
}

impl Default for AggregationConfig {
    fn default() -> Self {
        Self {
            strategy: AggregationStrategy::Mean,
            target_points: 100,
            preserve_endpoints: true,
            min_group_size: 1,
        }
    }
}

/// Configuration for downsampling operations
#[derive(Debug, Clone)]
pub struct DownsamplingConfig {
    /// Maximum number of points in the output
    pub max_points: usize,
    /// Whether to preserve first and last points exactly
    ///
    /// Only consulted by the LTTB algorithm; uniform downsampling always
    /// starts at the first point and steps at a fixed stride.
    pub preserve_endpoints: bool,
    /// Minimum ratio of `input_len / max_points` below which no downsampling
    /// is performed and the data is passed through unchanged.
    pub min_reduction_ratio: f32,
}

impl Default for DownsamplingConfig {
    fn default() -> Self {
        Self {
            max_points: 1000,
            preserve_endpoints: true,
            // Only downsample when the input has at least 1.5x more points
            // than `max_points`, i.e. when the output would be at most ~2/3
            // of the input. (A "reduce by at least 50%" policy would be a
            // ratio of 2.0, not 1.5.)
            min_reduction_ratio: 1.5,
        }
    }
}

/// Statistics calculated for a group of data points during aggregation
///
/// Produced by [`DataAggregation::calculate_group_stats`]. Mean values are
/// accumulated in `f32` and converted back via `From<f32>`, so precision is
/// limited to single-precision floating point.
#[derive(Debug, Clone)]
pub struct GroupStats<T: DataPoint> {
    /// Number of points in the group
    pub count: usize,
    /// Minimum X value
    pub min_x: T::X,
    /// Maximum X value
    pub max_x: T::X,
    /// Minimum Y value
    pub min_y: T::Y,
    /// Maximum Y value
    pub max_y: T::Y,
    /// Mean X value (computed via f32 accumulation)
    pub mean_x: T::X,
    /// Mean Y value (computed via f32 accumulation)
    pub mean_y: T::Y,
    /// First point in the group
    pub first: T,
    /// Last point in the group
    pub last: T,
}

/// Trait providing aggregation and downsampling capabilities for data series
pub trait DataAggregation: DataSeries {
    /// Aggregate data points using the specified strategy
    ///
    /// # Arguments
    /// * `config` - Configuration for the aggregation operation
    ///
    /// # Returns
    /// A new data series with aggregated points
    ///
    /// # Errors
    /// Propagates any error from pushing into the fixed-capacity output
    /// (e.g. when more than `N` points would be produced).
    fn aggregate<const N: usize>(
        &self,
        config: &AggregationConfig,
    ) -> DataResult<StaticDataSeries<Self::Item, N>>;

    /// Downsample data using Largest Triangle Three Buckets algorithm
    ///
    /// This algorithm preserves the visual characteristics of the data better than
    /// simple uniform sampling by considering the area of triangles formed by
    /// adjacent points.
    ///
    /// # Arguments
    /// * `config` - Configuration for the downsampling operation
    ///
    /// # Returns
    /// A new data series with downsampled points
    ///
    /// # Errors
    /// Propagates any error from pushing into the fixed-capacity output.
    fn downsample_lttb<const N: usize>(
        &self,
        config: &DownsamplingConfig,
    ) -> DataResult<StaticDataSeries<Self::Item, N>>;

    /// Downsample data using uniform sampling (every Nth point)
    ///
    /// # Arguments
    /// * `config` - Configuration for the downsampling operation
    ///
    /// # Returns
    /// A new data series with uniformly sampled points
    ///
    /// # Errors
    /// Propagates any error from pushing into the fixed-capacity output.
    fn downsample_uniform<const N: usize>(
        &self,
        config: &DownsamplingConfig,
    ) -> DataResult<StaticDataSeries<Self::Item, N>>;

    /// Calculate statistics for a group of data points
    ///
    /// # Arguments
    /// * `points` - Slice of data points to analyze
    ///
    /// # Returns
    /// Statistics for the group of points
    ///
    /// # Errors
    /// Returns an error if `points` is empty.
    fn calculate_group_stats(&self, points: &[Self::Item]) -> DataResult<GroupStats<Self::Item>>
    where
        Self::Item: Clone;
}

/// Implementation of aggregation for StaticDataSeries
impl<T, const M: usize> DataAggregation for StaticDataSeries<T, M>
where
    T: DataPoint + Clone + Copy,
    T::X: PartialOrd
        + Copy
        + core::ops::Add<Output = T::X>
        + core::ops::Div<f32, Output = T::X>
        + Into<f32>
        + From<f32>,
    T::Y: PartialOrd
        + Copy
        + core::ops::Add<Output = T::Y>
        + core::ops::Div<f32, Output = T::Y>
        + Into<f32>
        + From<f32>,
{
    fn aggregate<const N: usize>(
        &self,
        config: &AggregationConfig,
    ) -> DataResult<StaticDataSeries<T, N>> {
        if self.is_empty() {
            return Ok(StaticDataSeries::new());
        }

        // Already at or below the target: pass the data through unchanged.
        if self.len() <= config.target_points {
            let mut result = StaticDataSeries::new();
            for point in self.iter() {
                result.push(point)?;
            }
            return Ok(result);
        }

        let mut result = StaticDataSeries::new();
        let points = self.as_slice();

        // Ceiling division so every source point is covered by some group.
        #[allow(clippy::manual_div_ceil)] // div_ceil requires Rust 1.73+
        let group_size = (self.len() + config.target_points - 1) / config.target_points;
        let group_size = group_size.max(config.min_group_size);

        let mut i = 0;

        // Emit the first point verbatim when endpoints are preserved.
        if config.preserve_endpoints && !points.is_empty() {
            result.push(points[0])?;
            i = 1;
        }

        // Collapse each run of `group_size` consecutive points into one.
        while i < points.len() {
            let mut end = (i + group_size).min(points.len());

            // Leave the final point for the endpoint-preservation step below.
            if config.preserve_endpoints && end == points.len() && i + 1 < points.len() {
                end = points.len() - 1;
            }

            if i < end {
                let group = &points[i..end];
                if !group.is_empty() {
                    let aggregated_point = self.aggregate_group(group, config.strategy)?;
                    result.push(aggregated_point)?;
                }
            }

            i = end;
        }

        // Emit the last point verbatim when endpoints are preserved.
        if config.preserve_endpoints && points.len() > 1 {
            let last_point = points[points.len() - 1];
            // Only add it if it differs (by X) from the last emitted point.
            if result.is_empty() || result.as_slice()[result.len() - 1].x() != last_point.x() {
                result.push(last_point)?;
            }
        }

        Ok(result)
    }

    fn downsample_lttb<const N: usize>(
        &self,
        config: &DownsamplingConfig,
    ) -> DataResult<StaticDataSeries<T, N>> {
        if self.is_empty() {
            return Ok(StaticDataSeries::new());
        }

        let data_len = self.len();

        // Already small enough: pass through unchanged.
        if data_len <= config.max_points {
            let mut result = StaticDataSeries::new();
            for point in self.iter() {
                result.push(point)?;
            }
            return Ok(result);
        }

        // Below the configured reduction threshold: pass through unchanged.
        let reduction_ratio = data_len as f32 / config.max_points as f32;
        if reduction_ratio < config.min_reduction_ratio {
            let mut result = StaticDataSeries::new();
            for point in self.iter() {
                result.push(point)?;
            }
            return Ok(result);
        }

        let mut result = StaticDataSeries::new();
        let points = self.as_slice();

        // The first point is always part of the output.
        result.push(points[0])?;

        if config.max_points <= 2 {
            // Only room for the endpoints (or just the first point).
            if config.max_points == 2 && points.len() > 1 {
                result.push(points[points.len() - 1])?;
            }
            return Ok(result);
        }

        // Bucket size for the interior points (endpoints get their own slots).
        let bucket_size = (data_len - 2) as f32 / (config.max_points - 2) as f32;
        let mut bucket_start = 1.0;

        // Process each bucket. `floor`/`ceil` are inherent f32 methods with
        // `std` and come from `micromath::F32Ext` in `no_std`, so the same
        // expression compiles in both configurations — no cfg duplication
        // needed.
        for _i in 1..(config.max_points - 1) {
            let bucket_end = bucket_start + bucket_size;
            let start_idx = bucket_start.floor() as usize;
            let end_idx = (bucket_end.ceil() as usize).min(data_len - 1);

            if start_idx >= end_idx {
                continue;
            }

            // Average of the next bucket, used as the third triangle vertex.
            let next_bucket_start = bucket_end;
            let next_bucket_end = next_bucket_start + bucket_size;
            let next_start_idx = next_bucket_start.floor() as usize;
            let next_end_idx = (next_bucket_end.ceil() as usize).min(data_len);

            let avg_next = if next_start_idx < next_end_idx && next_end_idx <= data_len {
                self.calculate_average_point(&points[next_start_idx..next_end_idx])?
            } else {
                points[data_len - 1] // Use last point if no next bucket
            };

            // Select the point in the current bucket forming the largest
            // triangle with the previously selected point and `avg_next`.
            let mut max_area = -1.0;
            let mut selected_idx = start_idx;

            for j in start_idx..end_idx {
                let area = self.calculate_triangle_area(
                    &result.as_slice()[result.len() - 1], // Previous selected point
                    &points[j],                           // Current candidate
                    &avg_next,                            // Average of next bucket
                );

                if area > max_area {
                    max_area = area;
                    selected_idx = j;
                }
            }

            result.push(points[selected_idx])?;
            bucket_start = bucket_end;
        }

        // Always include last point if preserving endpoints
        if config.preserve_endpoints && points.len() > 1 {
            result.push(points[points.len() - 1])?;
        }

        Ok(result)
    }

    fn downsample_uniform<const N: usize>(
        &self,
        config: &DownsamplingConfig,
    ) -> DataResult<StaticDataSeries<T, N>> {
        if self.is_empty() {
            return Ok(StaticDataSeries::new());
        }

        let data_len = self.len();

        // Already small enough: pass through unchanged.
        if data_len <= config.max_points {
            let mut result = StaticDataSeries::new();
            for point in self.iter() {
                result.push(point)?;
            }
            return Ok(result);
        }

        let mut result = StaticDataSeries::new();
        let points = self.as_slice();

        // Step through the data at a fixed fractional stride.
        let step = data_len as f32 / config.max_points as f32;
        let mut current: f32 = 0.0;

        for _ in 0..config.max_points {
            // `round` is inherent with `std` and provided by
            // `micromath::F32Ext` in `no_std`.
            let idx = (current.round() as usize).min(data_len - 1);
            result.push(points[idx])?;
            current += step;
        }

        Ok(result)
    }

    fn calculate_group_stats(&self, points: &[T]) -> DataResult<GroupStats<T>> {
        if points.is_empty() {
            return Err(DataError::insufficient_data("calculate_group_stats", 1, 0));
        }

        let first = points[0];
        let last = points[points.len() - 1];

        // Track extremes, and accumulate sums in f32 for the means.
        let mut min_x = first.x();
        let mut max_x = first.x();
        let mut min_y = first.y();
        let mut max_y = first.y();

        let mut sum_x: f32 = first.x().into();
        let mut sum_y: f32 = first.y().into();

        for point in points.iter().skip(1) {
            let x = point.x();
            let y = point.y();

            if x < min_x {
                min_x = x;
            }
            if x > max_x {
                max_x = x;
            }
            if y < min_y {
                min_y = y;
            }
            if y > max_y {
                max_y = y;
            }

            sum_x += x.into();
            sum_y += y.into();
        }

        let count_f = points.len() as f32;
        let mean_x = T::X::from(sum_x / count_f);
        let mean_y = T::Y::from(sum_y / count_f);

        Ok(GroupStats {
            count: points.len(),
            min_x,
            max_x,
            min_y,
            max_y,
            mean_x,
            mean_y,
            first,
            last,
        })
    }
}

impl<T, const M: usize> StaticDataSeries<T, M>
where
    T: DataPoint + Clone + Copy,
    T::X: PartialOrd
        + Copy
        + core::ops::Add<Output = T::X>
        + core::ops::Div<f32, Output = T::X>
        + Into<f32>
        + From<f32>,
    T::Y: PartialOrd
        + Copy
        + core::ops::Add<Output = T::Y>
        + core::ops::Div<f32, Output = T::Y>
        + Into<f32>
        + From<f32>,
{
    /// Aggregate a group of points using the specified strategy
    ///
    /// # Errors
    /// Returns an error if `points` is empty.
    fn aggregate_group(&self, points: &[T], strategy: AggregationStrategy) -> DataResult<T> {
        if points.is_empty() {
            return Err(DataError::insufficient_data("aggregate_group", 1, 0));
        }

        match strategy {
            AggregationStrategy::Mean => {
                let stats = self.calculate_group_stats(points)?;
                Ok(T::new(stats.mean_x, stats.mean_y))
            }
            AggregationStrategy::Median => {
                // Medians of X and Y are computed independently, so the result
                // is not necessarily one of the input points.
                //
                // NOTE(review): the bounded buffers hold at most 32 coordinates
                // and pushes beyond that are silently dropped, so groups larger
                // than 32 compute the median over the first 32 points only —
                // confirm this limit is acceptable for expected group sizes.
                let mut x_coords: heapless::Vec<T::X, 32> = heapless::Vec::new();
                let mut y_coords: heapless::Vec<T::Y, 32> = heapless::Vec::new();

                for point in points {
                    let _ = x_coords.push(point.x());
                    let _ = y_coords.push(point.y());
                }

                // Simple sorting for small arrays
                x_coords.sort_by(|a, b| a.partial_cmp(b).unwrap_or(core::cmp::Ordering::Equal));
                y_coords.sort_by(|a, b| a.partial_cmp(b).unwrap_or(core::cmp::Ordering::Equal));

                let median_x = if x_coords.len() % 2 == 0 {
                    // Even count: average the two middle values in f32.
                    let mid = x_coords.len() / 2;
                    let sum: f32 = x_coords[mid - 1].into() + x_coords[mid].into();
                    T::X::from(sum / 2.0)
                } else {
                    x_coords[x_coords.len() / 2]
                };

                let median_y = if y_coords.len() % 2 == 0 {
                    let mid = y_coords.len() / 2;
                    let sum: f32 = y_coords[mid - 1].into() + y_coords[mid].into();
                    T::Y::from(sum / 2.0)
                } else {
                    y_coords[y_coords.len() / 2]
                };

                Ok(T::new(median_x, median_y))
            }
            AggregationStrategy::MinMax => {
                // NOTE(review): despite the name, this selects only the point
                // with the maximum Y value in the group; the minimum is not
                // preserved — confirm intended behavior.
                let point_with_max = points
                    .iter()
                    .max_by(|a, b| {
                        a.y()
                            .partial_cmp(&b.y())
                            .unwrap_or(core::cmp::Ordering::Equal)
                    })
                    .unwrap();
                Ok(*point_with_max)
            }
            AggregationStrategy::First => Ok(points[0]),
            AggregationStrategy::Last => Ok(points[points.len() - 1]),
            AggregationStrategy::Max => {
                // `unwrap` is safe: `points` is non-empty (checked above).
                let max_point = points
                    .iter()
                    .max_by(|a, b| {
                        a.y()
                            .partial_cmp(&b.y())
                            .unwrap_or(core::cmp::Ordering::Equal)
                    })
                    .unwrap();
                Ok(*max_point)
            }
            AggregationStrategy::Min => {
                // `unwrap` is safe: `points` is non-empty (checked above).
                let min_point = points
                    .iter()
                    .min_by(|a, b| {
                        a.y()
                            .partial_cmp(&b.y())
                            .unwrap_or(core::cmp::Ordering::Equal)
                    })
                    .unwrap();
                Ok(*min_point)
            }
        }
    }

    /// Calculate the average point of a group for LTTB algorithm
    ///
    /// # Errors
    /// Returns an error if `points` is empty.
    fn calculate_average_point(&self, points: &[T]) -> DataResult<T> {
        if points.is_empty() {
            return Err(DataError::insufficient_data(
                "calculate_average_point",
                1,
                0,
            ));
        }

        let mut sum_x: f32 = points[0].x().into();
        let mut sum_y: f32 = points[0].y().into();

        for point in points.iter().skip(1) {
            sum_x += point.x().into();
            sum_y += point.y().into();
        }

        let count = points.len() as f32;
        let avg_x = T::X::from(sum_x / count);
        let avg_y = T::Y::from(sum_y / count);

        Ok(T::new(avg_x, avg_y))
    }

    /// Calculate triangle area for LTTB algorithm
    ///
    /// Uses the shoelace (determinant) formula:
    /// `2A = ax*(by - cy) + bx*(cy - ay) + cx*(ay - by)`
    fn calculate_triangle_area(&self, a: &T, b: &T, c: &T) -> f32 {
        let ax: f32 = a.x().into();
        let ay: f32 = a.y().into();
        let bx: f32 = b.x().into();
        let by: f32 = b.y().into();
        let cx: f32 = c.x().into();
        let cy: f32 = c.y().into();

        // BUGFIX: the previous code used `- cx * (ay - by)`, which negates the
        // third term of the determinant and does not yield the triangle area
        // (e.g. a=(0,0), b=(2,3), c=(4,1): old formula gave 7, the true area
        // is 5). That error biased LTTB's point selection.
        let det = ax * (by - cy) + bx * (cy - ay) + cx * (ay - by);

        det.abs() * 0.5
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::data::{Point2D, StaticDataSeries};

    // Defaults documented on the config types must stay stable; these tests
    // pin them so accidental changes are caught.
    #[test]
    fn test_aggregation_config_default() {
        let config = AggregationConfig::default();
        assert_eq!(config.strategy, AggregationStrategy::Mean);
        assert_eq!(config.target_points, 100);
        assert!(config.preserve_endpoints);
        assert_eq!(config.min_group_size, 1);
    }

    #[test]
    fn test_downsampling_config_default() {
        let config = DownsamplingConfig::default();
        assert_eq!(config.max_points, 1000);
        assert!(config.preserve_endpoints);
        assert_eq!(config.min_reduction_ratio, 1.5);
    }

    // Verifies extremes, endpoints, and count over a small three-point series.
    #[test]
    fn test_group_stats_calculation() {
        let mut series: StaticDataSeries<Point2D, 256> = StaticDataSeries::new();
        series.push(Point2D::new(0.0, 10.0)).unwrap();
        series.push(Point2D::new(1.0, 20.0)).unwrap();
        series.push(Point2D::new(2.0, 5.0)).unwrap();

        let stats = series.calculate_group_stats(series.as_slice()).unwrap();

        assert_eq!(stats.count, 3);
        assert_eq!(stats.min_x, 0.0);
        assert_eq!(stats.max_x, 2.0);
        assert_eq!(stats.min_y, 5.0);
        assert_eq!(stats.max_y, 20.0);
        assert_eq!(stats.first.x(), 0.0);
        assert_eq!(stats.last.x(), 2.0);
    }

    // With preserve_endpoints disabled, four points aggregate into exactly
    // two group means.
    #[test]
    fn test_mean_aggregation() {
        let mut series: StaticDataSeries<Point2D, 256> = StaticDataSeries::new();
        series.push(Point2D::new(0.0, 10.0)).unwrap();
        series.push(Point2D::new(1.0, 20.0)).unwrap();
        series.push(Point2D::new(2.0, 30.0)).unwrap();
        series.push(Point2D::new(3.0, 40.0)).unwrap();

        let config = AggregationConfig {
            strategy: AggregationStrategy::Mean,
            target_points: 2,
            preserve_endpoints: false,
            min_group_size: 1,
        };

        let aggregated: StaticDataSeries<Point2D, 256> = series.aggregate(&config).unwrap();
        assert_eq!(aggregated.len(), 2);

        // First group: (0,10) and (1,20) -> mean should be (0.5, 15)
        let first = aggregated.get(0).unwrap();
        assert_eq!(first.x(), 0.5);
        assert_eq!(first.y(), 15.0);

        // Second group: (2,30) and (3,40) -> mean should be (2.5, 35)
        let second = aggregated.get(1).unwrap();
        assert_eq!(second.x(), 2.5);
        assert_eq!(second.y(), 35.0);
    }

    // Uniform downsampling of 10 points to max_points = 5 yields exactly 5.
    #[test]
    fn test_uniform_downsampling() {
        let mut series: StaticDataSeries<Point2D, 256> = StaticDataSeries::new();
        for i in 0..10 {
            series
                .push(Point2D::new(i as f32, (i * 10) as f32))
                .unwrap();
        }

        let config = DownsamplingConfig {
            max_points: 5,
            preserve_endpoints: true,
            min_reduction_ratio: 1.0,
        };

        let downsampled: StaticDataSeries<Point2D, 256> =
            series.downsample_uniform(&config).unwrap();
        assert_eq!(downsampled.len(), 5);
    }

    // When the series is already at or below target_points, aggregation is a
    // pass-through.
    #[test]
    fn test_no_aggregation_when_not_needed() {
        let mut series: StaticDataSeries<Point2D, 256> = StaticDataSeries::new();
        series.push(Point2D::new(0.0, 10.0)).unwrap();
        series.push(Point2D::new(1.0, 20.0)).unwrap();

        let config = AggregationConfig {
            target_points: 5, // More than we have
            ..Default::default()
        };

        let aggregated: StaticDataSeries<Point2D, 256> = series.aggregate(&config).unwrap();
        assert_eq!(aggregated.len(), 2); // Should be unchanged
        assert_eq!(aggregated.get(0).unwrap().x(), 0.0);
        assert_eq!(aggregated.get(1).unwrap().x(), 1.0);
    }
}