use std::collections::HashMap;
use crate::core::TimeSeries;
/// Visibility criterion used when building a visibility graph from a time series.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum VisibilityType {
    /// Natural visibility: `j` sees `i` when every intermediate value lies
    /// strictly below the straight line joining `(j, v_j)` and `(i, v_i)`.
    Natural,
    /// Horizontal visibility: `j` sees `i` when every intermediate value is
    /// strictly below `min(v_j, v_i)`.
    Horizontal,
}
/// Builder for the weighted edge set of a visibility graph over a borrowed
/// time series.
///
/// Trait bounds live on the `impl` blocks that need them rather than on the
/// struct definition (idiomatic Rust; loosening the definition is
/// backward-compatible for all existing uses).
pub struct VisibilityEdges<'a, T, F> {
    /// Input series; values are read as `Option<T>` and unwrapped on access.
    series: &'a TimeSeries<T>,
    /// Which visibility criterion to apply when deciding edges.
    rule: VisibilityType,
    /// Weight callback `(j, i, v_j, v_i) -> weight` invoked per edge `(j, i)`.
    weight_fn: F,
}
impl<'a, T, F> VisibilityEdges<'a, T, F>
where
    T: Copy + PartialOrd + Into<f64>,
    F: Fn(usize, usize, T, T) -> f64,
{
    /// Creates a new edge builder over `series` using the given visibility
    /// `rule`; `weight_fn(j, i, v_j, v_i)` supplies the weight of edge `(j, i)`.
    pub fn new(series: &'a TimeSeries<T>, rule: VisibilityType, weight_fn: F) -> Self {
        Self {
            series,
            rule,
            weight_fn,
        }
    }

    /// Computes every visibility edge `(j, i)` with `j < i` and its weight.
    ///
    /// A single left-to-right sweep keeps a stack of earlier indices; for the
    /// natural rule the stack is first pruned to the indices that can still
    /// see any future target (see [`Self::update_envelope`]).
    ///
    /// # Panics
    /// Panics if any `series.values[k]` in range is `None`; values are
    /// unwrapped unconditionally.
    pub fn compute_edges(&self) -> HashMap<(usize, usize), f64> {
        let mut edges = HashMap::new();
        let mut stack: Vec<usize> = Vec::new();
        for i in 0..self.series.len() {
            self.update_envelope(&mut stack, i);
            self.add_visible_edges(&mut edges, &stack, i);
            stack.push(i);
        }
        edges
    }

    /// Natural rule only: pops stack entries that `should_pop` judges
    /// permanently shadowed before target `i` is processed. The horizontal
    /// rule keeps the full history (no pruning).
    fn update_envelope(&self, stack: &mut Vec<usize>, i: usize) {
        if !matches!(self.rule, VisibilityType::Natural) {
            return;
        }
        while stack.len() >= 2 {
            let j = *stack.last().unwrap();
            let k = stack[stack.len() - 2];
            if self.should_pop(k, j, i) {
                stack.pop();
            } else {
                break;
            }
        }
    }

    /// Scans the candidate stack right-to-left and records every index that
    /// sees target `i`, weighting each edge via `weight_fn`.
    fn add_visible_edges(
        &self,
        edges: &mut HashMap<(usize, usize), f64>,
        stack: &[usize],
        i: usize,
    ) {
        let vi = self.series.values[i].unwrap();
        for &j in stack.iter().rev() {
            let vj = self.series.values[j].unwrap();
            if self.is_visible(j, i) {
                let w = (self.weight_fn)(j, i, vj, vi);
                edges.insert((j, i), w);
            }
            // Horizontal rule early exit: once v_j >= v_i, every j' < j is
            // blocked, because horizontal visibility of (j', i) requires all
            // intermediates (including v_j) to be strictly below
            // min(v_j', v_i) <= v_i. We must NOT stop on mere invisibility of
            // j: j can be hidden by a short intermediate peak (>= v_j but
            // < v_i) while an earlier, taller index still sees i.
            if matches!(self.rule, VisibilityType::Horizontal) && vj >= vi {
                break;
            }
        }
    }

    /// Dispatches to the criterion selected by `self.rule`.
    fn is_visible(&self, j: usize, i: usize) -> bool {
        match self.rule {
            VisibilityType::Natural => self.is_visible_natural(j, i),
            VisibilityType::Horizontal => self.is_visible_horizontal(j, i),
        }
    }

    /// Natural visibility test: every value strictly between `j` and `i` must
    /// lie strictly below the chord from `(j, v_j)` to `(i, v_i)`. Adjacent
    /// indices are trivially visible (empty intermediate range).
    fn is_visible_natural(&self, j: usize, i: usize) -> bool {
        let vj: f64 = self.series.values[j].unwrap().into();
        let vi: f64 = self.series.values[i].unwrap().into();
        // Gaps longer than 8 points are delegated to the SIMD kernel when the
        // feature and architecture allow; behavior is assumed identical to the
        // scalar path below — NOTE(review): confirm against the SIMD impl.
        #[cfg(all(feature = "simd", any(target_arch = "x86_64", target_arch = "aarch64")))]
        {
            if i - j > 8 {
                let intermediate: Vec<f64> = (j + 1..i)
                    .map(|k| self.series.values[k].unwrap().into())
                    .collect();
                return crate::performance::simd::SimdOps::is_visible_natural_simd(
                    vj, vi, &intermediate, j, i,
                );
            }
        }
        (j + 1..i).all(|k| {
            let vk: f64 = self.series.values[k].unwrap().into();
            // Height of the j->i chord at abscissa k (indices as time).
            let line_height = vj + (vi - vj) * ((k - j) as f64 / (i - j) as f64);
            vk < line_height
        })
    }

    /// Horizontal visibility test: every value strictly between `j` and `i`
    /// must be strictly below `min(v_j, v_i)`. Adjacent indices are trivially
    /// visible.
    fn is_visible_horizontal(&self, j: usize, i: usize) -> bool {
        let vj = self.series.values[j].unwrap();
        let vi = self.series.values[i].unwrap();
        // min via PartialOrd (T need not be Ord, e.g. floats).
        let min_h = if vj < vi { vj } else { vi };
        #[cfg(all(feature = "simd", any(target_arch = "x86_64", target_arch = "aarch64")))]
        {
            if i - j > 8 {
                let intermediate: Vec<f64> = (j + 1..i)
                    .map(|k| self.series.values[k].unwrap().into())
                    .collect();
                return crate::performance::simd::SimdOps::is_visible_horizontal_simd(
                    vj.into(), vi.into(), &intermediate,
                );
            }
        }
        (j + 1..i).all(|k| self.series.values[k].unwrap() < min_h)
    }

    /// Decides whether stack top `j` (with `k` beneath it) can be discarded
    /// before processing target `i`: `j` must lie below the k->i chord and be
    /// judged permanently shadowed. Adjacent `(j, i)` pairs are never popped
    /// (they are always mutually visible).
    fn should_pop(&self, k: usize, j: usize, i: usize) -> bool {
        if self.is_adjacent(j, i) {
            return false;
        }
        let (vk, vj, vi) = self.get_values(k, j, i);
        let (tk, tj, ti) = (k as f64, j as f64, i as f64);
        let expected_height = self.calculate_expected_height(vk, vi, tk, tj, ti);
        if vj < expected_height {
            self.is_permanently_shadowed(vk, vj, vi, tk, tj, ti)
        } else {
            false
        }
    }

    /// True when `j` immediately precedes `i` (no intermediate samples).
    fn is_adjacent(&self, j: usize, i: usize) -> bool {
        j + 1 >= i
    }

    /// Fetches the three sample values as `f64` via `Into`.
    ///
    /// # Panics
    /// Panics if any of the three values is `None`.
    fn get_values(&self, k: usize, j: usize, i: usize) -> (f64, f64, f64) {
        (
            self.series.values[k].unwrap().into(),
            self.series.values[j].unwrap().into(),
            self.series.values[i].unwrap().into(),
        )
    }

    /// Height of the straight line from `(tk, vk)` to `(ti, vi)` evaluated at
    /// `tj` — the level `v_j` would need to reach to break the k->i chord.
    fn calculate_expected_height(&self, vk: f64, vi: f64, tk: f64, tj: f64, ti: f64) -> f64 {
        vk + (vi - vk) * ((tj - tk) / (ti - tk))
    }

    /// Shadow heuristic: `j` is considered permanently hidden when the slope
    /// from `k` to `j` is shallower than the slope from `k` to `i`, i.e. `k`
    /// rises over `j` toward `i`. NOTE(review): this encodes the envelope
    /// invariant that popped indices can never see a later target — verify
    /// against the intended natural-visibility envelope proof.
    fn is_permanently_shadowed(&self, vk: f64, vj: f64, vi: f64, tk: f64, tj: f64, ti: f64) -> bool {
        let slope_kj = (vj - vk) / (tj - tk);
        let slope_ki = (vi - vk) / (ti - tk);
        slope_kj < slope_ki
    }
}
#[cfg(feature = "parallel")]
impl<'a, T, F> VisibilityEdges<'a, T, F>
where
    T: Copy + PartialOrd + Into<f64> + Send + Sync,
    F: Fn(usize, usize, T, T) -> f64 + Send + Sync,
{
    /// Parallel variant of `compute_edges`: targets are split into chunks and
    /// processed on the rayon pool, then the per-chunk edge maps are merged.
    /// Small inputs fall back to the sequential sweep, whose bookkeeping is
    /// cheaper than the scheduling overhead.
    pub fn compute_edges_parallel(&self) -> HashMap<(usize, usize), f64> {
        let n = self.series.len();
        if self.should_use_sequential(n) {
            return self.compute_edges();
        }
        let chunk_results = self.process_chunks_in_parallel(n);
        self.merge_chunk_results(chunk_results)
    }

    /// Heuristic cutoff below which parallelism is not worth the overhead.
    fn should_use_sequential(&self, n: usize) -> bool {
        n <= 100
    }

    /// Splits target indices `0..n` into chunks of 64 and computes each
    /// chunk's edges on the rayon pool.
    fn process_chunks_in_parallel(&self, n: usize) -> Vec<HashMap<(usize, usize), f64>> {
        use rayon::prelude::*;
        (0..n)
            .collect::<Vec<_>>()
            .par_chunks(64)
            .map(|target_chunk| self.process_target_chunk(target_chunk))
            .collect()
    }

    /// Computes all edges whose target index lies in `target_chunk`. Each
    /// target rebuilds its own candidate stack from scratch, so chunks are
    /// fully independent.
    fn process_target_chunk(&self, target_chunk: &[usize]) -> HashMap<(usize, usize), f64> {
        let mut local_edges = HashMap::new();
        for &i in target_chunk {
            let stack = self.build_envelope_stack_for_target(i);
            self.add_visible_edges_from_stack(&mut local_edges, &stack, i);
        }
        local_edges
    }

    /// Replays the sequential sweep over `0..target` to reconstruct the
    /// candidate stack that `target` would observe.
    fn build_envelope_stack_for_target(&self, target: usize) -> Vec<usize> {
        let mut stack = Vec::new();
        for j in 0..target {
            self.update_envelope_for_parallel(&mut stack, j);
            stack.push(j);
        }
        stack
    }

    /// Same envelope pruning as the sequential `update_envelope`: only the
    /// natural rule pops entries judged permanently shadowed.
    fn update_envelope_for_parallel(&self, stack: &mut Vec<usize>, j: usize) {
        if matches!(self.rule, VisibilityType::Natural) {
            while stack.len() >= 2 {
                let prev_j = *stack.last().unwrap();
                let prev_k = stack[stack.len() - 2];
                if self.should_pop(prev_k, prev_j, j) {
                    stack.pop();
                } else {
                    break;
                }
            }
        }
    }

    /// Scans the candidate stack right-to-left and records every index that
    /// sees `target`, weighting each edge via `weight_fn`.
    fn add_visible_edges_from_stack(
        &self,
        edges: &mut HashMap<(usize, usize), f64>,
        stack: &[usize],
        target: usize,
    ) {
        let vi = self.series.values[target].unwrap();
        for &j in stack.iter().rev() {
            let vj = self.series.values[j].unwrap();
            if self.is_visible(j, target) {
                let w = (self.weight_fn)(j, target, vj, vi);
                edges.insert((j, target), w);
            }
            // Horizontal rule early exit: a value v_j >= v_target blocks every
            // earlier index (visibility needs all intermediates strictly below
            // min(v_j', v_target) <= v_target). Stopping on mere invisibility
            // of j would wrongly skip earlier, taller indices that still see
            // the target.
            if matches!(self.rule, VisibilityType::Horizontal) && vj >= vi {
                break;
            }
        }
    }

    /// Unions the per-chunk maps. Key sets are disjoint across chunks — each
    /// chunk only inserts keys `(j, target)` for its own targets — so no entry
    /// is ever overwritten.
    fn merge_chunk_results(
        &self,
        chunk_results: Vec<HashMap<(usize, usize), f64>>,
    ) -> HashMap<(usize, usize), f64> {
        chunk_results.into_iter().flatten().collect()
    }
}