use crate::{LinearConstraint, LogicResult};
use scirs2_core::ndarray::Array1;
use std::collections::VecDeque;
/// Online learner that incrementally refines a single [`LinearConstraint`]
/// from a stream of feasibility-labeled samples.
#[derive(Debug, Clone)]
pub struct OnlineConstraintLearner {
    /// Current estimate of the learned constraint.
    constraint: LinearConstraint,
    /// Intended refinement step size (computed in `refine_constraint` but the
    /// resulting update is not yet applied to the constraint).
    #[allow(dead_code)]
    learning_rate: f32,
    /// Sliding window of `(sample, is_feasible)` observations.
    data_buffer: VecDeque<(Array1<f32>, bool)>,
    /// Capacity of `data_buffer`; the oldest entry is evicted beyond this.
    #[allow(dead_code)]
    max_buffer_size: usize,
    /// Total number of observations processed.
    update_count: usize,
}
impl OnlineConstraintLearner {
    /// Builds a learner seeded with `initial_constraint`.
    pub fn new(
        initial_constraint: LinearConstraint,
        learning_rate: f32,
        max_buffer_size: usize,
    ) -> Self {
        Self {
            constraint: initial_constraint,
            learning_rate,
            data_buffer: VecDeque::new(),
            max_buffer_size,
            update_count: 0,
        }
    }

    /// Records one labeled observation, runs a refinement step, and bumps the
    /// update counter.
    pub fn observe(&mut self, sample: Array1<f32>, is_feasible: bool) -> LogicResult<()> {
        self.data_buffer.push_back((sample.clone(), is_feasible));
        // Keep the sliding window within capacity (at most one eviction per call).
        while self.data_buffer.len() > self.max_buffer_size {
            self.data_buffer.pop_front();
        }
        self.refine_constraint(&sample, is_feasible)?;
        self.update_count += 1;
        Ok(())
    }

    /// Attempts to adjust the constraint when its prediction disagrees with
    /// the observed label.
    ///
    /// Currently a stub: the step magnitude `update_scale` is computed but
    /// `self.constraint` itself is not modified yet.
    fn refine_constraint(&mut self, sample: &Array1<f32>, is_feasible: bool) -> LogicResult<()> {
        let values = sample.as_slice().unwrap_or(&[]);
        // Agreement between prediction and label means there is nothing to fix.
        if self.constraint.check(values) == is_feasible {
            return Ok(());
        }
        let violation = self.constraint.violation(values);
        let update_scale = if is_feasible {
            // Constraint wrongly rejects a feasible point: relax by the violation.
            self.learning_rate * violation
        } else {
            // Constraint wrongly accepts an infeasible point: tighten.
            -self.learning_rate
        };
        // TODO(review): apply `update_scale` to the constraint parameters.
        let _ = update_scale;
        Ok(())
    }

    /// Current constraint estimate.
    pub fn get_constraint(&self) -> &LinearConstraint {
        &self.constraint
    }

    /// Number of observations processed so far.
    pub fn update_count(&self) -> usize {
        self.update_count
    }

    /// Fraction of buffered observations the current constraint classifies
    /// correctly; 0.0 when the buffer is empty.
    pub fn confidence(&self) -> f32 {
        let total = self.data_buffer.len();
        if total == 0 {
            return 0.0;
        }
        let mut correct = 0usize;
        for (sample, is_feasible) in &self.data_buffer {
            if self.constraint.check(sample.as_slice().unwrap_or(&[])) == *is_feasible {
                correct += 1;
            }
        }
        correct as f32 / total as f32
    }
}
/// Discovers candidate per-dimension upper-bound constraints by flagging
/// samples whose z-score deviates strongly from a buffer of normal samples.
#[derive(Debug, Clone)]
pub struct AnomalyBasedConstraintDiscovery {
/// Sliding window of samples considered normal.
normal_samples: VecDeque<Array1<f32>>,
/// Capacity of `normal_samples`.
max_samples: usize,
/// Z-score above which a dimension is flagged as anomalous.
anomaly_threshold: f32,
/// Upper-bound constraints discovered so far (approximately deduplicated).
discovered_constraints: Vec<LinearConstraint>,
}
impl AnomalyBasedConstraintDiscovery {
    /// Creates an empty detector with the given window size and z-score cutoff.
    pub fn new(max_samples: usize, anomaly_threshold: f32) -> Self {
        Self {
            normal_samples: VecDeque::new(),
            max_samples,
            anomaly_threshold,
            discovered_constraints: Vec::new(),
        }
    }

    /// Appends a known-normal sample, evicting the oldest once over capacity.
    pub fn add_normal_sample(&mut self, sample: Array1<f32>) {
        self.normal_samples.push_back(sample);
        while self.normal_samples.len() > self.max_samples {
            self.normal_samples.pop_front();
        }
    }

    /// Returns `true` when any dimension of `sample` lies more than
    /// `anomaly_threshold` standard deviations from that dimension's mean
    /// over the normal buffer. Every anomalous dimension also triggers
    /// discovery of an upper-bound constraint.
    ///
    /// With fewer than two normal samples no statistics are computed and
    /// nothing is flagged.
    pub fn detect_anomaly(&mut self, sample: &Array1<f32>) -> bool {
        if self.normal_samples.len() < 2 {
            return false;
        }
        let count = self.normal_samples.len() as f32;
        let mut anomalous = false;
        for (dim, &value) in sample.iter().enumerate() {
            // Population mean and variance of this dimension over the buffer.
            let mean = self.normal_samples.iter().map(|s| s[dim]).sum::<f32>() / count;
            let variance = self
                .normal_samples
                .iter()
                .map(|s| {
                    let delta = s[dim] - mean;
                    delta * delta
                })
                .sum::<f32>()
                / count;
            let std_dev = variance.sqrt();
            // Epsilon keeps the ratio finite for zero-variance dimensions.
            let z_score = (value - mean).abs() / (std_dev + 1e-8);
            if z_score > self.anomaly_threshold {
                anomalous = true;
                self.discover_bound_constraint(dim, mean, std_dev);
            }
        }
        anomalous
    }

    /// Records an `x[dim] <= mean + threshold * std_dev` constraint unless an
    /// approximately equal one (coefficients within 0.1) is already known.
    fn discover_bound_constraint(&mut self, dim: usize, mean: f32, std_dev: f32) {
        let upper_bound = mean + self.anomaly_threshold * std_dev;
        // Coefficient vector selecting only dimension `dim`.
        let mut coeffs = vec![0.0; dim + 1];
        coeffs[dim] = 1.0;
        let candidate = LinearConstraint::less_eq(coeffs, upper_bound);
        let already_known = self.discovered_constraints.iter().any(|existing| {
            existing.coefficients().len() == candidate.coefficients().len()
                && existing
                    .coefficients()
                    .iter()
                    .zip(candidate.coefficients().iter())
                    .all(|(a, b)| (a - b).abs() < 0.1)
        });
        if !already_known {
            self.discovered_constraints.push(candidate);
        }
    }

    /// All constraints discovered so far.
    pub fn discovered_constraints(&self) -> &[LinearConstraint] {
        &self.discovered_constraints
    }

    /// Number of discovered constraints.
    pub fn num_discovered(&self) -> usize {
        self.discovered_constraints.len()
    }
}
/// Active learner that collects samples near a constraint's decision
/// boundary and surfaces the most uncertain unlabeled ones for querying.
#[derive(Debug, Clone)]
pub struct ActiveConstraintBoundaryLearner {
    /// Constraint whose boundary is being probed.
    constraint: LinearConstraint,
    /// Near-boundary samples with an optional feasibility label
    /// (`None` = not yet labeled).
    boundary_samples: Vec<(Array1<f32>, Option<bool>)>,
    /// Maximum |violation| for a sample to count as near the boundary.
    uncertainty_threshold: f32,
    /// Capacity of `boundary_samples`; the oldest entry is dropped first.
    max_boundary_samples: usize,
}
impl ActiveConstraintBoundaryLearner {
    /// Creates a learner around `initial_constraint`.
    ///
    /// `uncertainty_threshold` is the maximum absolute violation for a sample
    /// to be kept as a boundary sample; `max_boundary_samples` caps the pool.
    pub fn new(
        initial_constraint: LinearConstraint,
        uncertainty_threshold: f32,
        max_boundary_samples: usize,
    ) -> Self {
        Self {
            constraint: initial_constraint,
            boundary_samples: Vec::new(),
            uncertainty_threshold,
            max_boundary_samples,
        }
    }

    /// Absolute constraint violation of `sample`, used as a proxy for how
    /// close the sample lies to the decision boundary (0 = exactly on it).
    fn boundary_distance(&self, sample: &Array1<f32>) -> f32 {
        self.constraint
            .violation(sample.as_slice().unwrap_or(&[]))
            .abs()
    }

    /// Shared insertion path for labeled and unlabeled samples: keeps the
    /// sample only when it lies inside the uncertainty band, evicting the
    /// oldest entry on overflow.
    fn push_if_near_boundary(&mut self, sample: Array1<f32>, label: Option<bool>) {
        if self.boundary_distance(&sample) < self.uncertainty_threshold {
            self.boundary_samples.push((sample, label));
            if self.boundary_samples.len() > self.max_boundary_samples {
                // NOTE: O(n) front removal; fine for the small caps used here.
                self.boundary_samples.remove(0);
            }
        }
    }

    /// Returns the unlabeled sample closest to the boundary (most uncertain),
    /// or `None` when no unlabeled samples are stored.
    pub fn query_next(&self) -> Option<Array1<f32>> {
        self.boundary_samples
            .iter()
            .filter(|(_, label)| label.is_none())
            .min_by(|(s1, _), (s2, _)| {
                let v1 = self.boundary_distance(s1);
                let v2 = self.boundary_distance(s2);
                v1.partial_cmp(&v2).unwrap_or(std::cmp::Ordering::Equal)
            })
            .map(|(s, _)| s.clone())
    }

    /// Adds a sample with a known feasibility label.
    pub fn add_labeled_sample(&mut self, sample: Array1<f32>, is_feasible: bool) {
        self.push_if_near_boundary(sample, Some(is_feasible));
    }

    /// Adds a sample whose feasibility is not yet known.
    pub fn add_unlabeled_sample(&mut self, sample: Array1<f32>) {
        self.push_if_near_boundary(sample, None);
    }

    /// Refines the constraint from labeled boundary samples.
    ///
    /// Currently a stub: it gathers the labeled samples but does not yet
    /// adjust `self.constraint`; with fewer than two labels it returns early.
    pub fn refine(&mut self) -> LogicResult<()> {
        let labeled: Vec<_> = self
            .boundary_samples
            .iter()
            .filter_map(|(s, l)| l.map(|label| (s, label)))
            .collect();
        if labeled.len() < 2 {
            return Ok(());
        }
        Ok(())
    }

    /// Current constraint estimate.
    pub fn get_constraint(&self) -> &LinearConstraint {
        &self.constraint
    }

    /// Total number of stored boundary samples (labeled + unlabeled).
    pub fn num_boundary_samples(&self) -> usize {
        self.boundary_samples.len()
    }

    /// Number of stored samples still awaiting a label.
    pub fn num_unlabeled(&self) -> usize {
        self.boundary_samples
            .iter()
            .filter(|(_, l)| l.is_none())
            .count()
    }
}
/// Tunes a constraint from scalar satisfaction feedback, aiming for a target
/// average satisfaction level.
#[derive(Debug, Clone)]
pub struct FeedbackConstraintTuner {
    /// Constraint being tuned.
    constraint: LinearConstraint,
    /// History of `(violation, satisfaction)` pairs.
    feedback_history: Vec<(f32, f32)>,
    /// Intended adjustment step size (not yet applied by `tune`).
    #[allow(dead_code)]
    adaptation_rate: f32,
    /// Desired average satisfaction level.
    target_satisfaction: f32,
}
impl FeedbackConstraintTuner {
    /// Creates a tuner for `initial_constraint` targeting an average
    /// satisfaction of `target_satisfaction`.
    pub fn new(
        initial_constraint: LinearConstraint,
        adaptation_rate: f32,
        target_satisfaction: f32,
    ) -> Self {
        Self {
            constraint: initial_constraint,
            feedback_history: Vec::new(),
            adaptation_rate,
            target_satisfaction,
        }
    }

    /// Records one `(violation, satisfaction)` feedback pair for `sample`
    /// and runs a tuning pass.
    pub fn add_feedback(&mut self, sample: &Array1<f32>, satisfaction: f32) -> LogicResult<()> {
        let violation = self.constraint.violation(sample.as_slice().unwrap_or(&[]));
        self.feedback_history.push((violation, satisfaction));
        self.tune()?;
        Ok(())
    }

    /// Adjusts the constraint toward the target satisfaction level.
    ///
    /// Requires at least 5 feedback samples. Currently a stub: the gap to the
    /// target is computed (reusing `average_satisfaction` rather than
    /// re-deriving the mean inline) but `self.constraint` is not modified yet.
    fn tune(&mut self) -> LogicResult<()> {
        if self.feedback_history.len() < 5 {
            return Ok(());
        }
        let satisfaction_gap = self.target_satisfaction - self.average_satisfaction();
        if satisfaction_gap.abs() > 0.1 {
            // TODO(review): apply `adaptation_rate * satisfaction_gap` to the
            // constraint once the update rule is decided.
            let _ = satisfaction_gap;
        }
        Ok(())
    }

    /// Current constraint.
    pub fn get_constraint(&self) -> &LinearConstraint {
        &self.constraint
    }

    /// Mean satisfaction over all recorded feedback; 0.0 with no feedback.
    pub fn average_satisfaction(&self) -> f32 {
        if self.feedback_history.is_empty() {
            return 0.0;
        }
        self.feedback_history.iter().map(|(_, s)| s).sum::<f32>()
            / self.feedback_history.len() as f32
    }

    /// Number of feedback samples recorded.
    pub fn num_feedback_samples(&self) -> usize {
        self.feedback_history.len()
    }
}
/// Facade combining the four online constraint-learning strategies; each
/// strategy can be toggled independently via the `set_use_*` methods.
#[derive(Debug, Clone)]
pub struct OnlineLearningSystem {
/// Incremental learner fed by labeled samples.
incremental_learner: OnlineConstraintLearner,
/// Z-score-based constraint discovery fed by normal/unlabeled samples.
anomaly_detector: AnomalyBasedConstraintDiscovery,
/// Boundary-sample collector used for active-learning queries.
active_learner: ActiveConstraintBoundaryLearner,
/// Satisfaction-feedback-driven tuner.
feedback_tuner: FeedbackConstraintTuner,
/// Enable/disable flags for each strategy (all default to enabled).
use_incremental: bool,
use_anomaly: bool,
use_active: bool,
use_feedback: bool,
}
impl OnlineLearningSystem {
    /// Builds a system with default hyper-parameters and every strategy enabled.
    pub fn new(initial_constraint: LinearConstraint) -> Self {
        let incremental_learner =
            OnlineConstraintLearner::new(initial_constraint.clone(), 0.01, 1000);
        let anomaly_detector = AnomalyBasedConstraintDiscovery::new(1000, 3.0);
        let active_learner =
            ActiveConstraintBoundaryLearner::new(initial_constraint.clone(), 0.1, 100);
        let feedback_tuner = FeedbackConstraintTuner::new(initial_constraint, 0.01, 0.8);
        Self {
            incremental_learner,
            anomaly_detector,
            active_learner,
            feedback_tuner,
            use_incremental: true,
            use_anomaly: true,
            use_active: true,
            use_feedback: true,
        }
    }

    /// Routes a labeled sample to every enabled strategy that consumes labels.
    /// Feasible samples additionally seed the anomaly detector's normal buffer.
    pub fn process_labeled_sample(
        &mut self,
        sample: Array1<f32>,
        is_feasible: bool,
    ) -> LogicResult<()> {
        if self.use_incremental {
            self.incremental_learner
                .observe(sample.clone(), is_feasible)?;
        }
        if self.use_active {
            self.active_learner
                .add_labeled_sample(sample.clone(), is_feasible);
        }
        if is_feasible && self.use_anomaly {
            self.anomaly_detector.add_normal_sample(sample);
        }
        Ok(())
    }

    /// Routes an unlabeled sample to the anomaly detector and active learner.
    pub fn process_unlabeled_sample(&mut self, sample: Array1<f32>) {
        if self.use_anomaly {
            // Result is intentionally dropped; discovered constraints are
            // retrievable via `discovered_constraints()`.
            self.anomaly_detector.detect_anomaly(&sample);
        }
        if self.use_active {
            self.active_learner.add_unlabeled_sample(sample);
        }
    }

    /// Forwards satisfaction feedback to the tuner when enabled.
    pub fn add_feedback(&mut self, sample: &Array1<f32>, satisfaction: f32) -> LogicResult<()> {
        if self.use_feedback {
            self.feedback_tuner.add_feedback(sample, satisfaction)?;
        }
        Ok(())
    }

    /// Best current constraint estimate (taken from the incremental learner).
    pub fn get_best_constraint(&self) -> &LinearConstraint {
        self.incremental_learner.get_constraint()
    }

    /// Confidence of the incremental learner's current constraint.
    pub fn confidence(&self) -> f32 {
        self.incremental_learner.confidence()
    }

    /// Constraints discovered by the anomaly detector.
    pub fn discovered_constraints(&self) -> &[LinearConstraint] {
        self.anomaly_detector.discovered_constraints()
    }

    /// Most uncertain unlabeled sample to query next, if active learning is on.
    pub fn query_next(&self) -> Option<Array1<f32>> {
        if !self.use_active {
            return None;
        }
        self.active_learner.query_next()
    }

    /// Enables/disables the incremental learner.
    pub fn set_use_incremental(&mut self, use_it: bool) {
        self.use_incremental = use_it;
    }

    /// Enables/disables anomaly-based discovery.
    pub fn set_use_anomaly(&mut self, use_it: bool) {
        self.use_anomaly = use_it;
    }

    /// Enables/disables active boundary learning.
    pub fn set_use_active(&mut self, use_it: bool) {
        self.use_active = use_it;
    }

    /// Enables/disables feedback tuning.
    pub fn set_use_feedback(&mut self, use_it: bool) {
        self.use_feedback = use_it;
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_online_learner_basic() -> LogicResult<()> {
        let constraint = LinearConstraint::less_eq(vec![1.0], 5.0);
        let mut learner = OnlineConstraintLearner::new(constraint, 0.1, 100);
        learner.observe(Array1::from_vec(vec![3.0]), true)?;
        learner.observe(Array1::from_vec(vec![7.0]), false)?;
        assert_eq!(learner.update_count(), 2);
        assert!(learner.confidence() > 0.0);
        Ok(())
    }

    #[test]
    fn test_anomaly_detection() {
        let mut detector = AnomalyBasedConstraintDiscovery::new(100, 3.0);
        // Identical normal samples give (near-)zero variance, so a far-away
        // point must produce a huge z-score.
        for _ in 0..20 {
            detector.add_normal_sample(Array1::from_vec(vec![5.0, 10.0]));
        }
        let is_anomaly = detector.detect_anomaly(&Array1::from_vec(vec![50.0, 100.0]));
        assert!(is_anomaly);
    }

    #[test]
    fn test_active_learning() {
        let constraint = LinearConstraint::less_eq(vec![1.0], 5.0);
        let mut learner = ActiveConstraintBoundaryLearner::new(constraint, 1.0, 100);
        // 4.9 lies within |violation| < 1.0 of the boundary; 10.0 does not.
        learner.add_unlabeled_sample(Array1::from_vec(vec![4.9]));
        learner.add_unlabeled_sample(Array1::from_vec(vec![10.0]));
        assert_eq!(learner.num_unlabeled(), 1);
    }

    #[test]
    fn test_feedback_tuner() -> LogicResult<()> {
        let constraint = LinearConstraint::less_eq(vec![1.0], 5.0);
        let mut tuner = FeedbackConstraintTuner::new(constraint, 0.1, 0.8);
        tuner.add_feedback(&Array1::from_vec(vec![3.0]), 0.9)?;
        tuner.add_feedback(&Array1::from_vec(vec![4.0]), 0.7)?;
        assert_eq!(tuner.num_feedback_samples(), 2);
        assert!(tuner.average_satisfaction() > 0.0);
        Ok(())
    }

    #[test]
    fn test_online_learning_system() -> LogicResult<()> {
        let constraint = LinearConstraint::less_eq(vec![1.0], 5.0);
        let mut system = OnlineLearningSystem::new(constraint);
        system.process_labeled_sample(Array1::from_vec(vec![3.0]), true)?;
        system.process_unlabeled_sample(Array1::from_vec(vec![4.5]));
        system.add_feedback(&Array1::from_vec(vec![3.5]), 0.9)?;
        assert!(system.confidence() > 0.0);
        Ok(())
    }
}