use crate::base::Node;
use std::collections::HashMap;
/// Hyper-parameters for the node2vec embedding algorithm.
///
/// Covers both the biased random-walk generation phase (`walk_length`,
/// `num_walks`, `p`, `q`) and the skip-gram training phase (`window_size`,
/// `epochs`, `learning_rate`, `negative_samples`).
#[derive(Debug, Clone)]
pub struct Node2VecConfig {
    /// Dimensionality of each learned embedding vector.
    pub dimensions: usize,
    /// Number of nodes visited per random walk.
    pub walk_length: usize,
    /// Number of walks started from each node.
    pub num_walks: usize,
    /// Skip-gram context window radius.
    pub window_size: usize,
    /// Return parameter: biases the walk's tendency to revisit the previous node.
    pub p: f64,
    /// In-out parameter: biases the walk toward local (BFS-like) or distant
    /// (DFS-like) exploration.
    pub q: f64,
    /// Number of training passes over the generated walks.
    pub epochs: usize,
    /// Initial SGD learning rate.
    pub learning_rate: f64,
    /// Negative samples drawn per positive (target, context) pair.
    pub negative_samples: usize,
}

impl Default for Node2VecConfig {
    /// Defaults follow the values commonly used for node2vec
    /// (unbiased walks: `p == q == 1.0`).
    fn default() -> Self {
        Self {
            dimensions: 128,
            walk_length: 80,
            num_walks: 10,
            window_size: 10,
            p: 1.0,
            q: 1.0,
            epochs: 1,
            learning_rate: 0.025,
            negative_samples: 5,
        }
    }
}
/// Hyper-parameters for the DeepWalk embedding algorithm.
///
/// DeepWalk uses uniform (unbiased) random walks, so unlike
/// [`Node2VecConfig`] there are no `p`/`q` bias parameters.
#[derive(Debug, Clone)]
pub struct DeepWalkConfig {
    /// Dimensionality of each learned embedding vector.
    pub dimensions: usize,
    /// Number of nodes visited per random walk.
    pub walk_length: usize,
    /// Number of walks started from each node.
    pub num_walks: usize,
    /// Skip-gram context window radius.
    pub window_size: usize,
    /// Number of training passes over the generated walks.
    pub epochs: usize,
    /// Initial SGD learning rate.
    pub learning_rate: f64,
    /// Negative samples drawn per positive (target, context) pair.
    pub negative_samples: usize,
}

impl Default for DeepWalkConfig {
    /// Defaults follow commonly used DeepWalk settings
    /// (shorter walks than node2vec, but many more per node).
    fn default() -> Self {
        Self {
            dimensions: 128,
            walk_length: 40,
            num_walks: 80,
            window_size: 5,
            epochs: 1,
            learning_rate: 0.025,
            negative_samples: 5,
        }
    }
}
/// A single random walk over the graph: the ordered sequence of visited nodes.
#[derive(Debug, Clone)]
pub struct RandomWalk<N: Node> {
/// Nodes in the order they were visited; length is presumably the configured
/// `walk_length` — confirm at the walk-generation site.
pub nodes: Vec<N>,
}
/// Settings for the gradient-descent optimizer used during embedding training.
#[derive(Debug, Clone)]
pub struct OptimizationConfig {
    /// How the learning rate evolves over training.
    pub lr_schedule: LearningRateSchedule,
    /// Learning rate at the start of training.
    pub initial_lr: f64,
    /// Learning rate the schedule decays toward.
    pub final_lr: f64,
    /// Enables classical momentum updates.
    pub use_momentum: bool,
    /// Momentum coefficient; only meaningful when `use_momentum` is set.
    pub momentum: f64,
    /// Enables the Adam optimizer.
    pub use_adam: bool,
    /// Adam decay rate for first-moment estimates.
    pub adam_beta1: f64,
    /// Adam decay rate for second-moment estimates.
    pub adam_beta2: f64,
    /// Small constant for numerical stability in Adam updates.
    pub adam_epsilon: f64,
    /// L2 weight-decay strength; `0.0` disables regularization.
    pub l2_regularization: f64,
    /// When `Some(c)`, gradients are clipped (presumably to norm `c` — confirm
    /// in the optimizer); `None` disables clipping.
    pub gradient_clip: Option<f64>,
    /// Use hierarchical softmax instead of negative sampling.
    pub use_hierarchical_softmax: bool,
}

impl Default for OptimizationConfig {
    /// Conservative defaults: linear LR decay from 0.025 to 1e-4, plain SGD
    /// (momentum and Adam disabled), clipping enabled at 1.0.
    fn default() -> Self {
        Self {
            lr_schedule: LearningRateSchedule::Linear,
            initial_lr: 0.025,
            final_lr: 0.0001,
            use_momentum: false,
            momentum: 0.9,
            use_adam: false,
            adam_beta1: 0.9,
            adam_beta2: 0.999,
            adam_epsilon: 1e-8,
            l2_regularization: 0.0,
            gradient_clip: Some(1.0),
            use_hierarchical_softmax: false,
        }
    }
}
/// Strategy for evolving the learning rate between `initial_lr` and
/// `final_lr` over the course of training.
///
/// `Eq` and `Hash` are derived in addition to `PartialEq` (all variants are
/// unit variants, so equality is total), allowing use as a map/set key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum LearningRateSchedule {
    /// No decay: the learning rate stays fixed.
    Constant,
    /// Linear interpolation from the initial to the final rate.
    Linear,
    /// Multiplicative (exponential) decay.
    Exponential,
    /// Cosine annealing toward the final rate.
    Cosine,
    /// Piecewise-constant decay in discrete steps.
    Step,
}
/// Snapshot of training progress, reported per epoch/step.
#[derive(Debug, Clone)]
pub struct TrainingMetrics {
    /// Current epoch number.
    pub epoch: usize,
    /// Total optimization steps taken so far.
    pub steps: usize,
    /// Learning rate in effect at this point of training.
    pub learning_rate: f64,
    /// Loss of the most recent step.
    pub loss: f64,
    /// Running average of the loss.
    pub loss_avg: f64,
    /// Norm of the most recent gradient.
    pub gradient_norm: f64,
    /// Training throughput (optimization steps per second).
    pub steps_per_second: f64,
    /// Memory usage — units (bytes?) not established here; confirm at the
    /// site that fills this in.
    pub memory_usage: usize,
    /// Rate at which the loss is converging.
    pub convergence_rate: f64,
    /// Classification accuracy on positive (observed) pairs.
    pub positive_accuracy: f64,
    /// Classification accuracy on negative (sampled) pairs.
    pub negative_accuracy: f64,
}

impl Default for TrainingMetrics {
    /// Everything starts at zero except `learning_rate`, which begins at the
    /// conventional initial rate of 0.025 (matching the config defaults).
    fn default() -> Self {
        Self {
            epoch: 0,
            steps: 0,
            learning_rate: 0.025,
            loss: 0.0,
            loss_avg: 0.0,
            gradient_norm: 0.0,
            steps_per_second: 0.0,
            memory_usage: 0,
            convergence_rate: 0.0,
            positive_accuracy: 0.0,
            negative_accuracy: 0.0,
        }
    }
}
/// Strategy for choosing negative samples during training.
///
/// Derives brought in line with the sibling enum `LearningRateSchedule`:
/// all variants are unit variants, so `Copy`, `PartialEq`, and `Eq` are free
/// and backward compatible.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum NegativeSamplingStrategy {
    /// Sample nodes uniformly at random.
    Uniform,
    /// Sample proportionally to node frequency.
    Frequency,
    /// Sample proportionally to node degree.
    Degree,
    /// Adapt the sampling distribution during training.
    Adaptive,
    /// Hierarchical sampling scheme.
    Hierarchical,
}
/// Mutable per-run optimizer state (momentum and Adam buffers).
///
/// Buffers are keyed by parameter name. `Default` is now derived instead of
/// hand-written: every field's derived default (empty `HashMap`s, `0`) is
/// exactly what the manual impl produced, so behavior is unchanged and the
/// duplication between `default()` and `new()` is gone.
#[derive(Debug, Clone, Default)]
pub struct OptimizerState {
    /// Per-parameter momentum (velocity) buffers.
    pub momentum_buffers: HashMap<String, Vec<f64>>,
    /// Adam first-moment estimates per parameter.
    pub adam_m: HashMap<String, Vec<f64>>,
    /// Adam second-moment estimates per parameter.
    pub adam_v: HashMap<String, Vec<f64>>,
    /// Global update counter — presumably the `t` used for Adam bias
    /// correction; confirm in the update code.
    pub time_step: usize,
}

impl OptimizerState {
    /// Creates an empty optimizer state (no buffers, `time_step == 0`).
    pub fn new() -> Self {
        Self::default()
    }
}
/// A (target, context) training pair extracted from a random walk.
#[derive(Debug, Clone)]
pub struct ContextPair<N: Node> {
/// The center node whose embedding is being trained.
pub target: N,
/// A node co-occurring with the target — presumably within `window_size`
/// positions in the same walk; confirm at the pair-generation site.
pub context: N,
}