use crate::base::Node;
use std::collections::HashMap;

/// Configuration for node2vec: biased random walks followed by
/// skip-gram training with negative sampling.
#[derive(Debug, Clone)]
pub struct Node2VecConfig {
    /// Dimensionality of the learned embeddings.
    pub dimensions: usize,
    /// Number of nodes visited per walk.
    pub walk_length: usize,
    /// Number of walks started from each node.
    pub num_walks: usize,
    /// Skip-gram context window size.
    pub window_size: usize,
    /// Return parameter: higher values make the walk less likely to
    /// revisit the previous node.
    pub p: f64,
    /// In-out parameter: q < 1 biases the walk outward (DFS-like),
    /// q > 1 keeps it local (BFS-like).
    pub q: f64,
    /// Number of training passes over the generated walks.
    pub epochs: usize,
    /// Initial learning rate for skip-gram training.
    pub learning_rate: f64,
    /// Negative samples drawn per positive (target, context) pair.
    pub negative_samples: usize,
}

impl Default for Node2VecConfig {
    fn default() -> Self {
        Node2VecConfig {
            dimensions: 128,
            walk_length: 80,
            num_walks: 10,
            window_size: 10,
            p: 1.0,
            q: 1.0,
            epochs: 1,
            learning_rate: 0.025,
            negative_samples: 5,
        }
    }
}
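
// Illustrative usage (a sketch, not part of the original API): override the
// bias parameters and keep the remaining defaults via struct-update syntax.
// With p > 1 the walk avoids backtracking; with q < 1 it explores outward.
#[allow(dead_code)]
fn example_node2vec_config() -> Node2VecConfig {
    Node2VecConfig {
        dimensions: 64,
        p: 4.0, // discourage returning to the previous node
        q: 0.5, // favor DFS-like outward exploration
        ..Node2VecConfig::default()
    }
}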

/// Configuration for DeepWalk: uniform random walks followed by
/// skip-gram training with negative sampling.
#[derive(Debug, Clone)]
pub struct DeepWalkConfig {
    /// Dimensionality of the learned embeddings.
    pub dimensions: usize,
    /// Number of nodes visited per walk.
    pub walk_length: usize,
    /// Number of walks started from each node.
    pub num_walks: usize,
    /// Skip-gram context window size.
    pub window_size: usize,
    /// Number of training passes over the generated walks.
    pub epochs: usize,
    /// Initial learning rate for skip-gram training.
    pub learning_rate: f64,
    /// Negative samples drawn per positive (target, context) pair.
    pub negative_samples: usize,
}

impl Default for DeepWalkConfig {
    fn default() -> Self {
        DeepWalkConfig {
            dimensions: 128,
            walk_length: 40,
            num_walks: 80,
            window_size: 5,
            epochs: 1,
            learning_rate: 0.025,
            negative_samples: 5,
        }
    }
}
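
// DeepWalk's uniform walks are the p = q = 1 special case of node2vec. A
// sketch (assumed helper, not part of the original API) mapping one config
// onto the other:
#[allow(dead_code)]
fn deepwalk_as_node2vec(cfg: &DeepWalkConfig) -> Node2VecConfig {
    Node2VecConfig {
        dimensions: cfg.dimensions,
        walk_length: cfg.walk_length,
        num_walks: cfg.num_walks,
        window_size: cfg.window_size,
        p: 1.0, // uniform transition probabilities
        q: 1.0,
        epochs: cfg.epochs,
        learning_rate: cfg.learning_rate,
        negative_samples: cfg.negative_samples,
    }
}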

/// A single random walk: the sequence of nodes visited, in order.
#[derive(Debug, Clone)]
pub struct RandomWalk<N: Node> {
    pub nodes: Vec<N>,
}

/// Optimizer settings shared by the embedding trainers.
#[derive(Debug, Clone)]
pub struct OptimizationConfig {
    /// How the learning rate evolves over training.
    pub lr_schedule: LearningRateSchedule,
    /// Learning rate at the start of training.
    pub initial_lr: f64,
    /// Learning rate at the end of training (for decaying schedules).
    pub final_lr: f64,
    /// Apply classical momentum to SGD updates.
    pub use_momentum: bool,
    /// Momentum coefficient (used when `use_momentum` is set).
    pub momentum: f64,
    /// Use the Adam optimizer instead of plain SGD.
    pub use_adam: bool,
    /// Adam first-moment decay rate (β1).
    pub adam_beta1: f64,
    /// Adam second-moment decay rate (β2).
    pub adam_beta2: f64,
    /// Small constant added to Adam's denominator for numerical stability.
    pub adam_epsilon: f64,
    /// L2 weight-decay coefficient; 0.0 disables regularization.
    pub l2_regularization: f64,
    /// Clip gradients to this norm, if set.
    pub gradient_clip: Option<f64>,
    /// Use hierarchical softmax instead of negative sampling.
    pub use_hierarchical_softmax: bool,
}

impl Default for OptimizationConfig {
    fn default() -> Self {
        OptimizationConfig {
            lr_schedule: LearningRateSchedule::Linear,
            initial_lr: 0.025,
            final_lr: 0.0001,
            use_momentum: false,
            momentum: 0.9,
            use_adam: false,
            adam_beta1: 0.9,
            adam_beta2: 0.999,
            adam_epsilon: 1e-8,
            l2_regularization: 0.0,
            gradient_clip: Some(1.0),
            use_hierarchical_softmax: false,
        }
    }
}
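
// Illustrative sketch: switching the trainer onto the Adam path with a
// constant learning rate. The values here are common Adam choices, not
// ones taken from this crate.
#[allow(dead_code)]
fn example_adam_config() -> OptimizationConfig {
    OptimizationConfig {
        lr_schedule: LearningRateSchedule::Constant,
        initial_lr: 0.001, // a typical Adam step size
        use_adam: true,
        gradient_clip: None, // lean on Adam's per-parameter scaling instead
        ..OptimizationConfig::default()
    }
}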

/// How the learning rate decays from `initial_lr` toward `final_lr`.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum LearningRateSchedule {
    /// Keep the learning rate fixed at `initial_lr`.
    Constant,
    /// Decay linearly from `initial_lr` to `final_lr`.
    Linear,
    /// Decay exponentially from `initial_lr` to `final_lr`.
    Exponential,
    /// Cosine annealing from `initial_lr` to `final_lr`.
    Cosine,
    /// Decay in discrete steps.
    Step,
}
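
// A minimal sketch of how these schedules could be evaluated, assuming
// `progress` runs from 0.0 (start of training) to 1.0 (end). The Step
// decay factor and interval below are illustrative assumptions, not
// values taken from the trainer.
#[allow(dead_code)]
fn scheduled_lr(
    schedule: LearningRateSchedule,
    initial_lr: f64,
    final_lr: f64,
    progress: f64,
) -> f64 {
    use std::f64::consts::PI;
    match schedule {
        LearningRateSchedule::Constant => initial_lr,
        LearningRateSchedule::Linear => initial_lr + (final_lr - initial_lr) * progress,
        LearningRateSchedule::Exponential => initial_lr * (final_lr / initial_lr).powf(progress),
        LearningRateSchedule::Cosine => {
            final_lr + 0.5 * (initial_lr - final_lr) * (1.0 + (PI * progress).cos())
        }
        // Halve the rate at each quarter of training (assumed step policy).
        LearningRateSchedule::Step => initial_lr * 0.5f64.powi((progress * 4.0) as i32),
    }
}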

/// Snapshot of training progress, refreshed as training proceeds.
#[derive(Debug, Clone)]
pub struct TrainingMetrics {
    /// Current epoch (0-based).
    pub epoch: usize,
    /// Total optimization steps taken so far.
    pub steps: usize,
    /// Learning rate in effect at the last step.
    pub learning_rate: f64,
    /// Loss of the most recent step.
    pub loss: f64,
    /// Running average of the loss.
    pub loss_avg: f64,
    /// Norm of the most recent gradient.
    pub gradient_norm: f64,
    /// Training throughput.
    pub steps_per_second: f64,
    /// Estimated memory usage.
    pub memory_usage: usize,
    /// Rate at which the loss is improving.
    pub convergence_rate: f64,
    /// Fraction of positive pairs scored correctly.
    pub positive_accuracy: f64,
    /// Fraction of negative samples scored correctly.
    pub negative_accuracy: f64,
}

impl Default for TrainingMetrics {
    fn default() -> Self {
        TrainingMetrics {
            epoch: 0,
            steps: 0,
            learning_rate: 0.025,
            loss: 0.0,
            loss_avg: 0.0,
            gradient_norm: 0.0,
            steps_per_second: 0.0,
            memory_usage: 0,
            convergence_rate: 0.0,
            positive_accuracy: 0.0,
            negative_accuracy: 0.0,
        }
    }
}
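
// Sketch of a per-step metrics update using an exponential moving average
// for `loss_avg`; the 0.99 smoothing factor is an assumption made for
// illustration, not a value taken from the trainer.
#[allow(dead_code)]
fn record_step(metrics: &mut TrainingMetrics, step_loss: f64, lr: f64) {
    metrics.steps += 1;
    metrics.learning_rate = lr;
    metrics.loss = step_loss;
    metrics.loss_avg = if metrics.steps == 1 {
        step_loss // seed the average with the first observation
    } else {
        0.99 * metrics.loss_avg + 0.01 * step_loss
    };
}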

/// How negative samples are drawn during skip-gram training.
#[derive(Debug, Clone)]
pub enum NegativeSamplingStrategy {
    /// Sample nodes uniformly at random.
    Uniform,
    /// Sample nodes proportionally to how often they appear in walks.
    Frequency,
    /// Sample nodes proportionally to their degree in the graph.
    Degree,
    /// Adjust the sampling distribution as training progresses.
    Adaptive,
    /// Sample through a hierarchical structure over the nodes.
    Hierarchical,
}
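
// Sketch of the conventional weighting behind the `Frequency` strategy:
// raising counts to the 3/4 power (the word2vec heuristic) flattens the
// distribution so rare nodes still get sampled. A real implementation
// would precompute an alias table rather than reweighting on the fly.
#[allow(dead_code)]
fn frequency_weights(counts: &[usize]) -> Vec<f64> {
    let powered: Vec<f64> = counts.iter().map(|&c| (c as f64).powf(0.75)).collect();
    let total: f64 = powered.iter().sum();
    powered.into_iter().map(|w| w / total).collect()
}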

/// Per-parameter optimizer buffers, keyed by parameter name.
#[derive(Debug, Clone)]
pub struct OptimizerState {
    /// Velocity buffers for momentum SGD.
    pub momentum_buffers: HashMap<String, Vec<f64>>,
    /// Adam first-moment (mean) estimates.
    pub adam_m: HashMap<String, Vec<f64>>,
    /// Adam second-moment (uncentered variance) estimates.
    pub adam_v: HashMap<String, Vec<f64>>,
    /// Global step counter used for Adam bias correction.
    pub time_step: usize,
}

impl Default for OptimizerState {
    fn default() -> Self {
        Self::new()
    }
}

impl OptimizerState {
    pub fn new() -> Self {
        OptimizerState {
            momentum_buffers: HashMap::new(),
            adam_m: HashMap::new(),
            adam_v: HashMap::new(),
            time_step: 0,
        }
    }
}
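
// Sketch of one Adam update against this state, following the standard
// bias-corrected moment formulas. How the trainer names its parameter
// buffers and when it advances `time_step` are assumptions here; this
// version bumps the counter once per call.
#[allow(dead_code)]
fn adam_step(
    state: &mut OptimizerState,
    cfg: &OptimizationConfig,
    name: &str,
    params: &mut [f64],
    grads: &[f64],
) {
    state.time_step += 1;
    let t = state.time_step as i32;
    let m = state
        .adam_m
        .entry(name.to_string())
        .or_insert_with(|| vec![0.0; params.len()]);
    let v = state
        .adam_v
        .entry(name.to_string())
        .or_insert_with(|| vec![0.0; params.len()]);
    for i in 0..params.len() {
        // Update biased first- and second-moment estimates.
        m[i] = cfg.adam_beta1 * m[i] + (1.0 - cfg.adam_beta1) * grads[i];
        v[i] = cfg.adam_beta2 * v[i] + (1.0 - cfg.adam_beta2) * grads[i] * grads[i];
        // Correct the initialization bias, then take the step.
        let m_hat = m[i] / (1.0 - cfg.adam_beta1.powi(t));
        let v_hat = v[i] / (1.0 - cfg.adam_beta2.powi(t));
        params[i] -= cfg.initial_lr * m_hat / (v_hat.sqrt() + cfg.adam_epsilon);
    }
}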

/// A (target, context) pair extracted from a walk window, used as one
/// positive training example for the skip-gram objective.
#[derive(Debug, Clone)]
pub struct ContextPair<N: Node> {
    pub target: N,
    pub context: N,
}
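
// Sketch of turning a walk into skip-gram training pairs: every node within
// `window_size` positions of a target becomes one ContextPair. This mirrors
// word2vec-style windowing; the `Clone` bound on `N` is an assumption.
#[allow(dead_code)]
fn context_pairs<N: Node + Clone>(walk: &RandomWalk<N>, window_size: usize) -> Vec<ContextPair<N>> {
    let mut pairs = Vec::new();
    for (i, target) in walk.nodes.iter().enumerate() {
        let start = i.saturating_sub(window_size);
        let end = (i + window_size + 1).min(walk.nodes.len());
        for j in start..end {
            if j != i {
                pairs.push(ContextPair {
                    target: target.clone(),
                    context: walk.nodes[j].clone(),
                });
            }
        }
    }
    pairs
}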