use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::random::{Rng, RngExt};
use crate::error::{GraphError, Result};
/// A timestamped interaction between two nodes, optionally carrying
/// an edge-feature vector.
#[derive(Debug, Clone)]
pub struct TemporalEvent {
    pub source: usize,
    pub target: usize,
    pub timestamp: f64,
    pub features: Option<Vec<f64>>,
}

impl TemporalEvent {
    /// Creates a featureless event from `source` to `target` at `timestamp`.
    pub fn new(source: usize, target: usize, timestamp: f64) -> Self {
        Self {
            source,
            target,
            timestamp,
            features: None,
        }
    }

    /// Creates an event that additionally carries an edge-feature vector.
    pub fn with_features(source: usize, target: usize, timestamp: f64, features: Vec<f64>) -> Self {
        Self {
            features: Some(features),
            ..Self::new(source, target, timestamp)
        }
    }
}
/// Strategy used by [`TimeEncoding`] to map a scalar time value to a vector.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum TimeEncodingType {
    /// Fixed transformer-style sin/cos frequencies (no random parameters).
    Sinusoidal,
    /// Time2Vec: one linear component plus randomly initialised sine terms.
    Time2Vec,
}
/// Maps a scalar timestamp (or time delta) to a `time_dim`-dimensional vector.
#[derive(Debug, Clone)]
pub struct TimeEncoding {
    /// Which encoding scheme `encode` applies.
    pub encoding_type: TimeEncodingType,
    /// Output dimensionality of the encoding.
    pub time_dim: usize,
    /// Per-dimension frequencies (deterministic for Sinusoidal, random for Time2Vec).
    pub omega: Array1<f64>,
    /// Per-dimension phase shifts (used by the Time2Vec sine terms).
    pub phi: Array1<f64>,
    /// Slope of the linear first component (Time2Vec only).
    pub linear_weight: f64,
    /// Intercept of the linear first component (Time2Vec only).
    pub linear_bias: f64,
}
impl TimeEncoding {
    /// Builds an encoder with `time_dim` output dimensions.
    ///
    /// RNG draws happen in a fixed order (omega, then phi, then the linear
    /// weight) so that a seeded generator reproduces the same parameters.
    pub fn new(time_dim: usize, encoding_type: TimeEncodingType) -> Self {
        let mut rng = scirs2_core::random::rng();
        let omega = match &encoding_type {
            // Deterministic geometric frequency ladder, as in transformer
            // positional encodings; paired dims share a frequency via i / 2.
            TimeEncodingType::Sinusoidal => Array1::from_iter((0..time_dim).map(|i| {
                1.0 / 10000.0_f64.powf(2.0 * (i / 2) as f64 / time_dim as f64)
            })),
            // Random frequencies in [0, 2).
            TimeEncodingType::Time2Vec => {
                Array1::from_iter((0..time_dim).map(|_| rng.random::<f64>() * 2.0))
            }
        };
        // Random phases in [0, 2π).
        let phi =
            Array1::from_iter((0..time_dim).map(|_| rng.random::<f64>() * std::f64::consts::TAU));
        let linear_weight = rng.random::<f64>() * 0.1;
        Self {
            encoding_type,
            time_dim,
            omega,
            phi,
            linear_weight,
            linear_bias: 0.0,
        }
    }

    /// Encodes a single time value `t` into a vector of length `time_dim`.
    pub fn encode(&self, t: f64) -> Array1<f64> {
        let mut out = Array1::zeros(self.time_dim);
        match self.encoding_type {
            TimeEncodingType::Sinusoidal => {
                // Even slots take sin, odd slots cos, of t times the
                // per-dimension frequency.
                for i in 0..self.time_dim {
                    let angle = t * self.omega[i];
                    out[i] = if i % 2 == 0 { angle.sin() } else { angle.cos() };
                }
            }
            TimeEncodingType::Time2Vec => {
                // Slot 0 is the linear term; the rest are phase-shifted sines.
                if self.time_dim > 0 {
                    out[0] = self.linear_weight * t + self.linear_bias;
                }
                for i in 1..self.time_dim {
                    out[i] = (self.omega[i] * t + self.phi[i]).sin();
                }
            }
        }
        out
    }

    /// Encodes each timestamp into one row of an `(n, time_dim)` matrix.
    pub fn encode_batch(&self, timestamps: &[f64]) -> Array2<f64> {
        let mut rows = Array2::zeros((timestamps.len(), self.time_dim));
        for (i, &t) in timestamps.iter().enumerate() {
            let enc = self.encode(t);
            for j in 0..self.time_dim {
                rows[[i, j]] = enc[j];
            }
        }
        rows
    }
}
/// How node memories are revised when an event arrives.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum MemoryUpdateMethod {
    /// Gated recurrent unit step (update/reset gates plus candidate state).
    Gru,
    /// Single tanh layer over the concatenated [message | memory] vector.
    Mlp,
}
/// Per-node memory bank (resembles the TGN memory module): each node keeps a
/// state vector that is revised whenever the node participates in an event.
#[derive(Debug, Clone)]
pub struct MemoryModule {
    /// `(n_nodes, memory_dim)` matrix of node states.
    pub memory: Array2<f64>,
    /// Timestamp of the last event each node took part in.
    pub last_update: Vec<f64>,
    /// Width of each node's state vector.
    pub memory_dim: usize,
    /// Width of the time encoding appended to messages.
    pub time_dim: usize,
    /// Number of nodes tracked.
    pub n_nodes: usize,
    /// Which update rule `process_event` applies.
    pub update_method: MemoryUpdateMethod,
    /// Encoder for the time delta between consecutive events.
    pub time_encoding: TimeEncoding,
    /// Message width: 2 * memory_dim + time_dim.
    pub message_dim: usize,
    // GRU parameters: W_* act on the message, U_* on the previous memory,
    // b_* are biases; z = update gate, r = reset gate, h = candidate state.
    gru_wz: Array2<f64>,
    gru_uz: Array2<f64>,
    gru_wr: Array2<f64>,
    gru_ur: Array2<f64>,
    gru_wh: Array2<f64>,
    gru_uh: Array2<f64>,
    gru_bz: Array1<f64>,
    gru_br: Array1<f64>,
    gru_bh: Array1<f64>,
    // MLP parameters for the single-layer alternative update.
    mlp_w: Array2<f64>,
    mlp_b: Array1<f64>,
}
impl MemoryModule {
    /// Creates a memory module for `n_nodes` nodes, all starting from a zero
    /// memory vector and a last-update timestamp of 0.0.
    ///
    /// Weight matrices are drawn uniformly from [-s, s] with a Xavier-style
    /// scale s = sqrt(6 / (fan_in + fan_out)); all biases start at zero.
    pub fn new(
        n_nodes: usize,
        memory_dim: usize,
        time_dim: usize,
        update_method: MemoryUpdateMethod,
    ) -> Self {
        let mut rng = scirs2_core::random::rng();
        // Time deltas are always embedded with Time2Vec here, independent of
        // the chosen memory update rule.
        let time_encoding = TimeEncoding::new(time_dim, TimeEncodingType::Time2Vec);
        // A message is [source memory | target memory | time encoding].
        let message_dim = memory_dim + memory_dim + time_dim;
        let scale_gru = (6.0_f64 / (message_dim + memory_dim) as f64).sqrt();
        let scale_u = (6.0_f64 / (2 * memory_dim) as f64).sqrt();
        let scale_mlp = (6.0_f64 / (message_dim + 2 * memory_dim) as f64).sqrt();
        // Uniform init in [-s, s] for an (r, c) matrix.
        let mut init = |r: usize, c: usize, s: f64| -> Array2<f64> {
            Array2::from_shape_fn((r, c), |_| (rng.random::<f64>() * 2.0 - 1.0) * s)
        };
        MemoryModule {
            memory: Array2::zeros((n_nodes, memory_dim)),
            last_update: vec![0.0; n_nodes],
            memory_dim,
            time_dim,
            n_nodes,
            update_method,
            time_encoding,
            message_dim,
            gru_wz: init(message_dim, memory_dim, scale_gru),
            gru_uz: init(memory_dim, memory_dim, scale_u),
            gru_wr: init(message_dim, memory_dim, scale_gru),
            gru_ur: init(memory_dim, memory_dim, scale_u),
            gru_wh: init(message_dim, memory_dim, scale_gru),
            gru_uh: init(memory_dim, memory_dim, scale_u),
            gru_bz: Array1::zeros(memory_dim),
            gru_br: Array1::zeros(memory_dim),
            gru_bh: Array1::zeros(memory_dim),
            mlp_w: init(message_dim + memory_dim, memory_dim, scale_mlp),
            mlp_b: Array1::zeros(memory_dim),
        }
    }

    /// Builds the message vector for `event`:
    /// [source memory | target memory | encoded time delta].
    ///
    /// The delta is `event.timestamp` minus the latest update time of either
    /// endpoint. Indices must already be validated; `process_event` does so.
    fn compute_message(&self, event: &TemporalEvent) -> Vec<f64> {
        let src = event.source;
        let tgt = event.target;
        let delta_t = event.timestamp - self.last_update[src].max(self.last_update[tgt]);
        let time_enc = self.time_encoding.encode(delta_t);
        let mut msg = Vec::with_capacity(self.message_dim);
        // NOTE(review): the `< self.n_nodes` guards below are effectively
        // dead — process_event rejects out-of-range indices before calling
        // this, and the last_update indexing above would panic first anyway.
        for j in 0..self.memory_dim {
            msg.push(if src < self.n_nodes {
                self.memory[[src, j]]
            } else {
                0.0
            });
        }
        for j in 0..self.memory_dim {
            msg.push(if tgt < self.n_nodes {
                self.memory[[tgt, j]]
            } else {
                0.0
            });
        }
        for j in 0..self.time_dim {
            msg.push(time_enc[j]);
        }
        msg
    }

    /// One GRU step: returns the new memory given the previous `memory` and
    /// the incoming `message`.
    ///
    /// z  = sigmoid(W_z m + U_z h + b_z)       (update gate)
    /// r  = sigmoid(W_r m + U_r h + b_r)       (reset gate)
    /// h~ = tanh(W_h m + U_h (r ⊙ h) + b_h)    (candidate state)
    /// h' = (1 - z) ⊙ h + z ⊙ h~
    fn gru_update(&self, memory: &[f64], message: &[f64]) -> Vec<f64> {
        let d = self.memory_dim;
        let m = self.message_dim;
        // Update gate.
        let mut z = vec![0.0f64; d];
        for j in 0..d {
            let mut s = self.gru_bz[j];
            for k in 0..m {
                s += message[k] * self.gru_wz[[k, j]];
            }
            for k in 0..d {
                s += memory[k] * self.gru_uz[[k, j]];
            }
            z[j] = sigmoid(s);
        }
        // Reset gate.
        let mut r = vec![0.0f64; d];
        for j in 0..d {
            let mut s = self.gru_br[j];
            for k in 0..m {
                s += message[k] * self.gru_wr[[k, j]];
            }
            for k in 0..d {
                s += memory[k] * self.gru_ur[[k, j]];
            }
            r[j] = sigmoid(s);
        }
        // Candidate state with the reset gate applied to the old memory.
        let mut h_tilde = vec![0.0f64; d];
        for j in 0..d {
            let mut s = self.gru_bh[j];
            for k in 0..m {
                s += message[k] * self.gru_wh[[k, j]];
            }
            for k in 0..d {
                s += (r[k] * memory[k]) * self.gru_uh[[k, j]];
            }
            h_tilde[j] = s.tanh();
        }
        // Convex blend of old memory and candidate, weighted by the gate.
        let mut h_new = vec![0.0f64; d];
        for j in 0..d {
            h_new[j] = (1.0 - z[j]) * memory[j] + z[j] * h_tilde[j];
        }
        h_new
    }

    /// One MLP step: tanh of a single linear layer over [message | memory].
    fn mlp_update(&self, memory: &[f64], message: &[f64]) -> Vec<f64> {
        let d = self.memory_dim;
        let total_in = self.message_dim + d;
        let mut input = Vec::with_capacity(total_in);
        input.extend_from_slice(message);
        input.extend_from_slice(memory);
        let mut out = vec![0.0f64; d];
        for j in 0..d {
            let mut s = self.mlp_b[j];
            for k in 0..total_in {
                s += input[k] * self.mlp_w[[k, j]];
            }
            out[j] = s.tanh();
        }
        out
    }

    /// Applies one event: both endpoints get their memory refreshed from the
    /// shared message, and their last-update timestamps are set to the
    /// event's timestamp.
    ///
    /// # Errors
    /// Returns `InvalidParameter` if either endpoint index is out of range;
    /// in that case no state is modified.
    pub fn process_event(&mut self, event: &TemporalEvent) -> Result<()> {
        if event.source >= self.n_nodes || event.target >= self.n_nodes {
            return Err(GraphError::InvalidParameter {
                param: "event".to_string(),
                value: format!("source={}, target={}", event.source, event.target),
                expected: format!("indices < {}", self.n_nodes),
                context: "MemoryModule::process_event".to_string(),
            });
        }
        // Both endpoint updates read the pre-event memory via this message.
        let message = self.compute_message(event);
        let src_memory: Vec<f64> = (0..self.memory_dim)
            .map(|j| self.memory[[event.source, j]])
            .collect();
        let new_src = match self.update_method {
            MemoryUpdateMethod::Gru => self.gru_update(&src_memory, &message),
            MemoryUpdateMethod::Mlp => self.mlp_update(&src_memory, &message),
        };
        let tgt_memory: Vec<f64> = (0..self.memory_dim)
            .map(|j| self.memory[[event.target, j]])
            .collect();
        let new_tgt = match self.update_method {
            MemoryUpdateMethod::Gru => self.gru_update(&tgt_memory, &message),
            MemoryUpdateMethod::Mlp => self.mlp_update(&tgt_memory, &message),
        };
        // Write back only after both updates, so each saw pre-event state.
        for j in 0..self.memory_dim {
            self.memory[[event.source, j]] = new_src[j];
            self.memory[[event.target, j]] = new_tgt[j];
        }
        self.last_update[event.source] = event.timestamp;
        self.last_update[event.target] = event.timestamp;
        Ok(())
    }

    /// Applies a batch of events in order, stopping at the first error.
    /// Events before the failing one remain applied.
    pub fn process_events(&mut self, events: &[TemporalEvent]) -> Result<()> {
        for event in events {
            self.process_event(event)?;
        }
        Ok(())
    }

    /// Read-only view of the `(n_nodes, memory_dim)` memory matrix.
    pub fn get_memory(&self) -> &Array2<f64> {
        &self.memory
    }

    /// Zeroes all memories and last-update timestamps.
    pub fn reset(&mut self) {
        self.memory.fill(0.0);
        self.last_update.fill(0.0);
    }
}
/// Logistic function: maps any real input into [0, 1].
#[inline]
fn sigmoid(x: f64) -> f64 {
    (1.0 + (-x).exp()).recip()
}
/// Multi-head scaled dot-product attention over a node's temporal neighbors,
/// with a time-delta encoding appended to each neighbor's key/value input.
#[derive(Debug, Clone)]
pub struct TemporalAttention {
    /// Query projection, shape `(memory_dim, hidden_dim)`.
    pub w_q: Array2<f64>,
    /// Key projection, shape `(memory_dim + time_dim, hidden_dim)`.
    pub w_k: Array2<f64>,
    /// Value projection, shape `(memory_dim + time_dim, hidden_dim)`.
    pub w_v: Array2<f64>,
    /// Number of attention heads.
    pub num_heads: usize,
    /// Total output width (set equal to `memory_dim` in `new`).
    pub hidden_dim: usize,
    /// Per-head width: hidden_dim / num_heads.
    pub head_dim: usize,
    /// Sinusoidal encoder applied to neighbor time deltas.
    pub time_encoding: TimeEncoding,
    /// Width of node memory vectors.
    pub memory_dim: usize,
    /// Width of the time encoding.
    pub time_dim: usize,
}
impl TemporalAttention {
    /// Creates a multi-head attention layer; the output width equals
    /// `memory_dim`, split evenly across `num_heads` heads.
    ///
    /// Projections are drawn uniformly from [-s, s] with Xavier-style
    /// scales; key/value inputs are wider than queries because a time
    /// encoding is appended to each neighbor memory.
    ///
    /// # Errors
    /// Fails if `memory_dim` is not divisible by `num_heads`.
    pub fn new(memory_dim: usize, time_dim: usize, num_heads: usize) -> Result<Self> {
        let hidden_dim = memory_dim;
        if !hidden_dim.is_multiple_of(num_heads) {
            return Err(GraphError::InvalidParameter {
                param: "memory_dim".to_string(),
                value: format!("{memory_dim}"),
                expected: format!("divisible by num_heads={num_heads}"),
                context: "TemporalAttention::new".to_string(),
            });
        }
        let head_dim = hidden_dim / num_heads;
        let mut rng = scirs2_core::random::rng();
        let scale_q = (6.0_f64 / (memory_dim + hidden_dim) as f64).sqrt();
        let scale_kv = (6.0_f64 / (memory_dim + time_dim + hidden_dim) as f64).sqrt();
        let w_q = Array2::from_shape_fn((memory_dim, hidden_dim), |_| {
            (rng.random::<f64>() * 2.0 - 1.0) * scale_q
        });
        let w_k = Array2::from_shape_fn((memory_dim + time_dim, hidden_dim), |_| {
            (rng.random::<f64>() * 2.0 - 1.0) * scale_kv
        });
        let w_v = Array2::from_shape_fn((memory_dim + time_dim, hidden_dim), |_| {
            (rng.random::<f64>() * 2.0 - 1.0) * scale_kv
        });
        // Time deltas are encoded with fixed sinusoidal frequencies.
        let time_encoding = TimeEncoding::new(time_dim, TimeEncodingType::Sinusoidal);
        Ok(TemporalAttention {
            w_q,
            w_k,
            w_v,
            num_heads,
            hidden_dim,
            head_dim,
            time_encoding,
            memory_dim,
            time_dim,
        })
    }

    /// Scaled dot-product attention of one query node over its neighbors.
    ///
    /// * `query_memory` - querying node's memory, length `memory_dim`.
    /// * `neighbor_memories` - `(num_neighbors, memory_dim)` matrix.
    /// * `time_deltas` - per-neighbor time gap; encoded and appended to the
    ///   key/value inputs. Must have one entry per neighbor.
    ///
    /// Returns a `hidden_dim` vector; zeros when there are no neighbors.
    /// Note there is no output projection after the per-head weighted sums.
    ///
    /// # Errors
    /// Fails if `time_deltas.len()` differs from the number of neighbors.
    pub fn forward(
        &self,
        query_memory: &Array1<f64>,
        neighbor_memories: &Array2<f64>,
        time_deltas: &[f64],
    ) -> Result<Array1<f64>> {
        let num_neighbors = neighbor_memories.dim().0;
        if num_neighbors == 0 {
            return Ok(Array1::zeros(self.hidden_dim));
        }
        if time_deltas.len() != num_neighbors {
            return Err(GraphError::InvalidParameter {
                param: "time_deltas".to_string(),
                value: format!("len={}", time_deltas.len()),
                expected: format!("len={num_neighbors}"),
                context: "TemporalAttention::forward".to_string(),
            });
        }
        let h = self.num_heads;
        let dk = self.head_dim;
        // 1/sqrt(d_k) score scaling, as in standard dot-product attention.
        let scale = 1.0 / (dk as f64).sqrt();
        // Project the query: q = query_memory · W_q.
        let mut q = vec![0.0f64; self.hidden_dim];
        for j in 0..self.hidden_dim {
            for m in 0..self.memory_dim {
                q[j] += query_memory[m] * self.w_q[[m, j]];
            }
        }
        // Project each neighbor's [memory | time encoding] to keys/values.
        let kv_in_dim = self.memory_dim + self.time_dim;
        let mut keys = vec![vec![0.0f64; self.hidden_dim]; num_neighbors];
        let mut values = vec![vec![0.0f64; self.hidden_dim]; num_neighbors];
        for nb in 0..num_neighbors {
            let time_enc = self.time_encoding.encode(time_deltas[nb]);
            let mut kv_input = Vec::with_capacity(kv_in_dim);
            for m in 0..self.memory_dim {
                kv_input.push(neighbor_memories[[nb, m]]);
            }
            for m in 0..self.time_dim {
                kv_input.push(time_enc[m]);
            }
            for j in 0..self.hidden_dim {
                let mut sk = 0.0;
                let mut sv = 0.0;
                for m in 0..kv_in_dim {
                    sk += kv_input[m] * self.w_k[[m, j]];
                    sv += kv_input[m] * self.w_v[[m, j]];
                }
                keys[nb][j] = sk;
                values[nb][j] = sv;
            }
        }
        // Per head: softmax over scaled q·k scores, then a weighted value
        // sum written into that head's slice of the output.
        let mut output = vec![0.0f64; self.hidden_dim];
        for head in 0..h {
            let offset = head * dk;
            let mut scores = vec![0.0f64; num_neighbors];
            for nb in 0..num_neighbors {
                let mut dot = 0.0;
                for m in 0..dk {
                    dot += q[offset + m] * keys[nb][offset + m];
                }
                scores[nb] = dot * scale;
            }
            let alphas = softmax_slice(&scores);
            for nb in 0..num_neighbors {
                for m in 0..dk {
                    output[offset + m] += alphas[nb] * values[nb][offset + m];
                }
            }
        }
        Ok(Array1::from_vec(output))
    }
}
/// Numerically stable softmax over a slice; an empty input yields an empty
/// Vec. The max is subtracted before exponentiation so large scores cannot
/// overflow.
fn softmax_slice(xs: &[f64]) -> Vec<f64> {
    if xs.is_empty() {
        return Vec::new();
    }
    let max_val = xs.iter().fold(f64::NEG_INFINITY, |acc, &x| acc.max(x));
    let mut weights: Vec<f64> = Vec::with_capacity(xs.len());
    let mut total = 0.0f64;
    for &x in xs {
        let w = (x - max_val).exp();
        total += w;
        weights.push(w);
    }
    // The max term exponentiates to 1, so total >= 1; the floor is a
    // defensive guard against a zero denominator.
    let denom = total.max(1e-12);
    for w in weights.iter_mut() {
        *w /= denom;
    }
    weights
}
/// Hyper-parameters for [`TemporalGnnModel`].
#[derive(Debug, Clone)]
pub struct TemporalGnnConfig {
    /// Number of nodes in the graph.
    pub n_nodes: usize,
    /// Node memory width.
    pub memory_dim: usize,
    /// Time-encoding width.
    pub time_dim: usize,
    /// Attention heads; must evenly divide `memory_dim`.
    pub num_heads: usize,
    /// Memory update rule.
    pub update_method: MemoryUpdateMethod,
    /// Time-encoding scheme.
    /// NOTE(review): currently unused — `TemporalGnnModel::new` does not
    /// forward it; the memory module hardcodes Time2Vec and the attention
    /// layer hardcodes Sinusoidal.
    pub time_encoding_type: TimeEncodingType,
}
impl Default for TemporalGnnConfig {
    /// Reasonable defaults for small experiments: 100 nodes, 64-dim memory,
    /// 16-dim time encoding, 4 heads, GRU updates, Time2Vec encoding.
    fn default() -> Self {
        Self {
            update_method: MemoryUpdateMethod::Gru,
            time_encoding_type: TimeEncodingType::Time2Vec,
            n_nodes: 100,
            memory_dim: 64,
            time_dim: 16,
            num_heads: 4,
        }
    }
}
/// End-to-end temporal GNN: an event-driven memory module plus a temporal
/// attention layer that aggregates neighbor memories on demand.
#[derive(Debug, Clone)]
pub struct TemporalGnnModel {
    /// Event-driven per-node memory.
    pub memory_module: MemoryModule,
    /// Attention used by `get_node_embedding`.
    pub temporal_attention: TemporalAttention,
    /// Construction-time hyper-parameters.
    pub config: TemporalGnnConfig,
    // Chronological record of processed events; scanned newest-first to
    // find a node's recent neighbors.
    event_history: Vec<TemporalEvent>,
}
impl TemporalGnnModel {
    /// Builds a model from `config`.
    ///
    /// NOTE(review): `config.time_encoding_type` is not forwarded — the
    /// memory module always uses Time2Vec and the attention layer always
    /// uses Sinusoidal encodings.
    ///
    /// # Errors
    /// Fails if `config.memory_dim` is not divisible by `config.num_heads`.
    pub fn new(config: TemporalGnnConfig) -> Result<Self> {
        let memory_module = MemoryModule::new(
            config.n_nodes,
            config.memory_dim,
            config.time_dim,
            config.update_method.clone(),
        );
        let temporal_attention =
            TemporalAttention::new(config.memory_dim, config.time_dim, config.num_heads)?;
        Ok(TemporalGnnModel {
            memory_module,
            temporal_attention,
            config,
            event_history: Vec::new(),
        })
    }

    /// Applies `events` in order, recording each successful one in the
    /// history.
    ///
    /// Each event is appended to `event_history` immediately after its
    /// memory update succeeds, keeping memory and history consistent even
    /// when a later event in the batch fails validation. (Previously the
    /// whole batch was appended only after all updates succeeded, so a
    /// mid-batch error left updated memories with no matching history
    /// entries.)
    ///
    /// # Errors
    /// Returns the first validation error; events before it remain applied.
    pub fn process_events(&mut self, events: &[TemporalEvent]) -> Result<()> {
        for event in events {
            self.memory_module.process_event(event)?;
            self.event_history.push(event.clone());
        }
        Ok(())
    }

    /// Computes an embedding for `node` at `current_time` by attending over
    /// the memories of its `max_neighbors` most recent event partners.
    ///
    /// Falls back to the node's raw memory vector when it has no recorded
    /// events.
    ///
    /// # Errors
    /// Fails if `node` is out of range.
    pub fn get_node_embedding(
        &self,
        node: usize,
        current_time: f64,
        max_neighbors: usize,
    ) -> Result<Array1<f64>> {
        if node >= self.config.n_nodes {
            return Err(GraphError::InvalidParameter {
                param: "node".to_string(),
                value: format!("{node}"),
                expected: format!("< {}", self.config.n_nodes),
                context: "TemporalGnnModel::get_node_embedding".to_string(),
            });
        }
        // Scan newest-to-oldest for events that touch `node`.
        let mut neighbor_events: Vec<(usize, f64)> = Vec::new();
        for event in self.event_history.iter().rev() {
            if neighbor_events.len() >= max_neighbors {
                break;
            }
            if event.source == node {
                neighbor_events.push((event.target, event.timestamp));
            } else if event.target == node {
                neighbor_events.push((event.source, event.timestamp));
            }
        }
        if neighbor_events.is_empty() {
            // No interactions recorded: return the node's memory as-is.
            return Ok(Array1::from_iter(
                (0..self.config.memory_dim).map(|j| self.memory_module.memory[[node, j]]),
            ));
        }
        // Gather neighbor memories and how long ago each event happened.
        let num_nb = neighbor_events.len();
        let mut nb_memories = Array2::zeros((num_nb, self.config.memory_dim));
        let mut time_deltas = vec![0.0f64; num_nb];
        for (idx, &(nb_node, nb_time)) in neighbor_events.iter().enumerate() {
            for j in 0..self.config.memory_dim {
                nb_memories[[idx, j]] = self.memory_module.memory[[nb_node, j]];
            }
            time_deltas[idx] = current_time - nb_time;
        }
        let query = Array1::from_iter(
            (0..self.config.memory_dim).map(|j| self.memory_module.memory[[node, j]]),
        );
        self.temporal_attention
            .forward(&query, &nb_memories, &time_deltas)
    }

    /// Clears all node memories, timestamps, and the event history.
    pub fn reset(&mut self) {
        self.memory_module.reset();
        self.event_history.clear();
    }

    /// Read-only view of the `(n_nodes, memory_dim)` memory matrix.
    pub fn get_memory(&self) -> &Array2<f64> {
        self.memory_module.get_memory()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Sinusoidal encodings must actually vary with the input time.
    #[test]
    fn test_time_encoding_sinusoidal_varies_with_time() {
        let te = TimeEncoding::new(8, TimeEncodingType::Sinusoidal);
        let enc1 = te.encode(0.0);
        let enc2 = te.encode(1.0);
        let enc3 = te.encode(10.0);
        assert_eq!(enc1.len(), 8);
        assert_eq!(enc2.len(), 8);
        // L1 distances between encodings at different times.
        let diff_12: f64 = enc1
            .iter()
            .zip(enc2.iter())
            .map(|(a, b)| (a - b).abs())
            .sum();
        let diff_13: f64 = enc1
            .iter()
            .zip(enc3.iter())
            .map(|(a, b)| (a - b).abs())
            .sum();
        assert!(diff_12 > 1e-6, "encodings at t=0 and t=1 should differ");
        assert!(diff_13 > 1e-6, "encodings at t=0 and t=10 should differ");
    }

    /// Time2Vec output is finite and its first component is linear in t.
    #[test]
    fn test_time_encoding_time2vec() {
        let te = TimeEncoding::new(6, TimeEncodingType::Time2Vec);
        let enc = te.encode(5.0);
        assert_eq!(enc.len(), 6);
        for &v in enc.iter() {
            assert!(v.is_finite(), "Time2Vec encoding should be finite");
        }
        // Slot 0 is w * t + b, so its change over t is exactly w * delta_t.
        let enc0 = te.encode(0.0);
        let enc10 = te.encode(10.0);
        let expected_diff = te.linear_weight * 10.0;
        let actual_diff = enc10[0] - enc0[0];
        assert!(
            (actual_diff - expected_diff).abs() < 1e-10,
            "first component should be linear"
        );
    }

    /// A GRU event update changes both endpoints and nothing else.
    #[test]
    fn test_memory_update_changes_state() {
        let mut mem = MemoryModule::new(5, 8, 4, MemoryUpdateMethod::Gru);
        let initial_norm: f64 = mem.memory.iter().map(|x| x * x).sum();
        assert!(initial_norm < 1e-12, "initial memory should be zero");
        let event = TemporalEvent::new(0, 1, 1.0);
        mem.process_event(&event).expect("process event");
        let node0_norm: f64 = (0..8).map(|j| mem.memory[[0, j]].powi(2)).sum();
        let node1_norm: f64 = (0..8).map(|j| mem.memory[[1, j]].powi(2)).sum();
        assert!(
            node0_norm > 1e-12,
            "node 0 memory should be updated after event"
        );
        assert!(
            node1_norm > 1e-12,
            "node 1 memory should be updated after event"
        );
        let node2_norm: f64 = (0..8).map(|j| mem.memory[[2, j]].powi(2)).sum();
        assert!(node2_norm < 1e-12, "node 2 memory should remain zero");
    }

    /// The MLP update rule also moves memory away from zero.
    #[test]
    fn test_memory_update_mlp() {
        let mut mem = MemoryModule::new(4, 6, 3, MemoryUpdateMethod::Mlp);
        let event = TemporalEvent::new(0, 1, 0.5);
        mem.process_event(&event).expect("process event MLP");
        let node0_norm: f64 = (0..6).map(|j| mem.memory[[0, j]].powi(2)).sum();
        assert!(node0_norm > 1e-12, "MLP update should modify memory");
    }

    /// Attention output has hidden_dim entries and stays finite.
    #[test]
    fn test_temporal_attention_shape() {
        let ta = TemporalAttention::new(8, 4, 2).expect("temporal attention");
        let query = Array1::from_vec(vec![0.1; 8]);
        let neighbors = Array2::from_shape_fn((3, 8), |(i, j)| (i + j) as f64 * 0.05);
        let deltas = vec![1.0, 2.0, 3.0];
        let out = ta.forward(&query, &neighbors, &deltas).expect("forward");
        assert_eq!(out.len(), 8);
        for &v in out.iter() {
            assert!(v.is_finite(), "temporal attention output should be finite");
        }
    }

    /// With no neighbors the attention layer returns a zero vector.
    #[test]
    fn test_temporal_attention_empty_neighbors() {
        let ta = TemporalAttention::new(8, 4, 2).expect("temporal attention");
        let query = Array1::from_vec(vec![0.1; 8]);
        let neighbors = Array2::zeros((0, 8));
        let deltas: Vec<f64> = Vec::new();
        let out = ta
            .forward(&query, &neighbors, &deltas)
            .expect("empty forward");
        assert_eq!(out.len(), 8);
        let norm: f64 = out.iter().map(|x| x * x).sum();
        assert!(norm < 1e-12, "empty neighbor attention should return zeros");
    }

    /// Full pipeline: ingest events, then embed both an active node and an
    /// inactive one.
    #[test]
    fn test_temporal_gnn_model_full_pipeline() {
        let config = TemporalGnnConfig {
            n_nodes: 5,
            memory_dim: 8,
            time_dim: 4,
            num_heads: 2,
            update_method: MemoryUpdateMethod::Gru,
            time_encoding_type: TimeEncodingType::Time2Vec,
        };
        let mut model = TemporalGnnModel::new(config).expect("model");
        let events = vec![
            TemporalEvent::new(0, 1, 1.0),
            TemporalEvent::new(1, 2, 2.0),
            TemporalEvent::new(0, 2, 3.0),
            TemporalEvent::new(2, 3, 4.0),
        ];
        model.process_events(&events).expect("process events");
        // Node 0 participates in events; node 4 never does.
        let emb0 = model.get_node_embedding(0, 5.0, 3).expect("embedding 0");
        let emb4 = model.get_node_embedding(4, 5.0, 3).expect("embedding 4");
        assert_eq!(emb0.len(), 8);
        assert_eq!(emb4.len(), 8);
        let diff: f64 = emb0
            .iter()
            .zip(emb4.iter())
            .map(|(a, b)| (a - b).abs())
            .sum();
        assert!(
            emb0.iter().any(|&v| v.abs() > 1e-12),
            "active node should have non-zero embedding"
        );
        // `diff` was previously computed but never asserted.
        assert!(
            diff > 1e-12,
            "active and inactive nodes should have different embeddings"
        );
    }

    /// Out-of-range endpoint indices are rejected.
    #[test]
    fn test_memory_module_event_out_of_bounds() {
        let mut mem = MemoryModule::new(3, 4, 2, MemoryUpdateMethod::Gru);
        let event = TemporalEvent::new(0, 5, 1.0);
        let result = mem.process_event(&event);
        assert!(result.is_err());
    }

    /// Resetting the model clears all node memory.
    #[test]
    fn test_temporal_gnn_reset() {
        let config = TemporalGnnConfig {
            n_nodes: 3,
            memory_dim: 4,
            time_dim: 2,
            num_heads: 2,
            ..Default::default()
        };
        let mut model = TemporalGnnModel::new(config).expect("model");
        let event = TemporalEvent::new(0, 1, 1.0);
        model.process_events(&[event]).expect("process");
        model.reset();
        let mem_norm: f64 = model.get_memory().iter().map(|x| x * x).sum();
        assert!(mem_norm < 1e-12, "memory should be zero after reset");
    }

    /// Batch encoding must agree with single-timestamp encoding row by row.
    /// (Fixed a mangled `&timestamps` argument that could not compile.)
    #[test]
    fn test_time_encoding_batch() {
        let te = TimeEncoding::new(4, TimeEncodingType::Sinusoidal);
        let timestamps = vec![0.0, 1.0, 5.0, 10.0];
        let batch = te.encode_batch(&timestamps);
        assert_eq!(batch.dim(), (4, 4));
        for (i, &t) in timestamps.iter().enumerate() {
            let single = te.encode(t);
            for j in 0..4 {
                assert!(
                    (batch[[i, j]] - single[j]).abs() < 1e-12,
                    "batch encoding should match single encoding"
                );
            }
        }
    }

    /// Only event endpoints get their last-update timestamps advanced.
    #[test]
    fn test_memory_timestamps_updated() {
        let mut mem = MemoryModule::new(3, 4, 2, MemoryUpdateMethod::Gru);
        assert!(mem.last_update[0] < 1e-12);
        assert!(mem.last_update[1] < 1e-12);
        let event = TemporalEvent::new(0, 1, 5.0);
        mem.process_event(&event).expect("process");
        assert!(
            (mem.last_update[0] - 5.0).abs() < 1e-12,
            "source timestamp should be updated"
        );
        assert!(
            (mem.last_update[1] - 5.0).abs() < 1e-12,
            "target timestamp should be updated"
        );
        assert!(
            mem.last_update[2] < 1e-12,
            "uninvolved node timestamp should remain 0"
        );
    }
}