use serde::{Deserialize, Serialize};
use crate::reranker;
/// Minimum cosine similarity for a new fact to count as a near-duplicate of an
/// existing fact at store time.
pub const STORE_DEDUP_COSINE_THRESHOLD: f64 = 0.85;
/// Cap on how many existing facts are considered as dedup candidates.
/// NOTE(review): not enforced by any function in this module — presumably the
/// caller limits its candidate query with this; confirm at call sites.
pub const STORE_DEDUP_MAX_CANDIDATES: usize = 50;
/// Minimum cosine similarity for two stored facts to be grouped into one
/// cluster during consolidation (stricter than the store-time threshold).
pub const CONSOLIDATION_COSINE_THRESHOLD: f64 = 0.88;
/// A near-duplicate hit: the id of an existing fact together with its cosine
/// similarity to the embedding that was checked against it.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct DupMatch {
// Identifier of the matching existing fact.
pub fact_id: String,
// Cosine similarity score; at or above the threshold that produced the match.
pub similarity: f64,
}
/// One fact considered for consolidation. Input-only (derives `Deserialize`
/// but not `Serialize`): it is parsed from JSON in the wasm/python bindings
/// and never serialized back.
#[derive(Debug, Clone, Deserialize)]
pub struct ConsolidationCandidate {
// Unique fact identifier.
pub id: String,
// Fact text; its length is the final tie-breaker for representative choice.
pub text: String,
// Embedding vector compared via cosine similarity during clustering.
pub embedding: Vec<f32>,
// NOTE(review): not consulted by any function in this module — presumably
// used by callers deciding what to keep; confirm.
pub importance: f64,
// Primary ranking key when picking a cluster representative (higher wins).
pub decay_score: f64,
// Creation timestamp; second tie-breaker (newer wins).
pub created_at: i64,
// Optional version number; unused by the logic here.
pub version: Option<u32>,
}
/// Result of clustering: the id of the fact chosen to represent the cluster,
/// plus the ids of every other member (the candidates to merge/remove).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ConsolidationCluster {
// Id of the member picked by the decay/recency/length ranking.
pub representative: String,
// Ids of all non-representative members; empty for singleton clusters.
pub duplicates: Vec<String>,
}
/// Scans `existing` `(fact_id, embedding)` pairs and returns the one whose
/// cosine similarity to `new_embedding` is highest, provided that similarity
/// reaches `threshold`. Returns `None` when no pair qualifies.
///
/// Ties keep the earliest qualifying entry (a later equal score does not
/// replace an earlier one).
pub fn find_best_near_duplicate(
    new_embedding: &[f32],
    existing: &[(String, Vec<f32>)],
    threshold: f64,
) -> Option<DupMatch> {
    existing.iter().fold(None::<DupMatch>, |best, (fact_id, embedding)| {
        let similarity = reranker::cosine_similarity_f32(new_embedding, embedding);
        // Replace only on a strict improvement so the first of equal scores wins.
        let improves = similarity >= threshold
            && best.as_ref().map_or(true, |b| similarity > b.similarity);
        if improves {
            Some(DupMatch {
                fact_id: fact_id.clone(),
                similarity,
            })
        } else {
            best
        }
    })
}
/// Greedily clusters `candidates` by cosine similarity.
///
/// Each candidate is compared against the seed (first member) of every
/// existing cluster, in cluster-creation order; it joins the first cluster
/// whose seed is at least `threshold` similar, otherwise it starts a new
/// cluster with itself as seed. The representative of each cluster is chosen
/// by `pick_representative_from_refs` (highest decay score, then newest
/// `created_at`, then longest text); all other members are listed as
/// duplicates.
///
/// Note: the result depends on input order, since seeds are fixed greedily.
pub fn cluster_facts(
    candidates: &[ConsolidationCandidate],
    threshold: f64,
) -> Vec<ConsolidationCluster> {
    if candidates.is_empty() {
        return vec![];
    }
    // Parallel vectors: clusters[ci] holds member indices into `candidates`;
    // seed_indices[ci] is the index every newcomer is compared against.
    // (Previously named `rep_embeddings`, which was misleading — it stores
    // candidate indices, not embeddings.)
    let mut clusters: Vec<Vec<usize>> = Vec::new();
    let mut seed_indices: Vec<usize> = Vec::new();
    for (i, candidate) in candidates.iter().enumerate() {
        let found_cluster = seed_indices.iter().position(|&seed_idx| {
            reranker::cosine_similarity_f32(&candidate.embedding, &candidates[seed_idx].embedding)
                >= threshold
        });
        match found_cluster {
            Some(ci) => clusters[ci].push(i),
            None => {
                clusters.push(vec![i]);
                seed_indices.push(i);
            }
        }
    }
    // Every cluster is created with one member and only ever grows, so no
    // emptiness filter is needed here.
    clusters
        .into_iter()
        .map(|member_indices| {
            let members: Vec<&ConsolidationCandidate> =
                member_indices.iter().map(|&i| &candidates[i]).collect();
            // `members` is non-empty, so a representative always exists; the
            // unwrap_or_else is purely defensive.
            let rep_id = pick_representative_from_refs(&members)
                .unwrap_or_else(|| members[0].id.clone());
            let duplicates: Vec<String> = members
                .iter()
                .filter(|c| c.id != rep_id)
                .map(|c| c.id.clone())
                .collect();
            ConsolidationCluster {
                representative: rep_id,
                duplicates,
            }
        })
        .collect()
}
/// Picks the representative id from an owned slice of candidates by borrowing
/// each element and delegating to the by-reference variant.
/// Returns `None` for an empty slice.
pub fn pick_representative(candidates: &[ConsolidationCandidate]) -> Option<String> {
    pick_representative_from_refs(&candidates.iter().collect::<Vec<_>>())
}
/// Chooses the best candidate id by lexicographic ranking: highest
/// `decay_score`, ties broken by the most recent `created_at`, then by the
/// longest `text`. On a complete tie the earliest element wins. Returns
/// `None` for an empty slice.
fn pick_representative_from_refs(candidates: &[&ConsolidationCandidate]) -> Option<String> {
    candidates
        .iter()
        .copied()
        .reduce(|best, challenger| {
            // Only a strict improvement replaces `best`, so the first element
            // survives full ties (and NaN decay scores never replace anything,
            // matching the float comparison semantics of the ranking).
            let beats = challenger.decay_score > best.decay_score
                || (challenger.decay_score == best.decay_score
                    && challenger.created_at > best.created_at)
                || (challenger.decay_score == best.decay_score
                    && challenger.created_at == best.created_at
                    && challenger.text.len() > best.text.len());
            if beats { challenger } else { best }
        })
        .map(|best| best.id.clone())
}
/// Returns the id of the FIRST entry in `existing` whose cosine similarity to
/// `new_embedding` reaches `threshold` — not necessarily the best match,
/// which is why this is deprecated in favor of `find_best_near_duplicate`.
#[deprecated(since = "1.5.0", note = "Use find_best_near_duplicate instead, which returns the highest-similarity match")]
pub fn find_near_duplicate(
    new_embedding: &[f32],
    existing: &[(String, Vec<f32>)],
    threshold: f64,
) -> Option<String> {
    existing
        .iter()
        .find(|(_, embedding)| {
            reranker::cosine_similarity_f32(new_embedding, embedding) >= threshold
        })
        .map(|(fact_id, _)| fact_id.clone())
}
/// Returns `true` when the new fact's importance is at least the existing
/// fact's — i.e. ties favor replacement. Any comparison involving NaN yields
/// `false`, exactly like the `>=` operator on floats.
pub fn should_supersede(new_importance: f64, existing_importance: f64) -> bool {
    use std::cmp::Ordering;
    matches!(
        new_importance.partial_cmp(&existing_importance),
        Some(Ordering::Greater | Ordering::Equal)
    )
}
/// Wire format for one existing fact handed to the wasm/python bindings:
/// `{"id": "...", "embedding": [...]}`.
#[derive(Deserialize)]
// Fields are only read inside cfg-gated bindings and tests, so the compiler
// may flag them as dead when those features are disabled.
#[allow(dead_code)] struct ExistingFactEntry {
id: String,
embedding: Vec<f32>,
}
/// WASM wrapper around the deprecated `find_near_duplicate`. Parses
/// `existing_json` as `[{"id", "embedding"}, ...]` and returns the matching
/// fact id as a JS string, or JS `null` when nothing qualifies.
#[cfg(feature = "wasm")]
#[wasm_bindgen::prelude::wasm_bindgen(js_name = "findNearDuplicate")]
#[allow(deprecated)]
pub fn wasm_find_near_duplicate(
    new_embedding: &[f32],
    existing_json: &str,
    threshold: f64,
) -> Result<wasm_bindgen::JsValue, wasm_bindgen::JsError> {
    let entries: Vec<ExistingFactEntry> = serde_json::from_str(existing_json)
        .map_err(|e| wasm_bindgen::JsError::new(&format!("Invalid existing facts JSON: {}", e)))?;
    let existing: Vec<(String, Vec<f32>)> =
        entries.into_iter().map(|e| (e.id, e.embedding)).collect();
    // Map a hit to a JS string, a miss to JS null.
    Ok(find_near_duplicate(new_embedding, &existing, threshold)
        .map_or(wasm_bindgen::JsValue::NULL, |id| wasm_bindgen::JsValue::from_str(&id)))
}
/// WASM wrapper for `find_best_near_duplicate`. Both the probe embedding and
/// the existing facts arrive as JSON strings; on a hit the `DupMatch` is
/// serialized to a JSON string, otherwise JS `null` is returned.
#[cfg(feature = "wasm")]
#[wasm_bindgen::prelude::wasm_bindgen(js_name = "findBestNearDuplicate")]
pub fn wasm_find_best_near_duplicate(
    new_embedding_json: &str,
    existing_json: &str,
    threshold: f64,
) -> Result<wasm_bindgen::JsValue, wasm_bindgen::JsError> {
    let new_embedding: Vec<f32> = serde_json::from_str(new_embedding_json)
        .map_err(|e| wasm_bindgen::JsError::new(&format!("Invalid new_embedding JSON: {}", e)))?;
    let entries: Vec<ExistingFactEntry> = serde_json::from_str(existing_json)
        .map_err(|e| wasm_bindgen::JsError::new(&format!("Invalid existing facts JSON: {}", e)))?;
    let existing: Vec<(String, Vec<f32>)> =
        entries.into_iter().map(|e| (e.id, e.embedding)).collect();
    if let Some(dup) = find_best_near_duplicate(&new_embedding, &existing, threshold) {
        let json = serde_json::to_string(&dup)
            .map_err(|e| wasm_bindgen::JsError::new(&format!("Serialization error: {}", e)))?;
        Ok(wasm_bindgen::JsValue::from_str(&json))
    } else {
        Ok(wasm_bindgen::JsValue::NULL)
    }
}
/// WASM wrapper for `cluster_facts`: candidates come in as a JSON array and
/// the resulting clusters go out as a JSON string.
#[cfg(feature = "wasm")]
#[wasm_bindgen::prelude::wasm_bindgen(js_name = "clusterFacts")]
pub fn wasm_cluster_facts(
    candidates_json: &str,
    threshold: f64,
) -> Result<wasm_bindgen::JsValue, wasm_bindgen::JsError> {
    let candidates: Vec<ConsolidationCandidate> = serde_json::from_str(candidates_json)
        .map_err(|e| wasm_bindgen::JsError::new(&format!("Invalid candidates JSON: {}", e)))?;
    serde_json::to_string(&cluster_facts(&candidates, threshold))
        .map(|json| wasm_bindgen::JsValue::from_str(&json))
        .map_err(|e| wasm_bindgen::JsError::new(&format!("Serialization error: {}", e)))
}
#[cfg(feature = "wasm")]
#[wasm_bindgen::prelude::wasm_bindgen(js_name = "getStoreDedupCosineThreshold")]
// Exposes STORE_DEDUP_COSINE_THRESHOLD to JS so both sides share one value.
pub fn wasm_store_dedup_cosine_threshold() -> f64 {
STORE_DEDUP_COSINE_THRESHOLD
}
#[cfg(feature = "wasm")]
#[wasm_bindgen::prelude::wasm_bindgen(js_name = "getStoreDedupMaxCandidates")]
// Exposes STORE_DEDUP_MAX_CANDIDATES to JS so both sides share one value.
pub fn wasm_store_dedup_max_candidates() -> usize {
STORE_DEDUP_MAX_CANDIDATES
}
#[cfg(feature = "wasm")]
#[wasm_bindgen::prelude::wasm_bindgen(js_name = "getConsolidationCosineThreshold")]
// Exposes CONSOLIDATION_COSINE_THRESHOLD to JS so both sides share one value.
pub fn wasm_consolidation_cosine_threshold() -> f64 {
CONSOLIDATION_COSINE_THRESHOLD
}
#[cfg(feature = "wasm")]
#[wasm_bindgen::prelude::wasm_bindgen(js_name = "shouldSupersede")]
// Thin JS-facing passthrough to should_supersede (ties favor replacement).
pub fn wasm_should_supersede(new_importance: f64, existing_importance: f64) -> bool {
should_supersede(new_importance, existing_importance)
}
/// Python wrapper around the deprecated `find_near_duplicate`. The embedding
/// arrives as a native list of floats; `existing_json` is
/// `[{"id", "embedding"}, ...]`. Returns the FIRST matching fact id, if any.
#[cfg(feature = "python")]
#[pyo3::prelude::pyfunction]
#[pyo3(name = "find_near_duplicate")]
#[allow(deprecated)]
fn py_find_near_duplicate(
    new_embedding: Vec<f32>,
    existing_json: &str,
    threshold: f64,
) -> pyo3::PyResult<Option<String>> {
    let entries: Vec<ExistingFactEntry> = serde_json::from_str(existing_json)
        .map_err(|e| pyo3::exceptions::PyValueError::new_err(format!("Invalid JSON: {}", e)))?;
    let pairs: Vec<(String, Vec<f32>)> =
        entries.into_iter().map(|e| (e.id, e.embedding)).collect();
    Ok(find_near_duplicate(&new_embedding, &pairs, threshold))
}
/// Python wrapper for `find_best_near_duplicate`. Both inputs arrive as JSON
/// strings; a hit is returned as the `DupMatch` serialized to JSON, a miss as
/// Python `None`.
#[cfg(feature = "python")]
#[pyo3::prelude::pyfunction]
#[pyo3(name = "find_best_near_duplicate")]
fn py_find_best_near_duplicate(
    new_embedding_json: &str,
    existing_json: &str,
    threshold: f64,
) -> pyo3::PyResult<Option<String>> {
    let new_embedding: Vec<f32> = serde_json::from_str(new_embedding_json)
        .map_err(|e| pyo3::exceptions::PyValueError::new_err(format!("Invalid new_embedding JSON: {}", e)))?;
    let entries: Vec<ExistingFactEntry> = serde_json::from_str(existing_json)
        .map_err(|e| pyo3::exceptions::PyValueError::new_err(format!("Invalid existing JSON: {}", e)))?;
    let existing: Vec<(String, Vec<f32>)> =
        entries.into_iter().map(|e| (e.id, e.embedding)).collect();
    // Option<Result<..>> -> Result<Option<..>> so serialization errors surface.
    find_best_near_duplicate(&new_embedding, &existing, threshold)
        .map(|dup| {
            serde_json::to_string(&dup)
                .map_err(|e| pyo3::exceptions::PyValueError::new_err(format!("Serialization error: {}", e)))
        })
        .transpose()
}
/// Python wrapper for `cluster_facts`: candidates arrive as a JSON array and
/// the resulting clusters are returned as a JSON string.
#[cfg(feature = "python")]
#[pyo3::prelude::pyfunction]
#[pyo3(name = "cluster_facts")]
fn py_cluster_facts(
    candidates_json: &str,
    threshold: f64,
) -> pyo3::PyResult<String> {
    let parsed: Vec<ConsolidationCandidate> = serde_json::from_str(candidates_json)
        .map_err(|e| pyo3::exceptions::PyValueError::new_err(format!("Invalid candidates JSON: {}", e)))?;
    serde_json::to_string(&cluster_facts(&parsed, threshold))
        .map_err(|e| pyo3::exceptions::PyValueError::new_err(format!("Serialization error: {}", e)))
}
#[cfg(feature = "python")]
#[pyo3::prelude::pyfunction]
#[pyo3(name = "should_supersede")]
// Thin Python-facing passthrough to should_supersede (ties favor replacement).
fn py_should_supersede(new_importance: f64, existing_importance: f64) -> bool {
should_supersede(new_importance, existing_importance)
}
#[cfg(feature = "python")]
/// Registers every python-exposed function of this module on `m`.
/// NOTE(review): presumably called from the crate's `#[pymodule]` init — confirm.
pub fn register_python_functions(m: &pyo3::prelude::Bound<'_, pyo3::prelude::PyModule>) -> pyo3::PyResult<()> {
// Brings the PyModule method traits (e.g. `add_function`) into scope.
use pyo3::prelude::*;
m.add_function(pyo3::wrap_pyfunction!(py_find_near_duplicate, m)?)?;
m.add_function(pyo3::wrap_pyfunction!(py_find_best_near_duplicate, m)?)?;
m.add_function(pyo3::wrap_pyfunction!(py_cluster_facts, m)?)?;
m.add_function(pyo3::wrap_pyfunction!(py_should_supersede, m)?)?;
Ok(())
}
#[cfg(test)]
// The module deliberately exercises the deprecated find_near_duplicate.
#[allow(deprecated)]
mod tests {
use super::*;
// --- find_near_duplicate: deprecated first-match scan ---
#[test]
fn test_find_near_duplicate_match() {
let new_emb = vec![1.0f32, 0.0, 0.0, 0.0];
let existing = vec![
("fact-1".to_string(), vec![0.0f32, 1.0, 0.0, 0.0]), ("fact-2".to_string(), vec![0.99f32, 0.1, 0.0, 0.0]), ];
let result = find_near_duplicate(&new_emb, &existing, STORE_DEDUP_COSINE_THRESHOLD);
assert_eq!(result, Some("fact-2".to_string()));
}
#[test]
fn test_find_near_duplicate_no_match() {
let new_emb = vec![1.0f32, 0.0, 0.0, 0.0];
let existing = vec![
("fact-1".to_string(), vec![0.0f32, 1.0, 0.0, 0.0]), ("fact-2".to_string(), vec![0.0f32, 0.0, 1.0, 0.0]), ];
let result = find_near_duplicate(&new_emb, &existing, STORE_DEDUP_COSINE_THRESHOLD);
assert!(result.is_none());
}
#[test]
fn test_find_near_duplicate_empty() {
let new_emb = vec![1.0f32, 0.0, 0.0, 0.0];
let existing: Vec<(String, Vec<f32>)> = vec![];
let result = find_near_duplicate(&new_emb, &existing, STORE_DEDUP_COSINE_THRESHOLD);
assert!(result.is_none());
}
#[test]
fn test_find_near_duplicate_exact_match() {
let new_emb = vec![1.0f32, 0.0, 0.0, 0.0];
let existing = vec![
("fact-1".to_string(), vec![1.0f32, 0.0, 0.0, 0.0]), ];
let result = find_near_duplicate(&new_emb, &existing, STORE_DEDUP_COSINE_THRESHOLD);
assert_eq!(result, Some("fact-1".to_string()));
}
// Documents the deprecated behavior: first qualifying entry wins even when a
// later entry is more similar.
#[test]
fn test_find_near_duplicate_returns_first_match() {
let new_emb = vec![1.0f32, 0.0, 0.0, 0.0];
let existing = vec![
("fact-1".to_string(), vec![0.98f32, 0.1, 0.0, 0.0]), ("fact-2".to_string(), vec![0.99f32, 0.05, 0.0, 0.0]), ];
let result = find_near_duplicate(&new_emb, &existing, STORE_DEDUP_COSINE_THRESHOLD);
assert_eq!(result, Some("fact-1".to_string()));
}
#[test]
fn test_find_near_duplicate_custom_threshold() {
let new_emb = vec![1.0f32, 0.0, 0.0, 0.0];
let existing = vec![
("fact-1".to_string(), vec![0.9f32, 0.44, 0.0, 0.0]), ];
assert!(find_near_duplicate(&new_emb, &existing, 0.85).is_some());
assert!(find_near_duplicate(&new_emb, &existing, 0.95).is_none());
}
#[test]
fn test_find_near_duplicate_json_roundtrip() {
let json = r#"[
{"id": "fact-1", "embedding": [0.0, 1.0, 0.0, 0.0]},
{"id": "fact-2", "embedding": [0.99, 0.1, 0.0, 0.0]}
]"#;
let entries: Vec<ExistingFactEntry> = serde_json::from_str(json).unwrap();
let existing: Vec<(String, Vec<f32>)> = entries
.into_iter()
.map(|e| (e.id, e.embedding))
.collect();
let new_emb = vec![1.0f32, 0.0, 0.0, 0.0];
let result = find_near_duplicate(&new_emb, &existing, STORE_DEDUP_COSINE_THRESHOLD);
assert_eq!(result, Some("fact-2".to_string()));
}
// --- find_best_near_duplicate: highest-similarity scan ---
#[test]
fn test_best_near_duplicate_returns_highest_similarity() {
let new_emb = vec![1.0f32, 0.0, 0.0, 0.0];
let existing = vec![
("fact-1".to_string(), vec![0.90f32, 0.44, 0.0, 0.0]), ("fact-2".to_string(), vec![0.99f32, 0.05, 0.0, 0.0]), ("fact-3".to_string(), vec![0.95f32, 0.31, 0.0, 0.0]), ];
let result = find_best_near_duplicate(&new_emb, &existing, STORE_DEDUP_COSINE_THRESHOLD);
assert!(result.is_some());
let dup = result.unwrap();
assert_eq!(dup.fact_id, "fact-2");
assert!(dup.similarity > 0.99);
}
#[test]
fn test_best_near_duplicate_returns_none_below_threshold() {
let new_emb = vec![1.0f32, 0.0, 0.0, 0.0];
let existing = vec![
("fact-1".to_string(), vec![0.0f32, 1.0, 0.0, 0.0]), ("fact-2".to_string(), vec![0.0f32, 0.0, 1.0, 0.0]), ];
let result = find_best_near_duplicate(&new_emb, &existing, STORE_DEDUP_COSINE_THRESHOLD);
assert!(result.is_none());
}
#[test]
fn test_best_near_duplicate_empty_existing() {
let new_emb = vec![1.0f32, 0.0, 0.0, 0.0];
let existing: Vec<(String, Vec<f32>)> = vec![];
let result = find_best_near_duplicate(&new_emb, &existing, STORE_DEDUP_COSINE_THRESHOLD);
assert!(result.is_none());
}
#[test]
fn test_best_near_duplicate_single_match() {
let new_emb = vec![1.0f32, 0.0, 0.0, 0.0];
let existing = vec![
("fact-1".to_string(), vec![0.99f32, 0.1, 0.0, 0.0]),
];
let result = find_best_near_duplicate(&new_emb, &existing, STORE_DEDUP_COSINE_THRESHOLD);
assert!(result.is_some());
assert_eq!(result.unwrap().fact_id, "fact-1");
}
// Contrasts old (first-match) and new (best-match) behavior on one input.
#[test]
fn test_best_near_duplicate_differs_from_first_match() {
let new_emb = vec![1.0f32, 0.0, 0.0, 0.0];
let existing = vec![
("fact-1".to_string(), vec![0.90f32, 0.44, 0.0, 0.0]), ("fact-2".to_string(), vec![0.99f32, 0.05, 0.0, 0.0]), ];
let old_result = find_near_duplicate(&new_emb, &existing, STORE_DEDUP_COSINE_THRESHOLD);
assert_eq!(old_result, Some("fact-1".to_string()));
let new_result = find_best_near_duplicate(&new_emb, &existing, STORE_DEDUP_COSINE_THRESHOLD);
assert_eq!(new_result.unwrap().fact_id, "fact-2");
}
// Helper: builds a ConsolidationCandidate with fixed importance 0.5.
fn make_candidate(id: &str, embedding: Vec<f32>, decay_score: f64, created_at: i64, text: &str) -> ConsolidationCandidate {
ConsolidationCandidate {
id: id.to_string(),
text: text.to_string(),
embedding,
importance: 0.5,
decay_score,
created_at,
version: None,
}
}
// --- cluster_facts ---
#[test]
fn test_cluster_facts_groups_similar() {
let candidates = vec![
make_candidate("a", vec![1.0, 0.0, 0.0, 0.0], 1.0, 100, "fact a"),
make_candidate("b", vec![0.99, 0.1, 0.0, 0.0], 0.9, 90, "fact b"), make_candidate("c", vec![0.0, 1.0, 0.0, 0.0], 1.0, 100, "fact c"), ];
let clusters = cluster_facts(&candidates, STORE_DEDUP_COSINE_THRESHOLD);
assert_eq!(clusters.len(), 2);
let cluster_ab = clusters.iter().find(|c| c.representative == "a" || c.duplicates.contains(&"a".to_string()));
assert!(cluster_ab.is_some());
let cluster_ab = cluster_ab.unwrap();
assert_eq!(cluster_ab.representative, "a");
assert_eq!(cluster_ab.duplicates, vec!["b".to_string()]);
let cluster_c = clusters.iter().find(|c| c.representative == "c");
assert!(cluster_c.is_some());
assert!(cluster_c.unwrap().duplicates.is_empty());
}
#[test]
fn test_cluster_facts_empty() {
let clusters = cluster_facts(&[], STORE_DEDUP_COSINE_THRESHOLD);
assert!(clusters.is_empty());
}
#[test]
fn test_cluster_facts_all_unique() {
let candidates = vec![
make_candidate("a", vec![1.0, 0.0, 0.0, 0.0], 1.0, 100, "fact a"),
make_candidate("b", vec![0.0, 1.0, 0.0, 0.0], 1.0, 100, "fact b"),
make_candidate("c", vec![0.0, 0.0, 1.0, 0.0], 1.0, 100, "fact c"),
];
let clusters = cluster_facts(&candidates, STORE_DEDUP_COSINE_THRESHOLD);
assert_eq!(clusters.len(), 3);
for cluster in &clusters {
assert!(cluster.duplicates.is_empty());
}
}
// "b" wins: highest decay score, with created_at breaking the tie against "c".
#[test]
fn test_cluster_facts_all_duplicates() {
let candidates = vec![
make_candidate("a", vec![1.0, 0.0, 0.0, 0.0], 0.5, 100, "fact a"),
make_candidate("b", vec![1.0, 0.0, 0.0, 0.0], 0.9, 200, "fact b"),
make_candidate("c", vec![1.0, 0.0, 0.0, 0.0], 0.9, 100, "fact c"),
];
let clusters = cluster_facts(&candidates, STORE_DEDUP_COSINE_THRESHOLD);
assert_eq!(clusters.len(), 1);
let cluster = &clusters[0];
assert_eq!(cluster.representative, "b");
assert_eq!(cluster.duplicates.len(), 2);
}
// --- pick_representative: decay score, then created_at, then text length ---
#[test]
fn test_pick_representative_highest_decay() {
let candidates = vec![
make_candidate("a", vec![], 0.5, 100, "short"),
make_candidate("b", vec![], 0.9, 50, "short"),
make_candidate("c", vec![], 0.7, 200, "short"),
];
assert_eq!(pick_representative(&candidates), Some("b".to_string()));
}
#[test]
fn test_pick_representative_tiebreak_created_at() {
let candidates = vec![
make_candidate("a", vec![], 0.9, 100, "short"),
make_candidate("b", vec![], 0.9, 200, "short"),
make_candidate("c", vec![], 0.9, 50, "short"),
];
assert_eq!(pick_representative(&candidates), Some("b".to_string()));
}
#[test]
fn test_pick_representative_tiebreak_text_length() {
let candidates = vec![
make_candidate("a", vec![], 0.9, 100, "short"),
make_candidate("b", vec![], 0.9, 100, "a much longer text for this fact"),
make_candidate("c", vec![], 0.9, 100, "medium text"),
];
assert_eq!(pick_representative(&candidates), Some("b".to_string()));
}
#[test]
fn test_pick_representative_empty() {
assert_eq!(pick_representative(&[]), None);
}
// --- should_supersede: ties favor replacement ---
#[test]
fn test_should_supersede_higher() {
assert!(should_supersede(0.9, 0.5));
}
#[test]
fn test_should_supersede_equal() {
assert!(should_supersede(0.8, 0.8));
}
#[test]
fn test_should_supersede_lower() {
assert!(!should_supersede(0.3, 0.7));
}
// --- module constants ---
#[test]
fn test_constants() {
assert!((STORE_DEDUP_COSINE_THRESHOLD - 0.85).abs() < 1e-10);
assert_eq!(STORE_DEDUP_MAX_CANDIDATES, 50);
assert!((CONSOLIDATION_COSINE_THRESHOLD - 0.88).abs() < 1e-10);
}
#[test]
fn test_consolidation_threshold_higher_than_dedup() {
assert!(CONSOLIDATION_COSINE_THRESHOLD > STORE_DEDUP_COSINE_THRESHOLD);
}
// --- JSON round-trips and serde derives ---
#[test]
fn test_best_near_duplicate_json_roundtrip() {
let new_emb = vec![1.0f32, 0.0, 0.0, 0.0];
let json = r#"[
{"id": "fact-1", "embedding": [0.0, 1.0, 0.0, 0.0]},
{"id": "fact-2", "embedding": [0.99, 0.1, 0.0, 0.0]}
]"#;
let entries: Vec<ExistingFactEntry> = serde_json::from_str(json).unwrap();
let existing: Vec<(String, Vec<f32>)> = entries
.into_iter()
.map(|e| (e.id, e.embedding))
.collect();
let result = find_best_near_duplicate(&new_emb, &existing, STORE_DEDUP_COSINE_THRESHOLD);
assert_eq!(result.unwrap().fact_id, "fact-2");
}
#[test]
fn test_consolidation_candidate_json_roundtrip() {
let json = r#"[
{"id": "a", "text": "fact a", "embedding": [1.0, 0.0], "importance": 0.5, "decay_score": 1.0, "created_at": 100, "version": null},
{"id": "b", "text": "fact b", "embedding": [0.0, 1.0], "importance": 0.5, "decay_score": 0.9, "created_at": 90, "version": 2}
]"#;
let candidates: Vec<ConsolidationCandidate> = serde_json::from_str(json).unwrap();
assert_eq!(candidates.len(), 2);
assert_eq!(candidates[0].id, "a");
assert_eq!(candidates[1].version, Some(2));
}
#[test]
fn test_dup_match_serialization() {
let dup = DupMatch {
fact_id: "fact-1".to_string(),
similarity: 0.95,
};
let json = serde_json::to_string(&dup).unwrap();
assert!(json.contains("fact-1"));
assert!(json.contains("0.95"));
let parsed: DupMatch = serde_json::from_str(&json).unwrap();
assert_eq!(parsed, dup);
}
#[test]
fn test_consolidation_cluster_serialization() {
let cluster = ConsolidationCluster {
representative: "a".to_string(),
duplicates: vec!["b".to_string(), "c".to_string()],
};
let json = serde_json::to_string(&cluster).unwrap();
let parsed: ConsolidationCluster = serde_json::from_str(&json).unwrap();
assert_eq!(parsed, cluster);
}
}