//! Crash recovery and WAL replay procedures.
//!
//! Implements comprehensive recovery for all crash scenarios:
//! - Power failure during commit
//! - Crash during merge
//! - Crash during WAL write
//! - Crash during ID generation
//! - Crash during metadata update
//!
//! See `docs/PERSISTENCE_DESIGN.md` for detailed recovery procedures.
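//!
//! # Example
//!
//! A minimal startup sketch. The import paths assume the `persistence` modules
//! are publicly re-exported; the block is marked `ignore` because it is a
//! sketch, not a compiled doctest.
//!
//! ```ignore
//! use std::sync::Arc;
//! use jin::persistence::directory::{Directory, MemoryDirectory};
//! use jin::persistence::recovery::RecoveryManager;
//!
//! let dir: Arc<dyn Directory> = Arc::new(MemoryDirectory::new());
//! let manager = RecoveryManager::new(dir);
//!
//! // Replays the WAL on top of the latest checkpoint and reconstructs state.
//! let state = manager.recover().expect("startup recovery failed");
//! manager.verify_consistency(&state).expect("inconsistent state after recovery");
//! ```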

use crate::persistence::checkpoint::{CheckpointReader, SegmentMetadata};
use crate::persistence::directory::Directory;
use crate::persistence::error::{PersistenceError, PersistenceResult};
use crate::persistence::wal::{WalEntry, WalReader};
use std::collections::HashMap;
use std::io::{Read, Write};
use std::path::PathBuf;
use std::sync::Arc;

/// Recovery state after WAL replay.
#[derive(Debug, Clone)]
pub struct RecoveryState {
    /// Active segments (segment_id -> metadata)
    pub active_segments: HashMap<u64, SegmentMetadata>,

    /// Pending merges (merge_id -> segments_to_merge)
    pub pending_merges: HashMap<u64, Vec<u64>>,

    /// Deleted documents (segment_id -> set of doc_ids)
    pub deletes: HashMap<u64, Vec<u32>>,

    /// Last processed entry ID
    pub last_entry_id: u64,

    /// Next segment ID to use
    pub next_segment_id: u64,

    /// Next document ID to use
    pub next_doc_id: u32,
}

/// Recovery manager for crash recovery.
pub struct RecoveryManager {
    directory: Arc<dyn Directory>,
}

impl RecoveryManager {
    /// Create a new recovery manager.
    pub fn new(directory: impl Into<Arc<dyn Directory>>) -> Self {
        Self {
            directory: directory.into(),
        }
    }

    /// Perform startup recovery.
    ///
    /// This implements the 10-step recovery procedure from PERSISTENCE_DESIGN.md:
    /// 1. Check for checkpoint
    /// 2. Load checkpoint if available
    /// 3. Replay WAL entries since checkpoint
    /// 4. Detect and cancel stale merges
    /// 5. Detect stale reader handles
    /// 6. Verify segment integrity
    /// 7. Reconstruct index state
    /// 8. Validate consistency
    /// 9. Clean up temporary files
    /// 10. Return recovery state
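    ///
    /// # Example
    ///
    /// A sketch of acting on the returned state; `merge_scheduler` and its
    /// `cancel` method are hypothetical and not part of this module:
    ///
    /// ```ignore
    /// let state = manager.recover()?;
    /// // Merges that logged StartMerge but never EndMerge/CancelMerge were
    /// // interrupted by the crash and must not be resumed blindly.
    /// for (transaction_id, segment_ids) in &state.pending_merges {
    ///     merge_scheduler.cancel(*transaction_id, segment_ids);
    /// }
    /// ```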
    pub fn recover(&self) -> PersistenceResult<RecoveryState> {
        // Step 1-2: Check for and load the latest checkpoint.
        //
        // CheckpointReader takes a Box<dyn Directory>, but this manager holds an
        // Arc<dyn Directory>; ArcDirectoryWrapper bridges the two by forwarding
        // every call to the shared Arc.
        // TODO: Refactor the Directory trait to use Arc consistently.
        let checkpoint_reader = {
            let dir_clone: Box<dyn Directory> =
                Box::new(ArcDirectoryWrapper::new(self.directory.clone()));
            CheckpointReader::new(dir_clone)
        };

        let checkpoints = checkpoint_reader.list_checkpoints()?;
        let (_checkpoint_path, last_checkpoint_entry_id, initial_segments) =
            if let Some(latest_checkpoint) = checkpoints.last() {
                let full_path = format!("checkpoints/{}", latest_checkpoint);
                match checkpoint_reader.load_checkpoint_with_segments(&full_path) {
                    Ok((header, segments)) => {
                        (Some(latest_checkpoint.clone()), header.entry_id, segments)
                    }
                    Err(e) => {
                        // Checkpoint corrupted: warn and fall back to a full WAL
                        // replay. A production build might instead return an error
                        // or attempt checkpoint repair.
                        eprintln!(
                            "Warning: Failed to load checkpoint {}: {}. Continuing without checkpoint.",
                            latest_checkpoint, e
                        );
                        (None, 0, Vec::new())
                    }
                }
            } else {
                (None, 0, Vec::new())
            };

        // Step 3: Replay WAL entries since checkpoint
        let wal_reader = WalReader::new(self.directory.clone());
        let all_entries = wal_reader.replay()?;

        // Keep only entries recorded after the checkpoint; earlier ones are
        // already reflected in the checkpoint's segment list.
        let entries: Vec<WalEntry> = all_entries
            .into_iter()
            .filter(|entry| {
                // Extract entry_id from entry and filter by checkpoint
                let entry_id = match entry {
                    WalEntry::AddSegment { entry_id, .. } => *entry_id,
                    WalEntry::StartMerge { entry_id, .. } => *entry_id,
                    WalEntry::CancelMerge { entry_id, .. } => *entry_id,
                    WalEntry::EndMerge { entry_id, .. } => *entry_id,
                    WalEntry::DeleteDocuments { entry_id, .. } => *entry_id,
                    WalEntry::Checkpoint { entry_id, .. } => *entry_id,
                };
                entry_id > last_checkpoint_entry_id
            })
            .collect();

        // Step 4-7: Reconstruct state, starting from the checkpoint's segment
        // list (if any) and applying the WAL entries recorded after it.
        let mut active_segments: HashMap<u64, SegmentMetadata> = initial_segments
            .into_iter()
            .map(|meta| (meta.segment_id, meta))
            .collect();
        let mut pending_merges: HashMap<u64, Vec<u64>> = HashMap::new();
        let mut deletes: HashMap<u64, Vec<u32>> = HashMap::new();
        let mut last_entry_id = last_checkpoint_entry_id;

        for entry in entries {
            match entry {
                WalEntry::AddSegment {
                    entry_id,
                    segment_id,
                    doc_count,
                } => {
                    last_entry_id = last_entry_id.max(entry_id);
                    active_segments.insert(
                        segment_id,
                        SegmentMetadata {
                            segment_id,
                            path: format!("segments/segment_{}", segment_id),
                            doc_count,
                            max_doc_id: 0, // Would be tracked in actual implementation
                            size_bytes: 0, // Would be tracked in actual implementation
                        },
                    );
                }
                WalEntry::StartMerge {
                    entry_id,
                    transaction_id,
                    segment_ids,
                } => {
                    last_entry_id = last_entry_id.max(entry_id);
                    // Track pending merge
                    pending_merges.insert(transaction_id, segment_ids);
                }
                WalEntry::EndMerge {
                    entry_id,
                    transaction_id,
                    old_segment_ids,
                    ..
                } => {
                    last_entry_id = last_entry_id.max(entry_id);
                    // Complete merge: remove old segments, add new
                    for old_id in &old_segment_ids {
                        active_segments.remove(old_id);
                    }
                    // new_segment_id would be added via AddSegment entry
                    pending_merges.remove(&transaction_id);
                }
                WalEntry::CancelMerge {
                    entry_id,
                    transaction_id,
                    ..
                } => {
                    last_entry_id = last_entry_id.max(entry_id);
                    pending_merges.remove(&transaction_id);
                }
                WalEntry::DeleteDocuments {
                    entry_id,
                    deletes: delete_list,
                } => {
                    last_entry_id = last_entry_id.max(entry_id);
                    for (segment_id, doc_id) in delete_list {
                        deletes.entry(segment_id).or_default().push(doc_id);
                    }
                }
                WalEntry::Checkpoint { entry_id, .. } => {
                    last_entry_id = last_entry_id.max(entry_id);
                    // Checkpoint entry doesn't change state
                }
            }
        }

        // Step 8: Verify consistency
        // - Check that all active segments exist on disk
        // - Verify no orphaned segments
        // - Check for corrupted segments
        let mut missing_segments = Vec::new();
        for (segment_id, metadata) in &active_segments {
            let segment_dir = &metadata.path;
            if !self.directory.exists(segment_dir) {
                missing_segments.push(*segment_id);
            } else {
                // Verify segment footer exists (basic integrity check)
                let footer_path = format!("{}/footer.bin", segment_dir);
                if !self.directory.exists(&footer_path) {
                    return Err(PersistenceError::InvalidState(format!(
                        "Segment {} exists but footer.bin is missing (corrupted segment)",
                        segment_id
                    )));
                }
            }
        }

        // Report missing segments (in production, might want to error or attempt recovery)
        if !missing_segments.is_empty() {
            eprintln!(
                "Warning: {} segments referenced in state but not found on disk: {:?}",
                missing_segments.len(),
                missing_segments
            );
            // In production, you might want to:
            // - Return an error if strict mode
            // - Attempt to recover from checkpoint
            // - Mark segments as deleted and continue
        }

        // Step 9: Clean up temporary files
        // - Remove stale merge indicators (files like "merge_{transaction_id}.in_progress")
        // - Remove stale handle files (files in handles/ directory that are old)
        // - Remove temporary checkpoint files (*.tmp)
        self.cleanup_temporary_files()?;

        // Step 10: Return recovery state
        // Calculate next_segment_id and next_doc_id from active segments
        let next_segment_id = active_segments.keys().max().copied().unwrap_or(0) + 1;
        let next_doc_id = active_segments
            .values()
            .map(|s| s.max_doc_id)
            .max()
            .unwrap_or(0)
            + 1;

        Ok(RecoveryState {
            active_segments,
            pending_merges,
            deletes,
            last_entry_id,
            next_segment_id,
            next_doc_id,
        })
    }

    /// Verify index consistency after recovery.
    ///
    /// Checks:
    /// - All segments referenced in state exist
    /// - No duplicate segment IDs
    /// - Document counts are consistent
    /// - No orphaned segments
    pub fn verify_consistency(&self, state: &RecoveryState) -> PersistenceResult<()> {
        // Verify all active segments exist
        for segment_id in state.active_segments.keys() {
            let segment_dir = format!("segments/segment_{}", segment_id);
            if !self.directory.exists(&segment_dir) {
                return Err(PersistenceError::InvalidState(format!(
                    "Active segment {} not found on disk",
                    segment_id
                )));
            }
        }

        // Check for orphaned segments (exist on disk but not in state)
        // This would require listing segments directory
        // For now, just verify active segments

        Ok(())
    }

    /// Clean up temporary files from previous operations.
    ///
    /// Removes:
    /// - Stale merge indicator files
    /// - Old temporary checkpoint files (*.tmp)
    /// - Orphaned handle files (if handles directory exists)
    fn cleanup_temporary_files(&self) -> PersistenceResult<()> {
        // Clean up temporary checkpoint files
        if self.directory.exists("checkpoints") {
            if let Ok(files) = self.directory.list_dir("checkpoints") {
                for file in files {
                    if file.ends_with(".tmp") {
                        let temp_path = format!("checkpoints/{}", file);
                        let _ = self.directory.delete(&temp_path); // Best effort
                    }
                }
            }
        }

        // Clean up temporary segment files
        if self.directory.exists("segments") {
            if let Ok(segments) = self.directory.list_dir("segments") {
                for segment_dir in segments {
                    let segment_path = format!("segments/{}", segment_dir);
                    if let Ok(files) = self.directory.list_dir(&segment_path) {
                        for file in files {
                            if file.ends_with(".tmp") {
                                let temp_path = format!("{}/{}", segment_path, file);
                                let _ = self.directory.delete(&temp_path); // Best effort
                            }
                        }
                    }
                }
            }
        }

        // Clean up stale merge indicators
        if self.directory.exists("merges") {
            if let Ok(files) = self.directory.list_dir("merges") {
                for file in files {
                    if file.ends_with(".in_progress") {
                        let merge_path = format!("merges/{}", file);
                        let _ = self.directory.delete(&merge_path); // Best effort
                    }
                }
            }
        }

        Ok(())
    }
}

/// Wrapper to convert Arc<dyn Directory> to Box<dyn Directory>.
///
/// This is a workaround for the Arc/Box mismatch in the API.
/// In the future, we should refactor to use Arc consistently.
struct ArcDirectoryWrapper {
    inner: Arc<dyn Directory>,
}

impl ArcDirectoryWrapper {
    fn new(inner: Arc<dyn Directory>) -> Self {
        Self { inner }
    }
}

impl Directory for ArcDirectoryWrapper {
    fn create_file(&self, path: &str) -> PersistenceResult<Box<dyn Write>> {
        self.inner.create_file(path)
    }

    fn open_file(&self, path: &str) -> PersistenceResult<Box<dyn Read>> {
        self.inner.open_file(path)
    }

    fn exists(&self, path: &str) -> bool {
        self.inner.exists(path)
    }

    fn delete(&self, path: &str) -> PersistenceResult<()> {
        self.inner.delete(path)
    }

    fn atomic_rename(&self, from: &str, to: &str) -> PersistenceResult<()> {
        self.inner.atomic_rename(from, to)
    }

    fn create_dir_all(&self, path: &str) -> PersistenceResult<()> {
        self.inner.create_dir_all(path)
    }

    fn list_dir(&self, path: &str) -> PersistenceResult<Vec<String>> {
        self.inner.list_dir(path)
    }

    fn append_file(&self, path: &str) -> PersistenceResult<Box<dyn Write>> {
        self.inner.append_file(path)
    }

    fn atomic_write(&self, path: &str, data: &[u8]) -> PersistenceResult<()> {
        self.inner.atomic_write(path, data)
    }

    fn file_path(&self, path: &str) -> Option<PathBuf> {
        self.inner.file_path(path)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::persistence::directory::MemoryDirectory;
    use crate::persistence::wal::{WalEntry, WalWriter};

    #[test]
    fn test_recovery_basic() {
        let dir_arc: Arc<dyn Directory> = Arc::new(MemoryDirectory::new());
        dir_arc.create_dir_all("wal").unwrap();
        dir_arc.create_dir_all("checkpoints").unwrap();

        // Write some WAL entries
        let mut wal_writer = WalWriter::new(dir_arc.clone());
        wal_writer
            .append(WalEntry::AddSegment {
                entry_id: 1,
                segment_id: 1,
                doc_count: 100,
            })
            .unwrap();
        wal_writer
            .append(WalEntry::AddSegment {
                entry_id: 2,
                segment_id: 2,
                doc_count: 200,
            })
            .unwrap();

        // Replay the WAL and reconstruct state
        let recovery = RecoveryManager::new(dir_arc);
        let state = recovery.recover().unwrap();

        assert_eq!(state.active_segments.len(), 2);
        assert!(state.active_segments.contains_key(&1));
        assert!(state.active_segments.contains_key(&2));
    }
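
    // A sketch checking the merge lifecycle during replay: a merge that logged
    // StartMerge but crashed before EndMerge/CancelMerge must surface in
    // `pending_merges` so the caller can cancel or redo it.
    #[test]
    fn test_recovery_pending_merge_survives() {
        let dir_arc: Arc<dyn Directory> = Arc::new(MemoryDirectory::new());
        dir_arc.create_dir_all("wal").unwrap();
        dir_arc.create_dir_all("checkpoints").unwrap();

        let mut wal_writer = WalWriter::new(dir_arc.clone());
        wal_writer
            .append(WalEntry::AddSegment {
                entry_id: 1,
                segment_id: 1,
                doc_count: 100,
            })
            .unwrap();
        wal_writer
            .append(WalEntry::StartMerge {
                entry_id: 2,
                transaction_id: 1,
                segment_ids: vec![1],
            })
            .unwrap();

        let state = RecoveryManager::new(dir_arc).recover().unwrap();
        assert_eq!(state.pending_merges.get(&1), Some(&vec![1]));
        assert_eq!(state.last_entry_id, 2);
    }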
}