//! A simple key-value store.
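//!
//! The layout is Bitcask-like: entries are appended to numbered data files
//! on disk, an in-memory `keydir` maps each key to the file, offset and
//! size of its latest entry, and hint files speed up rebuilding the keydir
//! on startup.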
use crate::config;
use crate::error::{Result, TinkvError};
use crate::segment::{DataEntry, DataFile, HintFile};
use glob::glob;
use log::{debug, info, trace};
use std::collections::{BTreeMap, HashMap};
use std::fs;
use std::fs::create_dir_all;

use std::path::{Path, PathBuf};

/// The `Store` stores key/value pairs.
///
/// Key/value pairs are persisted in data files.
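///
/// A minimal usage sketch (this assumes the crate root re-exports the type
/// as `tinkv::Store`; adjust the import to match the actual crate layout):
///
/// ```ignore
/// use tinkv::Store;
///
/// let mut store = Store::open("/tmp/tinkv-demo").expect("failed to open store");
/// store.set(b"hello", b"world").expect("set failed");
/// assert_eq!(store.get(b"hello").expect("get failed"), Some(b"world".to_vec()));
/// store.remove(b"hello").expect("remove failed");
/// assert_eq!(store.get(b"hello").expect("get failed"), None);
/// ```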
#[derive(Debug)]
pub struct Store {
    /// Directory of the database.
    path: PathBuf,
    /// Immutable data files, keyed by file id.
    data_files: HashMap<u64, DataFile>,
    /// Only the active data file is writable.
    active_data_file: Option<DataFile>,
    /// The keydir maintains an in-memory index of keys for fast lookups.
    keydir: BTreeMap<Vec<u8>, KeyDirEntry>,
    /// Statistics about the store.
    stats: Stats,
    /// Store configuration.
    config: Config,
}

impl Store {
    /// Initialize a key-value store at the given path.
    /// If the directory does not exist, it will be created.
    pub fn open<P: AsRef<Path>>(path: P) -> Result<Self> {
        Self::open_with_options(path, Config::default())
    }

    /// Open a datastore directory with custom options.
    fn open_with_options<P: AsRef<Path>>(path: P, config: Config) -> Result<Self> {
        info!("open store path: {}", path.as_ref().display());
        create_dir_all(&path)?;
        let mut store = Store {
            path: path.as_ref().to_path_buf(),
            data_files: HashMap::new(),
            active_data_file: None,
            keydir: BTreeMap::new(),
            stats: Stats::default(),
            config,
        };

        store.open_data_files()?;
        store.build_keydir()?;
        store.new_active_data_file(None)?;

        Ok(store)
    }

    /// Open data files (they are immutable).
    fn open_data_files(&mut self) -> Result<()> {
        let pattern = format!("{}/*{}", self.path.display(), config::DATA_FILE_SUFFIX);
        trace!("read data files with pattern: {}", &pattern);
        for path in glob(&pattern)? {
            let df = DataFile::new(path?.as_path(), false)?;

            self.stats.total_data_files += 1;
            self.stats.size_of_all_data_files += df.size;

            self.data_files.insert(df.id, df);
        }
        trace!("got {} immutable data files", self.data_files.len());

        Ok(())
    }

    fn build_keydir(&mut self) -> Result<()> {
        // TODO: build keydir from an index file.
        // Fall back to scanning the data files to rebuild the keydir.
        let mut file_ids = self.data_files.keys().cloned().collect::<Vec<_>>();
        file_ids.sort();

        for file_id in file_ids {
            let hint_file_path = segment_hint_file_path(&self.path, file_id);
            if hint_file_path.exists() {
                self.build_keydir_from_hint_file(&hint_file_path)?;
            } else {
                self.build_keydir_from_data_file(file_id)?;
            }
        }

        // update stats.
        self.stats.total_active_entries = self.keydir.len() as u64;

        info!(
            "build keydir done, got {} keys. current stats: {:?}",
            self.keydir.len(),
            self.stats
        );
        Ok(())
    }

    fn build_keydir_from_hint_file(&mut self, path: &Path) -> Result<()> {
        trace!("build keydir from hint file {}", path.display());
        let mut hint_file = HintFile::new(path, false)?;
        let hint_file_id = hint_file.id;

        for entry in hint_file.entry_iter() {
            let keydir_ent = KeyDirEntry::new(hint_file_id, entry.offset, entry.size);
            let old = self.keydir.insert(entry.key, keydir_ent);
            if let Some(old_ent) = old {
                self.stats.size_of_stale_entries += old_ent.size;
                self.stats.total_stale_entries += 1;
            }
        }
        Ok(())
    }

    fn build_keydir_from_data_file(&mut self, file_id: u64) -> Result<()> {
        let df = self.data_files.get(&file_id).unwrap();
        info!("build keydir from data file {}", df.path.display());
        for entry in df.entry_iter() {
            if !entry.is_valid() {
                return Err(TinkvError::DataEntryCorrupted {
                    file_id: df.id,
                    key: entry.key().into(),
                    offset: entry.offset,
                });
            }

            if entry.value() == config::REMOVE_TOMESTONE {
                trace!("{} is a remove tomestone", &entry);
                self.stats.total_stale_entries += 1;
                self.stats.size_of_stale_entries += entry.size;

                if let Some(old_ent) = self.keydir.remove(entry.key()) {
                    self.stats.size_of_stale_entries += old_ent.size;
                    self.stats.total_stale_entries += 1;
                }
            } else {
                let keydir_ent = KeyDirEntry::new(file_id, entry.offset, entry.size);
                let old = self.keydir.insert(entry.key().into(), keydir_ent);
                if let Some(old_ent) = old {
                    self.stats.size_of_stale_entries += old_ent.size;
                    self.stats.total_stale_entries += 1;
                }
            }
        }
        Ok(())
    }

    fn new_active_data_file(&mut self, file_id: Option<u64>) -> Result<()> {
        // default next file id should be `max_file_id` + 1
        let next_file_id: u64 =
            file_id.unwrap_or_else(|| self.data_files.keys().max().unwrap_or(&0) + 1);

        // build data file path.
        let p = segment_data_file_path(&self.path, next_file_id);
        debug!("new data file at: {}", &p.display());
        self.active_data_file = Some(DataFile::new(p.as_path(), true)?);

        // prepare a read-only data file with the same path.
        let df = DataFile::new(p.as_path(), false)?;
        self.data_files.insert(df.id, df);

        self.stats.total_data_files += 1;

        Ok(())
    }

    /// Save a key-value pair to the database.
    pub fn set(&mut self, key: &[u8], value: &[u8]) -> Result<()> {
        if key.len() as u64 > self.config.max_key_size {
            return Err(TinkvError::KeyIsTooLarge);
        }

        if value.len() as u64 > self.config.max_value_size {
            return Err(TinkvError::ValueIsTooLarge);
        }

        // save data to data file.
        let ent = self.write(key, value)?;

        // update keydir, the in-memory index.
        let old = self.keydir.insert(
            key.to_vec(),
            KeyDirEntry::new(ent.file_id, ent.offset, ent.size),
        );

        match old {
            None => {
                self.stats.total_active_entries += 1;
            }
            Some(entry) => {
                self.stats.size_of_stale_entries += entry.size;
                self.stats.total_stale_entries += 1;
            }
        }

        self.stats.size_of_all_data_files += ent.size;

        Ok(())
    }

    /// Remove a key from the database.
    ///
    /// A tombstone entry is appended to the active data file; the space is
    /// reclaimed later by `compact`.
    pub fn remove(&mut self, key: &[u8]) -> Result<()> {
        if self.keydir.contains_key(key) {
            trace!(
                "remove key '{}' from datastore",
                String::from_utf8_lossy(key)
            );
            // write a tombstone; it will be dropped during compaction.
            let entry = self.write(key, config::REMOVE_TOMESTONE)?;
            // remove key from in-memory index.
            let old = self.keydir.remove(key).expect("key not found");

            self.stats.total_active_entries -= 1;
            self.stats.total_stale_entries += 1;
            self.stats.size_of_all_data_files += entry.size;
            self.stats.size_of_stale_entries += old.size + entry.size;

            Ok(())
        } else {
            trace!(
                "remove key '{}' failed, not found in datastore",
                String::from_utf8_lossy(key)
            );
            Err(TinkvError::KeyNotFound(key.into()))
        }
    }

    fn write(&mut self, key: &[u8], value: &[u8]) -> Result<DataEntry> {
        let mut df = self
            .active_data_file
            .as_mut()
            .expect("active data file not found");

        // check file size and switch to a new data file if necessary.
        if df.size > self.config.max_data_file_size {
            info!("size of active data file '{}' exceeds maximum size of {} bytes, switch to another one.", df.path.display(), self.config.max_data_file_size);

            // flush pending writes to disk before rotating; propagate sync
            // errors instead of silently ignoring them.
            df.sync()?;

            // create a new active data file.
            self.new_active_data_file(None)?;

            // get the new active data file for writing.
            df = self
                .active_data_file
                .as_mut()
                .expect("active data file not found");
        }

        let entry = df.write(key, value)?;
        if self.config.sync {
            // make sure data entry is persisted in storage.
            df.sync()?;
        }

        Ok(entry)
    }

    /// Get the value of the given key, or `None` if the key is not present.
    pub fn get(&mut self, key: &[u8]) -> Result<Option<Vec<u8>>> {
        if let Some(keydir_ent) = self.keydir.get(key) {
            trace!(
                "found key '{}' in keydir, got value {:?}",
                String::from_utf8_lossy(key),
                &keydir_ent
            );
            let df = self
                .data_files
                .get_mut(&keydir_ent.segment_id)
                .unwrap_or_else(|| panic!("data file {} not found", &keydir_ent.segment_id));
            let entry = df.read(keydir_ent.offset)?;
            if !entry.is_valid() {
                Err(TinkvError::DataEntryCorrupted {
                    file_id: df.id,
                    key: entry.key().into(),
                    offset: entry.offset,
                })
            } else {
                Ok(Some(entry.value().into()))
            }
        } else {
            Ok(None)
        }
    }

    /// Clear stale entries from data files and reclaim disk space.
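    ///
    /// A minimal sketch (assumes an open mutable `store` and a calling
    /// function that returns this crate's `Result`):
    ///
    /// ```ignore
    /// store.set(b"k", b"v1")?;
    /// store.set(b"k", b"v2")?; // the first entry for `k` is now stale.
    /// store.compact()?;        // live entries are rewritten; stale ones are dropped.
    /// assert_eq!(store.stats().total_stale_entries, 0);
    /// ```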
    pub fn compact(&mut self) -> Result<()> {
        info!("compacting {} data files", self.data_files.len());

        let next_file_id = self.next_file_id();

        // switch to another active data file.
        self.new_active_data_file(Some(next_file_id + 1))?;
        let mut compaction_data_file_id = next_file_id + 2;

        // create a new data file for compaction.
        let data_file_path = segment_data_file_path(&self.path, compaction_data_file_id);

        debug!("create compaction data file: {}", data_file_path.display());
        let mut compaction_df = DataFile::new(&data_file_path, true)?;

        // register read-only compaction data file.
        self.data_files
            .insert(compaction_df.id, DataFile::new(&compaction_df.path, false)?);

        // create a new hint file to store compaction file index.
        let hint_file_path = segment_hint_file_path(&self.path, compaction_data_file_id);

        debug!("create compaction hint file: {}", hint_file_path.display());
        let mut hint_file = HintFile::new(&hint_file_path, true)?;

        let mut total_size_of_compaction_files = 0;

        // copy all live data entries into the compaction data file,
        // rotating to a new one whenever it exceeds the size threshold.
        for (key, keydir_ent) in self.keydir.iter_mut() {
            if compaction_df.size > self.config.max_data_file_size {
                total_size_of_compaction_files += compaction_df.size;

                compaction_df.sync()?;
                hint_file.sync()?;

                compaction_data_file_id += 1;
                // switch to a new data file for compaction.
                let data_file_path = segment_data_file_path(&self.path, compaction_data_file_id);
                debug!(
                    "file size exceeds limit, switch to another compaction data file: {}",
                    data_file_path.display()
                );
                compaction_df = DataFile::new(&data_file_path, true)?;

                self.data_files
                    .insert(compaction_df.id, DataFile::new(&compaction_df.path, false)?);

                let hint_file_path = segment_hint_file_path(&self.path, compaction_data_file_id);
                debug!(
                    "switch to another compaction hint file: {}",
                    hint_file_path.display()
                );
                hint_file = HintFile::new(&hint_file_path, true)?;
            }

            let df = self
                .data_files
                .get_mut(&keydir_ent.segment_id)
                .expect("cannot find data file");
            trace!(
                "copy key '{}': original data file({}) -> compaction data file({})",
                String::from_utf8_lossy(key),
                df.path.display(),
                compaction_df.path.display()
            );

            let offset = compaction_df.copy_bytes_from(df, keydir_ent.offset, keydir_ent.size)?;

            keydir_ent.segment_id = compaction_df.id;
            keydir_ent.offset = offset;

            hint_file.write(key, keydir_ent.offset, keydir_ent.size)?;
        }

        compaction_df.sync()?;
        hint_file.sync()?;

        total_size_of_compaction_files += compaction_df.size;

        // remove stale segments.
        let mut stale_segment_count = 0;
        for df in self.data_files.values() {
            if df.id <= next_file_id {
                if df.path.exists() {
                    debug!("try to remove stale data file: {}", df.path.display());
                    fs::remove_file(&df.path)?;
                }

                let hint_file_path = segment_hint_file_path(&self.path, df.id);
                if hint_file_path.exists() {
                    debug!(
                        "try to remove stale hint file: {}",
                        &hint_file_path.display()
                    );
                    fs::remove_file(&hint_file_path)?;
                }

                stale_segment_count += 1;
            }
        }

        self.data_files.retain(|&k, _| k > next_file_id);
        debug!("cleaned {} stale segments", stale_segment_count);

        // update stats.
        self.stats.total_data_files = self.data_files.len() as u64;
        self.stats.total_active_entries = self.keydir.len() as u64;
        self.stats.total_stale_entries = 0;
        self.stats.size_of_stale_entries = 0;
        self.stats.size_of_all_data_files = total_size_of_compaction_files;

        Ok(())
    }

    fn next_file_id(&self) -> u64 {
        self.active_data_file
            .as_ref()
            .expect("active data file not found")
            .id
            + 1
    }

    /// Return the current stats of the datastore.
    pub fn stats(&self) -> &Stats {
        &self.stats
    }

    /// Return an iterator over all keys in the datastore.
    pub fn keys(&self) -> impl Iterator<Item = &Vec<u8>> {
        self.keydir.keys()
    }

    /// Return the total number of keys in the datastore.
    pub fn len(&self) -> u64 {
        self.keydir.len() as u64
    }

    /// Return `true` if the datastore is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Return `true` if the datastore contains the given key.
    pub fn contains_key(&self, key: &[u8]) -> bool {
        self.keydir.contains_key(key)
    }

    /// Iterate over all entries in the datastore, calling function `f`
    /// for each key-value pair.
    ///
    /// If `f` returns an `Err`, iteration stops and the `Err` is
    /// propagated to the caller.
    ///
    /// Return `Ok(true)` from `f` to continue iteration, or `Ok(false)`
    /// to stop.
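    ///
    /// A minimal sketch (assumes an open mutable `store` and a calling
    /// function that returns this crate's `Result`):
    ///
    /// ```ignore
    /// let mut seen = 0;
    /// store.for_each(&mut |key, value| {
    ///     println!("{:?} => {:?}", key, value);
    ///     seen += 1;
    ///     Ok(seen < 10) // continue until 10 entries have been visited.
    /// })?;
    /// ```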
    pub fn for_each<F>(&mut self, f: &mut F) -> Result<()>
    where
        F: FnMut(&[u8], &[u8]) -> Result<bool>,
    {
        // Collect keys up front: `get` takes `&mut self`, so we cannot hold
        // an immutable borrow of the keydir while calling it.
        let keys: Vec<Vec<u8>> = self.keys().cloned().collect();

        for key in keys {
            if let Some(value) = self.get(&key)? {
                let keep_going = f(&key, &value)?;
                if !keep_going {
                    break;
                }
            }
        }
        Ok(())
    }

    /// Force any pending writes to be flushed to disk.
    pub fn sync(&mut self) -> Result<()> {
        if let Some(df) = self.active_data_file.as_mut() {
            df.sync()?;
        }
        Ok(())
    }

    /// Close the tinkv datastore and flush all pending writes to disk.
    pub fn close(&mut self) -> Result<()> {
        self.sync()?;
        Ok(())
    }
}

impl Drop for Store {
    fn drop(&mut self) {
        // ignore sync errors.
        trace!("sync all pending writes to disk.");
        let _r = self.sync();
    }
}

/// Entry definition in the keydir (the in-memory index).
#[derive(Debug, Clone, Copy)]
struct KeyDirEntry {
    /// data file id that stores key value pair.
    segment_id: u64,
    /// data entry offset in data file.
    offset: u64,
    /// data entry size.
    size: u64,
}

impl KeyDirEntry {
    fn new(segment_id: u64, offset: u64, size: u64) -> Self {
        KeyDirEntry {
            segment_id,
            offset,
            size,
        }
    }
}

#[derive(Debug, Copy, Clone, Default)]
pub struct Stats {
    /// size (bytes) of stale entries in data files, which can be
    /// deleted after a compaction.
    pub size_of_stale_entries: u64,
    /// total stale entries in data files.
    pub total_stale_entries: u64,
    /// total active key value pairs in database.
    pub total_active_entries: u64,
    /// total data files.
    pub total_data_files: u64,
    /// total size (bytes) of all data files.
    pub size_of_all_data_files: u64,
}

fn segment_data_file_path(dir: &Path, segment_id: u64) -> PathBuf {
    segment_file_path(dir, segment_id, config::DATA_FILE_SUFFIX)
}

fn segment_hint_file_path(dir: &Path, segment_id: u64) -> PathBuf {
    segment_file_path(dir, segment_id, config::HINT_FILE_SUFFIX)
}

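/// Build a segment file path from a zero-padded, 12-digit file id plus the
/// given suffix, so that lexicographic order of file names matches numeric
/// order of ids (e.g. segment id `42` yields the stem `000000000042`).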
fn segment_file_path(dir: &Path, segment_id: u64, suffix: &str) -> PathBuf {
    let mut p = dir.to_path_buf();
    p.push(format!("{:012}{}", segment_id, suffix));
    p
}

#[derive(Debug, Copy, Clone)]
pub(crate) struct Config {
    max_data_file_size: u64,
    max_key_size: u64,
    max_value_size: u64,
    // Sync data to storage after each write operation, trading write
    // performance for durability.
    sync: bool,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            max_data_file_size: config::DEFAULT_MAX_DATA_FILE_SIZE,
            max_key_size: config::DEFAULT_MAX_KEY_SIZE,
            max_value_size: config::DEFAULT_MAX_VALUE_SIZE,
            sync: false,
        }
    }
}

/// Build custom open options.
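///
/// A minimal sketch (assumes the crate root re-exports the type as
/// `tinkv::OpenOptions`):
///
/// ```ignore
/// use tinkv::OpenOptions;
///
/// let mut store = OpenOptions::new()
///     .max_data_file_size(64 * 1024 * 1024) // hypothetical 64 MiB limit.
///     .sync(true)
///     .open("/tmp/tinkv-demo")
///     .expect("failed to open store");
/// ```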
#[derive(Debug, Default)]
pub struct OpenOptions {
    config: Config,
}

impl OpenOptions {
    #[allow(dead_code)]
    pub fn new() -> Self {
        Self::default()
    }

    #[allow(dead_code)]
    pub fn max_data_file_size(&mut self, value: u64) -> &mut Self {
        self.config.max_data_file_size = value;
        self
    }

    #[allow(dead_code)]
    pub fn max_key_size(&mut self, value: u64) -> &mut Self {
        self.config.max_key_size = value;
        self
    }

    #[allow(dead_code)]
    pub fn max_value_size(&mut self, value: u64) -> &mut Self {
        self.config.max_value_size = value;
        self
    }

    #[allow(dead_code)]
    pub fn sync(&mut self, value: bool) -> &mut Self {
        self.config.sync = value;
        self
    }

    #[allow(dead_code)]
    pub fn open<P: AsRef<Path>>(&self, path: P) -> Result<Store> {
        Store::open_with_options(path, self.config)
    }
}