Struct rocks::db::DB

pub struct DB { /* fields omitted */ }

A DB is a persistent ordered map from keys to values.

A DB is safe for concurrent access from multiple threads without any external synchronization.

Examples

use rocks::rocksdb::*;

let db = DB::open(Options::default().map_db_options(|db| db.create_if_missing(true)),
                  "./data").unwrap();
// insert kv
let _ = db.put(&WriteOptions::default(), b"my-key", b"my-value").unwrap();

// get kv
let val = db.get(&ReadOptions::default(), b"my-key").unwrap();
println!("got value {}", String::from_utf8_lossy(&val));

assert_eq!(val, b"my-value");

Implementations

impl DB[src]

pub fn open<T: AsRef<Options>, P: AsRef<Path>>(
    options: T,
    name: P
) -> Result<DB>
[src]

Open the database with the specified name.

pub fn open_with_column_families<CF: Into<ColumnFamilyDescriptor>, P: AsRef<Path>, I: IntoIterator<Item = CF>>(
    options: &DBOptions,
    name: P,
    column_families: I
) -> Result<(DB, Vec<ColumnFamily>)>
[src]

Open DB with column families.

pub fn open_for_readonly<P: AsRef<Path>>(
    options: &Options,
    name: P,
    error_if_log_file_exist: bool
) -> Result<DB>
[src]

Open the database for read only. All DB interfaces that modify data, like put/delete, will return error. If the db is opened in read only mode, then no compactions will happen.

pub fn open_for_readonly_with_column_families<CF: Into<ColumnFamilyDescriptor>, P: AsRef<Path>, I: IntoIterator<Item = CF>>(
    options: &DBOptions,
    name: P,
    column_families: I,
    error_if_log_file_exist: bool
) -> Result<(DB, Vec<ColumnFamily>)>
[src]

Open the database for read only with column families. When opening DB with read only, you can specify only a subset of column families in the database that should be opened. However, you always need to specify default column family. The default column family name is 'default' and it's stored in rocksdb::kDefaultColumnFamilyName

pub fn open_as_secondary<P1: AsRef<Path>, P2: AsRef<Path>>(
    options: &Options,
    name: P1,
    secondary_path: P2
) -> Result<DB>
[src]

Open DB as secondary instance with only the default column family.

pub fn open_as_secondary_with_column_families<P1: AsRef<Path>, P2: AsRef<Path>, CF: Into<ColumnFamilyDescriptor>, I: IntoIterator<Item = CF>>(
    options: &Options,
    name: P1,
    secondary_path: P2,
    column_families: I
) -> Result<(DB, Vec<ColumnFamily>)>
[src]

Open DB as secondary instance with column families. You can open a subset of column families in secondary mode.

pub fn list_column_families<P: AsRef<Path>>(
    options: &Options,
    name: P
) -> Result<Vec<String>>
[src]

ListColumnFamilies will open the DB specified by the argument name and return the list of all column families in that DB through the column_families argument. The ordering of column families in column_families is unspecified.

pub fn create_column_family(
    &self,
    cfopts: &ColumnFamilyOptions,
    column_family_name: &str
) -> Result<ColumnFamily>
[src]

Create a column_family and return the handle of column family through the argument handle.

pub fn drop_column_family(
    &self,
    column_family: &ColumnFamilyHandle
) -> Result<()>
[src]

Drop a column family specified by column_family handle. This call only records a drop record in the manifest and prevents the column family from flushing and compacting.

pub fn default_column_family(&self) -> ColumnFamily[src]

Returns default column family handle

Methods from Deref<Target = DBRef>

pub unsafe fn close(&self) -> Result<()>[src]

Close the DB by releasing resources, closing files etc. This should be called before calling the destructor so that the caller can get back a status in case there are any errors. This will not fsync the WAL files. If syncing is required, the caller must first call SyncWAL(), or Write() using an empty write batch with WriteOptions.sync=true.

If the return status is Aborted(), closing fails because there is unreleased snapshot in the system. In this case, users can release the unreleased snapshots and try again and expect it to succeed. For other status, recalling Close() will be no-op.

If the return status is NotSupported(), then the DB implementation does cleanup in the destructor

NOTE for Rust: segmentation fault if the db is accessed after close

pub fn resume(&self) -> Result<()>[src]

Manually resume the DB and put it in read-write mode. This function will flush memtables for all the column families, clear the error, purge any obsolete files, and restart background flush and compaction operations.

pub fn put(
    &self,
    options: &WriteOptions,
    key: &[u8],
    value: &[u8]
) -> Result<()>
[src]

Set the database entry for "key" to "value". If "key" already exists, it will be overwritten. Returns OK on success, and a non-OK status on error.

Note: consider setting options.sync = true.

pub fn put_cf(
    &self,
    options: &WriteOptions,
    column_family: &ColumnFamilyHandle,
    key: &[u8],
    value: &[u8]
) -> Result<()>
[src]

pub fn delete(&self, options: &WriteOptions, key: &[u8]) -> Result<()>[src]

Remove the database entry (if any) for "key". Returns OK on success, and a non-OK status on error. It is not an error if "key" did not exist in the database.

Note: consider setting options.sync = true.

pub fn delete_cf(
    &self,
    options: &WriteOptions,
    column_family: &ColumnFamilyHandle,
    key: &[u8]
) -> Result<()>
[src]

pub fn single_delete(&self, options: &WriteOptions, key: &[u8]) -> Result<()>[src]

Remove the database entry for "key". Requires that the key exists and was not overwritten. Returns OK on success, and a non-OK status on error. It is not an error if "key" did not exist in the database.

If a key is overwritten (by calling Put() multiple times), then the result of calling SingleDelete() on this key is undefined. SingleDelete() only behaves correctly if there has been only one Put() for this key since the previous call to SingleDelete() for this key.

This feature is currently an experimental performance optimization for a very specific workload. It is up to the caller to ensure that SingleDelete is only used for a key that is not deleted using Delete() or written using Merge(). Mixing SingleDelete operations with Deletes and Merges can result in undefined behavior.

Note: consider setting options.sync = true.

pub fn single_delete_cf(
    &self,
    options: &WriteOptions,
    column_family: &ColumnFamilyHandle,
    key: &[u8]
) -> Result<()>
[src]

pub fn delete_range_cf(
    &self,
    options: &WriteOptions,
    column_family: &ColumnFamilyHandle,
    begin_key: &[u8],
    end_key: &[u8]
) -> Result<()>
[src]

Removes the database entries in the range ["begin_key", "end_key"), i.e., including "begin_key" and excluding "end_key". Returns OK on success, and a non-OK status on error. It is not an error if no keys exist in the range ["begin_key", "end_key").

This feature is currently an experimental performance optimization for deleting very large ranges of contiguous keys. Invoking it many times or on small ranges may severely degrade read performance; in particular, the resulting performance can be worse than calling Delete() for each key in the range. Note also the degraded read performance affects keys outside the deleted ranges, and affects database operations involving scans, like flush and compaction.

Consider setting ReadOptions::ignore_range_deletions = true to speed up reads for key(s) that are known to be unaffected by range deletions.

pub fn merge(
    &self,
    options: &WriteOptions,
    key: &[u8],
    val: &[u8]
) -> Result<()>
[src]

Merge the database entry for "key" with "value". Returns OK on success, and a non-OK status on error. The semantics of this operation is determined by the user provided merge_operator when opening DB.

Note: consider setting options.sync = true.

pub fn merge_cf(
    &self,
    options: &WriteOptions,
    column_family: &ColumnFamilyHandle,
    key: &[u8],
    val: &[u8]
) -> Result<()>
[src]

pub fn write(&self, options: &WriteOptions, updates: &WriteBatch) -> Result<()>[src]

Apply the specified updates to the database.

If updates contains no update, WAL will still be synced if options.sync=true.

Returns OK on success, non-OK on failure.

Note: consider setting options.sync = true.

pub fn get(&self, options: &ReadOptions, key: &[u8]) -> Result<PinnableSlice>[src]

If the database contains an entry for "key" store the corresponding value in *value and return OK.

If there is no entry for "key" leave *value unchanged and return a status for which Error::IsNotFound() returns true.

May return some other Error on an error.

pub fn get_cf(
    &self,
    options: &ReadOptions,
    column_family: &ColumnFamilyHandle,
    key: &[u8]
) -> Result<PinnableSlice>
[src]

pub fn multi_get(
    &self,
    options: &ReadOptions,
    keys: &[&[u8]]
) -> Vec<Result<PinnableSlice>>
[src]

If keys[i] does not exist in the database, then the i'th returned status will be one for which Error::IsNotFound() is true, and (*values)[i] will be set to some arbitrary value (often ""). Otherwise, the i'th returned status will have Error::ok() true, and (*values)[i] will store the value associated with keys[i].

(*values) will always be resized to be the same size as (keys). Similarly, the number of returned statuses will be the number of keys.

Note: keys will not be "de-duplicated". Duplicate keys will return duplicate values in order.

pub fn multi_get_cf(
    &self,
    options: &ReadOptions,
    column_families: &[&ColumnFamilyHandle],
    keys: &[&[u8]]
) -> Vec<Result<PinnableSlice>>
[src]

pub fn key_may_exist(&self, options: &ReadOptions, key: &[u8]) -> bool[src]

If the key definitely does not exist in the database, then this method returns false, else true. If the caller wants to obtain value when the key is found in memory, a bool for 'value_found' must be passed. 'value_found' will be true on return if value has been set properly.

This check is potentially lighter-weight than invoking DB::Get(). One way to make this lighter weight is to avoid doing any IOs.

Default implementation here returns true and sets 'value_found' to false

pub fn key_may_get(
    &self,
    options: &ReadOptions,
    key: &[u8]
) -> (bool, Option<Vec<u8>>)
[src]

pub fn key_may_exist_cf(
    &self,
    options: &ReadOptions,
    column_family: &ColumnFamilyHandle,
    key: &[u8]
) -> bool
[src]

pub fn key_may_get_cf(
    &self,
    options: &ReadOptions,
    column_family: &ColumnFamilyHandle,
    key: &[u8]
) -> (bool, Option<Vec<u8>>)
[src]

pub fn new_iterator<'c, 'd: 'c>(&'d self, options: &ReadOptions) -> Iterator<'c>[src]

Return a heap-allocated iterator over the contents of the database. The result of NewIterator() is initially invalid (caller must call one of the Seek methods on the iterator before using it).

Caller should delete the iterator when it is no longer needed. The returned iterator should be deleted before this db is deleted.

pub fn new_iterator_cf<'c, 'd: 'c>(
    &self,
    options: &ReadOptions,
    cf: &'d ColumnFamilyHandle
) -> Iterator<'c>
[src]

pub fn new_iterators<'c, 'b: 'c, T: AsRef<ColumnFamilyHandle>>(
    &'b self,
    options: &ReadOptions,
    cfs: &[T]
) -> Result<Vec<Iterator<'c>>>
[src]

pub fn get_snapshot(&self) -> Option<Snapshot>[src]

Return a handle to the current DB state. Iterators created with this handle will all observe a stable snapshot of the current DB state. The caller must call ReleaseSnapshot(result) when the snapshot is no longer needed.

None will be returned if the DB fails to take a snapshot or does not support snapshots.

pub fn release_snapshot(&self, snapshot: Snapshot)[src]

Release a previously acquired snapshot. The caller must not use "snapshot" after this call.

pub fn get_property(&self, property: &str) -> Option<String>[src]

DB implementations can export properties about their state via this method. If "property" is a valid property understood by this DB implementation (see Properties struct above for valid options), fills "*value" with its current value and returns true. Otherwise, returns false.

pub fn get_property_cf(
    &self,
    column_family: &ColumnFamilyHandle,
    property: &str
) -> Option<String>
[src]

pub fn get_map_property(&self, property: &str) -> Option<()>[src]

pub fn get_int_property(&self, property: &str) -> Option<u64>[src]

Similar to GetProperty(), but only works for a subset of properties whose return value is an integer. Return the value by integer. Supported properties:

  • "rocksdb.num-immutable-mem-table"
  • "rocksdb.mem-table-flush-pending"
  • "rocksdb.compaction-pending"
  • "rocksdb.background-errors"
  • "rocksdb.cur-size-active-mem-table"
  • "rocksdb.cur-size-all-mem-tables"
  • "rocksdb.size-all-mem-tables"
  • "rocksdb.num-entries-active-mem-table"
  • "rocksdb.num-entries-imm-mem-tables"
  • "rocksdb.num-deletes-active-mem-table"
  • "rocksdb.num-deletes-imm-mem-tables"
  • "rocksdb.estimate-num-keys"
  • "rocksdb.estimate-table-readers-mem"
  • "rocksdb.is-file-deletions-enabled"
  • "rocksdb.num-snapshots"
  • "rocksdb.oldest-snapshot-time"
  • "rocksdb.num-live-versions"
  • "rocksdb.current-super-version-number"
  • "rocksdb.estimate-live-data-size"
  • "rocksdb.min-log-number-to-keep"
  • "rocksdb.total-sst-files-size"
  • "rocksdb.base-level"
  • "rocksdb.estimate-pending-compaction-bytes"
  • "rocksdb.num-running-compactions"
  • "rocksdb.num-running-flushes"
  • "rocksdb.actual-delayed-write-rate"
  • "rocksdb.is-write-stopped"

pub fn get_int_property_cf(
    &self,
    column_family: &ColumnFamilyHandle,
    property: &str
) -> Option<u64>
[src]

pub fn get_aggregated_int_property(&self, property: &str) -> Option<u64>[src]

Same as GetIntProperty(), but this one returns the aggregated int property from all column families.

pub fn get_approximate_sizes(
    &self,
    column_family: &ColumnFamilyHandle,
    ranges: &[Range<&[u8]>]
) -> Vec<u64>
[src]

pub fn get_approximate_memtable_stats(
    &self,
    column_family: &ColumnFamilyHandle,
    range: Range<&[u8]>
) -> (u64, u64)
[src]

pub fn compact_range<R: AsCompactRange>(
    &self,
    options: &CompactRangeOptions,
    range: R
) -> Result<()>
[src]

Compact the underlying storage for the key range [*begin,*end]. The actual compaction interval might be superset of [*begin, *end]. In particular, deleted and overwritten versions are discarded, and the data is rearranged to reduce the cost of operations needed to access the data. This operation should typically only be invoked by users who understand the underlying implementation.

begin==nullptr is treated as a key before all keys in the database. end==nullptr is treated as a key after all keys in the database. Therefore the following call will compact the entire database:

db->CompactRange(options, nullptr, nullptr);

Note that after the entire database is compacted, all data are pushed down to the last level containing any data. If the total data size after compaction is reduced, that level might not be appropriate for hosting all the files. In this case, client could set options.change_level to true, to move the files back to the minimum level capable of holding the data set or a given level (specified by non-negative options.target_level).

For Rust: pass a range expression; note that compact_range() may compact a superset of the given range.

pub fn set_options<T, H>(
    &self,
    column_family: &ColumnFamilyHandle,
    new_options: H
) -> Result<()> where
    T: AsRef<str>,
    H: IntoIterator<Item = (T, T)>, 
[src]

pub fn set_db_options(&self, new_options: &HashMap<&str, &str>) -> Result<()>[src]

pub fn compact_files<P: AsRef<Path>, I: IntoIterator<Item = P>>(
    &self,
    compact_options: &CompactionOptions,
    input_file_names: I,
    output_level: i32
) -> Result<()>
[src]

CompactFiles() inputs a list of files specified by file numbers and compacts them to the specified level. Note that the behavior is different from CompactRange() in that CompactFiles() performs the compaction job using the CURRENT thread.

pub fn compact_files_to<P: AsRef<Path>, I: IntoIterator<Item = P>>(
    &self,
    compact_options: &CompactionOptions,
    input_file_names: I,
    output_level: i32,
    output_path_id: i32
) -> Result<()>
[src]

pub fn pause_background_work(&self) -> Result<()>[src]

This function will wait until all currently running background processes finish. After it returns, no background process will be run until ContinueBackgroundWork is called

pub fn continue_background_work(&self) -> Result<()>[src]

pub fn cancel_background_work(&self, wait: bool)[src]

Request stopping background work, if wait is true wait until it's done

pub fn enable_auto_compaction(
    &self,
    column_family_handles: &[&ColumnFamilyHandle]
) -> Result<()>
[src]

This function will enable automatic compactions for the given column families if they were previously disabled. The function will first set the disable_auto_compactions option for each column family to 'false', after which it will schedule a flush/compaction.

NOTE: Setting disable_auto_compactions to 'false' through SetOptions() API does NOT schedule a flush/compaction afterwards, and only changes the parameter itself within the column family option.

pub fn number_levels(&self) -> u32[src]

Number of levels used for this DB.

pub fn max_mem_compaction_level(&self) -> u32[src]

Maximum level to which a new compacted memtable is pushed if it does not create overlap.

pub fn level0_stop_write_trigger(&self) -> u32[src]

Number of files in level-0 that would stop writes.

pub fn name(&self) -> String[src]

Get DB name -- the exact same name that was provided as an argument to DB::Open()

pub fn flush(&self, options: &FlushOptions) -> Result<()>[src]

Flush all mem-table data.

pub fn sync_wal(&self) -> Result<()>[src]

Sync the wal. Note that Write() followed by SyncWAL() is not exactly the same as Write() with sync=true: in the latter case the changes won't be visible until the sync is done.

Currently only works if allow_mmap_writes = false in Options.

pub fn get_latest_sequence_number(&self) -> SequenceNumber[src]

The sequence number of the most recent transaction.

pub fn disable_file_deletions(&self) -> Result<()>[src]

Prevent file deletions. Compactions will continue to occur, but no obsolete files will be deleted. Calling this multiple times has the same effect as calling it once.

pub fn enable_file_deletions(&self, force: bool) -> Result<()>[src]

Allow compactions to delete obsolete files.

If force == true, the call to EnableFileDeletions() will guarantee that file deletions are enabled after the call, even if DisableFileDeletions() was called multiple times before.

If force == false, EnableFileDeletions will only enable file deletion after it's been called at least as many times as DisableFileDeletions(), enabling the two methods to be called by two threads concurrently without synchronization -- i.e., file deletions will be enabled only after both threads call EnableFileDeletions()

pub fn get_live_files(&self, flush_memtable: bool) -> Result<(u64, Vec<String>)>[src]

GetLiveFiles followed by GetSortedWalFiles can generate a lossless backup

Retrieve the list of all files in the database. The files are relative to the dbname and are not absolute paths. The valid size of the manifest file is returned in manifest_file_size. The manifest file is an ever growing file, but only the portion specified by manifest_file_size is valid for this snapshot. Setting flush_memtable to true does Flush before recording the live files. Setting flush_memtable to false is useful when we don't want to wait for flush which may have to wait for compaction to complete taking an indeterminate time.

In case you have multiple column families, even if flush_memtable is true, you still need to call GetSortedWalFiles after GetLiveFiles to compensate for new data that arrived to already-flushed column families while other column families were flushing

pub fn get_sorted_wal_files(&self) -> Result<Vec<LogFile>>[src]

Retrieve the sorted list of all wal files with earliest file first

pub fn get_updates_since(
    &self,
    seq_number: SequenceNumber
) -> Result<TransactionLogIterator>
[src]

Sets iter to an iterator that is positioned at a write-batch containing seq_number. If the sequence number is non-existent, it returns an iterator at the first available seq_no after the requested seq_no.

Returns Error::OK if iterator is valid

Must set WAL_ttl_seconds or WAL_size_limit_MB to large values to use this api, else the WAL files will get cleared aggressively and the iterator might keep getting invalid before an update is read.

pub fn delete_file(&self, name: &str) -> Result<()>[src]

Delete the file name from the db directory and update the internal state to reflect that. Supports deletion of sst and log files only. 'name' must be path relative to the db directory. eg. 000001.sst, /archive/000003.log

pub fn delete_files_in_range(
    &self,
    column_family: &ColumnFamilyHandle,
    begin: &[u8],
    end: &[u8]
) -> Result<()>
[src]

Delete files which are entirely in the given range

Could leave some keys in the range which are in files which are not entirely in the range.

Snapshots before the delete might not see the data in the given range.

pub fn get_live_files_metadata(&self) -> Vec<LiveFileMetaData>[src]

Returns a list of all table files with their level, start key and end key

pub fn get_column_family_metadata(
    &self,
    column_family: &ColumnFamilyHandle
) -> ColumnFamilyMetaData
[src]

Obtains the meta data of the specified column family of the DB.

pub fn ingest_external_file<P: AsRef<Path>, T: IntoIterator<Item = P>>(
    &self,
    external_files: T,
    options: &IngestExternalFileOptions
) -> Result<()>
[src]

IngestExternalFile() will load a list of external SST files (1) into the DB. We will try to find the lowest possible level that the file can fit in, and ingest the file into this level (2). A file that has a key range that overlaps with the memtable key range will require us to Flush the memtable first before ingesting the file.

  • External SST files can be created using SstFileWriter
  • We will try to ingest the files to the lowest possible level even if the file compression doesn't match the level compression

pub fn ingest_external_file_cf<P: AsRef<Path>, T: IntoIterator<Item = P>>(
    &self,
    column_family: &ColumnFamilyHandle,
    external_files: T,
    options: &IngestExternalFileOptions
) -> Result<()>
[src]

pub fn get_db_identity(&self) -> Result<String>[src]

Sets the globally unique ID created at database creation time by invoking Env::GenerateUniqueId(), in identity. Returns Error::OK if identity could be set properly

pub fn get_properties_of_all_tables_cf(
    &self,
    column_family: &ColumnFamilyHandle
) -> Result<TablePropertiesCollection>
[src]

pub fn get_properties_of_tables_in_range(
    &self,
    column_family: &ColumnFamilyHandle,
    ranges: &[Range<&[u8]>]
) -> Result<TablePropertiesCollection>
[src]

pub fn get_all_key_versions(
    &self,
    begin_key: &[u8],
    end_key: &[u8]
) -> Result<KeyVersionVec>
[src]

Returns listing of all versions of keys in the provided user key range. The range is inclusive-inclusive, i.e., [begin_key, end_key]. The result is inserted into the provided vector, key_versions.

pub fn try_catch_up_with_primary(&self) -> Result<()>[src]

Make the secondary instance catch up with the primary by tailing and replaying the MANIFEST and WAL of the primary.

Column families created by the primary after the secondary instance starts will be ignored unless the secondary instance closes and restarts with the newly created column families.

Column families that exist before secondary instance starts and dropped by the primary afterwards will be marked as dropped. However, as long as the secondary instance does not delete the corresponding column family handles, the data of the column family is still accessible to the secondary.

Trait Implementations

impl Debug for DB[src]

impl Deref for DB[src]

type Target = DBRef

The resulting type after dereferencing.

impl Send for DB[src]

impl Sync for DB[src]

Auto Trait Implementations

impl RefUnwindSafe for DB

impl Unpin for DB

impl UnwindSafe for DB

Blanket Implementations

impl<T> Any for T where
    T: 'static + ?Sized
[src]

impl<T> Borrow<T> for T where
    T: ?Sized
[src]

impl<T> BorrowMut<T> for T where
    T: ?Sized
[src]

impl<T> From<T> for T[src]

impl<T, U> Into<U> for T where
    U: From<T>, 
[src]

impl<T, U> TryFrom<U> for T where
    U: Into<T>, 
[src]

type Error = Infallible

The type returned in the event of a conversion error.

impl<T, U> TryInto<U> for T where
    U: TryFrom<T>, 
[src]

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.