bitcoinleveldb_options/
options.rs

crate::ix!();

//-------------------------------------------[.cpp/bitcoin/src/leveldb/util/options.cc]
//-------------------------------------------[.cpp/bitcoin/src/leveldb/include/leveldb/options.h]

/**
  | DB contents are stored in a set of blocks, each
  | of which holds a sequence of key,value pairs.
  | Each block may be compressed before being
  | stored in a file.  The following enum describes
  | which compression method (if any) is used to
  | compress a block.
  */
pub enum CompressionType {

    /**
      | @note
      |
      | do not change the values of existing
      | entries, as these are part of the persistent
      | format on disk.
      |
      */
    NoCompression     = 0x0,
    SnappyCompression = 0x1
}
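
// A minimal sketch (assumption, not part of the upstream LevelDB API): decodes
// a persisted compression tag back into `CompressionType`, illustrating why the
// variant values above must never change once they have been written to disk.
#[allow(dead_code)]
fn compression_type_from_tag(tag: u8) -> Option<CompressionType> {
    match tag {
        0x0 => Some(CompressionType::NoCompression),
        0x1 => Some(CompressionType::SnappyCompression),
        _   => None, // tag written by a newer (or corrupted) file
    }
}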

/**
  | Options to control the behavior of a
  | database (passed to DB::Open)
  |
  */
pub struct Options {

    /* -------- Parameters that affect behavior  -------- */

    /**
      | Comparator used to define the order of keys
      | in the table.
      |
      | Default: a comparator that uses lexicographic
      | byte-wise ordering
      |
      | REQUIRES: The client must ensure that the
      | comparator supplied here has the same name
      | and orders keys *exactly* the same as the
      | comparator provided to previous open calls on
      | the same DB.
      */
    comparator:        Box<dyn SliceComparator>,

    /**
      | If true, the database will be created
      | if it is missing.
      |
      */
    create_if_missing: bool, // default = false

    /**
      | If true, an error is raised if the database
      | already exists.
      |
      */
    error_if_exists:   bool, // default = false

    /**
      | If true, the implementation will do
      | aggressive checking of the data it is
      | processing and will stop early if it detects
      | any errors.  This may have unforeseen
      | ramifications: for example, a corruption of
      | one DB entry may cause a large number of
      | entries to become unreadable or for the
      | entire DB to become unopenable.
      */
    paranoid_checks:   bool, // default = false

    /**
      | Use the specified object to interact with the
      | environment, e.g. to read/write files,
      | schedule background work, etc.
      |
      | Default: Env::Default()
      */
    env:               Rc<RefCell<dyn Env>>,

    /**
      | Any internal progress/error information
      | generated by the db will be written to
      | info_log if it is non-null, or to a file
      | stored in the same directory as the DB
      | contents if info_log is null.
      */
    info_log:          *mut dyn Logger, // default = nullptr

    /* ------ Parameters that affect performance  ------ */

    /**
      | Amount of data to build up in memory (backed
      | by an unsorted log on disk) before converting
      | to a sorted on-disk file.
      |
      | Larger values increase performance,
      | especially during bulk loads.
      |
      | Up to two write buffers may be held in memory
      | at the same time, so you may wish to adjust
      | this parameter to control memory usage.
      |
      | Also, a larger write buffer will result in
      | a longer recovery time the next time the
      | database is opened.
      */
    write_buffer_size:      usize, // default = 4 * 1024 * 1024

    /**
      | Number of open files that can be used by the
      | DB.  You may need to increase this if your
      | database has a large working set (budget one
      | open file per 2MB of working set).
      */
    max_open_files:         i32, // default = 1000

    /**
      | Control over blocks (user data is stored in
      | a set of blocks, and a block is the unit of
      | reading from disk).
      |
      | If non-null, use the specified cache for
      | blocks.
      |
      | If null, leveldb will automatically create
      | and use an 8MB internal cache.
      */
    block_cache:            *mut Cache, // default = nullptr

    /**
      | Approximate size of user data packed per
      | block.  Note that the block size specified
      | here corresponds to uncompressed data.  The
      | actual size of the unit read from disk may be
      | smaller if compression is enabled.  This
      | parameter can be changed dynamically.
      */
    block_size:             usize, // default = 4 * 1024

    /**
      | Number of keys between restart points for
      | delta encoding of keys.
      |
      | This parameter can be changed dynamically.
      | Most clients should leave this parameter
      | alone.
      */
    block_restart_interval: i32, // default = 16

    /**
      | Leveldb will write up to this amount of bytes
      | to a file before switching to a new one.
      |
      | Most clients should leave this parameter
      | alone.  However if your filesystem is more
      | efficient with larger files, you could
      | consider increasing the value.  The downside
      | will be longer compactions and hence longer
      | latency/performance hiccups.
      |
      | Another reason to increase this parameter
      | might be when you are initially populating
      | a large database.
      */
    max_file_size:          usize, // default = 2 * 1024 * 1024

    /**
      | Compress blocks using the specified
      | compression algorithm.  This parameter can be
      | changed dynamically.
      |
      | Default: kSnappyCompression, which gives
      | lightweight but fast compression.
      |
      | Typical speeds of kSnappyCompression on an
      | Intel(R) Core(TM)2 2.4GHz:
      |
      |    ~200-500MB/s compression
      |    ~400-800MB/s decompression
      |
      | Note that these speeds are significantly
      | faster than most persistent storage speeds,
      | and therefore it is typically never worth
      | switching to kNoCompression.  Even if the
      | input data is incompressible, the
      | kSnappyCompression implementation will
      | efficiently detect that and will switch to
      | uncompressed mode.
      */
    compression: CompressionType, // default = kSnappyCompression

    /**
      | EXPERIMENTAL: If true, append to existing
      | MANIFEST and log files when a database is
      | opened.  This can significantly speed up
      | open.
      |
      | Default: currently false, but may become true
      | later.
      */
    reuse_logs:    bool, // default = false

    /**
      | If non-null, use the specified filter policy
      | to reduce disk reads.
      |
      | Many applications will benefit from passing
      | the result of NewBloomFilterPolicy() here.
      */
    filter_policy: Box<dyn FilterPolicy>, // default = nullptr
}

impl Default for Options {

    fn default() -> Self {
        todo!();
        /*
            : comparator(BytewiseComparator()), env(Env::Default())
        */
    }
}
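
// A minimal sketch (assumption, not part of the upstream API): tunes an
// already-constructed `Options` for an initial bulk load, following the field
// documentation above. The specific sizes are illustrative only, and
// `Options::default()` is still `todo!()` in this translation, so construction
// is left to the caller.
#[allow(dead_code)]
fn tune_options_for_bulk_load(opts: &mut Options) {
    opts.create_if_missing = true;             // create the DB on first open
    opts.write_buffer_size = 64 * 1024 * 1024; // larger memtable, fewer flushes during the load
    opts.max_file_size     = 8 * 1024 * 1024;  // larger table files, at the cost of longer compactions
}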

/**
  | Options that control read operations
  |
  */
pub struct ReadOptions {

    /**
      | If true, all data read from underlying
      | storage will be verified against corresponding
      | checksums.
      |
      */
    verify_checksums: bool, // default = false

    /**
      | Should the data read for this iteration
      | be cached in memory? Callers may wish
      | to set this field to false for bulk scans.
      |
      */
    fill_cache:       bool, // default = true

    /**
      | If "snapshot" is non-null, read as of the
      | supplied snapshot (which must belong to the
      | DB that is being read and which must not have
      | been released).  If "snapshot" is null, use
      | an implicit snapshot of the state at the
      | beginning of this read operation.
      */
    snapshot:         Option<Box<dyn Snapshot>>, // default = nullptr
}

impl Default for ReadOptions {

    fn default() -> Self {
        Self {
            verify_checksums: false,
            fill_cache:       true, // documented default is true; a derived Default would wrongly give false
            snapshot:         None,
        }
    }
}
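
// A minimal sketch (assumption): `ReadOptions` for a bulk scan, following the
// field documentation above: verify checksums, but do not let the scan evict
// the hot working set from the block cache.
#[allow(dead_code)]
fn bulk_scan_read_options() -> ReadOptions {
    let mut opts = ReadOptions::default();
    opts.verify_checksums = true;  // catch on-disk corruption while scanning
    opts.fill_cache       = false; // bulk scans should not pollute the block cache
    opts
}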

/**
  | Options that control write operations
  |
  */
#[derive(Default)]
pub struct WriteOptions {

    /**
      | If true, the write will be flushed from the
      | operating system buffer cache (by calling
      | WritableFile::Sync()) before the write is
      | considered complete.  If this flag is true,
      | writes will be slower.
      |
      | If this flag is false, and the machine
      | crashes, some recent writes may be lost.
      | Note that if it is just the process that
      | crashes (i.e., the machine does not reboot),
      | no writes will be lost even if sync==false.
      |
      | In other words, a DB write with sync==false
      | has similar crash semantics as the "write()"
      | system call.  A DB write with sync==true has
      | similar crash semantics to a "write()" system
      | call followed by "fsync()".
      */
    sync: bool, // default = false
}
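
// A minimal sketch (assumption): `WriteOptions` for durable writes. Per the
// comment above, sync == true gives crash semantics comparable to write()
// followed by fsync(), at the cost of slower writes.
#[allow(dead_code)]
fn durable_write_options() -> WriteOptions {
    let mut opts = WriteOptions::default();
    opts.sync = true;
    opts
}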