bitcoinleveldb_bench/
db_bench.rs

1crate::ix!();
2
3//-------------------------------------------[.cpp/bitcoin/src/leveldb/benchmarks/db_bench.cc]
4
/**
  | Comma-separated list of operations to run in
  | the specified order
  |
  |   Actual benchmarks:
  |      fillseq       -- write N values in sequential key order in async mode
  |      fillrandom    -- write N values in random key order in async mode
  |      overwrite     -- overwrite N values in random key order in async mode
  |      fillsync      -- write N/100 values in random key order in sync mode
  |      fill100K      -- write N/1000 100K values in random order in async mode
  |      deleteseq     -- delete N keys in sequential order
  |      deleterandom  -- delete N keys in random order
  |      readseq       -- read N times sequentially
  |      readreverse   -- read N times in reverse order
  |      readrandom    -- read N times in random order
  |      readmissing   -- read N missing keys in random order
  |      readhot       -- read N times in random order from 1% section of DB
  |      seekrandom    -- N random seeks
  |      open          -- cost of opening a DB
  |      crc32c        -- repeated crc32c of 4K of data
  |   Meta operations:
  |      compact     -- Compact the entire DB
  |      stats       -- Print DB stats
  |      sstables    -- Print sstable info
  |      heapprofile -- Dump a heap profile (if supported by this port)
  */
pub const FLAGS_benchmarks: &'static str =
    // One literal built with `\` line continuations; each entry keeps its
    // trailing comma. `readrandom` is listed a second time after the fill
    // phases so the extra run measures after earlier compactions quiesce.
    "fillseq,\
     fillsync,\
     fillrandom,\
     overwrite,\
     readrandom,\
     readrandom,\
     readseq,\
     readreverse,\
     compact,\
     readrandom,\
     readseq,\
     readreverse,\
     fill100K,\
     crc32c,\
     snappycomp,\
     snappyuncomp,";
49
// NOTE(review): each `lazy_static!` below is an empty placeholder that keeps
// the not-yet-ported C++ flag/global as a comment; nothing is actually
// defined, so the FLAGS_* / g_env references inside this file's `todo!()`
// bodies cannot be implemented until these are ported.

/**
  | Number of key/values to place in database
  |
  */
lazy_static!{
    /*
    static int FLAGS_num = 1000000;
    */
}

/**
  | Number of read operations to do. If negative,
  | do FLAGS_num reads.
  |
  */
lazy_static!{
    /*
    static int FLAGS_reads = -1;
    */
}

/**
   Number of concurrent threads to run.
  */
lazy_static!{
    /*
    static int FLAGS_threads = 1;
    */
}

/**
   Size of each value
  */
lazy_static!{
    /*
    static int FLAGS_value_size = 100;
    */
}

/**
  | Arrange to generate values that shrink to this
  | fraction of their original size after
  | compression
  */
lazy_static!{
    /*
    static double FLAGS_compression_ratio = 0.5;
    */
}

/**
   Print histogram of operation timings
  */
lazy_static!{
    /*
    static bool FLAGS_histogram = false;
    */
}

/**
  | Number of bytes to buffer in memtable before
  | compacting (initialized to default value by
  | "main")
  */
lazy_static!{
    /*
    static int FLAGS_write_buffer_size = 0;
    */
}

/**
   Number of bytes written to each file.
   (initialized to default value by "main")
  */
lazy_static!{
    /*
    static int FLAGS_max_file_size = 0;
    */
}

/**
  | Approximate size of user data packed per block
  | (before compression).  (initialized to default
  | value by "main")
  */
lazy_static!{
    /*
    static int FLAGS_block_size = 0;
    */
}

/**
  | Number of bytes to use as a cache of
  | uncompressed data.  Negative means use default
  | settings.
  */
lazy_static!{
    /*
    static int FLAGS_cache_size = -1;
    */
}

/**
   Maximum number of files to keep open at the
   same time (use default if == 0)
  */
lazy_static!{
    /*
    static int FLAGS_open_files = 0;
    */
}

/**
   Bloom filter bits per key.

   Negative means use default settings.
  */
lazy_static!{
    /*
    static int FLAGS_bloom_bits = -1;
    */
}

/**
  | If true, do not destroy the existing database.
  | If you set this flag and also specify
  | a benchmark that wants a fresh database, that
  | benchmark will fail.
  */
lazy_static!{
    /*
    static bool FLAGS_use_existing_db = false;
    */
}

/**
   If true, reuse existing log/MANIFEST files when
   re-opening a database.
  */
lazy_static!{
    /*
    static bool FLAGS_reuse_logs = false;
    */
}

/**
   Use the db with the following name.
  */
lazy_static!{
    /*
    static const char* FLAGS_db = nullptr;
    */
}

/**
   Global Env (filesystem/clock/thread abstraction) shared by all
   benchmarks; set up by the C++ `main` before `Benchmark::Run`.
  */
lazy_static!{
    /*
    leveldb::Env* g_env = nullptr;
    */
}
209
/**
   Helper for quickly generating random data.
  */
pub struct RandomGenerator {
    // Pool of pseudo-random compressible bytes; per the C++ constructor
    // (see `Default` below) it is filled once to ~1MB and then sliced
    // repeatedly instead of regenerating data per operation.
    data: String,
    // Current offset into `data`; `generate` wraps it back to 0 when a
    // request would run past the end of the pool.
    // NOTE(review): i32 mirrors the C++ int; usize would be more natural.
    pos:  i32,
}
217
impl Default for RandomGenerator {
    
    /// Unported stub: the C++ constructor (kept below as reference) fills
    /// `data` with ~1MB of fragments whose compressibility matches
    /// FLAGS_compression_ratio, then resets `pos` to 0.
    fn default() -> Self {
        todo!();
        /*


            // We use a limited amount of data over and over again and ensure
        // that it is larger than the compression window (32KB), and also
        // large enough to serve all typical value sizes we want to write.
        Random rnd(301);
        std::string piece;
        while (data_.size() < 1048576) {
          // Add a short fragment that is as compressible as specified
          // by FLAGS_compression_ratio.
          test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece);
          data_.append(piece);
        }
        pos_ = 0;
        */
    }
}
240
impl RandomGenerator {

    /// Unported stub: returns a `len`-byte `Slice` into the pre-generated
    /// pool, advancing `pos` and wrapping back to the start of the pool when
    /// the request would overrun it (asserting `len` fits in the pool).
    pub fn generate(&mut self, len: usize) -> Slice {
        
        todo!();
        /*
            if (pos_ + len > data_.size()) {
          pos_ = 0;
          assert(len < data_.size());
        }
        pos_ += len;
        return Slice(data_.data() + pos_ - len, len);
        */
    }
}
256
257#[cfg(__linux)]
258pub fn trim_space(s: Slice) -> Slice {
259    
260    todo!();
261        /*
262            size_t start = 0;
263      while (start < s.size() && isspace(s[start])) {
264        start++;
265      }
266      size_t limit = s.size();
267      while (limit > start && isspace(s[limit - 1])) {
268        limit--;
269      }
270      return Slice(s.data() + start, limit - start);
271        */
272}
273
/// Unported stub: appends `msg` to `*str_`, inserting a single space first
/// when `*str_` is already non-empty; empty messages are ignored.
///
/// NOTE(review): takes a raw `*mut String` (transpiler artifact); a
/// `&mut String` would be the safe idiomatic signature — confirm no caller
/// depends on the pointer form before changing it.
pub fn append_with_space(
        str_: *mut String,
        msg:  Slice)  {
    
    todo!();
        /*
            if (msg.empty()) return;
      if (!str->empty()) {
        str->push_back(' ');
      }
      str->append(msg.data(), msg.size());
        */
}
287
/// Per-thread benchmark statistics: operation and byte counts, wall-clock
/// timing, an optional per-op latency histogram, and a free-form message
/// appended to the final report line.
pub struct Stats {
    start:          f64,       // start timestamp in microseconds (g_env NowMicros)
    finish:         f64,       // finish timestamp in microseconds
    seconds:        f64,       // elapsed seconds between start and finish
    done:           i32,       // number of operations completed so far
    next_report:    i32,       // `done` threshold at which progress is next printed
    bytes:          i64,       // total bytes processed by the benchmark
    last_op_finish: f64,       // finish time of the previous op (histogram mode only)
    hist:           Histogram, // per-op latency histogram (used when FLAGS_histogram)
    message:        String,    // extra text for the report line (first thread's kept on merge)
}
300
impl Default for Stats {
    
    /// Unported stub: the C++ constructor simply calls `Start()` to zero the
    /// counters and stamp the start time.
    fn default() -> Self {
        todo!();
        /*


            Start();
        */
    }
}
312
impl Stats {

    /// Unported stub: resets all counters/histogram/message and stamps
    /// `start`, `finish`, and `last_op_finish` with the current time.
    pub fn start(&mut self)  {
        
        todo!();
        /*
            next_report_ = 100;
        hist_.Clear();
        done_ = 0;
        bytes_ = 0;
        seconds_ = 0;
        message_.clear();
        start_ = finish_ = last_op_finish_ = g_env->NowMicros();
        */
    }
    
    /// Unported stub: folds another thread's stats into this one — sums
    /// counts, widens the [start, finish] window, and keeps only the first
    /// non-empty message.
    pub fn merge(&mut self, other: &Stats)  {
        
        todo!();
        /*
            hist_.Merge(other.hist_);
        done_ += other.done_;
        bytes_ += other.bytes_;
        seconds_ += other.seconds_;
        if (other.start_ < start_) start_ = other.start_;
        if (other.finish_ > finish_) finish_ = other.finish_;

        // Just keep the messages from one thread
        if (message_.empty()) message_ = other.message_;
        */
    }
    
    /// Unported stub: stamps `finish` and computes `seconds` elapsed.
    pub fn stop(&mut self)  {
        
        todo!();
        /*
            finish_ = g_env->NowMicros();
        seconds_ = (finish_ - start_) * 1e-6;
        */
    }
    
    /// Unported stub: appends `msg` to the report message (space-separated).
    pub fn add_message(&mut self, msg: Slice)  {
        
        todo!();
        /*
            AppendWithSpace(&message_, msg);
        */
    }
    
    /// Unported stub: records completion of one operation — updates the
    /// latency histogram in FLAGS_histogram mode (warning on ops > 20ms) and
    /// prints a progress line to stderr at exponentially widening intervals.
    pub fn finished_single_op(&mut self)  {
        
        todo!();
        /*
            if (FLAGS_histogram) {
          double now = g_env->NowMicros();
          double micros = now - last_op_finish_;
          hist_.Add(micros);
          if (micros > 20000) {
            fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
            fflush(stderr);
          }
          last_op_finish_ = now;
        }

        done_++;
        if (done_ >= next_report_) {
          if (next_report_ < 1000)
            next_report_ += 100;
          else if (next_report_ < 5000)
            next_report_ += 500;
          else if (next_report_ < 10000)
            next_report_ += 1000;
          else if (next_report_ < 50000)
            next_report_ += 5000;
          else if (next_report_ < 100000)
            next_report_ += 10000;
          else if (next_report_ < 500000)
            next_report_ += 50000;
          else
            next_report_ += 100000;
          fprintf(stderr, "... finished %d ops%30s\r", done_, "");
          fflush(stderr);
        }
        */
    }
    
    /// Unported stub: adds `n` to the processed-byte counter.
    pub fn add_bytes(&mut self, n: i64)  {
        
        todo!();
        /*
            bytes_ += n;
        */
    }
    
    /// Unported stub: prints the final report line for benchmark `name`
    /// (micros/op, optional MB/s computed from actual elapsed time, message,
    /// and the histogram when FLAGS_histogram is set).
    pub fn report(&mut self, name: &Slice)  {
        
        todo!();
        /*
            // Pretend at least one op was done in case we are running a benchmark
        // that does not call FinishedSingleOp().
        if (done_ < 1) done_ = 1;

        std::string extra;
        if (bytes_ > 0) {
          // Rate is computed on actual elapsed time, not the sum of per-thread
          // elapsed times.
          double elapsed = (finish_ - start_) * 1e-6;
          char rate[100];
          snprintf(rate, sizeof(rate), "%6.1f MB/s",
                   (bytes_ / 1048576.0) / elapsed);
          extra = rate;
        }
        AppendWithSpace(&extra, message_);

        fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
                seconds_ * 1e6 / done_, (extra.empty() ? "" : " "), extra.c_str());
        if (FLAGS_histogram) {
          fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
        }
        fflush(stdout);
        */
    }
}
436
/**
   State shared by all concurrent executions of
   the same benchmark.
  */
pub struct SharedState {

    // Mutex guarding the cross-thread coordination data; the condition
    // variable lives inside `Inner` alongside the counters it signals on.
    mu:    Mutex<shared_state::Inner>,
}
445
pub mod shared_state {

    use super::*;

    /// Coordination data protected by `SharedState::mu`.
    pub struct Inner {

        // Signalled when all threads have initialized, when the benchmark
        // is started, and when all threads are done (see C++ ThreadBody /
        // RunBenchmark kept as comments in this file).
        cv:    Condvar,

        // Total number of worker threads participating in the benchmark.
        total: i32,

        /*
          | Each thread goes through the following states:
          |
          |    (1) initializing
          |
          |    (2) waiting for others to be initialized
          |
          |    (3) running
          |
          |    (4) done
          */

        num_initialized: i32,  // threads that have completed state (1)
        num_done:        i32,  // threads that have reached state (4)
        start:           bool, // set true to release all threads into state (3)
    }
}
472
impl SharedState {

    /// Unported stub: the C++ constructor records `total` and zeroes the
    /// initialized/done counters with `start == false`.
    pub fn new(total: i32) -> Self {
    
        todo!();
        /*
        : cv(&mu),
        : total(total),
        : num_initialized(0),
        : num_done(0),
        : start(false),
        
        */
    }
}
488
/**
  | Per-thread state for concurrent executions
  | of the same benchmark.
  |
  */
pub struct ThreadState {

    /**
       0..n-1 when running in n threads
      */
    tid:    i32,

    /**
       Has different seeds for different threads
       (C++ seeds with 1000 + index)
      */
    rand:   Random,

    // Per-thread statistics; merged into thread 0's stats after the run.
    stats:  Stats,

    // Back-pointer to the coordination state owned by `run_benchmark`;
    // null until wired up there.
    shared: *mut SharedState,
}
508
impl ThreadState {
    
    /// Unported stub: seeds the per-thread RNG with `1000 + index` and
    /// leaves `shared` null until `run_benchmark` assigns it.
    pub fn new(index: i32) -> Self {
    
        todo!();
        /*


            : tid(index), rand(1000 + index), shared(nullptr)
        */
    }
}
521
/// Owns the database under test plus the knobs (entry counts, value sizes,
/// batching, write options) that `run` resets and individual benchmarks
/// override before each run.
pub struct Benchmark {
    cache:             *mut Cache,            // block cache, or null when FLAGS_cache_size < 0
    // NOTE(review): C++ allows a null filter policy (FLAGS_bloom_bits < 0);
    // a Box cannot be null, so Option<Box<dyn FilterPolicy>> may be the
    // intended port — confirm before implementing Default.
    filter_policy:     Box<dyn FilterPolicy>,
    db:                *mut dyn DB,           // DB under test, or null before open()
    num:               i32,                   // number of entries for the current benchmark
    value_size:        i32,                   // size of each value written
    entries_per_batch: i32,                   // Put calls grouped per Write
    write_options:     WriteOptions,          // sync/async write behavior
    reads:             i32,                   // number of read ops to perform
    heap_counter:      i32,                   // counter used by heap profiling (C++ HeapProfile, not in view)
}
534
pub mod benchmark {
    use super::*;

    /// Arguments handed to each benchmark worker thread (see the C++
    /// ThreadBody reference kept in `Benchmark::thread_body`).
    pub struct ThreadArg {
        bm:     *mut Benchmark,   // owning Benchmark instance
        shared: *mut SharedState, // coordination state for this run
        thread: *mut ThreadState, // this worker's private state
        // Benchmark method the thread runs.
        // NOTE(review): `-> c_void` mirrors the C++ `void` return but a
        // value of type c_void is not constructible in Rust; `fn(*mut
        // ThreadState)` is likely intended — confirm against the
        // transpiler's conventions (run_benchmark uses the same type).
        method: fn(_0: *mut ThreadState) -> c_void,
    }
}
545
impl Default for Benchmark {
    
    /// Unported stub: the C++ constructor builds the cache and bloom filter
    /// policy from the FLAGS_* globals, deletes stale "heap-" profile files
    /// from the DB directory, and — unless FLAGS_use_existing_db — destroys
    /// any existing database so benchmarks start fresh.
    fn default() -> Self {
        todo!();
        /*


            : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : nullptr),
            filter_policy_(FLAGS_bloom_bits >= 0
                               ? NewBloomFilterPolicy(FLAGS_bloom_bits)
                               : nullptr),
            db_(nullptr),
            num_(FLAGS_num),
            value_size_(FLAGS_value_size),
            entries_per_batch_(1),
            reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
            heap_counter_(0) 

        std::vector<std::string> files;
        g_env->GetChildren(FLAGS_db, &files);
        for (size_t i = 0; i < files.size(); i++) {
          if (Slice(files[i]).starts_with("heap-")) {
            g_env->DeleteFile(std::string(FLAGS_db) + "/" + files[i]);
          }
        }
        if (!FLAGS_use_existing_db) {
          DestroyDB(FLAGS_db, Options());
        }
        */
    }
}
577
impl Drop for Benchmark {
    /// Unported stub: the C++ destructor deletes the DB, cache, and filter
    /// policy; a Rust port will instead rely on the fields' own Drop impls
    /// (raw pointers here need explicit handling).
    fn drop(&mut self) {
        todo!();
        /*
            delete db_;
        delete cache_;
        delete filter_policy_;
        */
    }
}
588
589impl Benchmark {
590
    /// Unported stub: prints key/value sizing, entry count, and estimated
    /// raw/compressed data sizes to stdout, bracketed by environment info
    /// and build warnings.
    pub fn print_header(&mut self)  {
        
        todo!();
        /*
            const int kKeySize = 16;
        PrintEnvironment();
        fprintf(stdout, "Keys:       %d bytes each\n", kKeySize);
        fprintf(stdout, "Values:     %d bytes each (%d bytes after compression)\n",
                FLAGS_value_size,
                static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
        fprintf(stdout, "Entries:    %d\n", num_);
        fprintf(stdout, "RawSize:    %.1f MB (estimated)\n",
                ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
                 1048576.0));
        fprintf(stdout, "FileSize:   %.1f MB (estimated)\n",
                (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
                 1048576.0));
        PrintWarnings();
        fprintf(stdout, "------------------------------------------------\n");
        */
    }
612    
    /// Unported stub: warns on stdout when the build is unoptimized or has
    /// assertions enabled, and probes whether snappy compression is present
    /// and effective.
    pub fn print_warnings(&mut self)  {
        
        todo!();
        /*
            #if defined(__GNUC__) && !defined(__OPTIMIZE__)
        fprintf(
            stdout,
            "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
    #endif
    #ifndef NDEBUG
        fprintf(stdout,
                "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
    #endif

        // See if snappy is working by attempting to compress a compressible string
        const char text[] = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy";
        std::string compressed;
        if (!Snappy_Compress(text, sizeof(text), &compressed)) {
          fprintf(stdout, "WARNING: Snappy compression is not enabled\n");
        } else if (compressed.size() >= sizeof(text)) {
          fprintf(stdout, "WARNING: Snappy compression is not effective\n");
        }
        */
    }
637    
    /// Unported stub: prints the leveldb version to stderr, and on Linux the
    /// current date plus CPU model/cache info parsed from /proc/cpuinfo.
    pub fn print_environment(&mut self)  {
        
        todo!();
        /*
            fprintf(stderr, "LevelDB:    version %d.%d\n", kMajorVersion,
                kMinorVersion);

    #if defined(__linux)
        time_t now = time(nullptr);
        fprintf(stderr, "Date:       %s", ctime(&now));  // ctime() adds newline

        FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
        if (cpuinfo != nullptr) {
          char line[1000];
          int num_cpus = 0;
          std::string cpu_type;
          std::string cache_size;
          while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
            const char* sep = strchr(line, ':');
            if (sep == nullptr) {
              continue;
            }
            Slice key = TrimSpace(Slice(line, sep - 1 - line));
            Slice val = TrimSpace(Slice(sep + 1));
            if (key == "model name") {
              ++num_cpus;
              cpu_type = val.ToString();
            } else if (key == "cache size") {
              cache_size = val.ToString();
            }
          }
          fclose(cpuinfo);
          fprintf(stderr, "CPU:        %d * %s\n", num_cpus, cpu_type.c_str());
          fprintf(stderr, "CPUCache:   %s\n", cache_size.c_str());
        }
    #endif
        */
    }
676    
    /// Unported stub: main driver — prints the header, opens the DB, then
    /// walks the comma-separated FLAGS_benchmarks list, resetting the
    /// per-benchmark knobs, dispatching each name to its method, recreating
    /// the DB for benchmarks that need a fresh one, and running each via
    /// `run_benchmark` with FLAGS_threads workers.
    pub fn run(&mut self)  {
        
        todo!();
        /*
            PrintHeader();
        Open();

        const char* benchmarks = FLAGS_benchmarks;
        while (benchmarks != nullptr) {
          const char* sep = strchr(benchmarks, ',');
          Slice name;
          if (sep == nullptr) {
            name = benchmarks;
            benchmarks = nullptr;
          } else {
            name = Slice(benchmarks, sep - benchmarks);
            benchmarks = sep + 1;
          }

          // Reset parameters that may be overridden below
          num_ = FLAGS_num;
          reads_ = (FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads);
          value_size_ = FLAGS_value_size;
          entries_per_batch_ = 1;
          write_options_ = WriteOptions();

          c_void (Benchmark::*method)(ThreadState*) = nullptr;
          bool fresh_db = false;
          int num_threads = FLAGS_threads;

          if (name == Slice("open")) {
            method = &Benchmark::OpenBench;
            num_ /= 10000;
            if (num_ < 1) num_ = 1;
          } else if (name == Slice("fillseq")) {
            fresh_db = true;
            method = &Benchmark::WriteSeq;
          } else if (name == Slice("fillbatch")) {
            fresh_db = true;
            entries_per_batch_ = 1000;
            method = &Benchmark::WriteSeq;
          } else if (name == Slice("fillrandom")) {
            fresh_db = true;
            method = &Benchmark::WriteRandom;
          } else if (name == Slice("overwrite")) {
            fresh_db = false;
            method = &Benchmark::WriteRandom;
          } else if (name == Slice("fillsync")) {
            fresh_db = true;
            num_ /= 1000;
            write_options_.sync = true;
            method = &Benchmark::WriteRandom;
          } else if (name == Slice("fill100K")) {
            fresh_db = true;
            num_ /= 1000;
            value_size_ = 100 * 1000;
            method = &Benchmark::WriteRandom;
          } else if (name == Slice("readseq")) {
            method = &Benchmark::ReadSequential;
          } else if (name == Slice("readreverse")) {
            method = &Benchmark::ReadReverse;
          } else if (name == Slice("readrandom")) {
            method = &Benchmark::ReadRandom;
          } else if (name == Slice("readmissing")) {
            method = &Benchmark::ReadMissing;
          } else if (name == Slice("seekrandom")) {
            method = &Benchmark::SeekRandom;
          } else if (name == Slice("readhot")) {
            method = &Benchmark::ReadHot;
          } else if (name == Slice("readrandomsmall")) {
            reads_ /= 1000;
            method = &Benchmark::ReadRandom;
          } else if (name == Slice("deleteseq")) {
            method = &Benchmark::DeleteSeq;
          } else if (name == Slice("deleterandom")) {
            method = &Benchmark::DeleteRandom;
          } else if (name == Slice("readwhilewriting")) {
            num_threads++;  // Add extra thread for writing
            method = &Benchmark::ReadWhileWriting;
          } else if (name == Slice("compact")) {
            method = &Benchmark::Compact;
          } else if (name == Slice("crc32c")) {
            method = &Benchmark::Crc32c;
          } else if (name == Slice("snappycomp")) {
            method = &Benchmark::SnappyCompress;
          } else if (name == Slice("snappyuncomp")) {
            method = &Benchmark::SnappyUncompress;
          } else if (name == Slice("heapprofile")) {
            HeapProfile();
          } else if (name == Slice("stats")) {
            PrintStats("leveldb.stats");
          } else if (name == Slice("sstables")) {
            PrintStats("leveldb.sstables");
          } else {
            if (!name.empty()) {  // No error message for empty name
              fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
            }
          }

          if (fresh_db) {
            if (FLAGS_use_existing_db) {
              fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n",
                      name.ToString().c_str());
              method = nullptr;
            } else {
              delete db_;
              db_ = nullptr;
              DestroyDB(FLAGS_db, Options());
              Open();
            }
          }

          if (method != nullptr) {
            RunBenchmark(num_threads, name, method);
          }
        }
        */
    }
795    
    /// Unported stub: per-thread entry point — registers as initialized under
    /// the shared mutex, blocks until `start` is set, runs the benchmark
    /// method between stats Start/Stop, then registers as done (signalling
    /// the coordinator each time a counter reaches `total`).
    pub fn thread_body(v: *mut c_void)  {
        
        todo!();
        /*
            ThreadArg* arg = reinterpret_cast<ThreadArg*>(v);
        SharedState* shared = arg->shared;
        ThreadState* thread = arg->thread;
        {
          MutexLock l(&shared->mu);
          shared->num_initialized++;
          if (shared->num_initialized >= shared->total) {
            shared->cv.SignalAll();
          }
          while (!shared->start) {
            shared->cv.Wait();
          }
        }

        thread->stats.Start();
        (arg->bm->*(arg->method))(thread);
        thread->stats.Stop();

        {
          MutexLock l(&shared->mu);
          shared->num_done++;
          if (shared->num_done >= shared->total) {
            shared->cv.SignalAll();
          }
        }
        */
    }
827    
    /// Unported stub: spawns `n` worker threads running `method`, waits for
    /// all to initialize, releases them simultaneously, waits for all to
    /// finish, then merges every thread's stats into thread 0's and reports.
    pub fn run_benchmark(&mut self, 
        n:      i32,
        name:   Slice,
        method: fn(_0: *mut ThreadState) -> c_void)  {
        
        todo!();
        /*
            SharedState shared(n);

        ThreadArg* arg = new ThreadArg[n];
        for (int i = 0; i < n; i++) {
          arg[i].bm = this;
          arg[i].method = method;
          arg[i].shared = &shared;
          arg[i].thread = new ThreadState(i);
          arg[i].thread->shared = &shared;
          g_env->StartThread(ThreadBody, &arg[i]);
        }

        shared.mu.Lock();
        while (shared.num_initialized < n) {
          shared.cv.Wait();
        }

        shared.start = true;
        shared.cv.SignalAll();
        while (shared.num_done < n) {
          shared.cv.Wait();
        }
        shared.mu.Unlock();

        for (int i = 1; i < n; i++) {
          arg[0].thread->stats.Merge(arg[i].thread->stats);
        }
        arg[0].thread->stats.Report(name);

        for (int i = 0; i < n; i++) {
          delete arg[i].thread;
        }
        delete[] arg;
        */
    }
870    
    /// Unported stub: crc32c micro-benchmark — checksums ~500MB total in 4KB
    /// chunks, counting one op per chunk, and prints the final crc so the
    /// computation cannot be optimized away.
    pub fn crc_32c(&mut self, thread: *mut ThreadState)  {
        
        todo!();
        /*
            // Checksum about 500MB of data total
        const int size = 4096;
        const char* label = "(4K per op)";
        std::string data(size, 'x');
        int64_t bytes = 0;
        uint32_t crc = 0;
        while (bytes < 500 * 1048576) {
          crc = crc32c::Value(data.data(), size);
          thread->stats.FinishedSingleOp();
          bytes += size;
        }
        // Print so result is not dead
        fprintf(stderr, "... crc=0x%x\r", static_cast<unsigned int>(crc));

        thread->stats.AddBytes(bytes);
        thread->stats.AddMessage(label);
        */
    }
893    
    /// Unported stub: compresses 1GB of generated data one block at a time
    /// with snappy, reporting the achieved output ratio (or a failure note
    /// when snappy is unavailable).
    pub fn snappy_compress(&mut self, thread: *mut ThreadState)  {
        
        todo!();
        /*
            RandomGenerator gen;
        Slice input = gen.Generate(Options().block_size);
        int64_t bytes = 0;
        int64_t produced = 0;
        bool ok = true;
        std::string compressed;
        while (ok && bytes < 1024 * 1048576) {  // Compress 1G
          ok = Snappy_Compress(input.data(), input.size(), &compressed);
          produced += compressed.size();
          bytes += input.size();
          thread->stats.FinishedSingleOp();
        }

        if (!ok) {
          thread->stats.AddMessage("(snappy failure)");
        } else {
          char buf[100];
          snprintf(buf, sizeof(buf), "(output: %.1f%%)",
                   (produced * 100.0) / bytes);
          thread->stats.AddMessage(buf);
          thread->stats.AddBytes(bytes);
        }
        */
    }
922    
    /// Unported stub: compresses one block once, then repeatedly uncompresses
    /// it until 1GB of (uncompressed) data has been processed.
    pub fn snappy_uncompress(&mut self, thread: *mut ThreadState)  {
        
        todo!();
        /*
            RandomGenerator gen;
        Slice input = gen.Generate(Options().block_size);
        std::string compressed;
        bool ok = Snappy_Compress(input.data(), input.size(), &compressed);
        int64_t bytes = 0;
        char* uncompressed = new char[input.size()];
        while (ok && bytes < 1024 * 1048576) {  // Compress 1G
          ok = Snappy_Uncompress(compressed.data(), compressed.size(),
                                       uncompressed);
          bytes += input.size();
          thread->stats.FinishedSingleOp();
        }
        delete[] uncompressed;

        if (!ok) {
          thread->stats.AddMessage("(snappy failure)");
        } else {
          thread->stats.AddBytes(bytes);
        }
        */
    }
948    
    /// Unported stub: opens the database at FLAGS_db with Options populated
    /// from the FLAGS_* globals; exits the process on failure. Asserts the
    /// DB is not already open.
    pub fn open(&mut self)  {
        
        todo!();
        /*
            assert(db_ == nullptr);
        Options options;
        options.env = g_env;
        options.create_if_missing = !FLAGS_use_existing_db;
        options.block_cache = cache_;
        options.write_buffer_size = FLAGS_write_buffer_size;
        options.max_file_size = FLAGS_max_file_size;
        options.block_size = FLAGS_block_size;
        options.max_open_files = FLAGS_open_files;
        options.filter_policy = filter_policy_;
        options.reuse_logs = FLAGS_reuse_logs;
        crate::Status s = DB::Open(options, FLAGS_db, &db_);
        if (!s.ok()) {
          fprintf(stderr, "open error: %s\n", s.ToString().c_str());
          exit(1);
        }
        */
    }
971    
    /// Unported stub: measures DB open cost by closing and reopening the
    /// database `num_` times (num_ is scaled down by `run` for this case).
    pub fn open_bench(&mut self, thread: *mut ThreadState)  {
        
        todo!();
        /*
            for (int i = 0; i < num_; i++) {
          delete db_;
          Open();
          thread->stats.FinishedSingleOp();
        }
        */
    }
983    
    /// Unported stub: `do_write` with sequential key order.
    pub fn write_seq(&mut self, thread: *mut ThreadState)  {
        
        todo!();
        /*
            DoWrite(thread, true);
        */
    }
991    
    /// Unported stub: `do_write` with random key order.
    pub fn write_random(&mut self, thread: *mut ThreadState)  {
        
        todo!();
        /*
            DoWrite(thread, false);
        */
    }
999    
    /// Unported stub: writes `num_` entries in batches of
    /// `entries_per_batch_`; keys are zero-padded 16-digit integers,
    /// sequential (`i + j`) or random modulo FLAGS_num depending on `seq`,
    /// values come from `RandomGenerator`. Exits the process on write error.
    pub fn do_write(&mut self, 
        thread: *mut ThreadState,
        seq:    bool)  {
        
        todo!();
        /*
            if (num_ != FLAGS_num) {
          char msg[100];
          snprintf(msg, sizeof(msg), "(%d ops)", num_);
          thread->stats.AddMessage(msg);
        }

        RandomGenerator gen;
        WriteBatch batch;
        crate::Status s;
        int64_t bytes = 0;
        for (int i = 0; i < num_; i += entries_per_batch_) {
          batch.Clear();
          for (int j = 0; j < entries_per_batch_; j++) {
            const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
            char key[100];
            snprintf(key, sizeof(key), "%016d", k);
            batch.Put(key, gen.Generate(value_size_));
            bytes += value_size_ + strlen(key);
            thread->stats.FinishedSingleOp();
          }
          s = db_->Write(write_options_, &batch);
          if (!s.ok()) {
            fprintf(stderr, "put error: %s\n", s.ToString().c_str());
            exit(1);
          }
        }
        thread->stats.AddBytes(bytes);
        */
    }
1035    
    /// Unported stub: iterates forward from the first key, reading up to
    /// `reads_` entries and accumulating key+value bytes.
    pub fn read_sequential(&mut self, thread: *mut ThreadState)  {
        
        todo!();
        /*
            Iterator* iter = db_->NewIterator(ReadOptions());
        int i = 0;
        int64_t bytes = 0;
        for (iter->SeekToFirst(); i < reads_ && iter->Valid(); iter->Next()) {
          bytes += iter->key().size() + iter->value().size();
          thread->stats.FinishedSingleOp();
          ++i;
        }
        delete iter;
        thread->stats.AddBytes(bytes);
        */
    }
1052    
    /// Unported stub: iterates backward from the last key, reading up to
    /// `reads_` entries and accumulating key+value bytes.
    pub fn read_reverse(&mut self, thread: *mut ThreadState)  {
        
        todo!();
        /*
            Iterator* iter = db_->NewIterator(ReadOptions());
        int i = 0;
        int64_t bytes = 0;
        for (iter->SeekToLast(); i < reads_ && iter->Valid(); iter->Prev()) {
          bytes += iter->key().size() + iter->value().size();
          thread->stats.FinishedSingleOp();
          ++i;
        }
        delete iter;
        thread->stats.AddBytes(bytes);
        */
    }
1069    
    /// Unported stub: performs `reads_` point lookups of random keys in
    /// [0, FLAGS_num) and reports how many were found.
    pub fn read_random(&mut self, thread: *mut ThreadState)  {
        
        todo!();
        /*
            ReadOptions options;
        std::string value;
        int found = 0;
        for (int i = 0; i < reads_; i++) {
          char key[100];
          const int k = thread->rand.Next() % FLAGS_num;
          snprintf(key, sizeof(key), "%016d", k);
          if (db_->Get(options, key, &value).ok()) {
            found++;
          }
          thread->stats.FinishedSingleOp();
        }
        char msg[100];
        snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
        thread->stats.AddMessage(msg);
        */
    }
1091    
    /// Benchmark `readmissing`: perform `reads_` lookups guaranteed to miss.
    ///
    /// Port stub — body is `todo!()`. The reference C++ (kept below) is like
    /// `read_random` except the formatted key has a trailing "." ("%016d."),
    /// which never matches the "%016d" keys written by the fill benchmarks,
    /// so every `Get` misses; the result status is deliberately ignored.
    pub fn read_missing(&mut self, thread: *mut ThreadState)  {
        
        todo!();
        /*
            ReadOptions options;
        std::string value;
        for (int i = 0; i < reads_; i++) {
          char key[100];
          const int k = thread->rand.Next() % FLAGS_num;
          snprintf(key, sizeof(key), "%016d.", k);
          db_->Get(options, key, &value);
          thread->stats.FinishedSingleOp();
        }
        */
    }
1107    
    /// Benchmark `readhot`: random reads confined to the "hot" 1% of the key
    /// space.
    ///
    /// Port stub — body is `todo!()`. The reference C++ (kept below) limits
    /// the random key to `range = ceil(FLAGS_num / 100)` — i.e. the lowest 1%
    /// of keys — so lookups concentrate on a small, cache-friendly section of
    /// the DB. Hit/miss results are not tallied, only op counts.
    pub fn read_hot(&mut self, thread: *mut ThreadState)  {
        
        todo!();
        /*
            ReadOptions options;
        std::string value;
        const int range = (FLAGS_num + 99) / 100;
        for (int i = 0; i < reads_; i++) {
          char key[100];
          const int k = thread->rand.Next() % range;
          snprintf(key, sizeof(key), "%016d", k);
          db_->Get(options, key, &value);
          thread->stats.FinishedSingleOp();
        }
        */
    }
1124    
    /// Benchmark `seekrandom`: `reads_` iterator seeks to random keys.
    ///
    /// Port stub — body is `todo!()`. The reference C++ (kept below) creates
    /// a fresh iterator per seek (intentionally measuring iterator creation +
    /// seek cost), counts a "found" only when the seek lands exactly on the
    /// requested key, and reports an "(x of y found)" summary message.
    pub fn seek_random(&mut self, thread: *mut ThreadState)  {
        
        todo!();
        /*
            ReadOptions options;
        int found = 0;
        for (int i = 0; i < reads_; i++) {
          Iterator* iter = db_->NewIterator(options);
          char key[100];
          const int k = thread->rand.Next() % FLAGS_num;
          snprintf(key, sizeof(key), "%016d", k);
          iter->Seek(key);
          if (iter->Valid() && iter->key() == key) found++;
          delete iter;
          thread->stats.FinishedSingleOp();
        }
        char msg[100];
        snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
        thread->stats.AddMessage(msg);
        */
    }
1146    
    /// Shared driver for the `deleteseq` / `deleterandom` benchmarks.
    ///
    /// Port stub — body is `todo!()`. The reference C++ (kept below) deletes
    /// `num_` keys in batches of `entries_per_batch_`:
    /// * `seq == true`  — keys `i + j` in ascending order;
    /// * `seq == false` — uniformly random keys in `[0, FLAGS_num)`.
    ///
    /// Each batched `Write` is checked; any failure prints "del error: …" and
    /// aborts the process, mirroring the put-error handling in the fill
    /// benchmarks.
    pub fn do_delete(&mut self, 
        thread: *mut ThreadState,
        seq:    bool)  {
        
        todo!();
        /*
            RandomGenerator gen;
        WriteBatch batch;
        crate::Status s;
        for (int i = 0; i < num_; i += entries_per_batch_) {
          batch.Clear();
          for (int j = 0; j < entries_per_batch_; j++) {
            const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
            char key[100];
            snprintf(key, sizeof(key), "%016d", k);
            batch.Delete(key);
            thread->stats.FinishedSingleOp();
          }
          s = db_->Write(write_options_, &batch);
          if (!s.ok()) {
            fprintf(stderr, "del error: %s\n", s.ToString().c_str());
            exit(1);
          }
        }
        */
    }
1173    
    /// Benchmark `deleteseq`: delete N keys in ascending key order.
    ///
    /// Port stub — thin wrapper that, per the reference C++ below, forwards
    /// to `do_delete` with `seq = true`.
    pub fn delete_seq(&mut self, thread: *mut ThreadState)  {
        
        todo!();
        /*
            DoDelete(thread, true);
        */
    }
1181    
    /// Benchmark `deleterandom`: delete N keys in random order.
    ///
    /// Port stub — thin wrapper that, per the reference C++ below, forwards
    /// to `do_delete` with `seq = false`.
    pub fn delete_random(&mut self, thread: *mut ThreadState)  {
        
        todo!();
        /*
            DoDelete(thread, false);
        */
    }
1189    
    /// Benchmark `readwhilewriting`: readers measured while one background
    /// writer keeps the DB busy.
    ///
    /// Port stub — body is `todo!()`. In the reference C++ (kept below),
    /// threads with `tid > 0` just run the `readrandom` workload; thread 0 is
    /// the special writer that loops issuing random `Put`s until it observes
    /// (under the shared mutex) that all other threads have finished, then
    /// resets its own stats so the writer's setup/delay does not pollute the
    /// reported numbers. Only the readers' stats are meaningful.
    pub fn read_while_writing(&mut self, thread: *mut ThreadState)  {
        
        todo!();
        /*
            if (thread->tid > 0) {
          ReadRandom(thread);
        } else {
          // Special thread that keeps writing until other threads are done.
          RandomGenerator gen;
          while (true) {
            {
              MutexLock l(&thread->shared->mu);
              if (thread->shared->num_done + 1 >= thread->shared->num_initialized) {
                // Other threads have finished
                break;
              }
            }

            const int k = thread->rand.Next() % FLAGS_num;
            char key[100];
            snprintf(key, sizeof(key), "%016d", k);
            crate::Status s = db_->Put(write_options_, key, gen.Generate(value_size_));
            if (!s.ok()) {
              fprintf(stderr, "put error: %s\n", s.ToString().c_str());
              exit(1);
            }
          }

          // Do not count any of the preceding work/delay in stats.
          thread->stats.Start();
        }
        */
    }
1223    
    /// Meta operation `compact`: force a full manual compaction of the DB.
    ///
    /// Port stub — per the reference C++ below, a single call to
    /// `CompactRange(nullptr, nullptr)`, i.e. compact the entire key range.
    /// `thread` is unused by the reference implementation.
    pub fn compact(&mut self, thread: *mut ThreadState)  {
        
        todo!();
        /*
            db_->CompactRange(nullptr, nullptr);
        */
    }
1231    
    /// Meta operations `stats` / `sstables`: print a named DB property to
    /// stdout, or "(failed)" when the property is unknown.
    ///
    /// Port stub — body is `todo!()`. `key_` is expected to be a
    /// NUL-terminated C string naming a LevelDB property (e.g.
    /// "leveldb.stats"); the trailing underscore only avoids shadowing —
    /// the reference C++ below calls the parameter `key`.
    /// NOTE(review): presumably `*const u8` stands in for `const char*` from
    /// the C++ original — confirm against the rest of the port.
    pub fn print_stats(&mut self, key_: *const u8)  {
        
        todo!();
        /*
            std::string stats;
        if (!db_->GetProperty(key, &stats)) {
          stats = "(failed)";
        }
        fprintf(stdout, "\n%s\n", stats.c_str());
        */
    }
1243    
    /// C-style callback used by `heap_profile`: append `n` bytes from `buf`
    /// to the `WritableFile*` smuggled through the opaque `arg` pointer.
    ///
    /// Port stub — per the reference C++ below, this is the sink handed to
    /// `GetHeapProfile`, which is why it takes a `c_void` context instead of
    /// a typed receiver. Associated function (no `self`).
    pub fn write_to_file(
        arg: *mut c_void,
        buf: *const u8,
        n:   i32)  {
        
        todo!();
        /*
            reinterpret_cast<WritableFile*>(arg)->Append(Slice(buf, n));
        */
    }
1254    
    /// Meta operation `heapprofile`: dump a heap profile to
    /// "<FLAGS_db>/heap-NNNN" (NNNN = incrementing `heap_counter_`).
    ///
    /// Port stub — body is `todo!()`. The reference C++ (kept below) opens a
    /// new writable file via the global env, streams the profile into it
    /// through the `write_to_file` callback, and on failure prints
    /// "heap profiling not supported" and deletes the partial file. Best
    /// effort: errors are reported to stderr, never propagated.
    pub fn heap_profile(&mut self)  {
        
        todo!();
        /*
            char fname[100];
        snprintf(fname, sizeof(fname), "%s/heap-%04d", FLAGS_db, ++heap_counter_);
        WritableFile* file;
        crate::Status s = g_env->NewWritableFile(fname, &file);
        if (!s.ok()) {
          fprintf(stderr, "%s\n", s.ToString().c_str());
          return;
        }
        bool ok = GetHeapProfile(WriteToFile, file);
        delete file;
        if (!ok) {
          fprintf(stderr, "heap profiling not supported\n");
          g_env->DeleteFile(fname);
        }
        */
    }
1275}
1276
/// Entry point of the db_bench tool (port of C++ `main`).
///
/// Port stub — body is `todo!()`. Per the reference C++ kept below, the
/// implemented version will:
/// 1. seed the size-related FLAGS from `leveldb::Options` defaults;
/// 2. parse each `--flag=value` argument with `sscanf`/prefix matching,
///    exiting with status 1 on any unrecognized flag (the `%c` "junk"
///    pattern rejects trailing garbage after a numeric value);
/// 3. fall back to "<test-dir>/dbbench" when no `--db=` path was given;
/// 4. construct a `Benchmark` and run it, returning 0.
///
/// Takes raw C-style `argc`/`argv` to match the original's signature.
pub fn benchdb_bench_main (
    argc: i32,
    argv: *mut *mut u8) -> i32 {

    todo!();
        /*
            FLAGS_write_buffer_size = leveldb::Options().write_buffer_size;
      FLAGS_max_file_size = leveldb::Options().max_file_size;
      FLAGS_block_size = leveldb::Options().block_size;
      FLAGS_open_files = leveldb::Options().max_open_files;
      std::string default_db_path;

      for (int i = 1; i < argc; i++) {
        double d;
        int n;
        char junk;
        if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) {
          FLAGS_benchmarks = argv[i] + strlen("--benchmarks=");
        } else if (sscanf(argv[i], "--compression_ratio=%lf%c", &d, &junk) == 1) {
          FLAGS_compression_ratio = d;
        } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 &&
                   (n == 0 || n == 1)) {
          FLAGS_histogram = n;
        } else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 &&
                   (n == 0 || n == 1)) {
          FLAGS_use_existing_db = n;
        } else if (sscanf(argv[i], "--reuse_logs=%d%c", &n, &junk) == 1 &&
                   (n == 0 || n == 1)) {
          FLAGS_reuse_logs = n;
        } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) {
          FLAGS_num = n;
        } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) {
          FLAGS_reads = n;
        } else if (sscanf(argv[i], "--threads=%d%c", &n, &junk) == 1) {
          FLAGS_threads = n;
        } else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) {
          FLAGS_value_size = n;
        } else if (sscanf(argv[i], "--write_buffer_size=%d%c", &n, &junk) == 1) {
          FLAGS_write_buffer_size = n;
        } else if (sscanf(argv[i], "--max_file_size=%d%c", &n, &junk) == 1) {
          FLAGS_max_file_size = n;
        } else if (sscanf(argv[i], "--block_size=%d%c", &n, &junk) == 1) {
          FLAGS_block_size = n;
        } else if (sscanf(argv[i], "--cache_size=%d%c", &n, &junk) == 1) {
          FLAGS_cache_size = n;
        } else if (sscanf(argv[i], "--bloom_bits=%d%c", &n, &junk) == 1) {
          FLAGS_bloom_bits = n;
        } else if (sscanf(argv[i], "--open_files=%d%c", &n, &junk) == 1) {
          FLAGS_open_files = n;
        } else if (strncmp(argv[i], "--db=", 5) == 0) {
          FLAGS_db = argv[i] + 5;
        } else {
          fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
          exit(1);
        }
      }

      leveldb::g_env = leveldb::Env::Default();

      // Choose a location for the test database if none given with --db=<path>
      if (FLAGS_db == nullptr) {
        leveldb::g_env->GetTestDirectory(&default_db_path);
        default_db_path += "/dbbench";
        FLAGS_db = default_db_path.c_str();
      }

      leveldb::Benchmark benchmark;
      benchmark.Run();
      return 0;
        */
}