rust-librocksdb-sys 0.43.0+11.0.4

Native bindings to librocksdb
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
#pragma once

#include <algorithm>

#include "memory/memory_allocator_impl.h"
#include "rocksdb/advanced_compression.h"
#include "rocksdb/options.h"
#include "table/block_based/block_type.h"
#include "util/aligned_buffer.h"
#include "util/coding.h"
#include "util/compression_context_cache.h"

#ifdef ZSTD
#include <zstd.h>
#include <zstd_errors.h>
// ZSTD_Compress2(), ZSTD_compressStream2() and frame parameters all belong to
// advanced APIs and require v1.4.0+, which is from April 2019.
// https://github.com/facebook/zstd/blob/eb9f881eb810f2242f1ef36b3f3e7014eecb8fa6/lib/zstd.h#L297C40-L297C45
// To avoid a rat's nest of #ifdefs, we now require v1.4.0+ for ZSTD support.
#if ZSTD_VERSION_NUMBER < 10400
#error "ZSTD support requires version >= 1.4.0 (libzstd-devel)"
#endif  // ZSTD_VERSION_NUMBER
// The above release also includes digested dictionary support, but some
// required functions (ZSTD_createDDict_byReference) are still only exported
// with ZSTD_STATIC_LINKING_ONLY defined.
#if defined(ZSTD_STATIC_LINKING_ONLY)
#define ROCKSDB_ZSTD_DDICT
#endif  // defined(ZSTD_STATIC_LINKING_ONLY)
//  For ZDICT_* functions
#include <zdict.h>
// ZDICT_finalizeDictionary API is exported and stable since v1.4.5
#if ZSTD_VERSION_NUMBER >= 10405
#define ROCKSDB_ZDICT_FINALIZE
#endif  // ZSTD_VERSION_NUMBER >= 10405
#endif  // ZSTD

namespace ROCKSDB_NAMESPACE {
// Need this for the context allocation override
// On windows we need to do this explicitly
#if defined(ZSTD) && defined(ROCKSDB_JEMALLOC) && defined(OS_WIN) && \
    defined(ZSTD_STATIC_LINKING_ONLY)
#define ROCKSDB_ZSTD_CUSTOM_MEM
namespace port {
ZSTD_customMem GetJeZstdAllocationOverrides();
}  // namespace port
#endif  // defined(ZSTD) && defined(ROCKSDB_JEMALLOC) && defined(OS_WIN) &&
        // defined(ZSTD_STATIC_LINKING_ONLY)

// Cached data represents a portion that can be re-used
// If, in the future we have more than one native context to
// cache we can arrange this as a tuple
class ZSTDUncompressCachedData {
 public:
#if defined(ZSTD)
  using ZSTDNativeContext = ZSTD_DCtx*;
#else
  // Placeholder so the type compiles without ZSTD; always stays nullptr.
  using ZSTDNativeContext = void*;
#endif  // ZSTD
  ZSTDUncompressCachedData() {}
  // Init from cache
  // Copying is disallowed: a native context has exactly one owner (either
  // this instance or the context cache, distinguished via cache_idx_).
  ZSTDUncompressCachedData(const ZSTDUncompressCachedData& o) = delete;
  ZSTDUncompressCachedData& operator=(const ZSTDUncompressCachedData&) = delete;
  ZSTDUncompressCachedData(ZSTDUncompressCachedData&& o) noexcept
      : ZSTDUncompressCachedData() {
    *this = std::move(o);
  }
  ZSTDUncompressCachedData& operator=(ZSTDUncompressCachedData&& o) noexcept {
    // Move-assignment is only valid into an empty instance; swapping then
    // leaves `o` empty and transfers both context and cache slot.
    assert(zstd_ctx_ == nullptr);
    std::swap(zstd_ctx_, o.zstd_ctx_);
    std::swap(cache_idx_, o.cache_idx_);
    return *this;
  }
  // Returns the native context; nullptr until CreateIfNeeded()/InitFromCache.
  ZSTDNativeContext Get() const { return zstd_ctx_; }
  // Cache slot index this context was borrowed from, or -1 if owned here.
  int64_t GetCacheIndex() const { return cache_idx_; }
  // Lazily allocates an owned context; no-op if one is already present.
  void CreateIfNeeded() {
    if (zstd_ctx_ == nullptr) {
#if !defined(ZSTD)
      zstd_ctx_ = nullptr;
#elif defined(ROCKSDB_ZSTD_CUSTOM_MEM)
      // Use the jemalloc allocation overrides (Windows + static ZSTD build).
      zstd_ctx_ =
          ZSTD_createDCtx_advanced(port::GetJeZstdAllocationOverrides());
#else  // ZSTD && !ROCKSDB_ZSTD_CUSTOM_MEM
      zstd_ctx_ = ZSTD_createDCtx();
#endif
      cache_idx_ = -1;
    }
  }
  // Adopts a context borrowed from the cache at slot `idx`; the cache keeps
  // ownership, so the destructor will not free it.
  void InitFromCache(const ZSTDUncompressCachedData& o, int64_t idx) {
    zstd_ctx_ = o.zstd_ctx_;
    cache_idx_ = idx;
  }
  ~ZSTDUncompressCachedData() {
#if defined(ZSTD)
    // Only free contexts we own (cache_idx_ == -1); borrowed ones are
    // handed back to the cache by the borrower.
    if (zstd_ctx_ != nullptr && cache_idx_ == -1) {
      ZSTD_freeDCtx(zstd_ctx_);
    }
#endif  // ZSTD
  }

 private:
  ZSTDNativeContext zstd_ctx_ = nullptr;
  int64_t cache_idx_ = -1;  // -1 means this instance owns the context
};
}  // namespace ROCKSDB_NAMESPACE

#if defined(XPRESS)
#include "port/xpress.h"
#endif

namespace ROCKSDB_NAMESPACE {

class FailureDecompressor : public Decompressor {
 public:
  // Decompressor stub that fails every operation with a fixed non-OK status.
  explicit FailureDecompressor(Status&& status) : status_(std::move(status)) {
    assert(!status_.ok());
  }
  // Mark the stored status as checked so its destructor does not complain
  // if no caller ever inspected the error.
  ~FailureDecompressor() override { status_.PermitUncheckedError(); }

  const char* Name() const override { return "FailureDecompressor"; }

  // Always fails with the stored status.
  Status ExtractUncompressedSize(Args& /*args*/) override { return status_; }

  // Always fails with the stored status.
  Status DecompressBlock(const Args& /*args*/,
                         char* /*uncompressed_output*/) override {
    return status_;
  }

 protected:
  Status status_;
};

// Owns a decompression dictionary, and associated Decompressor, for storing
// in the block cache.
//
// Justification: for a "processed" dictionary to be saved in block cache, we
// also need a reference to the decompressor that processed it, to ensure it
// is recognized properly. At that point, we might as well have the dictionary
// part of the decompressor identity and track an associated decompressor along
// with a decompression dictionary in the block cache, and the decompressor
// hides potential details of processing the dictionary.
struct DecompressorDict {
  // Block containing the data for the compression dictionary in case the
  // constructor that takes a string parameter is used.
  std::string dict_str_;

  // Block containing the data for the compression dictionary in case the
  // constructor that takes a Slice parameter is used and the passed in
  // CacheAllocationPtr is not nullptr.
  CacheAllocationPtr dict_allocation_;

  // A Decompressor referencing and using the dictionary owned by this.
  std::unique_ptr<Decompressor> decompressor_;

  // Approximate owned memory usage, set by Populate(). Initialized so a
  // moved-from or partially-constructed object reports a sane value.
  size_t memory_usage_ = 0;

  DecompressorDict(std::string&& dict, Decompressor& from_decompressor)
      : dict_str_(std::move(dict)) {
    Populate(from_decompressor, dict_str_);
  }

  DecompressorDict(Slice slice, CacheAllocationPtr&& allocation,
                   Decompressor& from_decompressor)
      : dict_allocation_(std::move(allocation)) {
    Populate(from_decompressor, slice);
  }

  DecompressorDict(DecompressorDict&& rhs) noexcept
      : dict_str_(std::move(rhs.dict_str_)),
        dict_allocation_(std::move(rhs.dict_allocation_)),
        decompressor_(std::move(rhs.decompressor_)),
        memory_usage_(rhs.memory_usage_) {}

  DecompressorDict& operator=(DecompressorDict&& rhs) noexcept {
    if (this == &rhs) {
      return *this;
    }
    dict_str_ = std::move(rhs.dict_str_);
    dict_allocation_ = std::move(rhs.dict_allocation_);
    decompressor_ = std::move(rhs.decompressor_);
    // BUGFIX: memory_usage_ was previously not transferred by move
    // assignment (unlike the move constructor), leaving a stale value
    // in the assigned-to object.
    memory_usage_ = rhs.memory_usage_;
    return *this;
  }
  // Disable copy
  DecompressorDict(const DecompressorDict&) = delete;
  DecompressorDict& operator=(const DecompressorDict&) = delete;

  // The object is self-contained if the string constructor is used, or the
  // Slice constructor is invoked with a non-null allocation. Otherwise, it
  // is the caller's responsibility to ensure that the underlying storage
  // outlives this object.
  bool own_bytes() const { return !dict_str_.empty() || dict_allocation_; }

  const Slice& GetRawDict() const { return decompressor_->GetSerializedDict(); }

  // For TypedCacheInterface
  const Slice& ContentSlice() const { return GetRawDict(); }
  static constexpr CacheEntryRole kCacheEntryRole = CacheEntryRole::kOtherBlock;
  static constexpr BlockType kBlockType = BlockType::kCompressionDictionary;

  size_t ApproximateMemoryUsage() const { return memory_usage_; }

 private:
  // Builds decompressor_ (and memory_usage_) from the given dictionary bytes.
  void Populate(Decompressor& from_decompressor, Slice dict);
};

// Holds dictionary and related data, like ZSTD's digested compression
// dictionary.
struct CompressionDict {
#ifdef ZSTD
  // Digested ("processed") form of dict_, or nullptr if unavailable.
  ZSTD_CDict* zstd_cdict_ = nullptr;
#endif  // ZSTD
  // Raw dictionary bytes.
  std::string dict_;

 public:
  CompressionDict() = default;
  // Takes ownership of `dict`. For kZSTD, additionally tries to build a
  // digested dictionary at the given compression level.
  CompressionDict(std::string&& dict, CompressionType type, int level) {
    dict_ = std::move(dict);
#ifdef ZSTD
    zstd_cdict_ = nullptr;
    if (!dict_.empty() && type == kZSTD) {
      if (level == CompressionOptions::kDefaultCompressionLevel) {
        // NB: ZSTD_CLEVEL_DEFAULT is historically == 3
        level = ZSTD_CLEVEL_DEFAULT;
      }
      // Should be safe (but slower) if below call fails as we'll use the
      // raw dictionary to compress.
      zstd_cdict_ = ZSTD_createCDict(dict_.data(), dict_.size(), level);
      assert(zstd_cdict_ != nullptr);
    }
#else
    (void)type;
    (void)level;
#endif  // ZSTD
  }

  // noexcept so containers can move (not copy) on reallocation.
  CompressionDict(CompressionDict&& other) noexcept {
#ifdef ZSTD
    zstd_cdict_ = other.zstd_cdict_;
    other.zstd_cdict_ = nullptr;
#endif  // ZSTD
    dict_ = std::move(other.dict_);
  }
  CompressionDict& operator=(CompressionDict&& other) noexcept {
    if (this == &other) {
      return *this;
    }
#ifdef ZSTD
    // BUGFIX: free any digested dictionary this instance already owns
    // before taking over other's; previously it was overwritten and leaked.
    if (zstd_cdict_ != nullptr) {
      size_t res = ZSTD_freeCDict(zstd_cdict_);
      assert(res == 0);  // Last I checked they can't fail
      (void)res;
    }
    zstd_cdict_ = other.zstd_cdict_;
    other.zstd_cdict_ = nullptr;
#endif  // ZSTD
    dict_ = std::move(other.dict_);
    return *this;
  }

  ~CompressionDict() {
#ifdef ZSTD
    size_t res = 0;
    if (zstd_cdict_ != nullptr) {
      res = ZSTD_freeCDict(zstd_cdict_);
    }
    assert(res == 0);  // Last I checked they can't fail
    (void)res;         // prevent unused var warning
#endif                 // ZSTD
  }

#ifdef ZSTD
  // May be nullptr even for kZSTD (if creation failed); callers are expected
  // to fall back to the raw dictionary.
  const ZSTD_CDict* GetDigestedZstdCDict() const { return zstd_cdict_; }
#endif  // ZSTD

  Slice GetRawDict() const { return dict_; }
  bool empty() const { return dict_.empty(); }

  static const CompressionDict& GetEmptyDict() {
    static CompressionDict empty_dict{};
    return empty_dict;
  }

  // Disable copy
  CompressionDict(const CompressionDict&) = delete;
  CompressionDict& operator=(const CompressionDict&) = delete;
};

class CompressionContext : public Compressor::WorkingArea {
 private:
#ifdef ZSTD
  ZSTD_CCtx* zstd_ctx_ = nullptr;

  // Allocates a compression context, honoring the jemalloc allocation
  // overrides when configured (Windows + static ZSTD build).
  ZSTD_CCtx* CreateZSTDContext() {
#ifdef ROCKSDB_ZSTD_CUSTOM_MEM
    return ZSTD_createCCtx_advanced(port::GetJeZstdAllocationOverrides());
#else   // ROCKSDB_ZSTD_CUSTOM_MEM
    return ZSTD_createCCtx();
#endif  // ROCKSDB_ZSTD_CUSTOM_MEM
  }

 public:
  // callable inside ZSTD_Compress
  ZSTD_CCtx* ZSTDPreallocCtx() const {
    assert(zstd_ctx_ != nullptr);
    return zstd_ctx_;
  }

 private:
#endif  // ZSTD

  // Creates and configures the native context for kZSTD (other types need
  // no native state here). If setting a parameter fails, the context is
  // freed and recreated so a half-configured context is never used;
  // debug builds assert because this is not expected to happen.
  void CreateNativeContext(CompressionType type, int level, bool checksum) {
#ifdef ZSTD
    if (type == kZSTD) {
      zstd_ctx_ = CreateZSTDContext();
      if (level == CompressionOptions::kDefaultCompressionLevel) {
        // NB: ZSTD_CLEVEL_DEFAULT is historically == 3
        level = ZSTD_CLEVEL_DEFAULT;
      }
      size_t err =
          ZSTD_CCtx_setParameter(zstd_ctx_, ZSTD_c_compressionLevel, level);
      if (ZSTD_isError(err)) {
        assert(false);
        ZSTD_freeCCtx(zstd_ctx_);
        zstd_ctx_ = CreateZSTDContext();
      }
      if (checksum) {
        // Ask ZSTD to append a frame checksum for integrity verification.
        err = ZSTD_CCtx_setParameter(zstd_ctx_, ZSTD_c_checksumFlag, 1);
        if (ZSTD_isError(err)) {
          assert(false);
          ZSTD_freeCCtx(zstd_ctx_);
          zstd_ctx_ = CreateZSTDContext();
        }
      }
    }
#else
    (void)type;
    (void)level;
    (void)checksum;
#endif  // ZSTD
  }
  void DestroyNativeContext() {
#ifdef ZSTD
    if (zstd_ctx_ != nullptr) {
      ZSTD_freeCCtx(zstd_ctx_);
    }
#endif  // ZSTD
  }

 public:
  explicit CompressionContext(CompressionType type,
                              const CompressionOptions& options) {
    CreateNativeContext(type, options.level, options.checksum);
  }
  ~CompressionContext() { DestroyNativeContext(); }
  CompressionContext(const CompressionContext&) = delete;
  CompressionContext& operator=(const CompressionContext&) = delete;
};

// This is like a working area, reusable for different dicts, etc.
// TODO: refactor / consolidate
class UncompressionContext : public Decompressor::WorkingArea {
 private:
  CompressionContextCache* context_cache_ = nullptr;
  ZSTDUncompressCachedData cached_data_;

 public:
  // For ZSTD, borrow a decompression context from the process-wide cache;
  // other compression types need no native context here.
  explicit UncompressionContext(CompressionType type) {
    if (type != kZSTD) {
      return;
    }
    context_cache_ = CompressionContextCache::Instance();
    cached_data_ = context_cache_->GetCachedZSTDUncompressData();
  }
  ~UncompressionContext() {
    const int64_t slot = cached_data_.GetCacheIndex();
    if (slot != -1) {
      // The context was borrowed from the cache: return the slot rather
      // than freeing the context.
      assert(context_cache_ != nullptr);
      context_cache_->ReturnCachedZSTDUncompressData(slot);
    }
  }
  UncompressionContext(const UncompressionContext&) = delete;
  UncompressionContext& operator=(const UncompressionContext&) = delete;

  ZSTDUncompressCachedData::ZSTDNativeContext GetZSTDContext() const {
    return cached_data_.Get();
  }
};

// True iff this binary was built with Snappy support.
inline bool Snappy_Supported() {
#if defined(SNAPPY)
  return true;
#else
  return false;
#endif  // SNAPPY
}

// True iff this binary was built with zlib support.
inline bool Zlib_Supported() {
#if defined(ZLIB)
  return true;
#else
  return false;
#endif  // ZLIB
}

// True iff this binary was built with bzip2 support.
inline bool BZip2_Supported() {
#if defined(BZIP2)
  return true;
#else
  return false;
#endif  // BZIP2
}

// True iff this binary was built with LZ4 support.
inline bool LZ4_Supported() {
#if defined(LZ4)
  return true;
#else
  return false;
#endif  // LZ4
}

// True iff this binary was built with XPRESS (Windows) support.
inline bool XPRESS_Supported() {
#if defined(XPRESS)
  return true;
#else
  return false;
#endif  // XPRESS
}

// True iff this binary was built with ZSTD support.
// NB: ZSTD format is finalized since version 0.8.0. See ZSTD_VERSION_NUMBER
// check above.
inline bool ZSTD_Supported() {
#if defined(ZSTD)
  return true;
#else
  return false;
#endif  // ZSTD
}

// True iff streaming (WAL) ZSTD compression is available in this build.
inline bool ZSTD_Streaming_Supported() {
#ifdef ZSTD
  return true;
#else
  return false;
#endif  // ZSTD
}

// Streaming (WAL) compression only supports no-compression and ZSTD.
inline bool StreamingCompressionTypeSupported(
    CompressionType compression_type) {
  if (compression_type == kNoCompression) {
    return true;
  }
  if (compression_type == kZSTD) {
    return ZSTD_Streaming_Supported();
  }
  return false;
}

// Maps each built-in compression type to its compile-time availability.
inline bool CompressionTypeSupported(CompressionType compression_type) {
  switch (compression_type) {
    case kNoCompression:
      return true;
    case kSnappyCompression:
      return Snappy_Supported();
    case kZlibCompression:
      return Zlib_Supported();
    case kBZip2Compression:
      return BZip2_Supported();
    case kLZ4Compression:
    case kLZ4HCCompression:
      // Both LZ4 variants are provided by the same library.
      return LZ4_Supported();
    case kXpressCompression:
      return XPRESS_Supported();
    case kZSTD:
      return ZSTD_Supported();
    default:  // Including custom compression types
      return false;
  }
}

// True iff the given type supports preset compression dictionaries in this
// build. Only zlib, LZ4 (new enough), and ZSTD can use dictionaries.
inline bool DictCompressionTypeSupported(CompressionType compression_type) {
  switch (compression_type) {
    case kZlibCompression:
      return Zlib_Supported();
    case kLZ4Compression:
    case kLZ4HCCompression:
#if LZ4_VERSION_NUMBER >= 10400  // r124+
      return LZ4_Supported();
#else
      return false;
#endif
    case kZSTD:
      // NB: dictionary supported since 0.5.0. See ZSTD_VERSION_NUMBER check
      // above.
      return ZSTD_Supported();
    case kNoCompression:
    case kSnappyCompression:
    case kBZip2Compression:
    case kXpressCompression:
    default:  // Including custom compression types
      return false;
  }
}

// WART: does not match OptionsHelper::compression_type_string_map
std::string CompressionTypeToString(CompressionType compression_type);

// WART: does not match OptionsHelper::compression_type_string_map
CompressionType CompressionTypeFromString(std::string compression_type_str);

std::string CompressionOptionsToString(
    const CompressionOptions& compression_options);

// True iff the ZSTD dictionary trainer (ZDICT) is available in this build.
// NB: Dictionary trainer is available since v0.6.1 for static linking, but
// not available for dynamic linking until v1.1.3. See ZSTD_VERSION_NUMBER
// check above.
inline bool ZSTD_TrainDictionarySupported() {
#if defined(ZSTD)
  return true;
#else
  return false;
#endif  // ZSTD
}

// True iff ZDICT_finalizeDictionary is available (stable since zstd v1.4.5).
inline bool ZSTD_FinalizeDictionarySupported() {
#if defined(ROCKSDB_ZDICT_FINALIZE)
  return true;
#else
  return false;
#endif  // ROCKSDB_ZDICT_FINALIZE
}

// The new compression APIs intentionally make it difficult to generate
// compressed data larger than the original. (It is better to store the
// uncompressed version in that case.) For legacy cases that must store
// compressed data even when larger than the uncompressed, this is a convenient
// wrapper to support that, with a compressor from BuiltinCompressionManager and
// a GrowableBuffer.
Status LegacyForceBuiltinCompression(
    Compressor& builtin_compressor,
    Compressor::ManagedWorkingArea* working_area, Slice from,
    GrowableBuffer* to);

// Records the compression type for subsequent WAL records.
class CompressionTypeRecord {
 public:
  explicit CompressionTypeRecord(CompressionType compression_type)
      : compression_type_(compression_type) {}

  CompressionType GetCompressionType() const { return compression_type_; }

  // Serializes the compression type as a fixed-width 32-bit value.
  inline void EncodeTo(std::string* dst) const {
    assert(dst != nullptr);
    PutFixed32(dst, compression_type_);
  }

  // Parses a record written by EncodeTo(), rejecting values that are not
  // usable for streaming (WAL) compression.
  inline Status DecodeFrom(Slice* src) {
    constexpr char class_name[] = "CompressionTypeRecord";

    uint32_t raw;
    if (!GetFixed32(src, &raw)) {
      return Status::Corruption(class_name,
                                "Error decoding WAL compression type");
    }
    const auto decoded_type = static_cast<CompressionType>(raw);
    if (!StreamingCompressionTypeSupported(decoded_type)) {
      return Status::Corruption(class_name,
                                "WAL compression type not supported");
    }
    compression_type_ = decoded_type;
    return Status::OK();
  }

  inline std::string DebugString() const {
    return "compression_type: " + CompressionTypeToString(compression_type_);
  }

 private:
  CompressionType compression_type_;
};

// Base class to implement compression for a stream of buffers.
// Instantiate an implementation of the class using Create() with the
// compression type and use Compress() repeatedly.
// The output buffer needs to be at least max_output_len.
// Call Reset() in between frame boundaries or in case of an error.
// NOTE: This class is not thread safe.
class StreamingCompress {
 public:
  StreamingCompress(CompressionType compression_type,
                    const CompressionOptions& opts,
                    uint32_t compress_format_version, size_t max_output_len)
      : compression_type_(compression_type),
        opts_(opts),
        compress_format_version_(compress_format_version),
        max_output_len_(max_output_len) {}
  virtual ~StreamingCompress() = default;
  // compress should be called repeatedly with the same input till the method
  // returns 0
  // Parameters:
  // input - buffer to compress
  // input_size - size of input buffer
  // output - compressed buffer allocated by caller, should be at least
  // max_output_len
  // output_pos - (out) presumably the number of bytes written to output;
  // confirm against the implementations (NOTE(review): original comment
  // said "output_size" which does not match the parameter name)
  // Returns -1 for errors, the remaining size of the input buffer that needs
  // to be compressed
  virtual int Compress(const char* input, size_t input_size, char* output,
                       size_t* output_pos) = 0;
  // static method to create object of a class inherited from
  // StreamingCompress based on the actual compression type.
  static StreamingCompress* Create(CompressionType compression_type,
                                   const CompressionOptions& opts,
                                   uint32_t compress_format_version,
                                   size_t max_output_len);
  // Resets internal state; call between frame boundaries or after an error.
  virtual void Reset() = 0;

 protected:
  const CompressionType compression_type_;
  const CompressionOptions opts_;
  const uint32_t compress_format_version_;
  const size_t max_output_len_;
};

// Base class to uncompress a stream of compressed buffers.
// Instantiate an implementation of the class using Create() with the
// compression type and use Uncompress() repeatedly.
// The output buffer needs to be at least max_output_len.
// Call Reset() in between frame boundaries or in case of an error.
// NOTE: This class is not thread safe.
class StreamingUncompress {
 public:
  StreamingUncompress(CompressionType compression_type,
                      uint32_t compress_format_version, size_t max_output_len)
      : compression_type_(compression_type),
        compress_format_version_(compress_format_version),
        max_output_len_(max_output_len) {}
  virtual ~StreamingUncompress() = default;
  // Uncompress can be called repeatedly to progressively process the same
  // input buffer, or can be called with a new input buffer. When the input
  // buffer is not fully consumed, the return value is > 0 or output_size
  // == max_output_len. When calling uncompress to continue processing the
  // same input buffer, the input argument should be nullptr.
  // Parameters:
  // input - buffer to uncompress
  // input_size - size of input buffer
  // output - uncompressed buffer allocated by caller, should be at least
  // max_output_len
  // output_pos - (out) presumably the number of bytes written to output;
  // confirm against the implementations
  // Returns -1 for errors, remaining input to be processed otherwise.
  virtual int Uncompress(const char* input, size_t input_size, char* output,
                         size_t* output_pos) = 0;
  // Factory for the concrete implementation matching compression_type.
  static StreamingUncompress* Create(CompressionType compression_type,
                                     uint32_t compress_format_version,
                                     size_t max_output_len);
  // Resets internal state; call between frame boundaries or after an error.
  virtual void Reset() = 0;

 protected:
  CompressionType compression_type_;
  uint32_t compress_format_version_;
  size_t max_output_len_;
};

class ZSTDStreamingCompress final : public StreamingCompress {
 public:
  explicit ZSTDStreamingCompress(const CompressionOptions& opts,
                                 uint32_t compress_format_version,
                                 size_t max_output_len)
      : StreamingCompress(kZSTD, opts, compress_format_version,
                          max_output_len) {
#ifdef ZSTD
    cctx_ = ZSTD_createCCtx();
    // BUGFIX: validate the context before its first use; previously the
    // assert came only after ZSTD_CCtx_setParameter had already received
    // (and potentially dereferenced) a possibly-null context.
    assert(cctx_ != nullptr);
    // Each compressed frame will have a checksum
    ZSTD_CCtx_setParameter(cctx_, ZSTD_c_checksumFlag, 1);
    input_buffer_ = {/*src=*/nullptr, /*size=*/0, /*pos=*/0};
#endif
  }
  ~ZSTDStreamingCompress() override {
#ifdef ZSTD
    // Per the zstd API, free functions accept nullptr.
    ZSTD_freeCCtx(cctx_);
#endif
  }
  int Compress(const char* input, size_t input_size, char* output,
               size_t* output_pos) override;
  void Reset() override;
#ifdef ZSTD
  ZSTD_CCtx* cctx_;
  ZSTD_inBuffer input_buffer_;
#endif
};

class ZSTDStreamingUncompress final : public StreamingUncompress {
 public:
  explicit ZSTDStreamingUncompress(uint32_t compress_format_version,
                                   size_t max_output_len)
      : StreamingUncompress(kZSTD, compress_format_version, max_output_len) {
#ifdef ZSTD
    dctx_ = ZSTD_createDCtx();
    assert(dctx_ != nullptr);
    // Start with an empty input buffer; Uncompress() supplies the data.
    input_buffer_ = {/*src=*/nullptr, /*size=*/0, /*pos=*/0};
#endif
  }
  ~ZSTDStreamingUncompress() override {
#ifdef ZSTD
    ZSTD_freeDCtx(dctx_);
#endif
  }
  int Uncompress(const char* input, size_t input_size, char* output,
                 size_t* output_size) override;
  void Reset() override;

 private:
#ifdef ZSTD
  ZSTD_DCtx* dctx_;
  ZSTD_inBuffer input_buffer_;
#endif
};

}  // namespace ROCKSDB_NAMESPACE