// bitcoinleveldb_lru/lru_cache.rs

/*!
  | LRU cache implementation
  |
  | Cache entries have an "in_cache" boolean
  | indicating whether the cache has a reference on
  | the entry.  The only ways that this can become
  | false without the entry being passed to its
  | "deleter" are via Erase(), via Insert() when an
  | element with a duplicate key is inserted, or on
  | destruction of the cache.
  |
  | The cache keeps two linked lists of items in
  | the cache.  All items in the cache are in one
  | list or the other, and never both.  Items still
  | referenced by clients but erased from the cache
  | are in neither list.  The lists are:
  |
  | - in-use:  contains the items currently
  |   referenced by clients, in no particular
  |   order.  (This list is used for invariant
  |   checking.  If we removed the check, elements
  |   that would otherwise be on this list could be
  |   left as disconnected singleton lists.)
  |
  | - LRU:  contains the items not currently
  |   referenced by clients, in LRU order.
  |
  | Elements are moved between these lists by the
  | Ref() and Unref() methods, when they detect an
  | element in the cache acquiring or losing its
  | only external reference.
  */
32
// Crate-wide prelude/import macro used throughout this project.
crate::ix!();

// Number of high hash bits used to pick a shard; the sharded cache below
// routes each key to one of `NUM_SHARDS` (= 16) independent `LRUCache`
// shards via `ShardedLRUCache::shard` (`hash >> (32 - NUM_SHARD_BITS)`).
pub const NUM_SHARD_BITS: usize = 4;
pub const NUM_SHARDS:     usize = 1 << NUM_SHARD_BITS;

//-------------------------------------------[.cpp/bitcoin/src/leveldb/util/cache.cc]
39
/**
  | An entry is a variable length heap-allocated
  | structure. Entries are kept in a circular
  | doubly linked list ordered by access
  | time.
  |
  */
pub struct LRUHandle {

    // Opaque value stored under this key; released by calling `deleter`.
    value:      *mut c_void,

    // Callback that destroys `value` when the last reference is dropped.
    //
    // NOTE(review): the `-> c_void` return type is uninhabitable in Rust (no
    // function can return `c_void`); it is almost certainly a transliteration
    // artifact of C++ `void (*)(const Slice&, void*)` and should likely be
    // `fn(&Slice, *mut c_void)`. Confirm before changing — `LRUCache::insert`
    // and `ShardedLRUCache::insert` declare the same signature and must stay
    // in sync.
    deleter:    fn(_0: &Slice, value: *mut c_void) -> c_void,

    // Next entry in the same `HandleTable` bucket chain.
    next_hash:  *mut LRUHandle,
    // Neighbors in the circular doubly linked list (either the `lru` or the
    // `in_use` list of `LRUCacheInner`, never both).
    next:       *mut LRUHandle,
    prev:       *mut LRUHandle,

    /**
       TODO(opt): Only allow uint32_t?
      */
    charge:     usize,

    // Length in bytes of the key stored in `key_data`.
    key_length: usize,

    /**
       Whether entry is in the cache.
      */
    in_cache:   bool,

    /**
       References, including cache reference, if
       present.
      */
    refs:       u32,

    /**
       Hash of key(); used for fast sharding and
       comparisons
      */
    hash:       u32,

    /**
       Beginning of key
      */
    // NOTE(review): `[u8; 1]` mirrors the C++ flexible-array-member idiom —
    // the original allocates `sizeof(LRUHandle) - 1 + key.size()` bytes (see
    // the C++ kept in `LRUCache::insert`) so the key spills past the struct.
    // A faithful Rust port will need a different allocation strategy.
    key_data:   [u8; 1],
}
86
impl LRUHandle {

    /// Return a `Slice` viewing this entry's key bytes
    /// (`key_data[..key_length]`).
    ///
    /// Unimplemented stub; the intended behavior is the C++ below. Note the
    /// original asserts `next != this` because only an empty list head points
    /// at itself, and list heads never carry meaningful keys.
    pub fn key(&self) -> Slice {
        
        todo!();
        /*
            // next_ is only equal to this if the LRU handle is the list head of an
        // empty list. List heads never have meaningful keys.
        assert(next != this);

        return Slice(key_data, key_length);
        */
    }
}
101
/**
  | We provide our own simple hash table since it
  | removes a whole bunch of porting hacks and is
  | also faster than some of the built-in hash
  | table implementations in some of the
  | compiler/runtime combinations we have tested.
  | E.g., readrandom speeds up by ~5% over the g++
  | 4.4.3's builtin hashtable.
  */
pub struct HandleTable {

    /**
      | The table consists of an array of buckets
      | where each bucket is a linked list of
      | cache entries that hash into the bucket.
      |
      */
    // Number of buckets; kept a power of two by `resize` so bucket selection
    // can be `hash & (length - 1)`.
    length: u32,
    // Number of entries currently stored across all buckets.
    elems:  u32,
    // Heap-allocated array of `length` bucket heads (C++ `new LRUHandle*[]`);
    // freed in `Drop` via the original `delete[]`.
    list:   *mut *mut LRUHandle,
}
123
impl Default for HandleTable {
    
    /// Construct an empty table (zero length/elems, null bucket array) and
    /// immediately call `resize` to allocate the initial buckets, per the
    /// C++ constructor below. Unimplemented stub.
    fn default() -> Self {
        todo!();
        /*
        : length(0),
        : elems(0),
        : list(nullptr),

            Resize();
        */
    }
}
137
impl Drop for HandleTable {
    /// Release the bucket-head array (C++ `delete[] list_`). Note the
    /// original frees only the array, not the entries — entry lifetime is
    /// managed by `LRUCache` reference counting. Unimplemented stub.
    fn drop(&mut self) {
        todo!();
        /*
            delete[] list_;
        */
    }
}
146
impl HandleTable {

    /// Return the entry matching `key_`/`hash_`, or null if absent.
    /// Unimplemented stub; intended behavior is the C++ below.
    pub fn lookup(&mut self, 
        key_:  &Slice,
        hash_: u32) -> *mut LRUHandle {
        
        todo!();
        /*
            return *FindPointer(key, hash);
        */
    }
    
    /// Insert `h`, replacing (and returning) any existing entry with the
    /// same key/hash; returns null when no entry was displaced. Grows the
    /// table via `resize` when the entry count exceeds the bucket count so
    /// average chain length stays <= 1. Unimplemented stub.
    pub fn insert(&mut self, h: *mut LRUHandle) -> *mut LRUHandle {
        
        todo!();
        /*
            LRUHandle** ptr = FindPointer(h->key(), h->hash);
        LRUHandle* old = *ptr;
        h->next_hash = (old == nullptr ? nullptr : old->next_hash);
        *ptr = h;
        if (old == nullptr) {
          ++elems_;
          if (elems_ > length_) {
            // Since each cache entry is fairly large, we aim for a small
            // average linked list length (<= 1).
            Resize();
          }
        }
        return old;
        */
    }
    
    /// Unlink and return the entry matching `key_`/`hash_` (null when
    /// absent). The caller owns the returned entry's cleanup (see
    /// `LRUCache::finish_erase`). Unimplemented stub.
    pub fn remove(&mut self, 
        key_:  &Slice,
        hash_: u32) -> *mut LRUHandle {
        
        todo!();
        /*
            LRUHandle** ptr = FindPointer(key, hash);
        LRUHandle* result = *ptr;
        if (result != nullptr) {
          *ptr = result->next_hash;
          --elems_;
        }
        return result;
        */
    }

    /**
      | Return a pointer to slot that points to
      | a cache entry that matches key/hash.  If
      | there is no such cache entry, return
      | a pointer to the trailing slot in the
      | corresponding linked list.
      */
    // Walks the bucket chain comparing hash first (cheap) and then the full
    // key. Returning a slot (pointer-to-pointer) lets insert/remove splice
    // without re-walking. Unimplemented stub.
    pub fn find_pointer(&mut self, 
        key_:  &Slice,
        hash_: u32) -> *mut *mut LRUHandle {
        
        todo!();
        /*
            LRUHandle** ptr = &list_[hash & (length_ - 1)];
        while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
          ptr = &(*ptr)->next_hash;
        }
        return ptr;
        */
    }
    
    /// Grow the bucket array to the next power of two >= elems (min 4) and
    /// rehash every chained entry into it, then free the old array.
    /// Unimplemented stub.
    pub fn resize(&mut self)  {
        
        todo!();
        /*
            uint32_t new_length = 4;
        while (new_length < elems_) {
          new_length *= 2;
        }
        LRUHandle** new_list = new LRUHandle*[new_length];
        memset(new_list, 0, sizeof(new_list[0]) * new_length);
        uint32_t count = 0;
        for (uint32_t i = 0; i < length_; i++) {
          LRUHandle* h = list_[i];
          while (h != nullptr) {
            LRUHandle* next = h->next_hash;
            uint32_t hash = h->hash;
            LRUHandle** ptr = &new_list[hash & (new_length - 1)];
            h->next_hash = *ptr;
            *ptr = h;
            h = next;
            count++;
          }
        }
        assert(elems_ == count);
        delete[] list_;
        list_ = new_list;
        length_ = new_length;
        */
    }
}
246
/**
   A single shard of sharded cache.
  */
pub struct LRUCache {

    /**
       Initialized before use.
      */
    // Total charge the shard may hold; 0 disables caching entirely (see the
    // C++ in `insert`). Set via `set_capacity`, not a constructor, so an
    // array of shards can be built first.
    capacity: usize,

    /**
      | mutex_ protects the following state.
      |
      */
    // NOTE(review): wrapping a `Mutex` in `RefCell` is unusual — `Mutex`
    // already provides interior mutability, and `RefCell` makes the type
    // !Sync, which would defeat the lock's purpose. Presumably a
    // transliteration artifact of the C++ `mutable port::Mutex`; confirm
    // against how the rest of the crate locks this.
    mutex:    RefCell<Mutex<LRUCacheInner>>,
}
263
// Mutex-guarded state of one `LRUCache` shard.
pub struct LRUCacheInner {
    // Sum of `charge` over all entries currently in the cache; compared
    // against `LRUCache::capacity` when evicting.
    usage:    usize,

    /**
      | Dummy head of LRU list.
      |
      | lru.prev is newest entry, lru.next is
      | oldest entry.
      |
      | Entries have refs==1 and in_cache==true.
      */
    lru:      LRUHandle,

    /**
      | Dummy head of in-use list. 
      |
      | Entries are in use by clients, and have
      | refs >= 2 and in_cache==true.
      |
      */
    in_use:   LRUHandle,

    // Hash table for O(1) key lookup; entries live on exactly one of the two
    // lists above while also being chained in this table.
    table:    HandleTable,
}
288
impl Drop for LRUCache {
    /// Tear down the shard: asserts no client still holds a handle (in-use
    /// list must be empty), then walks the LRU list unref'ing every entry,
    /// which frees each one since the cache held the sole reference
    /// (refs == 1). Unimplemented stub; intended behavior is the C++ below.
    fn drop(&mut self) {
        todo!();
        /*
            assert(in_use_.next == &in_use_);  // Error if caller has an unreleased handle
      for (LRUHandle* e = lru_.next; e != &lru_;) {
        LRUHandle* next = e->next;
        assert(e->in_cache);
        e->in_cache = false;
        assert(e->refs == 1);  // Invariant of lru_ list.
        Unref(e);
        e = next;
      }
        */
    }
}
305
impl LRUCache {

    /**
      | Separate from constructor so caller
      | can easily make an array of LRUCache
      |
      */
    pub fn set_capacity(&mut self, capacity: usize)  {
        
        todo!();
        /*
            capacity_ = capacity;
        */
    }

    /// Return the shard's current total charge (`usage`), taken under the
    /// mutex. Unimplemented stub.
    pub fn total_charge(&self) -> usize {
        
        todo!();
        /*
            MutexLock l(&mutex_);
        return usage_;
        */
    }
    
    /// Construct an empty shard: zero capacity/usage and both dummy list
    /// heads linked to themselves (empty circular lists). Unimplemented stub.
    pub fn new() -> Self {
    
        todo!();
        /*
        : capacity(0),
        : usage(0),

            // Make empty circular linked lists.
      lru_.next = &lru_;
      lru_.prev = &lru_;
      in_use_.next = &in_use_;
      in_use_.prev = &in_use_;
        */
    }
    
    /// Take a reference on `e`; if this is the first external reference to a
    /// cached entry (refs goes 1 -> 2), migrate it from the LRU list to the
    /// in-use list. Unimplemented stub.
    pub fn ref_(&mut self, e: *mut LRUHandle)  {
        
        todo!();
        /*
          if (e->refs == 1 && e->in_cache) {  // If on lru_ list, move to in_use_ list.
            LRU_Remove(e);
            LRU_Append(&in_use_, e);
          }
          e->refs++;
        */
    }
    
    /// Drop a reference on `e`. At refs == 0 the entry is destroyed via its
    /// deleter and freed; at refs == 1 while still cached it moves back from
    /// the in-use list to the LRU list (eligible for eviction).
    /// Unimplemented stub.
    pub fn unref(&mut self, e: *mut LRUHandle)  {
        
        todo!();
        /*
      assert(e->refs > 0);
      e->refs--;
      if (e->refs == 0) {  // Deallocate.
        assert(!e->in_cache);
        (*e->deleter)(e->key(), e->value);
        free(e);
      } else if (e->in_cache && e->refs == 1) {
        // No longer in use; move to lru_ list.
        LRU_Remove(e);
        LRU_Append(&lru_, e);
      }
        */
    }
    
    /// Unlink `e` from whichever circular list it is on. Unimplemented stub.
    pub fn lru_remove(&mut self, e: *mut LRUHandle)  {
        
        todo!();
        /*
           e->next->prev = e->prev;
           e->prev->next = e->next;
           */
    }
    
    /// Splice `e` in just before the dummy head `list`, making it the newest
    /// entry of that list. Unimplemented stub.
    pub fn lru_append(&mut self, 
        list: *mut LRUHandle,
        e:    *mut LRUHandle)  {
        
        todo!();
        /*
        // Make "e" newest entry by inserting just before *list
        e->next = list;
        e->prev = list->prev;
        e->prev->next = e;
        e->next->prev = e;
        */
    }
    
    /// Look up `key_`/`hash_` under the mutex; on a hit, takes a reference
    /// (so the caller must later call `release`) and returns the entry as an
    /// opaque handle; null on miss. Unimplemented stub.
    pub fn lookup(&mut self, 
        key_:  &Slice,
        hash_: u32) -> *mut CacheHandle {
        
        todo!();
        /*
          MutexLock l(&mutex_);
          LRUHandle* e = table_.Lookup(key, hash);
          if (e != nullptr) {
            Ref(e);
          }
          return reinterpret_cast<Cache::Handle*>(e);
        */
    }
    
    /// Release a handle previously returned by `lookup`/`insert`
    /// (unref under the mutex). Unimplemented stub.
    pub fn release(&mut self, handle: *mut CacheHandle)  {
        
        todo!();
        /*
           MutexLock l(&mutex_);
           Unref(reinterpret_cast<LRUHandle*>(handle));
           */
    }
    
    /**
      | Like Cache methods, but with an extra
      | "hash" parameter.
      |
      */
    // Allocates a variable-length LRUHandle (the key bytes overhang the
    // struct — see the `key_data` note on `LRUHandle`), inserts it with one
    // reference for the returned handle plus one for the cache (when
    // capacity > 0), displaces any previous entry with the same key, and
    // evicts from the cold end of the LRU list while usage exceeds capacity.
    // capacity == 0 turns caching off entirely. Unimplemented stub.
    pub fn insert(&mut self, 
        key_:     &Slice,
        hash_:    u32,
        value:   *mut c_void,
        charge:  usize,
        deleter: fn(key_: &Slice, value: *mut c_void) -> c_void) -> *mut CacheHandle {
        
        todo!();
        /*
            MutexLock l(&mutex_);

      LRUHandle* e =
          reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));
      e->value = value;
      e->deleter = deleter;
      e->charge = charge;
      e->key_length = key.size();
      e->hash = hash;
      e->in_cache = false;
      e->refs = 1;  // for the returned handle.
      memcpy(e->key_data, key.data(), key.size());

      if (capacity_ > 0) {
        e->refs++;  // for the cache's reference.
        e->in_cache = true;
        LRU_Append(&in_use_, e);
        usage_ += charge;
        FinishErase(table_.Insert(e));
      } else {  // don't cache. (capacity_==0 is supported and turns off caching.)
        // next is read by key() in an assert, so it must be initialized
        e->next = nullptr;
      }
      while (usage_ > capacity_ && lru_.next != &lru_) {
        LRUHandle* old = lru_.next;
        assert(old->refs == 1);
        bool erased = FinishErase(table_.Remove(old->key(), old->hash));
        if (!erased) {  // to avoid unused variable when compiled NDEBUG
          assert(erased);
        }
      }

      return reinterpret_cast<Cache::Handle*>(e);
        */
    }

    /**
      | If e != nullptr, finish removing *e from
      | the cache; it has already been removed from
      | the hash table.  Return whether e !=
      | nullptr.
      */
    // NOTE(review): this attribute mirrors the Clang thread-safety
    // annotation from the C++ source; it is not a standard Rust attribute
    // and presumably resolves via the `crate::ix!()` prelude — confirm.
    #[EXCLUSIVE_LOCKS_REQUIRED(mutex_)]
    pub fn finish_erase(&mut self, e: *mut LRUHandle) -> bool {
        
        todo!();
        /*
          if (e != nullptr) {
            assert(e->in_cache);
            LRU_Remove(e);
            e->in_cache = false;
            usage_ -= e->charge;
            Unref(e);
          }
          return e != nullptr;
        */
    }
    
    /// Remove `key_`/`hash_` from the cache (hash-table removal plus
    /// `finish_erase`), under the mutex. Unimplemented stub.
    pub fn erase(&mut self, 
        key_:  &Slice,
        hash_: u32)  {
        
        todo!();
        /*
            MutexLock l(&mutex_);
      FinishErase(table_.Remove(key, hash));
        */
    }
    
    /// Evict every entry on the LRU list (i.e. all cached entries not held
    /// by clients), under the mutex. Unimplemented stub.
    pub fn prune(&mut self)  {
        
        todo!();
        /*
            MutexLock l(&mutex_);
      while (lru_.next != &lru_) {
        LRUHandle* e = lru_.next;
        assert(e->refs == 1);
        bool erased = FinishErase(table_.Remove(e->key(), e->hash));
        if (!erased) {  // to avoid unused variable when compiled NDEBUG
          assert(erased);
        }
      }
        */
    }
}
521
// Public cache type: splits the key space across NUM_SHARDS independent
// LRUCache shards, routed by the top bits of the key hash (see `shard`).
pub struct ShardedLRUCache {
    // Embedded base `Cache` (C++ inheritance flattened into composition).
    base:     Cache,
    shard:    [LRUCache; NUM_SHARDS],
    // Guards only `last_id` (see `new_id`); shard state has its own locks.
    id_mutex: parking_lot::RawMutex,
    last_id:  u64,
}
528
impl ShardedLRUCache {

    /// Hash the key bytes with seed 0 (project `Hash` function).
    /// Unimplemented stub.
    #[inline] pub fn hash_slice(s: &Slice) -> u32 {
        
        todo!();
        /*
            return Hash(s.data(), s.size(), 0);
        */
    }
    
    /// Select a shard index from the top NUM_SHARD_BITS bits of the hash.
    /// Unimplemented stub.
    pub fn shard(hash_: u32) -> u32 {
        
        todo!();
        /*
            return hash >> (32 - kNumShardBits);
        */
    }
    
    /// Build a sharded cache whose total capacity is split evenly across
    /// shards, rounding up so the sum is >= `capacity`. Unimplemented stub.
    pub fn new(capacity: usize) -> Self {
    
        todo!();
        /*
        : last_id(0),

        const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards;
        for (int s = 0; s < kNumShards; s++) {
          shard_[s].SetCapacity(per_shard);
        }
        */
    }
    
    /// Hash the key and delegate to the owning shard's `insert`.
    /// Unimplemented stub.
    pub fn insert(&mut self, 
        key_:     &Slice,
        value:   *mut c_void,
        charge:  usize,
        deleter: fn(key_: &Slice, value: *mut c_void) -> c_void) -> *mut CacheHandle {
        
        todo!();
        /*
        const uint32_t hash = HashSlice(key);
        return shard_[Shard(hash)].Insert(key, hash, value, charge, deleter);
        */
    }
    
    /// Hash the key and delegate to the owning shard's `lookup`.
    /// Unimplemented stub.
    pub fn lookup(&mut self, key_: &Slice) -> *mut CacheHandle {
        
        todo!();
        /*
        const uint32_t hash = HashSlice(key);
        return shard_[Shard(hash)].Lookup(key, hash);
        */
    }
    
    /// Route the handle back to its shard (the entry stores its own hash)
    /// and release it there. Unimplemented stub.
    pub fn release(&mut self, handle: *mut CacheHandle)  {
        
        todo!();
        /*
        LRUHandle* h = reinterpret_cast<LRUHandle*>(handle);
        shard_[Shard(h->hash)].Release(handle);
        */
    }
    
    /// Hash the key and delegate to the owning shard's `erase`.
    /// Unimplemented stub.
    pub fn erase(&mut self, key_: &Slice)  {
        
        todo!();
        /*
        const uint32_t hash = HashSlice(key);
        shard_[Shard(hash)].Erase(key, hash);
        */
    }
    
    /// Return the value stored in a handle.
    ///
    /// NOTE(review): the C++ returns `void*` but this signature returns
    /// nothing — likely a transliteration slip; confirm intended return type
    /// against the `Cache` trait/struct before implementing.
    pub fn value(&mut self, handle: *mut CacheHandle)  {
        
        todo!();
        /*
        return reinterpret_cast<LRUHandle*>(handle)->value;
        */
    }
    
    /// Return a new unique client id, incrementing `last_id` under
    /// `id_mutex`. Unimplemented stub.
    pub fn new_id(&mut self) -> u64 {
        
        todo!();
        /*
        MutexLock l(&id_mutex_);
        return ++(last_id_);
        */
    }
    
    /// Prune every shard (evict all unreferenced cached entries).
    /// Unimplemented stub.
    pub fn prune(&mut self)  {
        
        todo!();
        /*
        for (int s = 0; s < kNumShards; s++) {
          shard_[s].Prune();
        }
        */
    }
    
    /// Sum of `total_charge` over all shards. Unimplemented stub.
    pub fn total_charge(&self) -> usize {
        
        todo!();
        /*
        size_t total = 0;
        for (int s = 0; s < kNumShards; s++) {
          total += shard_[s].TotalCharge();
        }
        return total;
        */
    }
}
639
/// Factory mirroring C++ `NewLRUCache`: heap-allocate a `ShardedLRUCache`
/// of the given total capacity and return it as a raw `Cache` pointer
/// (caller owns it). Unimplemented stub.
pub fn new_lru_cache(capacity: usize) -> *mut Cache {
    
    todo!();
    /*
       return new ShardedLRUCache(capacity);
       */
}