diskann-providers 0.51.0

DiskANN is a fast approximate nearest neighbor search library for high dimensional data
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
/*
 * Copyright (c) Microsoft Corporation.
 * Licensed under the MIT license.
 */

//! The caching provider wraps another `DataProvider` implementation and its associated accessors
//! and strategies, in order to provide a memory-backed cache for index terms. It is useful for
//! accelerating disk-backed providers where accessor operations are slow at the cost of the extra
//! memory for the cache.
//!
//! The real `DataProvider` is known as the **inner** provider.
//!
//! Reads from a caching provider first check the cache, then if the term is present, returns the
//! term; if not present, the underlying provider is accessed. If the underlying provider returns
//! the term, it is placed in the cache and then returned.
//!
//! Writes to a caching provider first write to the underlying provider and then evict the
//! generated internal ID from the cache.
//!
//! Errors are reported using [`CachingError`], which differentiates between an error yielded
//! by the inner provider and an error generated while performing cache operations.
//!
//! Several traits are needed to tie the inner [`DataProvider`] to the cache:
//!
//! * [`AsCacheAccessorFor`]: Create a cache accessor for a [`DataProvider`]/element type
//!   combination.
//!
//! * [`CachedFill`]: The caching version of [`Fill`]. A provided implementation can
//!   be used if desired, or the behavior can be customized per [`Accessor`]/cache accessor
//!   pair.
//!
//! To make use of a caching provider, after creating the inner [`DataProvider`] (`DP`),
//! create a cache `C` that implements the above traits for `DP` and the accessors whose
//! operations you want to cache.
//!
//! With the `DP`/`C` pair, construct a [`CachingProvider`] and use the [`Cached`] strategy
//! to wrap strategies for the inner provider.
//!
//! # Implementing Caches
//!
//! Caches are expected to implement the above traits as well as [`Evict`]. Access to the
//! cache is done through proxy objects called "cache accessors". These accessors should
//! implement the following traits:
//!
//! * [`ElementCache`]: Get and retrieve items from the cache.
//!
//! * [`NeighborCache`]: Get and retrieve adjacency list terms from the cache.
//!
//! The utilities like [`super::bf_cache::Cache`] and [`super::utils::Graph`] can be helpful
//! for writing custom caches.
//!
//! # Naming Conventions
//!
//! * `[X]Cache`: A trait implemented by a cache accessor.
//! * `Cached[X]`: The caching version of a [`DataProvider`] trait `X`. This is implemented
//!   by the inner [`Accessor`] and is customized for a cache accessor.
//! * The traits [`Evict`] and [`AsCacheAccessorFor`] are implemented by the **cache**.

use std::{fmt::Debug, sync::Arc};

use futures_util::FutureExt;

use diskann::{
    ANNResult,
    error::{self as core_error, IntoANNResult, StandardError},
    graph::{
        AdjacencyList, SearchOutputBuffer,
        glue::{
            self, Batch, ExpandBeam, InplaceDeleteStrategy, InsertStrategy, MultiInsertStrategy,
            Pipeline, PruneStrategy, SearchExt, SearchPostProcessStep, SearchStrategy,
        },
        workingset,
    },
    neighbor::Neighbor,
    provider::{
        Accessor, AsNeighbor, BuildDistanceComputer, BuildQueryComputer, CacheableAccessor,
        DataProvider, DelegateNeighbor, Delete, ElementStatus, HasId, NeighborAccessor,
        NeighborAccessorMut, SetElement,
    },
};
use diskann_utils::{
    WithLifetime,
    future::{AssertSend, AsyncFriendly, SendFuture},
};
use thiserror::Error;

////////////
// Traits //
////////////

/// A missing cache entry. A value can be set in the cache using [`Self::set`].
#[derive(Debug)]
pub struct Missing<'a, C, I> {
    // Exclusive borrow of the cache accessor that reported the miss.
    cache: &'a mut C,
    // The key whose entry was absent; consumed when `set` fills the slot.
    key: I,
}

impl<'a, C, I> Missing<'a, C, I>
where
    I: Clone,
{
    /// Set the current entry to `element` - consuming `self`.
    pub fn set<E>(self, element: &E::Of<'_>) -> Result<(), C::Error>
    where
        C: ElementCache<I, E>,
        E: WithLifetime,
    {
        self.cache.set_cached(self.key, element)
    }
}

/// The result of invoking [`ElementCache::try_get`].
///
/// If an item is missing, the `Missing` variant can be used to set it with the true value.
#[derive(Debug)]
pub enum MaybeCached<'a, C, I, E>
where
    E: WithLifetime,
    C: ElementCache<I, E>,
    I: Clone,
{
    /// The requested value was found in the cache.
    Present(E::Of<'a>),
    /// The requested value was absent; the payload can be used to fill the entry.
    Missing(Missing<'a, C, I>),
}

/// Indicate that the implementor is a cache for elements of type `E` with keys of type `I`.
///
/// This is a **cache accessor** trait.
pub trait ElementCache<I, E>: Send + Sync + Sized
where
    E: WithLifetime,
    I: Clone,
{
    /// Critical errors raised by cache operations.
    type Error: StandardError;

    /// Attempt to retrieve a cached element for the key. Return `Ok(Some)` if the value
    /// is in the cache and is well formed. Return `Ok(None)` if the value is not in the
    /// cache.
    ///
    /// Returns any critical error.
    fn get_cached(&mut self, key: I) -> Result<Option<E::Of<'_>>, Self::Error>;

    /// Attempt to store a value in the cache - returning any critical error.
    ///
    /// This will overwrite any existing value in the cache at the same key.
    ///
    /// # Note
    ///
    /// Because the canonical [`bf_tree::BfTree`] used as the cache does not indicate
    /// whether an existing item was inserted or over-written, that information is not
    /// communicated in the return type of this trait.
    fn set_cached(&mut self, key: I, v: &E::Of<'_>) -> Result<(), Self::Error>;

    /// Attempt to get an entry from the cache. If the requested key is present, returns
    /// [`MaybeCached::Present`] with the retrieved value. Otherwise, returns a
    /// [`MaybeCached::Missing`] that can be used to insert into the cache.
    ///
    /// This exists to work around the
    /// [`get_or_insert`](https://nikomatsakis.github.io/rust-belt-rust-2019/#72) pattern
    /// not working in Rust without the Polonius borrow checker. As such, it is difficult
    /// to implement manually and should be left as a provided implementation.
    ///
    /// ## Details
    ///
    /// The provided implementation uses the humorous
    /// [polonius-the-crab](https://docs.rs/polonius-the-crab/latest/polonius_the_crab/)
    /// crate to encapsulate the (sound) unsafe pattern required to implement `get_or_insert`
    /// without the next generation borrow checker.
    ///
    /// When (if) polonius lands, this function can be rewritten or discarded entirely.
    fn try_get(&mut self, key: I) -> Result<MaybeCached<'_, Self, I, E>, Self::Error> {
        use polonius_the_crab as ptc;
        type Output<E> = ptc::ForLt!(<E as WithLifetime>::Of<'_>);

        // This method returns either:
        //
        // 1. The result of a successful `get_cached` call with the returned element. The
        //    borrowed element will be scoped to the lifetime of `&mut self`.
        //
        // 2. A reborrow of `self`, which can be given to `MaybeCached::Missing`.
        let result_or_cache =
            ptc::polonius::<_, Result<(), Self::Error>, Output<E>>(self, |cache| {
                match cache.get_cached(key.clone()) {
                    Ok(Some(element)) => ptc::PoloniusResult::Borrowing(element),
                    Ok(None) => ptc::PoloniusResult::Owned(Ok(())),
                    Err(err) => ptc::PoloniusResult::Owned(Err(err)),
                }
            });

        match result_or_cache {
            ptc::PoloniusResult::Borrowing(v) => Ok(MaybeCached::Present(v)),
            ptc::PoloniusResult::Owned {
                value,
                input_borrow: cache, // This is a reborrow of `self`.
            } => {
                // A result indicating a cache miss if `Ok` or critical error if not.
                value?;

                // Return the reborrow of `self` in a `MaybeCached::Missing`.
                Ok(MaybeCached::Missing(Missing { cache, key }))
            }
        }
    }
}

/// Attempt to retrieve the element associated with `id` from the `cache`. If present, return
/// the cached item.
///
/// Otherwise, attempt to retrieve the element from `accessor`. If this operation is
/// successful, store the value in `cache` before returning it.
///
/// Cache failures surface as [`CachingError::Cache`]; failures from the inner accessor
/// surface as [`CachingError::Inner`].
pub fn get_or_insert<'a, A, C>(
    accessor: &'a mut A,
    cache: &'a mut C,
    id: A::Id,
) -> impl SendFuture<Result<A::Element<'a>, CachingError<A::GetError, C::Error>>>
where
    A: CacheableAccessor,
    C: ElementCache<A::Id, A::Map>,
{
    async move {
        let entry = cache.try_get(id).map_err(CachingError::Cache)?;
        match entry {
            // Fast path: already cached; convert the cached form back to an element.
            MaybeCached::Present(cached) => Ok(A::from_cached(cached)),
            // Slow path: fetch from the inner accessor, then populate the cache.
            MaybeCached::Missing(slot) => {
                let fetched = accessor
                    .get_element(id)
                    .await
                    .map_err(CachingError::Inner)?;
                slot.set(A::as_cached(&fetched))
                    .map_err(CachingError::Cache)?;
                Ok(fetched)
            }
        }
    }
}

/// Reporting status for the results of a graph retrieval.
///
/// The value of `NeighborStatus` influences how the caching accessor will respond;
/// see the `NeighborAccessor` implementation on [`CachingAccessor`] for the exact handling.
#[derive(Debug, Clone, Copy, PartialEq)]
#[must_use = "NeighborStatus must be observed and acted on"]
pub enum NeighborStatus {
    /// The requested item was found in the cache.
    Hit,
    /// The requested item was not found in the cache and the accessor should attempt to
    /// fill the cache entry for this item.
    Miss,
    /// The requested item was not found in the cache and the accessor should not attempt
    /// to cache the value.
    Uncacheable,
}

/// A targeted variation of [`ElementCache`] specifically targeting adjacency list storage and
/// retrieval. The interface models [`diskann::glue::NeighborAccessor`] and
/// [`diskann::glue::NeighborAccessorMut`].
///
/// This is a **cache accessor** trait.
pub trait NeighborCache<I>: Send + Sync {
    /// A single unified error type between `get` and `set` to keep things simple.
    type Error: StandardError;

    /// Attempt to retrieve a cached adjacency list, storing the retrieved values into
    /// `neighbors`.
    ///
    /// On success, returns `Ok(true)`. If the value is not in the cache, returns `Ok(false)`
    /// and should leave `neighbors` unmodified.
    ///
    /// Critical errors are reported.
    fn try_get_neighbors(
        &mut self,
        id: I,
        neighbors: &mut AdjacencyList<I>,
    ) -> Result<NeighborStatus, Self::Error>;

    /// Attempt to store a value in the cache - returning any critical error.
    ///
    /// This will overwrite any existing value in the cache at the same key.
    fn set_neighbors(&mut self, id: I, neighbors: &[I]) -> Result<(), Self::Error>;

    /// Invalidate any cached adjacency list for `id`.
    ///
    /// # Note
    ///
    /// The canonical [`bf_tree::BfTree`] used as the implementation does not provide
    /// indication on whether keys are successfully removed, so that information cannot be
    /// reported through this interface.
    fn invalidate_neighbors(&mut self, id: I);
}

/// Invalidate *all* cached items associated with `id`.
pub trait Evict<I> {
    /// Remove every cache entry (element and adjacency list alike) keyed by `id`.
    fn evict(&self, id: I);
}

/// Customization point for creating a cache accessor from a graph, tailored for a data
/// provider and specific element type.
///
/// * `DP`: The type of the underlying data provider.
/// * `E`: The [`diskann::provider::Accessor`] element type to retrieve from the cache.
///
/// Customization includes the creation of cache accessors tailored for the requested
/// element type. For example, different cache accessors may be needed for full precision
/// versus quantized vectors.
pub trait AsCacheAccessorFor<'a, A>
where
    A: CacheableAccessor,
{
    /// The type of the returned accessor. This accessor is meant to interface directly
    /// with the underlying cache, providing caching services for the element type `E`.
    ///
    /// This should **not** be a [`CachingAccessor`] since the [`CachingAccessor`] will call
    /// this method internally when created.
    type Accessor: ElementCache<A::Id, A::Map>;

    /// Errors that can occur while creating the cache accessor.
    ///
    /// Implementations are encouraged to make this construction infallible if at all
    /// possible.
    type Error: StandardError;

    /// Return a cache accessor for the underlying `provider`.
    fn as_cache_accessor_for(
        &'a self,
        accessor: A,
    ) -> Result<CachingAccessor<A, Self::Accessor>, Self::Error>;
}

/// The caching equivalent of [`diskann::glue::Fill`], implemented by the
/// **inner** [`Accessor`] for a cache accessor `C`. This allows the [`Accessor`] to
/// customize the interaction with the cache.
///
/// # Provided
///
/// The provided implementation iterates through `itr` and only attempts to mutate ids that
/// are not already present in `set`. The cache will first be checked and if an item is
/// not present, it will be retrieved via [`Self::get_element`] and inserted into the cache.
///
/// **ALL** errors are propagated eagerly by this method.
pub trait CachedFill<C, State>: CacheableAccessor
where
    C: ElementCache<Self::Id, Self::Map>,
    Self: workingset::Fill<State>,
{
    /// Fill the working set `state` from the ids in `itr`, consulting `cache` first and
    /// falling back to `self` on misses.
    fn cached_fill<'a, Itr>(
        &'a mut self,
        cache: &'a mut C,
        state: &'a mut State,
        itr: Itr,
    ) -> impl SendFuture<Result<Self::View<'a>, CachingError<Self::Error, C::Error>>>
    where
        Itr: ExactSizeIterator<Item = Self::Id> + Clone + Send + Sync;
}

///////////////
// New Types //
///////////////

/// A [`diskann::provider::DataProvider`] that provides a caching service for the
/// underlying provider of type `T` using a cache of type `C`.
///
/// Search and insert strategies must be wrapped inside the thin [`Cached`] strategy wrapper
/// to avoid Rust's orphan rule for implementations.
///
/// Provider access will be done using the [`CachingAccessor`] type, which wraps an
/// [`Accessor`] to the inner provider and a caching interface layer.
///
/// Some amount of work is required to properly interface the underlying provider with the
/// cache layer.
///
/// * [`AsCacheAccessorFor`]: Create an accessor for the underlying cache targeting the
///   underlying provider with a specific element type.
///
/// * [`CachedFill`]: [`diskann::glue::Fill`] specialization for the cache.
pub struct CachingProvider<T, C> {
    // The inner (real) provider; all writes go here first.
    provider: T,
    // The memory-backed cache consulted before the inner provider on reads.
    cache: C,
}

impl<T, C> CachingProvider<T, C> {
    /// Construct a new [`CachingProvider`] tying together the underlying `provider` and `cache`.
    pub fn new(provider: T, cache: C) -> Self {
        Self { provider, cache }
    }

    /// Return a reference to the underlying provider.
    pub fn inner(&self) -> &T {
        &self.provider
    }

    /// Return a reference to the underlying cache.
    pub fn cache(&self) -> &C {
        &self.cache
    }
}

/// A generic [`Accessor`] that ties together an [`Accessor`] of type `A` for an underlying
/// [`diskann::provider::DataProvider`] and cache accessor `C`.
///
/// To be useful, `C` should implement [`ElementCache<A::Id, A::Element>`] and
/// [`NeighborCache<A::Id>`].
#[derive(Debug)]
pub struct CachingAccessor<A, C> {
    // Accessor for the inner (real) provider.
    inner: A,
    // Cache accessor consulted before (reads) or invalidated after (writes) `inner`.
    cache: C,
}

impl<A, C> CachingAccessor<A, C> {
    /// Construct a new [`CachingAccessor`] directly over the inner and cache accessors.
    pub fn new(inner: A, cache: C) -> Self {
        Self { inner, cache }
    }

    /// Return a reference to the inner accessor for the underlying provider.
    pub fn inner(&self) -> &A {
        &self.inner
    }

    /// Return a reference to the cache accessor.
    pub fn cache(&self) -> &C {
        &self.cache
    }
}

/// A new-type wrapper for inner strategies to interface with [`CachingProvider`].
///
/// The implementations of [`SearchStrategy`] and related items will use the associated
/// strategies for the underlying type `S`, but propagate [`Cached`] to those strategies.
#[derive(Debug, Clone, Copy)]
pub struct Cached<S> {
    // The wrapped strategy for the inner provider.
    strategy: S,
}

impl<S> Cached<S> {
    /// Construct a new [`Cached`] around the inner `strategy`.
    pub fn new(strategy: S) -> Self {
        Self { strategy }
    }
}

impl<T, U> workingset::AsWorkingSet<Cached<T>> for Cached<U>
where
    U: workingset::AsWorkingSet<T>,
{
    /// Delegate working-set creation to the wrapped strategy and re-wrap the result in
    /// [`Cached`] so downstream machinery keeps routing through the cache.
    fn as_working_set(&self, capacity: usize) -> Cached<T> {
        Cached::new(self.strategy.as_working_set(capacity))
    }
}

//----------------//
// Error Handling //
//----------------//

/// Error type associated with cache access related operations.
///
/// The goal of this type is to propagate critical cache related errors in addition to
/// any errors yielded by the underlying accessor or provider.
#[derive(Debug, Error)]
pub enum CachingError<E, C> {
    /// An error produced by the wrapped (inner) provider or accessor.
    #[error("encountered error from backing provider")]
    Inner(#[source] E),
    /// A critical error raised while reading from or writing to the cache itself.
    #[error("encountered error while accessing cache")]
    Cache(#[source] C),
}

#[cfg(test)]
impl<E, C> CachingError<E, C>
where
    E: Debug,
    C: Debug,
{
    /// Test helper: unwrap the `Inner` variant, panicking if the error came from the cache.
    fn expect_inner(self) -> E {
        match self {
            CachingError::Inner(inner) => inner,
            CachingError::Cache(cache) => {
                panic!("expected an `Inner` error but got a `Cache`: {:?}", cache)
            }
        }
    }
}

/// A local new-type for working with [`ToRanked`] [`CachingError`]s.
///
/// Wraps the inner error's transient form so that escalation re-wraps into
/// [`CachingError::Inner`].
#[derive(Debug, Error)]
#[error(transparent)]
pub struct Transient<T>(T);

/// Support conversion to `ANNError` only when the inner error type `E` is convertible to
/// `ANNError`. Even though this is not strictly necessary, it keeps us from implementing
/// `IntoANNError` when `E: ToRanked` since `ToRanked` does not imply `IntoANNError`.
impl<E, C> From<CachingError<E, C>> for diskann::ANNError
where
    E: Into<diskann::ANNError>,
    C: StandardError,
{
    #[track_caller]
    fn from(err: CachingError<E, C>) -> Self {
        match err {
            CachingError::Inner(inner) => inner.into(),
            CachingError::Cache(err) => err.into(),
        }
    }
}

/// A transparent wrapper for `T as core_error::TransientError<E>`.
///
/// Acknowledgement delegates directly to the wrapped transient; escalation re-wraps the
/// escalated inner error into [`CachingError::Inner`].
impl<E, C, T> core_error::TransientError<CachingError<E, C>> for Transient<T>
where
    T: core_error::TransientError<E>,
{
    #[track_caller]
    fn acknowledge<D>(self, why: D)
    where
        D: std::fmt::Display,
    {
        self.0.acknowledge(why)
    }

    #[track_caller]
    fn escalate<D>(self, why: D) -> CachingError<E, C>
    where
        D: std::fmt::Display,
    {
        CachingError::Inner(self.0.escalate(why))
    }

    #[track_caller]
    fn acknowledge_with<F, D>(self, why: F)
    where
        F: FnOnce() -> D,
        D: std::fmt::Display,
    {
        self.0.acknowledge_with(why)
    }

    #[track_caller]
    fn escalate_with<F, D>(self, why: F) -> CachingError<E, C>
    where
        F: FnOnce() -> D,
        D: std::fmt::Display,
    {
        CachingError::Inner(self.0.escalate_with(why))
    }
}

impl<E, C> core_error::ToRanked for CachingError<E, C>
where
    E: core_error::ToRanked,
    C: StandardError,
{
    /// Cache errors are always escalated; only the inner provider's errors can be
    /// transient.
    type Error = CachingError<E::Error, C>;
    type Transient = Transient<E::Transient>;

    fn to_ranked(self) -> core_error::RankedError<Self::Transient, Self::Error> {
        use core_error::RankedError;
        match self {
            // Rank the inner error and re-wrap each arm in the caching equivalents.
            Self::Inner(err) => match err.to_ranked() {
                RankedError::Transient(v) => core_error::RankedError::Transient(Transient(v)),
                RankedError::Error(v) => core_error::RankedError::Error(CachingError::Inner(v)),
            },
            // Cache failures are never transient.
            Self::Cache(err) => core_error::RankedError::Error(CachingError::Cache(err)),
        }
    }

    fn from_transient(transient: Self::Transient) -> Self {
        Self::Inner(E::from_transient(transient.0))
    }

    fn from_error(error: Self::Error) -> Self {
        match error {
            CachingError::Inner(err) => Self::Inner(E::from_error(err)),
            CachingError::Cache(err) => Self::Cache(err),
        }
    }
}

///////////////////
// Data Provider //
///////////////////

impl<T, C> DataProvider for CachingProvider<T, C>
where
    T: DataProvider,
    C: AsyncFriendly,
{
    // Id mapping and context are delegated untouched to the inner provider; the cache
    // plays no role in identifier translation.
    type Context = T::Context;
    type Error = T::Error;
    type ExternalId = T::ExternalId;
    type InternalId = T::InternalId;
    type Guard = T::Guard;

    /// Delegate external-id lookup to the inner provider.
    fn to_external_id(
        &self,
        context: &Self::Context,
        id: Self::InternalId,
    ) -> Result<Self::ExternalId, Self::Error> {
        self.provider.to_external_id(context, id)
    }

    /// Delegate internal-id lookup to the inner provider.
    fn to_internal_id(
        &self,
        context: &Self::Context,
        gid: &Self::ExternalId,
    ) -> Result<Self::InternalId, Self::Error> {
        self.provider.to_internal_id(context, gid)
    }
}

impl<DP, C> Delete for CachingProvider<DP, C>
where
    DP: DataProvider + Delete,
    C: Evict<DP::InternalId> + AsyncFriendly,
{
    /// Delegate logical deletion to the inner provider. No cache interaction is needed
    /// here; entries are evicted when the internal id is released.
    fn delete(
        &self,
        context: &DP::Context,
        gid: &DP::ExternalId,
    ) -> impl Future<Output = Result<(), DP::Error>> + Send {
        self.provider.delete(context, gid)
    }

    /// Evict all cached entries for `id`, then release it in the inner provider.
    fn release(
        &self,
        context: &DP::Context,
        id: DP::InternalId,
    ) -> impl Future<Output = Result<(), DP::Error>> + Send {
        // The very first thing we do is evict from the cache.
        //
        // This will always be correct, even if `release` somehow fails.
        self.cache.evict(id);
        self.provider.release(context, id)
    }

    /// Delegate status lookup by internal id to the inner provider.
    fn status_by_internal_id(
        &self,
        context: &DP::Context,
        id: DP::InternalId,
    ) -> impl Future<Output = Result<ElementStatus, DP::Error>> + Send {
        self.provider.status_by_internal_id(context, id)
    }

    /// Delegate status lookup by external id to the inner provider.
    fn status_by_external_id(
        &self,
        context: &DP::Context,
        gid: &DP::ExternalId,
    ) -> impl Future<Output = Result<ElementStatus, DP::Error>> + Send {
        self.provider.status_by_external_id(context, gid)
    }
}

impl<DP, C, T> SetElement<T> for CachingProvider<DP, C>
where
    DP: SetElement<T>,
    T: Send + Sync,
    C: AsyncFriendly + Evict<DP::InternalId>,
{
    type SetError = DP::SetError;

    /// Write `element` through to the inner provider, then evict the internal id from the
    /// cache so subsequent reads cannot observe a stale copy.
    async fn set_element(
        &self,
        context: &Self::Context,
        id: &Self::ExternalId,
        element: T,
    ) -> Result<Self::Guard, Self::SetError> {
        use diskann::provider::Guard;

        let guard = self.provider.set_element(context, id, element).await?;
        // Invalidate to ensure we don't have a stale local copy.
        self.cache.evict(guard.id());
        Ok(guard)
    }
}

//////////////
// Accessor //
//////////////

// The caching accessor keys by the same id type as the inner accessor.
impl<A, C> HasId for CachingAccessor<A, C>
where
    A: HasId,
{
    type Id = A::Id;
}

impl<A, C> NeighborAccessor for CachingAccessor<A, C>
where
    A: NeighborAccessor,
    C: NeighborCache<A::Id>,
{
    /// Retrieve the adjacency list for `id`, consulting the neighbor cache first.
    async fn get_neighbors(
        mut self,
        id: Self::Id,
        neighbors: &mut AdjacencyList<Self::Id>,
    ) -> ANNResult<Self> {
        // 1. If `status == NeighborStatus::Hit` - we're done.
        // 2. If `status == NeighborStatus::Miss` - retrieve from the inner accessor and
        //    fill the cache.
        // 3. If `status == NeighborStatus::Uncacheable` - retrieve from the inner accessor
        //    but do not fill the cache.
        let status = self
            .cache
            .try_get_neighbors(id, neighbors)
            .into_ann_result()?;
        if status != NeighborStatus::Hit {
            self.inner = self.inner.get_neighbors(id, neighbors).await?;
            if status != NeighborStatus::Uncacheable {
                self.cache.set_neighbors(id, neighbors).into_ann_result()?;
            }
        }

        Ok(self)
    }
}

impl<A, C> NeighborAccessorMut for CachingAccessor<A, C>
where
    A: NeighborAccessorMut,
    C: NeighborCache<A::Id>,
{
    /// Write the adjacency list through to the inner accessor, then invalidate the cached
    /// copy so the next read re-fetches the new list.
    async fn set_neighbors(mut self, id: Self::Id, neighbors: &[Self::Id]) -> ANNResult<Self> {
        self.inner = self.inner.set_neighbors(id, neighbors).await?;
        self.cache.invalidate_neighbors(id);

        Ok(self)
    }

    /// Append through to the inner accessor, then invalidate the cached adjacency list.
    async fn append_vector(mut self, id: Self::Id, neighbors: &[Self::Id]) -> ANNResult<Self> {
        self.inner = self.inner.append_vector(id, neighbors).await?;
        self.cache.invalidate_neighbors(id);
        Ok(self)
    }
}

impl<'a, A, C> DelegateNeighbor<'a> for CachingAccessor<A, C>
where
    A: DelegateNeighbor<'a>,
    C: NeighborCache<Self::Id>,
{
    type Delegate = CachingAccessor<A::Delegate, &'a mut C>;
    /// Pair the inner accessor's delegate with a mutable borrow of the cache accessor so
    /// the delegate keeps routing neighbor operations through the cache.
    fn delegate_neighbor(&'a mut self) -> Self::Delegate {
        CachingAccessor::new(self.inner.delegate_neighbor(), &mut self.cache)
    }
}

impl<A, C> Accessor for CachingAccessor<A, C>
where
    A: CacheableAccessor,
    C: ElementCache<A::Id, A::Map>,
{
    // Elements pass through unchanged; only retrieval is intercepted by the cache.
    type Element<'a>
        = A::Element<'a>
    where
        Self: 'a;
    type ElementRef<'a> = A::ElementRef<'a>;

    type GetError = CachingError<A::GetError, C::Error>;

    /// Retrieve an element via the cache-first [`get_or_insert`] helper.
    async fn get_element(&mut self, id: Self::Id) -> Result<A::Element<'_>, Self::GetError> {
        get_or_insert(&mut self.inner, &mut self.cache, id)
            .send()
            .await
    }
}

impl<A, C> BuildDistanceComputer for CachingAccessor<A, C>
where
    A: BuildDistanceComputer + CacheableAccessor,
    C: ElementCache<A::Id, A::Map>,
{
    type DistanceComputerError = A::DistanceComputerError;
    type DistanceComputer = A::DistanceComputer;

    /// Delegate distance-computer construction to the inner accessor; the cache is not
    /// involved in distance computation.
    fn build_distance_computer(
        &self,
    ) -> Result<Self::DistanceComputer, Self::DistanceComputerError> {
        self.inner.build_distance_computer()
    }
}

impl<T, A, C> BuildQueryComputer<T> for CachingAccessor<A, C>
where
    A: BuildQueryComputer<T> + CacheableAccessor,
    C: ElementCache<A::Id, A::Map>,
{
    type QueryComputerError = A::QueryComputerError;
    type QueryComputer = A::QueryComputer;

    /// Delegate query-computer construction to the inner accessor; the cache is not
    /// involved in query computation.
    fn build_query_computer(
        &self,
        from: T,
    ) -> Result<Self::QueryComputer, Self::QueryComputerError> {
        self.inner.build_query_computer(from)
    }
}

impl<A, C, State> workingset::Fill<Cached<State>> for CachingAccessor<A, C>
where
    A: workingset::Fill<State>,
    A: CacheableAccessor + CachedFill<C, State>,
    C: ElementCache<A::Id, A::Map>,
{
    type Error = CachingError<A::Error, C::Error>;

    type View<'a>
        = A::View<'a>
    where
        Self: 'a,
        State: 'a;

    /// Unwrap the [`Cached`] state and let the inner accessor's [`CachedFill`]
    /// implementation mediate between the cache and the underlying provider.
    fn fill<'a, Itr>(
        &'a mut self,
        state: &'a mut Cached<State>,
        itr: Itr,
    ) -> impl SendFuture<Result<Self::View<'a>, Self::Error>>
    where
        Itr: ExactSizeIterator<Item = Self::Id> + Clone + Send + Sync,
        Self: 'a,
    {
        self.inner
            .cached_fill(&mut self.cache, &mut state.strategy, itr)
    }
}

// Marker implementation: beam expansion uses the default trait behavior, built on the
// caching element and neighbor access implemented above.
impl<A, C, T> ExpandBeam<T> for CachingAccessor<A, C>
where
    A: BuildQueryComputer<T> + CacheableAccessor + AsNeighbor,
    C: ElementCache<A::Id, A::Map> + NeighborCache<A::Id>,
{
}

/// Search post-processing step that "unwraps" a [`CachingAccessor`]: it forwards the
/// inner (non-caching) accessor to the next stage of the pipeline, adding no errors or
/// processing of its own.
#[derive(Debug, Default, Clone, Copy)]
pub struct Unwrap;

impl<A, C, T> SearchPostProcessStep<CachingAccessor<A, C>, T> for Unwrap
where
    A: BuildQueryComputer<T> + CacheableAccessor,
    C: ElementCache<A::Id, A::Map>,
{
    // `Unwrap` introduces no failure modes of its own; errors come from the next step.
    type Error<NextError>
        = NextError
    where
        NextError: StandardError;

    // After this step the pipeline continues with the *inner* (non-caching) accessor.
    type NextAccessor = A;

    fn post_process_step<I, B, Next>(
        &self,
        next: &Next,
        accessor: &mut CachingAccessor<A, C>,
        query: T,
        computer: &<A as BuildQueryComputer<T>>::QueryComputer,
        candidates: I,
        output: &mut B,
    ) -> impl Future<Output = Result<usize, Self::Error<Next::Error>>> + Send
    where
        I: Iterator<Item = Neighbor<A::Id>> + Send,
        B: SearchOutputBuffer<A::Id> + Send + ?Sized,
        Next: glue::SearchPostProcess<Self::NextAccessor, T, A::Id> + Sync,
    {
        // Strip the caching layer and hand the inner accessor to the next processor.
        next.post_process(&mut accessor.inner, query, computer, candidates, output)
    }
}

/// [`SearchExt`] delegation: starting points, early termination, and start-point
/// filtering are all properties of the inner accessor; the cache is not involved.
impl<A, C> SearchExt for CachingAccessor<A, C>
where
    A: SearchExt + CacheableAccessor,
    C: ElementCache<A::Id, A::Map>,
{
    fn starting_points(&self) -> impl Future<Output = ANNResult<Vec<Self::Id>>> + Send {
        self.inner.starting_points()
    }

    fn terminate_early(&mut self) -> bool {
        self.inner.terminate_early()
    }

    fn is_not_start_point(
        &self,
    ) -> impl Future<Output = ANNResult<impl Fn(Self::Id) -> bool + Send + Sync + 'static>> + Send
    {
        self.inner.is_not_start_point()
    }
}

//////////////
// Strategy //
//////////////

/// Shorthand for the search accessor that strategy `S` produces for provider `DP` and
/// query type `T`. Used to keep the `where` clauses below readable.
type SearchAccessor<'a, S, DP, T> = <S as SearchStrategy<DP, T>>::SearchAccessor<'a>;
/// Shorthand for the prune accessor that strategy `S` produces for provider `DP`.
type PruneAccessor<'a, S, DP> = <S as PruneStrategy<DP>>::PruneAccessor<'a>;

/// A description of what is happening with the trait requirements:
///
/// The strategy `S` needs to be a search strategy for the underlying provider. That
/// strategy has a `SearchAccessor` with an associated element type `E`.
///
/// We are requiring that the underlying cache `C` is convertible via `AsCacheAccessorFor`
/// to an implementation of `ElementCache` that is compatible with the element type `E` and
/// that the relevant accessor can also access the underlying graph.
impl<DP, C, T, S, E> SearchStrategy<CachingProvider<DP, C>, T> for Cached<S>
where
    DP: DataProvider,
    S: for<'a> SearchStrategy<DP, T, SearchAccessor<'a>: CacheableAccessor>,
    C: for<'a> AsCacheAccessorFor<
            'a,
            SearchAccessor<'a, S, DP, T>,
            Accessor: NeighborCache<DP::InternalId>,
            Error = E,
        > + AsyncFriendly,
    E: StandardError,
{
    // The query computer is untouched by caching.
    type QueryComputer = S::QueryComputer;
    // The search accessor pairs the inner strategy's accessor with a cache accessor.
    type SearchAccessor<'a> = CachingAccessor<
        SearchAccessor<'a, S, DP, T>,
        <C as AsCacheAccessorFor<'a, SearchAccessor<'a, S, DP, T>>>::Accessor,
    >;
    type SearchAccessorError = CachingError<S::SearchAccessorError, E>;

    fn search_accessor<'a>(
        &'a self,
        provider: &'a CachingProvider<DP, C>,
        context: &'a DP::Context,
    ) -> Result<Self::SearchAccessor<'a>, Self::SearchAccessorError> {
        // First build the inner accessor from the wrapped strategy ...
        let inner = self
            .strategy
            .search_accessor(&provider.provider, context)
            .map_err(CachingError::Inner)?;

        // ... then pair it with a cache accessor derived from the provider's cache.
        provider
            .cache
            .as_cache_accessor_for(inner)
            .map_err(CachingError::Cache)
    }
}

/// [`DefaultPostProcessor`] delegation for [`Cached`]. The processor is composed by
/// wrapping the inner strategy's processor with [`Unwrap`] via [`Pipeline`], so the
/// caching layer is stripped before the inner processor runs.
impl<DP, C, T, S, E> glue::DefaultPostProcessor<CachingProvider<DP, C>, T> for Cached<S>
where
    DP: DataProvider,
    // Bounds mirror the `SearchStrategy` impl above, plus the inner default processor.
    S: glue::DefaultPostProcessor<DP, T>
        + for<'a> SearchStrategy<DP, T, SearchAccessor<'a>: CacheableAccessor>,
    C: for<'a> AsCacheAccessorFor<
            'a,
            SearchAccessor<'a, S, DP, T>,
            Accessor: NeighborCache<DP::InternalId>,
            Error = E,
        > + AsyncFriendly,
    E: StandardError,
{
    type Processor = Pipeline<Unwrap, S::Processor>;

    fn default_post_processor(&self) -> Self::Processor {
        Pipeline::new(Unwrap, self.strategy.default_post_processor())
    }
}

/// We need `S` to be a [`PruneStrategy`] for the underlying provider.
///
/// This strategy has an associated [`PruneElement`] type `E`
///
/// We are requiring the cache `C` to be convertible via [`AsCacheAccessorFor`] to an
/// implementation of `ElementCache` that is compatible with `E` and allows mutation of the
/// cached graph.
///
/// Finally, the underlying [`PruneAccessor`] needs to implement [`CachedFill`] for the
/// corresponding cached accessor.
impl<DP, C, S, E> PruneStrategy<CachingProvider<DP, C>> for Cached<S>
where
    DP: DataProvider,
    S: for<'a> PruneStrategy<DP, PruneAccessor<'a>: CacheableAccessor>,
    C: for<'a> AsCacheAccessorFor<
            'a,
            PruneAccessor<'a, S, DP>,
            Accessor: NeighborCache<DP::InternalId>,
            Error = E,
        > + AsyncFriendly,
    // Required so `workingset::Fill` works for the caching accessor (see above).
    for<'a> S::PruneAccessor<'a>: CachedFill<<C as AsCacheAccessorFor<'a, PruneAccessor<'a, S, DP>>>::Accessor, S::WorkingSet>,
    E: StandardError,
{
    // The inner working set is wrapped so the cached `Fill` impl applies.
    type WorkingSet = Cached<S::WorkingSet>;
    type DistanceComputer<'a> = S::DistanceComputer<'a>;
    type PruneAccessor<'a> = CachingAccessor<
        PruneAccessor<'a, S, DP>,
        <C as AsCacheAccessorFor<'a, PruneAccessor<'a, S, DP>>>::Accessor,
    >;
    type PruneAccessorError = CachingError<S::PruneAccessorError, E>;

    fn prune_accessor<'a>(
        &'a self,
        provider: &'a CachingProvider<DP, C>,
        context: &'a DP::Context,
    ) -> Result<Self::PruneAccessor<'a>, Self::PruneAccessorError> {
        // First build the inner accessor from the wrapped strategy ...
        let inner = self
            .strategy
            .prune_accessor(&provider.provider, context)
            .map_err(CachingError::Inner)?;

        // ... then pair it with a cache accessor derived from the provider's cache.
        provider
            .cache
            .as_cache_accessor_for(inner)
            .map_err(CachingError::Cache)
    }

    fn create_working_set(&self, capacity: usize) -> Self::WorkingSet {
        Cached::new(self.strategy.create_working_set(capacity))
    }
}

/// Surprisingly - the `where` clause for this, while not pretty, is not too bad.
impl<DP, C, T, S> InsertStrategy<CachingProvider<DP, C>, T> for Cached<S>
where
    DP: DataProvider,
    S: InsertStrategy<DP, T>,
    Cached<S>: SearchStrategy<CachingProvider<DP, C>, T>,
    Cached<S::PruneStrategy>: PruneStrategy<CachingProvider<DP, C>>,
    C: AsyncFriendly,
{
    type PruneStrategy = Cached<S::PruneStrategy>;
    fn prune_strategy(&self) -> Self::PruneStrategy {
        Cached {
            strategy: self.strategy.prune_strategy(),
        }
    }
}

/// The `where` clause requires that:
///
/// 1. The inner strategy's [`DeleteSearchAccessor`] is cacheable.
/// 2. The cache `C` can produce a cache-accessor for the inner strategy's accessor.
/// 3. The wrapped search strategy `Cached<S::SearchStrategy>` remains a valid
///    `SearchStrategy` for `CachingProvider` (needed for the equality constraint on
///    [`InplaceDeleteStrategy::SearchStrategy`]).
impl<DP, C, S, E> InplaceDeleteStrategy<CachingProvider<DP, C>> for Cached<S>
where
    DP: DataProvider,
    S: InplaceDeleteStrategy<DP>,
    for<'a> S::DeleteSearchAccessor<'a>: CacheableAccessor,
    Cached<S::PruneStrategy>: PruneStrategy<CachingProvider<DP, C>>,
    for<'a> Cached<S::SearchStrategy>: SearchStrategy<
            CachingProvider<DP, C>,
            S::DeleteElement<'a>,
            SearchAccessor<'a> = CachingAccessor<
                S::DeleteSearchAccessor<'a>,
                <C as AsCacheAccessorFor<'a, S::DeleteSearchAccessor<'a>>>::Accessor,
            >,
        >,
    C: for<'a> AsCacheAccessorFor<
            'a,
            S::DeleteSearchAccessor<'a>,
            Accessor: NeighborCache<DP::InternalId>,
            Error = E,
        > + AsyncFriendly,
    E: StandardError,
{
    type DeleteElement<'a> = S::DeleteElement<'a>;
    type DeleteElementGuard = S::DeleteElementGuard;
    type DeleteElementError = S::DeleteElementError;

    type PruneStrategy = Cached<S::PruneStrategy>;

    type DeleteSearchAccessor<'a> = CachingAccessor<
        S::DeleteSearchAccessor<'a>,
        <C as AsCacheAccessorFor<'a, S::DeleteSearchAccessor<'a>>>::Accessor,
    >;

    type SearchStrategy = Cached<S::SearchStrategy>;
    type SearchPostProcessor = Pipeline<Unwrap, S::SearchPostProcessor>;

    fn prune_strategy(&self) -> Self::PruneStrategy {
        Cached {
            strategy: self.strategy.prune_strategy(),
        }
    }

    fn search_strategy(&self) -> Self::SearchStrategy {
        Cached {
            strategy: self.strategy.search_strategy(),
        }
    }

    fn search_post_processor(&self) -> Self::SearchPostProcessor {
        Pipeline::new(Unwrap, self.strategy.search_post_processor())
    }

    fn get_delete_element<'a>(
        &'a self,
        provider: &'a CachingProvider<DP, C>,
        context: &'a DP::Context,
        id: DP::InternalId,
    ) -> impl Future<Output = Result<Self::DeleteElementGuard, Self::DeleteElementError>> + Send
    {
        self.strategy
            .get_delete_element(&provider.provider, context, id)
    }
}

impl<DP, C, S, B> MultiInsertStrategy<CachingProvider<DP, C>, B> for Cached<S>
where
    DP: DataProvider,
    B: Batch,
    S: MultiInsertStrategy<DP, B>,
    Cached<S::InsertStrategy>: for<'a> InsertStrategy<
            CachingProvider<DP, C>,
            B::Element<'a>,
            PruneStrategy: PruneStrategy<
                CachingProvider<DP, C>,
                WorkingSet = Cached<S::WorkingSet>,
            >,
        >,
    C: AsyncFriendly,
{
    type Seed = Cached<S::Seed>;
    type WorkingSet = Cached<S::WorkingSet>;
    type FinishError = S::FinishError;
    type InsertStrategy = Cached<S::InsertStrategy>;

    fn insert_strategy(&self) -> Self::InsertStrategy {
        Cached {
            strategy: self.strategy.insert_strategy(),
        }
    }

    fn finish<Itr>(
        &self,
        provider: &CachingProvider<DP, C>,
        context: &DP::Context,
        batch: &Arc<B>,
        ids: Itr,
    ) -> impl std::future::Future<Output = Result<Self::Seed, Self::FinishError>> + Send
    where
        Itr: ExactSizeIterator<Item = DP::InternalId> + Send,
    {
        self.strategy
            .finish(provider.inner(), context, batch, ids)
            .map(|r| r.map(Cached::new))
    }
}

///////////
// Tests //
///////////

#[cfg(test)]
mod tests {
    use super::*;

    use std::{
        fmt::Display,
        sync::{
            Arc,
            atomic::{AtomicUsize, Ordering},
        },
    };

    use diskann::{
        ANNError,
        error::{RankedError, ToRanked, TransientError},
    };

    /// Shared call counters used to verify that `TransientErr` routes each
    /// `TransientError` method to the expected handler exactly once.
    #[derive(Debug, Default)]
    struct Counters {
        acknowledge: AtomicUsize,
        acknowledge_with: AtomicUsize,
        escalate: AtomicUsize,
        escalate_with: AtomicUsize,
    }

    /// Test transient error: increments the shared `Counters` when handled and carries
    /// a `token` so escalation results can be matched back to their source.
    #[derive(Debug)]
    struct TransientErr {
        counters: Arc<Counters>,
        token: usize,
    }

    impl TransientErr {
        fn new(counters: &Arc<Counters>, token: usize) -> Self {
            Self {
                counters: counters.clone(),
                token,
            }
        }
    }

    /// Test critical (non-recoverable) error; the payload identifies which transient
    /// error was escalated into it.
    #[derive(Debug, Error)]
    #[error("super critical error: {0}")]
    struct Critical(usize);

    impl From<Critical> for ANNError {
        fn from(err: Critical) -> Self {
            // Wrap opaquely; the tests never need to recover the payload from ANNError.
            ANNError::opaque(err)
        }
    }

    /// A ranked test error that can be either transient or critical, exercising both
    /// arms of `ToRanked`.
    #[derive(Debug)]
    enum Generic {
        Transient(TransientErr),
        Critical(Critical),
    }

    /// Each handler bumps its dedicated counter; the escalating handlers additionally
    /// produce a `Critical` carrying this error's `token`.
    impl TransientError<Critical> for TransientErr {
        fn acknowledge<D>(self, _why: D)
        where
            D: Display,
        {
            self.counters.acknowledge.fetch_add(1, Ordering::Relaxed);
        }

        fn acknowledge_with<F, D>(self, _why: F)
        where
            F: FnOnce() -> D,
            D: Display,
        {
            self.counters
                .acknowledge_with
                .fetch_add(1, Ordering::Relaxed);
        }

        fn escalate<D>(self, _why: D) -> Critical
        where
            D: Display,
        {
            self.counters.escalate.fetch_add(1, Ordering::Relaxed);
            // Propagate the token so the test can confirm which error was escalated.
            Critical(self.token)
        }

        fn escalate_with<F, D>(self, _why: F) -> Critical
        where
            F: FnOnce() -> D,
            D: Display,
        {
            self.counters.escalate_with.fetch_add(1, Ordering::Relaxed);
            Critical(self.token)
        }
    }

    /// Straightforward ranking: `Transient` maps to `RankedError::Transient` and
    /// `Critical` to `RankedError::Error`; the `from_*` constructors invert that mapping.
    impl ToRanked for Generic {
        type Transient = TransientErr;
        type Error = Critical;

        fn to_ranked(self) -> RankedError<TransientErr, Critical> {
            match self {
                Self::Transient(e) => RankedError::Transient(e),
                Self::Critical(e) => RankedError::Error(e),
            }
        }

        fn from_transient(transient: TransientErr) -> Self {
            Self::Transient(transient)
        }

        fn from_error(error: Critical) -> Self {
            Self::Critical(error)
        }
    }

    /// Test cache error with no transient form - used to verify that cache errors are
    /// always ranked as critical.
    #[derive(Debug, Error)]
    #[error("always a critical error")]
    struct AlwaysCritical;

    impl From<AlwaysCritical> for ANNError {
        fn from(err: AlwaysCritical) -> Self {
            ANNError::opaque(err)
        }
    }

    /// Verifies that cache errors rank as critical and that the `Transient` wrapper
    /// forwards each `TransientError` method to the wrapped error exactly once.
    #[test]
    fn test_caching_error() {
        type TestError = CachingError<Critical, AlwaysCritical>;

        // Cache errors are always critical.
        let err = CachingError::<Generic, AlwaysCritical>::Cache(AlwaysCritical);
        assert!(matches!(
            err.to_ranked(),
            RankedError::Error(CachingError::Cache(AlwaysCritical))
        ));

        // Transient correctly forwards calls.
        let counters = Arc::new(Counters::default());

        let make_transient = || Transient(TransientErr::new(&counters, 10));

        <_ as TransientError<TestError>>::acknowledge(make_transient(), "");
        assert_eq!(counters.acknowledge.load(Ordering::Relaxed), 1);

        <_ as TransientError<TestError>>::acknowledge_with(make_transient(), || "");
        assert_eq!(counters.acknowledge_with.load(Ordering::Relaxed), 1);

        let err = <_ as TransientError<TestError>>::escalate(make_transient(), "").expect_inner();
        assert_eq!(counters.escalate.load(Ordering::Relaxed), 1);
        assert_eq!(err.0, 10);

        let err =
            <_ as TransientError<TestError>>::escalate_with(make_transient(), || "").expect_inner();
        // Fixed: assert the `escalate_with` counter here. Previously this re-checked
        // `escalate`, so forwarding of `escalate_with` was never actually verified.
        assert_eq!(counters.escalate_with.load(Ordering::Relaxed), 1);
        assert_eq!(err.0, 10);
    }

    /// Verifies the full `ToRanked` round-trip for `CachingError`: ranking each variant
    /// and reassembling from the ranked forms.
    #[test]
    fn test_caching_error_to_ranked() {
        type Top = CachingError<Generic, AlwaysCritical>;
        type Crit = CachingError<Critical, AlwaysCritical>;

        let err = Top::Cache(AlwaysCritical);

        // Cache Errors
        assert!(
            matches!(
                err.to_ranked(),
                RankedError::Error(CachingError::<Critical, AlwaysCritical>::Cache(
                    AlwaysCritical
                ))
            ),
            "cache errors are always critical"
        );

        assert!(
            matches!(Top::from_error(Crit::Cache(AlwaysCritical)), Top::Cache(_)),
            "reassembling from Cache should preserve Cache"
        );

        let counters = Arc::new(Counters::default());

        // Inner - transient.
        let err = Top::Inner(Generic::Transient(TransientErr::new(&counters, 5)));
        assert!(
            matches!(
                err.to_ranked(),
                RankedError::Transient(Transient(TransientErr { .. }))
            ),
            "transient inner errors are transient"
        );

        assert!(
            matches!(
                Top::from_transient(Transient(TransientErr::new(&counters, 5))),
                Top::Inner(Generic::Transient(_)),
            ),
            // Fixed typo in the assertion message ("tranient" -> "transient").
            "transient errors are still transient",
        );

        // Inner - critical.
        let err = Top::Inner(Generic::Critical(Critical(2)));
        assert!(
            matches!(
                err.to_ranked(),
                RankedError::Error(CachingError::<Critical, AlwaysCritical>::Inner(_))
            ),
            "critical errors are critical"
        );

        assert!(
            matches!(
                Top::from_error(Crit::Inner(Critical(2))),
                Top::Inner(Generic::Critical(_)),
            ),
            "critical errors are still critical",
        );
    }
}