//! seatbelt 0.4.4
//!
//! Resilience and recovery mechanisms for fallible operations.
//! See the crate documentation for details.
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use std::marker::PhantomData;
use std::sync::Arc;
use std::time::Duration;

use layered::Layer;

use crate::retry::backoff::BackoffOptions;
use crate::retry::constants::DEFAULT_RETRY_ATTEMPTS;
use crate::retry::*;
use crate::typestates::{NotSet, Set};
use crate::utils::{EnableIf, TelemetryHelper};
use crate::{Recovery, RecoveryInfo, ResilienceContext, TelemetryString};

/// Builder for configuring retry resilience middleware.
///
/// This type is created by calling [`Retry::layer`](crate::retry::Retry::layer) and uses the
/// type-state pattern to enforce that required properties are configured before the retry middleware can be built:
///
/// - [`clone_input`][RetryLayer::clone_input]: Required to specify how to clone inputs for retry attempts
/// - [`recovery`][RetryLayer::recovery]: Required to determine if an output should trigger a retry
///
/// For comprehensive examples, see the [retry module][crate::retry] documentation.
///
/// # Type State
///
/// - `S1`: Tracks whether [`clone_input`][RetryLayer::clone_input] has been set
/// - `S2`: Tracks whether [`recovery`][RetryLayer::recovery] has been set
#[derive(Debug)]
pub struct RetryLayer<In, Out, S1 = Set, S2 = Set> {
    /// Shared resilience context; supplies the clock and the telemetry factory.
    context: ResilienceContext<In, Out>,
    /// Total attempt budget: the original call plus the configured retries.
    max_attempts: u32,
    /// Delay-calculation settings (strategy, base delay, optional cap, jitter).
    backoff: BackoffOptions,
    /// Produces a fresh input for each retry; `None` until a `clone_input*` method is called.
    clone_input: Option<CloneInput<In>>,
    /// Classifies outputs as recoverable or not; `None` until a `recovery*` method is called.
    should_recover: Option<ShouldRecover<Out>>,
    /// Optional observability callback invoked before each retry attempt.
    on_retry: Option<OnRetry<Out>>,
    /// Per-request enable/disable predicate; enabled for all inputs by default.
    enable_if: EnableIf<In>,
    /// Telemetry handle created from the context and the layer's name.
    telemetry: TelemetryHelper,
    /// Optional fallback that reconstructs the input from a failed output.
    restore_input: Option<RestoreInput<In, Out>>,
    /// Whether `RecoveryInfo::unavailable` classifications are treated as recoverable.
    handle_unavailable: bool,
    /// Zero-sized marker carrying the `S1`/`S2` type-state parameters.
    _state: PhantomData<fn(In, S1, S2) -> Out>,
}

impl<In, Out> RetryLayer<In, Out, NotSet, NotSet> {
    /// Creates a retry-layer builder in its initial, unconfigured state.
    ///
    /// All optional callbacks start unset, backoff uses its defaults, and the
    /// builder is tagged `(NotSet, NotSet)` so it cannot be used as a
    /// [`Layer`] until both required callbacks are configured.
    #[must_use]
    pub(crate) fn new(name: TelemetryString, context: &ResilienceContext<In, Out>) -> Self {
        let telemetry = context.create_telemetry(name);
        Self {
            context: context.clone(),
            telemetry,
            // +1 because max_attempts counts the original call as well as retries.
            max_attempts: DEFAULT_RETRY_ATTEMPTS.saturating_add(1),
            backoff: BackoffOptions::default(),
            clone_input: None,
            should_recover: None,
            on_retry: None,
            restore_input: None,
            enable_if: EnableIf::default(),
            handle_unavailable: false,
            _state: PhantomData,
        }
    }
}

impl<In, Out, S1, S2> RetryLayer<In, Out, S1, S2> {
    /// Sets the maximum number of retry attempts.
    ///
    /// The value counts retries *in addition to* the original call: a value of
    /// 3 allows up to 4 invocations in total (1 original `+` 3 retries).
    ///
    /// **Default**: 3 retry attempts
    #[must_use]
    pub fn max_retry_attempts(mut self, max_attempts: u32) -> Self {
        // Internally we track the total attempt budget, so add one for the
        // original call (saturating to stay well-defined at u32::MAX).
        self.max_attempts = max_attempts.saturating_add(1);
        self
    }

    /// Selects the backoff strategy used to compute retry delays.
    ///
    /// - [`Backoff::Constant`]: identical delay before every retry
    /// - [`Backoff::Linear`]: delay grows linearly (`base_delay` `×` attempt)
    /// - [`Backoff::Exponential`]: delay grows exponentially (`base_delay × 2^attempt`)
    ///
    /// **Default**: [`Backoff::Exponential`]
    #[must_use]
    pub fn backoff(mut self, backoff_type: Backoff) -> Self {
        self.backoff.backoff_type = backoff_type;
        self
    }

    /// Sets the base delay that backoff calculations start from.
    ///
    /// Its interpretation depends on the strategy chosen via [`backoff`][Self::backoff]:
    /// - **Constant**: the fixed delay between retries
    /// - **Linear**: the first delay; later delays scale with the attempt number
    /// - **Exponential**: the first delay; later delays grow exponentially
    ///
    /// **Default**: 10 milliseconds (tuned for service-to-service communication)
    ///
    /// For client-to-service scenarios, values in the 1-2 second range are
    /// usually more appropriate.
    #[must_use]
    pub fn base_delay(mut self, delay: Duration) -> Self {
        self.backoff.base_delay = delay;
        self
    }

    /// Caps the delay between retries at the given maximum.
    ///
    /// Without a cap, the computed delay can keep growing with every attempt,
    /// depending on the backoff algorithm in use.
    ///
    /// **Default**: None (uncapped)
    #[must_use]
    pub fn max_delay(mut self, max_delay: Duration) -> Self {
        self.backoff.max_delay = Some(max_delay);
        self
    }

    /// Turns delay randomization (jitter) on or off.
    ///
    /// Jitter de-correlates retries from independent clients, avoiding
    /// thundering-herd effects where many callers retry at the same instant —
    /// a common failure mode in distributed systems.
    ///
    /// **Default**: true (jitter enabled)
    #[must_use]
    pub fn use_jitter(mut self, use_jitter: bool) -> Self {
        self.backoff.use_jitter = use_jitter;
        self
    }

    /// Applies every setting from a [`RetryConfig`] to this layer.
    ///
    /// Convenience for configuration loaded from external sources (e.g.
    /// configuration files), so callers don't have to invoke each builder
    /// method individually.
    #[must_use]
    pub fn config(self, config: &RetryConfig) -> Self {
        // Each setter touches an independent field, so the order of this
        // chain is not significant.
        self.max_retry_attempts(config.max_retry_attempts)
            .backoff(config.backoff_type)
            .base_delay(config.base_delay)
            .max_delay_optional(config.max_delay)
            .use_jitter(config.use_jitter)
            .handle_unavailable(config.handle_unavailable)
            .enable(config.enabled)
    }

    /// Sets or clears the maximum delay in a single call (used by [`config`][Self::config]).
    fn max_delay_optional(mut self, max_delay: Option<Duration>) -> Self {
        self.backoff.max_delay = max_delay;
        self
    }

    /// Registers the function used to clone the input before each retry.
    ///
    /// The inner service consumes its input on every attempt, so each retry
    /// needs a fresh value. `clone_fn` receives a mutable reference to the
    /// current input plus [`CloneArgs`] describing the attempt; returning
    /// `Some(cloned_input)` proceeds with the retry, while `None` aborts and
    /// returns the last failed result.
    #[must_use]
    pub fn clone_input_with(
        mut self,
        clone_fn: impl Fn(&mut In, CloneArgs) -> Option<In> + Send + Sync + 'static,
    ) -> RetryLayer<In, Out, Set, S2> {
        self.clone_input = Some(CloneInput::new(clone_fn));
        // Record in the type state that the cloning requirement is satisfied.
        self.into_state::<Set, S2>()
    }

    /// Uses the standard [`Clone`] implementation to clone inputs for retries.
    ///
    /// Convenience shorthand for
    /// [`clone_input_with`][RetryLayer::clone_input_with] with
    /// `|input, _args| Some(input.clone())` — handy whenever the input type
    /// implements [`Clone`] and no per-attempt customization of the input is
    /// required.
    ///
    /// # Type Requirements
    ///
    /// Only available when the input type `In` implements [`Clone`].
    #[must_use]
    pub fn clone_input(self) -> RetryLayer<In, Out, Set, S2>
    where
        In: Clone,
    {
        self.clone_input_with(|input, _args| Some(input.clone()))
    }

    /// Registers the function that decides whether an output warrants a retry.
    ///
    /// After each attempt, `recover_fn` inspects a reference to the output
    /// (together with [`RecoveryArgs`] describing the current attempt) and
    /// returns a [`RecoveryInfo`] classification that drives the retry
    /// decision.
    #[must_use]
    pub fn recovery_with(
        mut self,
        recover_fn: impl Fn(&Out, RecoveryArgs) -> RecoveryInfo + Send + Sync + 'static,
    ) -> RetryLayer<In, Out, S1, Set> {
        self.should_recover = Some(ShouldRecover::new(recover_fn));
        // Record in the type state that the recovery requirement is satisfied.
        self.into_state::<S1, Set>()
    }

    /// Automatically sets the recovery classification function for types that implement [`Recovery`].
    ///
    /// This is a convenience method that uses the [`Recovery`] trait to determine
    /// whether an output should trigger a retry. For types that implement [`Recovery`],
    /// this provides a simple way to enable intelligent retry behavior without manually
    /// implementing a recovery classification function.
    ///
    /// This is equivalent to calling [`recovery_with`][RetryLayer::recovery_with] with
    /// `|output, _args| output.recovery()`.
    ///
    /// # Type Requirements
    ///
    /// This method is only available when the output type `Out` implements [`Recovery`].
    #[must_use]
    pub fn recovery(self) -> RetryLayer<In, Out, S1, Set>
    where
        Out: Recovery,
    {
        self.recovery_with(|out, _args| out.recovery())
    }

    /// Registers a callback that fires just before every retry attempt.
    ///
    /// Intended for logging, metrics, and similar observability concerns: the
    /// `retry_fn` is handed the output that caused the retry along with
    /// [`OnRetryArgs`] carrying detailed retry information. It is purely an
    /// observer and has no influence on whether or how the retry proceeds.
    ///
    /// **Default**: None (no observability by default)
    #[must_use]
    pub fn on_retry(mut self, retry_fn: impl Fn(&Out, OnRetryArgs) + Send + Sync + 'static) -> Self {
        self.on_retry = Some(OnRetry::new(retry_fn));
        self
    }

    /// Gates the retry middleware on a per-request predicate.
    ///
    /// `is_enabled` is evaluated against each input; when it returns `false`,
    /// the request passes through without retry protection. Calling this
    /// replaces any previously configured condition.
    ///
    /// **Default**: Always enabled
    #[must_use]
    pub fn enable_if(mut self, is_enabled: impl Fn(&In) -> bool + Send + Sync + 'static) -> Self {
        self.enable_if = EnableIf::custom(is_enabled);
        self
    }

    /// Unconditionally enables or disables the retry middleware.
    ///
    /// When disabled, requests pass through without retry protection.
    /// Replaces any previously configured condition.
    #[must_use]
    fn enable(mut self, enabled: bool) -> Self {
        self.enable_if = EnableIf::new(enabled);
        self
    }

    /// Enables the retry middleware for every request.
    ///
    /// Replaces any previously configured condition. Retry is already enabled
    /// by default, so this is mainly useful to undo an earlier
    /// [`enable_if`][Self::enable_if] or [`disable`][Self::disable].
    #[must_use]
    pub fn enable_always(self) -> Self {
        self.enable(true)
    }

    /// Disables the retry middleware for every request.
    ///
    /// All requests pass straight through without retry protection. This
    /// replaces any previously configured condition and overrides the
    /// enabled-by-default behavior.
    #[must_use]
    pub fn disable(self) -> Self {
        self.enable(false)
    }

    /// Configures whether the retry middleware should attempt to recover from unavailable services.
    ///
    /// When enabled, the retry middleware will treat [`RecoveryInfo::unavailable`] classifications
    /// as recoverable conditions and attempt retries. When disabled (default), unavailable services
    /// are treated as non-recoverable and cause immediate failure without retry attempts.
    ///
    /// This is particularly useful when you have access to multiple resources
    /// or service endpoints. When one resource is unavailable, the retry
    /// mechanism can attempt the operation against a different resource in subsequent
    /// attempts, potentially allowing the operation to succeed despite the unavailability.
    ///
    /// Set `enable` to `true` to enable unavailable recovery, or `false` to treat
    /// unavailable responses as permanent failures.
    ///
    /// **Default**: false (unavailable responses are not retried)
    ///
    /// # Example
    ///
    /// ```rust
    /// # use seatbelt::Attempt;
    /// # use seatbelt::retry::Retry;
    /// # use seatbelt::{RecoveryInfo, ResilienceContext};
    /// # use tick::Clock;
    /// # fn example() {
    /// # let context = ResilienceContext::<String, Result<String, String>>::new(Clock::new_frozen());
    /// // Service with multiple endpoints that can route around unavailable services
    /// let layer = Retry::layer("multi_endpoint_retry", &context)
    ///     .clone_input_with(|input: &mut String, args| {
    ///         let mut input = input.clone();
    ///         update_endpoint(&mut input, args.attempt()); // Modify input to use a different endpoint
    ///         Some(input)
    ///     })
    ///     .recovery_with(|result, _args| match result {
    ///         Ok(_) => RecoveryInfo::never(),
    ///         Err(msg) if msg.contains("service unavailable") => RecoveryInfo::unavailable(),
    ///         Err(_) => RecoveryInfo::retry(),
    ///     })
    ///     .handle_unavailable(true); // Try different endpoints on unavailable
    /// # }
    /// # fn update_endpoint(_input : &mut String, _attempt: Attempt)  {}
    /// ```
    #[must_use]
    pub fn handle_unavailable(mut self, enable: bool) -> Self {
        // The flag is propagated into the built service via RetryShared
        // (see the Layer impl below).
        self.handle_unavailable = enable;
        self
    }

    /// Sets the input restoration function.
    ///
    /// This function is called when the original input could not be cloned for a retry
    /// attempt (i.e., when [`clone_input_with`][RetryLayer::clone_input_with] returns `None`).
    /// The `restore_fn` receives a mutable reference to the output from the failed attempt
    /// and [`RestoreInputArgs`] containing context about the retry attempt. It can attempt
    /// to extract and reconstruct the input for the next retry.
    ///
    /// This is particularly useful when a service is unavailable and the input was not actually
    /// consumed by the operation. A common pattern is that error responses contain or reference
    /// the original input that can be extracted for retry. For example, an HTTP request that
    /// is rejected even before sending, because the remote service is known to be down.
    ///
    /// The restore function should return:
    /// - `Some(Input)` to proceed with retry using the restored input
    /// - `None` to abort retry and return the provided output
    ///
    /// This enables retry scenarios where input cloning is expensive or impossible, but
    /// the input can be extracted from error responses or failure contexts.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use std::ops::ControlFlow;
    /// # use seatbelt::retry::{Retry, RestoreInputArgs};
    /// # use seatbelt::{RecoveryInfo, ResilienceContext};
    /// # use tick::Clock;
    /// # fn example() {
    /// # let clock = Clock::new_frozen();
    /// # let context = ResilienceContext::new(&clock);
    /// #[derive(Clone)]
    /// struct HttpRequest {
    ///     url: String,
    ///     body: Vec<u8>,
    /// }
    ///
    /// enum HttpResult {
    ///     Success(String),
    ///     ConnectionError { original_request: HttpRequest },
    ///     ServerError(u16),
    /// }
    ///
    /// let layer = Retry::layer("http_retry", &context)
    ///     .clone_input_with(|_request, _args| None) // Don't clone expensive request bodies
    ///     .restore_input(|result: &mut HttpResult, _args| {
    ///         match result {
    ///             // Extract the original request from the error for retry
    ///             HttpResult::ConnectionError { original_request } => {
    ///                 let request = original_request.clone();
    ///                 *result = HttpResult::ServerError(0);
    ///                 Some(request)
    ///             }
    ///             _ => None,
    ///         }
    ///     })
    ///     .recovery_with(|result, _args| match result {
    ///         HttpResult::ConnectionError { .. } => RecoveryInfo::retry(),
    ///         _ => RecoveryInfo::never(),
    ///     });
    /// # }
    /// ```
    #[must_use]
    pub fn restore_input(mut self, restore_fn: impl Fn(&mut Out, RestoreInputArgs) -> Option<In> + Send + Sync + 'static) -> Self {
        self.restore_input = Some(RestoreInput::new(restore_fn));
        self
    }
}

// The Layer impl exists only for the fully-configured `(Set, Set)` type state,
// so both required callbacks are guaranteed to be present when `layer` runs.
impl<In, Out, S> Layer<S> for RetryLayer<In, Out, Set, Set> {
    type Service = Retry<In, Out, S>;

    fn layer(&self, inner: S) -> Self::Service {
        // Snapshot the builder's configuration into shared state that the
        // built service holds behind an `Arc`.
        let shared = RetryShared {
            clock: self.context.get_clock().clone(),
            max_attempts: self.max_attempts,
            backoff: self.backoff.clone().into(),
            // Both `expect`s are safe: the `(Set, Set)` bound on this impl
            // means the builder methods that populate these fields were called.
            clone_input: self.clone_input.clone().expect("enforced by the type state pattern"),
            should_recover: self.should_recover.clone().expect("enforced by the type state pattern"),
            on_retry: self.on_retry.clone(),
            enable_if: self.enable_if.clone(),
            // Telemetry is only carried when a telemetry feature (or the test
            // harness) is compiled in.
            #[cfg(any(feature = "logs", feature = "metrics", test))]
            telemetry: self.telemetry.clone(),
            restore_input: self.restore_input.clone(),
            handle_unavailable: self.handle_unavailable,
        };

        Retry {
            shared: Arc::new(shared),
            inner,
        }
    }
}

// Extra convenience methods available only when the output is a `Result`.
impl<In, Res, Error, S1, S2> RetryLayer<In, Result<Res, Error>, S1, S2> {
    /// Sets a specialized input restoration callback that operates only on error cases.
    ///
    /// This is a convenience method for working with `Result<Res, Error>` outputs, where you
    /// only want to restore input when an error occurs. The callback receives a mutable reference
    /// to the error and can extract the original input from it, while potentially modifying the
    /// error for the next attempt.
    ///
    /// This method is particularly useful when:
    /// - Your service returns `Result<T, E>` where the error type contains recoverable request data
    /// - You want to extract and restore input only from error cases, not successful responses
    /// - You need to modify the error (e.g., to remove sensitive data) before the next retry
    ///
    /// # Parameters
    ///
    /// * `restore_fn` - A function that takes a mutable reference to the error and restoration
    ///   arguments, returning `Some(input)` if the input can be restored from the error, or
    ///   `None` if restoration is not possible or desired.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use tick::Clock;
    /// # use seatbelt::retry::*;
    /// # use seatbelt::{RecoveryInfo, ResilienceContext};
    /// # #[derive(Clone)]
    /// # struct HttpRequest { url: String, body: Vec<u8> }
    /// # struct HttpResponse { status: u16 }
    /// # enum HttpError {
    /// #     ConnectionError { original_request: HttpRequest },
    /// #     ServerError(u16),
    /// #     AuthError,
    /// # }
    /// # impl HttpError {
    /// #     fn try_restore_request(&mut self) -> Option<HttpRequest> {
    /// #         match self {
    /// #             HttpError::ConnectionError { original_request } => {
    /// #                 Some(original_request.clone())
    /// #             },
    /// #             _ => None,
    /// #         }
    /// #     }
    /// # }
    /// # fn example(clock: Clock) {
    /// # let context = ResilienceContext::<HttpRequest, Result<HttpResponse, HttpError>>::new(&clock);
    /// type HttpResult = Result<HttpResponse, HttpError>;
    ///
    /// let layer = Retry::layer("http_retry", &context).restore_input_from_error(
    ///     |error: &mut HttpError, _args| {
    ///         // Only restore input from connection errors that contain the original request
    ///         error.try_restore_request()
    ///     },
    /// );
    /// # }
    /// ```
    #[must_use]
    pub fn restore_input_from_error(self, restore_fn: impl Fn(&mut Error, RestoreInputArgs) -> Option<In> + Send + Sync + 'static) -> Self {
        self.restore_input(move |input, args| match input {
            // Successful outputs never need input restoration.
            Ok(_) => None,
            Err(e) => restore_fn(e, args),
        })
    }
}

impl<In, Out, S1, S2> RetryLayer<In, Out, S1, S2> {
    /// Re-tags the builder with new type-state parameters `T1`/`T2`.
    ///
    /// Moves every field across unchanged; only the zero-sized `_state`
    /// marker — and therefore the compile-time state — differs. (Struct
    /// update syntax cannot be used here because the source and target
    /// types have different generic parameters.)
    fn into_state<T1, T2>(self) -> RetryLayer<In, Out, T1, T2> {
        let Self {
            context,
            max_attempts,
            backoff,
            clone_input,
            should_recover,
            on_retry,
            enable_if,
            telemetry,
            restore_input,
            handle_unavailable,
            _state: _,
        } = self;
        RetryLayer {
            context,
            max_attempts,
            backoff,
            clone_input,
            should_recover,
            on_retry,
            enable_if,
            telemetry,
            restore_input,
            handle_unavailable,
            _state: PhantomData,
        }
    }
}

#[cfg_attr(coverage_nightly, coverage(off))]
#[cfg(test)]
mod tests {
    use std::fmt::Debug;
    use std::sync::atomic::{AtomicU32, Ordering};

    use layered::Execute;
    use tick::Clock;

    use super::*;
    use crate::testing::RecoverableType;

    // Verifies every default set by `RetryLayer::new`.
    #[test]
    fn new_creates_correct_initial_state() {
        let context = create_test_context();
        let layer: RetryLayer<_, _, NotSet, NotSet> = RetryLayer::new("test_retry".into(), &context);

        assert_eq!(layer.max_attempts, 4); // 3 retries + 1 original = 4 total
        assert!(matches!(layer.backoff.backoff_type, Backoff::Exponential));
        assert_eq!(layer.backoff.base_delay, Duration::from_millis(10));
        assert!(layer.backoff.max_delay.is_none());
        assert!(layer.backoff.use_jitter); // Default is true
        assert!(layer.clone_input.is_none());
        assert!(layer.should_recover.is_none());
        assert!(layer.on_retry.is_none());
        assert_eq!(layer.telemetry.strategy_name.as_ref(), "test_retry");
        assert!(layer.enable_if.call(&"test_input".to_string()));
    }

    // `clone_input_with` stores the callback and flips S1 to `Set`.
    #[test]
    fn clone_input_sets_correctly() {
        let context = create_test_context();
        let layer = RetryLayer::new("test".into(), &context);

        let layer: RetryLayer<_, _, Set, NotSet> = layer.clone_input_with(|input, _args| Some(input.clone()));

        let result = layer.clone_input.unwrap().call(
            &mut "test".to_string(),
            CloneArgs {
                attempt: Attempt::new(0, false),
                previous_recovery: None,
            },
        );
        assert_eq!(result, Some("test".to_string()));
    }

    // `recovery_with` stores the classifier and flips S2 to `Set`.
    #[test]
    fn recovery_sets_correctly() {
        let context = create_test_context();
        let layer = RetryLayer::new("test".into(), &context);

        let layer: RetryLayer<_, _, NotSet, Set> = layer.recovery_with(|output, _args| {
            if output.contains("error") {
                RecoveryInfo::retry()
            } else {
                RecoveryInfo::never()
            }
        });

        let result = layer.should_recover.as_ref().unwrap().call(
            &"error message".to_string(),
            RecoveryArgs {
                attempt: Attempt::new(1, false),
                clock: context.get_clock(),
            },
        );
        assert_eq!(result, RecoveryInfo::retry());

        let result = layer.should_recover.as_ref().unwrap().call(
            &"success".to_string(),
            RecoveryArgs {
                attempt: Attempt::new(1, false),
                clock: context.get_clock(),
            },
        );
        assert_eq!(result, RecoveryInfo::never());
    }

    // The trait-based `recovery()` delegates to `Recovery::recovery()` on the output.
    #[test]
    fn recovery_auto_sets_correctly() {
        let context = ResilienceContext::<RecoverableType, RecoverableType>::new(Clock::new_frozen());
        let layer = RetryLayer::new("test".into(), &context);

        let layer: RetryLayer<_, _, NotSet, Set> = layer.recovery();

        let result = layer.should_recover.as_ref().unwrap().call(
            &RecoverableType::from(RecoveryInfo::retry()),
            RecoveryArgs {
                attempt: Attempt::new(1, false),
                clock: context.get_clock(),
            },
        );
        assert_eq!(result, RecoveryInfo::retry());

        let result = layer.should_recover.as_ref().unwrap().call(
            &RecoverableType::from(RecoveryInfo::never()),
            RecoveryArgs {
                attempt: Attempt::new(1, false),
                clock: context.get_clock(),
            },
        );
        assert_eq!(result, RecoveryInfo::never());
    }

    // Each builder setter writes through to the corresponding field.
    #[test]
    fn configuration_methods_work() {
        let layer = create_ready_layer()
            .max_retry_attempts(5)
            .backoff(Backoff::Exponential)
            .base_delay(Duration::from_millis(500))
            .max_delay(Duration::from_secs(30))
            .use_jitter(true);

        assert_eq!(layer.max_attempts, 6);
        assert!(matches!(layer.backoff.backoff_type, Backoff::Exponential));
        assert_eq!(layer.backoff.base_delay, Duration::from_millis(500));
        assert_eq!(layer.backoff.max_delay, Some(Duration::from_secs(30)));
        assert!(layer.backoff.use_jitter);
    }

    // The on_retry callback is stored and invocable.
    #[test]
    fn on_retry_works() {
        let called = Arc::new(AtomicU32::new(0));
        let called_clone = Arc::clone(&called);

        let layer = create_ready_layer().on_retry(move |_output, _args| {
            called_clone.fetch_add(1, Ordering::SeqCst);
        });

        layer.on_retry.unwrap().call(
            &"output".to_string(),
            OnRetryArgs {
                retry_delay: Duration::ZERO,
                attempt: Attempt::new(1, false),
                recovery: RecoveryInfo::retry(),
            },
        );

        assert_eq!(called.load(Ordering::SeqCst), 1);
    }

    // enable_if / disable / enable_always each replace the previous condition.
    #[test]
    fn enable_disable_conditions_work() {
        let layer = create_ready_layer().enable_if(|input| input.contains("enable"));

        assert!(layer.enable_if.call(&"enable_test".to_string()));
        assert!(!layer.enable_if.call(&"disable_test".to_string()));

        let layer = layer.disable();
        assert!(!layer.enable_if.call(&"anything".to_string()));

        let layer = layer.enable_always();
        assert!(layer.enable_if.call(&"anything".to_string()));
    }

    // A fully-configured (Set, Set) layer implements Layer and builds a service.
    #[test]
    fn layer_builds_service_when_ready() {
        let layer = create_ready_layer();
        let _service = layer.layer(Execute::new(|input: String| async move { input }));
    }

    #[test]
    fn handle_unavailable_sets_correctly() {
        let context = create_test_context();
        let layer = RetryLayer::new("test".into(), &context);

        // Test default value
        assert!(!layer.handle_unavailable);

        // Test enabling outage handling
        let layer = layer.handle_unavailable(true);
        assert!(layer.handle_unavailable);

        // Test disabling outage handling
        let layer = layer.handle_unavailable(false);
        assert!(!layer.handle_unavailable);
    }

    // restore_input stores the callback; the callback may also mutate the output.
    #[test]
    fn restore_input_sets_correctly() {
        let context = create_test_context();
        let layer = RetryLayer::new("test".into(), &context);

        let layer = layer.restore_input(|output: &mut String, _args| {
            (output == "restore_me").then(|| {
                *output = "modified_output".to_string();
                "restored_input".to_string()
            })
        });

        let mut test_output = "restore_me".to_string();
        let result = layer.restore_input.as_ref().unwrap().call(
            &mut test_output,
            RestoreInputArgs {
                attempt: Attempt::new(1, false),
                recovery: RecoveryInfo::retry(),
            },
        );

        match result {
            Some(input) => {
                assert_eq!(input, "restored_input");
                assert_eq!(test_output, "modified_output");
            }
            None => panic!("Expected Some, got None"),
        }

        let mut test_output2 = "no_restore".to_string();
        let result = layer.restore_input.as_ref().unwrap().call(
            &mut test_output2,
            RestoreInputArgs {
                attempt: Attempt::new(1, false),
                recovery: RecoveryInfo::retry(),
            },
        );

        match result {
            None => {
                assert_eq!(test_output2, "no_restore");
            }
            Some(_) => panic!("Expected None, got Some"),
        }
    }

    // restore_input_from_error only fires on Err outputs and can take the error's data.
    #[test]
    fn restore_input_from_error_sets_correctly() {
        let context: ResilienceContext<String, Result<String, String>> = ResilienceContext::new(Clock::new_frozen()).name("test");
        let layer = RetryLayer::new("test".into(), &context)
            .restore_input_from_error(|e: &mut String, _| (e == "restore").then(|| std::mem::take(e)));

        let restore = layer.restore_input.as_ref().unwrap();
        let args = || RestoreInputArgs {
            attempt: Attempt::new(1, false),
            recovery: RecoveryInfo::retry(),
        };

        assert_eq!(restore.call(&mut Err("restore".into()), args()), Some("restore".to_string()));
        assert_eq!(restore.call(&mut Err("other".into()), args()), None);
        assert_eq!(restore.call(&mut Ok("success".into()), args()), None);
    }

    // Snapshot-tests that `config` applies every RetryConfig field.
    #[cfg_attr(miri, ignore)]
    #[test]
    fn config_applies_all_settings() {
        let config = RetryConfig {
            enabled: false,
            backoff_type: Backoff::Linear,
            base_delay: Duration::from_secs(2),
            max_delay: Some(Duration::from_secs(60)),
            use_jitter: false,
            max_retry_attempts: 7,
            handle_unavailable: true,
        };

        let layer = create_ready_layer().config(&config);

        insta::assert_debug_snapshot!(layer);
    }

    // Compile-time checks: only the (Set, Set) state implements Layer.
    #[test]
    fn static_assertions() {
        static_assertions::assert_impl_all!(RetryLayer<String, String, Set, Set>: Layer<String>);
        static_assertions::assert_not_impl_all!(RetryLayer<String, String, Set, NotSet>: Layer<String>);
        static_assertions::assert_not_impl_all!(RetryLayer<String, String, NotSet, Set>: Layer<String>);
        static_assertions::assert_impl_all!(RetryLayer<String, String, Set, Set>: Debug);
    }

    // Shared fixture: a String-in/String-out context on a frozen clock.
    fn create_test_context() -> ResilienceContext<String, String> {
        ResilienceContext::new(Clock::new_frozen()).name("test_pipeline")
    }

    // Shared fixture: a fully-configured (Set, Set) layer ready to build.
    fn create_ready_layer() -> RetryLayer<String, String, Set, Set> {
        RetryLayer::new("test".into(), &create_test_context())
            .clone_input_with(|input, _args| Some(input.clone()))
            .recovery_with(|output, _args| {
                if output.contains("error") {
                    RecoveryInfo::retry()
                } else {
                    RecoveryInfo::never()
                }
            })
    }
}