s3_algo/
config.rs

use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
#[serde(deny_unknown_fields)]
pub struct Config {
    /// Maximum number of simultaneous upload requests
    pub copy_parallelization: usize,

    pub algorithm: AlgorithmConfig,

    /// The "unit" of a delete request is the number of objects.
    pub delete_requests: SpecificTimings,

    /// NOTE: For now, `put_requests` is used for S3 `get`, `put` and `copy` operations alike.
    /// Reason: we don't know yet whether separate configurations for these operations are
    /// worth it; they all have a duration that depends on the number of bytes of the objects
    /// in question. The "unit" of such requests is the number of bytes.
    pub put_requests: SpecificTimings,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            copy_parallelization: 20,
            algorithm: Default::default(),
            delete_requests: SpecificTimings {
                seconds_per_unit: 0.2,
                minimum_units_for_estimation: 10,
            },
            put_requests: SpecificTimings {
                seconds_per_unit: 1.0 / 1_000_000.0, // 1 MB/s, i.e. 1e-6 seconds per byte
                minimum_units_for_estimation: 10,
            },
        }
    }
}
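
// Example (a sketch, assuming `serde_json` is available as a dev-dependency; the module and
// test names below are hypothetical): because of `#[serde(default)]`, a partial config is
// filled in from `Default`, while `#[serde(deny_unknown_fields)]` rejects misspelled keys.
#[cfg(test)]
mod config_deserialization_example {
    use super::*;

    #[test]
    fn partial_config_falls_back_to_defaults() {
        // Only `copy_parallelization` is given; every other field comes from `Default`.
        let cfg: Config = serde_json::from_str(r#"{ "copy_parallelization": 4 }"#).unwrap();
        assert_eq!(cfg.copy_parallelization, 4);
        assert_eq!(cfg.algorithm.n_retries, 8);
    }
}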

#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct AlgorithmConfig {
    /// The base timeout that is always applied (an estimate of the RTT)
    pub base_timeout: f64,

    /// The timeout is set to this multiple of the expected upload time (> 1.0)
    pub timeout_fraction: f64,

    /// On every retry, the timeout is multiplied by `backoff` (> 1.0)
    pub backoff: f64,

    /// Number of times to retry a single request before giving up
    pub n_retries: usize,

    /// To estimate the upload speed incrementally, we use an exponential average:
    /// `new_avg_speed = avg_power * new_speed + (1 - avg_power) * avg_speed`.
    ///
    /// `avg_power` lies between 0.0 and 1.0; the closer it is to 1.0, the more weight
    /// newer data points carry.
    pub avg_power: f64,
}
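
// Sketch (a hypothetical helper, not part of this crate's API): one step of the exponential
// average described above. With `avg_power = 0.7`, a new observation of 8.0 MB/s against a
// running average of 4.0 MB/s yields 0.7 * 8.0 + 0.3 * 4.0 = 6.8 MB/s.
#[allow(dead_code)]
fn update_avg_speed(cfg: &AlgorithmConfig, avg_speed: f64, new_speed: f64) -> f64 {
    cfg.avg_power * new_speed + (1.0 - cfg.avg_power) * avg_speed
}
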
impl Default for AlgorithmConfig {
    fn default() -> Self {
        Self {
            base_timeout: 0.5,
            timeout_fraction: 1.5,
            backoff: 1.5,
            n_retries: 8,
            avg_power: 0.7,
        }
    }
}
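
// Sketch (a hypothetical helper, not part of this crate's API): one way these parameters could
// combine into a per-attempt timeout. With the defaults above and an expected upload time of
// 2.0 s, attempt 0 gets 0.5 + 1.5 * 2.0 = 3.5 s, attempt 1 gets 3.5 * 1.5 = 5.25 s, and so on,
// for at most `n_retries` retries.
#[allow(dead_code)]
fn timeout_for_attempt(cfg: &AlgorithmConfig, expected_seconds: f64, attempt: u32) -> f64 {
    (cfg.base_timeout + cfg.timeout_fraction * expected_seconds) * cfg.backoff.powi(attempt as i32)
}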

/// These settings are specific to the kind of operation we do, for example delete or put in S3.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SpecificTimings {
    /// The initial estimate of extra timeout per unit (byte or object)
    pub seconds_per_unit: f64,
    /// The number of units in a request below which it does not affect estimation
    pub minimum_units_for_estimation: usize,
}

impl SpecificTimings {
    /// Sane default setting for when the size is the number of bytes
    pub fn default_for_bytes() -> Self {
        Self {
            seconds_per_unit: 1.0 / 1_000_000.0,   // 1 MB/s
            minimum_units_for_estimation: 500_000, // 500 KB
        }
    }
    /// Sane default setting for when the size is the number of objects
    pub fn default_for_objects() -> Self {
        Self {
            seconds_per_unit: 0.2,
            minimum_units_for_estimation: 2,
        }
    }
}
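
// Sketch (hypothetical helpers, not the crate's actual estimator): the extra timeout for a
// request scales linearly with its size in units, and requests smaller than
// `minimum_units_for_estimation` are left out when updating the speed estimate. For example,
// with `default_for_bytes()`, a 10 MB object adds 10_000_000 * 1e-6 = 10 s of timeout.
#[allow(dead_code)]
fn extra_timeout(timings: &SpecificTimings, units: usize) -> f64 {
    timings.seconds_per_unit * units as f64
}

#[allow(dead_code)]
fn counts_toward_estimation(timings: &SpecificTimings, units: usize) -> bool {
    units >= timings.minimum_units_for_estimation
}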

// DRAFT
//
// Now, we don't have "avg_min_bytes", because we will just subtract the assumed constant
// anyway.
// What if the assumption is wrong?
// Well, it should be rather small anyway; it is exclusively meant to account for the RTT.
// And if the subtraction turns out negative after all? That is still an open question.

// put_timeout_per_byte..?
//  should be configured as an assumed MB/s just like before: expected_upload_speed.
// delete_timeout_per_object..?
//  quite straightforward: seconds per object