s3_algo/config.rs
use serde::{Deserialize, Serialize};

#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
#[serde(deny_unknown_fields)]
pub struct Config {
    /// Maximum number of simultaneous upload requests
    pub copy_parallelization: usize,

    /// Maximum number of simultaneous download requests
    pub download_parallelization: usize,

    pub algorithm: AlgorithmConfig,

    /// The "unit" of a delete request is the number of objects
    pub delete_requests: SpecificTimings,

    /// NOTE: For now, `put_requests` is used for S3 `get`, `put`, and `copy` operations alike.
    /// Reason: we don't know whether separate configurations for these operations are worth it,
    /// since they all have a duration that depends on the number of bytes of the objects in
    /// question. The "unit" of such requests is the number of bytes.
    pub put_requests: SpecificTimings,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            copy_parallelization: 20,
            download_parallelization: 20,
            algorithm: Default::default(),
            delete_requests: SpecificTimings {
                seconds_per_unit: 0.2,
                minimum_units_for_estimation: 10,
            },
            put_requests: SpecificTimings {
                seconds_per_unit: 1.0 / 1_000_000.0, // 1 MB/s = 1e-6 seconds per byte
                minimum_units_for_estimation: 10,
            },
        }
    }
}

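// A minimal usage sketch, not part of the crate's public API: because of the
// container-level `#[serde(default)]`, a partial config deserializes with the
// remaining fields taken from `Default::default()`, while `deny_unknown_fields`
// rejects misspelled keys. Assumes `serde_json` is available as a dev-dependency.
#[cfg(test)]
mod config_deserialization_sketch {
    use super::*;

    #[test]
    fn partial_config_falls_back_to_defaults() {
        let cfg: Config = serde_json::from_str(r#"{ "copy_parallelization": 50 }"#).unwrap();
        assert_eq!(cfg.copy_parallelization, 50);
        // Fields absent from the input come from `Default::default()`.
        assert_eq!(cfg.download_parallelization, 20);

        // A misspelled key is rejected because of `deny_unknown_fields`.
        assert!(serde_json::from_str::<Config>(r#"{ "copy_paralellization": 50 }"#).is_err());
    }
}
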
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct AlgorithmConfig {
    /// The base timeout that is always included (an estimate of the round-trip time)
    pub base_timeout: f64,

    /// The timeout is set to this multiple of the expected upload time (should be > 1.0)
    pub timeout_fraction: f64,

    /// On every retry, the timeout is multiplied by `backoff` (should be > 1.0)
    pub backoff: f64,

    /// Number of times to retry a single request before giving up
    pub n_retries: usize,

    /// To estimate the upload speed incrementally, we use an exponential average:
    /// `new_avg_speed = avg_power * new_speed + (1 - avg_power) * avg_speed`.
    ///
    /// `avg_power` lies between 0.0 and 1.0; the closer to 1.0, the more weight
    /// newer data points carry.
    pub avg_power: f64,
}

impl Default for AlgorithmConfig {
    fn default() -> Self {
        Self {
            base_timeout: 0.5,
            timeout_fraction: 1.5,
            backoff: 1.5,
            n_retries: 8,
            avg_power: 0.7,
        }
    }
}

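// Illustrative sketch only; the real retry logic lives elsewhere in the crate.
// How the knobs combine here is an assumption made for illustration: the timeout
// starts at `base_timeout` plus `timeout_fraction` times the expected duration,
// and each retry multiplies it by `backoff`. `update_avg_speed` is exactly the
// exponential average from the `avg_power` doc comment above.
#[cfg(test)]
mod algorithm_config_sketch {
    use super::*;

    /// Hypothetical helper: timeout in seconds for the given attempt (0 = first try).
    fn timeout_for_attempt(cfg: &AlgorithmConfig, expected_seconds: f64, attempt: u32) -> f64 {
        (cfg.base_timeout + cfg.timeout_fraction * expected_seconds)
            * cfg.backoff.powi(attempt as i32)
    }

    /// `new_avg_speed = avg_power * new_speed + (1 - avg_power) * avg_speed`
    fn update_avg_speed(cfg: &AlgorithmConfig, avg_speed: f64, new_speed: f64) -> f64 {
        cfg.avg_power * new_speed + (1.0 - cfg.avg_power) * avg_speed
    }

    #[test]
    fn defaults_behave_as_documented() {
        let cfg = AlgorithmConfig::default();
        // First try: 0.5 + 1.5 * 2.0 = 3.5 s; after one retry: 3.5 * 1.5 = 5.25 s.
        assert!((timeout_for_attempt(&cfg, 2.0, 0) - 3.5).abs() < 1e-9);
        assert!((timeout_for_attempt(&cfg, 2.0, 1) - 5.25).abs() < 1e-9);
        // With avg_power = 0.7 the newest sample dominates: 0.7 * 10.0 + 0.3 * 2.0 = 7.6.
        assert!((update_avg_speed(&cfg, 2.0, 10.0) - 7.6).abs() < 1e-9);
    }
}
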
/// Settings specific to one kind of operation, for example `delete` or `put` in S3.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SpecificTimings {
    /// The initial estimate of extra timeout per unit (byte or object)
    pub seconds_per_unit: f64,
    /// The number of units in a request below which the request does not affect the estimation
    pub minimum_units_for_estimation: usize,
}

impl SpecificTimings {
    /// Sane default settings for when the unit is number of bytes
    pub fn default_for_bytes() -> Self {
        Self {
            seconds_per_unit: 1.0 / 1_000_000.0, // 1 MB/s = 1e-6 seconds per byte
            minimum_units_for_estimation: 500_000, // 500 KB
        }
    }

    /// Sane default settings for when the unit is number of objects
    pub fn default_for_objects() -> Self {
        Self {
            seconds_per_unit: 0.2,
            minimum_units_for_estimation: 2,
        }
    }
}

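// Hypothetical arithmetic check to make the units above concrete:
// `seconds_per_unit` is a per-byte (or per-object) allowance, so with the byte
// defaults (1 MB/s) a 10 MB payload accounts for about 10 seconds, and requests
// below `minimum_units_for_estimation` are too small to inform the speed estimate.
#[cfg(test)]
mod specific_timings_sketch {
    use super::*;

    #[test]
    fn per_byte_allowance_adds_up() {
        let timings = SpecificTimings::default_for_bytes();
        let bytes = 10_000_000.0; // a 10 MB object
        // 1e-6 seconds per byte * 1e7 bytes = 10 seconds.
        assert!((timings.seconds_per_unit * bytes - 10.0).abs() < 1e-9);
        // Requests smaller than ~500 KB do not affect the estimation.
        assert_eq!(timings.minimum_units_for_estimation, 500_000);
    }
}
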
// DRAFT
//
// We don't have an "avg_min_bytes" now, because we will just subtract the assumed
// constant anyway.
// What if that assumption is wrong?
// Well, it should be rather small in any case; it is meant to cover only the RTT.
// And if the subtraction comes out negative after all? Then it is unclear what to do.

// put_timeout_per_byte..?
// Should be configured as an assumed MB/s just like before: expected_upload_speed.
// delete_timeout_per_object..?
// Quite straightforward: seconds per object.
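
// Sketch of the subtraction discussed in the draft above, for illustration only.
// Clamping at zero is just one possible answer to the open question about negative
// results, not a decision made here; `measured_seconds` and `bytes` are hypothetical
// inputs.
#[cfg(test)]
mod draft_subtraction_sketch {
    use super::*;

    /// Hypothetical: estimate seconds-per-byte from one request, after subtracting
    /// the assumed constant part (the RTT estimate, `base_timeout`).
    fn seconds_per_byte(cfg: &AlgorithmConfig, measured_seconds: f64, bytes: f64) -> f64 {
        (measured_seconds - cfg.base_timeout).max(0.0) / bytes
    }

    #[test]
    fn subtraction_clamps_at_zero() {
        let cfg = AlgorithmConfig::default();
        // 2.5 s measured for 2 MB: (2.5 - 0.5) / 2e6 = 1e-6 s per byte.
        assert!((seconds_per_byte(&cfg, 2.5, 2_000_000.0) - 1e-6).abs() < 1e-12);
        // A measurement below the RTT estimate clamps to zero instead of going negative.
        assert_eq!(seconds_per_byte(&cfg, 0.3, 1_000_000.0), 0.0);
    }
}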