// fakecloud_s3/simulation.rs
use crate::lifecycle::LifecycleProcessor;
use crate::state::SharedS3State;

/// Aggregate counters produced by a single lifecycle tick.
///
/// All counters are computed by diffing a pre-tick snapshot of the bucket
/// state against the post-tick state.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct LifecycleTickResult {
    /// Buckets that had a lifecycle configuration attached when the tick began.
    pub processed_buckets: u64,
    /// Objects that were removed (expired) during the tick.
    pub expired_objects: u64,
    /// Objects whose storage class changed during the tick.
    pub transitioned_objects: u64,
}
10
/// Per-bucket view captured *before* a lifecycle tick runs, used afterwards
/// to diff against the post-tick state and attribute expirations/transitions.
struct BucketSnapshot {
    /// Bucket name; used to re-look the bucket up after the tick.
    name: String,
    /// Number of objects in the bucket at snapshot time.
    object_count: usize,
    /// `(key, storage_class)` for every object at snapshot time.
    storage_classes: Vec<(String, String)>,
}
17
18pub fn tick_lifecycle(state: &SharedS3State) -> LifecycleTickResult {
20 let (buckets_with_lifecycle, before_snapshot) = {
22 let s = state.read();
23 let mut count = 0u64;
24 let mut snapshot: Vec<BucketSnapshot> = Vec::new();
25 for bucket in s.buckets.values() {
26 let classes: Vec<(String, String)> = bucket
27 .objects
28 .iter()
29 .map(|(k, o)| (k.clone(), o.storage_class.clone()))
30 .collect();
31 snapshot.push(BucketSnapshot {
32 name: bucket.name.clone(),
33 object_count: bucket.objects.len(),
34 storage_classes: classes,
35 });
36 if bucket.lifecycle_config.is_some() {
37 count += 1;
38 }
39 }
40 (count, snapshot)
41 };
42
43 let processor = LifecycleProcessor::new(state.clone());
45 processor.tick();
46
47 let mut expired_objects = 0u64;
49 let mut transitioned_objects = 0u64;
50
51 let s = state.read();
52 for snap in &before_snapshot {
53 let bucket = match s.buckets.get(&snap.name) {
54 Some(b) => b,
55 None => continue,
56 };
57
58 let after_count = bucket.objects.len();
60 if snap.object_count > after_count {
61 expired_objects += (snap.object_count - after_count) as u64;
62 }
63
64 for (key, old_class) in &snap.storage_classes {
66 if let Some(obj) = bucket.objects.get(key) {
67 if &obj.storage_class != old_class {
68 transitioned_objects += 1;
69 }
70 }
71 }
72 }
73
74 LifecycleTickResult {
75 processed_buckets: buckets_with_lifecycle,
76 expired_objects,
77 transitioned_objects,
78 }
79}
80
#[cfg(test)]
mod tests {
    use super::*;
    use crate::state::{S3Bucket, S3Object, S3State};
    use bytes::Bytes;
    use chrono::{Duration, Utc};
    use parking_lot::RwLock;
    use std::collections::HashMap;
    use std::sync::Arc;

    /// Fresh shared state with a fixed account id and region.
    fn make_state() -> SharedS3State {
        Arc::new(RwLock::new(S3State::new("123456789012", "us-east-1")))
    }

    /// Builds a minimal STANDARD-class object whose `last_modified` is
    /// backdated by `age_days`, so lifecycle rules with day thresholds fire.
    fn make_object(key: &str, age_days: i64) -> S3Object {
        S3Object {
            key: key.to_string(),
            data: Bytes::from("test"),
            content_type: "application/octet-stream".to_string(),
            etag: "\"abc\"".to_string(),
            size: 4,
            // Backdate so the object appears `age_days` old to the processor.
            last_modified: Utc::now() - Duration::days(age_days),
            metadata: HashMap::new(),
            storage_class: "STANDARD".to_string(),
            tags: HashMap::new(),
            acl_grants: Vec::new(),
            acl_owner_id: None,
            parts_count: None,
            part_sizes: None,
            sse_algorithm: None,
            sse_kms_key_id: None,
            bucket_key_enabled: None,
            version_id: None,
            is_delete_marker: false,
            content_encoding: None,
            website_redirect_location: None,
            restore_ongoing: None,
            restore_expiry: None,
            checksum_algorithm: None,
            checksum_value: None,
            lock_mode: None,
            lock_retain_until: None,
            lock_legal_hold: None,
        }
    }

    /// A 1-day expiration rule removes the 5-day-old object but keeps the
    /// fresh one, and the counters reflect exactly one expiration.
    #[test]
    fn tick_lifecycle_expires_objects() {
        let state = make_state();

        {
            let mut s = state.write();
            let mut bucket = S3Bucket::new("test-bucket", "us-east-1", "123456789012");
            bucket.lifecycle_config = Some(
                r#"<LifecycleConfiguration>
                <Rule>
                <Filter><Prefix></Prefix></Filter>
                <Status>Enabled</Status>
                <Expiration><Days>1</Days></Expiration>
                </Rule>
                </LifecycleConfiguration>"#
                    .to_string(),
            );
            bucket
                .objects
                .insert("old-file.txt".to_string(), make_object("old-file.txt", 5));
            bucket
                .objects
                .insert("new-file.txt".to_string(), make_object("new-file.txt", 0));
            s.buckets.insert("test-bucket".to_string(), bucket);
        }

        let result = tick_lifecycle(&state);
        assert_eq!(result.processed_buckets, 1);
        assert_eq!(result.expired_objects, 1);
        assert_eq!(result.transitioned_objects, 0);

        // Only the fresh object should survive in the bucket.
        let s = state.read();
        let bucket = s.buckets.get("test-bucket").unwrap();
        assert_eq!(bucket.objects.len(), 1);
        assert!(bucket.objects.contains_key("new-file.txt"));
    }

    /// A 1-day GLACIER transition rule changes the old object's storage
    /// class in place and is reported as one transition, zero expirations.
    #[test]
    fn tick_lifecycle_transitions_objects() {
        let state = make_state();

        {
            let mut s = state.write();
            let mut bucket = S3Bucket::new("trans-bucket", "us-east-1", "123456789012");
            bucket.lifecycle_config = Some(
                r#"<LifecycleConfiguration>
                <Rule>
                <Filter><Prefix></Prefix></Filter>
                <Status>Enabled</Status>
                <Transition>
                <Days>1</Days>
                <StorageClass>GLACIER</StorageClass>
                </Transition>
                </Rule>
                </LifecycleConfiguration>"#
                    .to_string(),
            );
            bucket
                .objects
                .insert("old-file.txt".to_string(), make_object("old-file.txt", 5));
            s.buckets.insert("trans-bucket".to_string(), bucket);
        }

        let result = tick_lifecycle(&state);
        assert_eq!(result.processed_buckets, 1);
        assert_eq!(result.expired_objects, 0);
        assert_eq!(result.transitioned_objects, 1);

        // The object still exists, now in the GLACIER storage class.
        let s = state.read();
        let obj = s.buckets["trans-bucket"]
            .objects
            .get("old-file.txt")
            .unwrap();
        assert_eq!(obj.storage_class, "GLACIER");
    }

    /// A bucket with no lifecycle configuration is not processed and
    /// contributes nothing to any counter.
    #[test]
    fn tick_lifecycle_no_config_returns_zero() {
        let state = make_state();

        {
            let mut s = state.write();
            let bucket = S3Bucket::new("empty-bucket", "us-east-1", "123456789012");
            s.buckets.insert("empty-bucket".to_string(), bucket);
        }

        let result = tick_lifecycle(&state);
        assert_eq!(result.processed_buckets, 0);
        assert_eq!(result.expired_objects, 0);
        assert_eq!(result.transitioned_objects, 0);
    }
}