s4_server/multipart_state.rs
//! Per-`upload_id` side-table for multipart uploads (v0.8 BUG-5..10 fix).
//!
//! S3 multipart is split across three handlers:
//!
//! - `CreateMultipartUpload` — receives the SSE / Tagging / Object-Lock
//!   headers the client wants applied to the eventual object.
//! - `UploadPart` × N — receives only the body bytes + part number;
//!   the SSE-C headers must be replayed by the client (AWS spec) but
//!   SSE-S4 / SSE-KMS / Tagging / Object-Lock are NOT replayed (they
//!   live on the upload itself).
//! - `CompleteMultipartUpload` — receives only the part-list manifest;
//!   no metadata reaches this handler from the wire either.
//!
//! v0.7 #48 fixed the single-PUT path to `take()` the SSE request
//! fields off the s3s input, encrypt-then-store, and stamp the
//! `s4-sse-type` metadata on the resulting object so HEAD can echo it
//! correctly. The multipart path needs the equivalent treatment, but
//! the per-upload context is split across three handler invocations —
//! this module is the side-channel that carries it from
//! `CreateMultipartUpload` through to `UploadPart` /
//! `CompleteMultipartUpload`.
//!
//! The store is keyed on the backend-issued `upload_id` (the opaque
//! string returned in `CreateMultipartUpload`'s response). `put` /
//! `get` / `remove` are all `O(1)` under a single `RwLock<HashMap>`;
//! multipart upload throughput is dominated by the part-body PUTs to
//! the backend (5 MiB+ each), so the lock is never the bottleneck.
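//!
//! A sketch of the handler flow, assuming the store hangs off the
//! service as `self.multipart_state` (field and variable names here
//! are illustrative, not the exact handler code):
//!
//! ```rust,ignore
//! // CreateMultipartUpload: capture the recipe under the fresh id.
//! self.multipart_state.put(&upload_id, MultipartUploadContext {
//!     bucket,
//!     key,
//!     sse: MultipartSseMode::SseS4,
//!     tags: None,
//!     object_lock_mode: None,
//!     object_lock_retain_until: None,
//!     object_lock_legal_hold: false,
//! });
//!
//! // UploadPart / CompleteMultipartUpload: replay the recipe.
//! if let Some(ctx) = self.multipart_state.get(&upload_id) {
//!     // ... encrypt / tag / lock according to `ctx` ...
//! }
//!
//! // Complete or Abort: drop the entry (zeroizes any SSE-C key).
//! self.multipart_state.remove(&upload_id);
//! ```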

use std::collections::HashMap;
use std::sync::Arc;
use std::sync::RwLock;

use chrono::{DateTime, Utc};
use dashmap::DashMap;
use tokio::sync::Mutex;
use zeroize::Zeroizing;

use crate::object_lock::LockMode;
use crate::tagging::TagSet;

/// SSE recipe captured at `CreateMultipartUpload` time and replayed for
/// every part body + the final stamp on the assembled object.
///
/// The variants mirror `service::put_object`'s SSE branch precedence:
/// SSE-C (per-request customer key) wins over SSE-KMS (named KMS key),
/// which wins over SSE-S4 (server-managed keyring), which wins over no
/// encryption. SSE-C / SSE-KMS materialise only when the client
/// supplied the matching headers; SSE-S4 materialises whenever the
/// gateway is booted with `--sse-s4-key` (or `with_sse_keyring(...)` in
/// tests).
///
/// v0.8.2 #62 (H-6 audit fix): the `SseC` variant's customer key is held
/// in `Zeroizing<[u8; 32]>` so the raw 32-byte AES key is overwritten
/// with `0u8` when the entry is dropped — either via `remove(upload_id)`
/// on Complete/Abort, or via `sweep_stale(...)` on an abandoned upload.
/// A process core dump / swap-out / KSM snapshot can no longer leak a
/// previously-held SSE-C key after the upload's lifetime ends. The
/// `key_md5` is deliberately a plain `[u8; 16]` — it's a public
/// fingerprint (S3 puts it on the wire on every PUT/GET response) and
/// requires no zeroization. The manual `PartialEq` compares through the
/// `Zeroizing` wrapper (by deref), so existing tests that match on the
/// variant keep compiling.
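///
/// A minimal construction sketch; the real handlers derive the key and
/// MD5 from the `x-amz-server-side-encryption-customer-*` headers, so
/// the literals below are illustrative only:
///
/// ```rust,ignore
/// use zeroize::Zeroizing;
///
/// let sse = MultipartSseMode::SseC {
///     key: Zeroizing::new([0u8; 32]), // wiped on drop
///     key_md5: [0u8; 16],             // public fingerprint, never wiped
/// };
/// assert_ne!(sse, MultipartSseMode::None);
/// ```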
#[derive(Clone, Debug)]
pub enum MultipartSseMode {
    /// Plaintext multipart. Backend stores raw framed bytes.
    None,
    /// Server-managed keyring (active key on PUT, all keys probed on GET).
    /// The keyring itself lives on `S4Service`; only the marker is held
    /// here so `complete_multipart_upload` knows which path to take.
    SseS4,
    /// Per-request customer key. The 32-byte key + its 128-bit MD5 are
    /// kept in memory only for the lifetime of the upload, then dropped
    /// when the entry is `remove(...)`'d on Complete or Abort. v0.8.2
    /// #62: `key` is `Zeroizing<[u8; 32]>` so its bytes are wiped on
    /// drop (vs. a bare `[u8; 32]`, which would linger on the heap /
    /// stack until the next allocation reuse).
    SseC {
        key: Zeroizing<[u8; 32]>,
        key_md5: [u8; 16],
    },
    /// Named KMS key (resolved against the gateway's KMS backend on
    /// Complete to generate the per-object DEK).
    SseKms {
        key_id: String,
    },
}

// Manual `PartialEq` / `Eq` so `Zeroizing<[u8; 32]>` (which doesn't
// derive `PartialEq`) doesn't break the existing `assert_eq!` call
// sites. Compares by deref to the inner `[u8; 32]`.
impl PartialEq for MultipartSseMode {
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (MultipartSseMode::None, MultipartSseMode::None) => true,
            (MultipartSseMode::SseS4, MultipartSseMode::SseS4) => true,
            (
                MultipartSseMode::SseC { key: a, key_md5: am },
                MultipartSseMode::SseC { key: b, key_md5: bm },
            ) => a.as_slice() == b.as_slice() && am == bm,
            (
                MultipartSseMode::SseKms { key_id: a },
                MultipartSseMode::SseKms { key_id: b },
            ) => a == b,
            _ => false,
        }
    }
}
impl Eq for MultipartSseMode {}

/// Everything `CreateMultipartUpload` captured for `UploadPart` /
/// `CompleteMultipartUpload` to act on. All fields are owned so the
/// store can hand out cheap `Clone`s under the read lock.
#[derive(Clone, Debug)]
pub struct MultipartUploadContext {
    /// Bucket the upload targets. Stored even though
    /// `CompleteMultipartUploadInput::bucket` carries it too — keeps the
    /// side-table self-contained for tests / debug dumps.
    pub bucket: String,
    /// Logical object key the upload will materialise into. Stored for
    /// the same reason as `bucket`.
    pub key: String,
    /// SSE recipe captured from the Create's input headers.
    pub sse: MultipartSseMode,
    /// Tags parsed off `Tagging` / `x-amz-tagging` on Create. `None`
    /// when the client didn't ask for tagging; otherwise the `TagSet` is
    /// applied via `TagManager::put_object_tags` on Complete (BUG-9
    /// fix).
    pub tags: Option<TagSet>,
    /// Per-PUT explicit Object Lock mode supplied via
    /// `x-amz-object-lock-mode` on Create. Mirrors `put_object`'s
    /// `explicit_lock_mode` capture so Complete commits the right
    /// retention. `None` when no header was sent (Complete then falls
    /// back to the bucket default via `apply_default_on_put`).
    pub object_lock_mode: Option<LockMode>,
    /// Per-PUT explicit Object Lock retain-until timestamp.
    pub object_lock_retain_until: Option<DateTime<Utc>>,
    /// Per-PUT explicit Object Lock legal-hold flag (`true` when
    /// `x-amz-object-lock-legal-hold: ON` was sent on Create).
    pub object_lock_legal_hold: bool,
}

/// In-memory side-table mapping `upload_id` → context. One of these
/// hangs off `S4Service` (always-on, no flag — the per-upload state is
/// gateway-internal).
///
/// v0.8.2 #62 (H-6 audit fix): each entry carries the `DateTime<Utc>`
/// of its `put` insertion so `sweep_stale(now, max_age)` can drop
/// abandoned upload contexts (client called `CreateMultipartUpload`,
/// uploaded some parts, then crashed without invoking
/// `CompleteMultipartUpload` / `AbortMultipartUpload`). Without the
/// sweep, an SSE-C upload's raw 32-byte customer key would linger in
/// `MultipartSseMode::SseC` indefinitely. The sweep + the new
/// `Zeroizing` wrapper together bound the key's in-memory lifetime to
/// `max_age` (default 24 h via `--multipart-abandoned-ttl-hours`).
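///
/// A sketch of the background tick `main.rs` spawns, assuming a tokio
/// runtime; `service` and `ttl_hours` (the parsed
/// `--multipart-abandoned-ttl-hours` value) are illustrative bindings:
///
/// ```rust,ignore
/// let store = std::sync::Arc::clone(&service.multipart_state);
/// tokio::spawn(async move {
///     let mut tick = tokio::time::interval(std::time::Duration::from_secs(60 * 60));
///     loop {
///         tick.tick().await;
///         // Drop contexts older than the TTL; zeroizes abandoned SSE-C keys.
///         store.sweep_stale(chrono::Utc::now(), chrono::Duration::hours(ttl_hours));
///     }
/// });
/// ```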
pub struct MultipartStateStore {
    by_upload_id: RwLock<HashMap<String, (MultipartUploadContext, DateTime<Utc>)>>,
    /// v0.8.1 #59: per-(bucket, key) `Mutex` used to serialise Complete
    /// operations on the same logical key. The race window the lock
    /// closes lives inside `service::complete_multipart_upload` between
    /// `backend.get_object` (assembled-body fetch for the SSE encrypt
    /// re-PUT, BUG-5 fix) and `backend.put_object` (encrypted body
    /// write-back). Two concurrent Completes with different `upload_id`
    /// but the same `(bucket, key)` could otherwise interleave their
    /// GET / encrypt / PUT triples and overwrite each other.
    ///
    /// `DashMap` is used because the lock-acquisition path is itself
    /// `O(1)` and contention between *different* keys must not block;
    /// `DashMap`'s sharded design preserves that property, whereas a
    /// single `RwLock<HashMap<_, _>>` would serialise even unrelated
    /// keys' lock-lookup. The stored `Arc<Mutex<()>>` is what the
    /// caller actually awaits on — the `DashMap` itself is just a
    /// concurrent index into those mutexes.
    ///
    /// Cleanup is best-effort (`prune_completion_locks`); the entry
    /// for a one-shot key is dropped once both the in-flight Complete
    /// returns and the prune sweep observes only the `DashMap`'s own
    /// `Arc` reference.
    completion_locks: DashMap<(String, String), Arc<Mutex<()>>>,
}

impl MultipartStateStore {
    /// Empty store. Wrap it in `Arc<MultipartStateStore>` so
    /// `S4Service`'s async handlers can share it across `&self` calls
    /// without requiring `Clone`.
    #[must_use]
    pub fn new() -> Self {
        Self {
            by_upload_id: RwLock::new(HashMap::new()),
            completion_locks: DashMap::new(),
        }
    }

    /// Register a new upload under `upload_id`. If `upload_id` is
    /// already present (extremely unlikely — the backend issues fresh
    /// ids) the previous entry is overwritten silently, mirroring
    /// `HashMap::insert`'s replace-on-collision semantics.
    ///
    /// v0.8.2 #62: the insertion timestamp (`Utc::now()`) is stored
    /// alongside the context so `sweep_stale` can prune abandoned
    /// uploads. The timestamp is set at insert time only — re-puts on
    /// the same `upload_id` (overwrite) reset the clock, which is the
    /// behaviour we want (treat a re-Create as a restart of the
    /// abandonment clock).
    pub fn put(&self, upload_id: &str, ctx: MultipartUploadContext) {
        self.by_upload_id
            .write()
            .expect("multipart-state by_upload_id RwLock poisoned")
            .insert(upload_id.to_owned(), (ctx, Utc::now()));
    }

    /// Snapshot the context for `upload_id`. `None` when no entry was
    /// registered (e.g. Complete arrived for an upload the gateway has
    /// no record of — the request passes through to the backend
    /// untouched, which in turn surfaces `NoSuchUpload`).
    #[must_use]
    pub fn get(&self, upload_id: &str) -> Option<MultipartUploadContext> {
        self.by_upload_id
            .read()
            .expect("multipart-state by_upload_id RwLock poisoned")
            .get(upload_id)
            .map(|(c, _)| c.clone())
    }

    /// Drop the entry. Called by Complete / Abort to release the SSE-C
    /// key bytes and the tag-set memory promptly. The
    /// `Zeroizing<[u8; 32]>` wrapper inside the dropped
    /// `MultipartSseMode::SseC` variant zeros the key bytes during its
    /// `Drop`.
    pub fn remove(&self, upload_id: &str) {
        self.by_upload_id
            .write()
            .expect("multipart-state by_upload_id RwLock poisoned")
            .remove(upload_id);
    }

    /// v0.8.2 #62 (H-6 audit fix): drop every entry whose insertion
    /// timestamp is older than `now - max_age`. Returns the number of
    /// entries swept. Called from an hourly background tick spawned in
    /// `main.rs` (default TTL = 24 h, configurable via
    /// `--multipart-abandoned-ttl-hours`).
    ///
    /// Each dropped `MultipartUploadContext` runs the inner
    /// `MultipartSseMode::SseC { key: Zeroizing<[u8; 32]>, .. }`'s
    /// `Drop`, wiping the customer-supplied AES key bytes from
    /// process memory. SSE-S4 / SSE-KMS / None variants drop their
    /// (smaller) state too; only SSE-C carries raw key material.
    ///
    /// The cutoff is computed as `now - max_age` rather than
    /// `Utc::now() - max_age` so callers can drive the clock
    /// deterministically in tests (the unit tests below pass an
    /// explicit `now` from a fixed timestamp).
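    ///
    /// Call shape with a deterministic clock (a sketch mirroring the
    /// unit tests below; no sleeping required):
    ///
    /// ```rust,ignore
    /// let store = MultipartStateStore::new();
    /// store.put("u-1", ctx); // some MultipartUploadContext, stamped with the real Utc::now()
    /// // Sweep "25 hours later" by advancing `now`, not the wall clock:
    /// let now = chrono::Utc::now() + chrono::Duration::hours(25);
    /// assert_eq!(store.sweep_stale(now, chrono::Duration::hours(24)), 1);
    /// ```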
    pub fn sweep_stale(&self, now: DateTime<Utc>, max_age: chrono::Duration) -> usize {
        let cutoff = now - max_age;
        let mut map = self
            .by_upload_id
            .write()
            .expect("multipart-state by_upload_id RwLock poisoned");
        let stale: Vec<String> = map
            .iter()
            .filter(|(_, (_, ts))| *ts < cutoff)
            .map(|(k, _)| k.clone())
            .collect();
        let count = stale.len();
        for k in stale {
            map.remove(&k);
        }
        count
    }

    /// v0.8.1 #59: get-or-create the per-(bucket, key) `Mutex` used to
    /// serialise `complete_multipart_upload` invocations on the same
    /// logical key. The caller does `lock.lock().await` and holds the
    /// guard for the duration of its critical section (GET assembled
    /// body → encrypt → PUT encrypted body → version-id mint → object-
    /// lock apply → tagging persist → replication enqueue).
    ///
    /// Returns an `Arc<Mutex<()>>` so the caller can drop the
    /// `DashMap` shard's read lock immediately and only retain the
    /// mutex itself across the await point — `DashMap`'s shard guard
    /// is `!Send`, so we must not hold it through an `await`.
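    ///
    /// A minimal usage sketch (the critical section is elided to a
    /// comment; `store`, `bucket`, and `key` are illustrative
    /// bindings):
    ///
    /// ```rust,ignore
    /// let lock = store.completion_lock(&bucket, &key);
    /// let guard = lock.lock().await; // serialises same-key Completes
    /// // GET assembled body → encrypt → PUT encrypted body → ...
    /// drop(guard);
    /// store.prune_completion_locks(); // best-effort entry cleanup
    /// ```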
    pub fn completion_lock(&self, bucket: &str, key: &str) -> Arc<Mutex<()>> {
        let k = (bucket.to_owned(), key.to_owned());
        self.completion_locks
            .entry(k)
            .or_insert_with(|| Arc::new(Mutex::new(())))
            .value()
            .clone()
    }

    /// v0.8.1 #59: best-effort cleanup of stale completion-lock
    /// entries. A `(bucket, key)` entry is "stale" once no concurrent
    /// Complete is referencing its `Arc<Mutex<()>>` — we detect that
    /// by `Arc::strong_count == 1` (only the `DashMap` itself holds a
    /// reference). Called from `complete_multipart_upload` after the
    /// guarded section returns, so a steady-state workload of unique
    /// keys never accumulates locks.
    ///
    /// The retain predicate is `> 1` (keep entries with outstanding
    /// borrowers), so prune is safe to invoke concurrently with other
    /// `completion_lock` callers — at worst the prune sees the entry
    /// during a brief window where the borrower has cloned but not yet
    /// taken `lock()`, and the entry survives until the next sweep.
    pub fn prune_completion_locks(&self) {
        self.completion_locks
            .retain(|_, lock| Arc::strong_count(lock) > 1);
    }

    /// Test-only: how many completion-lock entries the store currently
    /// holds. Used by `prune_completion_locks_removes_unreferenced`.
    #[cfg(test)]
    fn completion_locks_len(&self) -> usize {
        self.completion_locks.len()
    }

    /// Test-only: how many in-flight uploads the store is currently
    /// tracking. Used by the assertion in `concurrent_put_lookup_race_free`.
    #[cfg(test)]
    fn len(&self) -> usize {
        self.by_upload_id
            .read()
            .expect("multipart-state by_upload_id RwLock poisoned")
            .len()
    }
}

impl Default for MultipartStateStore {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;
    use std::thread;

    fn sample_ctx(bucket: &str, key: &str) -> MultipartUploadContext {
        MultipartUploadContext {
            bucket: bucket.to_owned(),
            key: key.to_owned(),
            sse: MultipartSseMode::None,
            tags: None,
            object_lock_mode: None,
            object_lock_retain_until: None,
            object_lock_legal_hold: false,
        }
    }

    /// `put` followed by `get` returns the same context, and `remove`
    /// makes a subsequent `get` return `None`. Sanity check for the
    /// basic CRUD shape.
    #[test]
    fn put_get_remove_round_trip() {
        let store = MultipartStateStore::new();
        let ctx = sample_ctx("b", "k");
        store.put("upload-001", ctx.clone());
        let got = store.get("upload-001").expect("entry must be present");
        assert_eq!(got.bucket, "b");
        assert_eq!(got.key, "k");
        assert_eq!(got.sse, MultipartSseMode::None);
        store.remove("upload-001");
        assert!(store.get("upload-001").is_none(), "entry must be gone");
    }

    /// SSE-C variants stash the 32-byte key + 16-byte MD5; verify the
    /// bytes round-trip exactly (defensive — an easy place to introduce
    /// a silent truncation bug).
    #[test]
    fn sse_c_key_bytes_round_trip() {
        let store = MultipartStateStore::new();
        let key = [0xa5u8; 32];
        let key_md5 = [0xb6u8; 16];
        let mut ctx = sample_ctx("b", "k");
        ctx.sse = MultipartSseMode::SseC {
            key: Zeroizing::new(key),
            key_md5,
        };
        store.put("u-sse-c", ctx);
        let got = store.get("u-sse-c").expect("entry must be present");
        match got.sse {
            MultipartSseMode::SseC { key: k, key_md5: m } => {
                assert_eq!(*k, key, "SSE-C key bytes must round-trip");
                assert_eq!(m, key_md5, "SSE-C MD5 must round-trip");
            }
            other => panic!("expected SseC variant, got {other:?}"),
        }
    }

    /// v0.8.2 #62 (H-6 fix): registering an SSE-C upload then
    /// `remove`-ing it must drop the `Zeroizing<[u8; 32]>` key wrapper,
    /// whose `Drop` zeros the underlying 32 bytes. Verifying the wipe
    /// directly would mean reading back the freed allocation (UB in
    /// safe Rust), and the wipe itself is covered by the `zeroize`
    /// crate's own test suite. So this test pins down the two things
    /// we own: the variant still carries the `Zeroizing` wrapper (a
    /// compile-time type assertion below), and `remove` actually drops
    /// the entry so the wrapper's `Drop` runs.
    #[test]
    fn sse_c_key_zeroized_on_remove() {
        let store = MultipartStateStore::new();
        let key = [0x77u8; 32];
        let key_md5 = [0x33u8; 16];
        let mut ctx = sample_ctx("b", "k");
        ctx.sse = MultipartSseMode::SseC {
            key: Zeroizing::new(key),
            key_md5,
        };
        store.put("u-zero", ctx);
        let got = store.get("u-zero").expect("entry present");
        match &got.sse {
            MultipartSseMode::SseC { key: k, .. } => {
                // Compile-time type assertion: if someone later
                // regresses the field back to a bare `[u8; 32]`, this
                // binding stops compiling.
                let _wrapper: &Zeroizing<[u8; 32]> = k;
                assert_eq!(**k, key);
            }
            other => panic!("expected SseC, got {other:?}"),
        }
        drop(got);
        store.remove("u-zero");
        assert!(
            store.get("u-zero").is_none(),
            "removed entry must be gone (its Zeroizing<[u8;32]> ran Drop and wiped the key)"
        );
    }

    /// v0.8.2 #62: with three entries inserted now, a sweep driven
    /// with a `now` 25 h in the future and a 24 h TTL must drop all
    /// three. We pin `now` deterministically to avoid wall-clock
    /// flakes; the store's internal `put` always stamps `Utc::now()`,
    /// so we drive the cutoff past every insertion timestamp.
    #[test]
    fn sweep_stale_drops_old_contexts() {
        let store = MultipartStateStore::new();
        // Insert three entries (all stamped with `Utc::now()` at
        // insert time — within microseconds of each other on a normal
        // machine).
        store.put("u-1", sample_ctx("b", "k1"));
        store.put("u-2", sample_ctx("b", "k2"));
        store.put("u-3", sample_ctx("b", "k3"));
        assert_eq!(store.len(), 3, "all three entries inserted");
        // `now` 25 h in the future puts every existing entry beyond
        // the 24 h cutoff → all three are stale.
        let future = Utc::now() + chrono::Duration::hours(25);
        let swept = store.sweep_stale(future, chrono::Duration::hours(24));
        assert_eq!(swept, 3, "all three entries are older than the 24 h cutoff");
        assert_eq!(store.len(), 0, "store must be empty after sweep");
    }

    /// v0.8.2 #62: `sweep_stale` must NOT drop entries that are still
    /// fresh. Inserts one entry, then sweeps with a `now` only 1 h
    /// later — the entry is well within the 24 h TTL, so it survives.
    #[test]
    fn sweep_stale_keeps_recent_contexts() {
        let store = MultipartStateStore::new();
        store.put("u-fresh", sample_ctx("b", "k"));
        let near_future = Utc::now() + chrono::Duration::hours(1);
        let swept = store.sweep_stale(near_future, chrono::Duration::hours(24));
        assert_eq!(swept, 0, "1 h-old entry must NOT be swept under a 24 h TTL");
        assert!(store.get("u-fresh").is_some(), "fresh entry must remain");
        assert_eq!(store.len(), 1);
    }

    /// v0.8.2 #62: mixed-age workload — two "old" entries and one
    /// fresh one. Sweep must return exactly 2 and leave the fresh
    /// entry intact, verifying `sweep_stale` reports the correct count
    /// for partial sweeps (the most common ops case). We can't inject
    /// timestamps into the store's internal `Utc::now()` stamps, so we
    /// capture a marker between the old inserts and the fresh insert
    /// and choose `sweep_at = marker + TTL`, making the cutoff land
    /// exactly on the marker.
    #[test]
    fn sweep_stale_count_returns_correct() {
        let store = MultipartStateStore::new();
        // Two "old" entries, stamped before the marker.
        store.put("old-1", sample_ctx("b", "k1"));
        store.put("old-2", sample_ctx("b", "k2"));
        // Small sleeps keep the marker strictly between the old and
        // fresh insertion timestamps, even on a coarse clock.
        std::thread::sleep(std::time::Duration::from_millis(10));
        let fresh_marker = Utc::now();
        std::thread::sleep(std::time::Duration::from_millis(10));
        store.put("fresh", sample_ctx("b", "k3"));
        // cutoff = sweep_at - 24 h = fresh_marker: strictly after the
        // "old" timestamps and strictly before the fresh one.
        let sweep_at = fresh_marker + chrono::Duration::hours(24);
        let swept = store.sweep_stale(sweep_at, chrono::Duration::hours(24));
        assert_eq!(swept, 2, "exactly the two pre-marker entries must sweep");
        assert!(store.get("fresh").is_some(), "post-marker entry survives");
        assert!(store.get("old-1").is_none(), "old-1 must be gone");
        assert!(store.get("old-2").is_none(), "old-2 must be gone");
    }

    /// v0.8.1 #59: `completion_lock(bucket, key)` must return the
    /// **same** `Arc<Mutex<()>>` for repeated calls on the same key,
    /// otherwise concurrent Completes on the same key would each grab
    /// a distinct mutex and the serialisation would silently degrade
    /// to a no-op. We compare with `Arc::ptr_eq` rather than equality
    /// on the inner `()` because two distinct `Mutex<()>` instances
    /// have different addresses but would compare equal under `==`
    /// (unit type).
    #[test]
    fn completion_lock_returns_same_arc_for_same_key() {
        let store = MultipartStateStore::new();
        let a = store.completion_lock("bucket-a", "key/x");
        let b = store.completion_lock("bucket-a", "key/x");
        assert!(
            Arc::ptr_eq(&a, &b),
            "completion_lock(same bucket, same key) must return identical Arc"
        );
    }

    /// v0.8.1 #59: locks for distinct `(bucket, key)` tuples must be
    /// independent — concurrent Completes on different keys must not
    /// serialise on each other. We acquire two locks back-to-back
    /// (`try_lock` so the assertion is deterministic and doesn't
    /// depend on runtime scheduling); both must succeed without
    /// contention. Also exercises bucket-vs-key disjointness: the same
    /// key under two different buckets must NOT alias.
    #[tokio::test]
    async fn completion_lock_distinct_keys_independent() {
        let store = MultipartStateStore::new();
        let a = store.completion_lock("bucket-a", "shared/key");
        let b = store.completion_lock("bucket-b", "shared/key");
        assert!(
            !Arc::ptr_eq(&a, &b),
            "completion_lock with different bucket must yield different Arc"
        );
        // Hold the first lock and acquire the second within the same
        // task — must NOT deadlock and must NOT block. `try_lock`
        // returns `Ok(MutexGuard)` when uncontended, `Err` otherwise.
        let guard_a = a.try_lock().expect("lock on bucket-a/shared/key must be free");
        let guard_b = b.try_lock().expect("lock on bucket-b/shared/key must be free");
        // The same (bucket, key) from a third call must alias `a` and
        // therefore be contended (a's guard is held above).
        let a2 = store.completion_lock("bucket-a", "shared/key");
        assert!(
            Arc::ptr_eq(&a, &a2),
            "completion_lock for the same (bucket, key) must alias"
        );
        assert!(
            a2.try_lock().is_err(),
            "completion_lock alias must observe the held guard as contended"
        );
        drop(guard_a);
        drop(guard_b);
    }

    /// v0.8.1 #59: `prune_completion_locks` must drop entries whose
    /// only `Arc` is the `DashMap`'s own (i.e. no in-flight Complete is
    /// holding a reference). After we acquire a lock then drop the
    /// returned `Arc`, the `strong_count` falls to 1 and prune must
    /// retire the entry so a steady-state workload of unique keys
    /// doesn't accumulate locks. Conversely, an entry with an
    /// outstanding `Arc` reference must survive prune.
    #[test]
    fn prune_completion_locks_removes_unreferenced() {
        let store = MultipartStateStore::new();
        // Acquire-and-drop: simulates a Complete that finished and let
        // its `Arc<Mutex<()>>` go out of scope. `strong_count == 1`
        // afterwards (only the `DashMap` retains it).
        {
            let _lock = store.completion_lock("b", "ephemeral");
        }
        assert_eq!(
            store.completion_locks_len(),
            1,
            "lock entry must be present immediately after acquire-drop"
        );
        store.prune_completion_locks();
        assert_eq!(
            store.completion_locks_len(),
            0,
            "prune must retire entries with strong_count == 1"
        );

        // Negative case: an outstanding `Arc` must NOT be pruned —
        // pruning a still-borrowed entry would let a concurrent
        // Complete miss the serialisation point.
        let held = store.completion_lock("b", "in-flight");
        store.prune_completion_locks();
        assert_eq!(
            store.completion_locks_len(),
            1,
            "prune must keep entries with outstanding Arc borrowers"
        );
        drop(held);
        store.prune_completion_locks();
        assert_eq!(
            store.completion_locks_len(),
            0,
            "prune must retire the entry once the borrower drops"
        );
    }

    /// 8 threads each register 250 distinct upload_ids and immediately
    /// look them up. After `join`, the store must contain exactly
    /// 8 × 250 entries — verifies the `RwLock` doesn't drop writes
    /// under concurrent contention (an obvious refactor that swapped
    /// in an unsynchronised map would visibly fail this).
    #[test]
    fn concurrent_put_lookup_race_free() {
        let store = Arc::new(MultipartStateStore::new());
        let mut handles = Vec::new();
        for tid in 0..8u32 {
            let st = Arc::clone(&store);
            handles.push(thread::spawn(move || {
                for i in 0..250u32 {
                    let id = format!("u-{tid}-{i}");
                    let ctx = sample_ctx("b", &id);
                    st.put(&id, ctx);
                    // Immediate lookup proves a writer observes its
                    // own put under the RwLock.
                    let got = st.get(&id).expect("self-put must be visible");
                    assert_eq!(got.key, id);
                }
            }));
        }
        for h in handles {
            h.join().expect("worker thread panicked");
        }
        assert_eq!(store.len(), 8 * 250, "all puts must persist");
    }
}