// reifydb_store_multi/store/worker.rs
1// SPDX-License-Identifier: AGPL-3.0-or-later
2// Copyright (c) 2025 ReifyDB
3
4//! Background worker for deferred drop operations.
5//!
6//! This module provides an actor-based drop processing system that executes
7//! version cleanup operations off the critical commit path.
8//!
9//! The actor model is platform-agnostic:
10//! - **Native**: Runs on its own OS thread, processes messages from a channel
11//! - **WASM**: Messages are processed inline (synchronously) when sent
12
13use std::{collections::HashMap, time::Duration};
14
15use reifydb_core::{
16	common::CommitVersion,
17	encoded::key::EncodedKey,
18	event::{
19		EventBus,
20		metric::{StorageDrop, StorageStatsRecordedEvent},
21	},
22};
23use reifydb_runtime::{
24	actor::{
25		context::Context,
26		mailbox::ActorRef,
27		system::{ActorConfig, ActorSystem},
28		timers::TimerHandle,
29		traits::{Actor, Directive},
30	},
31	clock::{Clock, Instant},
32};
33use reifydb_type::util::cowvec::CowVec;
34use tracing::{Span, debug, error, instrument};
35
36use super::drop::find_keys_to_drop;
37use crate::{
38	hot::storage::HotStorage,
39	tier::{EntryKind, TierStorage},
40};
41
/// Configuration for the drop worker.
///
/// Tuning knobs controlling how aggressively drop requests are batched
/// before being executed against storage: a count-based trigger
/// (`batch_size`) and a time-based trigger (`flush_interval`).
#[derive(Debug, Clone)]
pub struct DropWorkerConfig {
	/// How many drop requests to batch before executing.
	pub batch_size: usize,
	/// Maximum time to wait before flushing a partial batch.
	pub flush_interval: Duration,
}
50
51impl Default for DropWorkerConfig {
52	fn default() -> Self {
53		Self {
54			batch_size: 100,
55			flush_interval: Duration::from_millis(50),
56		}
57	}
58}
59
/// A request to drop old versions of a key.
///
/// `up_to_version` and `keep_last_versions` are independent retention
/// criteria; either, both, or neither may be set (see `find_keys_to_drop`
/// for how they combine — TODO confirm exact semantics against that helper).
#[derive(Debug, Clone)]
pub struct DropRequest {
	/// The table containing the key.
	pub table: EntryKind,
	/// The logical key (without version suffix).
	pub key: CowVec<u8>,
	/// Drop versions below this threshold (if Some).
	pub up_to_version: Option<CommitVersion>,
	/// Keep this many most recent versions (if Some).
	pub keep_last_versions: Option<usize>,
	/// The commit version that created this drop request.
	pub commit_version: CommitVersion,
	/// A version being written in the same batch (to avoid race).
	pub pending_version: Option<CommitVersion>,
}
76
/// Messages for the drop actor.
#[derive(Clone)]
pub enum DropMessage {
	/// A single drop request to process.
	Request(DropRequest),
	/// A batch of drop requests to process.
	Batch(Vec<DropRequest>),
	/// Periodic tick for flushing batches; sent by the repeating timer
	/// scheduled in `Actor::init`.
	Tick,
	/// Shutdown the actor, flushing any pending requests first.
	Shutdown,
}
89
/// Actor that processes drop operations asynchronously.
pub struct DropActor {
	/// Hot-tier storage that drops are looked up in and executed against.
	storage: HotStorage,
	/// Bus used to emit drop statistics after each processed batch.
	event_bus: EventBus,
	/// Batching configuration (size and flush-interval triggers).
	config: DropWorkerConfig,
	/// Clock used to timestamp flushes (injectable for testing).
	clock: Clock,
}
97
/// State for the drop actor.
pub struct DropActorState {
	/// Pending requests waiting to be processed.
	pending_requests: Vec<DropRequest>,
	/// Last time we flushed the batch; compared against
	/// `config.flush_interval` on each `Tick`.
	last_flush: Instant,
	/// Handle to the periodic timer, held only so the timer is cancelled
	/// when the state is dropped.
	_timer_handle: Option<TimerHandle>,
}
107
108impl DropActor {
109	pub fn new(config: DropWorkerConfig, storage: HotStorage, event_bus: EventBus, clock: Clock) -> Self {
110		Self {
111			storage,
112			event_bus,
113			config,
114			clock,
115		}
116	}
117
118	pub fn spawn(
119		system: &ActorSystem,
120		config: DropWorkerConfig,
121		storage: HotStorage,
122		event_bus: EventBus,
123		clock: Clock,
124	) -> ActorRef<DropMessage> {
125		let actor = Self::new(config, storage, event_bus, clock);
126		system.spawn("drop-worker", actor).actor_ref().clone()
127	}
128
129	/// Maybe flush if batch is full.
130	fn maybe_flush(&self, state: &mut DropActorState) {
131		if state.pending_requests.len() >= self.config.batch_size {
132			self.flush(state);
133		}
134	}
135
136	/// Flush all pending requests.
137	fn flush(&self, state: &mut DropActorState) {
138		if state.pending_requests.is_empty() {
139			return;
140		}
141
142		Self::process_batch(&self.storage, &mut state.pending_requests, &self.event_bus);
143		state.last_flush = self.clock.instant();
144	}
145
146	#[instrument(name = "drop::process_batch", level = "debug", skip_all, fields(num_requests = requests.len(), total_dropped))]
147	fn process_batch(storage: &HotStorage, requests: &mut Vec<DropRequest>, event_bus: &EventBus) {
148		// Collect all keys to drop, grouped by table: (key, version) pairs
149		let mut batches: HashMap<EntryKind, Vec<(CowVec<u8>, CommitVersion)>> = HashMap::new();
150		// Collect drop stats for metrics
151		let mut drops_with_stats = Vec::new();
152		let mut max_pending_version = CommitVersion(0);
153
154		for request in requests.drain(..) {
155			// Track highest version for event (prefer pending_version if set, otherwise use commit_version)
156			let version_for_event = request.pending_version.unwrap_or(request.commit_version);
157			if version_for_event > max_pending_version {
158				max_pending_version = version_for_event;
159			}
160
161			match find_keys_to_drop(
162				storage,
163				request.table,
164				request.key.as_ref(),
165				request.up_to_version,
166				request.keep_last_versions,
167				request.pending_version,
168			) {
169				Ok(entries_to_drop) => {
170					for entry in entries_to_drop {
171						// Collect stats for metrics
172						drops_with_stats.push(StorageDrop {
173							key: EncodedKey(request.key.clone()),
174							value_bytes: entry.value_bytes,
175						});
176
177						// Queue for physical deletion: (key, version) pair
178						batches.entry(request.table)
179							.or_default()
180							.push((entry.key, entry.version));
181					}
182				}
183				Err(e) => {
184					error!("Drop actor failed to find keys to drop: {}", e);
185				}
186			}
187		}
188
189		if !batches.is_empty() {
190			if let Err(e) = storage.drop(batches) {
191				error!("Drop actor failed to execute drops: {}", e);
192			}
193		}
194
195		let total_dropped = drops_with_stats.len();
196		Span::current().record("total_dropped", total_dropped);
197
198		event_bus.emit(StorageStatsRecordedEvent::new(vec![], vec![], drops_with_stats, max_pending_version));
199	}
200}
201
202impl Actor for DropActor {
203	type State = DropActorState;
204	type Message = DropMessage;
205
206	fn init(&self, ctx: &Context<Self::Message>) -> Self::State {
207		debug!("Drop actor started");
208
209		// Schedule periodic tick for flushing partial batches
210		let timer_handle = ctx.schedule_repeat(Duration::from_millis(10), DropMessage::Tick);
211
212		DropActorState {
213			pending_requests: Vec::with_capacity(self.config.batch_size),
214			last_flush: self.clock.instant(),
215			_timer_handle: Some(timer_handle),
216		}
217	}
218
219	fn handle(&self, state: &mut Self::State, msg: Self::Message, ctx: &Context<Self::Message>) -> Directive {
220		// Check for cancellation
221		if ctx.is_cancelled() {
222			// Flush remaining requests before stopping
223			self.flush(state);
224			return Directive::Stop;
225		}
226
227		match msg {
228			DropMessage::Request(request) => {
229				state.pending_requests.push(request);
230				self.maybe_flush(state);
231			}
232			DropMessage::Batch(requests) => {
233				state.pending_requests.extend(requests);
234				self.maybe_flush(state);
235			}
236			DropMessage::Tick => {
237				if !state.pending_requests.is_empty()
238					&& state.last_flush.elapsed() >= self.config.flush_interval
239				{
240					self.flush(state);
241				}
242			}
243			DropMessage::Shutdown => {
244				debug!("Drop actor received shutdown signal");
245				// Process any remaining requests before shutdown
246				self.flush(state);
247				return Directive::Stop;
248			}
249		}
250
251		Directive::Continue
252	}
253
254	fn post_stop(&self) {
255		debug!("Drop actor stopped");
256	}
257
258	fn config(&self) -> ActorConfig {
259		// Use a reasonable mailbox size for batched operations
260		ActorConfig::new().mailbox_capacity(256)
261	}
262}