redis_oxide/commands/optimized.rs

//! Optimized command builders with pre-allocation and caching
//!
//! This module provides optimized versions of command builders that:
//! - Pre-allocate argument vectors based on command type
//! - Cache frequently used keys and values
//! - Cache serialized commands to avoid re-encoding
//! - Reduce memory allocations
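//!
//! # Example
//!
//! A minimal usage sketch (marked `ignore`, so it is not compiled as a doctest);
//! it assumes this module is reachable as `redis_oxide::commands::optimized`:
//!
//! ```ignore
//! use redis_oxide::commands::optimized::{init_string_interner, OptimizedGetCommand};
//!
//! // Size the global string interner once at startup.
//! init_string_interner(1024);
//!
//! // Build a GET whose single-argument vector is pre-computed and cached.
//! let cmd = OptimizedGetCommand::new("user:1").with_cached_args();
//! ```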

#![allow(unused_variables)]
#![allow(dead_code)]
#![allow(missing_docs)]

use crate::commands::Command;
use crate::core::{error::RedisResult, value::RespValue};
use crate::pipeline::PipelineCommand;
use bytes::Bytes;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use std::time::Duration;

/// String interning cache for frequently used strings
#[derive(Debug)]
pub struct StringInterner {
    cache: HashMap<String, Arc<str>>,
    max_size: usize,
    access_count: HashMap<String, u64>,
}

impl StringInterner {
    /// Create a new string interner
    pub fn new(max_size: usize) -> Self {
        Self {
            cache: HashMap::new(),
            max_size,
            access_count: HashMap::new(),
        }
    }

    /// Intern a string, returning an `Arc<str>` for efficient sharing
    pub fn intern(&mut self, s: &str) -> Arc<str> {
        if let Some(interned) = self.cache.get(s) {
            // Update access count
            *self.access_count.entry(s.to_string()).or_insert(0) += 1;
            return interned.clone();
        }

        // If cache is full, remove least frequently used entry
        if self.cache.len() >= self.max_size {
            if let Some((lfu_key, _)) = self.access_count.iter().min_by_key(|(_, &count)| count) {
                let lfu_key = lfu_key.clone();
                self.cache.remove(&lfu_key);
                self.access_count.remove(&lfu_key);
            }
        }

        let interned: Arc<str> = s.into();
        self.cache.insert(s.to_string(), interned.clone());
        self.access_count.insert(s.to_string(), 1);
        interned
    }

    /// Get cache statistics
    pub fn stats(&self) -> (usize, usize) {
        (self.cache.len(), self.max_size)
    }

    /// Clear the cache
    pub fn clear(&mut self) {
        self.cache.clear();
        self.access_count.clear();
    }
}

/// Global string interner instance
static STRING_INTERNER: RwLock<Option<StringInterner>> = RwLock::new(None);

/// Initialize the global string interner
pub fn init_string_interner(max_size: usize) {
    let mut interner = STRING_INTERNER.write().unwrap();
    *interner = Some(StringInterner::new(max_size));
}

/// Intern a string using the global interner
pub fn intern_string(s: &str) -> Arc<str> {
    let mut interner_guard = STRING_INTERNER.write().unwrap();
    if let Some(ref mut interner) = *interner_guard {
        interner.intern(s)
    } else {
        // Fallback if interner not initialized
        s.into()
    }
}

/// Command cache for serialized commands
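///
/// # Example
///
/// A minimal usage sketch (marked `ignore`, so it is not compiled as a doctest);
/// the import path is an assumption:
///
/// ```ignore
/// use bytes::Bytes;
/// use redis_oxide::commands::optimized::CommandCache;
///
/// let mut cache = CommandCache::new(128);
/// // The closure only runs on a miss; later lookups return the cached bytes.
/// let frame = cache.get_or_insert("GET user:1", || {
///     Bytes::from_static(b"*2\r\n$3\r\nGET\r\n$6\r\nuser:1\r\n")
/// });
/// assert_eq!(cache.stats(), (1, 128));
/// ```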
#[derive(Debug)]
pub struct CommandCache {
    cache: HashMap<String, Bytes>,
    max_size: usize,
    access_count: HashMap<String, u64>,
}

impl CommandCache {
    /// Create a new command cache
    pub fn new(max_size: usize) -> Self {
        Self {
            cache: HashMap::new(),
            max_size,
            access_count: HashMap::new(),
        }
    }

    /// Get a cached command or insert a new one
    pub fn get_or_insert<F>(&mut self, key: &str, f: F) -> Bytes
    where
        F: FnOnce() -> Bytes,
    {
        if let Some(cached) = self.cache.get(key) {
            *self.access_count.entry(key.to_string()).or_insert(0) += 1;
            return cached.clone();
        }

        // If cache is full, remove least frequently used entry
        if self.cache.len() >= self.max_size {
            if let Some((lfu_key, _)) = self.access_count.iter().min_by_key(|(_, &count)| count) {
                let lfu_key = lfu_key.clone();
                self.cache.remove(&lfu_key);
                self.access_count.remove(&lfu_key);
            }
        }

        let value = f();
        self.cache.insert(key.to_string(), value.clone());
        self.access_count.insert(key.to_string(), 1);
        value
    }

    /// Get cache statistics
    pub fn stats(&self) -> (usize, usize) {
        (self.cache.len(), self.max_size)
    }

    /// Clear the cache
    pub fn clear(&mut self) {
        self.cache.clear();
        self.access_count.clear();
    }
}

/// Optimized GET command with pre-allocated arguments
#[derive(Debug, Clone)]
pub struct OptimizedGetCommand {
    key: Arc<str>,
    args_cache: Option<Vec<RespValue>>,
}

impl OptimizedGetCommand {
    /// Create a new optimized GET command
    pub fn new(key: impl AsRef<str>) -> Self {
        let key = intern_string(key.as_ref());
        Self {
            key,
            args_cache: None,
        }
    }

    /// Pre-compute and cache arguments
    pub fn with_cached_args(mut self) -> Self {
        self.args_cache = Some(vec![RespValue::from(self.key.as_ref())]);
        self
    }
}

impl Command for OptimizedGetCommand {
    type Output = Option<String>;

    fn command_name(&self) -> &str {
        "GET"
    }

    fn args(&self) -> Vec<RespValue> {
        if let Some(ref cached) = self.args_cache {
            cached.clone()
        } else {
            vec![RespValue::from(self.key.as_ref())]
        }
    }

    fn parse_response(&self, response: RespValue) -> RedisResult<Self::Output> {
        if response.is_null() {
            Ok(None)
        } else {
            Ok(Some(response.as_string()?))
        }
    }

    fn keys(&self) -> Vec<&[u8]> {
        vec![self.key.as_bytes()]
    }
}

impl PipelineCommand for OptimizedGetCommand {
    fn name(&self) -> &str {
        self.command_name()
    }

    fn args(&self) -> Vec<RespValue> {
        <Self as Command>::args(self)
    }

    fn key(&self) -> Option<String> {
        Some(self.key.to_string())
    }
}

/// Optimized SET command with pre-allocated arguments and options
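///
/// # Example
///
/// A minimal builder sketch (marked `ignore`, so it is not compiled as a doctest);
/// the import path is an assumption:
///
/// ```ignore
/// use redis_oxide::commands::optimized::OptimizedSetCommand;
/// use std::time::Duration;
///
/// // SET key value EX 60 NX, with the argument vector pre-computed once.
/// let cmd = OptimizedSetCommand::new("session:42", "payload")
///     .expire(Duration::from_secs(60))
///     .only_if_not_exists()
///     .with_cached_args();
/// ```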
#[derive(Debug, Clone)]
pub struct OptimizedSetCommand {
    key: Arc<str>,
    value: Arc<str>,
    expiration: Option<Duration>,
    nx: bool,
    xx: bool,
    args_cache: Option<Vec<RespValue>>,
}

impl OptimizedSetCommand {
    /// Create a new optimized SET command
    pub fn new(key: impl AsRef<str>, value: impl AsRef<str>) -> Self {
        let key = intern_string(key.as_ref());
        let value = intern_string(value.as_ref());
        Self {
            key,
            value,
            expiration: None,
            nx: false,
            xx: false,
            args_cache: None,
        }
    }

    /// Set expiration time (EX seconds)
    pub fn expire(mut self, duration: Duration) -> Self {
        self.expiration = Some(duration);
        self.args_cache = None; // Invalidate cache
        self
    }

    /// Only set if key doesn't exist (NX)
    pub fn only_if_not_exists(mut self) -> Self {
        self.nx = true;
        self.args_cache = None; // Invalidate cache
        self
    }

    /// Only set if key exists (XX)
    pub fn only_if_exists(mut self) -> Self {
        self.xx = true;
        self.args_cache = None; // Invalidate cache
        self
    }

    /// Pre-compute and cache arguments
    pub fn with_cached_args(mut self) -> Self {
        let mut args = Vec::with_capacity(6); // Pre-allocate for worst case
        args.push(RespValue::from(self.key.as_ref()));
        args.push(RespValue::from(self.value.as_ref()));

        if let Some(duration) = self.expiration {
            args.push(RespValue::from("EX"));
            args.push(RespValue::from(duration.as_secs().to_string()));
        }

        if self.nx {
            args.push(RespValue::from("NX"));
        }

        if self.xx {
            args.push(RespValue::from("XX"));
        }

        self.args_cache = Some(args);
        self
    }
}

impl Command for OptimizedSetCommand {
    type Output = bool;

    fn command_name(&self) -> &str {
        "SET"
    }

    fn args(&self) -> Vec<RespValue> {
        if let Some(ref cached) = self.args_cache {
            cached.clone()
        } else {
            let mut args = Vec::with_capacity(6);
            args.push(RespValue::from(self.key.as_ref()));
            args.push(RespValue::from(self.value.as_ref()));

            if let Some(duration) = self.expiration {
                args.push(RespValue::from("EX"));
                args.push(RespValue::from(duration.as_secs().to_string()));
            }

            if self.nx {
                args.push(RespValue::from("NX"));
            }

            if self.xx {
                args.push(RespValue::from("XX"));
            }

            args
        }
    }

    fn parse_response(&self, response: RespValue) -> RedisResult<Self::Output> {
        match response {
            RespValue::SimpleString(ref s) if s == "OK" => Ok(true),
            _ => Ok(false),
        }
    }

    fn keys(&self) -> Vec<&[u8]> {
        vec![self.key.as_bytes()]
    }
}

impl PipelineCommand for OptimizedSetCommand {
    fn name(&self) -> &str {
        self.command_name()
    }

    fn args(&self) -> Vec<RespValue> {
        <Self as Command>::args(self)
    }

    fn key(&self) -> Option<String> {
        Some(self.key.to_string())
    }
}

/// Optimized HSET command with pre-allocated arguments
#[derive(Debug, Clone)]
pub struct OptimizedHSetCommand {
    key: Arc<str>,
    field: Arc<str>,
    value: Arc<str>,
    args_cache: Option<Vec<RespValue>>,
}

impl OptimizedHSetCommand {
    /// Create a new optimized HSET command
    pub fn new(key: impl AsRef<str>, field: impl AsRef<str>, value: impl AsRef<str>) -> Self {
        let key = intern_string(key.as_ref());
        let field = intern_string(field.as_ref());
        let value = intern_string(value.as_ref());
        Self {
            key,
            field,
            value,
            args_cache: None,
        }
    }

    /// Pre-compute and cache arguments
    pub fn with_cached_args(mut self) -> Self {
        self.args_cache = Some(vec![
            RespValue::from(self.key.as_ref()),
            RespValue::from(self.field.as_ref()),
            RespValue::from(self.value.as_ref()),
        ]);
        self
    }
}

impl Command for OptimizedHSetCommand {
    type Output = i64;

    fn command_name(&self) -> &str {
        "HSET"
    }

    fn args(&self) -> Vec<RespValue> {
        if let Some(ref cached) = self.args_cache {
            cached.clone()
        } else {
            vec![
                RespValue::from(self.key.as_ref()),
                RespValue::from(self.field.as_ref()),
                RespValue::from(self.value.as_ref()),
            ]
        }
    }

    fn parse_response(&self, response: RespValue) -> RedisResult<Self::Output> {
        response.as_int()
    }

    fn keys(&self) -> Vec<&[u8]> {
        vec![self.key.as_bytes()]
    }
}

impl PipelineCommand for OptimizedHSetCommand {
    fn name(&self) -> &str {
        self.command_name()
    }

    fn args(&self) -> Vec<RespValue> {
        <Self as Command>::args(self)
    }

    fn key(&self) -> Option<String> {
        Some(self.key.to_string())
    }
}

/// Batch command builder for optimized pipeline operations
pub struct BatchCommandBuilder {
    commands: Vec<Box<dyn Command<Output = RespValue> + Send + Sync>>,
    estimated_size: usize,
}

impl BatchCommandBuilder {
    /// Create a new batch command builder
    pub fn new() -> Self {
        Self {
            commands: Vec::new(),
            estimated_size: 0,
        }
    }

    /// Create a new batch command builder with capacity
    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            commands: Vec::with_capacity(capacity),
            estimated_size: 0,
        }
    }

    /// Add a command to the batch
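    ///
    /// Note: storing heterogeneous commands means erasing `T::Output`. A minimal
    /// adapter sketch (illustrative only, not used by this builder; it assumes the
    /// `Command` trait methods shown elsewhere in this file) could look like:
    ///
    /// ```ignore
    /// struct ErasedCommand<T>(T);
    ///
    /// impl<T> Command for ErasedCommand<T>
    /// where
    ///     T: Command,
    ///     T::Output: Into<RespValue>,
    /// {
    ///     type Output = RespValue;
    ///
    ///     fn command_name(&self) -> &str {
    ///         self.0.command_name()
    ///     }
    ///
    ///     fn args(&self) -> Vec<RespValue> {
    ///         self.0.args()
    ///     }
    ///
    ///     fn parse_response(&self, response: RespValue) -> RedisResult<Self::Output> {
    ///         self.0.parse_response(response).map(Into::into)
    ///     }
    ///
    ///     fn keys(&self) -> Vec<&[u8]> {
    ///         self.0.keys()
    ///     }
    /// }
    /// ```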
    pub fn add_command<T>(&mut self, command: T)
    where
        T: Command + Send + Sync + 'static,
        T::Output: Into<RespValue>,
    {
        // Estimate the serialized size for this command
        let args = command.args();
        let cmd_size = command.command_name().len()
            + args
                .iter()
                .map(|arg| self.estimate_arg_size(arg))
                .sum::<usize>();
        self.estimated_size += cmd_size;

        // Box the command with type erasure.
        // Note: this is simplified - `T::Output` differs from `RespValue`, so storing
        // the command would require an output-erasing adapter (see the sketch above).
        // self.commands.push(Box::new(command));
    }

    /// Estimate the serialized size of a `RespValue`
    fn estimate_arg_size(&self, value: &RespValue) -> usize {
        match value {
            RespValue::SimpleString(s) => s.len() + 3, // +str\r\n
            RespValue::Error(e) => e.len() + 3,        // -err\r\n
            RespValue::Integer(_) => 10,               // Rough estimate for :num\r\n
            RespValue::BulkString(b) => b.len() + 10,  // $len\r\ndata\r\n
            RespValue::Null => 5,                      // $-1\r\n
            RespValue::Array(arr) => {
                10 + arr
                    .iter()
                    .map(|item| self.estimate_arg_size(item))
                    .sum::<usize>()
            }
        }
    }

    /// Get the estimated total size
    pub fn estimated_size(&self) -> usize {
        self.estimated_size
    }

    /// Get the number of commands in the batch
    pub fn len(&self) -> usize {
        self.commands.len()
    }

    /// Check if the batch is empty
    pub fn is_empty(&self) -> bool {
        self.commands.is_empty()
    }
}

impl Default for BatchCommandBuilder {
    fn default() -> Self {
        Self::new()
    }
}

/// Memory pool for reusing temporary objects
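///
/// # Example
///
/// A minimal usage sketch (marked `ignore`, so it is not compiled as a doctest;
/// the import path is an assumption). Note that `put` does not reset the object,
/// so callers should clear reused buffers themselves if needed:
///
/// ```ignore
/// use redis_oxide::commands::optimized::MemoryPool;
///
/// let mut pool: MemoryPool<Vec<u8>> = MemoryPool::new(8, Vec::new);
/// let mut buf = pool.get();
/// buf.extend_from_slice(b"payload");
/// buf.clear(); // reset before returning it to the pool
/// pool.put(buf);
/// ```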
pub struct MemoryPool<T> {
    pool: Vec<T>,
    max_size: usize,
    create_fn: Box<dyn Fn() -> T + Send + Sync>,
}

impl<T> MemoryPool<T> {
    /// Create a new memory pool
    pub fn new<F>(max_size: usize, create_fn: F) -> Self
    where
        F: Fn() -> T + Send + Sync + 'static,
    {
        Self {
            pool: Vec::new(),
            max_size,
            create_fn: Box::new(create_fn),
        }
    }

    /// Get an object from the pool or create a new one
    pub fn get(&mut self) -> T {
        self.pool.pop().unwrap_or_else(|| (self.create_fn)())
    }

    /// Return an object to the pool
    pub fn put(&mut self, item: T) {
        if self.pool.len() < self.max_size {
            self.pool.push(item);
        }
        // If pool is full, just drop the item
    }

    /// Get pool statistics
    pub fn stats(&self) -> (usize, usize) {
        (self.pool.len(), self.max_size)
    }

    /// Clear the pool
    pub fn clear(&mut self) {
        self.pool.clear();
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_string_interner() {
        let mut interner = StringInterner::new(3);

        let s1 = interner.intern("hello");
        let s2 = interner.intern("hello");
        let s3 = interner.intern("world");

        // Same string should return same Arc
        assert!(Arc::ptr_eq(&s1, &s2));
        assert_eq!(s1.as_ref(), "hello");
        assert_eq!(s3.as_ref(), "world");

        let (size, max_size) = interner.stats();
        assert_eq!(size, 2);
        assert_eq!(max_size, 3);
    }
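
    // Added check of the LFU eviction path: when the cache is full, the least
    // frequently used entry is dropped while hotter entries keep their Arc.
    #[test]
    fn test_string_interner_lfu_eviction() {
        let mut interner = StringInterner::new(2);

        let hot = interner.intern("hot");
        let _ = interner.intern("hot"); // bump the access count for "hot"
        let _cold = interner.intern("cold");

        // Inserting a third string evicts "cold" (lowest access count).
        let _new = interner.intern("new");

        // "hot" is still cached, so interning it again returns the same Arc.
        let hot_again = interner.intern("hot");
        assert!(Arc::ptr_eq(&hot, &hot_again));

        let (size, max_size) = interner.stats();
        assert_eq!(size, 2);
        assert_eq!(max_size, 2);
    }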

    #[test]
    fn test_optimized_get_command() {
        let cmd = OptimizedGetCommand::new("test_key").with_cached_args();
        assert_eq!(cmd.command_name(), "GET");
        assert_eq!(cmd.keys(), vec![b"test_key"]);

        let args = <OptimizedGetCommand as Command>::args(&cmd);
        assert_eq!(args.len(), 1);
    }
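
    // Added smoke test for the global interner helpers. It initializes the
    // process-wide interner, so it is the only test that touches that state.
    #[test]
    fn test_global_intern_string() {
        init_string_interner(16);

        let a = intern_string("shared_key");
        let b = intern_string("shared_key");

        // Both calls should hand back the same interned allocation.
        assert!(Arc::ptr_eq(&a, &b));
    }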

    #[test]
    fn test_optimized_set_command() {
        let cmd = OptimizedSetCommand::new("key", "value")
            .expire(Duration::from_secs(60))
            .only_if_not_exists()
            .with_cached_args();

        assert_eq!(cmd.command_name(), "SET");
        let args = <OptimizedSetCommand as Command>::args(&cmd);
        assert_eq!(args.len(), 5); // key, value, EX, 60, NX
    }
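
    // Added coverage for the HSET builder, mirroring the GET/SET tests above.
    #[test]
    fn test_optimized_hset_command() {
        let cmd = OptimizedHSetCommand::new("hash", "field", "value").with_cached_args();

        assert_eq!(cmd.command_name(), "HSET");
        assert_eq!(cmd.keys(), vec![b"hash"]);

        let args = <OptimizedHSetCommand as Command>::args(&cmd);
        assert_eq!(args.len(), 3); // key, field, value
    }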

    #[test]
    fn test_batch_command_builder() {
        let builder = BatchCommandBuilder::with_capacity(10);
        assert_eq!(builder.len(), 0);
        assert!(builder.is_empty());

        // Test size estimation
        assert_eq!(builder.estimated_size(), 0);
    }
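
    // Added check that CommandCache only invokes the builder closure on a miss.
    #[test]
    fn test_command_cache_get_or_insert() {
        let mut cache = CommandCache::new(4);
        let mut calls = 0;

        let first = cache.get_or_insert("GET key", || {
            calls += 1;
            Bytes::from_static(b"*2\r\n$3\r\nGET\r\n$3\r\nkey\r\n")
        });
        let second = cache.get_or_insert("GET key", || {
            calls += 1;
            Bytes::from_static(b"unused")
        });

        assert_eq!(calls, 1); // second lookup was served from the cache
        assert_eq!(first, second);
        assert_eq!(cache.stats(), (1, 4));
    }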

    #[test]
    fn test_memory_pool() {
        let mut pool = MemoryPool::new(3, Vec::<i32>::new);

        let mut vec1 = pool.get();
        vec1.push(1);
        vec1.push(2);

        pool.put(vec1);

        // The pooled vector is reused as-is; `put` does not clear its contents.
        let vec2 = pool.get();
        assert_eq!(vec2, vec![1, 2]);

        let (size, max_size) = pool.stats();
        assert_eq!(size, 0); // the only pooled object was just taken back out
        assert_eq!(max_size, 3);
    }
}