prax-query 0.9.0

Type-safe query builder for the Prax ORM
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
//! Allocation tracking and statistics.
//!
//! Provides fine-grained tracking of memory allocations for leak detection
//! and performance analysis.

use parking_lot::{Mutex, RwLock};
use std::alloc::{GlobalAlloc, Layout, System};
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::time::{Duration, Instant};

/// Global allocation tracker instance.
pub static GLOBAL_TRACKER: std::sync::LazyLock<AllocationTracker> =
    std::sync::LazyLock::new(AllocationTracker::new);

// ============================================================================
// Allocation Record
// ============================================================================

/// Record of a single allocation.
#[derive(Debug, Clone)]
pub struct AllocationRecord {
    /// Unique allocation ID.
    pub id: u64,
    /// Size in bytes.
    pub size: usize,
    /// Memory alignment.
    pub align: usize,
    /// Timestamp when allocated.
    pub timestamp: Instant,
    /// Optional backtrace (expensive, only in debug mode).
    #[cfg(debug_assertions)]
    pub backtrace: Option<String>,
}

impl AllocationRecord {
    /// Create a new allocation record.
    pub fn new(id: u64, size: usize, align: usize) -> Self {
        Self {
            id,
            size,
            align,
            timestamp: Instant::now(),
            #[cfg(debug_assertions)]
            backtrace: if super::is_profiling_enabled() {
                Some(format!("{:?}", std::backtrace::Backtrace::capture()))
            } else {
                None
            },
        }
    }

    /// Get the age of this allocation.
    pub fn age(&self) -> Duration {
        self.timestamp.elapsed()
    }
}

// ============================================================================
// Allocation Tracker
// ============================================================================

/// Tracks memory allocations and deallocations.
#[derive(Debug)]
pub struct AllocationTracker {
    /// Counter for allocation IDs.
    next_id: AtomicU64,
    /// Total allocations.
    total_allocations: AtomicU64,
    /// Total deallocations.
    total_deallocations: AtomicU64,
    /// Total bytes allocated.
    total_bytes_allocated: AtomicUsize,
    /// Total bytes deallocated.
    total_bytes_deallocated: AtomicUsize,
    /// Current bytes allocated.
    current_bytes: AtomicUsize,
    /// Peak bytes allocated.
    peak_bytes: AtomicUsize,
    /// Active allocations (ptr -> record).
    active: RwLock<HashMap<usize, AllocationRecord>>,
    /// Allocation size histogram.
    size_histogram: Mutex<SizeHistogram>,
    /// Start time.
    start_time: Instant,
}

impl AllocationTracker {
    /// Create a new allocation tracker.
    pub fn new() -> Self {
        Self {
            next_id: AtomicU64::new(1),
            total_allocations: AtomicU64::new(0),
            total_deallocations: AtomicU64::new(0),
            total_bytes_allocated: AtomicUsize::new(0),
            total_bytes_deallocated: AtomicUsize::new(0),
            current_bytes: AtomicUsize::new(0),
            peak_bytes: AtomicUsize::new(0),
            active: RwLock::new(HashMap::new()),
            size_histogram: Mutex::new(SizeHistogram::new()),
            start_time: Instant::now(),
        }
    }

    /// Record an allocation.
    pub fn record_alloc(&self, ptr: usize, size: usize, align: usize) {
        if !super::is_profiling_enabled() {
            return;
        }

        let id = self.next_id.fetch_add(1, Ordering::Relaxed);
        let record = AllocationRecord::new(id, size, align);

        // Update counters
        self.total_allocations.fetch_add(1, Ordering::Relaxed);
        self.total_bytes_allocated
            .fetch_add(size, Ordering::Relaxed);

        let current = self.current_bytes.fetch_add(size, Ordering::Relaxed) + size;

        // Update peak
        let mut peak = self.peak_bytes.load(Ordering::Relaxed);
        while current > peak {
            match self.peak_bytes.compare_exchange_weak(
                peak,
                current,
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                Err(p) => peak = p,
            }
        }

        // Record allocation
        self.active.write().insert(ptr, record);

        // Update histogram
        self.size_histogram.lock().record(size);
    }

    /// Record a deallocation.
    pub fn record_dealloc(&self, ptr: usize, size: usize) {
        if !super::is_profiling_enabled() {
            return;
        }

        self.total_deallocations.fetch_add(1, Ordering::Relaxed);
        self.total_bytes_deallocated
            .fetch_add(size, Ordering::Relaxed);
        self.current_bytes.fetch_sub(size, Ordering::Relaxed);

        // Remove from active
        self.active.write().remove(&ptr);
    }

    /// Get current statistics.
    pub fn stats(&self) -> AllocationStats {
        AllocationStats {
            total_allocations: self.total_allocations.load(Ordering::Relaxed),
            total_deallocations: self.total_deallocations.load(Ordering::Relaxed),
            total_bytes_allocated: self.total_bytes_allocated.load(Ordering::Relaxed),
            total_bytes_deallocated: self.total_bytes_deallocated.load(Ordering::Relaxed),
            current_allocations: self.active.read().len() as u64,
            current_bytes: self.current_bytes.load(Ordering::Relaxed),
            peak_bytes: self.peak_bytes.load(Ordering::Relaxed),
            uptime: self.start_time.elapsed(),
        }
    }

    /// Get active allocations.
    pub fn active_allocations(&self) -> Vec<AllocationRecord> {
        self.active.read().values().cloned().collect()
    }

    /// Get allocations older than a threshold (potential leaks).
    pub fn old_allocations(&self, threshold: Duration) -> Vec<AllocationRecord> {
        self.active
            .read()
            .values()
            .filter(|r| r.age() > threshold)
            .cloned()
            .collect()
    }

    /// Get allocation size histogram.
    pub fn histogram(&self) -> SizeHistogram {
        self.size_histogram.lock().clone()
    }

    /// Reset all tracking state.
    pub fn reset(&self) {
        self.total_allocations.store(0, Ordering::Relaxed);
        self.total_deallocations.store(0, Ordering::Relaxed);
        self.total_bytes_allocated.store(0, Ordering::Relaxed);
        self.total_bytes_deallocated.store(0, Ordering::Relaxed);
        self.current_bytes.store(0, Ordering::Relaxed);
        self.peak_bytes.store(0, Ordering::Relaxed);
        self.active.write().clear();
        *self.size_histogram.lock() = SizeHistogram::new();
    }
}

impl Default for AllocationTracker {
    fn default() -> Self {
        Self::new()
    }
}

// ============================================================================
// Allocation Statistics
// ============================================================================

/// Summary statistics for allocations.
#[derive(Debug, Clone, Default)]
pub struct AllocationStats {
    /// Total number of allocations.
    pub total_allocations: u64,
    /// Total number of deallocations.
    pub total_deallocations: u64,
    /// Total bytes ever allocated.
    pub total_bytes_allocated: usize,
    /// Total bytes ever deallocated.
    pub total_bytes_deallocated: usize,
    /// Current number of active allocations.
    pub current_allocations: u64,
    /// Current bytes allocated.
    pub current_bytes: usize,
    /// Peak bytes allocated.
    pub peak_bytes: usize,
    /// Time since tracking started.
    pub uptime: Duration,
}

impl AllocationStats {
    /// Get net allocations (allocs - deallocs).
    pub fn net_allocations(&self) -> i64 {
        self.total_allocations as i64 - self.total_deallocations as i64
    }

    /// Get allocation rate (allocations per second).
    pub fn allocation_rate(&self) -> f64 {
        if self.uptime.as_secs_f64() > 0.0 {
            self.total_allocations as f64 / self.uptime.as_secs_f64()
        } else {
            0.0
        }
    }

    /// Get average allocation size.
    pub fn avg_allocation_size(&self) -> usize {
        if self.total_allocations > 0 {
            self.total_bytes_allocated / self.total_allocations as usize
        } else {
            0
        }
    }

    /// Check if there are potential leaks.
    pub fn has_potential_leaks(&self) -> bool {
        self.net_allocations() > 0 && self.current_bytes > 0
    }
}

// ============================================================================
// Size Histogram
// ============================================================================

/// Histogram of allocation sizes.
#[derive(Debug, Clone, Default)]
pub struct SizeHistogram {
    /// Buckets: [0-64], [64-256], [256-1K], [1K-4K], [4K-16K], [16K-64K], [64K+]
    buckets: [u64; 7],
    /// Total allocations.
    total: u64,
}

impl SizeHistogram {
    /// Create a new histogram.
    pub fn new() -> Self {
        Self::default()
    }

    /// Record an allocation size.
    pub fn record(&mut self, size: usize) {
        let bucket = match size {
            0..=64 => 0,
            65..=256 => 1,
            257..=1024 => 2,
            1025..=4096 => 3,
            4097..=16384 => 4,
            16385..=65536 => 5,
            _ => 6,
        };
        self.buckets[bucket] += 1;
        self.total += 1;
    }

    /// Get bucket counts.
    pub fn buckets(&self) -> &[u64; 7] {
        &self.buckets
    }

    /// Get bucket labels.
    pub fn bucket_labels() -> &'static [&'static str; 7] {
        &[
            "0-64B", "64-256B", "256B-1K", "1K-4K", "4K-16K", "16K-64K", "64K+",
        ]
    }

    /// Get the most common bucket.
    pub fn most_common_bucket(&self) -> (&'static str, u64) {
        let labels = Self::bucket_labels();
        let (idx, count) = self
            .buckets
            .iter()
            .enumerate()
            .max_by_key(|(_, c)| *c)
            .unwrap();
        (labels[idx], *count)
    }

    /// Get percentage for each bucket.
    pub fn percentages(&self) -> [f64; 7] {
        if self.total == 0 {
            return [0.0; 7];
        }
        let mut pcts = [0.0; 7];
        for (i, &count) in self.buckets.iter().enumerate() {
            pcts[i] = (count as f64 / self.total as f64) * 100.0;
        }
        pcts
    }
}

// ============================================================================
// Tracked Allocator
// ============================================================================

/// A wrapper allocator that tracks allocations.
///
/// Use this as the global allocator to enable automatic tracking:
///
/// ```rust,ignore
/// use prax_query::profiling::TrackedAllocator;
///
/// #[global_allocator]
/// static ALLOC: TrackedAllocator = TrackedAllocator::new();
/// ```
pub struct TrackedAllocator {
    inner: System,
}

impl TrackedAllocator {
    /// Create a new tracked allocator.
    pub const fn new() -> Self {
        Self { inner: System }
    }
}

impl Default for TrackedAllocator {
    fn default() -> Self {
        Self::new()
    }
}

unsafe impl GlobalAlloc for TrackedAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // SAFETY: Delegating to System allocator which is always safe to call
        let ptr = unsafe { self.inner.alloc(layout) };
        if !ptr.is_null() {
            GLOBAL_TRACKER.record_alloc(ptr as usize, layout.size(), layout.align());
        }
        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        GLOBAL_TRACKER.record_dealloc(ptr as usize, layout.size());
        // SAFETY: Delegating to System allocator which is always safe to call
        unsafe { self.inner.dealloc(ptr, layout) };
    }

    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        // Record deallocation of old
        GLOBAL_TRACKER.record_dealloc(ptr as usize, layout.size());

        // SAFETY: Delegating to System allocator which is always safe to call
        let new_ptr = unsafe { self.inner.realloc(ptr, layout, new_size) };

        // Record allocation of new
        if !new_ptr.is_null() {
            GLOBAL_TRACKER.record_alloc(new_ptr as usize, new_size, layout.align());
        }

        new_ptr
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_allocation_record() {
        let record = AllocationRecord::new(1, 1024, 8);
        assert_eq!(record.id, 1);
        assert_eq!(record.size, 1024);
        assert_eq!(record.align, 8);
    }

    #[test]
    #[ignore = "flaky in CI due to global profiling state interference"]
    fn test_allocation_tracker() {
        // Skip in CI: other tests in this module can toggle the
        // process-wide profiling flag between our `enable_profiling()`
        // call and our `record_alloc()` calls, at which point
        // `record_alloc` no-ops and the stats don't match. The test is
        // still useful locally where the binary runs in isolation.
        if std::env::var("CI").is_ok() {
            return;
        }
        super::super::enable_profiling();

        let tracker = AllocationTracker::new();

        tracker.record_alloc(0x1000, 100, 8);
        tracker.record_alloc(0x2000, 200, 8);

        let stats = tracker.stats();
        assert_eq!(stats.total_allocations, 2);
        assert_eq!(stats.total_bytes_allocated, 300);
        assert_eq!(stats.current_bytes, 300);

        tracker.record_dealloc(0x1000, 100);

        let stats = tracker.stats();
        assert_eq!(stats.total_deallocations, 1);
        assert_eq!(stats.current_bytes, 200);

        super::super::disable_profiling();
    }

    #[test]
    fn test_size_histogram() {
        let mut hist = SizeHistogram::new();

        hist.record(32); // 0-64
        hist.record(128); // 64-256
        hist.record(512); // 256-1K
        hist.record(32); // 0-64

        assert_eq!(hist.buckets[0], 2); // 0-64
        assert_eq!(hist.buckets[1], 1); // 64-256
        assert_eq!(hist.buckets[2], 1); // 256-1K
    }

    #[test]
    fn test_stats_calculations() {
        let stats = AllocationStats {
            total_allocations: 100,
            total_deallocations: 80,
            total_bytes_allocated: 10000,
            total_bytes_deallocated: 8000,
            current_allocations: 20,
            current_bytes: 2000,
            peak_bytes: 5000,
            uptime: Duration::from_secs(10),
        };

        assert_eq!(stats.net_allocations(), 20);
        assert_eq!(stats.allocation_rate(), 10.0);
        assert_eq!(stats.avg_allocation_size(), 100);
        assert!(stats.has_potential_leaks());
    }
}