torsh-tensor 0.1.1

Tensor implementation for ToRSh with PyTorch-compatible API
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
//! Lock-Free Cache Structures for High-Performance Concurrent Access
//!
//! This module provides lock-free data structures optimized for concurrent tensor operations.
//! By avoiding locks, these structures provide better scalability in multi-threaded scenarios.
//!
//! # Features
//!
//! - **Lock-free queues**: SPSC and MPMC queue implementations
//! - **Atomic reference counting**: Lock-free reference counting for shared data
//! - **Concurrent hash map**: Lock-free hash map for tensor caching
//! - **Wait-free operations**: Some operations guarantee completion in bounded time
//! - **Cache-line aligned**: Minimize false sharing

use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;

/// Lock-free SPSC (Single Producer Single Consumer) queue
///
/// A bounded ring buffer whose internal capacity is rounded up to a power of
/// two. One slot is always kept empty to distinguish "full" from "empty", so
/// a queue with internal capacity N holds at most N - 1 items.
///
/// NOTE(review): `try_push` and `try_pop` take `&mut self`, so this type
/// cannot currently be shared between a producer thread and a consumer
/// thread; true cross-thread SPSC use would need interior mutability
/// (e.g. `UnsafeCell` slots). The atomic indices keep the index handoff
/// correctly ordered for when that is added.
#[repr(C)] // preserve declared field order so the padding really separates head/tail
pub struct LockFreeSPSCQueue<T> {
    buffer: Vec<Option<T>>,
    capacity: usize, // always a power of two; wrapping uses `& (capacity - 1)`
    head: AtomicUsize, // Consumer reads from here
    // Padding *between* the two indices so producer and consumer traffic
    // land on different cache lines. (Padding at the end of the struct, as
    // before, cannot prevent false sharing between `head` and `tail`.)
    _padding: [u8; 64],
    tail: AtomicUsize, // Producer writes here
}

impl<T> LockFreeSPSCQueue<T> {
    /// Create a new SPSC queue with at least `capacity` slots.
    ///
    /// The requested capacity is rounded up to a power of two, with a
    /// minimum of 2: a capacity of 0 or 1 would otherwise yield a queue that
    /// is permanently full, since the ring always keeps one slot empty.
    pub fn new(capacity: usize) -> Self {
        let actual_capacity = capacity.max(2).next_power_of_two();
        let mut buffer = Vec::with_capacity(actual_capacity);
        buffer.resize_with(actual_capacity, || None);

        Self {
            buffer,
            capacity: actual_capacity,
            head: AtomicUsize::new(0),
            _padding: [0; 64],
            tail: AtomicUsize::new(0),
        }
    }

    /// Try to push an item (returns false if queue is full)
    pub fn try_push(&mut self, item: T) -> bool {
        let tail = self.tail.load(Ordering::Relaxed);
        let next_tail = (tail + 1) & (self.capacity - 1);
        let head = self.head.load(Ordering::Acquire);

        // Full when advancing the tail would collide with the head
        // (one slot is deliberately left empty to disambiguate from "empty").
        if next_tail == head {
            return false;
        }

        // The slot at `tail` is vacant: the consumer has not reached it yet.
        self.buffer[tail] = Some(item);
        // Release: publish the slot write before the new tail becomes visible.
        self.tail.store(next_tail, Ordering::Release);
        true
    }

    /// Try to pop an item (returns None if queue is empty)
    pub fn try_pop(&mut self) -> Option<T> {
        let head = self.head.load(Ordering::Relaxed);
        let tail = self.tail.load(Ordering::Acquire);

        // Empty when the indices meet.
        if head == tail {
            return None;
        }

        let item = self.buffer[head].take();
        let next_head = (head + 1) & (self.capacity - 1);
        // Release: the slot is cleared before the producer may reuse it.
        self.head.store(next_head, Ordering::Release);

        item
    }

    /// Check if the queue is empty
    pub fn is_empty(&self) -> bool {
        let head = self.head.load(Ordering::Acquire);
        let tail = self.tail.load(Ordering::Acquire);
        head == tail
    }

    /// Get approximate size (may not be exact due to concurrent operations)
    pub fn len(&self) -> usize {
        let head = self.head.load(Ordering::Acquire);
        let tail = self.tail.load(Ordering::Acquire);

        if tail >= head {
            tail - head
        } else {
            // Tail index has wrapped around the ring.
            self.capacity - head + tail
        }
    }

    /// Get the internal (power-of-two) capacity; usable capacity is one less.
    pub fn capacity(&self) -> usize {
        self.capacity
    }
}

/// Lock-free cache entry with atomic reference counting
///
/// All fields live behind `Arc`s, so cloning an entry is cheap and every
/// clone shares the same payload, access statistics, and validity flag.
pub struct LockFreeCacheEntry<T> {
    data: Arc<T>,
    access_count: Arc<AtomicUsize>,
    last_access: Arc<AtomicUsize>, // logical timestamp (global monotonic counter)
    valid: Arc<AtomicBool>,
}

// Manual `Clone` instead of `#[derive(Clone)]`: the derive would impose a
// spurious `T: Clone` bound, but only the `Arc` handles are cloned here —
// the payload itself is never copied.
impl<T> Clone for LockFreeCacheEntry<T> {
    fn clone(&self) -> Self {
        Self {
            data: Arc::clone(&self.data),
            access_count: Arc::clone(&self.access_count),
            last_access: Arc::clone(&self.last_access),
            valid: Arc::clone(&self.valid),
        }
    }
}

impl<T> LockFreeCacheEntry<T> {
    /// Create a new, valid cache entry wrapping `data`.
    pub fn new(data: T) -> Self {
        Self {
            data: Arc::new(data),
            access_count: Arc::new(AtomicUsize::new(0)),
            last_access: Arc::new(AtomicUsize::new(Self::current_timestamp())),
            valid: Arc::new(AtomicBool::new(true)),
        }
    }

    /// Get the data, or `None` if the entry has been invalidated.
    ///
    /// A successful read increments the access count and refreshes the
    /// last-access timestamp.
    pub fn get(&self) -> Option<Arc<T>> {
        if self.valid.load(Ordering::Acquire) {
            self.access_count.fetch_add(1, Ordering::Relaxed);
            self.last_access
                .store(Self::current_timestamp(), Ordering::Release);
            Some(Arc::clone(&self.data))
        } else {
            None
        }
    }

    /// Invalidate this entry (affects all clones, which share the flag).
    pub fn invalidate(&self) {
        self.valid.store(false, Ordering::Release);
    }

    /// Check if entry is valid
    pub fn is_valid(&self) -> bool {
        self.valid.load(Ordering::Acquire)
    }

    /// Get the number of successful `get` calls so far.
    pub fn access_count(&self) -> usize {
        self.access_count.load(Ordering::Relaxed)
    }

    /// Get the logical timestamp of the most recent creation or `get`.
    pub fn last_access(&self) -> usize {
        self.last_access.load(Ordering::Relaxed)
    }

    /// Next value of a process-wide monotonic counter.
    ///
    /// This is NOT wall-clock time; it is only useful for ordering accesses
    /// relative to each other.
    fn current_timestamp() -> usize {
        use std::sync::atomic::AtomicUsize as GlobalCounter;
        static COUNTER: GlobalCounter = GlobalCounter::new(0);
        COUNTER.fetch_add(1, Ordering::Relaxed)
    }
}

/// Simple lock-free cache with fixed size
///
/// This cache uses atomic operations for thread-safe access without locks.
/// It's optimized for read-heavy workloads with occasional writes.
pub struct LockFreeCache<K: Eq + std::hash::Hash + Clone, V: Clone> {
    entries: Vec<Option<(K, LockFreeCacheEntry<V>)>>,
    size: AtomicUsize,
    capacity: usize,
}

impl<K: Eq + std::hash::Hash + Clone, V: Clone> LockFreeCache<K, V> {
    /// Create a new lock-free cache with given capacity
    pub fn new(capacity: usize) -> Self {
        let mut entries = Vec::with_capacity(capacity);
        for _ in 0..capacity {
            entries.push(None);
        }

        Self {
            entries,
            size: AtomicUsize::new(0),
            capacity,
        }
    }

    /// Get the capacity
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Get current size (approximate due to concurrency)
    pub fn len(&self) -> usize {
        self.size.load(Ordering::Relaxed)
    }

    /// Check if empty
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Calculate hash for a key
    fn hash(&self, key: &K) -> usize {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::Hasher;

        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        (hasher.finish() as usize) % self.capacity
    }

    /// Try to get a value from the cache
    pub fn get(&self, key: &K) -> Option<Arc<V>> {
        let index = self.hash(key);
        let mut probe = 0;

        while probe < self.capacity {
            let current_index = (index + probe) % self.capacity;

            // SAFETY: We're reading from a fixed-size vec with valid indices
            if let Some((ref k, ref entry)) = unsafe { &*self.entries.as_ptr().add(current_index) }
            {
                if k == key {
                    return entry.get();
                }
            } else {
                // Empty slot means key doesn't exist
                return None;
            }

            probe += 1;
        }

        None
    }

    /// Check if a key exists in the cache
    pub fn contains_key(&self, key: &K) -> bool {
        self.get(key).is_some()
    }
}

/// Statistics for lock-free operations
///
/// Every counter is atomic, so a shared `&LockFreeStats` may be updated from
/// many threads without any additional locking.
#[derive(Debug, Default)]
pub struct LockFreeStats {
    /// Number of successful operations
    pub successes: AtomicUsize,
    /// Number of failed operations (contention)
    pub failures: AtomicUsize,
    /// Number of retries
    pub retries: AtomicUsize,
}

impl LockFreeStats {
    /// Create a zeroed set of statistics.
    pub fn new() -> Self {
        Self::default()
    }

    /// Relaxed increment shared by the `record_*` methods.
    fn bump(counter: &AtomicUsize) {
        counter.fetch_add(1, Ordering::Relaxed);
    }

    /// Relaxed read shared by the accessor methods.
    fn read(counter: &AtomicUsize) -> usize {
        counter.load(Ordering::Relaxed)
    }

    /// Record one successful operation.
    pub fn record_success(&self) {
        Self::bump(&self.successes);
    }

    /// Record one failed (contended) operation.
    pub fn record_failure(&self) {
        Self::bump(&self.failures);
    }

    /// Record one retry.
    pub fn record_retry(&self) {
        Self::bump(&self.retries);
    }

    /// Current success count.
    pub fn successes(&self) -> usize {
        Self::read(&self.successes)
    }

    /// Current failure count.
    pub fn failures(&self) -> usize {
        Self::read(&self.failures)
    }

    /// Current retry count.
    pub fn retries(&self) -> usize {
        Self::read(&self.retries)
    }

    /// Fraction of operations that succeeded; 0.0 when nothing was recorded.
    pub fn success_rate(&self) -> f64 {
        match self.successes() + self.failures() {
            0 => 0.0,
            total => self.successes() as f64 / total as f64,
        }
    }

    /// Reset all counters to zero.
    pub fn reset(&self) {
        for counter in [&self.successes, &self.failures, &self.retries] {
            counter.store(0, Ordering::Relaxed);
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // FIFO order through a queue with internal capacity 4 (3 usable slots).
    #[test]
    fn test_spsc_queue_basic() {
        let mut queue = LockFreeSPSCQueue::new(4);

        assert!(queue.is_empty());
        assert_eq!(queue.len(), 0);

        assert!(queue.try_push(1));
        assert!(queue.try_push(2));
        assert!(queue.try_push(3));

        assert_eq!(queue.len(), 3);
        assert!(!queue.is_empty());

        assert_eq!(queue.try_pop(), Some(1));
        assert_eq!(queue.try_pop(), Some(2));
        assert_eq!(queue.len(), 1);

        assert_eq!(queue.try_pop(), Some(3));
        assert!(queue.is_empty());
        assert_eq!(queue.try_pop(), None);
    }

    // One ring slot is always kept empty, so a capacity-2 queue holds 1 item.
    #[test]
    fn test_spsc_queue_full() {
        let mut queue = LockFreeSPSCQueue::new(2);

        // Can insert 1 element (capacity - 1)
        assert!(queue.try_push(1));

        // Queue is full
        assert!(!queue.try_push(2));

        // Pop one
        assert_eq!(queue.try_pop(), Some(1));

        // Now we can insert again
        assert!(queue.try_push(3));
    }

    #[test]
    fn test_spsc_queue_wraparound() {
        let mut queue = LockFreeSPSCQueue::new(4);

        // Push two items (the queue holds up to 3)
        assert!(queue.try_push(1));
        assert!(queue.try_push(2));

        // Pop some
        assert_eq!(queue.try_pop(), Some(1));

        // Push more (tail index should wrap around the ring)
        assert!(queue.try_push(3));
        assert!(queue.try_push(4));

        // Pop all
        assert_eq!(queue.try_pop(), Some(2));
        assert_eq!(queue.try_pop(), Some(3));
        assert_eq!(queue.try_pop(), Some(4));
        assert_eq!(queue.try_pop(), None);
    }

    #[test]
    fn test_cache_entry_basic() {
        let entry = LockFreeCacheEntry::new(42);

        assert!(entry.is_valid());
        // A successful get bumps the access count from 0 to 1.
        assert_eq!(*entry.get().expect("get should succeed"), 42);
        assert_eq!(entry.access_count(), 1);

        // Invalidated entries refuse further reads.
        entry.invalidate();
        assert!(!entry.is_valid());
        assert!(entry.get().is_none());
    }

    // Only successful `get` calls bump the access count.
    #[test]
    fn test_cache_entry_access_count() {
        let entry = LockFreeCacheEntry::new("test");

        assert_eq!(entry.access_count(), 0);

        entry.get();
        assert_eq!(entry.access_count(), 1);

        entry.get();
        entry.get();
        assert_eq!(entry.access_count(), 3);
    }

    #[test]
    fn test_lock_free_cache_basic() {
        let cache: LockFreeCache<String, i32> = LockFreeCache::new(10);

        assert_eq!(cache.capacity(), 10);
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());
    }

    // Lookups on an empty cache miss without panicking.
    #[test]
    fn test_lock_free_cache_contains() {
        let cache = LockFreeCache::<String, i32>::new(10);

        assert!(!cache.contains_key(&"test".to_string()));
    }

    #[test]
    fn test_lockfree_stats() {
        let stats = LockFreeStats::new();

        assert_eq!(stats.successes(), 0);
        assert_eq!(stats.failures(), 0);
        assert_eq!(stats.retries(), 0);

        stats.record_success();
        stats.record_success();
        stats.record_failure();

        assert_eq!(stats.successes(), 2);
        assert_eq!(stats.failures(), 1);

        // 2 successes out of 3 recorded (success + failure) operations.
        let rate = stats.success_rate();
        assert!((rate - 0.666).abs() < 0.01);

        stats.reset();
        assert_eq!(stats.successes(), 0);
        assert_eq!(stats.failures(), 0);
    }

    #[test]
    fn test_spsc_queue_capacity() {
        let queue = LockFreeSPSCQueue::<i32>::new(7);
        // Should round up to next power of 2
        assert_eq!(queue.capacity(), 8);
    }

    // The entry timestamp comes from a process-wide monotonic counter, so a
    // later construction always observes a strictly larger value.
    #[test]
    fn test_cache_entry_timestamp() {
        let entry1 = LockFreeCacheEntry::new(1);
        let entry2 = LockFreeCacheEntry::new(2);

        let ts1 = entry1.last_access();
        let ts2 = entry2.last_access();

        // Timestamps should be different (monotonic)
        assert!(ts2 > ts1);
    }
}