// slop_alloc/buffer.rs
1//! Fixed-capacity buffer with customizable memory backends.
2//!
3//! This module provides a `Buffer<T, A>` type, which is a contiguous array type
4//! with heap-allocated contents. Unlike `Vec<T>`, buffers have a fixed capacity
5//! determined at creation time and cannot grow beyond this capacity.
6//!
7//! # Key Differences from `Vec<T>`
8//!
9//! - **Fixed Capacity**: Buffers cannot reallocate to grow beyond their initial capacity
10//! - **Backend Support**: Works with different memory allocators (CPU, GPU, etc.)
11//! - **CPU Backend Exception**: Only `Buffer<T, CpuBackend>` supports capacity growth through
12//! conversion to/from `Vec<T>`
13//!
14//! # Examples
15//!
16//! ```rust,ignore
17//! let mut buffer: Buffer<i32> = Buffer::with_capacity(10);
18//! // The buffer can hold up to 10 elements
19//! assert_eq!(buffer.len(), 0);
20//! assert_eq!(buffer.capacity(), 10);
21//!
22//! // For non-CPU backends, this is the maximum capacity
23//! // Attempting to exceed it will panic
24//! ```
25
26use serde::{Deserialize, Serialize, Serializer};
27use slop_algebra::{ExtensionField, Field};
28
29use crate::{
30 backend::{Backend, CpuBackend, GLOBAL_CPU_BACKEND},
31 mem::{CopyDirection, CopyError},
32 slice::Slice,
33 HasBackend, Init, RawBuffer, TryReserveError,
34};
35use std::{
36 alloc::Layout,
37 mem::{ManuallyDrop, MaybeUninit},
38 ops::{
39 Deref, DerefMut, Index, IndexMut, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo,
40 RangeToInclusive,
41 },
42};
43
/// A fixed-capacity buffer with heap-allocated contents.
///
/// This type provides a contiguous array with a fixed maximum capacity. For most backends,
/// the capacity is immutable after creation. Only `Buffer<T, CpuBackend>` can grow by
/// converting to/from `Vec<T>` internally.
///
/// # Type Parameters
///
/// - `T`: The type of elements stored in the buffer
/// - `A`: The backend allocator type (defaults to `CpuBackend`)
///
/// # Guarantees
///
/// - The memory it points to is allocated by the backend allocator `A`
/// - `length` <= `capacity`
/// - The first `length` values are properly initialized
/// - The capacity remains fixed for the lifetime of the buffer (except for `CpuBackend`)
#[derive(Debug)]
#[repr(C)]
pub struct Buffer<T, A: Backend = CpuBackend> {
    /// Owns the allocation: pointer, capacity, and the allocator handle.
    buf: RawBuffer<T, A>,
    /// Number of initialized elements; invariant: `len <= buf.capacity()`.
    len: usize,
}
67
68unsafe impl<T, A: Backend> Send for Buffer<T, A> {}
69unsafe impl<T, A: Backend> Sync for Buffer<T, A> {}
70
71impl<T, A> Buffer<T, A>
72where
73 A: Backend,
74{
    /// Constructs a new, empty `Buffer<T, A>` with the specified capacity
    /// using the provided allocator.
    ///
    /// The buffer will be able to hold exactly `capacity` elements. For non-CPU
    /// backends, this capacity is fixed and cannot be exceeded. Attempting to
    /// add more elements than the capacity will result in a panic.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` bytes. See
    /// [`try_with_capacity_in`] for a fallible alternative that reports
    /// allocation failure instead of panicking.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// use crate::{Buffer, CpuBackend, GLOBAL_CPU_BACKEND};
    ///
    /// let mut buffer = Buffer::with_capacity_in(10, GLOBAL_CPU_BACKEND);
    ///
    /// // The buffer contains no items, even though it has capacity for more
    /// assert_eq!(buffer.len(), 0);
    /// assert_eq!(buffer.capacity(), 10);
    /// ```
    ///
    /// [`try_with_capacity_in`]: Buffer::try_with_capacity_in
    #[inline]
    #[must_use]
    pub fn with_capacity_in(capacity: usize, allocator: A) -> Self {
        let buf = RawBuffer::with_capacity_in(capacity, allocator);
        Self { buf, len: 0 }
    }
103
    /// Tries to construct a new, empty `Buffer<T, A>` with the specified
    /// capacity using the provided allocator.
    ///
    /// This is the fallible version of [`with_capacity_in`]. It returns an error
    /// if the allocation fails instead of panicking. On success, the returned
    /// buffer is empty (`len() == 0`).
    ///
    /// # Errors
    ///
    /// Returns `Err(TryReserveError)` if the allocator fails to allocate memory.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// use crate::{Buffer, CpuBackend, GLOBAL_CPU_BACKEND};
    ///
    /// match Buffer::<i32, CpuBackend>::try_with_capacity_in(10, GLOBAL_CPU_BACKEND) {
    ///     Ok(buffer) => {
    ///         assert_eq!(buffer.len(), 0);
    ///         assert_eq!(buffer.capacity(), 10);
    ///     }
    ///     Err(e) => println!("Failed to allocate: {:?}", e),
    /// }
    /// ```
    ///
    /// [`with_capacity_in`]: Buffer::with_capacity_in
    #[inline]
    pub fn try_with_capacity_in(capacity: usize, allocator: A) -> Result<Self, TryReserveError> {
        let buf = RawBuffer::try_with_capacity_in(capacity, allocator)?;
        Ok(Self { buf, len: 0 })
    }
134
    /// Returns a new buffer from a pointer, length, and capacity.
    ///
    /// # Safety
    ///
    /// The pointer must be valid, it must have allocated memory in the size of
    /// capacity * size_of<T>, and the first `len` elements of the buffer must be initialized or
    /// about to be initialized in a foreign call.
    ///
    /// Additionally, `length <= capacity` must hold, and the allocation must
    /// have been made by `alloc` (or an allocator compatible with it), since
    /// the returned buffer will release the memory through `alloc` on drop.
    pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self {
        Self { buf: RawBuffer::from_raw_parts_in(ptr, capacity, alloc), len: length }
    }
145
    /// Returns the number of elements in the buffer, also referred to as its 'length'.
    ///
    /// This is always less than or equal to [`capacity`](Buffer::capacity).
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// let buffer = buffer![1, 2, 3];
    /// assert_eq!(buffer.len(), 3);
    /// ```
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }
158
    /// Returns the total number of elements the buffer can hold.
    ///
    /// For non-CPU backends, this is a fixed value that cannot change.
    /// For CPU backends, this may increase if operations like [`push`] or
    /// [`extend_from_slice`] trigger internal reallocation through `Vec`
    /// conversion.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// let buffer: Buffer<i32> = Buffer::with_capacity(10);
    /// assert_eq!(buffer.capacity(), 10);
    /// ```
    ///
    /// [`push`]: Buffer::push
    /// [`extend_from_slice`]: Buffer::extend_from_slice
    #[inline]
    pub fn capacity(&self) -> usize {
        self.buf.capacity()
    }
175
    /// # Safety
    ///
    /// This function is unsafe because it enables bypassing the lifetime of the buffer.
    /// The returned value aliases `self`'s allocation; it is wrapped in
    /// `ManuallyDrop` so it will not free the memory, and the caller must
    /// ensure it is never used after `self` is dropped or mutated.
    #[inline]
    pub unsafe fn owned_unchecked(&self) -> ManuallyDrop<Self> {
        self.owned_unchecked_in(self.allocator().clone())
    }
183
    /// # Safety
    ///
    /// This function is unsafe because it enables bypassing the lifetime of the buffer.
    /// Like [`owned_unchecked`](Buffer::owned_unchecked), but the aliasing
    /// handle is constructed with the provided `allocator` instead of a clone
    /// of `self`'s allocator.
    #[inline]
    pub unsafe fn owned_unchecked_in(&self, allocator: A) -> ManuallyDrop<Self> {
        // Reconstruct a second owner over the same raw parts; `ManuallyDrop`
        // prevents a double free when this alias goes out of scope.
        let ptr = self.as_ptr() as *mut T;
        let len = self.len();
        let cap = self.capacity();
        ManuallyDrop::new(Self::from_raw_parts(ptr, len, cap, allocator))
    }
194
    /// Returns `true` if the buffer contains no elements.
    ///
    /// Equivalent to `self.len() == 0`; the capacity is irrelevant here.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// let mut buffer = Buffer::with_capacity(10);
    /// assert!(buffer.is_empty());
    ///
    /// buffer.push(1);
    /// assert!(!buffer.is_empty());
    /// ```
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
210
    /// Returns a raw pointer to the buffer's elements.
    ///
    /// The caller must ensure that the buffer outlives the pointer this function
    /// returns, or else it will end up pointing to garbage. For CPU backends,
    /// modifying the buffer may cause its buffer to be reallocated, which would
    /// also make any pointers to it invalid.
    ///
    /// The pointer is valid for reads of up to `len() * size_of::<T>()` bytes.
    ///
    /// NOTE(review): for non-CPU backends this is presumably a device pointer
    /// and must not be dereferenced on the host — confirm against the backend's
    /// documentation.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// let buffer = buffer![1, 2, 4];
    /// let buffer_ptr = buffer.as_ptr();
    ///
    /// unsafe {
    ///     for i in 0..buffer.len() {
    ///         assert_eq!(*buffer_ptr.add(i), [1, 2, 4][i]);
    ///     }
    /// }
    /// ```
    #[inline]
    pub fn as_ptr(&self) -> *const T {
        self.buf.ptr()
    }
236
    /// Returns an unsafe mutable pointer to the buffer's elements.
    ///
    /// The caller must ensure that the buffer outlives the pointer this function
    /// returns, or else it will end up pointing to garbage. For CPU backends,
    /// modifying the buffer may cause its buffer to be reallocated, which would
    /// also make any pointers to it invalid.
    ///
    /// NOTE(review): for non-CPU backends this is presumably a device pointer
    /// and must not be dereferenced on the host — confirm against the backend's
    /// documentation.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// let mut buffer = buffer![1, 2, 4];
    /// let buffer_ptr = buffer.as_mut_ptr();
    ///
    /// unsafe {
    ///     for i in 0..buffer.len() {
    ///         *buffer_ptr.add(i) = i as i32;
    ///     }
    /// }
    ///
    /// assert_eq!(&*buffer, &[0, 1, 2]);
    /// ```
    #[inline]
    pub fn as_mut_ptr(&mut self) -> *mut T {
        self.buf.ptr()
    }
262
    /// Forces the length of the buffer to `new_len`.
    ///
    /// This is a low-level operation that maintains none of the normal invariants
    /// of the type. Normally changing the length of a buffer is done using one of
    /// the safe operations instead, such as [`push`], [`pop`], [`extend_from_slice`],
    /// or [`clear`].
    ///
    /// # Safety
    ///
    /// - `new_len` must be less than or equal to [`capacity()`].
    /// - The elements at `old_len..new_len` must be initialized.
    ///
    /// # Examples
    ///
    /// This method can be useful for situations in which the buffer is serving as a
    /// buffer for other code, particularly over FFI. As an example, if FFI code writes
    /// values into the buffer, then this can be used to change the length of the buffer
    /// to match the number of elements written.
    ///
    /// ```rust,ignore
    /// let mut buffer = Buffer::with_capacity(3);
    /// unsafe {
    ///     let ptr = buffer.as_mut_ptr();
    ///     // Overwrite memory with 3, 2, 1
    ///     ptr.write(3);
    ///     ptr.add(1).write(2);
    ///     ptr.add(2).write(1);
    ///
    ///     // Set the length to 3 after writing
    ///     buffer.set_len(3);
    /// }
    /// assert_eq!(&*buffer, &[3, 2, 1]);
    /// ```
    ///
    /// [`capacity()`]: Buffer::capacity
    /// [`push`]: Buffer::push
    /// [`pop`]: Buffer::pop
    /// [`extend_from_slice`]: Buffer::extend_from_slice
    /// [`clear`]: Buffer::clear
    #[inline]
    pub unsafe fn set_len(&mut self, new_len: usize) {
        // Plain field write: upholding the invariants above is entirely the
        // caller's responsibility; nothing is checked here, even in debug.
        self.len = new_len;
    }
306
    /// Assumes that the entire capacity of the buffer is initialized.
    ///
    /// This sets the buffer's length to its capacity, effectively marking all
    /// allocated memory as containing valid values of type `T`.
    ///
    /// # Safety
    ///
    /// The caller must ensure that all elements up to the buffer's capacity are
    /// properly initialized before calling this method. Calling this on a buffer
    /// with uninitialized memory will lead to undefined behavior when those
    /// elements are accessed.
    ///
    /// This is particularly dangerous for types with drop implementations, as
    /// dropping uninitialized memory can cause crashes or worse.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// let mut buffer: Buffer<u8> = Buffer::with_capacity(4);
    ///
    /// unsafe {
    ///     // Initialize all 4 bytes
    ///     buffer.as_mut_ptr().write_bytes(0, 4);
    ///
    ///     // Now we can safely assume all memory is initialized
    ///     buffer.assume_init();
    /// }
    ///
    /// assert_eq!(buffer.len(), 4);
    /// assert_eq!(&*buffer, &[0, 0, 0, 0]);
    /// ```
    #[inline]
    pub unsafe fn assume_init(&mut self) {
        // Shorthand for `set_len(capacity())`; the caller vouches for full
        // initialization of the allocation.
        let cap = self.capacity();
        self.set_len(cap);
    }
343
    /// Copies all elements from `src` into `self`, using `copy_nonoverlapping`.
    ///
    /// The length of `src` must be the same as `self` (the current *length*,
    /// not the capacity). This method overwrites the entire contents of the
    /// buffer.
    ///
    /// # Panics
    ///
    /// This function will panic if the two slices have different lengths.
    ///
    /// # Errors
    ///
    /// Returns `Err(CopyError)` if the allocator fails to perform the copy operation.
    ///
    /// # Safety
    ///
    /// This operation is potentially asynchronous. The caller must ensure the memory
    /// of the source slice remains valid for the duration of the operation. For backends
    /// that perform asynchronous operations (like GPU backends), the source memory must
    /// not be freed or modified until the operation completes.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// let mut buffer = Buffer::with_capacity(3);
    /// unsafe {
    ///     buffer.set_len(3); // Must set length first
    ///     buffer.copy_from_host_slice(&[1, 2, 3]).unwrap();
    /// }
    /// assert_eq!(&*buffer, &[1, 2, 3]);
    /// ```
    #[track_caller]
    pub unsafe fn copy_from_host_slice(&mut self, src: &[T]) -> Result<(), CopyError> {
        // The panic code path was put into a cold function to not bloat the
        // call site.
        #[inline(never)]
        #[cold]
        #[track_caller]
        fn len_mismatch_fail(dst_len: usize, src_len: usize) -> ! {
            panic!(
                "source slice length ({src_len}) does not match destination slice length ({dst_len})",
            );
        }

        if self.len() != src.len() {
            len_mismatch_fail(self.len(), src.len());
        }

        let layout = Layout::array::<T>(src.len()).unwrap();

        // SAFETY: the length check above guarantees both regions span exactly
        // `src.len()` elements, and the two allocations cannot overlap (host
        // slice vs. backend-owned buffer).
        unsafe {
            self.buf.allocator().copy_nonoverlapping(
                src.as_ptr() as *const u8,
                self.buf.ptr() as *mut u8,
                layout.size(),
                CopyDirection::HostToDevice,
            )
        }
    }
402
    /// Returns a reference to the underlying allocator.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// use crate::{Buffer, CpuBackend, GLOBAL_CPU_BACKEND};
    ///
    /// let buffer: Buffer<i32, CpuBackend> = Buffer::with_capacity(10);
    /// let allocator = buffer.allocator();
    /// // Can use the allocator reference for other operations
    /// ```
    #[inline]
    pub fn allocator(&self) -> &A {
        // Delegates to the raw buffer, which stores the allocator handle.
        self.buf.allocator()
    }
418
    /// Returns a mutable reference to the underlying allocator.
    ///
    /// # Safety
    ///
    /// This method is unsafe because modifying the allocator while the buffer
    /// is in use could lead to undefined behavior. The caller must ensure that
    /// any modifications to the allocator do not invalidate the buffer's
    /// existing allocations or violate any invariants (in particular, the
    /// buffer's memory must remain freeable through the modified allocator).
    #[inline]
    pub unsafe fn allocator_mut(&mut self) -> &mut A {
        self.buf.allocator_mut()
    }
431
432 /// Appends all elements from a device slice into `self`.
433 ///
434 /// This extends the buffer by copying elements from another slice on the same device.
435 /// The operation uses `copy_nonoverlapping` and is typically more efficient than
436 /// host-to-device copies.
437 ///
438 /// # Panics
439 ///
440 /// This function will panic if the resulting length exceeds the buffer's capacity.
441 ///
442 /// # Errors
443 ///
444 /// Returns `Err(CopyError)` if the allocator fails to perform the copy operation.
445 ///
446 /// # Safety
447 ///
448 /// While this method is safe to call, the operation may be asynchronous depending
449 /// on the backend. The implementation ensures proper memory handling.
450 ///
451 /// # Examples
452 ///
453 /// ```rust,ignore
454 /// let mut buffer1 = buffer![1, 2, 3];
455 /// let mut buffer2 = Buffer::with_capacity(6);
456 ///
457 /// // Copy elements from buffer1 to buffer2
458 /// buffer2.extend_from_device_slice(&buffer1[..]).unwrap();
459 /// assert_eq!(buffer2.len(), 3);
460 /// ```
461 #[track_caller]
462 pub fn extend_from_device_slice(&mut self, src: &Slice<T, A>) -> Result<(), CopyError> {
463 // The panic code path was put into a cold function to not bloat the
464 // call site.
465 #[inline(never)]
466 #[cold]
467 #[track_caller]
468 fn capacity_fail(dst_len: usize, src_len: usize, cap: usize) -> ! {
469 panic!(
470 "source slice length ({src_len}) too long for buffer of length ({dst_len}) and capacity ({cap})"
471 );
472 }
473
474 if self.len() + src.len() > self.capacity() {
475 capacity_fail(self.len(), src.len(), self.capacity());
476 }
477
478 let layout = Layout::array::<T>(src.len()).unwrap();
479
480 unsafe {
481 self.buf.allocator().copy_nonoverlapping(
482 src.as_ptr() as *const u8,
483 self.buf.ptr().add(self.len()) as *mut u8,
484 layout.size(),
485 CopyDirection::DeviceToDevice,
486 )?;
487 }
488
489 // Extend the length of the buffer to include the new elements.
490 self.len += src.len();
491
492 Ok(())
493 }
494
495 /// Appends all elements from a host slice into `self`.
496 ///
497 /// This extends the buffer by copying elements from CPU memory. For non-CPU backends,
498 /// this involves a host-to-device transfer.
499 ///
500 /// # Panics
501 ///
502 /// This function will panic if the resulting length exceeds the buffer's capacity.
503 ///
504 /// # Errors
505 ///
506 /// Returns `Err(CopyError)` if the allocator fails to perform the copy operation.
507 ///
508 /// # Safety
509 ///
510 /// While this method is safe to call, the operation may be asynchronous depending
511 /// on the backend. The implementation ensures the source memory remains valid
512 /// during the operation.
513 ///
514 /// # Examples
515 ///
516 /// ```rust,ignore
517 /// let mut buffer = Buffer::with_capacity(5);
518 /// buffer.extend_from_host_slice(&[1, 2, 3]).unwrap();
519 /// assert_eq!(buffer.len(), 3);
520 ///
521 /// buffer.extend_from_host_slice(&[4, 5]).unwrap();
522 /// assert_eq!(buffer.len(), 5);
523 /// ```
524 #[track_caller]
525 pub fn extend_from_host_slice(&mut self, src: &[T]) -> Result<(), CopyError> {
526 // The panic code path was put into a cold function to not bloat the
527 // call site.
528 #[inline(never)]
529 #[cold]
530 #[track_caller]
531 fn capacity_fail(dst_len: usize, src_len: usize, cap: usize) -> ! {
532 panic!(
533 "source slice length ({src_len}) too long for buffer of length ({dst_len}) and capacity ({cap})"
534 );
535 }
536
537 if self.len() + src.len() > self.capacity() {
538 capacity_fail(self.len(), src.len(), self.capacity());
539 }
540
541 let layout = Layout::array::<T>(src.len()).unwrap();
542
543 unsafe {
544 self.buf.allocator().copy_nonoverlapping(
545 src.as_ptr() as *const u8,
546 self.buf.ptr().add(self.len()) as *mut u8,
547 layout.size(),
548 CopyDirection::HostToDevice,
549 )?;
550 }
551
552 // Extend the length of the buffer to include the new elements.
553 self.len += src.len();
554
555 Ok(())
556 }
557
    /// Copies all elements from `self` into `dst`, using `copy_nonoverlapping`.
    ///
    /// The length of `dst` must be the same as `self`. On success, every
    /// element of `dst` has been written.
    ///
    /// **Note**: This function might be blocking.
    ///
    /// # Safety
    ///
    /// This operation is potentially asynchronous. The caller must ensure the memory of the
    /// destination is valid for the duration of the operation.
    #[track_caller]
    pub unsafe fn copy_into_host(&self, dst: &mut [MaybeUninit<T>]) -> Result<(), CopyError> {
        // The panic code path was put into a cold function to not bloat the
        // call site.
        #[inline(never)]
        #[cold]
        #[track_caller]
        fn len_mismatch_fail(dst_len: usize, src_len: usize) -> ! {
            panic!(
                "source slice length ({src_len}) does not match destination slice length ({dst_len})",
            );
        }

        if self.len() != dst.len() {
            len_mismatch_fail(dst.len(), self.len());
        }

        let layout = Layout::array::<T>(dst.len()).unwrap();

        // SAFETY: the length check guarantees both regions span exactly
        // `dst.len()` elements; `dst` is host memory and cannot overlap the
        // backend-owned source.
        unsafe {
            self.buf.allocator().copy_nonoverlapping(
                self.buf.ptr() as *const u8,
                dst.as_mut_ptr() as *mut u8,
                layout.size(),
                CopyDirection::DeviceToHost,
            )
        }
    }
596
    /// Copies all elements from `self` into a newly allocated [Vec<T>] and returns it.
    ///
    /// # Safety
    /// See [Buffer::copy_into_host]
    pub unsafe fn copy_into_host_vec(&self) -> Vec<T> {
        let mut vec = Vec::with_capacity(self.len());
        // Write directly into the vector's spare capacity, then publish the
        // length once the copy has completed.
        self.copy_into_host(vec.spare_capacity_mut()).unwrap();
        unsafe {
            vec.set_len(self.len());
        }
        vec
    }
609
    /// Copies all elements from `self` into a newly allocated CPU-backed
    /// [`Buffer`] and returns it.
    ///
    /// # Safety
    /// See [Buffer::copy_into_host]
    pub unsafe fn copy_into_host_buffer(&self) -> Buffer<T, CpuBackend> {
        // Round-trip through a `Vec`; `Buffer::from(Vec)` is zero-cost.
        let vec = self.copy_into_host_vec();
        Buffer::from(vec)
    }
618
    /// Sets `len` bytes of memory starting at the current length to `value`.
    ///
    /// This extends the buffer by `len` bytes, all set to `value`. The `len`
    /// parameter must be a multiple of `size_of::<T>()` to ensure proper alignment.
    ///
    /// # Errors
    ///
    /// Returns `Err(CopyError)` if the backend allocator fails to perform the
    /// memory operation.
    ///
    /// # Panics
    ///
    /// - Panics if `len` is not a multiple of `size_of::<T>()`
    /// - Panics if extending by `len` bytes would exceed the buffer's capacity
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// let mut buffer: Buffer<u32> = Buffer::with_capacity(10);
    ///
    /// // Write 12 bytes (3 u32s) of value 0xFF
    /// buffer.write_bytes(0xFF, 12).unwrap();
    /// assert_eq!(buffer.len(), 3);
    /// assert_eq!(buffer[0], 0xFFFFFFFF);
    /// ```
    #[track_caller]
    pub fn write_bytes(&mut self, value: u8, len: usize) -> Result<(), CopyError> {
        // The panic code path was put into a cold function to not bloat the
        // call site.
        #[inline(never)]
        #[cold]
        #[track_caller]
        fn capacity_fail(dst_len: usize, len: usize, cap: usize) -> ! {
            panic!("Cannot write {len} bytes to buffer of length {dst_len} and capacity {cap}");
        }

        // The panic code path was put into a cold function to not bloat the
        // call site.
        #[inline(never)]
        #[cold]
        #[track_caller]
        fn align_fail(len: usize, size: usize) -> ! {
            panic!("Number of bytes ({len}) does not match the size of the type ({size})");
        }

        // Check that the number of bytes matches the size of the type.
        if !len.is_multiple_of(std::mem::size_of::<T>()) {
            align_fail(len, std::mem::size_of::<T>());
        }

        // Check that the buffer has enough capacity (comparison done in bytes).
        if self.len() * std::mem::size_of::<T>() + len > self.capacity() * std::mem::size_of::<T>()
        {
            capacity_fail(self.len(), len, self.capacity());
        }

        // Write the bytes to the buffer.
        unsafe {
            self.buf.allocator().write_bytes(
                self.buf.ptr().add(self.len()) as *mut u8,
                value,
                len,
            )?;
        }

        // Extend the length of the buffer to include the new elements.
        self.len += len / std::mem::size_of::<T>();

        Ok(())
    }
689
690 /// Reinterprets the buffer's elements as base field elements.
691 ///
692 /// This method consumes the buffer and returns a new buffer where each
693 /// extension field element is reinterpreted as `D` base field elements,
694 /// where `D` is the degree of the extension.
695 ///
696 /// # Type Parameters
697 ///
698 /// - `E`: The base field type
699 /// - `T`: Must implement `ExtensionField<E>`
700 ///
701 /// # Examples
702 ///
703 /// ```rust,ignore
704 /// // If T is a degree-4 extension over E
705 /// let buffer: Buffer<ExtField> = buffer![ext1, ext2, ext3];
706 /// let base_buffer: Buffer<BaseField> = buffer.flatten_to_base();
707 /// assert_eq!(base_buffer.len(), 12); // 3 * 4 = 12
708 /// ```
709 pub fn flatten_to_base<E>(self) -> Buffer<E, A>
710 where
711 T: ExtensionField<E>,
712 E: Field,
713 {
714 let mut buffer = ManuallyDrop::new(self);
715 let (original_ptr, original_len, original_cap, allocator) =
716 (buffer.as_mut_ptr(), buffer.len(), buffer.capacity(), buffer.allocator().clone());
717 let ptr = original_ptr as *mut E;
718 let len = original_len * T::D;
719 let cap = original_cap * T::D;
720 unsafe { Buffer::from_raw_parts(ptr, len, cap, allocator) }
721 }
722
723 /// Reinterprets the buffer's base field elements as extension field elements.
724 ///
725 /// This method consumes the buffer and returns a new buffer where every `D`
726 /// base field elements are reinterpreted as one extension field element,
727 /// where `D` is the degree of the extension.
728 ///
729 /// # Type Parameters
730 ///
731 /// - `T`: The base field type
732 /// - `E`: Must implement `ExtensionField<T>`
733 ///
734 /// # Panics
735 ///
736 /// Panics if the buffer length is not divisible by the extension degree.
737 ///
738 /// # Examples
739 ///
740 /// ```rust,ignore
741 /// // If E is a degree-4 extension over T
742 /// let buffer: Buffer<BaseField> = buffer![b1, b2, b3, b4, b5, b6, b7, b8];
743 /// let ext_buffer: Buffer<ExtField> = buffer.into_extension();
744 /// assert_eq!(ext_buffer.len(), 2); // 8 / 4 = 2
745 /// ```
746 pub fn into_extension<E>(self) -> Buffer<E, A>
747 where
748 T: Field,
749 E: ExtensionField<T>,
750 {
751 let mut buffer = ManuallyDrop::new(self);
752 let (original_ptr, original_len, original_cap, allocator) =
753 (buffer.as_mut_ptr(), buffer.len(), buffer.capacity(), buffer.allocator().clone());
754 let ptr = original_ptr as *mut E;
755 let len = original_len.checked_div(E::D).unwrap();
756 let cap = original_cap.checked_div(E::D).unwrap();
757 unsafe { Buffer::from_raw_parts(ptr, len, cap, allocator) }
758 }
759}
760
/// A buffer's backend is simply its allocator.
impl<T, A: Backend> HasBackend for Buffer<T, A> {
    type Backend = A;

    fn backend(&self) -> &Self::Backend {
        self.buf.allocator()
    }
}
768
769impl<T> Buffer<T, CpuBackend> {
    /// Constructs a new, empty `Buffer<T>` with at least the specified capacity.
    ///
    /// This is a convenience method that uses the global CPU backend allocator
    /// ([`GLOBAL_CPU_BACKEND`]). The buffer will be able to hold at least
    /// `capacity` elements without reallocating. If `capacity` is 0, the
    /// buffer will not allocate.
    ///
    /// Note that for CPU backend buffers, the capacity can grow beyond the initial
    /// value through operations like `push` or `extend_from_slice`.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` bytes.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// let mut buffer = Buffer::with_capacity(10);
    /// assert!(buffer.capacity() >= 10);
    /// ```
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_in(capacity, GLOBAL_CPU_BACKEND)
    }
793
794 /// Appends an element to the back of the buffer.
795 ///
796 /// For CPU backend buffers, this may cause reallocation if the buffer is full.
797 /// The reallocation is handled by converting to/from `Vec<T>` internally.
798 ///
799 /// # Panics
800 ///
801 /// Panics if the new capacity exceeds `isize::MAX` bytes.
802 ///
803 /// # Examples
804 ///
805 /// ```rust,ignore
806 /// let mut buffer = Buffer::with_capacity(2);
807 /// buffer.push(3);
808 /// assert_eq!(&*buffer, &[3]);
809 ///
810 /// buffer.push(4);
811 /// assert_eq!(&*buffer, &[3, 4]);
812 ///
813 /// // This will trigger reallocation
814 /// buffer.push(5);
815 /// assert_eq!(&*buffer, &[3, 4, 5]);
816 /// assert!(buffer.capacity() >= 3);
817 /// ```
818 #[inline]
819 pub fn push(&mut self, value: T) {
820 let take_self = std::mem::take(self);
821 let mut vec = Vec::from(take_self);
822 vec.push(value);
823 *self = Self::from(vec);
824 }
825
826 /// Removes the last element from the buffer and returns it, or `None` if empty.
827 ///
828 /// # Examples
829 ///
830 /// ```rust,ignore
831 /// let mut buffer = buffer![1, 2, 3];
832 /// assert_eq!(buffer.pop(), Some(3));
833 /// assert_eq!(&*buffer, &[1, 2]);
834 /// assert_eq!(buffer.pop(), Some(2));
835 /// assert_eq!(buffer.pop(), Some(1));
836 /// assert_eq!(buffer.pop(), None);
837 /// ```
838 #[inline]
839 pub fn pop(&mut self) -> Option<T> {
840 if self.is_empty() {
841 return None;
842 }
843
844 // This is safe because we have just checked that the buffer is not empty.
845 unsafe {
846 let len = self.len();
847 let ptr = &mut self[len - 1] as *mut _ as *mut T;
848 let value = ptr.read();
849 self.set_len(len - 1);
850 Some(value)
851 }
852 }
853
    /// Clears the buffer, removing all values.
    ///
    /// Note that this method has no effect on the allocated capacity of the buffer.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// let mut buffer = buffer![1, 2, 3];
    /// buffer.clear();
    /// assert!(buffer.is_empty());
    /// assert!(buffer.capacity() >= 3);
    /// ```
    #[inline]
    pub fn clear(&mut self) {
        let elems: *mut [T] = self.as_mut_slice();

        // SAFETY:
        // - `elems` comes directly from `as_mut_slice` and is therefore valid.
        // - Setting `self.len` before calling `drop_in_place` means that, if an element's `Drop`
        //   impl panics, the vector's `Drop` impl will do nothing (leaking the rest of the
        //   elements) instead of dropping some twice.
        unsafe {
            self.len = 0;
            std::ptr::drop_in_place(elems);
        }
    }
880
881 /// Resizes the buffer in-place so that `len` is equal to `new_len`.
882 ///
883 /// If `new_len` is greater than `len`, the buffer is extended by the
884 /// difference, with each additional slot filled with `value`.
885 /// If `new_len` is less than `len`, the buffer is simply truncated.
886 ///
887 /// This method may trigger reallocation if `new_len` exceeds the current capacity.
888 ///
889 /// # Examples
890 ///
891 /// ```rust,ignore
892 /// let mut buffer = buffer![1, 2, 3];
893 /// buffer.resize(5, 0);
894 /// assert_eq!(&*buffer, &[1, 2, 3, 0, 0]);
895 ///
896 /// buffer.resize(2, 0);
897 /// assert_eq!(&*buffer, &[1, 2]);
898 /// ```
899 #[inline]
900 pub fn resize(&mut self, new_len: usize, value: T)
901 where
902 T: Copy,
903 {
904 let owned_self = std::mem::take(self);
905 let mut vec = Vec::from(owned_self);
906 vec.resize(new_len, value);
907 *self = Self::from(vec);
908 }
909
910 /// Extends the buffer with the contents of the given slice.
911 ///
912 /// This is a specialized version for CPU backend that can trigger reallocation
913 /// if needed to accommodate the new elements.
914 ///
915 /// # Examples
916 ///
917 /// ```rust,ignore
918 /// let mut buffer = buffer![1, 2, 3];
919 /// buffer.extend_from_slice(&[4, 5, 6]);
920 /// assert_eq!(&*buffer, &[1, 2, 3, 4, 5, 6]);
921 /// ```
922 #[inline]
923 pub fn extend_from_slice(&mut self, slice: &[T]) {
924 // Check to see if capacity needs to be increased.
925 if self.len() + slice.len() > self.capacity() {
926 let additional_capacity = self.len() + slice.len() - self.capacity();
927 let owned_self = std::mem::take(self);
928 let mut vec = Vec::from(owned_self);
929 vec.reserve(vec.capacity() + additional_capacity);
930 *self = Self::from(vec);
931 assert!(self.capacity() >= self.len() + slice.len());
932 }
933
934 self.extend_from_host_slice(slice).unwrap()
935 }
936
    /// Converts the buffer into a `Vec<T>`.
    ///
    /// This consumes the buffer and transfers ownership of its data to a standard
    /// `Vec`. This is a zero-cost operation as the underlying memory layout is
    /// compatible.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// let buffer = buffer![1, 2, 3];
    /// let vec = buffer.into_vec();
    /// assert_eq!(vec, vec![1, 2, 3]);
    /// ```
    #[inline]
    pub fn into_vec(self) -> Vec<T> {
        // Delegates to the `From<Buffer<T, CpuBackend>> for Vec<T>` impl.
        self.into()
    }
954
    /// Returns a slice containing the entire buffer.
    ///
    /// Equivalent to `&buffer[..]`.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// let buffer = buffer![1, 2, 3];
    /// assert_eq!(buffer.as_slice(), &[1, 2, 3]);
    /// ```
    #[inline]
    pub fn as_slice(&self) -> &[T] {
        &self[..]
    }
969
    /// Returns a mutable slice containing the entire buffer.
    ///
    /// Equivalent to `&mut buffer[..]`.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// let mut buffer = buffer![1, 2, 3];
    /// buffer.as_mut_slice()[0] = 7;
    /// assert_eq!(&*buffer, &[7, 2, 3]);
    /// ```
    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        &mut self[..]
    }
985
    /// Returns the remaining spare capacity of the buffer as a slice of `MaybeUninit<T>`.
    ///
    /// The returned slice can be used to fill the buffer with data before marking
    /// the data as initialized using [`set_len`].
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// let mut buffer = Buffer::with_capacity(10);
    /// buffer.push(0);
    /// buffer.push(1);
    ///
    /// let spare = buffer.spare_capacity_mut();
    /// assert_eq!(spare.len(), 8);
    ///
    /// // Initialize the spare capacity
    /// for i in 0..4 {
    ///     spare[i].write(i as i32 + 2);
    /// }
    ///
    /// unsafe {
    ///     buffer.set_len(6);
    /// }
    /// assert_eq!(&*buffer, &[0, 1, 2, 3, 4, 5]);
    /// ```
    ///
    /// [`set_len`]: Buffer::set_len
    pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<T>] {
        // Borrow the allocation as a `Vec` without taking ownership
        // (`ManuallyDrop` prevents a double free) purely to reuse
        // `Vec::spare_capacity_mut`'s logic.
        let mut vec = ManuallyDrop::new(unsafe {
            Vec::from_raw_parts(self.as_mut_ptr(), self.len(), self.capacity())
        });
        let slice = vec.spare_capacity_mut();
        let len = slice.len();
        let ptr = slice.as_mut_ptr();
        // SAFETY: reconstructing the slice from raw parts detaches its
        // lifetime from the temporary `vec` and ties it to `&mut self`,
        // which owns the same memory for at least as long.
        unsafe { std::slice::from_raw_parts_mut(ptr, len) }
    }
1022
1023 /// Inserts an element at position `index`, shifting all elements after it to the right.
1024 ///
1025 /// This operation may trigger reallocation if the buffer is at capacity.
1026 ///
1027 /// # Panics
1028 ///
1029 /// Panics if `index > len`.
1030 ///
1031 /// # Examples
1032 ///
1033 /// ```rust,ignore
1034 /// let mut buffer = buffer![1, 2, 3];
1035 /// buffer.insert(1, 4);
1036 /// assert_eq!(&*buffer, &[1, 4, 2, 3]);
1037 /// buffer.insert(4, 5);
1038 /// assert_eq!(&*buffer, &[1, 4, 2, 3, 5]);
1039 /// ```
1040 #[inline]
1041 pub fn insert(&mut self, index: usize, value: T) {
1042 let take_self = std::mem::take(self);
1043 let mut vec = Vec::from(take_self);
1044 vec.insert(index, value);
1045 *self = Self::from(vec);
1046 }
1047}
1048
1049impl<T> From<Vec<T>> for Buffer<T, CpuBackend> {
1050 /// Creates a buffer from a `Vec<T>`.
1051 ///
1052 /// This is a zero-cost conversion that takes ownership of the vector's
1053 /// allocated memory.
1054 ///
1055 /// # Examples
1056 ///
1057 /// ```rust,ignore
1058 /// let vec = vec![1, 2, 3, 4];
1059 /// let buffer = Buffer::from(vec);
1060 /// assert_eq!(&*buffer, &[1, 2, 3, 4]);
1061 /// ```
1062 fn from(value: Vec<T>) -> Self {
1063 unsafe {
1064 let mut vec = ManuallyDrop::new(value);
1065 Buffer::from_raw_parts(vec.as_mut_ptr(), vec.len(), vec.capacity(), GLOBAL_CPU_BACKEND)
1066 }
1067 }
1068}
1069
impl<T> Default for Buffer<T, CpuBackend> {
    /// Creates an empty buffer.
    ///
    /// Equivalent to `Buffer::with_capacity(0)`; no memory is allocated until
    /// elements are added (growth is possible for the CPU backend only).
    #[inline]
    fn default() -> Self {
        Self::with_capacity(0)
    }
}
1079
1080impl<T> From<Buffer<T, CpuBackend>> for Vec<T> {
1081 /// Converts a buffer into a `Vec<T>`.
1082 ///
1083 /// This is a zero-cost conversion that transfers ownership of the buffer's
1084 /// allocated memory to the vector.
1085 ///
1086 /// # Examples
1087 ///
1088 /// ```rust,ignore
1089 /// let buffer = buffer![1, 2, 3];
1090 /// let vec = Vec::from(buffer);
1091 /// assert_eq!(vec, vec![1, 2, 3]);
1092 /// ```
1093 fn from(value: Buffer<T, CpuBackend>) -> Self {
1094 let mut self_undropped = ManuallyDrop::new(value);
1095 unsafe {
1096 Vec::from_raw_parts(
1097 self_undropped.as_mut_ptr(),
1098 self_undropped.len(),
1099 self_undropped.capacity(),
1100 )
1101 }
1102 }
1103}
1104
1105impl<T> FromIterator<T> for Buffer<T, CpuBackend> {
1106 /// Creates a buffer from an iterator.
1107 ///
1108 /// # Examples
1109 ///
1110 /// ```rust,ignore
1111 /// let buffer: Buffer<_> = (0..5).collect();
1112 /// assert_eq!(&*buffer, &[0, 1, 2, 3, 4]);
1113 ///
1114 /// let buffer: Buffer<_> = vec![1, 2, 3].into_iter().collect();
1115 /// assert_eq!(&*buffer, &[1, 2, 3]);
1116 /// ```
1117 fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
1118 let vec: Vec<T> = iter.into_iter().collect();
1119 Self::from(vec)
1120 }
1121}
1122
/// Creates a [`Buffer`] containing the arguments.
///
/// `buffer!` allows creating buffers using the same syntax as the `vec!` macro.
/// It simply creates a `Vec` and converts it to a `Buffer`.
///
/// # Examples
///
/// ```rust,ignore
/// let buffer = buffer![1, 2, 3];
/// assert_eq!(&*buffer, &[1, 2, 3]);
///
/// let buffer = buffer![0; 5];
/// assert_eq!(&*buffer, &[0, 0, 0, 0, 0]);
/// ```
///
/// [`Buffer`]: crate::Buffer
#[macro_export]
macro_rules! buffer {
    // `buffer![elem; n]` — repeat form, mirroring `vec![elem; n]`. This arm was
    // documented above but previously missing, so the `buffer![0; 5]` example
    // failed to compile.
    ($elem:expr; $n:expr) => {
        $crate::Buffer::from(vec![$elem; $n])
    };
    // `buffer![a, b, c]` — list form; also accepts a trailing comma like `vec!`.
    ($($x:expr),* $(,)?) => {
        $crate::Buffer::from(vec![$($x),*])
    };
}
1145
// Generates paired `Index`/`IndexMut` impls for `Buffer<T, A>`, one per range
// type passed to the invocation; each impl yields a `Slice<T, A>` view.
macro_rules! impl_index {
    ($($t:ty)*) => {
        $(
            impl<T, A: Backend> Index<$t> for Buffer<T, A>
            {
                type Output = Slice<T, A>;

                fn index(&self, index: $t) -> &Slice<T, A> {
                    // SAFETY: `self.len` elements start at `self.as_ptr()` within a
                    // single live allocation, and the temporary `&[T]` only lives for
                    // this call. The inner `.index(...)` panics on out-of-bounds
                    // ranges, matching standard slice behavior.
                    // NOTE(review): for non-CPU backends this constructs a host
                    // `&[T]` over device memory; assumed sound because the elements
                    // are only re-wrapped into `Slice`, never dereferenced here —
                    // confirm against `Slice::from_slice`'s contract.
                    unsafe {
                        Slice::from_slice(
                            std::slice::from_raw_parts(self.as_ptr(), self.len).index(index)
                        )
                    }
                }
            }

            impl<T, A: Backend> IndexMut<$t> for Buffer<T, A>
            {
                fn index_mut(&mut self, index: $t) -> &mut Slice<T, A> {
                    // SAFETY: same reasoning as `index` above; `&mut self` guarantees
                    // the mutable view is unique for its lifetime.
                    unsafe {
                        Slice::from_slice_mut(
                            std::slice::from_raw_parts_mut(self.as_mut_ptr(), self.len).index_mut(index)
                        )
                    }
                }
            }
        )*
    }
}
1175
// Buffers are indexable by every standard range type, each producing a
// `Slice<T, A>` view; single-`usize` indexing (yielding `Init<T, A>`) is
// implemented separately.
impl_index! {
    Range<usize>
    RangeFull
    RangeFrom<usize>
    RangeInclusive<usize>
    RangeTo<usize>
    RangeToInclusive<usize>
}
1184
impl<T, A: Backend> Deref for Buffer<T, A> {
    type Target = Slice<T, A>;

    /// Dereferences to a `Slice` view of the entire buffer, i.e. `&buffer[..]`.
    fn deref(&self) -> &Self::Target {
        &self[..]
    }
}

impl<T, A: Backend> DerefMut for Buffer<T, A> {
    /// Mutably dereferences to a `Slice` view of the entire buffer.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self[..]
    }
}
1198
1199impl<T, A: Backend> Index<usize> for Buffer<T, A> {
1200 type Output = Init<T, A>;
1201
1202 #[inline]
1203 fn index(&self, index: usize) -> &Self::Output {
1204 &self[..][index]
1205 }
1206}
1207
1208impl<T, A: Backend> IndexMut<usize> for Buffer<T, A> {
1209 #[inline]
1210 fn index_mut(&mut self, index: usize) -> &mut Self::Output {
1211 &mut self[..][index]
1212 }
1213}
1214
impl<T, A: Backend> Clone for Buffer<T, A> {
    /// Returns a copy of the buffer.
    ///
    /// This allocates a new buffer with capacity equal to `self.len()` — excess
    /// capacity is NOT preserved, matching `Vec::clone` — and copies all
    /// elements using the backend's `copy_nonoverlapping` operation.
    ///
    /// NOTE(review): there is no `T: Clone` bound; elements are duplicated
    /// bitwise via the backend copy. This is fine for `Copy`-like field element
    /// types, but would double-own heap data if `T` had a destructor — confirm
    /// that buffers never run element destructors for such `T`.
    ///
    /// # Panics
    ///
    /// Panics if the element layout computation overflows or the backend copy
    /// reports an error.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// let buffer1 = buffer![1, 2, 3];
    /// let buffer2 = buffer1.clone();
    /// assert_eq!(&*buffer1, &*buffer2);
    /// ```
    #[inline]
    fn clone(&self) -> Self {
        let mut cloned = Self::with_capacity_in(self.len(), self.allocator().clone());
        let layout = Layout::array::<T>(self.len()).unwrap();
        // SAFETY: source and destination are distinct allocations each holding
        // at least `len` elements (`layout.size()` bytes), and `set_len` is only
        // called after those bytes have been copied in.
        unsafe {
            self.buf
                .allocator()
                .copy_nonoverlapping(
                    self.as_ptr() as *const u8,
                    cloned.as_mut_ptr() as *mut u8,
                    layout.size(),
                    CopyDirection::DeviceToDevice,
                )
                .unwrap();
            cloned.set_len(self.len());
        }
        cloned
    }
}
1247
1248impl<T: PartialEq> PartialEq for Buffer<T, CpuBackend> {
1249 /// Tests for equality between two buffers.
1250 ///
1251 /// Two buffers are considered equal if the underlying slices are equal, i.e. they have the same
1252 /// length and all corresponding elements are equal. Capacity is not considered.
1253 ///
1254 /// # Examples
1255 ///
1256 /// ```rust,ignore
1257 /// let buffer1 = buffer![1, 2, 3];
1258 /// let buffer2 = buffer![1, 2, 3];
1259 /// assert_eq!(buffer1, buffer2);
1260 ///
1261 /// let buffer3 = buffer![1, 2, 4];
1262 /// assert_ne!(buffer1, buffer3);
1263 /// ```
1264 fn eq(&self, other: &Self) -> bool {
1265 self[..] == other[..]
1266 }
1267}
1268
1269impl<T: Eq> Eq for Buffer<T, CpuBackend> {}
1270
1271impl<T: Serialize> Serialize for Buffer<T, CpuBackend> {
1272 fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
1273 self.as_slice().serialize(serializer)
1274 }
1275}
1276
1277impl<'de, T: Deserialize<'de>> Deserialize<'de> for Buffer<T, CpuBackend> {
1278 fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
1279 where
1280 D: serde::Deserializer<'de>,
1281 {
1282 let vec: Vec<T> = Vec::deserialize(deserializer)?;
1283 Ok(Buffer::from(vec))
1284 }
1285}
1286
#[cfg(test)]
mod tests {
    use super::*;

    /// End-to-end smoke test of the CPU-backed `Buffer`: push/pop, slicing,
    /// single-element indexing, `extend_from_slice` growth past capacity,
    /// `Vec` round-trips, the `buffer!` macro, and `insert`.
    #[test]
    fn test_buffer() {
        let mut buffer = Buffer::<u32>::with_capacity(10);
        assert_eq!(buffer.len(), 0);
        assert_eq!(buffer.capacity(), 10);

        buffer.push(1);
        buffer.push(2);
        buffer.push(3);
        assert_eq!(buffer.len(), 3);

        // Range indexing yields a view coercible to `&[u32]`.
        let as_slice: &[u32] = &buffer[..];
        assert_eq!(as_slice, &[1, 2, 3]);

        // Single-element indexing yields `Init<u32, _>`, hence the deref.
        let val = *buffer[0];
        assert_eq!(val, 1);

        let val = *buffer[1];
        assert_eq!(val, 2);

        let val = *buffer[2];
        assert_eq!(val, 3);

        let value = buffer.pop().unwrap();
        assert_eq!(value, 3);
        assert_eq!(buffer.len(), 2);

        // len 2 + 3 appended = 5, still within the original capacity of 10.
        buffer.extend_from_slice(&[4, 5, 6]);
        let host_vec = Vec::from(buffer);
        assert_eq!(host_vec, [1, 2, 4, 5, 6]);

        // Test the buffer!() macro
        let buffer = buffer![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        assert_eq!(buffer.len(), 10);
        assert_eq!(buffer.capacity(), 10);
        assert_eq!(*buffer[0], 1);
        assert_eq!(*buffer[1], 2);
        assert_eq!(*buffer[2], 3);
        assert_eq!(*buffer[3], 4);
        assert_eq!(*buffer[4], 5);
        assert_eq!(*buffer[5], 6);
        assert_eq!(*buffer[6], 7);
        assert_eq!(*buffer[7], 8);
        assert_eq!(*buffer[8], 9);
        assert_eq!(*buffer[9], 10);

        // `insert` shifts elements right and may reallocate (CPU backend only).
        let mut buffer = buffer![1, 2, 3, 4, 5, 6, 7, 8, 9];
        buffer.insert(0, 0);
        assert_eq!(buffer.len(), 10);
        assert_eq!(*buffer[0], 0);
        assert_eq!(*buffer[1], 1);
        assert_eq!(*buffer[2], 2);
        assert_eq!(*buffer[3], 3);
        assert_eq!(*buffer[4], 4);
        buffer.insert(4, 4);
        assert_eq!(buffer.len(), 11);
        assert_eq!(*buffer[4], 4);
        assert_eq!(*buffer[5], 4);
    }
}