fil_rustacuda/memory/unified.rs
use super::DeviceCopy;
use crate::error::*;
use crate::memory::malloc::{cuda_free_unified, cuda_malloc_unified};
use crate::memory::UnifiedPointer;
use std::borrow::{Borrow, BorrowMut};
use std::cmp::Ordering;
use std::convert::{AsMut, AsRef};
use std::fmt::{self, Display, Pointer};
use std::hash::{Hash, Hasher};
use std::mem;
use std::ops::{Deref, DerefMut};
use std::ptr;
use std::slice;

/// A pointer type for heap-allocation in CUDA unified memory.
///
/// See the [`module-level documentation`](../memory/index.html) for more information on unified
/// memory. Should behave equivalently to `std::boxed::Box`, except that the allocated memory can
/// be seamlessly shared between host and device.
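///
/// # Examples
///
/// A minimal host-side sketch (assuming a CUDA context has already been created, e.g. with
/// `rustacuda::quick_init`); the boxed value can be read and written directly from the host:
///
/// ```
/// # let _context = rustacuda::quick_init().unwrap();
/// use rustacuda::memory::*;
/// let mut five = UnifiedBox::new(5u64).unwrap();
/// *five = 10;
/// assert_eq!(10, *five);
/// ```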
#[derive(Debug)]
pub struct UnifiedBox<T: DeviceCopy> {
    ptr: UnifiedPointer<T>,
}
impl<T: DeviceCopy> UnifiedBox<T> {
    /// Allocate unified memory and place `val` into it.
    ///
    /// This doesn't actually allocate if `T` is zero-sized.
    ///
    /// # Errors
    ///
    /// If a CUDA error occurs, returns that error.
    ///
    /// # Examples
    ///
    /// ```
    /// # let _context = rustacuda::quick_init().unwrap();
    /// use rustacuda::memory::*;
    /// let five = UnifiedBox::new(5).unwrap();
    /// ```
    pub fn new(val: T) -> CudaResult<Self> {
        if mem::size_of::<T>() == 0 {
            Ok(UnifiedBox {
                ptr: UnifiedPointer::null(),
            })
        } else {
            let mut ubox = unsafe { UnifiedBox::uninitialized()? };
            *ubox = val;
            Ok(ubox)
        }
    }

    /// Allocate unified memory without initializing it.
    ///
    /// This doesn't actually allocate if `T` is zero-sized.
    ///
    /// # Safety
    ///
    /// Since the backing memory is not initialized, this function is not safe. The caller must
    /// ensure that the backing memory is set to a valid value before it is read, else undefined
    /// behavior may occur.
    ///
    /// # Errors
    ///
    /// If a CUDA error occurs, returns that error.
    ///
    /// # Examples
    ///
    /// ```
    /// # let _context = rustacuda::quick_init().unwrap();
    /// use rustacuda::memory::*;
    /// let mut five = unsafe { UnifiedBox::uninitialized().unwrap() };
    /// *five = 5u64;
    /// ```
    pub unsafe fn uninitialized() -> CudaResult<Self> {
        if mem::size_of::<T>() == 0 {
            Ok(UnifiedBox {
                ptr: UnifiedPointer::null(),
            })
        } else {
            let ptr = cuda_malloc_unified(1)?;
            Ok(UnifiedBox { ptr })
        }
    }

    /// Constructs a UnifiedBox from a raw pointer.
    ///
    /// After calling this function, the raw pointer and the memory it points to are owned by the
    /// UnifiedBox. The UnifiedBox destructor will free the allocated memory, but will not call the
    /// destructor of `T`. This function may accept any pointer produced by the `cuMemAllocManaged`
    /// CUDA API call.
    ///
    /// # Safety
    ///
    /// This function is unsafe because improper use may lead to memory problems. For example, a
    /// double free may occur if this function is called twice on the same pointer, or a segfault
    /// may occur if the pointer is not one returned by the appropriate API call.
    ///
    /// # Examples
    ///
    /// ```
    /// # let _context = rustacuda::quick_init().unwrap();
    /// use rustacuda::memory::*;
    /// let x = UnifiedBox::new(5).unwrap();
    /// let ptr = UnifiedBox::into_unified(x).as_raw_mut();
    /// let x = unsafe { UnifiedBox::from_raw(ptr) };
    /// ```
    pub unsafe fn from_raw(ptr: *mut T) -> Self {
        UnifiedBox {
            ptr: UnifiedPointer::wrap(ptr),
        }
    }

    /// Constructs a UnifiedBox from a UnifiedPointer.
    ///
    /// After calling this function, the pointer and the memory it points to are owned by the
    /// UnifiedBox. The UnifiedBox destructor will free the allocated memory, but will not call the
    /// destructor of `T`. This function may accept any pointer produced by the `cuMemAllocManaged`
    /// CUDA API call, such as one taken from `UnifiedBox::into_unified`.
    ///
    /// # Safety
    ///
    /// This function is unsafe because improper use may lead to memory problems. For example, a
    /// double free may occur if this function is called twice on the same pointer, or a segfault
    /// may occur if the pointer is not one returned by the appropriate API call.
    ///
    /// # Examples
    ///
    /// ```
    /// # let _context = rustacuda::quick_init().unwrap();
    /// use rustacuda::memory::*;
    /// let x = UnifiedBox::new(5).unwrap();
    /// let ptr = UnifiedBox::into_unified(x);
    /// let x = unsafe { UnifiedBox::from_unified(ptr) };
    /// ```
    pub unsafe fn from_unified(ptr: UnifiedPointer<T>) -> Self {
        UnifiedBox { ptr }
    }

    /// Consumes the UnifiedBox, returning the wrapped UnifiedPointer.
    ///
    /// After calling this function, the caller is responsible for the memory previously managed by
    /// the UnifiedBox. In particular, the caller should properly destroy T and deallocate the
    /// memory. The easiest way to do so is to create a new UnifiedBox using the
    /// `UnifiedBox::from_unified` function.
    ///
    /// Note: This is an associated function, which means that you have to call it as
    /// `UnifiedBox::into_unified(b)` instead of `b.into_unified()`. This is so that there is no
    /// conflict with a method on the inner type.
    ///
    /// # Examples
    ///
    /// ```
    /// # let _context = rustacuda::quick_init().unwrap();
    /// use rustacuda::memory::*;
    /// let x = UnifiedBox::new(5).unwrap();
    /// let ptr = UnifiedBox::into_unified(x);
    /// # unsafe { UnifiedBox::from_unified(ptr) };
    /// ```
    #[allow(clippy::wrong_self_convention)]
    pub fn into_unified(mut b: UnifiedBox<T>) -> UnifiedPointer<T> {
        let ptr = mem::replace(&mut b.ptr, UnifiedPointer::null());
        mem::forget(b);
        ptr
    }

    /// Returns the contained unified pointer without consuming the box.
    ///
    /// This is useful for passing the box to a kernel launch.
    ///
    /// # Examples
    ///
    /// ```
    /// # let _context = rustacuda::quick_init().unwrap();
    /// use rustacuda::memory::*;
    /// let mut x = UnifiedBox::new(5).unwrap();
    /// let ptr = x.as_unified_ptr();
    /// println!("{:p}", ptr);
    /// ```
    pub fn as_unified_ptr(&mut self) -> UnifiedPointer<T> {
        self.ptr
    }

    /// Consumes and leaks the UnifiedBox, returning a mutable reference, `&'a mut T`. Note that
    /// the type `T` must outlive the chosen lifetime `'a`. If the type has only static references,
    /// or none at all, this may be chosen to be `'static`.
    ///
    /// This is mainly useful for data that lives for the remainder of the program's life. Dropping
    /// the returned reference will cause a memory leak. If this is not acceptable, the reference
    /// should be wrapped with the `UnifiedBox::from_raw` function to produce a new UnifiedBox.
    /// This UnifiedBox can then be dropped, which will properly destroy T and release the
    /// allocated memory.
    ///
    /// Note: This is an associated function, which means that you have to call it as
    /// `UnifiedBox::leak(b)` instead of `b.leak()`. This is so that there is no conflict with
    /// a method on the inner type.
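    ///
    /// # Examples
    ///
    /// A brief sketch (assuming a CUDA context is already active); the leaked reference is
    /// reclaimed with `UnifiedBox::from_raw` here so the example does not actually leak:
    ///
    /// ```
    /// # let _context = rustacuda::quick_init().unwrap();
    /// use rustacuda::memory::*;
    /// let x = UnifiedBox::new(5u64).unwrap();
    /// let leaked: &'static mut u64 = UnifiedBox::leak(x);
    /// *leaked = 10;
    /// assert_eq!(10, *leaked);
    /// # unsafe { UnifiedBox::from_raw(leaked as *mut u64) };
    /// ```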
    pub fn leak<'a>(b: UnifiedBox<T>) -> &'a mut T
    where
        T: 'a,
    {
        unsafe { &mut *UnifiedBox::into_unified(b).as_raw_mut() }
    }

    /// Destroy a `UnifiedBox`, returning an error if deallocation fails.
    ///
    /// Deallocating unified memory can return errors from previous asynchronous work. This
    /// function destroys the given box and returns the error and the un-destroyed box on failure.
    ///
    /// # Example
    ///
    /// ```
    /// # let _context = rustacuda::quick_init().unwrap();
    /// use rustacuda::memory::*;
    /// let x = UnifiedBox::new(5).unwrap();
    /// match UnifiedBox::drop(x) {
    ///     Ok(()) => println!("Successfully destroyed"),
    ///     Err((e, uni_box)) => {
    ///         println!("Failed to destroy box: {:?}", e);
    ///         // Do something with uni_box
    ///     },
    /// }
    /// ```
    pub fn drop(mut uni_box: UnifiedBox<T>) -> DropResult<UnifiedBox<T>> {
        if uni_box.ptr.is_null() {
            return Ok(());
        }

        let ptr = mem::replace(&mut uni_box.ptr, UnifiedPointer::null());
        unsafe {
            match cuda_free_unified(ptr) {
                Ok(()) => {
                    mem::forget(uni_box);
                    Ok(())
                }
                Err(e) => Err((e, UnifiedBox { ptr })),
            }
        }
    }
}
impl<T: DeviceCopy> Drop for UnifiedBox<T> {
    fn drop(&mut self) {
        if !self.ptr.is_null() {
            let ptr = mem::replace(&mut self.ptr, UnifiedPointer::null());
            // No choice but to panic if this fails.
            unsafe {
                cuda_free_unified(ptr).expect("Failed to deallocate CUDA Unified memory.");
            }
        }
    }
}

impl<T: DeviceCopy> Borrow<T> for UnifiedBox<T> {
    fn borrow(&self) -> &T {
        &**self
    }
}
impl<T: DeviceCopy> BorrowMut<T> for UnifiedBox<T> {
    fn borrow_mut(&mut self) -> &mut T {
        &mut **self
    }
}
impl<T: DeviceCopy> AsRef<T> for UnifiedBox<T> {
    fn as_ref(&self) -> &T {
        &**self
    }
}
impl<T: DeviceCopy> AsMut<T> for UnifiedBox<T> {
    fn as_mut(&mut self) -> &mut T {
        &mut **self
    }
}
impl<T: DeviceCopy> Deref for UnifiedBox<T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { &*self.ptr.as_raw() }
    }
}
impl<T: DeviceCopy> DerefMut for UnifiedBox<T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.ptr.as_raw_mut() }
    }
}
impl<T: Display + DeviceCopy> Display for UnifiedBox<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}
impl<T: DeviceCopy> Pointer for UnifiedBox<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.ptr, f)
    }
}
impl<T: DeviceCopy + PartialEq> PartialEq for UnifiedBox<T> {
    fn eq(&self, other: &UnifiedBox<T>) -> bool {
        PartialEq::eq(&**self, &**other)
    }
}
impl<T: DeviceCopy + Eq> Eq for UnifiedBox<T> {}
impl<T: DeviceCopy + PartialOrd> PartialOrd for UnifiedBox<T> {
    fn partial_cmp(&self, other: &UnifiedBox<T>) -> Option<Ordering> {
        PartialOrd::partial_cmp(&**self, &**other)
    }
    fn lt(&self, other: &UnifiedBox<T>) -> bool {
        PartialOrd::lt(&**self, &**other)
    }
    fn le(&self, other: &UnifiedBox<T>) -> bool {
        PartialOrd::le(&**self, &**other)
    }
    fn ge(&self, other: &UnifiedBox<T>) -> bool {
        PartialOrd::ge(&**self, &**other)
    }
    fn gt(&self, other: &UnifiedBox<T>) -> bool {
        PartialOrd::gt(&**self, &**other)
    }
}
impl<T: DeviceCopy + Ord> Ord for UnifiedBox<T> {
    fn cmp(&self, other: &UnifiedBox<T>) -> Ordering {
        Ord::cmp(&**self, &**other)
    }
}
impl<T: DeviceCopy + Hash> Hash for UnifiedBox<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state);
    }
}

/// Fixed-size buffer in unified memory.
///
/// See the [`module-level documentation`](../memory/index.html) for more details on unified memory.
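///
/// # Examples
///
/// A small host-side sketch (assuming an active CUDA context); the buffer dereferences to a
/// slice, so it can be indexed and iterated like `[T]`:
///
/// ```
/// # let _context = rustacuda::quick_init().unwrap();
/// use rustacuda::memory::*;
/// let mut buffer = UnifiedBuffer::new(&0u64, 4).unwrap();
/// buffer[0] = 7;
/// let sum: u64 = buffer.iter().sum();
/// assert_eq!(7, sum);
/// ```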
#[derive(Debug)]
pub struct UnifiedBuffer<T: DeviceCopy> {
    buf: UnifiedPointer<T>,
    capacity: usize,
}
impl<T: DeviceCopy + Clone> UnifiedBuffer<T> {
    /// Allocate a new unified buffer large enough to hold `size` `T`'s and initialized with
    /// clones of `value`.
    ///
    /// # Errors
    ///
    /// If the allocation fails, returns the error from CUDA. If `size` is large enough that
    /// `size * mem::size_of::<T>()` overflows `usize`, then returns InvalidMemoryAllocation.
    ///
    /// # Examples
    ///
    /// ```
    /// # let _context = rustacuda::quick_init().unwrap();
    /// use rustacuda::memory::*;
    /// let mut buffer = UnifiedBuffer::new(&0u64, 5).unwrap();
    /// buffer[0] = 1;
    /// ```
    pub fn new(value: &T, size: usize) -> CudaResult<Self> {
        unsafe {
            let mut uninit = UnifiedBuffer::uninitialized(size)?;
            for x in 0..size {
                *uninit.get_unchecked_mut(x) = value.clone();
            }
            Ok(uninit)
        }
    }

    /// Allocate a new unified buffer of the same size as `slice`, initialized with a clone of
    /// the data in `slice`.
    ///
    /// # Errors
    ///
    /// If the allocation fails, returns the error from CUDA.
    ///
    /// # Examples
    ///
    /// ```
    /// # let _context = rustacuda::quick_init().unwrap();
    /// use rustacuda::memory::*;
    /// let values = [0u64; 5];
    /// let mut buffer = UnifiedBuffer::from_slice(&values).unwrap();
    /// buffer[0] = 1;
    /// ```
    pub fn from_slice(slice: &[T]) -> CudaResult<Self> {
        unsafe {
            let mut uninit = UnifiedBuffer::uninitialized(slice.len())?;
            for (i, x) in slice.iter().enumerate() {
                *uninit.get_unchecked_mut(i) = x.clone();
            }
            Ok(uninit)
        }
    }
}
impl<T: DeviceCopy> UnifiedBuffer<T> {
    /// Allocate a new unified buffer large enough to hold `size` `T`'s, but without
    /// initializing the contents.
    ///
    /// # Errors
    ///
    /// If the allocation fails, returns the error from CUDA. If `size` is large enough that
    /// `size * mem::size_of::<T>()` overflows `usize`, then returns InvalidMemoryAllocation.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the contents of the buffer are initialized before reading from
    /// the buffer.
    ///
    /// # Examples
    ///
    /// ```
    /// # let _context = rustacuda::quick_init().unwrap();
    /// use rustacuda::memory::*;
    /// let mut buffer = unsafe { UnifiedBuffer::uninitialized(5).unwrap() };
    /// for i in buffer.iter_mut() {
    ///     *i = 0u64;
    /// }
    /// ```
    pub unsafe fn uninitialized(size: usize) -> CudaResult<Self> {
        let ptr = if size > 0 && mem::size_of::<T>() > 0 {
            cuda_malloc_unified(size)?
        } else {
            UnifiedPointer::wrap(ptr::NonNull::dangling().as_ptr() as *mut T)
        };
        Ok(UnifiedBuffer {
            buf: ptr,
            capacity: size,
        })
    }

    /// Extracts a slice containing the entire buffer.
    ///
    /// Equivalent to `&s[..]`.
    ///
    /// # Examples
    ///
    /// ```
    /// # let _context = rustacuda::quick_init().unwrap();
    /// use rustacuda::memory::*;
    /// let buffer = UnifiedBuffer::new(&0u64, 5).unwrap();
    /// let sum: u64 = buffer.as_slice().iter().sum();
    /// ```
    pub fn as_slice(&self) -> &[T] {
        self
    }

    /// Extracts a mutable slice of the entire buffer.
    ///
    /// Equivalent to `&mut s[..]`.
    ///
    /// # Examples
    ///
    /// ```
    /// # let _context = rustacuda::quick_init().unwrap();
    /// use rustacuda::memory::*;
    /// let mut buffer = UnifiedBuffer::new(&0u64, 5).unwrap();
    /// for i in buffer.as_mut_slice() {
    ///     *i = 12u64;
    /// }
    /// ```
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        self
    }

    /// Returns a `UnifiedPointer<T>` to the buffer.
    ///
    /// The caller must ensure that the buffer outlives the returned pointer, or it will end up
    /// pointing to garbage.
    ///
    /// Modifying the buffer is guaranteed not to cause its backing allocation to be reallocated,
    /// so pointers cannot be invalidated in that manner, but other types may be added in the
    /// future which can reallocate.
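    ///
    /// # Examples
    ///
    /// A brief sketch (assuming an active CUDA context), mirroring `UnifiedBox::as_unified_ptr`:
    ///
    /// ```
    /// # let _context = rustacuda::quick_init().unwrap();
    /// use rustacuda::memory::*;
    /// let mut buffer = UnifiedBuffer::new(&0u64, 5).unwrap();
    /// let ptr = buffer.as_unified_ptr();
    /// println!("{:p}", ptr);
    /// ```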
    pub fn as_unified_ptr(&mut self) -> UnifiedPointer<T> {
        self.buf
    }

    /// Creates a `UnifiedBuffer<T>` directly from the raw components of another unified buffer.
    ///
    /// # Safety
    ///
    /// This is highly unsafe, due to the number of invariants that aren't
    /// checked:
    ///
    /// * `ptr` needs to have been previously allocated via `UnifiedBuffer` or
    ///   [`cuda_malloc_unified`](fn.cuda_malloc_unified.html).
    /// * `ptr`'s `T` needs to have the same size and alignment as it was allocated with.
    /// * `capacity` needs to be the capacity that the pointer was allocated with.
    ///
    /// Violating these may cause problems like corrupting the CUDA driver's
    /// internal data structures.
    ///
    /// The ownership of `ptr` is effectively transferred to the
    /// `UnifiedBuffer<T>` which may then deallocate, reallocate or change the
    /// contents of memory pointed to by the pointer at will. Ensure
    /// that nothing else uses the pointer after calling this
    /// function.
    ///
    /// # Examples
    ///
    /// ```
    /// # let _context = rustacuda::quick_init().unwrap();
    /// use std::mem;
    /// use rustacuda::memory::*;
    ///
    /// let mut buffer = UnifiedBuffer::new(&0u64, 5).unwrap();
    /// let ptr = buffer.as_unified_ptr();
    /// let size = buffer.len();
    ///
    /// mem::forget(buffer);
    ///
    /// let buffer = unsafe { UnifiedBuffer::from_raw_parts(ptr, size) };
    /// ```
    pub unsafe fn from_raw_parts(ptr: UnifiedPointer<T>, capacity: usize) -> UnifiedBuffer<T> {
        UnifiedBuffer { buf: ptr, capacity }
    }

    /// Destroy a `UnifiedBuffer`, returning an error if deallocation fails.
    ///
    /// Deallocating unified memory can return errors from previous asynchronous work. This
    /// function destroys the given buffer and returns the error and the un-destroyed buffer on
    /// failure.
    ///
    /// # Example
    ///
    /// ```
    /// # let _context = rustacuda::quick_init().unwrap();
    /// use rustacuda::memory::*;
    /// let x = UnifiedBuffer::from_slice(&[10u32, 20, 30]).unwrap();
    /// match UnifiedBuffer::drop(x) {
    ///     Ok(()) => println!("Successfully destroyed"),
    ///     Err((e, buf)) => {
    ///         println!("Failed to destroy buffer: {:?}", e);
    ///         // Do something with buf
    ///     },
    /// }
    /// ```
    pub fn drop(mut uni_buf: UnifiedBuffer<T>) -> DropResult<UnifiedBuffer<T>> {
        if uni_buf.buf.is_null() {
            return Ok(());
        }

        if uni_buf.capacity > 0 && mem::size_of::<T>() > 0 {
            let capacity = uni_buf.capacity;
            let ptr = mem::replace(&mut uni_buf.buf, UnifiedPointer::null());
            unsafe {
                match cuda_free_unified(ptr) {
                    Ok(()) => {
                        mem::forget(uni_buf);
                        Ok(())
                    }
                    Err(e) => Err((e, UnifiedBuffer::from_raw_parts(ptr, capacity))),
                }
            }
        } else {
            Ok(())
        }
    }
}

impl<T: DeviceCopy> AsRef<[T]> for UnifiedBuffer<T> {
    fn as_ref(&self) -> &[T] {
        self
    }
}
impl<T: DeviceCopy> AsMut<[T]> for UnifiedBuffer<T> {
    fn as_mut(&mut self) -> &mut [T] {
        self
    }
}
impl<T: DeviceCopy> Deref for UnifiedBuffer<T> {
    type Target = [T];

    fn deref(&self) -> &[T] {
        unsafe {
            let p = self.buf.as_raw();
            slice::from_raw_parts(p, self.capacity)
        }
    }
}
impl<T: DeviceCopy> DerefMut for UnifiedBuffer<T> {
    fn deref_mut(&mut self) -> &mut [T] {
        unsafe {
            let ptr = self.buf.as_raw_mut();
            slice::from_raw_parts_mut(ptr, self.capacity)
        }
    }
}
impl<T: DeviceCopy> Drop for UnifiedBuffer<T> {
    fn drop(&mut self) {
        if self.buf.is_null() {
            return;
        }

        if self.capacity > 0 && mem::size_of::<T>() > 0 {
            // No choice but to panic if this fails.
            unsafe {
                let ptr = mem::replace(&mut self.buf, UnifiedPointer::null());
                cuda_free_unified(ptr).expect("Failed to deallocate CUDA unified memory.");
            }
        }
        self.capacity = 0;
    }
}

#[cfg(test)]
mod test_unified_box {
    use super::*;

    #[derive(Clone, Debug)]
    struct ZeroSizedType;
    unsafe impl DeviceCopy for ZeroSizedType {}

    #[test]
    fn test_allocate_and_free() {
        let _context = crate::quick_init().unwrap();
        let mut x = UnifiedBox::new(5u64).unwrap();
        *x = 10;
        assert_eq!(10, *x);
        drop(x);
    }

    #[test]
    fn test_allocates_for_non_zst() {
        let _context = crate::quick_init().unwrap();
        let x = UnifiedBox::new(5u64).unwrap();
        let ptr = UnifiedBox::into_unified(x);
        assert!(!ptr.is_null());
        let _ = unsafe { UnifiedBox::from_unified(ptr) };
    }

    #[test]
    fn test_doesnt_allocate_for_zero_sized_type() {
        let _context = crate::quick_init().unwrap();
        let x = UnifiedBox::new(ZeroSizedType).unwrap();
        let ptr = UnifiedBox::into_unified(x);
        assert!(ptr.is_null());
        let _ = unsafe { UnifiedBox::from_unified(ptr) };
    }

    #[test]
    fn test_into_from_unified() {
        let _context = crate::quick_init().unwrap();
        let x = UnifiedBox::new(5u64).unwrap();
        let ptr = UnifiedBox::into_unified(x);
        let _ = unsafe { UnifiedBox::from_unified(ptr) };
    }

    #[test]
    fn test_equality() {
        let _context = crate::quick_init().unwrap();
        let x = UnifiedBox::new(5u64).unwrap();
        let y = UnifiedBox::new(5u64).unwrap();
        let z = UnifiedBox::new(0u64).unwrap();
        assert_eq!(x, y);
        assert!(x != z);
    }

    #[test]
    fn test_ordering() {
        let _context = crate::quick_init().unwrap();
        let x = UnifiedBox::new(1u64).unwrap();
        let y = UnifiedBox::new(2u64).unwrap();

        assert!(x < y);
    }
}
#[cfg(test)]
mod test_unified_buffer {
    use super::*;
    use std::mem;

    #[derive(Clone, Debug)]
    struct ZeroSizedType;
    unsafe impl DeviceCopy for ZeroSizedType {}

    #[test]
    fn test_new() {
        let _context = crate::quick_init().unwrap();
        let val = 0u64;
        let mut buffer = UnifiedBuffer::new(&val, 5).unwrap();
        buffer[0] = 1;
    }

    #[test]
    fn test_from_slice() {
        let _context = crate::quick_init().unwrap();
        let values = [0u64; 10];
        let mut buffer = UnifiedBuffer::from_slice(&values).unwrap();
        for i in buffer[0..3].iter_mut() {
            *i = 10;
        }
    }

    #[test]
    fn from_raw_parts() {
        let _context = crate::quick_init().unwrap();
        let mut buffer = UnifiedBuffer::new(&0u64, 5).unwrap();
        buffer[2] = 1;
        let ptr = buffer.as_unified_ptr();
        let len = buffer.len();
        mem::forget(buffer);

        let buffer = unsafe { UnifiedBuffer::from_raw_parts(ptr, len) };
        assert_eq!(&[0u64, 0, 1, 0, 0], buffer.as_slice());
        drop(buffer);
    }

    #[test]
    fn zero_length_buffer() {
        let _context = crate::quick_init().unwrap();
        let buffer = UnifiedBuffer::new(&0u64, 0).unwrap();
        drop(buffer);
    }

    #[test]
    fn zero_size_type() {
        let _context = crate::quick_init().unwrap();
        let buffer = UnifiedBuffer::new(&ZeroSizedType, 10).unwrap();
        drop(buffer);
    }

    #[test]
    fn overflows_usize() {
        let _context = crate::quick_init().unwrap();
        let err = UnifiedBuffer::new(&0u64, ::std::usize::MAX - 1).unwrap_err();
        assert_eq!(CudaError::InvalidMemoryAllocation, err);
    }

    #[test]
    fn test_unified_pointer_implements_traits_safely() {
        let _context = crate::quick_init().unwrap();
        let mut x = UnifiedBox::new(5u64).unwrap();
        let mut y = UnifiedBox::new(0u64).unwrap();

        // If the impls dereference the pointer, this should segfault.
        let _ = Ord::cmp(&x.as_unified_ptr(), &y.as_unified_ptr());
        let _ = PartialOrd::partial_cmp(&x.as_unified_ptr(), &y.as_unified_ptr());
        let _ = PartialEq::eq(&x.as_unified_ptr(), &y.as_unified_ptr());

        let mut hasher = std::collections::hash_map::DefaultHasher::new();
        std::hash::Hash::hash(&x.as_unified_ptr(), &mut hasher);

        let _ = format!("{:?}", x.as_unified_ptr());
        let _ = format!("{:p}", x.as_unified_ptr());
    }
}