// pointer_vec/lib.rs

// Memory layout of the backing allocation: | capacity | length | T...
/// A `Vec`-like container whose handle is the size of a single pointer.
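///
/// A minimal usage sketch (illustrative; it assumes the `Vec`-like behaviour
/// of the methods below):
///
/// ```ignore
/// let mut v: PointerVec<i32> = PointerVec::new();
/// v.push(1);
/// v.push(2);
/// assert_eq!(v.len(), 2);
/// assert_eq!(v[0], 1);
/// assert_eq!(v.pop(), Some(2));
/// ```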
pub struct PointerVec<T>(*mut T);


impl<T> PointerVec<T>{
    pub const fn new() -> Self{
        Self(core::ptr::null_mut())
    }

    pub fn with_capacity(capacity: usize) -> Self{
        unsafe{
            let layout = Self::layout_for(capacity);
            let ptr = std::alloc::alloc(layout) as *mut usize;
            if ptr.is_null() { std::alloc::handle_alloc_error(layout); }
            // Header: capacity at offset 0, length at offset 1; element data follows.
            *ptr = capacity;
            *ptr.add(1) = 0;
            Self(ptr.add(2) as *mut T)
        }
    }

    pub fn len(&self) -> usize{
        if self.0.is_null(){
            return 0
        }

        unsafe{*(self.0 as *mut usize).sub(1)}
    }

    pub fn capacity(&self) -> usize{
        if self.0.is_null(){
            return 0
        }

        unsafe{*(self.0 as *mut usize).sub(2)}
    }

    pub fn reserve(&mut self, additional: usize) {
        if usize::MAX - additional < self.len(){
            panic!("new capacity cannot exceed usize::MAX")
        }
        while (self.capacity() - self.len()) < additional{
            self.realloc();
        }
    }

    pub fn reserve_exact(&mut self, additional: usize){
        if usize::MAX - additional < self.len(){
            panic!("new capacity cannot exceed usize::MAX")
        }
        self.reserve(additional)
    }

    pub fn try_reserve(&mut self, additional: usize) -> Result<(), ()>{
        if usize::MAX - additional < self.len(){
            return Err(())
        }
        self.reserve(additional);
        return Ok(())
    }

    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), ()>{
        if usize::MAX - additional < self.len(){
            return Err(())
        }
        self.reserve_exact(additional);
        return Ok(())
    }

    pub fn shrink_to_fit(&mut self){
        // Shrinking the allocation is not implemented; the capacity is left unchanged.
    }

    pub fn shrink_to(&mut self, min_capacity: usize) {
        // Shrinking the allocation is not implemented; this only computes the
        // capacity a real implementation would shrink to.
        if self.capacity() > min_capacity {
            let _new_cap = self.len().max(min_capacity);
        }
    }

    pub fn into_boxed_slice(mut self) -> Box<[T]> {
        // The elements live behind a two-usize header, so this buffer cannot be
        // handed to `Box::from_raw` directly; copy the elements into a fresh
        // allocation instead.
        let len = self.len();
        let mut boxed: Vec<T> = Vec::with_capacity(len);
        unsafe {
            core::ptr::copy_nonoverlapping(self.as_ptr(), boxed.as_mut_ptr(), len);
            boxed.set_len(len);
            // The values now belong to `boxed`; zero the length so dropping
            // `self` only frees the old buffer.
            self.set_len(0);
        }
        boxed.into_boxed_slice()
    }

    pub fn truncate(&mut self, len: usize) {
        // Nothing to do for an unallocated (empty) vector.
        if self.0.is_null() {
            return;
        }
        // This is safe because:
        //
        // * the slice passed to `drop_in_place` is valid; the `len > self.len`
        //   case avoids creating an invalid slice, and
        // * the `len` of the vector is shrunk before calling `drop_in_place`,
        //   such that no value will be dropped twice in case `drop_in_place`
        //   were to panic once (if it panics twice, the program aborts).
        unsafe {
            // Note: It's intentional that this is `>` and not `>=`.
            //       Changing it to `>=` has negative performance
            //       implications in some cases. See #78884 for more.
            if len > self.len() {
                return;
            }
            let remaining_len = self.len() - len;
            let s = std::slice::from_raw_parts_mut(self.0.add(len), remaining_len);
            *((self.0 as *mut usize).sub(1)) = len;
            core::ptr::drop_in_place(s as *mut [T]);
        }
    }

    pub fn as_slice(&self) -> &[T] {
        self
    }

    pub fn as_mut_slice(&mut self) -> &mut [T]{
        self
    }

    pub fn as_ptr(&self) -> *const T {
        // We shadow the slice method of the same name to avoid going through
        // `deref`, which creates an intermediate reference.
        if self.0.is_null(){
            return core::ptr::NonNull::dangling().as_ptr()
        }
        self.0
    }

    pub fn as_mut_ptr(&self) -> *mut T {
        // We shadow the slice method of the same name to avoid going through
        // `deref`, which creates an intermediate reference.
        if self.0.is_null(){
            return core::ptr::NonNull::dangling().as_ptr()
        }
        self.0
    }

    pub unsafe fn set_len(&mut self, new_len: usize) {
        debug_assert!(new_len <= self.capacity());

        // An unallocated vector has no header to write to; its length can only
        // ever be zero.
        if self.0.is_null() {
            return;
        }
        *((self.0 as *mut usize).sub(1)) = new_len;
    }

    pub fn swap_remove(&mut self, index: usize) -> T {
        #[cold]
        #[inline(never)]
        fn assert_failed(index: usize, len: usize) -> ! {
            panic!("swap_remove index (is {index}) should be < len (is {len})");
        }

        let len = self.len();
        if index >= len {
            assert_failed(index, len);
        }
        unsafe {
            // We replace self[index] with the last element. Note that if the
            // bounds check above succeeds there must be a last element (which
            // can be self[index] itself).
            let value = core::ptr::read(self.as_ptr().add(index));
            let base_ptr = self.as_mut_ptr();
            core::ptr::copy(base_ptr.add(len - 1), base_ptr.add(index), 1);
            self.set_len(len - 1);
            value
        }
    }

    pub fn insert(&mut self, index: usize, element: T) {
        #[cold]
        #[inline(never)]
        fn assert_failed(index: usize, len: usize) -> ! {
            panic!("insertion index (is {index}) should be <= len (is {len})");
        }

        let len = self.len();
        if index > len {
            assert_failed(index, len);
        }

        // space for the new element
        if len == self.capacity() {
            self.reserve(1);
        }

        unsafe {
            // infallible
            // The spot to put the new value
            {
                let p = self.as_mut_ptr().add(index);
                // Shift everything over to make space. (Duplicating the
                // `index`th element into two consecutive places.)
                core::ptr::copy(p, p.offset(1), len - index);
                // Write it in, overwriting the first copy of the `index`th
                // element.
                core::ptr::write(p, element);
            }
            self.set_len(len + 1);
        }
    }

    pub fn remove(&mut self, index: usize) -> T {
        #[cold]
        #[inline(never)]
        #[track_caller]
        fn assert_failed(index: usize, len: usize) -> ! {
            panic!("removal index (is {index}) should be < len (is {len})");
        }

        let len = self.len();
        if index >= len {
            assert_failed(index, len);
        }
        unsafe {
            // infallible
            let ret;
            {
                // the place we are taking from.
                let ptr = self.as_mut_ptr().add(index);
                // copy it out, unsafely having a copy of the value on
                // the stack and in the vector at the same time.
                ret = core::ptr::read(ptr);

                // Shift everything down to fill in that spot.
                core::ptr::copy(ptr.offset(1), ptr, len - index - 1);
            }
            self.set_len(len - 1);
            ret
        }
    }

    pub fn retain<F>(&mut self, mut f: F)
    where
        F: FnMut(&T) -> bool,
    {
        self.retain_mut(|elem| f(elem));
    }

    pub fn retain_mut<F>(&mut self, mut f: F)
    where
        F: FnMut(&mut T) -> bool,
    {
        let original_len = self.len();
        // Avoid double drop if the drop guard is not executed,
        // since we may make some holes during the process.
        unsafe { self.set_len(0) };

        // Vec: [Kept, Kept, Hole, Hole, Hole, Hole, Unchecked, Unchecked]
        //      |<-              processed len   ->| ^- next to check
        //                  |<-  deleted cnt     ->|
        //      |<-              original_len                          ->|
        // Kept: Elements which predicate returns true on.
        // Hole: Moved or dropped element slot.
        // Unchecked: Unchecked valid elements.
        //
        // This drop guard will be invoked when the predicate or an element's `drop` panics.
        // It shifts unchecked elements to cover holes and `set_len` to the correct length.
        // In cases when the predicate and `drop` never panic, it will be optimized out.
        struct BackshiftOnDrop<'a, T> {
            v: &'a mut PointerVec<T>,
            processed_len: usize,
            deleted_cnt: usize,
            original_len: usize,
        }

        impl<T> Drop for BackshiftOnDrop<'_, T> {
            fn drop(&mut self) {
                if self.deleted_cnt > 0 {
                    // SAFETY: Trailing unchecked items must be valid since we never touch them.
                    unsafe {
                        core::ptr::copy(
                            self.v.as_ptr().add(self.processed_len),
                            self.v.as_mut_ptr().add(self.processed_len - self.deleted_cnt),
                            self.original_len - self.processed_len,
                        );
                    }
                }
                // SAFETY: After filling holes, all items are in contiguous memory.
                unsafe {
                    self.v.set_len(self.original_len - self.deleted_cnt);
                }
            }
        }

        let mut g = BackshiftOnDrop { v: self, processed_len: 0, deleted_cnt: 0, original_len };

        fn process_loop<F, T, const DELETED: bool>(
            original_len: usize,
            f: &mut F,
            g: &mut BackshiftOnDrop<'_, T>,
        ) where
            F: FnMut(&mut T) -> bool,
        {
            while g.processed_len != original_len {
                // SAFETY: Unchecked element must be valid.
                let cur = unsafe { &mut *g.v.as_mut_ptr().add(g.processed_len) };
                if !f(cur) {
                    // Advance early to avoid double drop if `drop_in_place` panicked.
                    g.processed_len += 1;
                    g.deleted_cnt += 1;
                    // SAFETY: We never touch this element again after dropped.
                    unsafe { core::ptr::drop_in_place(cur) };
                    // We already advanced the counter.
                    if DELETED {
                        continue;
                    } else {
                        break;
                    }
                }
                if DELETED {
                    // SAFETY: `deleted_cnt` > 0, so the hole slot must not overlap with current element.
                    // We use copy for move, and never touch this element again.
                    unsafe {
                        let hole_slot = g.v.as_mut_ptr().add(g.processed_len - g.deleted_cnt);
                        core::ptr::copy_nonoverlapping(cur, hole_slot, 1);
                    }
                }
                g.processed_len += 1;
            }
        }

        // Stage 1: Nothing was deleted.
        process_loop::<F, T, false>(original_len, &mut f, &mut g);

        // Stage 2: Some elements were deleted.
        process_loop::<F, T, true>(original_len, &mut f, &mut g);

        // All items are processed. This can be optimized to `set_len` by LLVM.
        drop(g);
    }

    pub fn dedup_by_key<F, K>(&mut self, mut key: F)
    where
        F: FnMut(&mut T) -> K,
        K: PartialEq,
    {
        self.dedup_by(|a, b| key(a) == key(b))
    }

    pub fn dedup_by<F>(&mut self, mut same_bucket: F)
    where
        F: FnMut(&mut T, &mut T) -> bool,
    {
        let len = self.len();
        if len <= 1 {
            return;
        }

        /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
        struct FillGapOnDrop<'a, T> {
            /* Offset of the element we want to check if it is duplicate */
            read: usize,

            /* Offset of the place where we want to place the non-duplicate
             * when we find it. */
            write: usize,

            /* The Vec that would need correction if `same_bucket` panicked */
            vec: &'a mut PointerVec<T>,
        }

        impl<'a, T> Drop for FillGapOnDrop<'a, T> {
            fn drop(&mut self) {
                /* This code gets executed when `same_bucket` panics */

                /* SAFETY: invariant guarantees that `read - write`
                 * and `len - read` never overflow and that the copy is always
                 * in-bounds. */
                unsafe {
                    let ptr = self.vec.as_mut_ptr();
                    let len = self.vec.len();

                    /* How many items were left when `same_bucket` panicked.
                     * Basically vec[read..].len() */
                    let items_left = len.wrapping_sub(self.read);

                    /* Pointer to first item in vec[write..write+items_left] slice */
                    let dropped_ptr = ptr.add(self.write);
                    /* Pointer to first item in vec[read..] slice */
                    let valid_ptr = ptr.add(self.read);

                    /* Copy `vec[read..]` to `vec[write..write+items_left]`.
                     * The slices can overlap, so `copy_nonoverlapping` cannot be used */
                    core::ptr::copy(valid_ptr, dropped_ptr, items_left);

                    /* How many items have been already dropped
                     * Basically vec[read..write].len() */
                    let dropped = self.read.wrapping_sub(self.write);

                    self.vec.set_len(len - dropped);
                }
            }
        }

        let mut gap = FillGapOnDrop { read: 1, write: 1, vec: self };
        let ptr = gap.vec.as_mut_ptr();

        /* Drop items while going through Vec, it should be more efficient than
         * doing slice partition_dedup + truncate */

        /* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
         * are always in-bounds and read_ptr never aliases prev_ptr */
        unsafe {
            while gap.read < len {
                let read_ptr = ptr.add(gap.read);
                let prev_ptr = ptr.add(gap.write.wrapping_sub(1));

                if same_bucket(&mut *read_ptr, &mut *prev_ptr) {
                    // Increase `gap.read` now since the drop may panic.
                    gap.read += 1;
                    /* We have found duplicate, drop it in-place */
                    core::ptr::drop_in_place(read_ptr);
                } else {
                    let write_ptr = ptr.add(gap.write);

                    /* Because `read_ptr` can be equal to `write_ptr`, we either
                     * have to use `copy` or conditional `copy_nonoverlapping`.
                     * Looks like the first option is faster. */
                    core::ptr::copy(read_ptr, write_ptr, 1);

                    /* We have filled that place, so go further */
                    gap.write += 1;
                    gap.read += 1;
                }
            }

            /* Technically we could let `gap` clean up with its Drop, but
             * when `same_bucket` is guaranteed to not panic, this bloats a little
             * the codegen, so we just do it manually */
            gap.vec.set_len(gap.write);
            core::mem::forget(gap);
        }
    }


    /// Layout of the backing allocation: a `[usize; 2]` header holding the
    /// capacity and length, followed by `capacity` elements of `T`.
    fn layout_for(capacity: usize) -> std::alloc::Layout {
        std::alloc::Layout::from_size_align(
            std::mem::size_of::<[usize; 2]>() + std::mem::size_of::<T>() * capacity,
            std::mem::align_of::<usize>().max(std::mem::align_of::<T>()),
        )
        .unwrap()
    }

    fn realloc(&mut self){
        unsafe{
            if self.0.is_null(){
                // First allocation: start with room for 24 elements.
                let layout = Self::layout_for(24);
                let ptr = std::alloc::alloc(layout) as *mut usize;
                if ptr.is_null() { std::alloc::handle_alloc_error(layout); }
                *ptr = 24;
                *ptr.add(1) = 0;
                self.0 = ptr.add(2) as *mut T;
            } else{
                let l = self.len();
                let cap = self.capacity();
                let new_cap = if cap == 0 { 24 } else { cap * 2 };
                let layout = Self::layout_for(new_cap);
                let ptr = std::alloc::alloc(layout) as *mut usize;
                if ptr.is_null() { std::alloc::handle_alloc_error(layout); }
                *ptr = new_cap;
                *ptr.add(1) = l;
                let new_data = ptr.add(2) as *mut T;
                // Move the existing elements into the new buffer, then free the
                // old one (its allocation starts two usizes before the data).
                core::ptr::copy_nonoverlapping(self.0, new_data, l);
                std::alloc::dealloc((self.0 as *mut usize).sub(2) as *mut u8, Self::layout_for(cap));
                self.0 = new_data;
            }
        }
    }

    pub fn push(&mut self, value: T){
        let l = self.len();
        if l == self.capacity(){
            self.realloc();
        }
        unsafe{
            // `write` avoids dropping the uninitialized slot, and the length
            // must be bumped so the new element becomes visible.
            self.0.add(l).write(value);
            self.set_len(l + 1);
        }
    }

    pub fn pop(&mut self) -> Option<T> {
        let l = self.len();
        if l == 0 {
            None
        } else {
            unsafe {
                self.set_len(l - 1);
                Some(core::ptr::read(self.as_ptr().add(self.len())))
            }
        }
    }

    pub fn append(&mut self, other: &mut Self) {
        unsafe {
            self.append_elements(other.as_slice() as _);
            other.set_len(0);
        }
    }

    unsafe fn append_elements(&mut self, other: *const [T]) {
        let count = (*other).len();
        self.reserve(count);
        let len = self.len();
        core::ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count);
        self.set_len(len + count);
    }

    pub fn drain<R>(&mut self, range: R) -> std::vec::Drain<'_, T>
    where
        R: core::ops::RangeBounds<usize>,
    {
        // Memory safety
        //
        // When the Drain is first created, it shortens the length of
        // the source vector to make sure no uninitialized or moved-from elements
        // are accessible at all if the Drain's destructor never gets to run.
        //
        // Drain will ptr::read out the values to remove.
        // When finished, the remaining tail of the vec is copied back to cover
        // the hole, and the vector length is restored to the new length.
        //
        let len = self.len();
        let bounds = ..len;
        let (start, end) = {
            let len = bounds.end;

            let start: std::ops::Bound<&usize> = range.start_bound();
            let start = match start {
                std::ops::Bound::Included(&start) => start,
                std::ops::Bound::Excluded(start) => {
                    start.checked_add(1).unwrap_or_else(|| panic!("attempted to index slice from after maximum usize"))
                }
                std::ops::Bound::Unbounded => 0,
            };

            let end: std::ops::Bound<&usize> = range.end_bound();
            let end = match end {
                std::ops::Bound::Included(end) => {
                    end.checked_add(1).unwrap_or_else(|| panic!("attempted to index slice up to maximum usize"))
                },
                std::ops::Bound::Excluded(&end) => end,
                std::ops::Bound::Unbounded => len,
            };

            if start > end {
                panic!("slice index start is larger than end");
            }
            if end > len {
                panic!("slice end index is out of range for slice");
            }

            (start, end)
        };

        unsafe {
            // Set self's length to `start`, to be safe in case the Drain is leaked.
            self.set_len(start);
            // Use the borrow in the IterMut to indicate borrowing behavior of the
            // whole Drain iterator (like &mut T).
            let _range_slice = std::slice::from_raw_parts_mut(self.as_mut_ptr().add(start), end - start);
            // `std::vec::Drain` cannot be constructed for this custom buffer;
            // draining remains unimplemented.
            todo!()
        }
    }

    pub fn clear(&mut self) {
        let elems: *mut [T] = self.as_mut_slice();

        // SAFETY:
        // - `elems` comes directly from `as_mut_slice` and is therefore valid.
        // - Setting `self.len` before calling `drop_in_place` means that,
        //   if an element's `Drop` impl panics, the vector's `Drop` impl will
        //   do nothing (leaking the rest of the elements) instead of dropping
        //   some twice.
        unsafe {
            self.set_len(0);
            core::ptr::drop_in_place(elems);
        }
    }

    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    pub fn split_off(&mut self, at: usize) -> Self {
        #[cold]
        #[inline(never)]
        fn assert_failed(at: usize, len: usize) -> ! {
            panic!("`at` split index (is {at}) should be <= len (is {len})");
        }

        if at > self.len() {
            assert_failed(at, self.len());
        }

        if at == 0 {
            // the new vector can take over the original buffer and avoid the copy
            return std::mem::replace(
                self,
                Self::with_capacity(self.capacity()),
            );
        }

        let other_len = self.len() - at;
        let mut other = Self::with_capacity(other_len);

        // Unsafely `set_len` and copy items to `other`.
        unsafe {
            self.set_len(at);
            other.set_len(other_len);

            std::ptr::copy_nonoverlapping(self.as_ptr().add(at), other.as_mut_ptr(), other.len());
        }
        other
    }

    pub fn resize_with<F>(&mut self, new_len: usize, mut f: F)
    where
        F: FnMut() -> T,
    {
        let len = self.len();
        if new_len > len {
            let l = new_len - len;
            self.reserve(l);
            for _ in 0..l{
                self.push(f());
            }
        } else {
            self.truncate(new_len);
        }
    }

    pub fn leak<'a>(self) -> &'a mut [T] {
        let me = std::mem::ManuallyDrop::new(self);
        unsafe { std::slice::from_raw_parts_mut(me.as_mut_ptr(), me.len()) }
    }

    pub fn spare_capacity_mut(&mut self) -> &mut [std::mem::MaybeUninit<T>] {
        // Note:
        // This method is not implemented in terms of `split_at_spare_mut`,
        // to prevent invalidation of pointers to the buffer.
        unsafe {
            std::slice::from_raw_parts_mut(
                self.as_mut_ptr().add(self.len()) as *mut std::mem::MaybeUninit<T>,
                self.capacity() - self.len(),
            )
        }
    }

    pub fn split_at_spare_mut(&mut self) -> (&mut [T], &mut [std::mem::MaybeUninit<T>]) {
        // SAFETY:
        // - len is ignored and so never changed
        let (init, spare, _) = unsafe { self.split_at_spare_mut_with_len() };
        (init, spare)
    }

    unsafe fn split_at_spare_mut_with_len(
        &mut self,
    ) -> (&mut [T], &mut [std::mem::MaybeUninit<T>], &mut usize) {
        let ptr = self.as_mut_ptr();
        // SAFETY:
        // - `ptr` is guaranteed to be valid for `self.len` elements
        // - but the allocation extends out to `self.capacity()` elements, possibly
        //   uninitialized
        let spare_ptr = ptr.add(self.len());
        let spare_ptr = spare_ptr.cast::<std::mem::MaybeUninit<T>>();
        let spare_len = self.capacity() - self.len();

        // SAFETY:
        // - `ptr` is guaranteed to be valid for `self.len` elements
        // - `spare_ptr` is pointing one element past the buffer, so it doesn't overlap with `initialized`
        let initialized = std::slice::from_raw_parts_mut(ptr, self.len());
        let spare = std::slice::from_raw_parts_mut(spare_ptr, spare_len);

        (initialized, spare, (self.0 as *mut usize).sub(1).as_mut().unwrap())
    }
}

impl<T: Clone> PointerVec<T>{
    pub fn resize(&mut self, new_len: usize, value: T){
        let len = self.len();

        if new_len > len {
            let l = new_len - len;
            self.reserve(l);
            unsafe{
                let p = self.as_mut_ptr().add(len);
                for i in 0..l{
                    p.add(i).write(value.clone())
                }
                self.set_len(new_len);
            }
        } else {
            self.truncate(new_len);
        }
    }

    pub fn extend_from_slice(&mut self, other: &[T]){
        self.reserve(other.len());
        unsafe{
            let l = self.len();
            for (i, v) in other.iter().enumerate(){
                self.0.add(l + i).write(v.clone());
            }
            self.set_len(l + other.len())
        }
    }

    pub fn extend_from_within<R>(&mut self, src: R)
    where
        R: std::ops::RangeBounds<usize>
    {
        let start = match src.start_bound(){
            std::ops::Bound::Excluded(v) => *v + 1,
            std::ops::Bound::Included(v) => *v,
            std::ops::Bound::Unbounded => 0
        };
        let end = match src.end_bound(){
            std::ops::Bound::Excluded(v) => *v,
            std::ops::Bound::Included(v) => *v + 1,
            std::ops::Bound::Unbounded => self.len()
        };
        assert!(start <= end && end <= self.len(), "range out of bounds");
        let count = end - start;
        // Reserve up front so the buffer does not move while we are still
        // reading the source range out of it.
        self.reserve(count);
        unsafe{
            let len = self.len();
            for i in 0..count{
                let value = (*self.0.add(start + i)).clone();
                self.0.add(len + i).write(value);
            }
            self.set_len(len + count);
        }
    }
}

impl<T> Drop for PointerVec<T>{
    fn drop(&mut self) {
        // An unallocated vector owns no buffer.
        if self.0.is_null() {
            return;
        }
        unsafe{
            let s = self.as_mut_slice() as *mut [T];
            std::ptr::drop_in_place(s);
            // The allocation starts at the header, two usizes before the data.
            std::alloc::dealloc(
                (self.0 as *mut usize).sub(2) as *mut u8,
                Self::layout_for(self.capacity()),
            );
        }
    }
}

impl<T> std::ops::Deref for PointerVec<T> {
    type Target = [T];

    fn deref(&self) -> &[T] {
        unsafe { std::slice::from_raw_parts(self.as_ptr(), self.len()) }
    }
}

impl<T> std::ops::DerefMut for PointerVec<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { std::slice::from_raw_parts_mut(self.as_mut_ptr(), self.len()) }
    }
}

impl<T: Clone> Clone for PointerVec<T> {
    fn clone(&self) -> Self {
        let mut a = Self::new();
        a.reserve(self.capacity());
        unsafe{
            for i in 0..self.len(){
                a.0.add(i).write(self[i].clone());
            }
            a.set_len(self.len());
        }
        return a
    }
}

impl<T: std::hash::Hash> std::hash::Hash for PointerVec<T> {
    #[inline]
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        std::hash::Hash::hash(&**self, state)
    }
}

impl<T, I: std::slice::SliceIndex<[T]>> std::ops::Index<I> for PointerVec<T> {
    type Output = I::Output;

    #[inline]
    fn index(&self, index: I) -> &Self::Output {
        std::ops::Index::index(&**self, index)
    }
}

impl<T, I: std::slice::SliceIndex<[T]>> std::ops::IndexMut<I> for PointerVec<T> {
    #[inline]
    fn index_mut(&mut self, index: I) -> &mut Self::Output {
        std::ops::IndexMut::index_mut(&mut **self, index)
    }
}

impl<A> std::iter::FromIterator<A> for PointerVec<A>{
    fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self {
        let mut a = Self::new();
        iter.into_iter().for_each(|v|{
            a.push(v);
        });
        return a;
    }
}


impl<'a, T> IntoIterator for &'a PointerVec<T>{
    type Item = &'a T;
    type IntoIter = std::slice::Iter<'a, T>;
    fn into_iter(self) -> Self::IntoIter {
        self.as_slice().into_iter()
    }
}

impl<'a, T> IntoIterator for &'a mut PointerVec<T>{
    type Item = &'a mut T;
    type IntoIter = std::slice::IterMut<'a, T>;
    fn into_iter(self) -> Self::IntoIter {
        self.as_mut_slice().into_iter()
    }
}

impl<T> IntoIterator for PointerVec<T>{
    type IntoIter = Iter<T>;
    type Item = T;

    fn into_iter(self) -> Self::IntoIter {
        let i = Iter{
            v: self.0,
            len: self.len(),
            count: 0
        };
        // prevent drop of values
        std::mem::forget(self);
        i
    }
}

pub struct Iter<T>{
    v: *mut T,
    len: usize,
    count: usize
}

impl<T> Iterator for Iter<T>{
    type Item = T;

    fn count(self) -> usize
        where
            Self: Sized, {
        self.len - self.count
    }

    fn next(&mut self) -> Option<Self::Item> {
        if self.count >= self.len{
            return None
        }
        unsafe{
            let v = self.v.add(self.count).read();
            self.count += 1;
            return Some(v)
        }
    }

    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        if n >= self.len - self.count{
            return None
        }

        unsafe{
            // Drop the `n` skipped elements so they are not leaked, then read
            // out the requested element and advance past it so nothing is
            // yielded (or dropped) twice.
            for i in 0..n {
                core::ptr::drop_in_place(self.v.add(self.count + i));
            }
            let value = self.v.add(self.count + n).read();
            self.count += n + 1;
            return Some(value)
        }
    }
}

impl<T> Drop for Iter<T>{
    fn drop(&mut self) {
        let mut v = PointerVec(self.v);
        unsafe{
            // Drop the elements that were never yielded before freeing the buffer.
            core::ptr::drop_in_place(std::slice::from_raw_parts_mut(
                v.as_mut_ptr().add(self.count),
                self.len - self.count,
            ));
            v.set_len(0);
        }
        drop(v);
    }
}
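
// A small illustrative smoke test sketching the expected `Vec`-like behaviour
// of `PointerVec` (push/pop, slice access through `Deref`, `retain`, and
// consuming iteration). The growth policy itself is an implementation detail
// and is not asserted here.
#[cfg(test)]
mod tests {
    use super::PointerVec;

    #[test]
    fn push_pop_retain_and_into_iter() {
        let mut v = PointerVec::new();
        for i in 0..100 {
            v.push(i);
        }
        assert_eq!(v.len(), 100);
        assert_eq!(v.pop(), Some(99));

        // Keep only the even values; `retain` is implemented via `retain_mut`.
        v.retain(|&x| x % 2 == 0);
        assert_eq!(v.len(), 50);
        assert!(v.iter().all(|&x| x % 2 == 0));

        // Consuming iteration moves the elements out and frees the buffer.
        let collected: Vec<i32> = v.into_iter().collect();
        assert_eq!(collected.len(), 50);
    }
}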