// Copyright © SixtyFPS GmbH <info@slint.dev>
// SPDX-License-Identifier: GPL-3.0-only OR LicenseRef-Slint-Royalty-free-2.0 OR LicenseRef-Slint-Software-3.0

//! Module for the SharedVector and related things
#![allow(unsafe_code)]
#![warn(missing_docs)]
use core::fmt::Debug;
use core::mem::MaybeUninit;
use core::ops::Deref;
use core::ptr::NonNull;

use portable_atomic as atomic;

#[repr(C)]
struct SharedVectorHeader {
    refcount: atomic::AtomicIsize,
    size: usize,
    capacity: usize,
}

#[repr(C)]
struct SharedVectorInner<T> {
    header: SharedVectorHeader,
    data: MaybeUninit<T>,
}

fn compute_inner_layout<T>(capacity: usize) -> core::alloc::Layout {
    core::alloc::Layout::new::<SharedVectorHeader>()
        .extend(core::alloc::Layout::array::<T>(capacity).unwrap())
        .unwrap()
        .0
}

unsafe fn drop_inner<T>(mut inner: NonNull<SharedVectorInner<T>>) {
    unsafe {
        debug_assert_eq!(inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed), 0);
        let data_ptr = inner.as_mut().data.as_mut_ptr();
        for x in 0..inner.as_ref().header.size {
            core::ptr::drop_in_place(data_ptr.add(x));
        }
        alloc::alloc::dealloc(
            inner.as_ptr() as *mut u8,
            compute_inner_layout::<T>(inner.as_ref().header.capacity),
        )
    }
}

/// Allocate the memory for the SharedVector with the given capacity. Return the inner with size 0 and refcount 1
fn alloc_with_capacity<T>(capacity: usize) -> NonNull<SharedVectorInner<T>> {
    let ptr = unsafe { ::alloc::alloc::alloc(compute_inner_layout::<T>(capacity)) };
    assert!(!ptr.is_null(), "allocation for {capacity:?} elements failed");
    unsafe {
        core::ptr::write(
            ptr as *mut SharedVectorHeader,
            SharedVectorHeader { refcount: 1.into(), size: 0, capacity },
        );
    }
    NonNull::new(ptr).unwrap().cast()
}

/// Return a new capacity suitable for this vector.
/// Loosely based on alloc::raw_vec::RawVec::grow_amortized.
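/// For example, a `SharedVector<u32>` (4-byte elements) currently at capacity 4 that needs to
/// hold 5 elements grows to `max(4 * 2, 5) = 8`.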
fn capacity_for_grow(current_cap: usize, required_cap: usize, elem_size: usize) -> usize {
    if current_cap >= required_cap {
        return current_cap;
    }
    let cap = core::cmp::max(current_cap * 2, required_cap);
    let min_non_zero_cap = if elem_size == 1 {
        8
    } else if elem_size <= 1024 {
        4
    } else {
        1
    };
    core::cmp::max(min_non_zero_cap, cap)
}

#[repr(C)]
/// SharedVector holds a reference-counted read-only copy of `[T]`.
pub struct SharedVector<T> {
    inner: NonNull<SharedVectorInner<T>>,
}

// Safety: We use atomic reference counting, and if T is Send and Sync, we can send the vector to another thread
unsafe impl<T: Send + Sync> Send for SharedVector<T> {}
// Safety: We use atomic reference counting, and if T is Send and Sync, we can access the vector from multiple threads
unsafe impl<T: Send + Sync> Sync for SharedVector<T> {}

impl<T> Drop for SharedVector<T> {
    fn drop(&mut self) {
        unsafe {
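            // A negative reference count marks the statically allocated SHARED_NULL header used
            // by `Default`; such a vector must never be decremented or freed, so return early.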
            if self
                .inner
                .cast::<SharedVectorHeader>()
                .as_ref()
                .refcount
                .load(atomic::Ordering::Relaxed)
                < 0
            {
                return;
            }
            if self.inner.as_ref().header.refcount.fetch_sub(1, atomic::Ordering::SeqCst) == 1 {
                drop_inner(self.inner)
            }
        }
    }
}

impl<T> Clone for SharedVector<T> {
    fn clone(&self) -> Self {
        unsafe {
            if self
                .inner
                .cast::<SharedVectorHeader>()
                .as_ref()
                .refcount
                .load(atomic::Ordering::Relaxed)
                > 0
            {
                self.inner.as_ref().header.refcount.fetch_add(1, atomic::Ordering::SeqCst);
            }
            SharedVector { inner: self.inner }
        }
    }
}

impl<T> SharedVector<T> {
    /// Create a new empty array with a pre-allocated capacity in number of items
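    ///
    /// Example (a small illustrative doctest):
    /// ```
    /// use i_slint_core::SharedVector;
    /// let mut vec: SharedVector<u32> = SharedVector::with_capacity(4);
    /// assert!(vec.is_empty());
    /// vec.push(1);
    /// assert_eq!(vec.as_slice(), &[1]);
    /// ```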
    pub fn with_capacity(capacity: usize) -> Self {
        Self { inner: alloc_with_capacity(capacity) }
    }

    fn as_ptr(&self) -> *const T {
        unsafe { self.inner.as_ref().data.as_ptr() }
    }

    /// Number of elements in the array
    pub fn len(&self) -> usize {
        unsafe { self.inner.cast::<SharedVectorHeader>().as_ref().size }
    }

    /// Return true if the SharedVector is empty
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Return a slice to the array
    pub fn as_slice(&self) -> &[T] {
        if self.is_empty() {
            &[]
        } else {
            // Safety: When len > 0, we know that the pointer holds an array of the size of len
            unsafe { core::slice::from_raw_parts(self.as_ptr(), self.len()) }
        }
    }

    /// Returns the number of elements the vector can hold without reallocating, when not shared
    fn capacity(&self) -> usize {
        unsafe { self.inner.cast::<SharedVectorHeader>().as_ref().capacity }
    }
}

impl<T: Clone> SharedVector<T> {
    /// Create a SharedVector from a slice
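    ///
    /// Example:
    /// ```
    /// use i_slint_core::SharedVector;
    /// let vec = SharedVector::from_slice(&[1, 2, 3]);
    /// assert_eq!(vec.as_slice(), &[1, 2, 3]);
    /// ```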
    pub fn from_slice(slice: &[T]) -> SharedVector<T> {
        Self::from(slice)
    }

    /// Ensure that the reference count is 1 so the array can be changed.
    /// If that's not the case, the array will be cloned
    fn detach(&mut self, new_capacity: usize) {
        let is_shared =
            unsafe { self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) } != 1;
        if !is_shared && new_capacity <= self.capacity() {
            return;
        }
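        // Allocate a fresh buffer and refill it from the old one: `into_iter()` below moves the
        // elements when the old buffer was uniquely owned and clones them when it was shared
        // (see the `IntoIterator` impl further down).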
        let mut new_array = SharedVector::with_capacity(new_capacity);
        core::mem::swap(&mut self.inner, &mut new_array.inner);
        let mut size = 0;
        for x in new_array.into_iter() {
            assert_ne!(size, new_capacity);
            unsafe {
                core::ptr::write(self.inner.as_mut().data.as_mut_ptr().add(size), x);
                size += 1;
                self.inner.as_mut().header.size = size;
            }
            if size == new_capacity {
                break;
            }
        }
    }

    /// Return a mutable slice to the array. If the array was shared, this will make a copy of the array.
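    ///
    /// Example, showing that a previously taken clone keeps the original contents:
    /// ```
    /// use i_slint_core::SharedVector;
    /// let mut vec = SharedVector::from_slice(&[1, 2, 3]);
    /// let copy = vec.clone();
    /// vec.make_mut_slice()[0] = 42;
    /// assert_eq!(vec.as_slice(), &[42, 2, 3]);
    /// assert_eq!(copy.as_slice(), &[1, 2, 3]);
    /// ```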
    pub fn make_mut_slice(&mut self) -> &mut [T] {
        self.detach(self.len());
        unsafe { core::slice::from_raw_parts_mut(self.as_ptr() as *mut T, self.len()) }
    }

    /// Add an element to the array. If the array was shared, this will make a copy of the array.
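    ///
    /// Example:
    /// ```
    /// use i_slint_core::SharedVector;
    /// let mut vec = SharedVector::from_slice(&[1, 2]);
    /// vec.push(3);
    /// assert_eq!(vec.as_slice(), &[1, 2, 3]);
    /// ```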
    pub fn push(&mut self, value: T) {
        self.detach(capacity_for_grow(self.capacity(), self.len() + 1, core::mem::size_of::<T>()));
        unsafe {
            core::ptr::write(
                self.inner.as_mut().data.as_mut_ptr().add(self.inner.as_mut().header.size),
                value,
            );
            self.inner.as_mut().header.size += 1;
        }
    }

    /// Removes the last element from the array and returns it.
    /// If the array was shared, this will make a copy of the array.
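    ///
    /// Example:
    /// ```
    /// use i_slint_core::SharedVector;
    /// let mut vec = SharedVector::from_slice(&[1, 2]);
    /// assert_eq!(vec.pop(), Some(2));
    /// assert_eq!(vec.pop(), Some(1));
    /// assert_eq!(vec.pop(), None);
    /// assert!(vec.is_empty());
    /// ```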
    pub fn pop(&mut self) -> Option<T> {
        if self.is_empty() {
            None
        } else {
            self.detach(self.len());
            unsafe {
                self.inner.as_mut().header.size -= 1;
                Some(core::ptr::read(self.inner.as_mut().data.as_mut_ptr().add(self.len())))
            }
        }
    }

    /// Resize the array to the given size.
    /// If the array was smaller, new elements will be initialized with the value.
    /// If the array was bigger, extra elements will be discarded.
    ///
    /// ```
    /// use i_slint_core::SharedVector;
    /// let mut shared_vector = SharedVector::<u32>::from_slice(&[1, 2, 3]);
    /// shared_vector.resize(5, 8);
    /// assert_eq!(shared_vector.as_slice(), &[1, 2, 3, 8, 8]);
    /// shared_vector.resize(2, 0);
    /// assert_eq!(shared_vector.as_slice(), &[1, 2]);
    /// ```
    pub fn resize(&mut self, new_len: usize, value: T) {
        if self.len() == new_len {
            return;
        }
        self.detach(new_len);
        // Safety: detach ensured that the array is not shared.
        let inner = unsafe { self.inner.as_mut() };

        if inner.header.size >= new_len {
            self.shrink(new_len);
        } else {
            while inner.header.size < new_len {
                // Safety: The array must have a capacity of at least new_len because of the detach call earlier
                unsafe {
                    core::ptr::write(inner.data.as_mut_ptr().add(inner.header.size), value.clone());
                }
                inner.header.size += 1;
            }
        }
    }

    fn shrink(&mut self, new_len: usize) {
        if self.len() == new_len {
            return;
        }

        assert!(
            unsafe { self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) } == 1
        );
        // Safety: the caller (and the assert above) must ensure that the array is not shared.
        let inner = unsafe { self.inner.as_mut() };

        while inner.header.size > new_len {
            inner.header.size -= 1;
            // Safety: The array was of size inner.header.size, so there should be an element there
            unsafe {
                core::ptr::drop_in_place(inner.data.as_mut_ptr().add(inner.header.size));
            }
        }
    }

    /// Clears the vector and removes all elements.
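    ///
    /// Example:
    /// ```
    /// use i_slint_core::SharedVector;
    /// let mut vec = SharedVector::from_slice(&[1, 2, 3]);
    /// vec.clear();
    /// assert!(vec.is_empty());
    /// ```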
    pub fn clear(&mut self) {
        let is_shared =
            unsafe { self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) } != 1;
        if is_shared {
            *self = SharedVector::default();
        } else {
            self.shrink(0)
        }
    }

    /// Reserves capacity for at least `additional` elements more than the current vector's length.
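    ///
    /// Example (reserving changes only the capacity, not the length or contents):
    /// ```
    /// use i_slint_core::SharedVector;
    /// let mut vec = SharedVector::from_slice(&[1, 2, 3]);
    /// vec.reserve(10);
    /// assert_eq!(vec.as_slice(), &[1, 2, 3]);
    /// ```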
    pub fn reserve(&mut self, additional: usize) {
        self.detach((self.len() + additional).max(self.capacity()))
    }
}

impl<T> Deref for SharedVector<T> {
    type Target = [T];
    fn deref(&self) -> &Self::Target {
        self.as_slice()
    }
}

/* FIXME: is this a good idea to implement DerefMut knowing what it might detach?
impl<T> DerefMut for SharedVector<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.as_mut_slice()
    }
}*/

impl<T: Clone> From<&[T]> for SharedVector<T> {
    fn from(slice: &[T]) -> Self {
        let capacity = slice.len();
        let mut result = Self::with_capacity(capacity);
        for x in slice {
            unsafe {
                core::ptr::write(
                    result.inner.as_mut().data.as_mut_ptr().add(result.inner.as_mut().header.size),
                    x.clone(),
                );
                result.inner.as_mut().header.size += 1;
            }
        }
        result
    }
}

impl<T, const N: usize> From<[T; N]> for SharedVector<T> {
    fn from(array: [T; N]) -> Self {
        array.into_iter().collect()
    }
}

impl<T> FromIterator<T> for SharedVector<T> {
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        let mut iter = iter.into_iter();
        let mut capacity = iter.size_hint().0;
        let mut result = Self::with_capacity(capacity);
        let mut size = 0;
        while let Some(x) = iter.next() {
            if size >= capacity {
                capacity = capacity_for_grow(
                    capacity,
                    size + 1 + iter.size_hint().0,
                    core::mem::size_of::<T>(),
                );
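                // The current buffer is full. Mark it as un-shared (refcount 0) and hand it to an
                // IntoIter so that the old allocation is released once the elements written so far
                // have been moved into the bigger buffer allocated below.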
                unsafe {
                    result.inner.as_ref().header.refcount.store(0, atomic::Ordering::Relaxed)
                };
                let mut iter = IntoIter(IntoIterInner::UnShared(result.inner, 0));
                result.inner = alloc_with_capacity::<T>(capacity);
                match &mut iter.0 {
                    IntoIterInner::UnShared(old_inner, begin) => {
                        while *begin < size {
                            unsafe {
                                core::ptr::write(
                                    result.inner.as_mut().data.as_mut_ptr().add(*begin),
                                    core::ptr::read(old_inner.as_ref().data.as_ptr().add(*begin)),
                                );
                                *begin += 1;
                                result.inner.as_mut().header.size = *begin;
                            }
                        }
                    }
                    _ => unreachable!(),
                }
            }
            debug_assert_eq!(result.len(), size);
            debug_assert!(result.capacity() > size);
            unsafe {
                core::ptr::write(result.inner.as_mut().data.as_mut_ptr().add(size), x);
                size += 1;
                result.inner.as_mut().header.size = size;
            }
        }
        result
    }
}

impl<T: Clone> Extend<T> for SharedVector<T> {
    fn extend<X: IntoIterator<Item = T>>(&mut self, iter: X) {
        let iter = iter.into_iter();
        let hint = iter.size_hint().0;
        if hint > 0 {
            self.detach(capacity_for_grow(
                self.capacity(),
                self.len() + hint,
                core::mem::size_of::<T>(),
            ));
        }
        for item in iter {
            self.push(item);
        }
    }
}

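// The empty/default SharedVector points to this statically allocated header. Its refcount of -1
// tells Drop and Clone (which check the sign of the count) to leave it alone, so it is never
// incremented, decremented, or deallocated.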
static SHARED_NULL: SharedVectorHeader =
    SharedVectorHeader { refcount: atomic::AtomicIsize::new(-1), size: 0, capacity: 0 };

impl<T> Default for SharedVector<T> {
    fn default() -> Self {
        SharedVector { inner: NonNull::from(&SHARED_NULL).cast() }
    }
}

impl<T: Debug> Debug for SharedVector<T> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        self.as_slice().fmt(f)
    }
}

impl<T> AsRef<[T]> for SharedVector<T> {
    #[inline]
    fn as_ref(&self) -> &[T] {
        self.as_slice()
    }
}

impl<T, U> PartialEq<U> for SharedVector<T>
where
    U: ?Sized + AsRef<[T]>,
    T: PartialEq,
{
    fn eq(&self, other: &U) -> bool {
        self.as_slice() == other.as_ref()
    }
}

impl<T: Eq> Eq for SharedVector<T> {}

impl<T: Clone> IntoIterator for SharedVector<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;
    fn into_iter(self) -> Self::IntoIter {
        IntoIter(unsafe {
            if self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) == 1 {
                let inner = self.inner;
                core::mem::forget(self);
                inner.as_ref().header.refcount.store(0, atomic::Ordering::Relaxed);
                IntoIterInner::UnShared(inner, 0)
            } else {
                IntoIterInner::Shared(self, 0)
            }
        })
    }
}

#[cfg(feature = "serde")]
use serde::ser::SerializeSeq;
#[cfg(feature = "serde")]
impl<T> serde::Serialize for SharedVector<T>
where
    T: serde::Serialize,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let mut seq = serializer.serialize_seq(Some(self.len()))?;
        for item in self.iter() {
            seq.serialize_element(item)?;
        }
        seq.end()
    }
}

#[cfg(feature = "serde")]
impl<'de, T> serde::Deserialize<'de> for SharedVector<T>
where
    T: Clone + serde::Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let mut elements: alloc::vec::Vec<T> = serde::Deserialize::deserialize(deserializer)?;
        let mut shared_vec = SharedVector::with_capacity(elements.len());
        for elem in elements.drain(..) {
            shared_vec.push(elem);
        }
        Ok(shared_vec)
    }
}

enum IntoIterInner<T> {
    Shared(SharedVector<T>, usize),
    // Elements up to the usize member are already moved out
    UnShared(NonNull<SharedVectorInner<T>>, usize),
}

impl<T> Drop for IntoIterInner<T> {
    fn drop(&mut self) {
        match self {
            IntoIterInner::Shared(..) => { /* drop of SharedVector takes care of it */ }
            IntoIterInner::UnShared(inner, begin) => unsafe {
                debug_assert_eq!(inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed), 0);
                let data_ptr = inner.as_mut().data.as_mut_ptr();
                for x in (*begin)..inner.as_ref().header.size {
                    core::ptr::drop_in_place(data_ptr.add(x));
                }
                ::alloc::alloc::dealloc(
                    inner.as_ptr() as *mut u8,
                    compute_inner_layout::<T>(inner.as_ref().header.capacity),
                )
            },
        }
    }
}

/// An iterator that moves out of a SharedVector.
///
/// This `struct` is created by the `into_iter` method on [`SharedVector`] (provided
/// by the [`IntoIterator`] trait).
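///
/// Example of iterating by value and collecting into a new `SharedVector`:
/// ```
/// use i_slint_core::SharedVector;
/// let vec = SharedVector::from_slice(&[1, 2, 3]);
/// let doubled: SharedVector<i32> = vec.into_iter().map(|x| x * 2).collect();
/// assert_eq!(doubled.as_slice(), &[2, 4, 6]);
/// ```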
pub struct IntoIter<T>(IntoIterInner<T>);

impl<T: Clone> Iterator for IntoIter<T> {
    type Item = T;

    fn next(&mut self) -> Option<Self::Item> {
        match &mut self.0 {
            IntoIterInner::Shared(array, moved) => {
                let result = array.as_slice().get(*moved).cloned();
                *moved += 1;
                result
            }
            IntoIterInner::UnShared(inner, begin) => unsafe {
                if *begin < inner.as_ref().header.size {
                    let r = core::ptr::read(inner.as_ref().data.as_ptr().add(*begin));
                    *begin += 1;
                    Some(r)
                } else {
                    None
                }
            },
        }
    }
}

#[test]
fn simple_test() {
    let x: SharedVector<i32> = SharedVector::from([1, 2, 3]);
    let y: SharedVector<i32> = SharedVector::from([3, 2, 1]);
    assert_eq!(x, x.clone());
    assert_ne!(x, y);
    let z: [i32; 3] = [1, 2, 3];
    assert_eq!(z, x.as_slice());
    let vec: std::vec::Vec<i32> = std::vec![1, 2, 3];
    assert_eq!(x, vec);
    let def: SharedVector<i32> = Default::default();
    assert_eq!(def, SharedVector::<i32>::default());
    assert_ne!(def, x);
}

#[test]
fn push_test() {
    let mut x: SharedVector<i32> = SharedVector::from([1, 2, 3]);
    let y = x.clone();
    x.push(4);
    x.push(5);
    x.push(6);
    assert_eq!(x.as_slice(), &[1, 2, 3, 4, 5, 6]);
    assert_eq!(y.as_slice(), &[1, 2, 3]);
}

#[test]
#[should_panic]
fn invalid_capacity_test() {
    let _: SharedVector<u8> = SharedVector::with_capacity(usize::MAX / 2 - 1000);
}

#[test]
fn collect_from_iter_with_no_size_hint() {
    use std::string::{String, ToString};
    struct NoSizeHintIter<'a> {
        data: &'a [&'a str],
        i: usize,
    }

    impl Iterator for NoSizeHintIter<'_> {
        type Item = String;

        fn next(&mut self) -> Option<Self::Item> {
            if self.i >= self.data.len() {
                return None;
            }
            let item = self.data[self.i];
            self.i += 1;
            Some(item.to_string())
        }

        fn size_hint(&self) -> (usize, Option<usize>) {
            (0, None)
        }
    }

    // 5 elements to be above the initial "grow"-capacity of 4 and thus require one realloc.
    let input = NoSizeHintIter { data: &["Hello", "sweet", "world", "of", "iterators"], i: 0 };

    let shared_vec: SharedVector<String> = input.collect();
    assert_eq!(shared_vec.as_slice(), &["Hello", "sweet", "world", "of", "iterators"]);
}

#[test]
fn test_capacity_grows_only_when_needed() {
    let mut vec: SharedVector<u8> = SharedVector::with_capacity(2);
    vec.push(0);
    assert_eq!(vec.capacity(), 2);
    vec.push(0);
    assert_eq!(vec.capacity(), 2);
    vec.push(0);
    assert_eq!(vec.len(), 3);
    assert!(vec.capacity() > 2);
}

#[test]
fn test_vector_clear() {
    let mut vec: SharedVector<std::string::String> = Default::default();
    vec.clear();
    vec.push("Hello".into());
    vec.push("World".into());
    vec.push("of".into());
    vec.push("Vectors".into());

    let mut copy = vec.clone();

    assert_eq!(vec.len(), 4);
    let orig_cap = vec.capacity();
    assert!(orig_cap >= vec.len());
    vec.clear();
    assert_eq!(vec.len(), 0);
    assert_eq!(vec.capacity(), 0); // vec was shared, so start with new empty vector.
    vec.push("Welcome back".into());
    assert_eq!(vec.len(), 1);
    assert!(vec.capacity() >= vec.len());

    assert_eq!(copy.len(), 4);
    assert_eq!(copy.capacity(), orig_cap);
    copy.clear(); // copy is not shared (anymore), retain capacity.
    assert_eq!(copy.capacity(), orig_cap);
}

#[test]
fn pop_test() {
    let mut x: SharedVector<i32> = SharedVector::from([1, 2, 3]);
    let y = x.clone();
    assert_eq!(x.pop(), Some(3));
    assert_eq!(x.pop(), Some(2));
    assert_eq!(x.pop(), Some(1));
    assert_eq!(x.pop(), None);
    assert!(x.is_empty());
    assert_eq!(y.as_slice(), &[1, 2, 3]);
}

#[cfg(feature = "ffi")]
pub(crate) mod ffi {
    use super::*;

    #[unsafe(no_mangle)]
    /// This function is used for the low-level C++ interface to allocate the backing vector of a SharedVector.
    pub unsafe extern "C" fn slint_shared_vector_allocate(size: usize, align: usize) -> *mut u8 {
        unsafe { alloc::alloc::alloc(alloc::alloc::Layout::from_size_align(size, align).unwrap()) }
    }

    #[unsafe(no_mangle)]
    /// This function is used for the low-level C++ interface to deallocate the backing vector of a SharedVector
    pub unsafe extern "C" fn slint_shared_vector_free(ptr: *mut u8, size: usize, align: usize) {
        unsafe {
            alloc::alloc::dealloc(ptr, alloc::alloc::Layout::from_size_align(size, align).unwrap())
        }
    }

    #[unsafe(no_mangle)]
    /// This function is used for the low-level C++ interface to initialize the empty SharedVector.
    pub unsafe extern "C" fn slint_shared_vector_empty() -> *const u8 {
        &SHARED_NULL as *const _ as *const u8
    }
}

#[cfg(feature = "serde")]
#[test]
fn test_serialize_deserialize_sharedvector() {
    let v = SharedVector::from([1, 2, 3]);
    let serialized = serde_json::to_string(&v).unwrap();
    let deserialized: SharedVector<i32> = serde_json::from_str(&serialized).unwrap();
    assert_eq!(v, deserialized);
}

#[test]
fn test_reserve() {
    let mut v = SharedVector::from([1, 2, 3]);
    assert_eq!(v.capacity(), 3);
    v.reserve(1);
    assert_eq!(v.capacity(), 4);
    assert_eq!(v.len(), 3);
    v.push(4);
    v.push(5);
    assert_eq!(v.len(), 5);
    assert_eq!(v.capacity(), 8);
    v.reserve(1);
    assert_eq!(v.capacity(), 8);
    v.reserve(8);
    assert_eq!(v.len(), 5);
    assert_eq!(v.capacity(), 13);
}