#![allow(unsafe_code)]
#![warn(missing_docs)]
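//! A reference-counted, copy-on-write vector (`SharedVector`) that can be shared
//! between threads and used without `std`.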
use core::fmt::Debug;
use core::mem::MaybeUninit;
use core::ops::Deref;
use core::ptr::NonNull;

use portable_atomic as atomic;

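// The buffer layout (a header immediately followed by the elements) is `repr(C)`,
// presumably so it can also be created and inspected from non-Rust code through the
// `ffi` module at the bottom of this file.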
#[repr(C)]
struct SharedVectorHeader {
    refcount: atomic::AtomicIsize,
    size: usize,
    capacity: usize,
}

#[repr(C)]
struct SharedVectorInner<T> {
    header: SharedVectorHeader,
    data: MaybeUninit<T>,
}

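/// Layout of one heap allocation: a `SharedVectorHeader` followed by `capacity`
/// elements of type `T`.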
fn compute_inner_layout<T>(capacity: usize) -> core::alloc::Layout {
    core::alloc::Layout::new::<SharedVectorHeader>()
        .extend(core::alloc::Layout::array::<T>(capacity).unwrap())
        .unwrap()
        .0
}

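/// Drops the elements and deallocates the buffer.
///
/// Safety: the reference count must already have reached zero and the buffer must not
/// be used afterwards.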
unsafe fn drop_inner<T>(mut inner: NonNull<SharedVectorInner<T>>) {
    debug_assert_eq!(inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed), 0);
    let data_ptr = inner.as_mut().data.as_mut_ptr();
    for x in 0..inner.as_ref().header.size {
        core::ptr::drop_in_place(data_ptr.add(x));
    }
    alloc::alloc::dealloc(
        inner.as_ptr() as *mut u8,
        compute_inner_layout::<T>(inner.as_ref().header.capacity),
    )
}

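/// Allocates a buffer for `capacity` elements, with a size of 0 and a reference count of 1.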
fn alloc_with_capacity<T>(capacity: usize) -> NonNull<SharedVectorInner<T>> {
    let ptr = unsafe { ::alloc::alloc::alloc(compute_inner_layout::<T>(capacity)) };
    assert!(!ptr.is_null(), "allocation of a buffer for {capacity} elements failed");
    unsafe {
        core::ptr::write(
            ptr as *mut SharedVectorHeader,
            SharedVectorHeader { refcount: 1.into(), size: 0, capacity },
        );
    }
    NonNull::new(ptr).unwrap().cast()
}

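// Growth policy similar to std's `Vec`: at least double the current capacity, and never
// go below a small minimum capacity that depends on the element size.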
fn capacity_for_grow(current_cap: usize, required_cap: usize, elem_size: usize) -> usize {
    if current_cap >= required_cap {
        return current_cap;
    }
    let cap = core::cmp::max(current_cap * 2, required_cap);
    let min_non_zero_cap = if elem_size == 1 {
        8
    } else if elem_size <= 1024 {
        4
    } else {
        1
    };
    core::cmp::max(min_non_zero_cap, cap)
}

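/// A reference-counted, copy-on-write vector.
///
/// Cloning is cheap: clones share the same buffer. Mutating operations (`push`, `pop`,
/// `resize`, `make_mut_slice`, …) detach the vector first, i.e. they copy the elements
/// into a new buffer if the old one is shared.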
#[repr(C)]
pub struct SharedVector<T> {
    inner: NonNull<SharedVectorInner<T>>,
}

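// Safety: a SharedVector only hands out shared references to its elements (or exclusive
// references after ensuring unique ownership), and the reference count is atomic, so it
// can be sent to and shared between threads whenever `T: Send + Sync`.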
unsafe impl<T: Send + Sync> Send for SharedVector<T> {}
unsafe impl<T: Send + Sync> Sync for SharedVector<T> {}

impl<T> Drop for SharedVector<T> {
    fn drop(&mut self) {
        unsafe {
            // A negative reference count marks the static SHARED_NULL header:
            // it must never be modified or freed.
            if self
                .inner
                .cast::<SharedVectorHeader>()
                .as_ref()
                .refcount
                .load(atomic::Ordering::Relaxed)
                < 0
            {
                return;
            }
            if self.inner.as_ref().header.refcount.fetch_sub(1, atomic::Ordering::SeqCst) == 1 {
                drop_inner(self.inner)
            }
        }
    }
}

impl<T> Clone for SharedVector<T> {
    fn clone(&self) -> Self {
        unsafe {
            // Do not increment the count of the static SHARED_NULL header.
            if self
                .inner
                .cast::<SharedVectorHeader>()
                .as_ref()
                .refcount
                .load(atomic::Ordering::Relaxed)
                > 0
            {
                self.inner.as_ref().header.refcount.fetch_add(1, atomic::Ordering::SeqCst);
            }
            SharedVector { inner: self.inner }
        }
    }
}

impl<T> SharedVector<T> {
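    /// Creates a new, empty vector with a buffer pre-allocated for `capacity` elements.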
    pub fn with_capacity(capacity: usize) -> Self {
        Self { inner: alloc_with_capacity(capacity) }
    }

    fn as_ptr(&self) -> *const T {
        unsafe { self.inner.as_ref().data.as_ptr() }
    }

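    /// Returns the number of elements in the vector.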
    pub fn len(&self) -> usize {
        unsafe { self.inner.cast::<SharedVectorHeader>().as_ref().size }
    }

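    /// Returns `true` if the vector contains no elements.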
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

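    /// Returns a slice of all elements.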
    pub fn as_slice(&self) -> &[T] {
        if self.is_empty() {
            &[]
        } else {
            unsafe { core::slice::from_raw_parts(self.as_ptr(), self.len()) }
        }
    }

    fn capacity(&self) -> usize {
        unsafe { self.inner.cast::<SharedVectorHeader>().as_ref().capacity }
    }
}

impl<T: Clone> SharedVector<T> {
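    /// Creates a vector holding a clone of every element of `slice`.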
    pub fn from_slice(slice: &[T]) -> SharedVector<T> {
        Self::from(slice)
    }

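    /// Ensures that this handle is the sole owner of a buffer with room for `new_capacity`
    /// elements, so it can be mutated in place. If the buffer is shared or too small, the
    /// elements are moved (or cloned) into a fresh allocation; only the first
    /// `new_capacity` elements are kept.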
    fn detach(&mut self, new_capacity: usize) {
        let is_shared =
            unsafe { self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) } != 1;
        if !is_shared && new_capacity <= self.capacity() {
            return;
        }
        let mut new_array = SharedVector::with_capacity(new_capacity);
        core::mem::swap(&mut self.inner, &mut new_array.inner);
        let mut size = 0;
        for x in new_array.into_iter() {
            assert_ne!(size, new_capacity);
            unsafe {
                core::ptr::write(self.inner.as_mut().data.as_mut_ptr().add(size), x);
                size += 1;
                self.inner.as_mut().header.size = size;
            }
            if size == new_capacity {
                break;
            }
        }
    }

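    /// Returns a mutable slice of the elements. If the buffer is shared, the elements
    /// are copied into a new, uniquely owned buffer first.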
    pub fn make_mut_slice(&mut self) -> &mut [T] {
        self.detach(self.len());
        unsafe { core::slice::from_raw_parts_mut(self.as_ptr() as *mut T, self.len()) }
    }

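    /// Appends `value` to the end of the vector, detaching and growing the buffer as needed.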
    pub fn push(&mut self, value: T) {
        self.detach(capacity_for_grow(self.capacity(), self.len() + 1, core::mem::size_of::<T>()));
        unsafe {
            core::ptr::write(
                self.inner.as_mut().data.as_mut_ptr().add(self.inner.as_mut().header.size),
                value,
            );
            self.inner.as_mut().header.size += 1;
        }
    }

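    /// Removes the last element and returns it, or `None` if the vector is empty.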
    pub fn pop(&mut self) -> Option<T> {
        if self.is_empty() {
            None
        } else {
            self.detach(self.len());
            unsafe {
                self.inner.as_mut().header.size -= 1;
                Some(core::ptr::read(self.inner.as_mut().data.as_mut_ptr().add(self.len())))
            }
        }
    }

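    /// Resizes the vector so that its length becomes `new_len`.
    ///
    /// If `new_len` is greater than the current length, the vector is extended with clones
    /// of `value`; otherwise it is truncated and the removed elements are dropped.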
    pub fn resize(&mut self, new_len: usize, value: T) {
        if self.len() == new_len {
            return;
        }
        self.detach(new_len);
        let inner = unsafe { self.inner.as_mut() };

        if inner.header.size >= new_len {
            self.shrink(new_len);
        } else {
            while inner.header.size < new_len {
                unsafe {
                    core::ptr::write(inner.data.as_mut_ptr().add(inner.header.size), value.clone());
                }
                inner.header.size += 1;
            }
        }
    }

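    /// Drops the elements past `new_len`. The buffer must not be shared.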
    fn shrink(&mut self, new_len: usize) {
        if self.len() == new_len {
            return;
        }

        assert!(
            unsafe { self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) } == 1
        );
        let inner = unsafe { self.inner.as_mut() };

        while inner.header.size > new_len {
            inner.header.size -= 1;
            unsafe {
                core::ptr::drop_in_place(inner.data.as_mut_ptr().add(inner.header.size));
            }
        }
    }

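    /// Removes all elements.
    ///
    /// If the buffer is not shared, the allocation is kept for later reuse; otherwise the
    /// vector is reset to the default empty state.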
    pub fn clear(&mut self) {
        let is_shared =
            unsafe { self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) } != 1;
        if is_shared {
            *self = SharedVector::default();
        } else {
            self.shrink(0)
        }
    }

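    /// Reserves capacity for at least `additional` more elements, detaching and
    /// reallocating the buffer if needed.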
    pub fn reserve(&mut self, additional: usize) {
        self.detach((self.len() + additional).max(self.capacity()))
    }
}

impl<T> Deref for SharedVector<T> {
    type Target = [T];
    fn deref(&self) -> &Self::Target {
        self.as_slice()
    }
}

impl<T: Clone> From<&[T]> for SharedVector<T> {
    fn from(slice: &[T]) -> Self {
        let capacity = slice.len();
        let mut result = Self::with_capacity(capacity);
        for x in slice {
            unsafe {
                core::ptr::write(
                    result.inner.as_mut().data.as_mut_ptr().add(result.inner.as_mut().header.size),
                    x.clone(),
                );
                result.inner.as_mut().header.size += 1;
            }
        }
        result
    }
}

impl<T, const N: usize> From<[T; N]> for SharedVector<T> {
    fn from(array: [T; N]) -> Self {
        array.into_iter().collect()
    }
}

impl<T> FromIterator<T> for SharedVector<T> {
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        let mut iter = iter.into_iter();
        let mut capacity = iter.size_hint().0;
        let mut result = Self::with_capacity(capacity);
        let mut size = 0;
        while let Some(x) = iter.next() {
            if size >= capacity {
                capacity = capacity_for_grow(
                    capacity,
                    size + 1 + iter.size_hint().0,
                    core::mem::size_of::<T>(),
                );
                // Take ownership of the old buffer by wrapping it in an `IntoIter` with a
                // reference count of 0: if anything below panics, its Drop impl still frees
                // the elements that were not moved yet.
                unsafe {
                    result.inner.as_ref().header.refcount.store(0, atomic::Ordering::Relaxed)
                };
                let mut iter = IntoIter(IntoIterInner::UnShared(result.inner, 0));
                result.inner = alloc_with_capacity::<T>(capacity);
                match &mut iter.0 {
                    IntoIterInner::UnShared(old_inner, begin) => {
                        while *begin < size {
                            unsafe {
                                core::ptr::write(
                                    result.inner.as_mut().data.as_mut_ptr().add(*begin),
                                    core::ptr::read(old_inner.as_ref().data.as_ptr().add(*begin)),
                                );
                                *begin += 1;
                                result.inner.as_mut().header.size = *begin;
                            }
                        }
                    }
                    _ => unreachable!(),
                }
            }
            debug_assert_eq!(result.len(), size);
            debug_assert!(result.capacity() > size);
            unsafe {
                core::ptr::write(result.inner.as_mut().data.as_mut_ptr().add(size), x);
                size += 1;
                result.inner.as_mut().header.size = size;
            }
        }
        result
    }
}

impl<T: Clone> Extend<T> for SharedVector<T> {
    fn extend<X: IntoIterator<Item = T>>(&mut self, iter: X) {
        let iter = iter.into_iter();
        let hint = iter.size_hint().0;
        if hint > 0 {
            self.detach(capacity_for_grow(
                self.capacity(),
                self.len() + hint,
                core::mem::size_of::<T>(),
            ));
        }
        for item in iter {
            self.push(item);
        }
    }
}

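// Header used by the default (empty) SharedVector. The reference count of -1 marks it as
// static: Drop and Clone leave it untouched and it is never deallocated.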
static SHARED_NULL: SharedVectorHeader =
    SharedVectorHeader { refcount: atomic::AtomicIsize::new(-1), size: 0, capacity: 0 };

impl<T> Default for SharedVector<T> {
    fn default() -> Self {
        SharedVector { inner: NonNull::from(&SHARED_NULL).cast() }
    }
}

impl<T: Debug> Debug for SharedVector<T> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        self.as_slice().fmt(f)
    }
}

impl<T> AsRef<[T]> for SharedVector<T> {
    #[inline]
    fn as_ref(&self) -> &[T] {
        self.as_slice()
    }
}

impl<T, U> PartialEq<U> for SharedVector<T>
where
    U: ?Sized + AsRef<[T]>,
    T: PartialEq,
{
    fn eq(&self, other: &U) -> bool {
        self.as_slice() == other.as_ref()
    }
}

impl<T: Eq> Eq for SharedVector<T> {}

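// Consuming iteration: if this handle is the sole owner of the buffer, the elements are
// moved out directly; otherwise they are cloned one by one.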
impl<T: Clone> IntoIterator for SharedVector<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;
    fn into_iter(self) -> Self::IntoIter {
        IntoIter(unsafe {
            if self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) == 1 {
                let inner = self.inner;
                core::mem::forget(self);
                inner.as_ref().header.refcount.store(0, atomic::Ordering::Relaxed);
                IntoIterInner::UnShared(inner, 0)
            } else {
                IntoIterInner::Shared(self, 0)
            }
        })
    }
}

#[cfg(feature = "serde")]
use serde::ser::SerializeSeq;
#[cfg(feature = "serde")]
impl<T> serde::Serialize for SharedVector<T>
where
    T: serde::Serialize,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let mut seq = serializer.serialize_seq(Some(self.len()))?;
        for item in self.iter() {
            seq.serialize_element(item)?;
        }
        seq.end()
    }
}

#[cfg(feature = "serde")]
impl<'de, T> serde::Deserialize<'de> for SharedVector<T>
where
    T: Clone + serde::Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let mut elements: alloc::vec::Vec<T> = serde::Deserialize::deserialize(deserializer)?;
        let mut shared_vec = SharedVector::with_capacity(elements.len());
        for elem in elements.drain(..) {
            shared_vec.push(elem);
        }
        Ok(shared_vec)
    }
}

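// The two modes of the consuming iterator: `Shared` clones out of a still-shared vector,
// `UnShared` owns the buffer (reference count 0) and moves the elements out. The `usize`
// is the index of the next element to yield.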
enum IntoIterInner<T> {
    Shared(SharedVector<T>, usize),
    UnShared(NonNull<SharedVectorInner<T>>, usize),
}

impl<T> Drop for IntoIterInner<T> {
    fn drop(&mut self) {
        match self {
            IntoIterInner::Shared(..) => { /* dropping the SharedVector field does the cleanup */ }
            IntoIterInner::UnShared(inner, begin) => unsafe {
                // We own the buffer: drop the elements that were not yielded yet and free it.
                debug_assert_eq!(inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed), 0);
                let data_ptr = inner.as_mut().data.as_mut_ptr();
                for x in (*begin)..inner.as_ref().header.size {
                    core::ptr::drop_in_place(data_ptr.add(x));
                }
                ::alloc::alloc::dealloc(
                    inner.as_ptr() as *mut u8,
                    compute_inner_layout::<T>(inner.as_ref().header.capacity),
                )
            },
        }
    }
}

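/// A consuming iterator over the elements of a [`SharedVector`], returned by its
/// [`IntoIterator`] implementation.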
pub struct IntoIter<T>(IntoIterInner<T>);

impl<T: Clone> Iterator for IntoIter<T> {
    type Item = T;

    fn next(&mut self) -> Option<Self::Item> {
        match &mut self.0 {
            IntoIterInner::Shared(array, moved) => {
                let result = array.as_slice().get(*moved).cloned();
                *moved += 1;
                result
            }
            IntoIterInner::UnShared(inner, begin) => unsafe {
                if *begin < inner.as_ref().header.size {
                    let r = core::ptr::read(inner.as_ref().data.as_ptr().add(*begin));
                    *begin += 1;
                    Some(r)
                } else {
                    None
                }
            },
        }
    }
}

#[test]
fn simple_test() {
    let x: SharedVector<i32> = SharedVector::from([1, 2, 3]);
    let y: SharedVector<i32> = SharedVector::from([3, 2, 1]);
    assert_eq!(x, x.clone());
    assert_ne!(x, y);
    let z: [i32; 3] = [1, 2, 3];
    assert_eq!(z, x.as_slice());
    let vec: std::vec::Vec<i32> = std::vec![1, 2, 3];
    assert_eq!(x, vec);
    let def: SharedVector<i32> = Default::default();
    assert_eq!(def, SharedVector::<i32>::default());
    assert_ne!(def, x);
}

#[test]
fn push_test() {
    let mut x: SharedVector<i32> = SharedVector::from([1, 2, 3]);
    let y = x.clone();
    x.push(4);
    x.push(5);
    x.push(6);
    assert_eq!(x.as_slice(), &[1, 2, 3, 4, 5, 6]);
    assert_eq!(y.as_slice(), &[1, 2, 3]);
}

#[test]
#[should_panic]
fn invalid_capacity_test() {
    let _: SharedVector<u8> = SharedVector::with_capacity(usize::MAX / 2 - 1000);
}

#[test]
fn collect_from_iter_with_no_size_hint() {
    use std::string::{String, ToString};
    struct NoSizeHintIter<'a> {
        data: &'a [&'a str],
        i: usize,
    }

    impl Iterator for NoSizeHintIter<'_> {
        type Item = String;

        fn next(&mut self) -> Option<Self::Item> {
            if self.i >= self.data.len() {
                return None;
            }
            let item = self.data[self.i];
            self.i += 1;
            Some(item.to_string())
        }

        fn size_hint(&self) -> (usize, Option<usize>) {
            (0, None)
        }
    }

    let input = NoSizeHintIter { data: &["Hello", "sweet", "world", "of", "iterators"], i: 0 };

    let shared_vec: SharedVector<String> = input.collect();
    assert_eq!(shared_vec.as_slice(), &["Hello", "sweet", "world", "of", "iterators"]);
}

#[test]
fn test_capacity_grows_only_when_needed() {
    let mut vec: SharedVector<u8> = SharedVector::with_capacity(2);
    vec.push(0);
    assert_eq!(vec.capacity(), 2);
    vec.push(0);
    assert_eq!(vec.capacity(), 2);
    vec.push(0);
    assert_eq!(vec.len(), 3);
    assert!(vec.capacity() > 2);
}

#[test]
fn test_vector_clear() {
    let mut vec: SharedVector<std::string::String> = Default::default();
    vec.clear();
    vec.push("Hello".into());
    vec.push("World".into());
    vec.push("of".into());
    vec.push("Vectors".into());

    let mut copy = vec.clone();

    assert_eq!(vec.len(), 4);
    let orig_cap = vec.capacity();
    assert!(orig_cap >= vec.len());
    vec.clear();
    assert_eq!(vec.len(), 0);
    // `vec` was shared with `copy`, so clearing it resets it to the default empty state.
    assert_eq!(vec.capacity(), 0);
    vec.push("Welcome back".into());
    assert_eq!(vec.len(), 1);
    assert!(vec.capacity() >= vec.len());

    assert_eq!(copy.len(), 4);
    assert_eq!(copy.capacity(), orig_cap);
    // `copy` is no longer shared, so clearing it keeps its allocation.
    copy.clear();
    assert_eq!(copy.capacity(), orig_cap);
}

#[test]
fn pop_test() {
    let mut x: SharedVector<i32> = SharedVector::from([1, 2, 3]);
    let y = x.clone();
    assert_eq!(x.pop(), Some(3));
    assert_eq!(x.pop(), Some(2));
    assert_eq!(x.pop(), Some(1));
    assert_eq!(x.pop(), None);
    assert!(x.is_empty());
    assert_eq!(y.as_slice(), &[1, 2, 3]);
}

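// A sketch of an additional copy-on-write check (not covered by the tests above):
// mutating through `make_mut_slice` or `resize` must leave existing clones untouched.
#[test]
fn copy_on_write_test() {
    let mut x: SharedVector<i32> = SharedVector::from([1, 2, 3]);
    let y = x.clone();
    // `make_mut_slice` detaches `x` from `y` before handing out a mutable view.
    x.make_mut_slice()[0] = 42;
    assert_eq!(x.as_slice(), &[42, 2, 3]);
    assert_eq!(y.as_slice(), &[1, 2, 3]);

    // `resize` grows with clones of the given value and shrinks by dropping.
    let mut z = y.clone();
    z.resize(5, 0);
    assert_eq!(z.as_slice(), &[1, 2, 3, 0, 0]);
    z.resize(2, 0);
    assert_eq!(z.as_slice(), &[1, 2]);
    assert_eq!(y.as_slice(), &[1, 2, 3]);
}
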
#[cfg(feature = "ffi")]
pub(crate) mod ffi {
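    //! Helpers exported with `extern "C"` so that non-Rust code (e.g. the C++ bindings)
    //! can allocate, free, and obtain the shared empty vector.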
    use super::*;

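    /// Allocates `size` bytes with the given alignment.
    ///
    /// Safety: `align` must be a power of two and `size` must be non-zero.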
    #[unsafe(no_mangle)]
    pub unsafe extern "C" fn slint_shared_vector_allocate(size: usize, align: usize) -> *mut u8 {
        alloc::alloc::alloc(alloc::alloc::Layout::from_size_align(size, align).unwrap())
    }

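    /// Frees a block previously returned by [`slint_shared_vector_allocate`].
    ///
    /// Safety: `ptr`, `size`, and `align` must match the original allocation.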
    #[unsafe(no_mangle)]
    pub unsafe extern "C" fn slint_shared_vector_free(ptr: *mut u8, size: usize, align: usize) {
        alloc::alloc::dealloc(ptr, alloc::alloc::Layout::from_size_align(size, align).unwrap())
    }

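    /// Returns a pointer to the shared static header used for empty vectors.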
    #[unsafe(no_mangle)]
    pub unsafe extern "C" fn slint_shared_vector_empty() -> *const u8 {
        &SHARED_NULL as *const _ as *const u8
    }
}

#[cfg(feature = "serde")]
#[test]
fn test_serialize_deserialize_sharedvector() {
    let v = SharedVector::from([1, 2, 3]);
    let serialized = serde_json::to_string(&v).unwrap();
    let deserialized: SharedVector<i32> = serde_json::from_str(&serialized).unwrap();
    assert_eq!(v, deserialized);
}

#[test]
fn test_reserve() {
    let mut v = SharedVector::from([1, 2, 3]);
    assert_eq!(v.capacity(), 3);
    v.reserve(1);
    assert_eq!(v.capacity(), 4);
    assert_eq!(v.len(), 3);
    v.push(4);
    v.push(5);
    assert_eq!(v.len(), 5);
    assert_eq!(v.capacity(), 8);
    v.reserve(1);
    assert_eq!(v.capacity(), 8);
    v.reserve(8);
    assert_eq!(v.len(), 5);
    assert_eq!(v.capacity(), 13);
}