orx_linked_list/list/get.rs
use super::List;
use crate::variant::ListVariant;
use orx_pinned_vec::PinnedVec;
use orx_selfref_col::{MemoryPolicy, MemoryState, Node, Utilization};

impl<V, M, P> List<V, M, P>
where
    V: ListVariant,
    M: MemoryPolicy<V>,
    P: PinnedVec<Node<V>>,
{
    /// ***O(1)*** Returns the number of elements in the list.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use orx_linked_list::*;
    ///
    /// let mut list = DoublyList::new();
    ///
    /// assert_eq!(0, list.len());
    ///
    /// list.push_back('a');
    /// list.push_front('b');
    /// _ = list.pop_back();
    /// list.push_back('c');
    ///
    /// assert_eq!(2, list.len());
    /// ```
    #[inline(always)]
    pub fn len(&self) -> usize {
        self.0.len()
    }

    /// ***O(1)*** Returns `true` if the list contains no elements.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use orx_linked_list::*;
    ///
    /// let mut list = SinglyList::new();
    ///
    /// assert!(list.is_empty());
    ///
    /// list.push_front('a');
    /// assert!(!list.is_empty());
    ///
    /// _ = list.pop_front();
    /// assert!(list.is_empty());
    /// ```
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Returns a key representing the memory state of the list.
    ///
    /// The list's memory state changes only when its nodes are reorganized.
    ///
    /// For [`SinglyListLazy`] or [`DoublyListLazy`]:
    /// * a node reorganization never happens implicitly,
    /// * it can only be triggered manually by calling `reclaim_closed_nodes`.
    ///
    /// For [`SinglyList`] and [`DoublyList`] with the default memory policy:
    /// * a node reorganization might be triggered by methods that remove nodes from the list, such as `pop_front` or `remove`,
    /// * a removal leads to a node reorganization if the ratio of closed nodes to all nodes exceeds `25%` (see [`Utilization`]).
    ///
    /// A node reorganization does not necessarily lead to a change in memory state; however, it is likely.
    ///
    /// The memory state matters for the following reasons:
    /// * let `indices` be a collection of node indices ([`DoublyIdx`] or [`SinglyIdx`]) obtained while the memory state was "s1";
    ///   * note that all methods that add an element to the list, such as `push_back` or `insert_at`, return its index,
    ///   * further note that these growth methods never change the state;
    /// * as long as the memory state does not change (stays as "s1"):
    ///   * we can use all of the `indices` to safely access elements in constant time,
    ///   * or use them in constant time mutation methods such as `insert_after` or `remove`;
    /// * at some point, the memory state may change to "s2" due to:
    ///   * manually calling `reclaim_closed_nodes` on a list with a lazy memory policy, or
    ///   * many removals from the list;
    /// * then, all of the `indices` obtained beforehand are invalidated,
    ///   * and constant time methods requiring these indices will safely fail with a proper error.
    ///
    /// [`SinglyList`]: crate::SinglyList
    /// [`DoublyList`]: crate::DoublyList
    /// [`SinglyListLazy`]: crate::SinglyListLazy
    /// [`DoublyListLazy`]: crate::DoublyListLazy
    /// [`DoublyIdx`]: crate::DoublyIdx
    /// [`SinglyIdx`]: crate::SinglyIdx
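    ///
    /// # Examples
    ///
    /// A minimal sketch of the guarantee above, using only the push and pop methods shown in this
    /// module (and assuming that `MemoryState` values can be compared with `==`):
    ///
    /// ```rust
    /// use orx_linked_list::*;
    ///
    /// let mut list = DoublyList::new();
    /// let s1 = list.memory_state();
    ///
    /// // growth methods never change the memory state,
    /// // so indices obtained from these calls remain valid
    /// list.push_back('a');
    /// list.push_back('b');
    /// list.push_front('c');
    ///
    /// // comparing state keys (assumes `MemoryState: PartialEq`)
    /// assert!(s1 == list.memory_state());
    ///
    /// // removals, on the other hand, may trigger a node reorganization and
    /// // hence a new memory state, which would invalidate earlier indices
    /// _ = list.pop_back();
    /// ```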
    #[inline(always)]
    pub fn memory_state(&self) -> MemoryState {
        self.0.memory_state()
    }

    /// Returns the node utilization of the underlying storage of the linked list.
    pub fn node_utilization(&self) -> Utilization {
        self.0.utilization()
    }

    /// Creates an iterator over the elements of the list in arbitrary order.
    ///
    /// Note that the iterator created by `iter_x` is often faster than the one created by `iter`;
    /// hence, it can be preferred whenever the iteration order does not matter.
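    ///
    /// # Examples
    ///
    /// A small sketch; since the order of visit is arbitrary, only order-independent results
    /// are asserted here:
    ///
    /// ```rust
    /// use orx_linked_list::*;
    ///
    /// let mut list = DoublyList::new();
    /// list.push_back(1);
    /// list.push_back(2);
    /// list.push_front(7);
    ///
    /// // the iteration order is unspecified, but aggregates are unaffected
    /// let sum: usize = list.iter_x().sum();
    /// assert_eq!(10, sum);
    ///
    /// assert_eq!(3, list.iter_x().count());
    /// ```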
    pub fn iter_x(&self) -> impl Iterator<Item = &V::Item> {
        self.0.nodes().iter().filter_map(|x| x.data())
    }

    /// Creates a parallel iterator over references to the elements of the linked list in **arbitrary order**.
    ///
    /// Note that `par_x` is the parallel counterpart of [`iter_x`].
    ///
    /// Please see [`ParIter`] for details of the parallel computation.
    /// In brief, the computation is defined as a chain of iterator transformations and parallelization
    /// is handled by the underlying parallel executor.
    ///
    /// Requires the **orx-parallel** feature.
    ///
    /// [`ParIter`]: orx_parallel::ParIter
    /// [`iter_x`]: crate::List::iter_x
    ///
    /// # Examples
    ///
    /// ```
    /// use orx_linked_list::*;
    ///
    /// let list: DoublyList<_> = (0..1024).collect();
    ///
    /// let expected: usize = list.iter_x().sum();
    ///
    /// let sum = list.par_x().sum(); // parallelized computation
    /// assert_eq!(expected, sum);
    ///
    /// let sum = list.par_x().num_threads(4).sum(); // using at most 4 threads
    /// assert_eq!(expected, sum);
    ///
    /// let sum_doubles = list.par_x().map(|x| x * 2).sum();
    /// assert_eq!(2 * expected, sum_doubles);
    ///
    /// let expected: usize = list.iter_x().filter(|x| *x % 2 == 0).sum();
    /// let sum_evens = list.par_x().filter(|x| *x % 2 == 0).sum();
    /// assert_eq!(expected, sum_evens);
    /// ```
    #[cfg(feature = "orx-parallel")]
    pub fn par_x(&self) -> impl orx_parallel::ParIter<Item = &V::Item>
    where
        V::Item: Send + Sync,
        Node<V>: Send + Sync,
        for<'a> &'a P: orx_concurrent_iter::IntoConcurrentIter<Item = &'a Node<V>>,
    {
        use orx_parallel::*;
        let pinned = self.0.nodes();
        pinned.par().filter_map(|x| x.data())
    }
}
153}