use std::ops::RangeFrom;

use crate::{
    ptr,
    NonNull,
    alloc,
    Layout,
    BitUtil,
    RawBitVecIter,
    RawBitVecDrain,
    IdxProxy,
    BitProto,
    MemUtil,
    Range,
    ManuallyDrop,
    handle_alloc_error,
};

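/// A type-erased, bit-packed vector of small unsigned values.
///
/// Interpretation of the buffer is deferred to the `BitProto` passed into
/// each method: `len` counts *elements*, while `true_cap` counts the
/// `usize` blocks actually allocated. Callers are expected to pass the
/// same `BitProto` to every call on a given instance.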
pub struct RawBitVec {
    pub(crate) ptr: NonNull<usize>,
    pub(crate) len: usize,
    pub(crate) true_cap: usize,
}

impl RawBitVec {
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    #[inline]
    pub unsafe fn cap(&self, proto: BitProto) -> usize {
        BitProto::calc_bitwise_count_from_block_count(proto, self.true_cap)
    }

    #[inline]
    pub unsafe fn free(&self, proto: BitProto) -> usize {
        self.cap(proto) - self.len
    }

    #[inline]
    pub fn new() -> Self {
        Self {
            ptr: NonNull::dangling(),
            len: 0,
            true_cap: 0,
        }
    }

    #[inline]
    pub fn with_capacity(proto: BitProto, cap: usize) -> Self {
        let mut new_vec = Self::new();
        let block_cap = BitProto::calc_block_count_from_bitwise_count(proto, cap);
        let (new_ptr, new_layout) = unsafe { Self::alloc_new(block_cap) };
        let new_non_null = Self::handle_alloc_result(new_layout, new_ptr);
        new_vec.ptr = new_non_null;
        new_vec.true_cap = block_cap;
        new_vec
    }

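    /// The four `grow_*` variants below differ on two axes: whether the
    /// requested size is *additional* elements or a *total* element count,
    /// and whether growth is exact or exponential (the final `bool` passed
    /// to `handle_grow_if_needed`).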
    #[inline]
    pub unsafe fn grow_exact_for_additional_elements_if_needed(&mut self, proto: BitProto, extra_elements: usize) -> Result<(), String> {
        // error if `self.len + extra_elements` would overflow `usize::MAX`
        if usize::MAX - extra_elements < self.len {
            return Err(format!("{} extra elements would overflow usize::MAX", extra_elements));
        }
        self.handle_grow_if_needed(proto, self.len + extra_elements, false)
    }

    #[inline]
    pub unsafe fn grow_exact_for_total_elements_if_needed(&mut self, proto: BitProto, total_elements: usize) -> Result<(), String> {
        self.handle_grow_if_needed(proto, total_elements, false)
    }

    #[inline]
    pub unsafe fn grow_for_additional_elements_if_needed(&mut self, proto: BitProto, extra_elements: usize) -> Result<(), String> {
        // error if `self.len + extra_elements` would overflow `usize::MAX`
        if usize::MAX - extra_elements < self.len {
            return Err(format!("{} extra elements would overflow usize::MAX", extra_elements));
        }
        self.handle_grow_if_needed(proto, self.len + extra_elements, true)
    }

    #[inline]
    pub unsafe fn grow_for_total_elements_if_needed(&mut self, proto: BitProto, total_elements: usize) -> Result<(), String> {
        self.handle_grow_if_needed(proto, total_elements, true)
    }

    #[inline]
    pub fn clear(&mut self) {
        self.len = 0
    }

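    /// Appends `val` to the end of the vector, growing the allocation if
    /// needed. A hedged usage sketch; `proto` is assumed to be a valid
    /// `BitProto` obtained elsewhere (its construction is out of scope here):
    ///
    /// ```ignore
    /// let mut vec = RawBitVec::new();
    /// unsafe {
    ///     vec.push(proto, 3)?;
    ///     assert_eq!(vec.pop(proto)?, 3);
    /// }
    /// ```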
    #[inline]
    pub unsafe fn push(&mut self, proto: BitProto, val: usize) -> Result<(), String> {
        BitProto::check_value(proto, val)?;
        match self.len == proto.MAX_CAPACITY {
            true => Err(format!("BitVec is at maximum capacity ({})", proto.MAX_CAPACITY)),
            false => {
                self.handle_grow_if_needed(proto, self.len + 1, true)?;
                self.push_unchecked(proto, val);
                Ok(())
            }
        }
    }

    #[inline]
    pub unsafe fn push_unchecked(&mut self, proto: BitProto, val: usize) {
        let len_proxy = BitProto::idx_proxy(proto, self.len);
        self.write_val_with_idx_proxy(len_proxy, val);
        self.len += 1;
    }

    #[inline]
    pub unsafe fn pop(&mut self, proto: BitProto) -> Result<usize, String> {
        if self.len == 0 {
            Err(String::from("no elements in BitVec to pop"))
        } else {
            Ok(self.pop_unchecked(proto))
        }
    }

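    /// Note: popping writes a zero back into the vacated slot (via
    /// `replace_val_with_idx_proxy`) rather than leaving stale bits behind.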
    #[inline]
    pub unsafe fn pop_unchecked(&mut self, proto: BitProto) -> usize {
        self.len -= 1;
        let last_proxy = BitProto::idx_proxy(proto, self.len);
        self.replace_val_with_idx_proxy(last_proxy, 0)
    }

    #[inline]
    pub unsafe fn insert(&mut self, proto: BitProto, idx: usize, val: usize) -> Result<(), String> {
        BitProto::check_value(proto, val)?;
        if idx > self.len {
            return Err(format!("index out of bounds for insert: (idx) {} > {} (len)", idx, self.len));
        }
        if self.len == proto.MAX_CAPACITY {
            return Err(format!("BitVec is at maximum capacity ({})", proto.MAX_CAPACITY));
        }
        self.handle_grow_if_needed(proto, self.len + 1, true)?;
        match idx == self.len {
            true => Ok(self.push_unchecked(proto, val)),
            false => Ok(self.insert_unchecked(proto, idx, val)),
        }
    }

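    /// Inserting shifts every element at or above `idx` one slot upward,
    /// then writes the new value into the hole.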
    #[inline]
    pub unsafe fn insert_unchecked(&mut self, proto: BitProto, idx: usize, val: usize) {
        let idx_proxy = BitProto::idx_proxy(proto, idx);
        self.shift_elements_up_with_with_idx_proxy(proto, idx_proxy, 1);
        self.write_val_with_idx_proxy(idx_proxy, val);
        self.len += 1;
    }

    #[inline]
    pub unsafe fn insert_bitvec(&mut self, proto: BitProto, insert_idx: usize, bitvec: Self) -> Result<(), String> {
        if insert_idx > self.len {
            return Err(format!("index out of bounds for insert_bitvec: (idx) {} > {} (len)", insert_idx, self.len));
        }
        if proto.MAX_CAPACITY - bitvec.len < self.len {
            return Err(format!("BitVec cannot hold {} more elements; only {} more would fit within the maximum capacity ({})", bitvec.len, proto.MAX_CAPACITY - self.len, proto.MAX_CAPACITY));
        }
        self.handle_grow_if_needed(proto, self.len + bitvec.len, true)?;
        match insert_idx == self.len {
            true => self.append_bitvec_unchecked(proto, bitvec),
            false => self.insert_bitvec_unchecked(proto, insert_idx, bitvec),
        }
        Ok(())
    }

    #[inline]
    pub unsafe fn insert_bitvec_unchecked(&mut self, proto: BitProto, insert_idx: usize, bitvec: Self) {
        if bitvec.len > 0 {
            let begin_idx = BitProto::idx_proxy(proto, insert_idx);
            self.shift_elements_up_with_with_idx_proxy(proto, begin_idx, bitvec.len);
            self.len += bitvec.len;
            let mut count: usize = 0;
            while count < bitvec.len {
                let write_proxy = BitProto::idx_proxy(proto, insert_idx + count);
                let read_proxy = BitProto::idx_proxy(proto, count);
                let val = bitvec.read_val_with_idx_proxy(read_proxy);
                self.write_val_with_idx_proxy(write_proxy, val);
                count += 1;
            }
        }
    }

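    /// `insert_iter` buffers and validates the entire source iterator before
    /// touching the vector, so a value that fails `BitProto::check_value`
    /// leaves `self` unmodified.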
    #[inline]
    pub unsafe fn insert_iter<II, TO, ESI>(&mut self, proto: BitProto, insert_idx: usize, source: II) -> Result<(), String>
    where II: IntoIterator<Item = TO, IntoIter = ESI>, TO: ToOwned<Owned = usize>, ESI: ExactSizeIterator + Iterator<Item = TO> {
        if insert_idx > self.len {
            return Err(format!("index out of bounds for insert_iter: (idx) {} > {} (len)", insert_idx, self.len));
        }
        let iter = source.into_iter();
        let iter_len = iter.len();
        let mut valid_values = Vec::with_capacity(iter_len);
        for to_val in iter {
            let val = to_val.to_owned();
            BitProto::check_value(proto, val)?;
            valid_values.push(val);
        }
        if proto.MAX_CAPACITY - iter_len < self.len {
            return Err(format!("BitVec cannot hold {} more elements; only {} more would fit within the maximum capacity ({})", iter_len, proto.MAX_CAPACITY - self.len, proto.MAX_CAPACITY));
        }
        self.handle_grow_if_needed(proto, self.len + iter_len, true)?;
        if insert_idx == self.len {
            self.append_iter_unchecked(proto, valid_values);
        } else {
            self.insert_iter_unchecked(proto, insert_idx, valid_values);
        }
        Ok(())
    }

    #[inline]
    pub unsafe fn insert_iter_unchecked<II, TO, ESI>(&mut self, proto: BitProto, insert_idx: usize, source: II)
    where II: IntoIterator<Item = TO, IntoIter = ESI>, TO: ToOwned<Owned = usize>, ESI: ExactSizeIterator + Iterator<Item = TO> {
        let mut iter = source.into_iter();
        let iter_len = iter.len();
        let begin_idx = BitProto::idx_proxy(proto, insert_idx);
        self.shift_elements_up_with_with_idx_proxy(proto, begin_idx, iter_len);
        self.len += iter_len;
        let mut count = 0usize;
        while count < iter_len {
            let write_proxy = BitProto::idx_proxy(proto, insert_idx + count);
            let val = iter.next().unwrap();
            self.write_val_with_idx_proxy(write_proxy, val.to_owned());
            count += 1;
        }
    }

    #[inline]
    pub unsafe fn remove(&mut self, proto: BitProto, idx: usize) -> Result<usize, String> {
        match idx >= self.len {
            true => Err(format!("index out of bounds for remove: (idx) {} >= {} (len)", idx, self.len)),
            false => match idx == self.len - 1 {
                true => Ok(self.pop_unchecked(proto)),
                false => Ok(self.remove_unchecked(proto, idx)),
            },
        }
    }

    #[inline]
    pub unsafe fn remove_unchecked(&mut self, proto: BitProto, idx: usize) -> usize {
        let idx_proxy = BitProto::idx_proxy(proto, idx);
        let shift_proxy = BitProto::idx_proxy(proto, idx + 1);
        let val = self.replace_val_with_idx_proxy(idx_proxy, 0);
        self.shift_elements_down_with_with_idx_proxy(proto, idx_proxy, shift_proxy, 1);
        self.len -= 1;
        val
    }

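    /// `remove_range` extracts the given range into a brand-new `RawBitVec`
    /// (allocated with exactly enough capacity) and closes the gap in `self`.
    /// A trailing range is delegated to `trim_range_unchecked`, which skips
    /// the downward shift entirely.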
    #[inline]
    pub unsafe fn remove_range(&mut self, proto: BitProto, remove_range: Range<usize>) -> Result<Self, String> {
        match remove_range.start >= self.len || remove_range.end > self.len {
            true => Err(format!("index out of bounds for remove_range: (start idx) {} >= {} (len) and/or (end idx) {} > {} (len)", remove_range.start, self.len, remove_range.end, self.len)),
            false => match remove_range.len() == 0 {
                true => Ok(Self::new()),
                false => match remove_range.end == self.len {
                    true => Ok(self.trim_range_unchecked(proto, remove_range.start..)),
                    false => Ok(self.remove_range_unchecked(proto, remove_range)),
                },
            },
        }
    }

    #[inline]
    pub unsafe fn remove_range_unchecked(&mut self, proto: BitProto, remove_range: Range<usize>) -> Self {
        let count = remove_range.len();
        let mut new_vec = Self::with_capacity(proto, count);
        let start_proxy = BitProto::idx_proxy(proto, remove_range.start);
        let end_excluded_proxy = BitProto::idx_proxy(proto, remove_range.end);
        for idx in remove_range {
            let idx_proxy = BitProto::idx_proxy(proto, idx);
            let val = self.replace_val_with_idx_proxy(idx_proxy, 0);
            new_vec.push_unchecked(proto, val);
        }
        self.shift_elements_down_with_with_idx_proxy(proto, start_proxy, end_excluded_proxy, count);
        self.len -= count;
        new_vec
    }

    #[inline]
    pub unsafe fn trim_range(&mut self, proto: BitProto, trim_start: RangeFrom<usize>) -> Result<Self, String> {
        match trim_start.start >= self.len {
            true => Err(format!("index out of bounds for trim_range: (idx) {} >= {} (len)", trim_start.start, self.len)),
            false => Ok(self.trim_range_unchecked(proto, trim_start)),
        }
    }

    #[inline]
    pub unsafe fn trim_range_unchecked(&mut self, proto: BitProto, trim_start: RangeFrom<usize>) -> Self {
        let real_idx_range = trim_start.start..self.len;
        let count = real_idx_range.len();
        let mut new_vec = Self::with_capacity(proto, count);
        for idx in real_idx_range {
            let idx_proxy = BitProto::idx_proxy(proto, idx);
            let val = self.read_val_with_idx_proxy(idx_proxy);
            new_vec.push_unchecked(proto, val);
        }
        self.len -= count;
        new_vec
    }

    #[inline]
    pub unsafe fn swap(&mut self, proto: BitProto, idx_a: usize, idx_b: usize) -> Result<(), String> {
        if idx_a >= self.len || idx_b >= self.len {
            return Err(format!("index out of bounds for swap: (idx_a) {} >= {} (len) OR (idx_b) {} >= {} (len)", idx_a, self.len, idx_b, self.len));
        } else if idx_a != idx_b {
            self.swap_unchecked(proto, idx_a, idx_b);
        }
        Ok(())
    }

    #[inline]
    pub unsafe fn swap_unchecked(&mut self, proto: BitProto, idx_a: usize, idx_b: usize) {
        let proxy_a = BitProto::idx_proxy(proto, idx_a);
        let proxy_b = BitProto::idx_proxy(proto, idx_b);
        self.swap_vals_with_idx_proxy(proxy_a, proxy_b)
    }

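    /// `swap_pop` removes the element at `idx` in O(1) by overwriting it
    /// with the last element instead of shifting the whole tail down.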
    #[inline]
    pub unsafe fn swap_pop(&mut self, proto: BitProto, idx: usize) -> Result<usize, String> {
        if idx >= self.len {
            Err(format!("index out of bounds for swap_pop: (idx) {} >= {} (len)", idx, self.len))
        } else if idx == self.len - 1 {
            Ok(self.pop_unchecked(proto))
        } else {
            Ok(self.swap_pop_unchecked(proto, idx))
        }
    }

    #[inline]
    pub unsafe fn swap_pop_unchecked(&mut self, proto: BitProto, idx: usize) -> usize {
        self.len -= 1;
        let last_proxy = BitProto::idx_proxy(proto, self.len);
        let pop_proxy = BitProto::idx_proxy(proto, idx);
        self.swap_pop_val_with_idx_proxy(pop_proxy, last_proxy)
    }

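    /// Shrinks the allocation so that at most `target_extra_capacity` spare
    /// element slots remain beyond `len`. A no-op when the current capacity
    /// is already at or below the target.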
    #[inline]
    pub unsafe fn trim_excess_capacity(&mut self, proto: BitProto, target_extra_capacity: usize) -> Result<(), String> {
        let target_capacity = self.len.saturating_add(target_extra_capacity);
        if target_capacity < self.cap(proto) {
            let target_block_capacity = BitProto::calc_block_count_from_bitwise_count(proto, target_capacity);
            let old_layout = MemUtil::usize_array_layout(self.true_cap);
            if target_block_capacity == 0 {
                // `realloc` to a zero-size layout is undefined behavior,
                // so release the allocation entirely instead
                alloc::dealloc(self.ptr.as_ptr().cast(), old_layout);
                self.ptr = NonNull::dangling();
                self.true_cap = 0;
            } else {
                let new_layout = MemUtil::usize_array_layout(target_block_capacity);
                let new_ptr = alloc::realloc(self.ptr.as_ptr().cast(), old_layout, new_layout.size());
                let new_non_null = Self::handle_alloc_result(new_layout, new_ptr);
                self.true_cap = target_block_capacity;
                self.ptr = new_non_null;
            }
        }
        Ok(())
    }

    #[inline]
    pub unsafe fn append_bitvec(&mut self, proto: BitProto, bitvec: Self) -> Result<(), String> {
        if proto.MAX_CAPACITY - bitvec.len < self.len {
            return Err(format!("BitVec cannot hold {} more elements; only {} more would fit within the maximum capacity ({})", bitvec.len, proto.MAX_CAPACITY - self.len, proto.MAX_CAPACITY));
        }
        self.handle_grow_if_needed(proto, self.len + bitvec.len, true)?;
        self.append_bitvec_unchecked(proto, bitvec);
        Ok(())
    }

    #[inline]
    pub unsafe fn append_bitvec_unchecked(&mut self, proto: BitProto, bitvec: Self) {
        let mut count = 0;
        while count < bitvec.len {
            let write_proxy = BitProto::idx_proxy(proto, self.len + count);
            let read_proxy = BitProto::idx_proxy(proto, count);
            let val = bitvec.read_val_with_idx_proxy(read_proxy);
            self.write_val_with_idx_proxy(write_proxy, val);
            count += 1;
        }
        self.len += bitvec.len;
    }

397
398 #[inline]
399 pub unsafe fn append_iter<II, TO, ESI>(&mut self, proto: BitProto, source: II) -> Result<(), String>
400 where II: IntoIterator<Item = TO, IntoIter = ESI>, TO: ToOwned<Owned = usize>, ESI: ExactSizeIterator + Iterator<Item = TO> {
401 let iter = source.into_iter();
402 let iter_len = iter.len();
403 let mut valid_values = Vec::with_capacity(iter_len);
404 for to_val in iter {
405 let val = to_val.to_owned();
406 BitProto::check_value(proto, val)?;
407 valid_values.push(val);
408 }
409 if proto.MAX_CAPACITY - iter_len < self.len {
410 return Err(format!("BitVec cannot hold {} more elements, {} elements would reach the maximum capacity ({})", iter_len, proto.MAX_CAPACITY - self.len, proto.MAX_CAPACITY));
411 }
412 self.handle_grow_if_needed(proto, self.len + iter_len, true)?;
413 self.append_iter_unchecked(proto, valid_values);
414 Ok(())
415 }
416
417 #[inline]
418 pub unsafe fn append_iter_unchecked<II, TO, ESI>(&mut self, proto: BitProto, source: II)
419 where II: IntoIterator<Item = TO, IntoIter = ESI>, TO: ToOwned<Owned = usize>, ESI: ExactSizeIterator + Iterator<Item = TO> {
420 let iter = source.into_iter();
421 for val in iter {
422 self.push_unchecked(proto, val.to_owned())
423 }
424 }
425
    #[inline]
    pub unsafe fn get(&self, proto: BitProto, idx: usize) -> Result<usize, String> {
        match idx < self.len {
            true => Ok(self.get_unchecked(proto, idx)),
            false => Err(format!("index out of bounds for get: (idx) {} >= {} (len)", idx, self.len)),
        }
    }

    #[inline]
    pub unsafe fn get_unchecked(&self, proto: BitProto, idx: usize) -> usize {
        let idx_proxy = BitProto::idx_proxy(proto, idx);
        self.read_val_with_idx_proxy(idx_proxy)
    }

    #[inline]
    pub unsafe fn replace(&mut self, proto: BitProto, idx: usize, val: usize) -> Result<usize, String> {
        BitProto::check_value(proto, val)?;
        match idx < self.len {
            true => Ok(self.replace_unchecked(proto, idx, val)),
            false => Err(format!("index out of bounds for replace: (idx) {} >= {} (len)", idx, self.len)),
        }
    }

    #[inline]
    pub unsafe fn replace_unchecked(&mut self, proto: BitProto, idx: usize, val: usize) -> usize {
        let idx_proxy = BitProto::idx_proxy(proto, idx);
        self.replace_val_with_idx_proxy(idx_proxy, val)
    }

    #[inline]
    pub unsafe fn set(&mut self, proto: BitProto, idx: usize, val: usize) -> Result<(), String> {
        BitProto::check_value(proto, val)?;
        match idx < self.len {
            true => Ok(self.set_unchecked(proto, idx, val)),
            false => Err(format!("index out of bounds for set: (idx) {} >= {} (len)", idx, self.len)),
        }
    }

    #[inline]
    pub unsafe fn set_unchecked(&mut self, proto: BitProto, idx: usize, val: usize) {
        let idx_proxy = BitProto::idx_proxy(proto, idx);
        self.write_val_with_idx_proxy(idx_proxy, val);
    }

    #[inline]
    pub fn discard_from_end(&mut self, count: usize) {
        self.len = self.len.saturating_sub(count)
    }

    #[inline]
    pub fn drain<'vec>(&'vec mut self) -> RawBitVecDrain<'vec> {
        let len = self.len;
        RawBitVecDrain {
            vec: self,
            start: 0,
            end_excluded: len,
        }
    }

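    /// `into_iter` hands the allocation off to the iterator. Wrapping `self`
    /// in `ManuallyDrop` prevents `Drop for RawBitVec` from freeing the
    /// buffer that `RawBitVecIter` now owns.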
    #[inline]
    pub fn into_iter(self) -> RawBitVecIter {
        let nodrop_self = ManuallyDrop::new(self);
        RawBitVecIter {
            ptr: nodrop_self.ptr,
            true_cap: nodrop_self.true_cap,
            start: 0,
            end_excluded: nodrop_self.len,
        }
    }

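    /// Reads one element whose bits may straddle two `usize` blocks: the low
    /// part comes from the block at `real_idx`, and when `second_mask` is
    /// nonzero the high part is pulled from the following block.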
    #[inline]
    pub(crate) unsafe fn read_val_with_idx_proxy(&self, idx_proxy: IdxProxy) -> usize {
        let mut block_ptr = self.ptr.as_ptr().add(idx_proxy.real_idx);
        let mut block_bits = ptr::read(block_ptr);
        let mut val: usize = (block_bits & idx_proxy.first_mask) >> idx_proxy.first_offset;
        if idx_proxy.second_mask != 0 {
            block_ptr = block_ptr.add(1);
            block_bits = ptr::read(block_ptr);
            val |= (block_bits & idx_proxy.second_mask) << idx_proxy.second_offset;
        }
        val
    }

    #[inline]
    pub(crate) unsafe fn replace_val_with_idx_proxy(&mut self, idx_proxy: IdxProxy, new_val: usize) -> usize {
        let mut block_ptr = self.ptr.as_ptr().add(idx_proxy.real_idx);
        let mut block_bits = ptr::read(block_ptr);
        let mut val = (block_bits & idx_proxy.first_mask) >> idx_proxy.first_offset;
        block_bits = (block_bits & !idx_proxy.first_mask) | (new_val << idx_proxy.first_offset);
        ptr::write(block_ptr, block_bits);
        if idx_proxy.second_mask != 0 {
            block_ptr = block_ptr.add(1);
            block_bits = ptr::read(block_ptr);
            val |= (block_bits & idx_proxy.second_mask) << idx_proxy.second_offset;
            block_bits = (block_bits & !idx_proxy.second_mask) | (new_val >> idx_proxy.second_offset);
            ptr::write(block_ptr, block_bits);
        }
        val
    }

    #[inline]
    pub(crate) unsafe fn write_val_with_idx_proxy(&mut self, idx_proxy: IdxProxy, new_val: usize) {
        let mut block_ptr = self.ptr.as_ptr().add(idx_proxy.real_idx);
        let mut block_bits = ptr::read(block_ptr);
        block_bits = (block_bits & !idx_proxy.first_mask) | (new_val << idx_proxy.first_offset);
        ptr::write(block_ptr, block_bits);
        if idx_proxy.second_mask != 0 {
            block_ptr = block_ptr.add(1);
            block_bits = ptr::read(block_ptr);
            block_bits = (block_bits & !idx_proxy.second_mask) | (new_val >> idx_proxy.second_offset);
            ptr::write(block_ptr, block_bits);
        }
    }

    #[inline]
    pub(crate) unsafe fn swap_vals_with_idx_proxy(&mut self, proxy_a: IdxProxy, proxy_b: IdxProxy) {
        let val_a = self.replace_val_with_idx_proxy(proxy_a, 0);
        let val_b = self.replace_val_with_idx_proxy(proxy_b, 0);
        self.write_val_with_idx_proxy(proxy_a, val_b);
        self.write_val_with_idx_proxy(proxy_b, val_a);
    }

    #[inline]
    pub(crate) unsafe fn swap_pop_val_with_idx_proxy(&mut self, pop_proxy: IdxProxy, last_proxy: IdxProxy) -> usize {
        let val_last = self.replace_val_with_idx_proxy(last_proxy, 0);
        self.replace_val_with_idx_proxy(pop_proxy, val_last)
    }

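    /// Shifts all elements from `begin_proxy` upward by `shift_count` element
    /// slots, in three phases: (1) save and clear the bits below the start
    /// offset in the first block, (2) move whole blocks with `ptr::copy`,
    /// (3) carry the remaining sub-block bits left through each block, then
    /// restore the saved low bits.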
    #[inline]
    pub(crate) unsafe fn shift_elements_up_with_with_idx_proxy(&mut self, proto: BitProto, begin_proxy: IdxProxy, shift_count: usize) {
        let real_len = BitProto::calc_block_count_from_bitwise_count(proto, self.len);
        let blocks_until_end = real_len - begin_proxy.real_idx;
        let final_real_len = BitProto::calc_block_count_from_bitwise_count(proto, self.len + shift_count);
        let end_excluded_block_ptr = self.ptr.as_ptr().add(final_real_len);
        let mut block_ptr = self.ptr.as_ptr().add(begin_proxy.real_idx);
        let mut block_bits = ptr::read(block_ptr);
        // save the bits belonging to elements *below* the shift point
        let keep_first_mask = BitUtil::all_bits_less_than_bit(begin_proxy.first_offset);
        let keep_first_bits = block_bits & keep_first_mask;
        block_bits &= !keep_first_mask;
        ptr::write(block_ptr, block_bits);
        let total_bits_shifted = shift_count * proto.BITS;
        let whole_blocks = total_bits_shifted / BitUtil::USIZE_BITS;
        let rollover_bits = total_bits_shifted - (whole_blocks * BitUtil::USIZE_BITS);
        if whole_blocks > 0 {
            // move whole blocks in one overlapping copy
            ptr::copy(block_ptr, block_ptr.add(whole_blocks), blocks_until_end);
            block_ptr = block_ptr.add(whole_blocks);
        }
        if rollover_bits > 0 {
            // shift the remainder block-by-block, carrying the top
            // `rollover_bits` of each block into the next one
            let rollover_shift = BitUtil::USIZE_BITS - rollover_bits;
            let rollover_mask = usize::MAX << rollover_shift;
            let mut rollover_bits_paste: usize = 0;
            let mut rollover_bits_copy: usize;
            while block_ptr < end_excluded_block_ptr {
                block_bits = ptr::read(block_ptr);
                rollover_bits_copy = (block_bits & rollover_mask) >> rollover_shift;
                block_bits = (block_bits << rollover_bits) | rollover_bits_paste;
                ptr::write(block_ptr, block_bits);
                block_ptr = block_ptr.add(1);
                rollover_bits_paste = rollover_bits_copy;
            }
        }
        // restore the untouched low bits in the first block
        block_ptr = self.ptr.as_ptr().add(begin_proxy.real_idx);
        block_bits = ptr::read(block_ptr);
        ptr::write(block_ptr, block_bits | keep_first_bits);
    }

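    /// The mirror image of the upward shift: saves the low bits of the first
    /// block, copies whole blocks downward, then carries the sub-block
    /// remainder right through the affected blocks from the top down.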
    #[inline]
    pub(crate) unsafe fn shift_elements_down_with_with_idx_proxy(&mut self, proto: BitProto, begin_proxy: IdxProxy, shift_proxy: IdxProxy, shift_count: usize) {
        let real_len = BitProto::calc_block_count_from_bitwise_count(proto, self.len);
        let mut block_ptr = self.ptr.as_ptr().add(begin_proxy.real_idx);
        let mut block_bits = ptr::read(block_ptr);
        // save the bits belonging to elements *below* the shift point
        let keep_first_mask = BitUtil::all_bits_less_than_bit(begin_proxy.first_offset);
        let keep_first_bits = block_bits & keep_first_mask;
        block_bits &= !keep_first_mask;
        ptr::write(block_ptr, block_bits);
        let total_bits_shifted = shift_count * proto.BITS;
        let whole_blocks = total_bits_shifted / BitUtil::USIZE_BITS;
        if whole_blocks > 0 {
            let copy_block_count = real_len - shift_proxy.real_idx;
            ptr::copy(block_ptr.add(whole_blocks), block_ptr, copy_block_count);
        }
        let rollover_bits = total_bits_shifted - (whole_blocks * BitUtil::USIZE_BITS);
        if rollover_bits > 0 {
            // walk backward from the last affected block, carrying the low
            // `rollover_bits` of each block into the one above it
            let shift_block_count = (real_len - begin_proxy.real_idx) - whole_blocks;
            let rollover_shift = BitUtil::USIZE_BITS - rollover_bits;
            let rollover_mask = usize::MAX >> rollover_shift;
            let mut blocks_shifted = 0;
            let mut rollover_bits_paste: usize = 0;
            let mut rollover_bits_copy: usize;
            block_ptr = self.ptr.as_ptr().add(real_len - whole_blocks);
            while blocks_shifted < shift_block_count {
                block_ptr = block_ptr.sub(1);
                block_bits = ptr::read(block_ptr);
                rollover_bits_copy = (block_bits & rollover_mask) << rollover_shift;
                block_bits = (block_bits >> rollover_bits) | rollover_bits_paste;
                ptr::write(block_ptr, block_bits);
                blocks_shifted += 1;
                rollover_bits_paste = rollover_bits_copy;
            }
        }
        // restore the untouched low bits in the first block
        block_bits = ptr::read(block_ptr);
        ptr::write(block_ptr, block_bits | keep_first_bits);
    }

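    /// Grows the backing buffer when `min_capacity` elements would not fit.
    /// Exponential growth reserves an extra 50% on top of the minimum block
    /// count (`new = min + min / 2`, saturating), i.e. the classic 1.5x
    /// growth strategy.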
    #[inline]
    pub(crate) unsafe fn handle_grow_if_needed(&mut self, proto: BitProto, min_capacity: usize, grow_exponential: bool) -> Result<(), String> {
        let true_min_capacity = BitProto::calc_block_count_from_bitwise_count(proto, min_capacity);
        if true_min_capacity > self.true_cap {
            let new_true_cap = match grow_exponential {
                true => true_min_capacity.saturating_add(true_min_capacity >> 1),
                false => true_min_capacity,
            };
            let new_layout: Layout = MemUtil::usize_array_layout(new_true_cap);
            let new_ptr = match self.true_cap {
                // first allocation: nothing to reallocate from
                0 => alloc::alloc(new_layout),
                _ => {
                    let old_layout = MemUtil::usize_array_layout(self.true_cap);
                    alloc::realloc(self.ptr.as_ptr().cast(), old_layout, new_layout.size())
                },
            };
            let new_non_null = Self::handle_alloc_result(new_layout, new_ptr);
            self.true_cap = new_true_cap;
            self.ptr = new_non_null;
        }
        Ok(())
    }

    #[inline]
    pub(crate) unsafe fn alloc_new(real_cap: usize) -> (*mut u8, Layout) {
        let new_layout: Layout = MemUtil::usize_array_layout(real_cap);
        let new_ptr = alloc::alloc(new_layout);
        (new_ptr, new_layout)
    }

    #[inline]
    pub(crate) fn handle_alloc_result(alloc_layout: Layout, alloc_result_ptr: *mut u8) -> NonNull<usize> {
        match NonNull::new(alloc_result_ptr) {
            Some(non_null) => non_null.cast::<usize>(),
            None => handle_alloc_error(alloc_layout),
        }
    }
}

impl Drop for RawBitVec {
    #[inline]
    fn drop(&mut self) {
        unsafe {
            if self.true_cap > 0 {
                let layout = MemUtil::usize_array_layout(self.true_cap);
                alloc::dealloc(self.ptr.as_ptr().cast(), layout)
            }
        }
    }
}

#[cfg(debug_assertions)]
impl RawBitVec {
    #[allow(dead_code)]
    pub(crate) fn debug_string(&self, proto: BitProto) -> String {
        let true_len = BitProto::calc_block_count_from_bitwise_count(proto, self.len);
        let elem_cap = BitProto::calc_bitwise_count_from_block_count(proto, self.true_cap);
        let mut output = format!("elem len = {}\nelem cap = {}\ntrue len = {}\ntrue cap = {}\ndata: \n", self.len, elem_cap, true_len, self.true_cap);
        let mut i = 0usize;
        while i < true_len {
            let block = unsafe { ptr::read(self.ptr.as_ptr().add(i)) };
            // pad to the platform word width rather than hardcoding 64 bits
            output.push_str(format!("{} = {:0width$b}\n", i, block, width = usize::BITS as usize).as_str());
            i += 1;
        }
        output
    }
}
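
// A minimal smoke-test sketch. It deliberately exercises only the
// proto-free API (`new`, `len`, `clear`, `discard_from_end`), since
// constructing a `BitProto` is outside this file's scope.
#[cfg(test)]
mod raw_bit_vec_smoke_tests {
    use super::RawBitVec;

    #[test]
    fn new_vec_is_empty_and_unallocated() {
        let mut vec = RawBitVec::new();
        assert_eq!(vec.len(), 0);
        assert_eq!(vec.true_cap, 0);
        // length-only operations never touch the (dangling) pointer
        vec.discard_from_end(10);
        assert_eq!(vec.len(), 0);
        vec.clear();
        assert_eq!(vec.len(), 0);
    }
}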