1use core::cmp;
4use core::fmt;
5use core::iter::Map;
6use core::mem;
7use core::ops::Range;
8use core::ops::{Index, IndexMut};
9use core::slice;
10
11use crate::local_prelude::*;
12use crate::util::{div_rem, round_up_to_next};
13
/// An immutable view into a contiguous range of rows of a bit matrix.
pub struct BitSubMatrix<'a> {
    // Backing storage: rows laid out contiguously, each row padded up to a
    // whole number of `Block`s (see `round_up_to_next(row_bits, BITS)`).
    slice: &'a [Block],
    // Number of meaningful bits per row; trailing bits in the last block of
    // each row are padding.
    row_bits: usize,
}
19
/// A mutable view into a contiguous range of rows of a bit matrix.
pub struct BitSubMatrixMut<'a> {
    // Backing storage: rows laid out contiguously, each row padded up to a
    // whole number of `Block`s (see `round_up_to_next(row_bits, BITS)`).
    slice: &'a mut [Block],
    // Number of meaningful bits per row; trailing bits in the last block of
    // each row are padding.
    row_bits: usize,
}
25
26impl<'a> BitSubMatrix<'a> {
27 pub fn new(slice: &[Block], row_bits: usize) -> BitSubMatrix {
29 BitSubMatrix {
30 slice: slice,
31 row_bits: row_bits,
32 }
33 }
34
35 #[inline]
37 pub unsafe fn from_raw_parts(ptr: *const Block, rows: usize, row_bits: usize) -> Self {
38 BitSubMatrix {
39 slice: slice::from_raw_parts(ptr, round_up_to_next(row_bits, BITS) / BITS * rows),
40 row_bits: row_bits,
41 }
42 }
43
44 pub fn iter(&self) -> Map<slice::Chunks<Block>, fn(&[Block]) -> &BitSlice> {
46 fn f(arg: &[Block]) -> &BitSlice {
47 unsafe { mem::transmute(arg) }
48 }
49 let row_size = round_up_to_next(self.row_bits, BITS) / BITS;
50 self.slice.chunks(row_size).map(f)
51 }
52}
53
54impl<'a> BitSubMatrixMut<'a> {
55 pub fn new(slice: &mut [Block], row_bits: usize) -> BitSubMatrixMut {
57 BitSubMatrixMut {
58 slice: slice,
59 row_bits: row_bits,
60 }
61 }
62
63 #[inline]
65 pub unsafe fn from_raw_parts(ptr: *mut Block, rows: usize, row_bits: usize) -> Self {
66 BitSubMatrixMut {
67 slice: slice::from_raw_parts_mut(ptr, round_up_to_next(row_bits, BITS) / BITS * rows),
68 row_bits: row_bits,
69 }
70 }
71
72 #[inline]
74 fn num_rows(&self) -> usize {
75 let row_size = round_up_to_next(self.row_bits, BITS) / BITS;
76 if row_size == 0 {
77 0
78 } else {
79 self.slice.len() / row_size
80 }
81 }
82
83 #[inline]
89 pub fn set(&mut self, row: usize, col: usize, enabled: bool) {
90 let row_size_in_bits = round_up_to_next(self.row_bits, BITS);
91 let bit = row * row_size_in_bits + col;
92 let (block, i) = div_rem(bit, BITS);
93 assert!(block < self.slice.len() && col < self.row_bits);
94 unsafe {
95 let elt = self.slice.get_unchecked_mut(block);
96 if enabled {
97 *elt |= 1 << i;
98 } else {
99 *elt &= !(1 << i);
100 }
101 }
102 }
103
104 #[inline]
106 pub fn sub_matrix(&self, range: Range<usize>) -> BitSubMatrix {
107 let row_size = round_up_to_next(self.row_bits, BITS) / BITS;
108 BitSubMatrix {
109 slice: &self.slice[range.start * row_size..range.end * row_size],
110 row_bits: self.row_bits,
111 }
112 }
113
114 #[inline]
120 pub fn split_at(&self, row: usize) -> (BitSubMatrix, BitSubMatrix) {
121 (
122 self.sub_matrix(0..row),
123 self.sub_matrix(row..self.num_rows()),
124 )
125 }
126
127 #[inline]
130 pub fn split_at_mut(&mut self, row: usize) -> (BitSubMatrixMut, BitSubMatrixMut) {
131 let row_size = round_up_to_next(self.row_bits, BITS) / BITS;
132 let (first, second) = self.slice.split_at_mut(row * row_size);
133 (
134 BitSubMatrixMut::new(first, self.row_bits),
135 BitSubMatrixMut::new(second, self.row_bits),
136 )
137 }
138
139 pub fn transitive_closure(&mut self) {
143 assert_eq!(self.num_rows(), self.row_bits);
144 for pos in 0..self.row_bits {
145 let (mut rows0, mut rows1a) = self.split_at_mut(pos);
146 let (row, mut rows1b) = rows1a.split_at_mut(1);
147 for dst_row in rows0.iter_mut().chain(rows1b.iter_mut()) {
148 if dst_row[pos] {
149 for (dst, src) in dst_row.iter_blocks_mut().zip(row[0].iter_blocks()) {
150 *dst |= src;
151 }
152 }
153 }
154 }
155 }
156
157 pub fn reflexive_closure(&mut self) {
159 for i in 0..cmp::min(self.row_bits, self.num_rows()) {
160 self.set(i, i, true);
161 }
162 }
163
164 pub fn iter_mut(&mut self) -> Map<slice::ChunksMut<Block>, fn(&mut [Block]) -> &mut BitSlice> {
166 fn f(arg: &mut [Block]) -> &mut BitSlice {
167 unsafe { mem::transmute(arg) }
168 }
169 let row_size = round_up_to_next(self.row_bits, BITS) / BITS;
170 self.slice.chunks_mut(row_size).map(f)
171 }
172}
173
174impl<'a> Index<usize> for BitSubMatrixMut<'a> {
176 type Output = BitSlice;
177
178 #[inline]
179 fn index(&self, row: usize) -> &BitSlice {
180 let row_size = round_up_to_next(self.row_bits, BITS) / BITS;
181 unsafe { mem::transmute(&self.slice[row * row_size..(row + 1) * row_size]) }
182 }
183}
184
185impl<'a> IndexMut<usize> for BitSubMatrixMut<'a> {
187 #[inline]
188 fn index_mut(&mut self, row: usize) -> &mut BitSlice {
189 let row_size = round_up_to_next(self.row_bits, BITS) / BITS;
190 unsafe { mem::transmute(&mut self.slice[row * row_size..(row + 1) * row_size]) }
191 }
192}
193
194impl<'a> Index<usize> for BitSubMatrix<'a> {
196 type Output = BitSlice;
197
198 #[inline]
199 fn index(&self, row: usize) -> &BitSlice {
200 let row_size = round_up_to_next(self.row_bits, BITS) / BITS;
201 unsafe { mem::transmute(&self.slice[row * row_size..(row + 1) * row_size]) }
202 }
203}
204
205impl<'a> fmt::Debug for BitSubMatrix<'a> {
206 fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
207 for row in self.iter() {
208 for bit in row.iter_bits(self.row_bits) {
209 write!(fmt, "{}", if bit { 1 } else { 0 })?;
210 }
211 write!(fmt, "\n")?;
212 }
213 Ok(())
214 }
215}