use crate::{intrinsics::transpose, packed_seq::read_slice};

use super::*;

#[derive(Clone, Debug, Default, MemSize, MemDbg)]
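/// An owned ASCII-encoded DNA sequence of `ACGT` characters, stored one byte per base.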
#[cfg_attr(feature = "pyo3", pyo3::pyclass)]
#[cfg_attr(feature = "epserde", derive(epserde::Epserde))]
pub struct AsciiSeqVec {
    pub seq: Vec<u8>,
}

#[derive(Copy, Clone, Debug, MemSize, MemDbg, PartialEq, Eq, PartialOrd, Ord)]
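/// A borrowed ASCII-encoded DNA sequence of `ACGT` characters, one byte per base.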
pub struct AsciiSeq<'s>(pub &'s [u8]);

impl<'s> Seq<'s> for AsciiSeq<'s> {
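    /// Each input byte encodes a single base.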
    const BASES_PER_BYTE: usize = 1;
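    /// Returned characters are 2-bit encoded.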
    const BITS_PER_CHAR: usize = 2;
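    /// The corresponding owned sequence type.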
    type SeqVec = AsciiSeqVec;

    #[inline(always)]
    fn len(&self) -> usize {
        self.0.len()
    }

    #[inline(always)]
    fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    #[inline(always)]
    fn get(&self, index: usize) -> u8 {
        pack_char(self.0[index])
    }

    #[inline(always)]
    fn get_ascii(&self, index: usize) -> u8 {
        self.0[index]
    }

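    // The ASCII codes of `ACGT` (0x41, 0x43, 0x47, 0x54, and their lowercase
    // forms) are distinguished by bits 1..=2, i.e. the per-byte mask 0b110 =
    // 0x06: `(ascii >> 1) & 3` maps A->0, C->1, T->2, G->3. `as_u64` below
    // extracts exactly those two bits per byte, via BMI2 `pext`, NEON shifts
    // and pairwise adds, or a plain `pack_char` loop as the fallback.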
    #[inline(always)]
    fn as_u64(&self) -> u64 {
        let len = self.len();
        assert!(len <= u64::BITS as usize / 2);

        let mut val = 0u64;

        #[cfg(all(target_arch = "x86_64", target_feature = "bmi2"))]
        {
            for i in (0..len).step_by(8) {
                let packed_bytes = if i + 8 <= self.len() {
                    let chunk: &[u8; 8] = &self.0[i..i + 8].try_into().unwrap();
                    let ascii = u64::from_ne_bytes(*chunk);
                    unsafe { std::arch::x86_64::_pext_u64(ascii, 0x0606060606060606) }
                } else {
                    let mut chunk: [u8; 8] = [0; 8];
                    chunk[..self.len() - i].copy_from_slice(&self.0[i..]);
                    let ascii = u64::from_ne_bytes(chunk);
                    unsafe { std::arch::x86_64::_pext_u64(ascii, 0x0606060606060606) }
                };
                val |= packed_bytes << (i * 2);
            }
        }

        #[cfg(target_feature = "neon")]
        {
            use core::arch::aarch64::{vandq_u8, vdup_n_u8, vld1q_u8, vpadd_u8, vshlq_u8};
            use core::mem::transmute;

            for i in (0..len).step_by(16) {
                let packed_bytes: u64 = if i + 16 <= self.len() {
                    unsafe {
                        let ascii = vld1q_u8(self.0.as_ptr().add(i));
                        let masked_bits = vandq_u8(ascii, transmute([6i8; 16]));
                        let (bits_0, bits_1) = transmute(vshlq_u8(
                            masked_bits,
                            transmute([-1i8, 1, 3, 5, -1, 1, 3, 5, -1, 1, 3, 5, -1, 1, 3, 5]),
                        ));
                        let half_packed = vpadd_u8(bits_0, bits_1);
                        let packed = vpadd_u8(half_packed, vdup_n_u8(0));
                        transmute(packed)
                    }
                } else {
                    let mut chunk: [u8; 16] = [0; 16];
                    chunk[..self.len() - i].copy_from_slice(&self.0[i..]);
                    unsafe {
                        let ascii = vld1q_u8(chunk.as_ptr());
                        let masked_bits = vandq_u8(ascii, transmute([6i8; 16]));
                        let (bits_0, bits_1) = transmute(vshlq_u8(
                            masked_bits,
                            transmute([-1i8, 1, 3, 5, -1, 1, 3, 5, -1, 1, 3, 5, -1, 1, 3, 5]),
                        ));
                        let half_packed = vpadd_u8(bits_0, bits_1);
                        let packed = vpadd_u8(half_packed, vdup_n_u8(0));
                        transmute(packed)
                    }
                };
                val |= packed_bytes << (i * 2);
            }
        }

        #[cfg(not(any(
            all(target_arch = "x86_64", target_feature = "bmi2"),
            target_feature = "neon"
        )))]
        {
            for (i, &base) in self.0.iter().enumerate() {
                val |= (pack_char(base) as u64) << (i * 2);
            }
        }

        val
    }

    #[inline(always)]
    fn revcomp_as_u64(&self) -> u64 {
        packed_seq::revcomp_u64(self.as_u64(), self.len())
    }

    #[inline(always)]
    fn to_vec(&self) -> AsciiSeqVec {
        AsciiSeqVec {
            seq: self.0.to_vec(),
        }
    }

    #[inline(always)]
    fn to_revcomp(&self) -> AsciiSeqVec {
        AsciiSeqVec {
            seq: self
                .0
                .iter()
                .rev()
                .copied()
                .map(packed_seq::complement_char)
                .collect(),
        }
    }

    #[inline(always)]
    fn slice(&self, range: Range<usize>) -> Self {
        Self(&self.0[range])
    }

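    /// Iterates the bases of the sequence as 2-bit characters, one at a time.
    /// On x86_64 with BMI2, eight ASCII bytes are loaded at once and the
    /// encoded bases are shifted out of a cached word, 8 bits per step.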
    #[inline(always)]
    fn iter_bp(self) -> impl ExactSizeIterator<Item = u8> + Clone {
        #[cfg(all(target_arch = "x86_64", target_feature = "bmi2"))]
        {
            let mut cache = 0;
            (0..self.len()).map(
                #[inline(always)]
                move |i| {
                    if i % 8 == 0 {
                        if i + 8 <= self.len() {
                            let chunk: &[u8; 8] = &self.0[i..i + 8].try_into().unwrap();
                            let ascii = u64::from_ne_bytes(*chunk);
                            cache = ascii >> 1;
                        } else {
                            let mut chunk: [u8; 8] = [0; 8];
                            chunk[..self.len() - i].copy_from_slice(&self.0[i..]);
                            let ascii = u64::from_ne_bytes(chunk);
                            cache = ascii >> 1;
                        }
                    }
                    let base = cache & 0x03;
                    cache >>= 8;
                    base as u8
                },
            )
        }

        #[cfg(not(all(target_arch = "x86_64", target_feature = "bmi2")))]
        self.0.iter().copied().map(pack_char)
    }

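    /// Splits the sequence into `L` chunks of `n = num_kmers.div_ceil(L)`
    /// k-mer positions each and iterates them in parallel, yielding one SIMD
    /// word of 2-bit characters (one lane per chunk) per step. Also returns
    /// the number of padding k-mers in the final chunk.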
    #[inline(always)]
    fn par_iter_bp(self, context: usize) -> (impl ExactSizeIterator<Item = S> + Clone, usize) {
        let num_kmers = self.len().saturating_sub(context - 1);
        let n = num_kmers.div_ceil(L);
        let padding = L * n - num_kmers;

        let offsets: [usize; 8] = from_fn(|l| l * n);
        let mut cur = S::ZERO;

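        // 8 transposed SIMD words, each holding 4 characters per lane,
        // refilled every 32 steps.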
        let mut buf = Box::new([S::ZERO; 8]);

        let par_len = if num_kmers == 0 { 0 } else { n + context - 1 };
        let it = (0..par_len).map(
            #[inline(always)]
            move |i| {
                if i % 4 == 0 {
                    if i % 32 == 0 {
                        let data: [u32x8; 8] = from_fn(
                            #[inline(always)]
                            |lane| read_slice(self.0, offsets[lane] + i),
                        );
                        *buf = transpose(data);
                        for x in buf.iter_mut() {
                            *x = *x >> 1;
                        }
                    }
                    cur = buf[(i % 32) / 4];
                }
                let chars = cur & S::splat(0x03);
                cur = cur >> S::splat(8);
                chars
            },
        );

        (it, padding)
    }

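    /// Like `par_iter_bp`, but each step also yields the character `delay`
    /// positions back in the same lane; the delayed stream yields zeros for
    /// the first `delay` positions.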
    #[inline(always)]
    fn par_iter_bp_delayed(
        self,
        context: usize,
        delay: usize,
    ) -> (impl ExactSizeIterator<Item = (S, S)> + Clone, usize) {
        assert!(
            delay < usize::MAX / 2,
            "Delay={} should be >=0.",
            delay as isize
        );

        let num_kmers = self.len().saturating_sub(context - 1);
        let n = num_kmers.div_ceil(L);
        let padding = L * n - num_kmers;

        let offsets: [usize; 8] = from_fn(|l| l * n);
        let mut upcoming = S::ZERO;
        let mut upcoming_d = S::ZERO;

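        // Ring buffer of SIMD words (4 characters per lane per word): sized to
        // cover the delay plus the 8 words written per 32-step block, rounded
        // up to a power of two so indices can wrap by masking.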
        let buf_len = (delay / 4 + 8).next_power_of_two();
        let buf_mask = buf_len - 1;
        let mut buf = vec![S::ZERO; buf_len];
        let mut write_idx = 0;
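        // The read pointer starts `delay / 4` words behind the write pointer;
        // the remaining `delay % 4` characters of lag come from reading on a
        // later step of the 4-step cycle below.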
        let mut read_idx = (buf_len - delay / 4) % buf_len;

        let par_len = if num_kmers == 0 { 0 } else { n + context - 1 };
        let it = (0..par_len).map(
            #[inline(always)]
            move |i| {
                if i % 4 == 0 {
                    if i % 32 == 0 {
                        let data: [u32x8; 8] = from_fn(
                            #[inline(always)]
                            |lane| read_slice(self.0, offsets[lane] + i),
                        );
                        unsafe {
                            let mut_array: &mut [u32x8; 8] = buf
                                .get_unchecked_mut(write_idx..write_idx + 8)
                                .try_into()
                                .unwrap_unchecked();
                            *mut_array = transpose(data);
                            for x in mut_array {
                                *x = *x >> 1;
                            }
                        }
                    }
                    upcoming = buf[write_idx];
                    write_idx += 1;
                    write_idx &= buf_mask;
                }
                if i % 4 == delay % 4 {
                    unsafe { assert_unchecked(read_idx < buf.len()) };
                    upcoming_d = buf[read_idx];
                    read_idx += 1;
                    read_idx &= buf_mask;
                }
                let chars = upcoming & S::splat(0x03);
                let chars_d = upcoming_d & S::splat(0x03);
                upcoming = upcoming >> S::splat(8);
                upcoming_d = upcoming_d >> S::splat(8);
                (chars, chars_d)
            },
        );

        (it, padding)
    }

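    /// Like `par_iter_bp_delayed`, but with two delayed streams,
    /// `delay1 <= delay2`, sharing a single ring buffer.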
    #[inline(always)]
    fn par_iter_bp_delayed_2(
        self,
        context: usize,
        delay1: usize,
        delay2: usize,
    ) -> (impl ExactSizeIterator<Item = (S, S, S)> + Clone, usize) {
        assert!(delay1 <= delay2, "Delay1 must be at most delay2.");

        let num_kmers = self.len().saturating_sub(context - 1);
        let n = num_kmers.div_ceil(L);
        let padding = L * n - num_kmers;

        let offsets: [usize; 8] = from_fn(|l| l * n);

        let mut upcoming = S::ZERO;
        let mut upcoming_d1 = S::ZERO;
        let mut upcoming_d2 = S::ZERO;

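        // Same ring buffer scheme as in `par_iter_bp_delayed`, sized for the
        // larger delay (`delay2`).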
        let buf_len = (delay2 / 4 + 8).next_power_of_two();
        let buf_mask = buf_len - 1;
        let mut buf = vec![S::ZERO; buf_len];
        let mut write_idx = 0;
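        // Each read pointer trails the shared write pointer by its own delay.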
        let mut read_idx1 = (buf_len - delay1 / 4) % buf_len;
        let mut read_idx2 = (buf_len - delay2 / 4) % buf_len;

        let par_len = if num_kmers == 0 { 0 } else { n + context - 1 };
        let it = (0..par_len).map(
            #[inline(always)]
            move |i| {
                if i % 4 == 0 {
                    if i % 32 == 0 {
                        let data: [u32x8; 8] = from_fn(
                            #[inline(always)]
                            |lane| read_slice(self.0, offsets[lane] + i),
                        );
                        unsafe {
                            let mut_array: &mut [u32x8; 8] = buf
                                .get_unchecked_mut(write_idx..write_idx + 8)
                                .try_into()
                                .unwrap_unchecked();
                            *mut_array = transpose(data);
                            for x in mut_array {
                                *x = *x >> 1;
                            }
                        }
                    }
                    upcoming = buf[write_idx];
                    write_idx += 1;
                    write_idx &= buf_mask;
                }
                if i % 4 == delay1 % 4 {
                    unsafe { assert_unchecked(read_idx1 < buf.len()) };
                    upcoming_d1 = buf[read_idx1];
                    read_idx1 += 1;
                    read_idx1 &= buf_mask;
                }
                if i % 4 == delay2 % 4 {
                    unsafe { assert_unchecked(read_idx2 < buf.len()) };
                    upcoming_d2 = buf[read_idx2];
                    read_idx2 += 1;
                    read_idx2 &= buf_mask;
                }
                let chars = upcoming & S::splat(0x03);
                let chars_d1 = upcoming_d1 & S::splat(0x03);
                let chars_d2 = upcoming_d2 & S::splat(0x03);
                upcoming = upcoming >> S::splat(8);
                upcoming_d1 = upcoming_d1 >> S::splat(8);
                upcoming_d2 = upcoming_d2 >> S::splat(8);
                (chars, chars_d1, chars_d2)
            },
        );

        (it, padding)
    }

    fn cmp_lcp(&self, other: &Self) -> (std::cmp::Ordering, usize) {
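        // Find the first mismatching character: its index is the length of
        // the longest common prefix, and it decides the ordering.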
        for i in 0..self.len().min(other.len()) {
            if self.0[i] != other.0[i] {
                return (self.0[i].cmp(&other.0[i]), i);
            }
        }
        (self.len().cmp(&other.len()), self.len().min(other.len()))
    }
}

impl AsciiSeqVec {
    #[inline(always)]
    pub const fn from_vec(seq: Vec<u8>) -> Self {
        Self { seq }
    }
}

impl SeqVec for AsciiSeqVec {
    type Seq<'s> = AsciiSeq<'s>;

    #[inline(always)]
    fn into_raw(self) -> Vec<u8> {
        self.seq
    }

    #[inline(always)]
    fn as_slice(&self) -> Self::Seq<'_> {
        AsciiSeq(self.seq.as_slice())
    }

    #[inline(always)]
    fn len(&self) -> usize {
        self.seq.len()
    }

    #[inline(always)]
    fn is_empty(&self) -> bool {
        self.seq.is_empty()
    }

    #[inline(always)]
    fn clear(&mut self) {
        self.seq.clear()
    }

    #[inline(always)]
    fn push_seq(&mut self, seq: AsciiSeq) -> Range<usize> {
        let start = self.seq.len();
        let end = start + seq.len();
        let range = start..end;
        self.seq.extend(seq.0);
        range
    }

    #[inline(always)]
    fn push_ascii(&mut self, seq: &[u8]) -> Range<usize> {
        self.push_seq(AsciiSeq(seq))
    }

    #[cfg(feature = "rand")]
    fn random(n: usize) -> Self {
        use rand::{RngCore, SeedableRng};

        let mut seq = vec![0; n];
        rand::rngs::SmallRng::from_os_rng().fill_bytes(&mut seq);
        Self {
            seq: seq.into_iter().map(|b| b"ACGT"[b as usize % 4]).collect(),
        }
    }
}
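
// A minimal consistency-check sketch: by construction, `as_u64` and `iter_bp`
// above must agree on the 2-bit encoding, assuming `pack_char` implements the
// same `(ascii >> 1) & 3` mapping used by the SIMD paths.
#[cfg(test)]
mod ascii_seq_tests {
    use super::*;

    #[test]
    fn as_u64_matches_iter_bp() {
        let seq = AsciiSeqVec::from_vec(b"ACGTACGTACGT".to_vec());
        let s = seq.as_slice();
        // Pack the 2-bit characters by hand, least-significant base first...
        let mut expected = 0u64;
        for (i, bp) in s.iter_bp().enumerate() {
            expected |= (bp as u64) << (2 * i);
        }
        // ...and compare against the SIMD/bit-trick implementation.
        assert_eq!(s.as_u64(), expected);
    }
}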