use std::io::{self, Read, Write};

use base64_simd::AsOut;
use rayon::prelude::*;

const BASE64_ENGINE: &base64_simd::Base64 = &base64_simd::STANDARD;

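// Chunk size for sequential no-wrap encoding: ~32 MiB, rounded down to a
// multiple of 3 so that no intermediate chunk ends with base64 padding.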
const NOWRAP_CHUNK: usize = 32 * 1024 * 1024 - (32 * 1024 * 1024 % 3);

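// Inputs at least this large are encoded on the rayon thread pool.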
const PARALLEL_ENCODE_THRESHOLD: usize = 1024 * 1024;

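// Inputs at least this large are decoded on the rayon thread pool.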
const PARALLEL_DECODE_THRESHOLD: usize = 1024 * 1024;

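/// Encodes `data` as standard base64 and writes it to `out`, wrapping output
/// lines at `wrap_col` columns; `wrap_col == 0` disables wrapping.
///
/// Illustrative sketch of a call site (marked `ignore` because the import
/// path depends on the enclosing crate):
///
/// ```ignore
/// let mut out: Vec<u8> = Vec::new();
/// encode_to_writer(b"hello world", 76, &mut out).unwrap();
/// assert_eq!(out, b"aGVsbG8gd29ybGQ=\n");
/// ```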
pub fn encode_to_writer(data: &[u8], wrap_col: usize, out: &mut impl Write) -> io::Result<()> {
    if data.is_empty() {
        return Ok(());
    }

    if wrap_col == 0 {
        return encode_no_wrap(data, out);
    }

    encode_wrapped(data, wrap_col, out)
}

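/// Sequential no-wrap encoding; large inputs are diverted to the parallel path.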
fn encode_no_wrap(data: &[u8], out: &mut impl Write) -> io::Result<()> {
    if data.len() >= PARALLEL_ENCODE_THRESHOLD {
        return encode_no_wrap_parallel(data, out);
    }

    let actual_chunk = NOWRAP_CHUNK.min(data.len());
    let enc_max = BASE64_ENGINE.encoded_length(actual_chunk);
    let mut buf: Vec<u8> = Vec::with_capacity(enc_max);
    // Skip the zero-fill: every byte of `buf` that is read below is written
    // first by `encode`.
    #[allow(clippy::uninit_vec)]
    unsafe {
        buf.set_len(enc_max);
    }

    for chunk in data.chunks(NOWRAP_CHUNK) {
        let enc_len = BASE64_ENGINE.encoded_length(chunk.len());
        let encoded = BASE64_ENGINE.encode(chunk, buf[..enc_len].as_out());
        out.write_all(encoded)?;
    }
    Ok(())
}

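/// Parallel no-wrap encoding: the input is split on 3-byte boundaries so each
/// chunk encodes independently, with padding possible only in the final chunk.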
fn encode_no_wrap_parallel(data: &[u8], out: &mut impl Write) -> io::Result<()> {
    let num_threads = rayon::current_num_threads().max(1);
    let raw_chunk = data.len() / num_threads;
    // Round up to a multiple of 3 so only the final chunk can be short.
    let chunk_size = ((raw_chunk + 2) / 3) * 3;

    let chunks: Vec<&[u8]> = data.chunks(chunk_size.max(3)).collect();
    let encoded_chunks: Vec<Vec<u8>> = chunks
        .par_iter()
        .map(|chunk| {
            let enc_len = BASE64_ENGINE.encoded_length(chunk.len());
            let mut buf: Vec<u8> = Vec::with_capacity(enc_len);
            #[allow(clippy::uninit_vec)]
            unsafe {
                buf.set_len(enc_len);
            }
            let _ = BASE64_ENGINE.encode(chunk, buf[..enc_len].as_out());
            buf
        })
        .collect();

    // Hand all encoded chunks to the writer in one vectored write when possible.
    let iov: Vec<io::IoSlice> = encoded_chunks.iter().map(|c| io::IoSlice::new(c)).collect();
    write_all_vectored(out, &iov)
}

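/// Encoding with line wrapping. Dispatches to a parallel path when the line
/// geometry allows independent per-chunk wrapping, otherwise encodes and
/// wraps sequentially in large chunks.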
fn encode_wrapped(data: &[u8], wrap_col: usize, out: &mut impl Write) -> io::Result<()> {
    // Whole input bytes covered by one output line: 3 bytes per 4 characters
    // (floored; zero for wrap_col < 2).
    let bytes_per_line = wrap_col * 3 / 4;
    if bytes_per_line == 0 {
        return encode_wrapped_small(data, wrap_col, out);
    }

    // The parallel path wraps each chunk independently, which is only correct
    // when every chunk's encoded output ends exactly at a line boundary. That
    // holds precisely when wrap_col is a multiple of 4: bytes_per_line is then
    // exactly wrap_col * 3 / 4 and a multiple of 3. Checking only
    // `bytes_per_line % 3 == 0` is not enough: for wrap_col = 9,
    // bytes_per_line = 6 encodes to 8 characters per block, misaligning lines
    // across chunk boundaries.
    if data.len() >= PARALLEL_ENCODE_THRESHOLD && wrap_col.is_multiple_of(4) {
        return encode_wrapped_parallel(data, wrap_col, bytes_per_line, out);
    }

    // Sequential path: chunk on multiples of 3 * wrap_col input bytes. Each
    // such block encodes to exactly 4 * wrap_col characters, i.e. exactly
    // four full output lines, so no chunk ends with padding or mid-line.
    let block = 3 * wrap_col;
    let max_input_chunk = ((32 * 1024 * 1024) / block).max(1) * block;
    let input_chunk = max_input_chunk.min(data.len());

    let enc_max = BASE64_ENGINE.encoded_length(input_chunk);
    let mut encode_buf: Vec<u8> = Vec::with_capacity(enc_max);
    #[allow(clippy::uninit_vec)]
    unsafe {
        encode_buf.set_len(enc_max);
    }

    for chunk in data.chunks(max_input_chunk) {
        let enc_len = BASE64_ENGINE.encoded_length(chunk.len());
        let encoded = BASE64_ENGINE.encode(chunk, encode_buf[..enc_len].as_out());
        write_wrapped_iov(encoded, wrap_col, out)?;
    }

    Ok(())
}

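// Shared newline byte, referenced by the IoSlice entries built below.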
static NEWLINE: [u8; 1] = [b'\n'];

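/// Writes `encoded` wrapped at `wrap_col` columns, terminating the final
/// (possibly partial) line with a newline, using vectored writes.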
#[inline]
fn write_wrapped_iov(encoded: &[u8], wrap_col: usize, out: &mut impl Write) -> io::Result<()> {
    // Cap the slice count per vectored write; kernels limit the iovec count
    // (IOV_MAX is typically 1024).
    const MAX_IOV: usize = 1024;

    let num_full_lines = encoded.len() / wrap_col;
    let remainder = encoded.len() % wrap_col;
    let total_iov = num_full_lines * 2 + if remainder > 0 { 2 } else { 0 };

    // Common case: everything fits into a single vectored write.
    if total_iov <= MAX_IOV {
        let mut iov: Vec<io::IoSlice> = Vec::with_capacity(total_iov);
        let mut pos = 0;
        for _ in 0..num_full_lines {
            iov.push(io::IoSlice::new(&encoded[pos..pos + wrap_col]));
            iov.push(io::IoSlice::new(&NEWLINE));
            pos += wrap_col;
        }
        if remainder > 0 {
            iov.push(io::IoSlice::new(&encoded[pos..pos + remainder]));
            iov.push(io::IoSlice::new(&NEWLINE));
        }
        return write_all_vectored(out, &iov);
    }

    // Otherwise flush in batches of MAX_IOV slices.
    let mut iov: Vec<io::IoSlice> = Vec::with_capacity(MAX_IOV);
    let mut pos = 0;
    for _ in 0..num_full_lines {
        iov.push(io::IoSlice::new(&encoded[pos..pos + wrap_col]));
        iov.push(io::IoSlice::new(&NEWLINE));
        pos += wrap_col;
        if iov.len() >= MAX_IOV {
            write_all_vectored(out, &iov)?;
            iov.clear();
        }
    }
    if remainder > 0 {
        iov.push(io::IoSlice::new(&encoded[pos..pos + remainder]));
        iov.push(io::IoSlice::new(&NEWLINE));
    }
    if !iov.is_empty() {
        write_all_vectored(out, &iov)?;
    }
    Ok(())
}

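/// Streaming variant of [`write_wrapped_iov`]: `col` carries the current
/// output column across calls, so lines can span chunk boundaries. No newline
/// is emitted for a trailing partial line; the caller finishes it at EOF.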
#[inline]
fn write_wrapped_iov_streaming(
    encoded: &[u8],
    wrap_col: usize,
    col: &mut usize,
    out: &mut impl Write,
) -> io::Result<()> {
    const MAX_IOV: usize = 1024;
    let mut iov: Vec<io::IoSlice> = Vec::with_capacity(MAX_IOV);
    let mut rp = 0;

    while rp < encoded.len() {
        let space = wrap_col - *col;
        let avail = encoded.len() - rp;

        if avail <= space {
            // The rest of this chunk fits on the current line.
            iov.push(io::IoSlice::new(&encoded[rp..rp + avail]));
            *col += avail;
            if *col == wrap_col {
                iov.push(io::IoSlice::new(&NEWLINE));
                *col = 0;
            }
            break;
        } else {
            // Fill the current line and start a new one.
            iov.push(io::IoSlice::new(&encoded[rp..rp + space]));
            iov.push(io::IoSlice::new(&NEWLINE));
            rp += space;
            *col = 0;
        }

        // Leave room for the up-to-two slices pushed per iteration.
        if iov.len() >= MAX_IOV - 1 {
            write_all_vectored(out, &iov)?;
            iov.clear();
        }
    }

    if !iov.is_empty() {
        write_all_vectored(out, &iov)?;
    }
    Ok(())
}

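/// Parallel wrapped encoding. The caller guarantees each chunk covers a whole
/// number of output lines, so chunks can be encoded and wrapped independently
/// and then concatenated in order.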
fn encode_wrapped_parallel(
    data: &[u8],
    wrap_col: usize,
    bytes_per_line: usize,
    out: &mut impl Write,
) -> io::Result<()> {
    let num_threads = rayon::current_num_threads().max(1);
    let lines_per_chunk = (data.len() / bytes_per_line / num_threads).max(1);
    let chunk_size = lines_per_chunk * bytes_per_line;

    let chunks: Vec<&[u8]> = data.chunks(chunk_size.max(bytes_per_line)).collect();
    let encoded_chunks: Vec<Vec<u8>> = chunks
        .par_iter()
        .map(|chunk| {
            let enc_max = BASE64_ENGINE.encoded_length(chunk.len());
            let max_lines = enc_max / wrap_col + 2;
            // One allocation holds both regions: the wrapped output at the
            // front and a scratch region for the raw encoding at the back.
            // After fusing, the buffer is truncated to just the wrapped part.
            let fused_size = enc_max + max_lines;
            let total_size = fused_size + enc_max;
            let mut buf: Vec<u8> = Vec::with_capacity(total_size);
            #[allow(clippy::uninit_vec)]
            unsafe {
                buf.set_len(total_size);
            }
            let _ = BASE64_ENGINE.encode(chunk, buf[fused_size..fused_size + enc_max].as_out());
            let (fused_region, encode_region) = buf.split_at_mut(fused_size);
            let encoded = &encode_region[..enc_max];
            let wp = fuse_wrap(encoded, wrap_col, fused_region);
            buf.truncate(wp);
            buf
        })
        .collect();

    let iov: Vec<io::IoSlice> = encoded_chunks.iter().map(|c| io::IoSlice::new(c)).collect();
    write_all_vectored(out, &iov)
}

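/// Copies `encoded` into `out_buf`, inserting a newline after every
/// `wrap_col` characters and after a trailing partial line. Returns the
/// number of bytes written. The copy loops are manually unrolled 8x/4x.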
#[inline]
fn fuse_wrap(encoded: &[u8], wrap_col: usize, out_buf: &mut [u8]) -> usize {
    let line_out = wrap_col + 1;
    let mut rp = 0;
    let mut wp = 0;

    // Eight lines per iteration.
    while rp + 8 * wrap_col <= encoded.len() {
        unsafe {
            let src = encoded.as_ptr().add(rp);
            let dst = out_buf.as_mut_ptr().add(wp);

            std::ptr::copy_nonoverlapping(src, dst, wrap_col);
            *dst.add(wrap_col) = b'\n';

            std::ptr::copy_nonoverlapping(src.add(wrap_col), dst.add(line_out), wrap_col);
            *dst.add(line_out + wrap_col) = b'\n';

            std::ptr::copy_nonoverlapping(src.add(2 * wrap_col), dst.add(2 * line_out), wrap_col);
            *dst.add(2 * line_out + wrap_col) = b'\n';

            std::ptr::copy_nonoverlapping(src.add(3 * wrap_col), dst.add(3 * line_out), wrap_col);
            *dst.add(3 * line_out + wrap_col) = b'\n';

            std::ptr::copy_nonoverlapping(src.add(4 * wrap_col), dst.add(4 * line_out), wrap_col);
            *dst.add(4 * line_out + wrap_col) = b'\n';

            std::ptr::copy_nonoverlapping(src.add(5 * wrap_col), dst.add(5 * line_out), wrap_col);
            *dst.add(5 * line_out + wrap_col) = b'\n';

            std::ptr::copy_nonoverlapping(src.add(6 * wrap_col), dst.add(6 * line_out), wrap_col);
            *dst.add(6 * line_out + wrap_col) = b'\n';

            std::ptr::copy_nonoverlapping(src.add(7 * wrap_col), dst.add(7 * line_out), wrap_col);
            *dst.add(7 * line_out + wrap_col) = b'\n';
        }
        rp += 8 * wrap_col;
        wp += 8 * line_out;
    }

    // Four lines per iteration.
    while rp + 4 * wrap_col <= encoded.len() {
        unsafe {
            let src = encoded.as_ptr().add(rp);
            let dst = out_buf.as_mut_ptr().add(wp);

            std::ptr::copy_nonoverlapping(src, dst, wrap_col);
            *dst.add(wrap_col) = b'\n';

            std::ptr::copy_nonoverlapping(src.add(wrap_col), dst.add(line_out), wrap_col);
            *dst.add(line_out + wrap_col) = b'\n';

            std::ptr::copy_nonoverlapping(src.add(2 * wrap_col), dst.add(2 * line_out), wrap_col);
            *dst.add(2 * line_out + wrap_col) = b'\n';

            std::ptr::copy_nonoverlapping(src.add(3 * wrap_col), dst.add(3 * line_out), wrap_col);
            *dst.add(3 * line_out + wrap_col) = b'\n';
        }
        rp += 4 * wrap_col;
        wp += 4 * line_out;
    }

    // One line at a time.
    while rp + wrap_col <= encoded.len() {
        unsafe {
            std::ptr::copy_nonoverlapping(
                encoded.as_ptr().add(rp),
                out_buf.as_mut_ptr().add(wp),
                wrap_col,
            );
            *out_buf.as_mut_ptr().add(wp + wrap_col) = b'\n';
        }
        rp += wrap_col;
        wp += line_out;
    }

    // Trailing partial line, newline-terminated.
    if rp < encoded.len() {
        let remaining = encoded.len() - rp;
        unsafe {
            std::ptr::copy_nonoverlapping(
                encoded.as_ptr().add(rp),
                out_buf.as_mut_ptr().add(wp),
                remaining,
            );
        }
        wp += remaining;
        out_buf[wp] = b'\n';
        wp += 1;
    }

    wp
}

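/// Fallback for wrap columns too small for the fast paths (wrap_col == 1):
/// encodes everything at once and writes line by line.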
fn encode_wrapped_small(data: &[u8], wrap_col: usize, out: &mut impl Write) -> io::Result<()> {
    let enc_max = BASE64_ENGINE.encoded_length(data.len());
    let mut buf: Vec<u8> = Vec::with_capacity(enc_max);
    #[allow(clippy::uninit_vec)]
    unsafe {
        buf.set_len(enc_max);
    }
    let encoded = BASE64_ENGINE.encode(data, buf[..enc_max].as_out());

    let wc = wrap_col.max(1);
    for line in encoded.chunks(wc) {
        out.write_all(line)?;
        out.write_all(b"\n")?;
    }
    Ok(())
}

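/// Decodes base64 `data` and writes the result to `out`. With
/// `ignore_garbage`, every byte outside the base64 alphabet is dropped first;
/// otherwise only ASCII whitespace is stripped before decoding.
///
/// Illustrative sketch of a call site (marked `ignore` because the import
/// path depends on the enclosing crate):
///
/// ```ignore
/// let mut out: Vec<u8> = Vec::new();
/// decode_to_writer(b"aGVsbG8gd29ybGQ=\n", false, &mut out).unwrap();
/// assert_eq!(out, b"hello world");
/// ```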
pub fn decode_to_writer(data: &[u8], ignore_garbage: bool, out: &mut impl Write) -> io::Result<()> {
    if data.is_empty() {
        return Ok(());
    }

    if ignore_garbage {
        let mut cleaned = strip_non_base64(data);
        return decode_clean_slice(&mut cleaned, out);
    }

    decode_stripping_whitespace(data, out)
}

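/// Like [`decode_to_writer`], but takes ownership of the buffer so garbage
/// and whitespace can be stripped in place without a second allocation.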
pub fn decode_owned(
    data: &mut Vec<u8>,
    ignore_garbage: bool,
    out: &mut impl Write,
) -> io::Result<()> {
    if data.is_empty() {
        return Ok(());
    }

    if ignore_garbage {
        data.retain(|&b| is_base64_char(b));
    } else {
        strip_whitespace_inplace(data);
    }

    decode_clean_slice(data, out)
}

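/// Removes ASCII whitespace from `data` in place by compacting the kept bytes
/// toward the front.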
fn strip_whitespace_inplace(data: &mut Vec<u8>) {
    // Fast pre-scan: most inputs have no whitespace at all.
    let has_ws = data.iter().any(|&b| !NOT_WHITESPACE[b as usize]);
    if !has_ws {
        return;
    }

    // Compact in place through a single raw pointer (a separate `as_ptr`
    // would be invalidated by `as_mut_ptr`); `wp <= i` always, so reads stay
    // ahead of writes.
    let ptr = data.as_mut_ptr();
    let len = data.len();
    let mut wp = 0usize;

    for i in 0..len {
        let b = unsafe { *ptr.add(i) };
        if NOT_WHITESPACE[b as usize] {
            unsafe { *ptr.add(wp) = b };
            wp += 1;
        }
    }

    data.truncate(wp);
}

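// Lookup table: `true` for every byte that is not ASCII whitespace
// (space, tab, newline, carriage return, vertical tab, form feed).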
static NOT_WHITESPACE: [bool; 256] = {
    let mut table = [true; 256];
    table[b' ' as usize] = false;
    table[b'\t' as usize] = false;
    table[b'\n' as usize] = false;
    table[b'\r' as usize] = false;
    table[0x0b] = false; // vertical tab
    table[0x0c] = false; // form feed
    table
};

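/// Strips whitespace from `data` and decodes. Newlines and carriage returns
/// (the overwhelmingly common case for wrapped base64) are removed with a
/// memchr-driven copy; a second compaction pass runs only if rarer whitespace
/// (space, tab, VT, FF) was seen along the way.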
fn decode_stripping_whitespace(data: &[u8], out: &mut impl Write) -> io::Result<()> {
    let has_ws = data.iter().any(|&b| !NOT_WHITESPACE[b as usize]);
    if !has_ws {
        // Nothing to strip; decode straight from the borrowed input.
        return decode_borrowed_clean(out, data);
    }

    let mut clean: Vec<u8> = Vec::with_capacity(data.len());
    let dst = clean.as_mut_ptr();
    let mut wp = 0usize;
    let mut gap_start = 0usize;
    let mut has_rare_ws = false;

    // Copy the runs between newline/carriage-return positions.
    for pos in memchr::memchr2_iter(b'\n', b'\r', data) {
        let gap_len = pos - gap_start;
        if gap_len > 0 {
            if !has_rare_ws {
                has_rare_ws = data[gap_start..pos]
                    .iter()
                    .any(|&b| b == b' ' || b == b'\t' || b == 0x0b || b == 0x0c);
            }
            unsafe {
                std::ptr::copy_nonoverlapping(data.as_ptr().add(gap_start), dst.add(wp), gap_len);
            }
            wp += gap_len;
        }
        gap_start = pos + 1;
    }
    // Copy the tail after the last newline.
    let tail_len = data.len() - gap_start;
    if tail_len > 0 {
        if !has_rare_ws {
            has_rare_ws = data[gap_start..]
                .iter()
                .any(|&b| b == b' ' || b == b'\t' || b == 0x0b || b == 0x0c);
        }
        unsafe {
            std::ptr::copy_nonoverlapping(data.as_ptr().add(gap_start), dst.add(wp), tail_len);
        }
        wp += tail_len;
    }
    unsafe {
        clean.set_len(wp);
    }

    // Rare whitespace survived the newline pass; compact it out in place.
    if has_rare_ws {
        let ptr = clean.as_mut_ptr();
        let len = clean.len();
        let mut rp = 0;
        let mut cwp = 0;
        while rp < len {
            let b = unsafe { *ptr.add(rp) };
            if NOT_WHITESPACE[b as usize] {
                unsafe { *ptr.add(cwp) = b };
                cwp += 1;
            }
            rp += 1;
        }
        clean.truncate(cwp);
    }

    decode_clean_slice(&mut clean, out)
}

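/// Decodes a slice that contains no whitespace or garbage, in place.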
fn decode_clean_slice(data: &mut [u8], out: &mut impl Write) -> io::Result<()> {
    if data.is_empty() {
        return Ok(());
    }
    match BASE64_ENGINE.decode_inplace(data) {
        Ok(decoded) => out.write_all(decoded),
        Err(_) => decode_error(),
    }
}

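// Out-of-line error constructor keeps the hot decode paths small.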
#[cold]
#[inline(never)]
fn decode_error() -> io::Result<()> {
    Err(io::Error::new(io::ErrorKind::InvalidData, "invalid input"))
}

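/// Decodes a borrowed, already-clean slice, going parallel for large inputs.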
fn decode_borrowed_clean(out: &mut impl Write, data: &[u8]) -> io::Result<()> {
    if data.is_empty() {
        return Ok(());
    }
    if data.len() >= PARALLEL_DECODE_THRESHOLD {
        return decode_borrowed_clean_parallel(out, data);
    }
    match BASE64_ENGINE.decode_to_vec(data) {
        Ok(decoded) => {
            out.write_all(&decoded)?;
            Ok(())
        }
        Err(_) => decode_error(),
    }
}

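/// Parallel decoding: the input is split on 4-character boundaries, exact
/// output offsets are precomputed, and every chunk decodes directly into its
/// disjoint region of one shared output buffer.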
fn decode_borrowed_clean_parallel(out: &mut impl Write, data: &[u8]) -> io::Result<()> {
    let num_threads = rayon::current_num_threads().max(1);
    let raw_chunk = data.len() / num_threads;
    // Round up to a multiple of 4 so chunk boundaries fall between base64
    // quads; '=' padding can then appear only in the final chunk.
    let chunk_size = ((raw_chunk + 3) / 4) * 4;

    let chunks: Vec<&[u8]> = data.chunks(chunk_size.max(4)).collect();

    // Precompute each chunk's decoded size so output offsets are exact.
    let mut offsets: Vec<usize> = Vec::with_capacity(chunks.len() + 1);
    offsets.push(0);
    let mut total_decoded = 0usize;
    for (i, chunk) in chunks.iter().enumerate() {
        let decoded_size = if i == chunks.len() - 1 {
            // Only the final chunk can carry padding.
            let pad = chunk.iter().rev().take(2).filter(|&&b| b == b'=').count();
            chunk.len() * 3 / 4 - pad
        } else {
            chunk.len() * 3 / 4
        };
        total_decoded += decoded_size;
        offsets.push(total_decoded);
    }

    let mut output_buf: Vec<u8> = Vec::with_capacity(total_decoded);
    #[allow(clippy::uninit_vec)]
    unsafe {
        output_buf.set_len(total_decoded);
    }

    // Share the output pointer across threads as a usize; each task writes
    // only its own disjoint `offsets[i]..offsets[i + 1]` region.
    let out_addr = output_buf.as_mut_ptr() as usize;
    let decode_result: Result<Vec<()>, io::Error> = chunks
        .par_iter()
        .enumerate()
        .map(|(i, chunk)| {
            let offset = offsets[i];
            let expected_size = offsets[i + 1] - offset;
            let out_slice = unsafe {
                std::slice::from_raw_parts_mut((out_addr as *mut u8).add(offset), expected_size)
            };
            let decoded = BASE64_ENGINE
                .decode(chunk, out_slice.as_out())
                .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "invalid input"))?;
            debug_assert_eq!(decoded.len(), expected_size);
            Ok(())
        })
        .collect();

    decode_result?;

    out.write_all(&output_buf[..total_decoded])
}

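/// Copies only bytes of the base64 alphabet (plus `=`) into a fresh buffer.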
fn strip_non_base64(data: &[u8]) -> Vec<u8> {
    data.iter()
        .copied()
        .filter(|&b| is_base64_char(b))
        .collect()
}

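/// True for bytes of the standard base64 alphabet, including `=` padding.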
#[inline]
fn is_base64_char(b: u8) -> bool {
    b.is_ascii_alphanumeric() || b == b'+' || b == b'/' || b == b'='
}

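/// Streaming encoder for readers whose size is unknown up front; reads fixed
/// chunks and encodes them as they arrive.
///
/// Illustrative sketch of a call site (marked `ignore` because the import
/// path depends on the enclosing crate):
///
/// ```ignore
/// let mut input = std::io::stdin().lock();
/// let mut output = std::io::stdout().lock();
/// encode_stream(&mut input, 76, &mut output).unwrap();
/// ```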
pub fn encode_stream(
    reader: &mut impl Read,
    wrap_col: usize,
    writer: &mut impl Write,
) -> io::Result<()> {
    if wrap_col == 0 {
        return encode_stream_nowrap(reader, writer);
    }
    encode_stream_wrapped(reader, wrap_col, writer)
}

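/// Unwrapped streaming encode: read a multiple of 3 bytes at a time so only
/// the final chunk can produce padding.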
fn encode_stream_nowrap(reader: &mut impl Read, writer: &mut impl Write) -> io::Result<()> {
    // ~4 MiB rounded down to a multiple of 3.
    const NOWRAP_READ: usize = 4 * 1024 * 1024 - (4 * 1024 * 1024 % 3);

    let mut buf = vec![0u8; NOWRAP_READ];
    let encode_buf_size = BASE64_ENGINE.encoded_length(NOWRAP_READ);
    let mut encode_buf = vec![0u8; encode_buf_size];

    loop {
        let n = read_full(reader, &mut buf)?;
        if n == 0 {
            break;
        }
        let enc_len = BASE64_ENGINE.encoded_length(n);
        let encoded = BASE64_ENGINE.encode(&buf[..n], encode_buf[..enc_len].as_out());
        writer.write_all(encoded)?;
    }
    Ok(())
}

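/// Wrapped streaming encode: `col` tracks the output column across chunks so
/// lines wrap correctly at chunk boundaries.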
fn encode_stream_wrapped(
    reader: &mut impl Read,
    wrap_col: usize,
    writer: &mut impl Write,
) -> io::Result<()> {
    // A multiple of 3, so intermediate chunks never produce padding.
    const STREAM_READ: usize = 3 * 1024 * 1024;
    let mut buf = vec![0u8; STREAM_READ];
    let encode_buf_size = BASE64_ENGINE.encoded_length(STREAM_READ);
    let mut encode_buf = vec![0u8; encode_buf_size];

    let mut col = 0usize;

    loop {
        let n = read_full(reader, &mut buf)?;
        if n == 0 {
            break;
        }
        let enc_len = BASE64_ENGINE.encoded_length(n);
        let encoded = BASE64_ENGINE.encode(&buf[..n], encode_buf[..enc_len].as_out());

        write_wrapped_iov_streaming(encoded, wrap_col, &mut col, writer)?;
    }

    // Terminate a trailing partial line.
    if col > 0 {
        writer.write_all(b"\n")?;
    }

    Ok(())
}

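/// Streaming decoder: strips whitespace (or all garbage with
/// `ignore_garbage`) per chunk, decodes in multiples of 4 characters, and
/// carries up to 3 leftover characters between reads.
///
/// Illustrative sketch of a call site (marked `ignore` because the import
/// path depends on the enclosing crate):
///
/// ```ignore
/// let mut input = std::io::stdin().lock();
/// let mut output = std::io::stdout().lock();
/// decode_stream(&mut input, false, &mut output).unwrap();
/// ```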
pub fn decode_stream(
    reader: &mut impl Read,
    ignore_garbage: bool,
    writer: &mut impl Write,
) -> io::Result<()> {
    const READ_CHUNK: usize = 4 * 1024 * 1024;
    let mut buf = vec![0u8; READ_CHUNK];
    let mut clean: Vec<u8> = Vec::with_capacity(READ_CHUNK + 4);
    // Up to 3 base64 characters left over from the previous chunk.
    let mut carry = [0u8; 4];
    let mut carry_len = 0usize;

    loop {
        let n = read_full(reader, &mut buf)?;
        if n == 0 {
            break;
        }

        // Start `clean` with the carried-over characters.
        unsafe {
            std::ptr::copy_nonoverlapping(carry.as_ptr(), clean.as_mut_ptr(), carry_len);
        }

        let chunk = &buf[..n];
        if ignore_garbage {
            // Keep only bytes of the base64 alphabet.
            let dst = unsafe { clean.as_mut_ptr().add(carry_len) };
            let mut wp = 0usize;
            for &b in chunk {
                if is_base64_char(b) {
                    unsafe { *dst.add(wp) = b };
                    wp += 1;
                }
            }
            unsafe { clean.set_len(carry_len + wp) };
        } else {
            // Same newline-first stripping strategy as
            // `decode_stripping_whitespace`, applied per chunk.
            let dst = unsafe { clean.as_mut_ptr().add(carry_len) };
            let mut wp = 0usize;
            let mut gap_start = 0usize;
            let mut has_rare_ws = false;

            for pos in memchr::memchr2_iter(b'\n', b'\r', chunk) {
                let gap_len = pos - gap_start;
                if gap_len > 0 {
                    if !has_rare_ws {
                        has_rare_ws = chunk[gap_start..pos]
                            .iter()
                            .any(|&b| b == b' ' || b == b'\t' || b == 0x0b || b == 0x0c);
                    }
                    unsafe {
                        std::ptr::copy_nonoverlapping(
                            chunk.as_ptr().add(gap_start),
                            dst.add(wp),
                            gap_len,
                        );
                    }
                    wp += gap_len;
                }
                gap_start = pos + 1;
            }
            let tail_len = n - gap_start;
            if tail_len > 0 {
                if !has_rare_ws {
                    has_rare_ws = chunk[gap_start..n]
                        .iter()
                        .any(|&b| b == b' ' || b == b'\t' || b == 0x0b || b == 0x0c);
                }
                unsafe {
                    std::ptr::copy_nonoverlapping(
                        chunk.as_ptr().add(gap_start),
                        dst.add(wp),
                        tail_len,
                    );
                }
                wp += tail_len;
            }
            let total_clean = carry_len + wp;
            unsafe { clean.set_len(total_clean) };

            // Rare whitespace survived the newline pass; compact in place
            // (the carry prefix is already clean).
            if has_rare_ws {
                let ptr = clean.as_mut_ptr();
                let mut rp = carry_len;
                let mut cwp = carry_len;
                while rp < total_clean {
                    let b = unsafe { *ptr.add(rp) };
                    if NOT_WHITESPACE[b as usize] {
                        unsafe { *ptr.add(cwp) = b };
                        cwp += 1;
                    }
                    rp += 1;
                }
                clean.truncate(cwp);
            }
        }

        carry_len = 0;
        // `read_full` returns short only at EOF.
        let is_last = n < READ_CHUNK;

        if is_last {
            decode_clean_slice(&mut clean, writer)?;
        } else {
            // Decode whole quads; stash the remainder for the next chunk.
            let clean_len = clean.len();
            let decode_len = (clean_len / 4) * 4;
            let leftover = clean_len - decode_len;
            if leftover > 0 {
                unsafe {
                    std::ptr::copy_nonoverlapping(
                        clean.as_ptr().add(decode_len),
                        carry.as_mut_ptr(),
                        leftover,
                    );
                }
                carry_len = leftover;
            }
            if decode_len > 0 {
                clean.truncate(decode_len);
                decode_clean_slice(&mut clean, writer)?;
            }
        }
    }

    // A leftover partial quad at EOF is decoded (and errors if invalid).
    if carry_len > 0 {
        let mut carry_buf = carry[..carry_len].to_vec();
        decode_clean_slice(&mut carry_buf, writer)?;
    }

    Ok(())
}

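/// Writes every slice completely, starting with one `write_vectored` call and
/// falling back to per-slice `write_all` for whatever was left unwritten.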
fn write_all_vectored(out: &mut impl Write, slices: &[io::IoSlice]) -> io::Result<()> {
    if slices.is_empty() {
        return Ok(());
    }
    let total: usize = slices.iter().map(|s| s.len()).sum();

    // Fast path: the kernel accepted everything in one call. An interrupted
    // call is treated as zero bytes written and retried via the slow path.
    let written = match out.write_vectored(slices) {
        Ok(n) if n >= total => return Ok(()),
        Ok(n) => n,
        Err(e) if e.kind() == io::ErrorKind::Interrupted => 0,
        Err(e) => return Err(e),
    };

    // Slow path: skip the bytes already written, then finish each slice.
    let mut skip = written;
    for slice in slices {
        let slen = slice.len();
        if skip >= slen {
            skip -= slen;
            continue;
        }
        if skip > 0 {
            out.write_all(&slice[skip..])?;
            skip = 0;
        } else {
            out.write_all(slice)?;
        }
    }
    Ok(())
}

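/// Reads until `buf` is full or EOF, retrying on `Interrupted`, so a return
/// value shorter than `buf.len()` reliably signals end of input.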
#[inline]
fn read_full(reader: &mut impl Read, buf: &mut [u8]) -> io::Result<usize> {
    // First read covers the common case: a full buffer or immediate EOF.
    let n = loop {
        match reader.read(buf) {
            Ok(n) => break n,
            Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        }
    };
    if n == buf.len() || n == 0 {
        return Ok(n);
    }
    let mut total = n;
    while total < buf.len() {
        match reader.read(&mut buf[total..]) {
            Ok(0) => break,
            Ok(n) => total += n,
            Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        }
    }
    Ok(total)
}