// coreutils_rs/base64/core.rs

use std::io::{self, Read, Write};

use base64_simd::AsOut;
use rayon::prelude::*;

const BASE64_ENGINE: &base64_simd::Base64 = &base64_simd::STANDARD;

/// Streaming encode chunk: 8MB aligned to 3 bytes for maximum throughput.
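/// Chunks that are whole multiples of 3 bytes encode to complete 4-character
/// groups, so no '=' padding appears before the true end of the input.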
const STREAM_ENCODE_CHUNK: usize = 8 * 1024 * 1024 - (8 * 1024 * 1024 % 3);

/// Chunk size for no-wrap encoding: 8MB aligned to 3 bytes.
const NOWRAP_CHUNK: usize = 8 * 1024 * 1024 - (8 * 1024 * 1024 % 3);

/// Minimum input size for parallel encoding (also used for parallel decoding).
const PARALLEL_ENCODE_THRESHOLD: usize = 1024 * 1024;

/// Encode data and write to output with line wrapping.
/// Uses SIMD encoding with reusable buffers for maximum throughput.
pub fn encode_to_writer(data: &[u8], wrap_col: usize, out: &mut impl Write) -> io::Result<()> {
    if data.is_empty() {
        return Ok(());
    }

    if wrap_col == 0 {
        return encode_no_wrap(data, out);
    }

    encode_wrapped(data, wrap_col, out)
}

/// Encode without wrapping using parallel SIMD encoding for large inputs.
fn encode_no_wrap(data: &[u8], out: &mut impl Write) -> io::Result<()> {
    if data.len() >= PARALLEL_ENCODE_THRESHOLD {
        // Split into per-thread chunks aligned to 3-byte boundaries
        let num_threads = rayon::current_num_threads().max(1);
        let raw_chunk = (data.len() + num_threads - 1) / num_threads;
        // Align to 3 bytes for clean base64 boundaries (no padding mid-stream)
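        // (e.g. raw_chunk = 4_000_000 rounds up to 4_000_002, a multiple of 3)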
        let chunk_size = ((raw_chunk + 2) / 3) * 3;

        let encoded_chunks: Vec<Vec<u8>> = data
            .par_chunks(chunk_size)
            .map(|chunk| {
                let enc_len = BASE64_ENGINE.encoded_length(chunk.len());
                let mut buf = vec![0u8; enc_len];
                let encoded = BASE64_ENGINE.encode(chunk, buf[..enc_len].as_out());
                let len = encoded.len();
                buf.truncate(len);
                buf
            })
            .collect();

        for chunk in &encoded_chunks {
            out.write_all(chunk)?;
        }
        return Ok(());
    }

    let enc_max = BASE64_ENGINE.encoded_length(NOWRAP_CHUNK);
    let mut buf = vec![0u8; enc_max];

    for chunk in data.chunks(NOWRAP_CHUNK) {
        let enc_len = BASE64_ENGINE.encoded_length(chunk.len());
        let encoded = BASE64_ENGINE.encode(chunk, buf[..enc_len].as_out());
        out.write_all(encoded)?;
    }
    Ok(())
}

/// Encode with line wrapping. For large inputs, uses parallel encoding.
fn encode_wrapped(data: &[u8], wrap_col: usize, out: &mut impl Write) -> io::Result<()> {
    let bytes_per_line = wrap_col * 3 / 4;
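    // With the default wrap_col = 76, bytes_per_line = 57: 57 input bytes encode
    // to exactly one 76-character output line (57 / 3 * 4 = 76).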

    // The chunked paths below rely on each chunk being a whole number of 3-byte
    // groups that encodes to complete lines; that only holds when `wrap_col` is a
    // multiple of 4. For other wrap widths, encode in a single pass and wrap.
    if wrap_col % 4 != 0 {
        let enc_len = BASE64_ENGINE.encoded_length(data.len());
        let mut encode_buf = vec![0u8; enc_len];
        let encoded = BASE64_ENGINE.encode(data, encode_buf[..enc_len].as_out());
        let max_lines = (encoded.len() + wrap_col - 1) / wrap_col + 1;
        let mut wrap_buf = vec![0u8; max_lines * (wrap_col + 1)];
        let wp = wrap_encoded(encoded, wrap_col, &mut wrap_buf);
        return out.write_all(&wrap_buf[..wp]);
    }

    if data.len() >= PARALLEL_ENCODE_THRESHOLD && bytes_per_line > 0 {
        // Parallel: split input into chunks aligned to bytes_per_line (= 3-byte aligned)
        // so each chunk produces complete lines (no cross-chunk line splitting).
        let num_threads = rayon::current_num_threads().max(1);
        let lines_per_thread = ((data.len() / bytes_per_line) + num_threads - 1) / num_threads;
        let chunk_input = (lines_per_thread * bytes_per_line).max(bytes_per_line);

        let wrapped_chunks: Vec<Vec<u8>> = data
            .par_chunks(chunk_input)
            .map(|chunk| {
                let enc_len = BASE64_ENGINE.encoded_length(chunk.len());
                let mut encode_buf = vec![0u8; enc_len];
                let encoded = BASE64_ENGINE.encode(chunk, encode_buf[..enc_len].as_out());

                // Wrap the encoded output
                let line_out = wrap_col + 1;
                let max_lines = (encoded.len() + wrap_col - 1) / wrap_col + 1;
                let mut wrap_buf = vec![0u8; max_lines * line_out];
                let wp = wrap_encoded(encoded, wrap_col, &mut wrap_buf);
                wrap_buf.truncate(wp);
                wrap_buf
            })
            .collect();

        for chunk in &wrapped_chunks {
            out.write_all(chunk)?;
        }
        return Ok(());
    }

    // Sequential path
    let lines_per_chunk = (8 * 1024 * 1024) / bytes_per_line.max(1);
    let chunk_input = lines_per_chunk * bytes_per_line.max(1);
    let chunk_encoded_max = BASE64_ENGINE.encoded_length(chunk_input.max(1));
    let mut encode_buf = vec![0u8; chunk_encoded_max];
    let wrapped_max = (lines_per_chunk + 1) * (wrap_col + 1);
    let mut wrap_buf = vec![0u8; wrapped_max];

    for chunk in data.chunks(chunk_input.max(1)) {
        let enc_len = BASE64_ENGINE.encoded_length(chunk.len());
        let encoded = BASE64_ENGINE.encode(chunk, encode_buf[..enc_len].as_out());
        let wp = wrap_encoded(encoded, wrap_col, &mut wrap_buf);
        out.write_all(&wrap_buf[..wp])?;
    }

    Ok(())
}

/// Wrap encoded base64 data with newlines at `wrap_col` columns.
/// Returns number of bytes written to `wrap_buf`.
#[inline]
fn wrap_encoded(encoded: &[u8], wrap_col: usize, wrap_buf: &mut [u8]) -> usize {
    let line_out = wrap_col + 1;
    let mut rp = 0;
    let mut wp = 0;

    // Unrolled: process 4 lines per iteration
    while rp + 4 * wrap_col <= encoded.len() {
        unsafe {
            let src = encoded.as_ptr().add(rp);
            let dst = wrap_buf.as_mut_ptr().add(wp);

            std::ptr::copy_nonoverlapping(src, dst, wrap_col);
            *dst.add(wrap_col) = b'\n';

            std::ptr::copy_nonoverlapping(src.add(wrap_col), dst.add(line_out), wrap_col);
            *dst.add(line_out + wrap_col) = b'\n';

            std::ptr::copy_nonoverlapping(src.add(2 * wrap_col), dst.add(2 * line_out), wrap_col);
            *dst.add(2 * line_out + wrap_col) = b'\n';

            std::ptr::copy_nonoverlapping(src.add(3 * wrap_col), dst.add(3 * line_out), wrap_col);
            *dst.add(3 * line_out + wrap_col) = b'\n';
        }
        rp += 4 * wrap_col;
        wp += 4 * line_out;
    }

    // Remaining full lines
    while rp + wrap_col <= encoded.len() {
        wrap_buf[wp..wp + wrap_col].copy_from_slice(&encoded[rp..rp + wrap_col]);
        wp += wrap_col;
        wrap_buf[wp] = b'\n';
        wp += 1;
        rp += wrap_col;
    }

    // Partial last line
    if rp < encoded.len() {
        let remaining = encoded.len() - rp;
        wrap_buf[wp..wp + remaining].copy_from_slice(&encoded[rp..rp + remaining]);
        wp += remaining;
        wrap_buf[wp] = b'\n';
        wp += 1;
    }

    wp
}

/// Decode base64 data and write to output (borrows data, allocates clean buffer).
/// When `ignore_garbage` is true, strip all non-base64 characters.
/// When false, only strip whitespace (standard behavior).
pub fn decode_to_writer(data: &[u8], ignore_garbage: bool, out: &mut impl Write) -> io::Result<()> {
    if data.is_empty() {
        return Ok(());
    }

    if ignore_garbage {
        let mut cleaned = strip_non_base64(data);
        return decode_owned_clean(&mut cleaned, out);
    }

    // Fast path: strip newlines with memchr (SIMD), then SIMD decode
    decode_stripping_whitespace(data, out)
}

/// Decode base64 from an owned Vec (in-place whitespace strip + decode).
/// Avoids a full buffer copy by stripping whitespace in the existing allocation,
/// then decoding in-place. Ideal when the caller already has an owned Vec.
pub fn decode_owned(
    data: &mut Vec<u8>,
    ignore_garbage: bool,
    out: &mut impl Write,
) -> io::Result<()> {
    if data.is_empty() {
        return Ok(());
    }

    if ignore_garbage {
        data.retain(|&b| is_base64_char(b));
    } else {
        strip_whitespace_inplace(data);
    }

    decode_owned_clean(data, out)
}

/// Strip all whitespace from a Vec in-place using SIMD memchr for newlines
/// and a fallback scan for rare non-newline whitespace.
fn strip_whitespace_inplace(data: &mut Vec<u8>) {
    // First, collect newline positions using SIMD memchr.
    let positions: Vec<usize> = memchr::memchr_iter(b'\n', data.as_slice()).collect();

    if positions.is_empty() {
        // No newlines; check for other whitespace only.
        if data.iter().any(|&b| is_whitespace(b)) {
            data.retain(|&b| !is_whitespace(b));
        }
        return;
    }

    // Compact data in-place, removing newlines using copy_within.
    let mut wp = 0;
    let mut rp = 0;

    for &pos in &positions {
        if pos > rp {
            let len = pos - rp;
            data.copy_within(rp..pos, wp);
            wp += len;
        }
        rp = pos + 1;
    }

    let data_len = data.len();
    if rp < data_len {
        let len = data_len - rp;
        data.copy_within(rp..data_len, wp);
        wp += len;
    }

    data.truncate(wp);

    // Handle rare non-newline whitespace (CR, tab, etc.)
    if data.iter().any(|&b| is_whitespace(b)) {
        data.retain(|&b| !is_whitespace(b));
    }
}

/// Decode by stripping all whitespace from the entire input at once,
/// then performing a single SIMD decode pass. Used when data is borrowed.
/// For large inputs, decodes in parallel chunks for maximum throughput.
fn decode_stripping_whitespace(data: &[u8], out: &mut impl Write) -> io::Result<()> {
    // Quick check: any whitespace at all?
    if memchr::memchr(b'\n', data).is_none() && !data.iter().any(|&b| is_whitespace(b)) {
        return decode_borrowed_clean(out, data);
    }

    // Strip newlines from entire input in a single pass using SIMD memchr.
    let mut clean = Vec::with_capacity(data.len());
    let mut last = 0;
    for pos in memchr::memchr_iter(b'\n', data) {
        if pos > last {
            clean.extend_from_slice(&data[last..pos]);
        }
        last = pos + 1;
    }
    if last < data.len() {
        clean.extend_from_slice(&data[last..]);
    }

    // Handle rare non-newline whitespace (CR, tab, etc.)
    if clean.iter().any(|&b| is_whitespace(b)) {
        clean.retain(|&b| !is_whitespace(b));
    }

    // Parallel decode for large inputs
    if clean.len() >= PARALLEL_ENCODE_THRESHOLD {
        return decode_parallel(&clean, out);
    }

    decode_owned_clean(&mut clean, out)
}

/// Decode clean base64 data in parallel chunks.
fn decode_parallel(data: &[u8], out: &mut impl Write) -> io::Result<()> {
    let num_threads = rayon::current_num_threads().max(1);
    // Each chunk must be aligned to 4 bytes (base64 quadruplet boundary)
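    // (each 4-char group decodes to 3 bytes independently: e.g. "aGVs" -> "hel",
    // "bG8=" -> "lo")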
    let raw_chunk = (data.len() + num_threads - 1) / num_threads;
    let chunk_size = ((raw_chunk + 3) / 4) * 4;

    // Padding ('=') can only appear at the very end of the input, so every chunk
    // except possibly the last is 4-aligned and padding-free and decodes cleanly.
    let decoded_chunks: Vec<Result<Vec<u8>, _>> = data
        .par_chunks(chunk_size)
        .map(|chunk| match BASE64_ENGINE.decode_to_vec(chunk) {
            Ok(decoded) => Ok(decoded),
            Err(_) => Err(io::Error::new(io::ErrorKind::InvalidData, "invalid input")),
        })
        .collect();

    for chunk_result in decoded_chunks {
        let chunk = chunk_result?;
        out.write_all(&chunk)?;
    }

    Ok(())
}

/// Decode a clean (no whitespace) owned buffer in-place with SIMD.
fn decode_owned_clean(data: &mut [u8], out: &mut impl Write) -> io::Result<()> {
    if data.is_empty() {
        return Ok(());
    }
    match BASE64_ENGINE.decode_inplace(data) {
        Ok(decoded) => out.write_all(decoded),
        Err(_) => Err(io::Error::new(io::ErrorKind::InvalidData, "invalid input")),
    }
}

/// Decode clean base64 data (no whitespace) from a borrowed slice.
fn decode_borrowed_clean(out: &mut impl Write, data: &[u8]) -> io::Result<()> {
    if data.is_empty() {
        return Ok(());
    }
    match BASE64_ENGINE.decode_to_vec(data) {
        Ok(decoded) => {
            out.write_all(&decoded)?;
            Ok(())
        }
        Err(_) => Err(io::Error::new(io::ErrorKind::InvalidData, "invalid input")),
    }
}

/// Strip non-base64 characters (for -i / --ignore-garbage).
fn strip_non_base64(data: &[u8]) -> Vec<u8> {
    data.iter()
        .copied()
        .filter(|&b| is_base64_char(b))
        .collect()
}

/// Check if a byte is a valid base64 alphabet character or padding.
#[inline]
fn is_base64_char(b: u8) -> bool {
    b.is_ascii_alphanumeric() || b == b'+' || b == b'/' || b == b'='
}

/// Check if a byte is ASCII whitespace.
#[inline]
fn is_whitespace(b: u8) -> bool {
    matches!(b, b' ' | b'\t' | b'\n' | b'\r' | 0x0b | 0x0c)
}

/// Stream-encode from a reader to a writer. Used for stdin processing.
/// Uses 8MB read chunks and batches wrapped output for minimum syscalls.
/// The caller is expected to provide a suitably buffered or raw fd writer.
pub fn encode_stream(
    reader: &mut impl Read,
    wrap_col: usize,
    writer: &mut impl Write,
) -> io::Result<()> {
    let mut buf = vec![0u8; STREAM_ENCODE_CHUNK];

    let encode_buf_size = BASE64_ENGINE.encoded_length(STREAM_ENCODE_CHUNK);
    let mut encode_buf = vec![0u8; encode_buf_size];

    if wrap_col == 0 {
        // No wrapping: encode each chunk and write it directly.
        loop {
            let n = read_full(reader, &mut buf)?;
            if n == 0 {
                break;
            }
            let enc_len = BASE64_ENGINE.encoded_length(n);
            let encoded = BASE64_ENGINE.encode(&buf[..n], encode_buf[..enc_len].as_out());
            writer.write_all(encoded)?;
        }
    } else {
        // Wrapping: batch wrapped output into a pre-allocated buffer.
        // For an 8MB chunk at 76-col wrap, the wrapped output is ~10.8MB.
        let max_wrapped = encode_buf_size + (encode_buf_size / wrap_col + 2);
        let mut wrap_buf = vec![0u8; max_wrapped];
        let mut col = 0usize;

        loop {
            let n = read_full(reader, &mut buf)?;
            if n == 0 {
                break;
            }
            let enc_len = BASE64_ENGINE.encoded_length(n);
            let encoded = BASE64_ENGINE.encode(&buf[..n], encode_buf[..enc_len].as_out());

            // Build wrapped output in wrap_buf, then single write.
            let wp = build_wrapped_output(encoded, wrap_col, &mut col, &mut wrap_buf);
            writer.write_all(&wrap_buf[..wp])?;
        }

        if col > 0 {
            writer.write_all(b"\n")?;
        }
    }

    Ok(())
}

/// Build wrapped output into a pre-allocated buffer.
/// Returns the number of bytes written to wrap_buf.
/// Updates `col` to track the current column position across calls.
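/// For example, if the previous call ended mid-line at column 40 with
/// `wrap_col = 76`, the next call fills the remaining 36 columns before
/// emitting a newline.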
#[inline]
fn build_wrapped_output(
    data: &[u8],
    wrap_col: usize,
    col: &mut usize,
    wrap_buf: &mut [u8],
) -> usize {
    let mut rp = 0;
    let mut wp = 0;

    while rp < data.len() {
        let space = wrap_col - *col;
        let avail = data.len() - rp;

        if avail <= space {
            wrap_buf[wp..wp + avail].copy_from_slice(&data[rp..rp + avail]);
            wp += avail;
            *col += avail;
            if *col == wrap_col {
                wrap_buf[wp] = b'\n';
                wp += 1;
                *col = 0;
            }
            break;
        } else {
            wrap_buf[wp..wp + space].copy_from_slice(&data[rp..rp + space]);
            wp += space;
            wrap_buf[wp] = b'\n';
            wp += 1;
            rp += space;
            *col = 0;
        }
    }

    wp
}

/// Stream-decode from a reader to a writer. Used for stdin processing.
/// Reads 4MB chunks, strips whitespace, decodes, and writes incrementally.
/// Handles base64 quadruplet boundaries across chunk reads.
pub fn decode_stream(
    reader: &mut impl Read,
    ignore_garbage: bool,
    writer: &mut impl Write,
) -> io::Result<()> {
    const READ_CHUNK: usize = 4 * 1024 * 1024;
    let mut buf = vec![0u8; READ_CHUNK];
    let mut clean = Vec::with_capacity(READ_CHUNK);
    let mut carry: Vec<u8> = Vec::with_capacity(4);

    loop {
        let n = read_full(reader, &mut buf)?;
        if n == 0 {
            break;
        }

        // Build clean buffer: carry-over + stripped chunk
        clean.clear();
        clean.extend_from_slice(&carry);
        carry.clear();

        let chunk = &buf[..n];
        if ignore_garbage {
            clean.extend(chunk.iter().copied().filter(|&b| is_base64_char(b)));
        } else {
            // Strip newlines using SIMD memchr
            let mut last = 0;
            for pos in memchr::memchr_iter(b'\n', chunk) {
                if pos > last {
                    clean.extend_from_slice(&chunk[last..pos]);
                }
                last = pos + 1;
            }
            if last < n {
                clean.extend_from_slice(&chunk[last..]);
            }
            // Handle rare non-newline whitespace
            if clean.iter().any(|&b| is_whitespace(b) && b != b'\n') {
                clean.retain(|&b| !is_whitespace(b));
            }
        }

        let is_last = n < READ_CHUNK;

        if is_last {
            // Last chunk: decode everything (including padding)
            decode_owned_clean(&mut clean, writer)?;
        } else {
            // Save incomplete base64 quadruplet for next iteration
            let decode_len = (clean.len() / 4) * 4;
            if decode_len < clean.len() {
                carry.extend_from_slice(&clean[decode_len..]);
            }
            if decode_len > 0 {
                clean.truncate(decode_len);
                decode_owned_clean(&mut clean, writer)?;
            }
        }
    }

    // Handle any remaining carry-over bytes
    if !carry.is_empty() {
        decode_owned_clean(&mut carry, writer)?;
    }

    Ok(())
}

/// Read as many bytes as possible into buf, retrying on partial reads.
fn read_full(reader: &mut impl Read, buf: &mut [u8]) -> io::Result<usize> {
    let mut total = 0;
    while total < buf.len() {
        match reader.read(&mut buf[total..]) {
            Ok(0) => break,
            Ok(n) => total += n,
            Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        }
    }
    Ok(total)
}
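
// ---------------------------------------------------------------------------
// Illustrative round-trip checks: a minimal sketch of how the public entry
// points compose through in-memory buffers, not an exhaustive test suite.
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Cursor;

    #[test]
    fn encode_then_decode_round_trips_with_wrapping() {
        let data: Vec<u8> = (0u8..=255).cycle().take(10_000).collect();

        let mut encoded = Vec::new();
        encode_to_writer(&data, 76, &mut encoded).unwrap();
        // Wrapped output is newline-terminated and no line exceeds 76 characters.
        assert!(encoded.ends_with(b"\n"));
        assert!(encoded.split(|&b| b == b'\n').all(|line| line.len() <= 76));

        let mut decoded = Vec::new();
        decode_to_writer(&encoded, false, &mut decoded).unwrap();
        assert_eq!(decoded, data);
    }

    #[test]
    fn encode_without_wrapping_emits_no_newlines() {
        let data = b"hello base64 world";

        let mut encoded = Vec::new();
        encode_to_writer(data, 0, &mut encoded).unwrap();
        assert!(!encoded.contains(&b'\n'));

        let mut decoded = Vec::new();
        decode_to_writer(&encoded, false, &mut decoded).unwrap();
        assert_eq!(decoded, data.as_slice());
    }

    #[test]
    fn decode_owned_strips_garbage_when_asked() {
        // "aGVsbG8=" is "hello"; the '#' and the spaces are garbage characters.
        let mut noisy = b"aGVs # bG8=".to_vec();

        let mut decoded = Vec::new();
        decode_owned(&mut noisy, true, &mut decoded).unwrap();
        assert_eq!(decoded, b"hello".as_slice());
    }

    #[test]
    fn stream_encode_and_decode_round_trip() {
        let data: Vec<u8> = (0u8..=255).cycle().take(300_000).collect();

        let mut encoded = Vec::new();
        encode_stream(&mut Cursor::new(&data), 76, &mut encoded).unwrap();

        let mut decoded = Vec::new();
        decode_stream(&mut Cursor::new(&encoded), false, &mut decoded).unwrap();
        assert_eq!(decoded, data);
    }
}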