coreutils_rs/common/io.rs
use std::fs::{self, File};
use std::io::{self, Read};
use std::ops::Deref;
use std::path::Path;

#[cfg(target_os = "linux")]
use std::sync::atomic::{AtomicBool, Ordering};

use memmap2::{Mmap, MmapOptions};

/// Holds file data — either a zero-copy mmap or an owned Vec.
/// Dereferences to `&[u8]` for transparent use.
pub enum FileData {
    Mmap(Mmap),
    Owned(Vec<u8>),
}

impl Deref for FileData {
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        match self {
            FileData::Mmap(m) => m,
            FileData::Owned(v) => v,
        }
    }
}

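// Illustrative sketch (hypothetical helper, not used by the utilities): the
// Deref impl above lets callers treat both variants as a plain byte slice,
// regardless of whether the data is mapped or owned.
#[allow(dead_code)]
fn example_count_newlines(data: &FileData) -> usize {
    // Deref coercion: &FileData -> &[u8], so slice methods apply directly.
    data.iter().filter(|&&b| b == b'\n').count()
}
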
/// Threshold below which we use read() instead of mmap.
/// For files under 1MB, read() is faster since mmap has setup/teardown overhead
/// (page table creation for up to 256 pages, TLB flush on munmap) that exceeds
/// the zero-copy benefit.
const MMAP_THRESHOLD: u64 = 1024 * 1024;

/// Track whether O_NOATIME is supported to avoid repeated failed open() attempts.
/// After the first EPERM, we never try O_NOATIME again (saves one syscall per file).
#[cfg(target_os = "linux")]
static NOATIME_SUPPORTED: AtomicBool = AtomicBool::new(true);

/// Open a file with O_NOATIME on Linux to avoid atime inode writes.
/// Caches whether O_NOATIME works to avoid a double open() on every file.
#[cfg(target_os = "linux")]
fn open_noatime(path: &Path) -> io::Result<File> {
    use std::os::unix::fs::OpenOptionsExt;
    if NOATIME_SUPPORTED.load(Ordering::Relaxed) {
        match fs::OpenOptions::new()
            .read(true)
            .custom_flags(libc::O_NOATIME)
            .open(path)
        {
            Ok(f) => return Ok(f),
            Err(ref e) if e.raw_os_error() == Some(libc::EPERM) => {
                // O_NOATIME requires file ownership or CAP_FOWNER — disable globally.
                NOATIME_SUPPORTED.store(false, Ordering::Relaxed);
            }
            Err(e) => return Err(e), // Real error, propagate.
        }
    }
    File::open(path)
}

#[cfg(not(target_os = "linux"))]
fn open_noatime(path: &Path) -> io::Result<File> {
    File::open(path)
}

/// Read a file with zero-copy mmap for large files or read() for small files.
/// Opens once with O_NOATIME and uses fstat for metadata to save a syscall.
pub fn read_file(path: &Path) -> io::Result<FileData> {
    let file = open_noatime(path)?;
    let metadata = file.metadata()?;
    let len = metadata.len();

    if len > 0 && metadata.file_type().is_file() {
        // Small files: exact-size read from the already-open fd.
        // Uses read_full into a pre-sized buffer instead of read_to_end,
        // which avoids the grow-and-probe pattern (saves 1-2 extra read() syscalls).
        if len < MMAP_THRESHOLD {
            let mut buf = vec![0u8; len as usize];
            let n = read_full(&mut &file, &mut buf)?;
            buf.truncate(n);
            return Ok(FileData::Owned(buf));
        }

        // SAFETY: Read-only mapping. No MAP_POPULATE — it synchronously faults
        // all pages at 4KB granularity before MADV_HUGEPAGE can take effect,
        // causing ~25,600 minor page faults for 100MB (~12.5ms overhead). Without
        // it, the HUGEPAGE hint is set first, then POPULATE_READ prefaults using
        // 2MB pages (~50 faults).
        match unsafe { MmapOptions::new().map(&file) } {
            Ok(mmap) => {
                #[cfg(target_os = "linux")]
                {
                    // HUGEPAGE MUST come first: it reduces ~25,600 minor faults
                    // (4KB) to ~50 faults (2MB) for 100MB files, saving ~12ms of
                    // page fault overhead.
                    if len >= 2 * 1024 * 1024 {
                        let _ = mmap.advise(memmap2::Advice::HugePage);
                    }
                    let _ = mmap.advise(memmap2::Advice::Sequential);
                    // POPULATE_READ (Linux 5.14+): prefault with huge pages.
                    // Fall back to WillNeed on older kernels.
                    if len >= 4 * 1024 * 1024 {
                        if mmap.advise(memmap2::Advice::PopulateRead).is_err() {
                            let _ = mmap.advise(memmap2::Advice::WillNeed);
                        }
                    } else {
                        let _ = mmap.advise(memmap2::Advice::WillNeed);
                    }
                }
                Ok(FileData::Mmap(mmap))
            }
            Err(_) => {
                // mmap failed — fall back to read().
                let mut buf = Vec::with_capacity(len as usize);
                let mut reader = file;
                reader.read_to_end(&mut buf)?;
                Ok(FileData::Owned(buf))
            }
        }
    } else if !metadata.file_type().is_file() {
        // Non-regular file (pipe, FIFO, device, process substitution) — read from
        // the open fd. Pipes report len == 0 from stat(), so we must always try to
        // read regardless of len.
        let mut buf = Vec::new();
        let mut reader = file;
        reader.read_to_end(&mut buf)?;
        Ok(FileData::Owned(buf))
    } else {
        Ok(FileData::Owned(Vec::new()))
    }
}

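// Illustrative sketch (hypothetical): a typical read_file call site. The
// mmap-vs-read() threshold is internal, so small and large files go through
// the same API and the result is consumed as a byte slice either way.
#[allow(dead_code)]
fn example_read_and_hash(path: &Path) -> io::Result<u64> {
    let data = read_file(path)?; // mmap or owned Vec, chosen by file size
    // FNV-1a over the bytes, just to show uniform slice access.
    let mut hash: u64 = 0xcbf2_9ce4_8422_2325;
    for &b in data.iter() {
        hash ^= b as u64;
        hash = hash.wrapping_mul(0x0000_0100_0000_01b3);
    }
    Ok(hash)
}
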
/// Read a file entirely into a mutable Vec.
/// Uses exact-size allocation from fstat + single read() for efficiency.
/// Preferred over mmap when the caller needs mutable access (e.g., in-place decode).
pub fn read_file_vec(path: &Path) -> io::Result<Vec<u8>> {
    let file = open_noatime(path)?;
    let metadata = file.metadata()?;
    let len = metadata.len() as usize;
    if len == 0 {
        return Ok(Vec::new());
    }
    let mut buf = vec![0u8; len];
    let n = read_full(&mut &file, &mut buf)?;
    buf.truncate(n);
    Ok(buf)
}

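// Illustrative sketch (hypothetical): read_file_vec returns an owned, mutable
// buffer, so a transform can run in place with no second allocation. The same
// operation on a read-only Mmap would require copying out first.
#[allow(dead_code)]
fn example_uppercase_in_place(path: &Path) -> io::Result<Vec<u8>> {
    let mut buf = read_file_vec(path)?;
    for b in buf.iter_mut() {
        b.make_ascii_uppercase(); // in-place mutation of the owned buffer
    }
    Ok(buf)
}
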
/// Read a file always using mmap, with an optimal page fault strategy.
/// Used by tac for zero-copy output and parallel scanning.
///
/// Strategy: mmap WITHOUT MAP_POPULATE, then MADV_HUGEPAGE + MADV_POPULATE_READ.
/// MAP_POPULATE synchronously faults all pages at 4KB BEFORE MADV_HUGEPAGE
/// can take effect, causing ~25,600 minor faults for 100MB (~12.5ms overhead).
/// MADV_POPULATE_READ (Linux 5.14+) prefaults pages AFTER HUGEPAGE is set,
/// using 2MB huge pages (~50 faults = ~0.1ms). Falls back to WILLNEED on
/// older kernels.
pub fn read_file_mmap(path: &Path) -> io::Result<FileData> {
    let file = open_noatime(path)?;
    let metadata = file.metadata()?;
    let len = metadata.len();

    if len > 0 && metadata.file_type().is_file() {
        // No MAP_POPULATE: let MADV_HUGEPAGE take effect before page faults.
        let mmap_result = unsafe { MmapOptions::new().map(&file) };
        match mmap_result {
            Ok(mmap) => {
                #[cfg(target_os = "linux")]
                {
                    // HUGEPAGE first: must be set before any page faults occur.
                    // Reduces ~25,600 minor faults (4KB) to ~50 (2MB) for 100MB.
                    if len >= 2 * 1024 * 1024 {
                        let _ = mmap.advise(memmap2::Advice::HugePage);
                    }
                    // POPULATE_READ (Linux 5.14+): synchronously prefaults all pages
                    // using huge pages. Falls back to WILLNEED on older kernels.
                    if len >= 4 * 1024 * 1024 {
                        if mmap.advise(memmap2::Advice::PopulateRead).is_err() {
                            let _ = mmap.advise(memmap2::Advice::WillNeed);
                        }
                    } else {
                        let _ = mmap.advise(memmap2::Advice::WillNeed);
                    }
                }
                return Ok(FileData::Mmap(mmap));
            }
            Err(_) => {
                // mmap failed — fall back to read().
                let mut buf = vec![0u8; len as usize];
                let n = read_full(&mut &file, &mut buf)?;
                buf.truncate(n);
                return Ok(FileData::Owned(buf));
            }
        }
    } else if !metadata.file_type().is_file() {
        // Non-regular file (pipe, FIFO, device, process substitution) — read from
        // the open fd. Pipes report len == 0 from stat(), so we must always try to
        // read regardless of len.
        let mut buf = Vec::new();
        let mut reader = file;
        reader.read_to_end(&mut buf)?;
        Ok(FileData::Owned(buf))
    } else {
        Ok(FileData::Owned(Vec::new()))
    }
}

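// Illustrative sketch (hypothetical): the kind of backwards scan tac performs
// over a read_file_mmap result. Because POPULATE_READ prefaults the mapping
// above, a reverse traversal does not fault pages one at a time.
#[allow(dead_code)]
fn example_last_line(data: &FileData) -> &[u8] {
    let bytes: &[u8] = data;
    // Ignore the final byte so a trailing newline does not match.
    let end = bytes.len().saturating_sub(1);
    match bytes[..end].iter().rposition(|&b| b == b'\n') {
        Some(pos) => &bytes[pos + 1..], // last line (with its newline, if any)
        None => bytes,
    }
}
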
/// Get file size without reading it (for byte-count-only optimization).
pub fn file_size(path: &Path) -> io::Result<u64> {
    Ok(fs::metadata(path)?.len())
}

/// Read all bytes from stdin into a Vec.
/// On Linux, uses raw libc::read() to bypass Rust's StdinLock/BufReader overhead.
/// Uses a direct read() loop into a pre-allocated buffer instead of read_to_end(),
/// which avoids Vec's grow-and-probe pattern (extra read() calls and memcpy).
/// Callers should enlarge the pipe buffer via fcntl(F_SETPIPE_SZ) before calling.
/// Uses the full spare capacity for each read() to minimize syscalls.
pub fn read_stdin() -> io::Result<Vec<u8>> {
    #[cfg(target_os = "linux")]
    return read_stdin_raw();

    #[cfg(not(target_os = "linux"))]
    read_stdin_generic()
}

/// Raw libc::read() implementation for Linux — bypasses Rust's StdinLock
/// and BufReader layers entirely. StdinLock uses an internal 8KB BufReader,
/// which adds an extra memcpy for every read; raw read() goes directly
/// from the kernel pipe buffer to our Vec.
///
/// Pre-allocates 16MB to cover most workloads (benchmark = 10MB) without
/// over-allocating. For inputs > 16MB, doubles capacity on demand.
/// Each read() uses the full spare capacity to maximize bytes per syscall.
///
/// Note: callers (ftac, ftr, fbase64) are expected to enlarge the pipe
/// buffer via fcntl(F_SETPIPE_SZ) before calling this function. We don't
/// do it here to avoid accidentally shrinking a previously enlarged pipe.
#[cfg(target_os = "linux")]
fn read_stdin_raw() -> io::Result<Vec<u8>> {
    const PREALLOC: usize = 16 * 1024 * 1024;

    let mut buf: Vec<u8> = Vec::with_capacity(PREALLOC);

    loop {
        let spare_cap = buf.capacity() - buf.len();
        if spare_cap < 1024 * 1024 {
            // Grow by doubling (or by at least another PREALLOC bytes) to
            // minimize the realloc count.
            let new_cap = (buf.capacity() * 2).max(buf.len() + PREALLOC);
            buf.reserve(new_cap - buf.capacity());
        }
        let spare_cap = buf.capacity() - buf.len();
        let start = buf.len();

        // SAFETY: we read into the uninitialized spare capacity and advance
        // the length (set_len) only by the number of bytes actually read.
        let ret = unsafe {
            libc::read(
                0,
                buf.as_mut_ptr().add(start) as *mut libc::c_void,
                spare_cap,
            )
        };
        if ret < 0 {
            let err = io::Error::last_os_error();
            if err.kind() == io::ErrorKind::Interrupted {
                continue;
            }
            return Err(err);
        }
        if ret == 0 {
            break;
        }
        unsafe { buf.set_len(start + ret as usize) };
    }

    Ok(buf)
}

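// Illustrative sketch (hypothetical helper): how a caller such as ftac might
// enlarge the stdin pipe before read_stdin(). F_SETPIPE_SZ rounds the size up
// to a power of two and fails for unprivileged processes above
// /proc/sys/fs/pipe-max-size, so failures are ignored as best-effort.
#[cfg(target_os = "linux")]
#[allow(dead_code)]
fn example_grow_stdin_pipe() {
    // 1MB, subject to the system's pipe-max-size limit.
    const WANT: libc::c_int = 1024 * 1024;
    // Ignore the result: the default 64KB pipe still works, just with more syscalls.
    unsafe {
        libc::fcntl(0, libc::F_SETPIPE_SZ, WANT);
    }
}
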
/// Splice piped stdin to a memfd, then mmap it for zero-copy access.
/// Uses splice(2) to move data from the stdin pipe directly into a memfd's
/// page cache (kernel→kernel, no userspace copy). Returns a mutable mmap.
/// Returns None if stdin is not a pipe or splice fails.
///
/// For translate operations: the caller can modify the mmap'd data in place.
/// For filter operations (delete, cut): the caller reads from the mmap.
#[cfg(target_os = "linux")]
pub fn splice_stdin_to_mmap() -> io::Result<Option<memmap2::MmapMut>> {
    use std::os::unix::io::FromRawFd;

    // Check whether stdin is a pipe.
    let mut stat: libc::stat = unsafe { std::mem::zeroed() };
    if unsafe { libc::fstat(0, &mut stat) } != 0 {
        return Ok(None);
    }
    if (stat.st_mode & libc::S_IFMT) != libc::S_IFIFO {
        return Ok(None);
    }

    // Create a memfd for receiving the spliced data.
    // Use the raw syscall to avoid a glibc version dependency (memfd_create was
    // added in glibc 2.27, but the syscall works on any kernel >= 3.17). This
    // fixes cross-compilation to aarch64-unknown-linux-gnu with older sysroots.
    let memfd =
        unsafe { libc::syscall(libc::SYS_memfd_create, c"stdin_splice".as_ptr(), 0u32) as i32 };
    if memfd < 0 {
        return Ok(None); // memfd_create not supported, fall back
    }

    // Splice all data from the stdin pipe to the memfd (zero-copy: the kernel
    // moves the pipe pages).
    let mut total: usize = 0;
    loop {
        let n = unsafe {
            libc::splice(
                0,
                std::ptr::null_mut(),
                memfd,
                std::ptr::null_mut(),
                // Splice up to 1GB at a time (the kernel limits this to the
                // data actually in the pipe).
                1024 * 1024 * 1024,
                libc::SPLICE_F_MOVE,
            )
        };
        if n > 0 {
            total += n as usize;
        } else if n == 0 {
            break; // EOF
        } else {
            let err = io::Error::last_os_error();
            if err.kind() == io::ErrorKind::Interrupted {
                continue;
            }
            unsafe { libc::close(memfd) };
            return Ok(None); // splice failed, fall back to read
        }
    }

    if total == 0 {
        unsafe { libc::close(memfd) };
        return Ok(None);
    }

    // Truncate the memfd to the exact data size. splice() may leave the memfd
    // larger than `total` (page-aligned), and mmap would map the full file
    // including the zero padding. Without ftruncate, callers would get a mmap
    // with garbage/zero bytes beyond `total`.
    if unsafe { libc::ftruncate(memfd, total as libc::off_t) } != 0 {
        unsafe { libc::close(memfd) };
        return Ok(None);
    }

    // Wrap the memfd in a File for the memmap2 API, then mmap it.
    // MAP_SHARED allows in-place modification; populate() prefaults the pages.
    let file = unsafe { File::from_raw_fd(memfd) };
    let mmap = unsafe { MmapOptions::new().populate().map_mut(&file) };
    drop(file); // Close the memfd fd (the mmap stays valid; the kernel holds a reference).

    match mmap {
        Ok(mut mm) => {
            // Advise the kernel of sequential access, and request huge pages.
            unsafe {
                libc::madvise(
                    mm.as_mut_ptr() as *mut libc::c_void,
                    total,
                    libc::MADV_SEQUENTIAL,
                );
                if total >= 2 * 1024 * 1024 {
                    libc::madvise(
                        mm.as_mut_ptr() as *mut libc::c_void,
                        total,
                        libc::MADV_HUGEPAGE,
                    );
                }
            }
            Ok(Some(mm))
        }
        Err(_) => Ok(None),
    }
}

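// Illustrative sketch (hypothetical): the fallback pattern callers wrap around
// splice_stdin_to_mmap. None covers every soft failure (stdin not a pipe, no
// memfd_create, splice error), so the plain read() path always remains available.
#[cfg(target_os = "linux")]
#[allow(dead_code)]
fn example_stdin_bytes() -> io::Result<Vec<u8>> {
    if let Some(mm) = splice_stdin_to_mmap()? {
        // A real caller would operate on the mmap in place; copying here keeps
        // the sketch's return type simple.
        return Ok(mm.to_vec());
    }
    read_stdin() // fallback: raw read() loop
}
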
/// Generic read_stdin for non-Linux platforms.
#[cfg(not(target_os = "linux"))]
fn read_stdin_generic() -> io::Result<Vec<u8>> {
    const PREALLOC: usize = 16 * 1024 * 1024;
    const READ_BUF: usize = 4 * 1024 * 1024;

    let mut stdin = io::stdin().lock();
    let mut buf: Vec<u8> = Vec::with_capacity(PREALLOC);

    loop {
        let spare_cap = buf.capacity() - buf.len();
        if spare_cap < READ_BUF {
            buf.reserve(PREALLOC);
        }
        let spare_cap = buf.capacity() - buf.len();

        let start = buf.len();
        // SAFETY: temporarily expose the uninitialized spare capacity so the
        // StdinLock can read into it; we truncate back to the bytes actually read.
        unsafe { buf.set_len(start + spare_cap) };
        match stdin.read(&mut buf[start..start + spare_cap]) {
            Ok(0) => {
                buf.truncate(start);
                break;
            }
            Ok(n) => {
                buf.truncate(start + n);
            }
            Err(e) if e.kind() == io::ErrorKind::Interrupted => {
                buf.truncate(start);
                continue;
            }
            Err(e) => return Err(e),
        }
    }

    Ok(buf)
}

/// Read as many bytes as possible into buf, retrying on partial reads.
/// Ensures the full buffer is filled (or EOF reached), avoiding the
/// probe-read overhead of read_to_end.
/// Fast path: regular file reads usually return the full buffer on the first call.
#[inline]
fn read_full(reader: &mut impl Read, buf: &mut [u8]) -> io::Result<usize> {
    // Fast path: the first read() usually fills the entire buffer for regular files.
    let n = reader.read(buf)?;
    if n == buf.len() || n == 0 {
        return Ok(n);
    }
    // Slow path: partial read — retry to fill the buffer (pipes, slow devices).
    let mut total = n;
    while total < buf.len() {
        match reader.read(&mut buf[total..]) {
            Ok(0) => break,
            Ok(n) => total += n,
            Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        }
    }
    Ok(total)
}
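
// Minimal smoke-test sketch (assumes std only; the file name is arbitrary):
// round-trips a small payload through read_file and read_file_vec via a
// temporary file.
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;

    #[test]
    fn small_file_roundtrip() -> io::Result<()> {
        let path = std::env::temp_dir().join(format!("io_rs_test_{}", std::process::id()));
        let payload = b"hello\nworld\n";
        File::create(&path)?.write_all(payload)?;

        // Below MMAP_THRESHOLD, both paths take the exact-size read() route.
        assert_eq!(&*read_file(&path)?, payload.as_slice());
        assert_eq!(read_file_vec(&path)?.as_slice(), payload.as_slice());

        fs::remove_file(&path)?;
        Ok(())
    }
}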