mzdata 0.63.4

A library to read mass spectrometry data formats and a data model for mass spectra
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
#![allow(dead_code)]

use std::fs;
use std::io;
#[allow(unused)]
use std::io::prelude::*;
use std::path;
#[allow(unused)]
use std::path::PathBuf;

#[cfg(feature = "checksum")]
use md5::{Context as MD5Context, Digest};
#[cfg(feature = "checksum")]
use sha1::{self, Digest as _};

/// An in-memory, readable and seekable byte stream backed by a `Vec<u8>`
type ByteBuffer = io::Cursor<Vec<u8>>;

/// Where a data source's bytes come from: a filesystem path, an arbitrary
/// readable stream, or nothing at all.
#[derive(Debug, Clone, Default)]
pub enum FileWrapper<T: io::Read> {
    /// A path on the local filesystem
    FileSystem(path::PathBuf),
    /// An arbitrary stream of bytes implementing [`io::Read`]
    Stream(T),
    /// No backing source
    #[default]
    Empty,
}

/// Controls the level of spectral detail read from an MS data file.
///
/// Trades decoding work for completeness: from eagerly decoding everything
/// ([`DetailLevel::Full`]) down to skipping peak data ([`DetailLevel::MetadataOnly`]).
#[derive(Debug, Default, Clone, Copy, Hash, PartialEq, Eq)]
pub enum DetailLevel {
    #[default]
    /// Read all spectral data, including peak data, eagerly decoding it. This is the default
    Full,
    /// Read all spectral data, including peak data but defer decoding until later if possible.
    /// Check a format reader's documentation to see if it supports lazy loading. Lazy loading
    /// is only really of value for dense profile mode data or very, very long peak lists
    /// that are large and expensive to decode.
    Lazy,
    /// Read only the metadata of spectra, ignoring peak data entirely
    MetadataOnly,
}

/// Internal record of where a reader's data comes from, wrapping a [`FileWrapper`]
#[derive(Debug, Clone, Default)]
pub(crate) struct FileSource<T: io::Read> {
    // The underlying source: a path, a stream, or empty
    pub source: FileWrapper<T>,
}

// This really should be a full file-like object abstraction, but that
// feels like it is beyond the scope of this crate. Something like
// https://github.com/bnjjj/chicon-rs
impl<T: io::Read> FileSource<T> {
    /// Build a source backed by a filesystem path
    pub fn from_path<P>(path: P) -> FileSource<T>
    where
        P: Into<path::PathBuf>,
    {
        let source = FileWrapper::FileSystem(path.into());
        FileSource { source }
    }

    /// Build a source backed by an arbitrary readable stream
    pub fn from_stream(stream: T) -> FileSource<T> {
        let source = FileWrapper::Stream(stream);
        FileSource { source }
    }

    /// The path of the backing file, if any. Streams and empty sources have no name.
    pub fn file_name(&self) -> Option<&path::Path> {
        if let FileWrapper::FileSystem(path) = &self.source {
            Some(path)
        } else {
            None
        }
    }

    /// The path where a sidecar index file for this source would live, formed
    /// by replacing the file's extension with `index.json`. `None` for streams,
    /// empty sources, or paths without a file name/parent.
    pub fn index_file_name(&self) -> Option<path::PathBuf> {
        match &self.source {
            FileWrapper::FileSystem(path) => {
                let stem = path.file_name()?;
                let parent = path.parent()?;
                Some(parent.join(stem).with_extension("index.json"))
            }
            _ => None,
        }
    }

    /// Whether the sidecar index file named by [`FileSource::index_file_name`] exists on disk
    pub fn has_index_file(&self) -> bool {
        self.index_file_name()
            .map(|path| path.exists())
            .unwrap_or(false)
    }
}

/// Create a [`FileSource`] over [`fs::File`] from a filesystem path
pub fn from_path<P>(path: P) -> FileSource<fs::File>
where
    P: Into<path::PathBuf>,
{
    FileSource::from_path(path)
}

/// Anything convertible into a [`path::PathBuf`] can be converted into a
/// path-backed [`FileSource`]
impl<T, P> From<P> for FileSource<T>
where
    P: Into<path::PathBuf>,
    T: io::Read,
{
    fn from(path: P) -> FileSource<T> {
        FileSource::from_path(path)
    }
}

/// A wrapper around an [`io::Read`] to provide limited [`io::Seek`] access even if the
/// underlying stream does not support it. It pre-buffers the next *n* bytes of content
/// in memory and permits seek operations within that range, but fails all seeks beyond
/// that range.
///
/// This is useful for working with [`io::stdin`] or a network stream.
pub struct PreBufferedStream<R: io::Read> {
    // The underlying, possibly non-seekable stream
    stream: R,
    // The pre-read prefix of `stream`, seekable in memory
    buffer: io::Cursor<Vec<u8>>,
    // Number of valid bytes held in `buffer`
    buffer_size: usize,
    // The reader's logical position in the overall stream
    position: usize,
}

impl<R: io::Read> io::Seek for PreBufferedStream<R> {
    /// Seek within the pre-buffered prefix.
    ///
    /// Only positions inside the prefix are reachable, and only while the
    /// reader has not yet consumed past the prefix. [`io::SeekFrom::End`]
    /// is never supported because the total stream length is unknown.
    fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
        match pos {
            io::SeekFrom::Start(offset) => {
                if self.position > self.buffer_size {
                    return Err(io::Error::new(
                        io::ErrorKind::InvalidInput,
                        "Seeking after leaving buffered prefix",
                    ));
                } else if offset as usize > self.buffer_size {
                    // The target of `SeekFrom::Start` is `offset` itself, so it is
                    // compared against the prefix length directly. (Previously the
                    // current position was added to `offset`, wrongly rejecting
                    // valid seeks back into the prefix after some reading.)
                    return Err(io::Error::new(
                        io::ErrorKind::InvalidInput,
                        "Cannot seek beyond buffered prefix",
                    ));
                }
                self.position = offset as usize;
                let r = self.buffer.seek(pos);
                if log::log_enabled!(log::Level::Trace) {
                    log::trace!(
                        "{pos:?} Position {0} -> {1}: {r:?}",
                        self.position,
                        self.buffer
                            .stream_position()
                            .map(|s| s.to_string())
                            .unwrap_or_else(|e| format!("err: {e}"))
                    );
                }
                r
            }
            io::SeekFrom::End(_) => Err(io::Error::new(
                io::ErrorKind::Unsupported,
                "Cannot seek relative to the end of PreBufferedStream",
            )),
            io::SeekFrom::Current(offset) => {
                if self.position > self.buffer_size {
                    return Err(io::Error::new(
                        io::ErrorKind::InvalidInput,
                        "Seeking after leaving buffered prefix",
                    ));
                }
                if offset < 0 {
                    if offset.unsigned_abs() as usize > self.position {
                        Err(io::Error::new(
                            io::ErrorKind::InvalidInput,
                            "Cannot seek to negative position",
                        ))
                    } else {
                        self.position =
                            self.position.saturating_sub(offset.unsigned_abs() as usize);
                        let r = self.buffer.seek(io::SeekFrom::Start(self.position as u64));
                        if log::log_enabled!(log::Level::Trace) {
                            log::trace!(
                                "{pos:?} Position {0} -> {1}: {r:?}",
                                self.position,
                                self.buffer
                                    .stream_position()
                                    .map(|s| s.to_string())
                                    .unwrap_or_else(|e| format!("err: {e}"))
                            );
                        }
                        r
                    }
                } else if offset as usize + self.position > self.buffer_size {
                    Err(io::Error::new(
                        io::ErrorKind::InvalidInput,
                        "Cannot seek beyond buffered prefix",
                    ))
                } else {
                    // Keep the logical position in sync with the cursor. Previously a
                    // forward relative seek left `self.position` unchanged, desyncing
                    // it from the buffer cursor and from `stream_position`.
                    self.position += offset as usize;
                    let r = self.buffer.seek(io::SeekFrom::Current(offset));
                    if log::log_enabled!(log::Level::Trace) {
                        log::trace!(
                            "{pos:?} Position {0} -> {1}: {r:?}",
                            self.position,
                            self.buffer
                                .stream_position()
                                .map(|s| s.to_string())
                                .unwrap_or_else(|e| format!("err: {e}"))
                        );
                    }
                    r
                }
            }
        }
    }

    /// Report the logical position from the tracked counter, without touching
    /// the underlying stream.
    fn stream_position(&mut self) -> io::Result<u64> {
        Ok(self.position as u64)
    }
}

impl<R: io::Read> io::Read for PreBufferedStream<R> {
    /// Serve bytes first from the in-memory prefix, then fall through to the
    /// underlying stream once the prefix is exhausted.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let start_position = self.position;
        let mut consumed = 0usize;
        // Drain from the buffered prefix while we are still inside it
        if self.position < self.buffer_size {
            consumed = self.buffer.read(buf)?;
            self.position += consumed;
        }
        // Whatever the prefix could not satisfy comes from the raw stream
        if consumed < buf.len() {
            let from_stream = self.stream.read(&mut buf[consumed..])?;
            self.position += from_stream;
        }
        Ok(self.position - start_position)
    }
}

/// Default prefix size: 2<sup>16</sup> = 65,536 bytes
const BUFFER_SIZE: usize = 2usize.pow(16);

impl<R: io::Read> PreBufferedStream<R> {
    /// Create a new pre-buffered stream wrapping `stream` with a buffer size of 2<sup>16</sup> bytes.
    ///
    /// This method fails if attempting to fill the buffer fails.
    pub fn new(stream: R) -> io::Result<Self> {
        Self::new_with_buffer_size(stream, BUFFER_SIZE)
    }

    /// Create a new pre-buffered stream wrapping `stream` with a buffer size of `buffer_size` bytes.
    ///
    /// This method fails if attempting to fill the buffer fails.
    pub fn new_with_buffer_size(stream: R, buffer_size: usize) -> io::Result<Self> {
        let buffer = io::Cursor::new(Vec::with_capacity(buffer_size));
        let mut inst = Self {
            stream,
            buffer_size,
            buffer,
            position: 0,
        };
        inst.prefill_buffer()?;
        Ok(inst)
    }

    /// Fill the in-memory prefix from the underlying stream.
    ///
    /// Reads in a loop until `buffer_size` bytes are held or the stream hits
    /// EOF, since a single `read` call may legally return fewer bytes than
    /// requested. The buffer is truncated to the bytes actually read — the
    /// previous `shrink_to` only reduced *capacity*, leaving zero padding in
    /// the cursor when the stream was shorter than the requested size.
    fn prefill_buffer(&mut self) -> io::Result<usize> {
        let buffer = self.buffer.get_mut();
        buffer.resize(self.buffer_size, 0);
        let mut bytes_read = 0;
        while bytes_read < self.buffer_size {
            match self.stream.read(&mut buffer[bytes_read..]) {
                Ok(0) => break,
                Ok(n) => bytes_read += n,
                // Reads interrupted by a signal are retryable, not fatal
                Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
                Err(e) => return Err(e),
            }
        }
        buffer.truncate(bytes_read);
        self.buffer_size = bytes_read;
        Ok(bytes_read)
    }
}

#[cfg(feature = "checksum")]
/// Compute a SHA-1 digest of the file at `path`, returned as a lowercase hex string.
///
/// # Errors
/// Returns any I/O error raised while opening or reading the file. (Previously
/// a mid-file read error silently terminated the loop, returning the checksum
/// of a file *prefix* as if it were the whole file.)
pub fn checksum_file(path: &PathBuf) -> io::Result<String> {
    let mut checksum = sha1::Sha1::new();
    let mut reader = io::BufReader::new(fs::File::open(path)?);
    // 1 MiB scratch buffer
    let mut buf = vec![0; 2usize.pow(20)];
    loop {
        match reader.read(&mut buf) {
            Ok(0) => break,
            Ok(i) => checksum.update(&buf[..i]),
            // Retry reads interrupted by a signal rather than failing
            Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        }
    }
    Ok(base16ct::lower::encode_string(&checksum.finalize()))
}

#[cfg(feature = "checksum")]
/// A writable stream that keeps a running MD5 checksum of all bytes
#[derive(Clone)]
pub(crate) struct MD5HashingStream<T: io::Write> {
    // The wrapped writer that actually receives the bytes
    pub stream: T,
    // Running MD5 state, updated as bytes are written
    pub context: MD5Context,
}

#[cfg(feature = "checksum")]
impl<T: io::Write> MD5HashingStream<T> {
    /// Wrap `file`, starting from a fresh MD5 state
    pub fn new(file: T) -> MD5HashingStream<T> {
        MD5HashingStream {
            stream: file,
            context: MD5Context::new(),
        }
    }

    /// Finalize a snapshot of the running checksum, leaving the live state
    /// untouched so writing can continue afterwards
    pub fn compute(&self) -> Digest {
        let snapshot = self.context.clone();
        snapshot.compute()
    }

    /// Mutable access to the wrapped writer
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.stream
    }

    /// Unwrap into the inner writer, discarding the checksum state
    pub fn into_inner(self) -> T {
        self.stream
    }
}

#[cfg(feature = "checksum")]
impl<T: io::Write> io::Write for MD5HashingStream<T> {
    /// Write `buf` to the inner stream, folding the bytes that were actually
    /// accepted into the running MD5 state.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Write first, then hash only the accepted prefix: `Write::write` may
        // be partial, and hashing all of `buf` up front corrupted the checksum
        // whenever the inner writer accepted fewer than `buf.len()` bytes.
        let n = self.stream.write(buf)?;
        self.context.consume(&buf[..n]);
        Ok(n)
    }

    /// Flush the inner stream; the digest state needs no flushing
    fn flush(&mut self) -> io::Result<()> {
        self.stream.flush()
    }
}

#[cfg(feature = "checksum")]
impl<T: io::Seek + io::Write> io::Seek for MD5HashingStream<T> {
    // Delegates seeking to the inner stream.
    //
    // NOTE(review): the MD5 state is append-only; seeking back and rewriting
    // bytes will not rewind the digest, so the checksum reflects the byte
    // sequence as written, not the final file contents — confirm callers only
    // seek in ways compatible with that.
    fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
        self.stream.seek(pos)
    }
}


#[cfg(feature = "parallelism")]
mod parallelism {
    use rayon::prelude::*;
    use crate::prelude::*;

    use super::*;

    /// A helper type to load spectra concurrently across multiple threads.
    /// Requires the reader type implement [`MZFileReader`].
    ///
    /// # Note
    /// This helper is still too low level. Expect a higher level API to eventually become available.
    pub struct ConcurrentLoader {
        // The file to load; each worker thread opens its own reader over it
        path: PathBuf,
        // Worker count; `None` falls back to rayon's default/global pool size
        num_threads: Option<usize>,
    }

    impl ConcurrentLoader {
        /// Create a loader for `path`, optionally pinning the number of worker threads
        pub fn new(path: PathBuf, num_threads: Option<usize>) -> Self {
            Self { path, num_threads }
        }

        /// Do the actual concurrent loading
        pub fn load<F: MZFileReader<C, D, S>, C: CentroidLike, D: DeconvolutedCentroidLike, S: SpectrumLike<C, D> + Send>(self) -> io::Result<Vec<S>> {
            // Open the file once up front just to learn the total spectrum count
            let guide = F::open_path(&self.path)?;
            let n = guide.len();

            let num_threads = self.num_threads.unwrap_or_else(|| rayon::max_num_threads());

            let task = || -> Vec<S>{
                // Partition the index range into chunks (~3 per thread, at least 10
                // spectra each) so work is balanced across the pool
                let mut chunks: Vec<_> = (0..n).into_par_iter().chunks((n / num_threads / 3).max(10)).map(|ii| {
                    let start = ii[0];
                    // NOTE(review): `unwrap` panics inside the worker if re-opening the
                    // file fails — consider propagating the error to the caller instead
                    let mut local_reader = F::open_path(&self.path).unwrap();
                    // `flat_map` over `Option` silently skips indices that fail to load
                    let spectra: Vec<_> = ii.into_iter().flat_map(|i| local_reader.get_spectrum_by_index(i)).collect();
                    (start ,spectra)
                }).collect();
                // Restore the original spectrum order by each chunk's starting index
                chunks.par_sort_by(|a, b| a.0.cmp(&b.0));
                chunks.into_iter().map(|(_, chunk)| chunk).flatten().collect()
            };

            // When a thread count was requested, run inside a dedicated, named pool;
            // otherwise use rayon's global pool
            let out = if let Some(num_threads) = self.num_threads {
                let pool = rayon::ThreadPoolBuilder::new().num_threads(num_threads).thread_name(|i| format!("mzdata-concurrent-loader-{i}")).build().unwrap();
                pool.install(|| task())
            } else {
                task()
            };

            Ok(out)
        }
    }
}


#[cfg(feature = "parallelism")]
pub use parallelism::ConcurrentLoader;


#[cfg(test)]
mod test {
    use super::*;

    // A stream-backed FileSource has no file name and yields its bytes intact
    #[test]
    fn test_from_buffer() {
        let mut buff: Vec<u8> = Vec::new();
        buff.extend(b"foobar");
        let stream = ByteBuffer::new(buff);
        let mut out: Vec<u8> = Vec::new();
        let desc = FileSource::<ByteBuffer>::from_stream(stream);
        assert!(desc.file_name().is_none());
        if let FileWrapper::Stream(mut buff) = desc.source {
            buff.read_to_end(&mut out).unwrap();
            assert_eq!(out, b"foobar");
        }
    }

    // Seeking within the buffered prefix works; seeking beyond it errors
    #[test]
    fn test_prebuffering() -> io::Result<()> {
        let mut fh = fs::File::open("./test/data/batching_test.mzML")?;
        let mut data = Vec::new();
        fh.read_to_end(&mut data)?;
        let content = io::Cursor::new(data);
        let mut stream = PreBufferedStream::new_with_buffer_size(content, 512)?;

        assert_eq!(stream.buffer_size, 512);

        let mut buffer = [0u8; 128];
        stream.read_exact(&mut buffer)?;
        assert_eq!(buffer.len(), 128);
        assert!(buffer.starts_with(b"<?xml version=\"1.0\" encoding=\"utf-8\"?>"));

        // Rewinding within the prefix must reproduce the same bytes
        let mut buffer2 = [0u8; 128];
        stream.seek(io::SeekFrom::Start(0))?;
        stream.read_exact(&mut buffer2)?;

        assert_eq!(buffer, buffer2);

        // 556 > 512-byte prefix, so this seek must fail
        assert!(stream.seek(io::SeekFrom::Start(556)).is_err());

        Ok(())
    }

    // Concurrent loading yields every spectrum exactly once, in index order
    #[cfg(feature = "parallelism")]
    #[test]
    fn test_parallel_load() -> io::Result<()> {
        use crate::prelude::*;

        let loader= ConcurrentLoader::new("./test/data/batching_test.mzML".into(), Some(4));
        let spectra = loader.load::<crate::MzMLReader<fs::File>, _, _, _>()?;
        assert_eq!(spectra.len(), 2232);

        // Indices must be contiguous starting from 0
        let _ = spectra.iter().fold(0, |last, spec| {
            assert_eq!(last, spec.index());
            spec.index() + 1
        });

        Ok(())
    }
}