edf_reader/
async_reader.rs1use crate::file_reader::AsyncFileReader;
4use crate::model::{EDFHeader, EDF_HEADER_BYTE_SIZE};
5
6use futures::future::err;
7use futures::Future;
8use std::io::Error;
9
/// Asynchronous EDF (European Data Format) file reader.
///
/// Holds the parsed [`EDFHeader`] alongside the underlying [`AsyncFileReader`]
/// so that data windows can be located and decoded without re-reading the
/// header on every request.
pub struct AsyncEDFReader<T: AsyncFileReader> {
    /// Parsed EDF header (general metadata plus per-channel descriptions).
    pub edf_header: EDFHeader,
    // Underlying byte source; read offsets are absolute from the start of
    // the file.
    file_reader: T,
}
14
15impl<T: 'static + AsyncFileReader> AsyncEDFReader<T> {
16 pub fn init_with_file_reader(
22 file_reader: T,
23 ) -> Box<Future<Item = AsyncEDFReader<T>, Error = std::io::Error>> {
24 Box::new(
25 file_reader
26 .read_async(0, 256)
27 .map(|general_header_raw: Vec<u8>| {
28 let mut edf_header = EDFHeader::build_general_header(general_header_raw);
29
30 file_reader
31 .read_async(
32 256,
33 edf_header.number_of_signals * EDF_HEADER_BYTE_SIZE as u64,
34 )
35 .map(|channel_headers_raw| {
36 edf_header.build_channel_headers(channel_headers_raw);
37
38 AsyncEDFReader {
39 edf_header: edf_header,
40 file_reader,
41 }
42 })
43 })
44 .flatten(),
45 )
46 }
47
48 pub fn read_data_window(
50 &self,
51 start_time_ms: u64, duration_ms: u64, ) -> Box<Future<Item = Vec<Vec<f32>>, Error = std::io::Error>> {
54 if let Err(e) = super::check_bounds(start_time_ms, duration_ms, &self.edf_header) {
56 return Box::new(err::<Vec<Vec<f32>>, Error>(e));
57 }
58
59 let first_block_start_time = start_time_ms - start_time_ms % self.edf_header.block_duration;
62
63 let first_block_index = first_block_start_time / self.edf_header.block_duration;
64
65 let number_of_blocks_to_get =
66 (duration_ms as f64 / self.edf_header.block_duration as f64).ceil() as u64;
67
68 let offset = self.edf_header.byte_size_header
69 + first_block_index * self.edf_header.get_size_of_data_block();
70
71 let header = self.edf_header.clone();
72
73 Box::new(
74 self.file_reader
75 .read_async(
76 offset,
77 number_of_blocks_to_get * self.edf_header.get_size_of_data_block(),
78 )
79 .map(move |data: Vec<u8>| {
80 let mut result: Vec<Vec<f32>> = Vec::new();
81
82 for _ in 0..header.number_of_signals {
83 result.push(Vec::new());
84 }
85
86 let mut index = 0;
87
88 for _ in 0..number_of_blocks_to_get {
89 for (j, channel) in header.channels.iter().enumerate() {
90 for _ in 0..channel.number_of_samples_in_data_record {
91 let sample = super::get_sample(&data, index) as f32;
92 result[j].push(
93 (sample - channel.digital_minimum as f32)
94 * channel.scale_factor
95 + channel.physical_minimum,
96 );
97 index += 1;
98 }
99 }
100 }
101
102 result
103 }),
104 )
105 }
106}
107
108