mkv_element/
io.rs

//! I/O utilities for reading and writing elements, in blocking
//! (`blocking_impl`) and tokio-based asynchronous (`tokio_impl`) variants.
2
3/// blocking I/O implementations, supporting reading and writing.
4pub mod blocking_impl {
5    use crate::{
6        base::Header,
7        element::Element,
8        functional::Encode,
9        master::{Cluster, Segment},
10    };
11    use std::io::{Read, Write};
12
    /// Read from a reader.
    ///
    /// Blocking counterpart of the tokio module's `AsyncReadFrom`.
    pub trait ReadFrom: Sized {
        /// Read `Self` from a reader.
        ///
        /// # Errors
        /// Returns an error when the stream cannot be decoded into `Self`
        /// (malformed data or an underlying I/O failure).
        fn read_from<R: Read>(r: &mut R) -> crate::Result<Self>;
    }
18
19    /// Read an element from a reader provided the header.
20    pub trait ReadElement: Sized + Element {
21        /// Read an element from a reader provided the header.
22        fn read_element<R: Read>(header: &Header, r: &mut R) -> crate::Result<Self> {
23            let body = header.read_body(r)?;
24            Self::decode_body(&mut &body[..])
25        }
26    }
27    impl<T: Element> ReadElement for T {}
28
29    impl Header {
30        /// Read the body of the element from a reader into memory.
31        pub(crate) fn read_body<R: Read>(&self, r: &mut R) -> crate::Result<Vec<u8>> {
32            // Segment and Cluster can have unknown size, but we don't support that here.
33            let size = if self.size.is_unknown && [Segment::ID, Cluster::ID].contains(&self.id) {
34                return Err(crate::Error::ElementBodySizeUnknown(self.id));
35            } else {
36                *self.size
37            };
38            // we allocate 4096 bytes upfront and grow as needed
39            let cap = size.min(4096) as usize;
40            let mut buf = Vec::with_capacity(cap);
41            let n = std::io::copy(&mut r.take(size), &mut buf)?;
42            if size != n {
43                return Err(crate::Error::OutOfBounds);
44            }
45            Ok(buf)
46        }
47    }
48
    /// Write to a writer.
    ///
    /// Blanket-implemented below for every type that implements `Encode`.
    pub trait WriteTo {
        /// Write to a writer.
        ///
        /// # Errors
        /// Returns an error when encoding fails or the writer reports an
        /// I/O failure.
        fn write_to<W: Write>(&self, w: &mut W) -> crate::Result<()>;
    }
54
55    impl<T: Encode> WriteTo for T {
56        fn write_to<W: Write>(&self, w: &mut W) -> crate::Result<()> {
57            //TODO should avoid the extra allocation here
58            let mut buf = vec![];
59            self.encode(&mut buf)?;
60            w.write_all(&buf)?;
61            Ok(())
62        }
63    }
64}
65/// tokio non-blocking I/O implementations, supporting async reading and writing.
66#[cfg(feature = "tokio")]
67#[cfg_attr(docsrs, doc(cfg(feature = "tokio")))]
68pub mod tokio_impl {
69    use crate::{
70        base::Header,
71        element::Element,
72        master::{Cluster, Segment},
73    };
74
75    use std::future::Future;
76    use tokio::io::{AsyncRead, AsyncReadExt, AsyncWriteExt};
77
    /// Read from a reader asynchronously.
    ///
    /// Async counterpart of the blocking module's `ReadFrom`.
    pub trait AsyncReadFrom: Sized {
        /// Read Self from a reader.
        ///
        /// # Errors
        /// The returned future resolves to an error when the stream cannot
        /// be decoded into `Self` (malformed data or an I/O failure).
        fn async_read_from<R: tokio::io::AsyncRead + Unpin>(
            r: &mut R,
        ) -> impl Future<Output = crate::Result<Self>>;
    }
85
86    /// Read an element from a reader provided the header asynchronously.
87    pub trait AsyncReadElement: Sized + Element {
88        /// Read an element from a reader provided the header.
89        fn async_read_element<R: tokio::io::AsyncRead + Unpin>(
90            header: &Header,
91            r: &mut R,
92        ) -> impl std::future::Future<Output = crate::Result<Self>> {
93            async {
94                let body = header.read_body_tokio(r).await?;
95                Self::decode_body(&mut &body[..])
96            }
97        }
98    }
99    impl<T: Element> AsyncReadElement for T {}
100
    /// Write to a writer asynchronously.
    ///
    /// Blanket-implemented below for every type that implements `Encode`.
    pub trait AsyncWriteTo {
        /// Write to a writer asynchronously.
        ///
        /// # Errors
        /// The returned future resolves to an error when encoding fails or
        /// the writer reports an I/O failure.
        fn async_write_to<W: tokio::io::AsyncWrite + Unpin>(
            &self,
            w: &mut W,
        ) -> impl std::future::Future<Output = crate::Result<()>>;
    }
109
110    impl<T: crate::functional::Encode> AsyncWriteTo for T {
111        async fn async_write_to<W: tokio::io::AsyncWrite + Unpin>(
112            &self,
113            w: &mut W,
114        ) -> crate::Result<()> {
115            //TODO should avoid the extra allocation here
116            let mut buf = vec![];
117            self.encode(&mut buf)?;
118            Ok(w.write_all(&buf).await?)
119        }
120    }
121
122    impl Header {
123        /// Read the body of the element from a reader into memory.
124        pub(crate) async fn read_body_tokio<R: AsyncRead + Unpin>(
125            &self,
126            r: &mut R,
127        ) -> crate::Result<Vec<u8>> {
128            // Segment and Cluster can have unknown size, but we don't support that here.
129            let size = if self.size.is_unknown && [Segment::ID, Cluster::ID].contains(&self.id) {
130                return Err(crate::Error::ElementBodySizeUnknown(self.id));
131            } else {
132                *self.size
133            };
134            // we allocate 4096 bytes upfront and grow as needed
135            let cap = size.min(4096) as usize;
136            let mut buf = Vec::with_capacity(cap);
137            let n = tokio::io::copy(&mut r.take(size), &mut buf).await?;
138            if size != n {
139                return Err(crate::Error::OutOfBounds);
140            }
141            Ok(buf)
142        }
143    }
144}