//! hff_std/write/mod.rs — stream writers for HFF descriptions.
use crate::WriteSeek;
use hff_core::{
    write::{DataArray, DataSource, HffDesc},
    ByteOrder, Ecc, Header, IdType, Result,
};
use std::io::Write;
7
8/// Writer trait for HffDesc.
9pub trait Writer {
10    /// Write to a stream.
11    fn write<E: ByteOrder>(
12        self,
13        id_type: IdType,
14        content_type: impl Into<Ecc>,
15        writer: &mut dyn Write,
16    ) -> Result<()>;
17
18    /// Write to a stream but finalize chunk lazilly during the write.
19    /// This requires a stream with both Write and Seek capabilities.
20    fn lazy_write<E: ByteOrder>(
21        self,
22        id_type: IdType,
23        content_type: impl Into<Ecc>,
24        writer: &mut dyn WriteSeek,
25    ) -> Result<()>;
26}
27
28impl<'a> Writer for HffDesc<'a> {
29    fn write<E: ByteOrder>(
30        self,
31        id_type: IdType,
32        content_type: impl Into<Ecc>,
33        writer: &mut dyn Write,
34    ) -> Result<()> {
35        let offset_to_blob = self.offset_to_blob() as u64;
36        let (mut tables, mut chunks, mut data) = self.finish();
37
38        let header = Header::new(
39            id_type,
40            content_type.into(),
41            tables.len() as u32,
42            chunks.len() as u32,
43        );
44        writer.write_all(header.to_bytes::<E>()?.as_slice())?;
45
46        // Prepare all the data in the data array so we have offsets and length.
47        let offset_len = data.prepare()?;
48
49        // Update the table metadata length/offset and chunk length/offset.
50        HffDesc::update_data(&mut tables, &mut chunks, offset_to_blob, &offset_len);
51
52        // And write the content+data blob.
53        writer.write_all(tables.to_bytes::<E>()?.as_slice())?;
54        writer.write_all(chunks.to_bytes::<E>()?.as_slice())?;
55        let _test = write_data_array(data, writer)?;
56        assert_eq!(_test, offset_len);
57
58        Ok(())
59    }
60
61    fn lazy_write<E: ByteOrder>(
62        self,
63        id_type: IdType,
64        content_type: impl Into<Ecc>,
65        mut writer: &mut dyn WriteSeek,
66    ) -> Result<()> {
67        let array_size = self.arrays_size();
68        let offset_to_blob = self.offset_to_blob() as u64;
69        let (mut tables, mut chunks, data) = self.finish();
70
71        let header = Header::new(
72            id_type,
73            content_type.into(),
74            tables.len() as u32,
75            chunks.len() as u32,
76        );
77        writer.write_all(header.to_bytes::<E>()?.as_slice())?;
78
79        // Write zero's for the table and chunk array.
80        // Use this rather than skipping in order to avoid any questionable
81        // differences between different backing types.
82        writer.write_all(&mut vec![0; array_size])?;
83
84        // Write the data and record the offset/length information.
85        let offset_len = write_data_array(data, &mut writer)?;
86
87        // Update the table metadata length/offset and chunk length/offset.
88        HffDesc::update_data(&mut tables, &mut chunks, offset_to_blob, &offset_len);
89
90        // Seek back to the tables/chunks.
91        writer.seek(std::io::SeekFrom::Start(Header::SIZE as u64))?;
92
93        // And write the tables and chunks.
94        writer.write_all(tables.to_bytes::<E>()?.as_slice())?;
95        writer.write_all(chunks.to_bytes::<E>()?.as_slice())?;
96
97        Ok(())
98    }
99}
100
101/// Write the data to the given stream.
102/// Returns a vector of offset into the writer (starting from 0)
103/// and the length of the data written without alignment padding.
104fn write_data_array(data_array: DataArray, writer: &mut dyn Write) -> Result<Vec<(u64, u64)>> {
105    let mut offset_len = vec![];
106
107    // Track where we are in the writer, starting from zero.
108    let mut offset = 0;
109    for mut item in data_array {
110        // Prepare each item.
111        // This is only for compressed data (at this time) to perform
112        // the compression.  Using std write here means it all has to
113        // be buffered into memory.
114        item.prepare()?;
115
116        // Write in the appropriate manner.
117        let length = match item {
118            DataSource::File(mut f, _) => std::io::copy(&mut f, writer)?,
119            DataSource::Owned(data) => std::io::copy(&mut data.as_slice(), writer)?,
120            DataSource::Ref(mut data) => std::io::copy(&mut data, writer)?,
121            #[cfg(feature = "compression")]
122            DataSource::Compressed(_, _, data) => {
123                std::io::copy(&mut data.unwrap().as_slice(), writer)?
124            }
125        };
126
127        // Record the offset and length.
128        offset_len.push((offset as u64, length));
129
130        // What is the padding requirement?
131        let padding = (length.next_multiple_of(16) - length) as usize;
132        // Track where we are in the output stream.
133        offset += length as usize + padding;
134
135        // Write the padding.
136        let padding = vec![0; padding];
137        writer.write_all(&padding)?;
138    }
139
140    Ok(offset_len)
141}
142
#[cfg(test)]
mod tests {
    use crate::*;
    use hff_core::{
        write::{chunk, hff, table},
        IdType,
    };

    /// An empty description must round-trip: write it, then inspect it back.
    #[test]
    fn empty() {
        let content = hff([]);
        let mut buffer = vec![];
        content
            .write::<hff_core::LE>(IdType::Ecc2, "Test", &mut buffer)
            .unwrap();
        let _hff = crate::read::inspect(&mut buffer.as_slice()).unwrap();
    }

    /// A nested table hierarchy with metadata and chunks must round-trip
    /// through write + inspect and remain traversable depth-first.
    #[test]
    fn test() {
        let content = hff([
            table((Ecc::new("p0"), Ecc::new("s0")))
                .metadata("123")
                .unwrap()
                .children([table((Ecc::new("p1"), Ecc::new("s1")))
                    .metadata("1234")
                    .unwrap()
                    .chunks([
                        chunk((Ecc::new("c0"), Ecc::new("cs0")), "chunk 0").unwrap(),
                        chunk((Ecc::new("c1"), Ecc::new("cs1")), "chunk 1").unwrap(),
                        chunk((Ecc::new("c2"), Ecc::new("cs2")), "chunk 2").unwrap(),
                    ])
                    .children([
                        table((Ecc::new("p2"), Ecc::new("s2")))
                            .metadata("12345")
                            .unwrap()
                            .chunks([]),
                        table((Ecc::new("p3"), Ecc::new("s3")))
                            .metadata("123456")
                            .unwrap()
                            .chunks([]),
                    ])])
                .chunks([]),
            table((Ecc::new("p4"), Ecc::new("s4")))
                .metadata("1234567")
                .unwrap(),
            table((Ecc::new("p5"), Ecc::new("s5")))
                .metadata("12345678")
                .unwrap()
                .chunks([chunk((Ecc::new("c3"), Ecc::new("cs3")), "chunk 3").unwrap()]),
        ]);

        let mut buffer = vec![];
        content
            .write::<hff_core::LE>(IdType::Ecc2, "Test", &mut buffer)
            .unwrap();

        let hff = crate::read::inspect(&mut buffer.as_slice()).unwrap();
        println!("{:#?}", hff);
        println!("-----------------------------");
        for (depth, table) in hff.depth_first() {
            println!("-- <{}>: <{:?}>", depth, table);
        }
        println!("-----------------------------");
    }
}