// hff_std/lib.rs

//! Implements the basic reader/writer functionality for HFF.
#![warn(missing_docs)]

// Reexport needed types.
#[cfg(feature = "compression")]
pub use hff_core::read::{decompress, decompress_exact};

// Pull in core if special behavior is needed.
pub use hff_core;

// Pull in common needs.  Aka: prelude.
pub use hff_core::{
    read::{ChunkView, Hff, TableView},
    utilities,
    write::{chunk, hff, table, ChunkDesc, DataSource, HffDesc, TableBuilder},
    ByteOrder, ChunkCache, ContentInfo, Ecc, Error, IdType, Result, Version, BE, LE, NE, OP,
};

// Helper traits which provide blanket implementations over the
// required trait combinations.

mod write_seek;
pub use write_seek::WriteSeek;

mod read_seek;
pub use read_seek::ReadSeek;

28/// Create a new builder instance.
29pub fn build<'a>(
30    tables: impl IntoIterator<Item = hff_core::write::TableBuilder<'a>>,
31) -> Result<hff_core::write::HffDesc<'a>> {
32    Ok(hff_core::write::hff(tables))
33}

mod read;
pub use read::*;

mod write;
pub use write::*;

#[cfg(test)]
mod tests {
    use super::*;

    /// Build the table hierarchy used by all of the checks below.
    ///
    /// Layout: one root table ("Test") with six chunks and two children;
    /// the first child nests two further levels of tables.
    fn test_table<'a>() -> Result<HffDesc<'a>> {
        Ok(hff([
            table((Ecc::new("Test"), Ecc::new("TestSub")))
                .metadata("This is some metadata attached to the table.")?
                .chunks([
                    chunk(
                        (Ecc::new("TRC0"), Ecc::new("TRS0")),
                        "Chunks can be most types.  This is passed as an arbitrary byte array."
                            .as_bytes(),
                    )?,
                    chunk(
                        (Ecc::new("TRC1"), Ecc::new("TRS1")),
                        "Chunks provided to the table will maintain their order.",
                    )?,
                    chunk(
                        (Ecc::new("TRC2"), Ecc::new("TRS2")),
                        "So, iterating through the chunks has the same order as presented here.",
                    )?,
                    chunk(
                        (Ecc::new("TRC3"), Ecc::new("TRS3")),
                        "Chunks can be supplied with data from multiple sources.",
                    )?,
                    chunk(
                        (Ecc::new("TRC4"), Ecc::new("TRS4")),
                        "In fact, providing a std::path::Path will pull the content of a file in as the chunk data.",
                    )?,
                    // Compress the string if compression is enabled.
                    #[cfg(feature = "compression")]
                    chunk(
                        (Ecc::new("TRC5"), Ecc::new("TRS5")),
                        // Compressing chunks is just sending in a tuple with the compression level.
                        // Using lzma for compression and the level is expected to be between 0 and 9.
                        (9, "In the case of a lazy_write, the file will be opened and streamed directly to the writer without being buffered in memory."),
                    )?,
                    // Don't compress the string if compression is disabled.
                    #[cfg(not(feature = "compression"))]
                    chunk(
                        (Ecc::new("TRC5"), Ecc::new("TRS5")),
                        "In the case of a lazy_write, the file will be opened and streamed directly to the writer without being buffered in memory.",
                    )?,
                ])
                .children([
                    table((Ecc::new("C0Prime"), Ecc::new("C0Sub")))
                        .metadata("Each table has its own metadata.")?
                        .chunks([chunk(
                            (Ecc::new("C0C0"), Ecc::new("C0S0")),
                            "Each table also has its own set of chunks.",
                        )?])
                        .children([
                            table((Ecc::new("C1Prime"), Ecc::new("C1Sub"))).chunks([chunk(
                                (Ecc::new("C1C0"), Ecc::new("C1S0")),
                                "They will only be listed while iterating that specific table.",
                            )?]),
                            table((Ecc::new("C2Prime"), Ecc::new("C2Sub"))).children([
                                table((Ecc::new("C3Prime"), Ecc::new("C3Sub"))).chunks([chunk(
                                    (Ecc::new("C2C0"), Ecc::new("C2S0")),
                                    "Tables don't *have* to have chunks, tables can be used to simply contain other tables.",
                                )?]),
                            ]),
                        ]),
                    table((Ecc::new("C4Prime"), Ecc::new("C4Sub")))
                        .chunks([chunk(
                            (Ecc::new("C4C0"), Ecc::new("C4S0")),
                            "The last chunk in the overall file.",
                        )?])
                        .metadata("And we're done.")?,
                ]),
        ]))
    }

    /// Validate the structure and content of an Hff built from [`test_table`].
    fn checks(hff: &Hff<ChunkCache>) {
        {
            // Check the content of root is as expected.
            let root = hff.tables().next().unwrap();
            assert_eq!(
                root.identifier(),
                (Ecc::new("Test"), Ecc::new("TestSub")).into()
            );
            assert_eq!(root.child_count(), 2);
            assert_eq!(root.chunk_count(), 6);

            // Check that we get a proper child iterator from the root:
            // exactly the two direct children, in insertion order.
            let mut root_children = root.iter();
            let c0 = root_children.next().unwrap();
            assert_eq!(c0.identifier().as_ecc2().0, "C0Prime".into());
            let c4 = root_children.next().unwrap();
            assert_eq!(c4.identifier().as_ecc2().0, "C4Prime".into());
            assert!(root_children.next().is_none());
        }

        {
            // Check the metadata for the root.
            let root = hff.tables().next().unwrap();
            // The resulting reader is just a reference to the data
            // in the content.  You can take a &mut Read on it if you
            // wish to use it with std::io methods such as copy.
            let metadata = hff.read(&root).unwrap();
            assert!(std::str::from_utf8(metadata)
                .unwrap()
                .starts_with("This is some metadata"));

            // Check the last table (second root child) metadata.
            let mut children = hff.tables().next().unwrap().iter();
            children.next();
            let c4 = children.next().unwrap();
            let metadata = hff.read(&c4).unwrap();
            assert!(std::str::from_utf8(metadata)
                .unwrap()
                .starts_with("And we're done."));
        }

        {
            // Check the root chunks are as expected.
            let root = hff.tables().next().unwrap();

            // (primary ecc, secondary ecc, expected content) per chunk,
            // in the order they were supplied to the builder.
            let test_data = [
                (
                    "TRC0",
                    "TRS0",
                    "Chunks can be most types.  This is passed as an arbitrary byte array.",
                ),
                (
                    "TRC1",
                    "TRS1",
                    "Chunks provided to the table will maintain their order.",
                ),
                (
                    "TRC2",
                    "TRS2",
                    "So, iterating through the chunks has the same order as presented here.",
                ),
                (
                    "TRC3",
                    "TRS3",
                    "Chunks can be supplied with data from multiple sources.",
                ),
                (
                    "TRC4",
                    "TRS4",
                    "In fact, providing a std::path::Path will pull the content of a file in as the chunk data.",
                ),
                (
                    "TRC5",
                    "TRS5",
                    "In the case of a lazy_write, the file will be opened and streamed directly to the writer without being buffered in memory.",
                ),
            ];
            for (index, chunk) in root.chunks().enumerate() {
                let test_entry = test_data[index];
                let (primary, secondary): (Ecc, Ecc) = chunk.identifier().into();
                assert_eq!(Ecc::new(test_entry.0), primary);
                assert_eq!(Ecc::new(test_entry.1), secondary);

                #[cfg(feature = "compression")]
                {
                    // TRS5 was written compressed; decompress before comparing.
                    let (_, secondary): (Ecc, Ecc) = chunk.identifier().into();
                    if secondary == Ecc::new("TRS5") {
                        let decompressed = decompress(hff.read(&chunk).unwrap()).unwrap();
                        assert_eq!(decompressed.len(), test_entry.2.len());
                        assert_eq!(decompressed, Vec::from(test_entry.2.as_bytes()));
                    } else {
                        assert_eq!(chunk.size(), test_entry.2.len());
                        assert_eq!(
                            hff.read(&chunk).unwrap(),
                            Vec::from(test_entry.2.as_bytes())
                        );
                    }
                }
                #[cfg(not(feature = "compression"))]
                {
                    assert_eq!(chunk.size(), test_entry.2.len());
                    assert_eq!(
                        hff.read(&chunk).unwrap(),
                        Vec::from(test_entry.2.as_bytes())
                    );
                }
            }

            {
                // (depth, primary ecc, secondary ecc) in expected visit order.
                let test_data = [
                    (0, "Test", "TestSub"),
                    (1, "C0Prime", "C0Sub"),
                    (2, "C1Prime", "C1Sub"),
                    (2, "C2Prime", "C2Sub"),
                    (3, "C3Prime", "C3Sub"),
                    (1, "C4Prime", "C4Sub"),
                ];
                // Test depth first iteration.
                for ((depth, table), data) in hff.depth_first().zip(test_data.iter()) {
                    assert_eq!(depth, data.0);
                    assert_eq!(
                        table.identifier(),
                        (Ecc::new(data.1), Ecc::new(data.2)).into()
                    );
                }
            }
        }
    }

    #[test]
    fn test() {
        use std::io::Seek;

        // Simple dev test for structure: lazy_write (native endian) into an
        // in-memory cursor, then read back and validate.
        {
            let content = test_table().unwrap();
            let buffer = vec![];
            let mut writer = std::io::Cursor::new(buffer);
            assert!(content
                .lazy_write::<hff_core::NE>(IdType::Ecc2, "Test", &mut writer)
                .is_ok());

            // Read it back in and iterate.
            writer.rewind().unwrap();
            let access = crate::read::read(&mut writer).unwrap();
            checks(&access);
        }

        // Simple dev test for structure: buffered write (opposing endian),
        // then read back and validate.
        {
            let content = test_table().unwrap();
            let mut buffer = vec![];

            assert!(content
                .write::<hff_core::OP>(IdType::Ecc2, "Test", &mut buffer)
                .is_ok());

            // Read it back in and iterate.
            let access = crate::read::read(&mut buffer.as_slice()).unwrap();
            checks(&access);
        }
    }
}