tmf 0.2.1

TMF is a 3D model compression format achieving compression ratios between 1.95 and 4.2, while being very fast to read and having no visual difference between compressed and uncompressed models.
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
use std::io::{BufWriter, Read, Result, Write};
#[cfg(not(feature = "byte_rw"))]
type UnalignedStorage = usize;
// BUGFIX: both cfg branches previously aliased `usize`, which made the
// `byte_rw` feature a no-op and left the dedicated single-byte path in
// `read_to_internal_storage` (the `size_of == 1` branch) as dead code.
// With `byte_rw` enabled, storage falls back to byte-at-a-time I/O.
#[cfg(feature = "byte_rw")]
type UnalignedStorage = u8;

/// Number of bits in the internal bit-buffer storage word.
const UNALIGNED_STORAGE_BITS: u8 = (std::mem::size_of::<UnalignedStorage>() * 8) as u8;
pub struct UnalignedReader<R: Read> {
    /// Underlying reader the bits are pulled from.
    /// NOTE(review): despite the old comment this is a plain `R`, not a
    /// `BufReader` — wrap the reader yourself if buffering is wanted.
    reader: R,
    /// Bit buffer: the storage word most recently filled from `reader`.
    /// Unread bits occupy the most-significant positions.
    current_byte: UnalignedStorage,
    /// Number of bits of `current_byte` that have already been consumed.
    bits_read: u8,
}
impl<R: Read> UnalignedReader<R> {
    /// Refills `current_byte` from the underlying reader and resets `bits_read`.
    ///
    /// Returns `UnexpectedEof` if no bytes at all could be read. On a partial
    /// read near end of stream, `bits_read` is pre-set so that only the bits
    /// actually read are available for consumption.
    fn read_to_internal_storage(&mut self) -> Result<()> {
        // For u8 storage, use the simpler, old version. For other sizes this
        // branch can never be taken and will be optimised out.
        if std::mem::size_of::<UnalignedStorage>() == 1 {
            let mut tmp: [u8; std::mem::size_of::<UnalignedStorage>()] =
                [0; std::mem::size_of::<UnalignedStorage>()];
            self.bits_read = 0;
            self.reader.read_exact(&mut tmp)?;
            self.current_byte = tmp[0] as UnalignedStorage;
        } else {
            let mut tmp: [u8; std::mem::size_of::<UnalignedStorage>()] =
                [0; std::mem::size_of::<UnalignedStorage>()];
            // BUGFIX: a single `read` call may legally return fewer bytes than
            // are available (e.g. from a pipe or network stream), which would
            // silently truncate the bit stream. Loop until the word is full or
            // the stream reports EOF.
            let mut filled = 0;
            while filled < tmp.len() {
                let n = self.reader.read(&mut tmp[filled..])?;
                if n == 0 {
                    break;
                }
                filled += n;
            }
            if filled == 0 {
                return Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, ""));
            }
            // Valid data occupies the high (big-endian first) bytes; mark the
            // missing low bits as already consumed.
            self.bits_read = (8 * (std::mem::size_of::<UnalignedStorage>() - filled)) as u8;
            self.current_byte = UnalignedStorage::from_be_bytes(tmp);
        }
        Ok(())
    }
    /// Reads exactly one bit from UBA.
    pub fn read_bit(&mut self) -> Result<bool> {
        // If all bits in current_byte read, read new byte with new bits, and set amount of bits bits_read in current bit back to 0.
        if self.bits_read >= UNALIGNED_STORAGE_BITS {
            self.read_to_internal_storage()?;
            if (UNALIGNED_STORAGE_BITS - self.bits_read) < 1 {
                use std::io::{Error, ErrorKind};
                return Err(Error::from(ErrorKind::UnexpectedEof));
            }
        }
        const BIT_MASK: UnalignedStorage = 1 << (UNALIGNED_STORAGE_BITS - 1);
        let res = self.current_byte & BIT_MASK;
        self.current_byte <<= 1;
        self.bits_read += 1;
        Ok(res != 0)
    }
    /// Reads two *mode*-sized values, fetching both in a single wider read
    /// when they fit together inside one `u64`.
    pub fn read2_unaligned(&mut self, mode: UnalignedRWMode) -> Result<(u64, u64)> {
        // Zero-width reads always yield zeros and consume nothing.
        if mode.0 == 0 {
            return Ok((0, 0));
        }
        if mode.0 <= (u64::BITS / 2) as u8 {
            // Both halves fit: read one double-width value and split it.
            let combined = self.read_unaligned(UnalignedRWMode(mode.0 * 2))?;
            let first = combined >> mode.0;
            let second = combined & ((1_u64 << mode.0) - 1);
            Ok((first, second))
        } else {
            // Too wide to pack — fall back to two separate reads.
            Ok((self.read_unaligned(mode)?, self.read_unaligned(mode)?))
        }
    }
    /// Reads two values of (possibly different) bit widths *mode1* and *mode2*,
    /// in that order, packing both into a single read when they fit in a `u64`.
    ///
    /// Returns `(first, second)` where `first` is *mode1.0* bits wide and
    /// `second` is *mode2.0* bits wide.
    pub fn read_pair_unaligned(
        &mut self,
        mode1: UnalignedRWMode,
        mode2: UnalignedRWMode,
    ) -> Result<(u64, u64)> {
        if mode1.0 + mode2.0 > u64::BITS as u8 {
            Ok((self.read_unaligned(mode1)?, self.read_unaligned(mode2)?))
        } else {
            let read_size = UnalignedRWMode(mode1.0 + mode2.0);
            let data = self.read_unaligned(read_size)?;
            // The first value occupies the high bits, the second the low bits,
            // so the split point is mode2's width.
            // BUGFIX: the old code split at `mode1.0`, which mis-split the
            // pair whenever the two widths differed.
            let r1 = data >> mode2.0;
            // Mask out the low `mode2.0` bits; guard the zero-width case,
            // where a 64-bit shift would overflow.
            let r2 = if mode2.0 == 0 {
                0
            } else {
                data << (u64::BITS as u8 - mode2.0) >> (u64::BITS as u8 - mode2.0)
            };
            Ok((r1, r2))
        }
    }
    /// Reads three *mode*-sized values, packing all of them into one wider
    /// read when they fit inside a single `u64`.
    pub fn read3_unaligned(&mut self, mode: UnalignedRWMode) -> Result<(u64, u64, u64)> {
        // BUGFIX: zero-width reads must short-circuit. Without this guard the
        // shifts below shift a u64 by the full 64 bits and panic in debug
        // builds (`read2_unaligned` already had the equivalent guard).
        if mode.0 == 0 {
            return Ok((0, 0, 0));
        }
        if mode.0 > (u64::BITS / 3) as u8 {
            let res_1_2 = self.read2_unaligned(mode)?;
            Ok((res_1_2.0, res_1_2.1, self.read_unaligned(mode)?))
        } else {
            let read_size = UnalignedRWMode(mode.0 * 3);
            let data = self.read_unaligned(read_size)?;
            // Layout within `data`, high to low bits: |r1|r2|r3|.
            let r1 = data >> (mode.0 * 2);
            let r_23 = data << (u64::BITS as u8 - mode.0 * 2) >> (u64::BITS as u8 - mode.0 * 2);
            let r2 = r_23 >> mode.0;
            let r3 = r_23 << (u64::BITS as u8 - mode.0) >> (u64::BITS as u8 - mode.0);
            Ok((r1, r2, r3))
        }
    }
    /// Reads *mode.0* bits from self, keeping internal alignment.
    ///
    /// NOTE(review): widths of 0 *and* >= 64 both return `Ok(0)` without
    /// consuming any bits — a full 64-bit read is silently a no-op. The tests
    /// in this file only ever request up to 63 bits; confirm no caller needs
    /// exactly 64.
    pub fn read_unaligned(&mut self, mode: UnalignedRWMode) -> Result<u64> {
        if mode.0 == 0 || mode.0 >= u64::BITS as u8 {
            return Ok(0);
        };
        // Accumulator the bits are shifted into, most-significant-first.
        let mut res: u64 = 0;
        // Number of bits still to be read.
        let mut total_read = mode.0;
        while total_read != 0 {
            // Refill the storage word once every bit in it has been consumed.
            if self.bits_read >= UNALIGNED_STORAGE_BITS {
                self.read_to_internal_storage()?;
            }
            // Bits to take this iteration: all remaining requested bits, or
            // whatever is left in the storage word, whichever is smaller.
            let read_ammount = total_read.min(UNALIGNED_STORAGE_BITS - self.bits_read);
            // Make room in the accumulator for the incoming bits.
            res <<= read_ammount;
            // Unread bits sit at the top of `current_byte`; shift them down to
            // the bottom of the word...
            let read_offset = UNALIGNED_STORAGE_BITS - read_ammount;
            // ...and merge them into the accumulator.
            res |= (self.current_byte >> read_offset) as u64;
            // Account for the bits just consumed.
            self.bits_read += read_ammount;
            // Discard consumed bits so the next unread bit is leftmost again.
            // The guard avoids an overflowing shift by the full word width.
            if read_ammount < UNALIGNED_STORAGE_BITS {
                self.current_byte <<= read_ammount;
            }
            // Decrement total amount of bits left to read.
            total_read -= read_ammount;
        }
        Ok(res)
    }
    /// Creates a new `UnalignedReader` wrapping *r*.
    pub fn new(r: R) -> Self {
        // Start with an empty bit buffer: marking every bit as already
        // consumed forces a refill on the first read.
        Self {
            reader: r,
            current_byte: 0,
            bits_read: UNALIGNED_STORAGE_BITS,
        }
    }
}
pub struct UnalignedWriter<W: Write> {
    /// Buffered writer used to speedup writes in some cases.
    writer: BufWriter<W>,
    /// Partially assembled output byte; bits are filled in from the most
    /// significant position downwards.
    next_byte: u8,
    /// Number of bits of `next_byte` already occupied (always < 8 between
    /// calls — a full byte is flushed immediately).
    written: u8,
}
impl<W: Write> Drop for UnalignedWriter<W> {
    /// Writes out any partially-filled final byte, then flushes the buffer.
    ///
    /// NOTE(review): dropping panics if the final write or flush fails; call
    /// `flush` explicitly first to handle I/O errors gracefully.
    fn drop(&mut self) {
        // A partially-filled byte still carries data; unused low bits stay
        // zero (the byte is emitted as-is).
        if self.written > 0 {
            self.writer
                .write_all(&[self.next_byte])
                .expect("Unaligned writer failed to write last byte when dropped")
        };
        self.writer
            .flush()
            .expect("Unaligned writer failed to flush when dropped");
    }
}
impl<W: Write> UnalignedWriter<W> {
    /// Creates a new `UnalignedWriter` that emits bits into *w*.
    pub fn new(w: W) -> Self {
        // No bits are pending yet; wrap the sink in a BufWriter to batch the
        // byte-at-a-time writes.
        Self {
            writer: BufWriter::new(w),
            next_byte: 0,
            written: 0,
        }
    }
    /// Writes exactly one bit from UBA.
    pub fn write_bit(&mut self, bit: bool) -> Result<()> {
        const BIT_PREC: UnalignedRWMode = UnalignedRWMode::precision_bits(1);
        self.write_unaligned(BIT_PREC, bit as u64)
    }
    /// Writes the low *mode.0* bits of *data*, keeping internal alignment.
    ///
    /// Bits of *data* above the requested width are silently discarded (they
    /// are shifted out by the left-justification below). A zero-width write
    /// is a no-op.
    #[inline(always)]
    pub fn write_unaligned(&mut self, mode: UnalignedRWMode, mut data: u64) -> Result<()> {
        if mode.0 == 0 {
            return Ok(());
        };
        let mut total_write = mode.0;
        // Left-justify the payload so the first bit to emit is the MSB of `data`.
        data <<= 64 - total_write;

        while total_write > 0 {
            // Bits to emit this iteration: the free space left in `next_byte`,
            // or all remaining payload bits, whichever is smaller.
            let curr_write = total_write.min(8 - self.written);
            // Peel the top `curr_write` bits off the payload.
            let curr_bits = {
                // Extract the leftmost `curr_write` bits of data.
                let bits = (data >> (64 - curr_write)) as u8;
                // Advance the payload so its next bits become the leftmost.
                data <<= curr_write;
                bits
            };
            // Position the bits within the free region of `next_byte`:
            // byte size - already occupied bits - bits being written.
            let bit_offset = 8 - self.written - curr_write;
            let curr_bits = curr_bits << bit_offset;
            // Merge into the pending output byte.
            self.next_byte |= curr_bits;
            // Account for the bits just placed.
            self.written += curr_write;
            // Decrease the amount of payload left to write.
            total_write -= curr_write;
            // Once a full byte is assembled, emit it and start a fresh one.
            if self.written >= 8 {
                self.writer.write_all(&[self.next_byte])?;
                self.written = 0;
                self.next_byte = 0;
            }
        }
        Ok(())
    }
    /// Flushes the underlying buffered writer, letting the caller handle I/O
    /// errors. Call this when — and *only* when — the writer is no longer in
    /// use; `drop` panics on flush failure, so prefer an explicit flush.
    pub fn flush(&mut self) -> Result<()> {
        Write::flush(&mut self.writer)
    }
}
/// Bit-width descriptor for unaligned reads/writes: wraps the number of bits
/// each value occupies in the stream.
// Public types should be `Debug`; equality/hashing derives are additive and
// cost nothing for a `u8` newtype.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct UnalignedRWMode(u8);
impl UnalignedRWMode {
    /// Creates a mode that reads/writes values exactly *bits* bits wide.
    pub const fn precision_bits(bits: u8) -> Self {
        Self(bits)
    }
    /// Returns the bit width this mode reads/writes.
    pub const fn bits(&self) -> u8 {
        self.0
    }
}
#[cfg(test)]
// Expected byte stream for the `changing_alignment` tests: the values 1..64
// written back-to-back, each value n encoded in exactly n bits.
// NOTE(review): the name keeps its historical misspelling ("CHANGING
// ALIGNMENT"); renaming it would touch both test modules below.
const CHANIGING_ALGHMENT_EXPECTED: [u8; 252] = [
    0b11001101, 0b00001010, 0b00110000, 0b01110000, 0b10000000, 0b01001000, 0b00010100, 0b00000010,
    0b11000000, 0b00110000, 0b00000001, 0b10100000, 0b00000111, 0b00000000, 0b00001111, 0b00000000,
    0b00010000, 0b00000000, 0b00001000, 0b10000000, 0b00000010, 0b01000000, 0b00000000, 0b01001100,
    0b00000000, 0b00000101, 0b00000000, 0b00000000, 0b00101010, 0b00000000, 0b00000000, 0b10110000,
    0b00000000, 0b00000001, 0b01110000, 0b00000000, 0b00000001, 0b10000000, 0b00000000, 0b00000000,
    0b11001000, 0b00000000, 0b00000000, 0b00110100, 0b00000000, 0b00000000, 0b00000110, 0b11000000,
    0b00000000, 0b00000000, 0b01110000, 0b00000000, 0b00000000, 0b00000011, 0b10100000, 0b00000000,
    0b00000000, 0b00001111, 0b00000000, 0b00000000, 0b00000000, 0b00011111, 0b00000000, 0b00000000,
    0b00000000, 0b00100000, 0b00000000, 0b00000000, 0b00000000, 0b00010000, 0b10000000, 0b00000000,
    0b00000000, 0b00000100, 0b01000000, 0b00000000, 0b00000000, 0b00000000, 0b10001100, 0b00000000,
    0b00000000, 0b00000000, 0b00001001, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b01001010,
    0b00000000, 0b00000000, 0b00000000, 0b00000001, 0b00110000, 0b00000000, 0b00000000, 0b00000000,
    0b00000010, 0b01110000, 0b00000000, 0b00000000, 0b00000000, 0b00000010, 0b10000000, 0b00000000,
    0b00000000, 0b00000000, 0b00000001, 0b01001000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,
    0b01010100, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00001010, 0b11000000, 0b00000000,
    0b00000000, 0b00000000, 0b00000000, 0b10110000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,
    0b00000101, 0b10100000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00010111, 0b00000000,
    0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00101111, 0b00000000, 0b00000000, 0b00000000,
    0b00000000, 0b00000000, 0b00110000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,
    0b00011000, 0b10000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000110, 0b01000000,
    0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b11001100, 0b00000000, 0b00000000,
    0b00000000, 0b00000000, 0b00000000, 0b00001101, 0b00000000, 0b00000000, 0b00000000, 0b00000000,
    0b00000000, 0b00000000, 0b01101010, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,
    0b00000001, 0b10110000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000011,
    0b01110000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000011, 0b10000000,
    0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000001, 0b11001000, 0b00000000,
    0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b01110100, 0b00000000, 0b00000000,
    0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00001110, 0b11000000, 0b00000000, 0b00000000,
    0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b11110000, 0b00000000, 0b00000000, 0b00000000,
    0b00000000, 0b00000000, 0b00000000, 0b00000111, 0b10100000, 0b00000000, 0b00000000, 0b00000000,
    0b00000000, 0b00000000, 0b00000000, 0b00011111, 0b00000000, 0b00000000, 0b00000000, 0b00000000,
    0b00000000, 0b00000000, 0b00000000, 0b00111111,
];
#[cfg(test)]
mod test_reader {
    use super::*;
    // 4-bit reads over the nibbles 0x0..0xF laid out in order.
    #[test]
    fn half_aligned() {
        let bytes: [u8; 8] = [0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF];
        let mut reader = UnalignedReader::new(&bytes as &[u8]);
        for byte in 0..0x10 {
            let rbyte = reader.read_unaligned(UnalignedRWMode(4)).unwrap() as u8;
            assert!(rbyte == byte, "{rbyte} != {byte}");
        }
    }
    // Same stream, consumed two nibbles at a time via the packed fast path.
    #[test]
    fn read2_half_aligned() {
        let bytes: [u8; 8] = [0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF];
        let mut reader = UnalignedReader::new(&bytes as &[u8]);
        for byte in 0..(0x10 / 2) {
            let (r1, r2) = reader.read2_unaligned(UnalignedRWMode(4)).unwrap();
            assert_eq!(r1, byte * 2);
            assert_eq!(r2, byte * 2 + 1);
        }
    }
    // Same stream, consumed three nibbles at a time (last nibble unread).
    #[test]
    fn read3_half_aligned() {
        let bytes: [u8; 8] = [0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF];
        let mut reader = UnalignedReader::new(&bytes as &[u8]);
        for byte in 0..(0x10 / 3) {
            let (r1, r2, r3) = reader.read3_unaligned(UnalignedRWMode(4)).unwrap();
            assert_eq!(r1, byte * 3, "r1");
            assert_eq!(r2, byte * 3 + 1, "r2");
            assert_eq!(r3, byte * 3 + 2, "r3");
        }
    }
    // Single-bit reads must reproduce each bit MSB-first.
    #[test]
    fn read_bit() {
        let bytes: [u8; 2] = [0b1110_0010, 0b1010_0101];
        let expected: [bool; 16] = [
            true, true, true, false, false, false, true, false, true, false, true, false, false,
            true, false, true,
        ];
        let mut reader = UnalignedReader::new(&bytes as &[u8]);
        for val in expected {
            let rval = reader.read_bit().unwrap();
            assert_eq!(val, rval);
        }
    }
    // 12-bit reads straddle byte boundaries on every other value.
    #[test]
    fn one_and_half_aligned() {
        let bytes: [u8; 12] = [
            0x00, 0x0F, 0xFF, 0xF0, 0xFA, 0xBC, 0x1A, 0x58, 0x54, 0x48, 0x55, 0x64,
        ];
        let expected: [u16; 8] = [0x000, 0xFFF, 0xF0F, 0xABC, 0x1A5, 0x854, 0x485, 0x564];
        let mut reader = UnalignedReader::new(&bytes as &[u8]);
        for byte in expected {
            let rbyte = reader.read_unaligned(UnalignedRWMode(12)).unwrap() as u16;
            assert!(rbyte == byte, "{rbyte} != {byte}");
        }
    }
    // 3-bit values 0..7 packed back-to-back across 3 bytes.
    #[test]
    fn bin_3_aligned() {
        // 000 001 01|0 011 100 1|01 110 111
        let bytes: [u8; 3] = [0b00000101, 0b00111001, 0b01110111];
        let mut reader = UnalignedReader::new(&bytes as &[u8]);
        for byte in 0..8 {
            let rbyte = reader.read_unaligned(UnalignedRWMode(3)).unwrap() as u8;
            assert!(rbyte == byte, "{rbyte:8b} != {byte:8b}");
        }
    }
    // Each value n (1..64) is stored in exactly n bits; see the shared
    // expected-byte constant at module level.
    #[test]
    fn changing_alignment() {
        let mut reader = UnalignedReader::new(&CHANIGING_ALGHMENT_EXPECTED as &[u8]);
        for byte in 1..64 {
            assert!(reader.read_unaligned(UnalignedRWMode(byte)).unwrap() == byte as u64);
        }
    }
}
#[cfg(test)]
// NOTE(review): module name keeps its historical "writter" misspelling.
mod test_writter {
    use super::*;
    // Writing nibbles 0x0..0xF must reproduce the reader test's byte stream.
    // The inner scope drops the writer, flushing any pending byte.
    #[test]
    fn half_aligned() {
        let bytes: [u8; 8] = [0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF];
        let mut result = Vec::with_capacity(8);
        {
            let mut writter = UnalignedWriter::new(&mut result);
            for byte in 0..0x10 {
                writter.write_unaligned(UnalignedRWMode(4), byte).unwrap();
            }
        }
        assert!(result == bytes, "{:x?} != {:x?}", result, bytes);
    }
    // 3-bit values 0..7 packed back-to-back across 3 bytes.
    #[test]
    fn bin_3_aligned() {
        let bytes: [u8; 3] = [0b00000101, 0b00111001, 0b01110111];
        let mut result = Vec::with_capacity(8);
        {
            let mut writter = UnalignedWriter::new(&mut result);
            for byte in 0..0x8 {
                writter.write_unaligned(UnalignedRWMode(3), byte).unwrap();
            }
        }
        // Per-byte comparison first for a readable binary diff on failure.
        for i in 0..3 {
            assert!(
                result[i] == bytes[i],
                "{:08b} != {:08b}",
                result[i],
                bytes[i]
            );
        }
        assert!(result == bytes, "{:x?} != {:x?}", result, bytes);
    }
    // 12-bit writes straddle byte boundaries on every other value.
    #[test]
    fn one_and_half_aligned() {
        let expected: [u8; 12] = [
            0x00, 0x0F, 0xFF, 0xF0, 0xFA, 0xBC, 0x1A, 0x58, 0x54, 0x48, 0x55, 0x64,
        ];
        let bytes: [u16; 8] = [0x000, 0xFFF, 0xF0F, 0xABC, 0x1A5, 0x854, 0x485, 0x564];
        let mut result = Vec::with_capacity(8);
        {
            let mut writter = UnalignedWriter::new(&mut result);
            for byte in bytes {
                writter
                    .write_unaligned(UnalignedRWMode(12), byte as u64)
                    .unwrap();
            }
        }
        assert!(result == expected, "{:x?} != {:x?}", result, expected);
    }
    // Round-trip counterpart of the reader's changing_alignment test: value n
    // written in exactly n bits must produce the shared expected constant.
    #[test]
    fn changing_alignment() {
        let mut result = Vec::with_capacity(8);
        {
            let mut writter = UnalignedWriter::new(&mut result);
            for byte in 1..64 {
                writter
                    .write_unaligned(UnalignedRWMode(byte), byte as u64)
                    .unwrap();
            }
        }
        assert!(result == CHANIGING_ALGHMENT_EXPECTED);
    }
}