pub mod error;
pub mod format;

use crate::error::{ParseError, ReadError, ReadToError};
use crate::format::Format;
use byteorder::{ByteOrder, NativeEndian};
use std::convert::TryInto;
use std::io::SeekFrom;
use tokio::io::AsyncSeek;
use tokio::prelude::*;

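/// Asynchronous reader for KTX2 texture files.
///
/// Construction reads and validates the header and the level index; the
/// texture payload is fetched on demand with `read_data` or `read_data_to`.
///
/// A minimal usage sketch, assuming the input is a `tokio::fs::File` (any
/// `AsyncRead + AsyncSeek + Unpin` source works); the crate name
/// `ktx2_reader` is illustrative:
///
/// ```ignore
/// let file = tokio::fs::File::open("texture.ktx2").await?;
/// let mut reader = ktx2_reader::Reader::new(file).await?;
/// println!("format: {:?}", reader.header().format);
/// let data = reader.read_data().await?;
/// ```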
pub struct Reader<T> {
    input: T,
    head: Header,
    levels_index: Vec<LevelIndex>,
}

impl<T: AsyncRead + AsyncSeek + Unpin> Reader<T> {
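    /// Creates a reader over `input`, eagerly reading and validating the
    /// KTX2 identifier, header, and level index. The payload itself is not
    /// read until `read_data`/`read_data_to` is called.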
    pub async fn new(mut input: T) -> ReadResult<Self> {
        let head = Self::read_head(&mut input).await?;
        let levels_index = Self::read_level_index(&mut input, &head).await?;
        Ok(Self {
            input,
            head,
            levels_index,
        })
    }

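    /// Reads the first 48 bytes of the file: the 12-byte KTX2 identifier
    /// followed by the fixed-size header fields, parsed into a `Header`.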
    async fn read_head(input: &mut T) -> ReadResult<Header> {
        let mut head_bytes = [0; 48];
        input.read_exact(&mut head_bytes).await?;
        Self::test_identifier(&head_bytes)?;

        Ok(Header::from_bytes(&head_bytes)?)
    }

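    /// Reads the level index that follows the header and the 32-byte section
    /// index, i.e. starting at byte 80. Each entry is 24 bytes: byte offset,
    /// byte length, and uncompressed byte length of one mip level.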
    async fn read_level_index(input: &mut T, head: &Header) -> ReadResult<Vec<LevelIndex>> {
        const LEVEL_INDEX_START_BYTE: u64 = 80;
        const LEVEL_INDEX_BYTE_LEN: u32 = 24;
        let level_count = head.level_count.max(1);
        let level_index_bytes_len = level_count * LEVEL_INDEX_BYTE_LEN;
        let mut level_index_bytes = vec![0u8; level_index_bytes_len as usize];

        input.seek(SeekFrom::Start(LEVEL_INDEX_START_BYTE)).await?;
        input.read_exact(&mut level_index_bytes).await?;
        let mut infos = Vec::with_capacity(level_count as usize);
        for level_index in 0..level_count {
            let start_byte = (level_index * LEVEL_INDEX_BYTE_LEN) as usize;
            let end_byte = start_byte + LEVEL_INDEX_BYTE_LEN as usize;
            infos.push(LevelIndex::from_bytes(
                &level_index_bytes[start_byte..end_byte],
            ));
        }
        Ok(infos)
    }

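    /// Reads the whole texture payload (all mip levels) into a freshly
    /// allocated buffer of `data_len_bytes()` bytes.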
    pub async fn read_data(&mut self) -> ReadResult<Vec<u8>> {
        let data_len_bytes = self.data_len_bytes();
        let mut buffer = vec![0; data_len_bytes as usize];
        self.read_data_to(&mut buffer)
            .await
            .map(|_| buffer)
            .map_err(|e| match e {
                ReadToError::ReadError(e) => e,
                ReadToError::BadBuffer(_) => {
                    panic!("passed a correctly sized buffer to read_data_to(), but got a BadBuffer error")
                }
            })
    }

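    /// Reads the whole texture payload into a caller-provided buffer, which
    /// must be exactly `data_len_bytes()` long; otherwise
    /// `ReadToError::BadBuffer` carrying the expected length is returned.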
    pub async fn read_data_to(&mut self, buf: &mut [u8]) -> ReadToResult<()> {
        let data_len_bytes = self.data_len_bytes();
        if buf.len() != data_len_bytes as usize {
            return Err(ReadToError::BadBuffer(data_len_bytes));
        }

        let data_start_byte = self.first_level_offset_bytes();
        self.input.seek(SeekFrom::Start(data_start_byte)).await?;

        self.input.read_exact(buf).await?;
        Ok(())
    }

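    /// Verifies that the file starts with the 12-byte KTX2 identifier.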
    fn test_identifier(head_bytes: &HeadBytes) -> ReadResult<()> {
        let mut identifier = [0; 12];
        identifier.copy_from_slice(&head_bytes[0..12]);
        if identifier == KTX2_IDENTIFIER {
            return Ok(());
        }
        Err(ReadError::ParseError(ParseError::BadIdentifier(identifier)))
    }

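    /// Returns the parsed KTX2 header.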
    pub fn header(&self) -> &Header {
        &self.head
    }

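    /// Describes every mip level of the payload returned by `read_data`:
    /// level number, combined layer/face count, byte offset relative to the
    /// start of the payload, and extent.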
    pub fn regions_description(&self) -> Vec<RegionDescription> {
        let base_offset = self.first_level_offset_bytes();
        self.levels_index
            .iter()
            .enumerate()
            .map(|(i, level)| self.region_from_level_index(i, level.offset - base_offset))
            .collect()
    }

    fn first_level_offset_bytes(&self) -> u64 {
        self.levels_index
            .iter()
            .map(|l| l.offset)
            .min()
            .expect("level index is never empty: at least one level is read in new()")
    }

    fn last_level(&self) -> LevelIndex {
        *self
            .levels_index
            .iter()
            .max_by_key(|l| l.offset)
            .expect("level index is never empty: at least one level is read in new()")
    }

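    /// Total payload length in bytes: from the lowest level offset to the
    /// end of the level stored last in the file.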
    pub fn data_len_bytes(&self) -> u64 {
        let start_offset = self.first_level_offset_bytes();
        let last_level = self.last_level();
        last_level.offset + last_level.uncompressed_length_bytes - start_offset
    }

    fn region_from_level_index(&self, i: usize, offset: u64) -> RegionDescription {
        RegionDescription {
            level: i as u32,
            layer_count: self.head.layer_count.max(1) * self.head.face_count,
            offset_bytes: offset,
            width: Self::level_size(self.head.base_width, i as u32),
            height: Self::level_size(self.head.base_height, i as u32),
            depth: Self::level_size(self.head.base_depth, i as u32),
        }
    }

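    /// Extent of mip `level` given the base extent: halved per level and
    /// clamped to at least 1.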
    fn level_size(base: u32, level: u32) -> u32 {
        (base >> level).max(1)
    }
}

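/// The 12-byte identifier every KTX2 file starts with: «KTX 20»\r\n\x1A\n.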
static KTX2_IDENTIFIER: [u8; 12] = [
    0xAB, 0x4B, 0x54, 0x58, 0x20, 0x32, 0x30, 0xBB, 0x0D, 0x0A, 0x1A, 0x0A,
];

pub type ReadResult<T> = Result<T, ReadError>;

pub type ReadToResult<T> = Result<T, ReadToError>;

pub type ParseResult<T> = Result<T, ParseError>;

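/// The fixed-size KTX2 header fields that follow the file identifier.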
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub struct Header {
    pub format: Format,
    pub type_size: u32,
    pub base_width: u32,
    pub base_height: u32,
    pub base_depth: u32,
    pub layer_count: u32,
    pub face_count: u32,
    pub level_count: u32,
    pub supercompression_scheme: u32,
}

impl Header {
    pub fn from_bytes(data: &HeadBytes) -> ParseResult<Self> {
        let format_id = NativeEndian::read_u32(&data[12..16]);
        let format = format_id.try_into()?;

        Ok(Self {
            format,
            type_size: NativeEndian::read_u32(&data[16..20]),
            base_width: Self::parse_base_width(&data[20..24])?,
            base_height: NativeEndian::read_u32(&data[24..28]),
            base_depth: NativeEndian::read_u32(&data[28..32]),
            layer_count: NativeEndian::read_u32(&data[32..36]),
            face_count: Self::parse_face_count(&data[36..40])?,
            level_count: NativeEndian::read_u32(&data[40..44]),
            supercompression_scheme: Self::parse_supercompression_scheme(&data[44..48])?,
        })
    }

    fn parse_base_width(data: &[u8]) -> ParseResult<u32> {
        let result = NativeEndian::read_u32(&data[0..4]);
        match result {
            0 => Err(ParseError::ZeroWidth),
            _ => Ok(result),
        }
    }

    fn parse_face_count(data: &[u8]) -> ParseResult<u32> {
        let result = NativeEndian::read_u32(&data[0..4]);
        match result {
            0 => Err(ParseError::ZeroFaceCount),
            _ => Ok(result),
        }
    }

    fn parse_supercompression_scheme(data: &[u8]) -> ParseResult<u32> {
        let result = NativeEndian::read_u32(&data[0..4]);
        match result {
            0 => Ok(0),
            _ => Err(ParseError::UnsupportedFeature("supercompression scheme")),
        }
    }
}

type HeadBytes = [u8; 48];

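/// One 24-byte level index entry: where a mip level is stored in the file
/// and its stored/uncompressed lengths.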
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
struct LevelIndex {
    pub offset: u64,
    pub length_bytes: u64,
    pub uncompressed_length_bytes: u64,
}

impl LevelIndex {
    pub fn from_bytes(data: &[u8]) -> Self {
        Self {
            offset: NativeEndian::read_u64(&data[0..8]),
            length_bytes: NativeEndian::read_u64(&data[8..16]),
            uncompressed_length_bytes: NativeEndian::read_u64(&data[16..24]),
        }
    }
}

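/// Location and extent of one mip level within the buffer returned by
/// `Reader::read_data`.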
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub struct RegionDescription {
    pub level: u32,
    pub layer_count: u32,
    pub offset_bytes: u64,
    pub width: u32,
    pub height: u32,
    pub depth: u32,
}