use std::fmt;
use std::mem::size_of;
use std::collections::BTreeMap;
use std::io::{self, Read, Write, Seek, SeekFrom, copy};
use bytemuck::{Pod, Zeroable};
use thiserror::Error;

use crate::{EntryId, Hash, Version};
use crate::bin::{DataSource, Fixed, Positioned, Readable, ReadableFixed, Writable, WritableFixed, derive_readable_via_pod, derive_writable_via_pod, derive_readable_fixed_via_default, derive_writable_fixed_via_default, derive_var_size_via_size_of};
use crate::str::{StrError, ZString};
use crate::read::{self, Reader};
use crate::write::{self, Writer};
use crate::version::MagicNumber;


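/// Errors that can occur while writing a v001 archive.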
#[derive(Debug, Error)]
pub enum V001WriteError {
    #[error("v001 does not support compression")]
    CompressionNotSupported,
    #[error("v001 requires unique hashes. {0} and {1} have the same hash: {}", Hash::v001(.0))]
    HashCollision(String, String),
    #[error("{0}")]
    IO(#[from] io::Error),
    #[error("{1}: \"{0}\"")]
    StrErr(String, StrError),
}

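/// The fixed-size header that follows the magic number. `offset_hash_table`
/// is the size in bytes of the index (file records, name offsets, and names),
/// i.e. the distance from the end of this header to the start of the hash table.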
#[repr(C)]
#[derive(Debug, Clone, Copy, Zeroable, Pod)]
pub struct HeaderV001 {
    pub offset_hash_table: u32,
    pub file_count: u32,
}
impl fmt::Display for HeaderV001 {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "file_count: {}", self.file_count)
    }
}
impl Fixed for HeaderV001 {
    fn pos() -> usize { size_of::<MagicNumber>() }
}
derive_var_size_via_size_of!(HeaderV001);
derive_readable_fixed_via_default!(HeaderV001);
derive_writable_fixed_via_default!(HeaderV001);


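/// An entry in the file record table: the size of a file's data and its
/// offset relative to the end of the index (see `offset_after_index`).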
#[repr(C)]
#[derive(Debug, Clone, Copy, Zeroable, Pod)]
struct FileRecord {
    pub size: u32,
    pub offset: u32,
}
derive_var_size_via_size_of!(FileRecord);
derive_readable_via_pod!(FileRecord);
derive_writable_via_pod!(FileRecord);


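// Layout of a v001 archive:
//   magic number | header | file records | name offsets | names (ZStrings)
//   | hash table | file data
// The helpers below compute the boundaries between those regions.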
const fn offset_after_header() -> u64 {
    size_of::<(MagicNumber, HeaderV001)>() as u64
}
const fn offset_names_start(file_count: u64) -> u64 {
    offset_after_header() + (file_count * size_of::<(FileRecord, u32)>() as u64)
}
const fn offset_after_index(header: &HeaderV001) -> u64 {
    offset_after_header() + header.offset_hash_table as u64 + (size_of::<Hash>() * header.file_count as usize) as u64
}

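/// Writer for v001 archives. v001 has no directory records: directory and
/// file name are joined with `\` into a single hashed name.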
#[derive(Default)]
pub struct V001 {}
pub type WriterV001 = V001;
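/// Reader for v001 archives. The file list is read lazily and cached in
/// `files` on the first call to `list`.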
pub struct ReaderV001<R> {
    reader: R,
    header: HeaderV001,
    files: Option<Vec<read::File>>,
}
impl<R: Read + Seek> ReaderV001<R> {
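    /// Reads the complete index: the file records and name offsets that sit
    /// back to back after the header, then the hash table, then each name.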
    fn files(&mut self) -> io::Result<Vec<read::File>> {
        let file_count = self.header.file_count as usize;
        self.reader.seek(SeekFrom::Start(offset_after_header()))?;

        let recs = FileRecord::read_bin_many(&mut self.reader, file_count)?;
        let name_offsets = u32::read_bin_many(&mut self.reader, file_count)?;

        self.reader.seek(SeekFrom::Start(offset_after_header() + self.header.offset_hash_table as u64))?;
        let hashes = Hash::read_bin_many(&mut self.reader, file_count)?;

        recs.iter().zip(name_offsets).zip(hashes)
            .map(|((rec, name_offset), hash)| {
                let name_pos = offset_names_start(file_count as u64) + name_offset as u64;
                self.reader.seek(SeekFrom::Start(name_pos))?;
                let name = ZString::read_bin(&mut self.reader)
                    .map_err(|err| io::Error::new(
                        io::ErrorKind::InvalidData,
                        format!("could not read name at {}: {}", name_pos, err),
                    ))?;

                Ok(read::File {
                    id: EntryId {
                        hash,
                        name: Some(name.to_string()),
                    },
                    compressed: false,
                    size: rec.size as usize,
                    offset: offset_after_index(&self.header) + rec.offset as u64,
                })
            })
            .collect()
    }
}
impl<R> Reader for ReaderV001<R>
where R: Read + Seek {
    type Header = HeaderV001;
    type Root = Vec<read::File>;
    type In = R;

    fn read_bsa(mut reader: R) -> io::Result<Self> {
        let header = HeaderV001::read_fixed(&mut reader)?;
        Ok(Self {
            reader,
            header,
            files: None,
        })
    }

    fn header(&self) -> HeaderV001 { self.header }
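    /// Lists all files, reading the index on first use and serving the
    /// cached copy afterwards.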
    fn list(&mut self) -> io::Result<Vec<read::File>> {
        if let Some(files) = &self.files {
            Ok(files.to_vec())
        } else {
            let files = self.files()?;
            self.files = Some(files.to_vec());
            Ok(files)
        }
    }
    fn extract<W: Write>(&mut self, file: &read::File, mut out: W) -> io::Result<()> {
        self.reader.seek(SeekFrom::Start(file.offset))?;
        let mut data = (&mut self.reader).take(file.size as u64);
        copy(&mut data, &mut out)?;
        Ok(())
    }
}
impl Writer for V001 {
    type Err = V001WriteError;

    fn write_bsa<DS, D, W>(&self, dirs: DS, mut out: W) -> Result<(), V001WriteError>
    where
        DS: IntoIterator<Item = write::Dir<D>>,
        D: DataSource,
        W: Write + Seek,
    {
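        // First pass: flatten dirs into `dir\file` names, hash them, reject
        // duplicates, and accumulate the index size (record + name offset +
        // zero-terminated name per file), which becomes `offset_hash_table`.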
        let mut offset_hash_table: u32 = 0;
        let mut files: BTreeMap<Hash, (String, D)> = BTreeMap::new();
        for dir in dirs {
            for file in dir.files {
                if file.compressed == Some(true) {
                    return Err(V001WriteError::CompressionNotSupported)
                }
                let name = format!("{}\\{}",
                    dir.name.to_lowercase(),
                    file.name.to_lowercase());
                offset_hash_table += (size_of::<(FileRecord, u32)>() + name.len() + 1) as u32;
                let hash = Hash::v001(&name);
                if let Some((other, _)) = files.get(&hash) {
                    return Err(V001WriteError::HashCollision(name, other.clone()))
                }
                files.insert(hash, (name, file.data));
            }
        }

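        // Write the magic number and the now fully known header.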
        Version::V001.write_fixed(&mut out)?;
        let header = HeaderV001 {
            offset_hash_table,
            file_count: files.len() as u32,
        };
        header.write_fixed(&mut out)?;

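        // Reserve space for the file records and name offsets; both are
        // patched via `Positioned::update` once the real values are known.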
        let mut recs: Vec<Positioned<FileRecord>> = Vec::new();
        for _ in &files {
            recs.push(Positioned::new(FileRecord {
                offset: 0,
                size: 0,
            }, &mut out)?);
        }

        let mut name_offsets: Vec<Positioned<u32>> = Vec::new();
        for _ in &files {
            name_offsets.push(Positioned::new(0, &mut out)?);
        }
        let offset_names_start = offset_names_start(files.len() as u64) as u32;
        for (name_offset, (_, (name, _))) in name_offsets.iter_mut().zip(&files) {
            name_offset.data = out.stream_position()? as u32 - offset_names_start;
            let name = ZString::new(&name)
                .map_err(|err| V001WriteError::StrErr(name.clone(), err))?;
            name.write(&mut out)?;
            name_offset.update(&mut out)?;
        }
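        // The hash table follows the names; iterating the BTreeMap yields
        // the hashes in sorted order.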
        for (hash, _) in &files {
            hash.write(&mut out)?;
        }
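        // Finally the file data; each record's offset is stored relative to
        // the end of the index.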
        for (rec, (_, (_, data))) in recs.iter_mut().zip(&files) {
            let pos = out.stream_position()? as u32;
            rec.data.offset = pos - offset_after_index(&header) as u32;
            let mut data = data.open()?;
            rec.data.size = copy(&mut data, &mut out)? as u32;
            rec.update(&mut out)?;
        }
        Ok(())
    }
}


#[cfg(test)]
mod tests {
    use crate::{
        Hash,
        read::Reader,
        write::test::*,
        Version,
    };
    use super::*;

    #[test]
    fn writes_version() {
        let mut bytes = bsa_bytes(V001::default(), some_bsa_dirs());

        let v = Version::read_fixed(&mut bytes)
            .unwrap_or_else(|err| panic!("could not read version {}", err));
        assert_eq!(v, Version::V001);
    }

    #[test]
    fn writes_header() {
        let mut bytes = bsa_bytes(WriterV001::default(), some_bsa_dirs());

        let header = HeaderV001::read_fixed(&mut bytes)
            .unwrap_or_else(|err| panic!("could not read header {}", err));

        assert_eq!(header.offset_hash_table, 16, "offset_hash_table");
        assert_eq!(header.file_count, 1, "file_count");
    }

    #[test]
    fn write_read_identity_bsa() {
        let dirs = some_bsa_dirs();
        let bytes = bsa_bytes(WriterV001::default(), dirs.clone());
        let mut bsa = ReaderV001::read_bsa(bytes)
            .unwrap_or_else(|err| panic!("could not open bsa {}", err));
        let files = bsa.list()
            .unwrap_or_else(|err| panic!("could not read dirs {}", err));

        assert_eq!(files.len(), 1, "files.len()");
        assert_eq!(files[0].hash, Hash::v001("a\\b"), "files[0].hash");
        assert_eq!(files[0].name, Some("a\\b".to_owned()), "files[0].name");

        let mut data = Vec::<u8>::new();
        bsa.extract(&files[0], &mut data)
            .unwrap_or_else(|err| panic!("could not extract data {}", err));
        assert_eq!(dirs[0].files[0].data, data, "file data");
    }
}