mpq_rs/core/creator.rs

use std::borrow::Cow;
use std::cmp::min;
use std::io::Error as IoError;
use std::io::{Seek, SeekFrom, Write};

use byteorder::{WriteBytesExt, LE};
use indexmap::IndexMap;

// use super::archive::Archive;
use crate::core::consts::*;
use crate::core::header::*;
use crate::core::table::*;
use crate::core::util::*;

#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
struct FileKey {
    hash_a: u32,
    hash_b: u32,
    index: u32,
}

impl FileKey {
    fn new(name: &str) -> FileKey {
        let hash_a = hash_string(name.as_bytes(), MPQ_HASH_NAME_A);
        let hash_b = hash_string(name.as_bytes(), MPQ_HASH_NAME_B);
        let index = hash_string(name.as_bytes(), MPQ_HASH_TABLE_INDEX);

        FileKey {
            hash_a,
            hash_b,
            index,
        }
    }
}

#[derive(Debug)]
struct FileRecord {
    file_name: String,
    contents: Vec<u8>,
    offset: u64,
    compressed_size: u64,
    options: FileOptions,
}

impl FileRecord {
    fn new<S: Into<String>, C: Into<Vec<u8>>>(
        name: S,
        contents: C,
        options: FileOptions,
    ) -> FileRecord {
        FileRecord {
            file_name: name.into(),
            contents: contents.into(),
            offset: 0,
            compressed_size: 0,
            options,
        }
    }
}

#[derive(Debug, Clone, Copy)]
/// Options that control how a file is added to an archive.
pub struct FileOptions {
    /// Whether to encrypt the file using MPQ's encryption scheme.
    /// The encryption key is derived from the file name, so in practice
    /// this offers little protection.
    pub encrypt: bool,
    /// Whether to compress the file. Currently, only DEFLATE
    /// compression is attempted.
    pub compress: bool,
    /// If the file is encrypted, this will "adjust" the encryption key by
    /// performing some simple transformations on it. This is typically used for
    /// "technical" files such as `(listfile)`.
    pub adjust_key: bool,
}

impl Default for FileOptions {
    fn default() -> FileOptions {
        FileOptions {
            encrypt: false,
            compress: false,
            adjust_key: false,
        }
    }
}

impl FileOptions {
    fn flags(self) -> u32 {
        let mut flags = MPQ_FILE_EXISTS;

        if self.encrypt {
            flags |= MPQ_FILE_ENCRYPTED;
        }

        if self.adjust_key {
            flags |= MPQ_FILE_ADJUST_KEY;
        }

        if self.compress {
            flags |= MPQ_FILE_COMPRESS;
        }

        flags
    }
}

#[derive(Debug)]
/// Creator capable of creating MPQ Version 1 archives.
///
/// Will hold all the files in memory until asked to [write](struct.Creator.html#method.write) them
/// to a `writer`.
///
/// When writing, a `(listfile)` will be automatically appended to the archive.
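///
/// # Example
///
/// A minimal usage sketch (imports omitted; the file path and contents are illustrative):
///
/// ```ignore
/// let mut creator = Creator::default();
/// creator.add_file(
///     "scripts/main.txt",
///     "hello",
///     FileOptions {
///         compress: true,
///         ..Default::default()
///     },
/// );
///
/// let mut out = std::fs::File::create("out.mpq").expect("failed to create file");
/// creator.write(&mut out).expect("failed to write archive");
/// ```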
// TODO: Add support for multiple compression types
pub struct Creator {
    added_files: IndexMap<FileKey, FileRecord>,

    sector_size: u64,
}

impl Default for Creator {
    fn default() -> Creator {
        Creator {
            added_files: IndexMap::new(),
            sector_size: 0x10000,
        }
    }
}

impl Creator {
    /// Adds a file to be written to the archive later.
    ///
    /// All forward slashes (`/`) in the file path are converted to backslashes (`\`),
    /// since MPQ archives use `\` as the path separator.
    ///
    /// [`FileOptions`](struct.FileOptions.html) determines how the file is stored, e.g. whether it is encrypted and compressed.
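    ///
    /// # Example
    ///
    /// A minimal sketch (the path and contents are illustrative):
    ///
    /// ```ignore
    /// let mut creator = Creator::default();
    /// // stored in the archive as "units\custom.txt"
    /// creator.add_file("units/custom.txt", "some data", FileOptions::default());
    /// ```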
    pub fn add_file<C>(&mut self, file_name: &str, contents: C, options: FileOptions)
    where
        C: Into<Vec<u8>>,
    {
        let file_name = file_name.replace('/', "\\");
        let key = FileKey::new(&file_name);

        self.added_files
            .insert(key, FileRecord::new(file_name, contents, options));
    }

    /// Writes out the entire archive to the specified writer.
    ///
    /// The archive start position is calculated as follows:  
    /// `((current_pos + (HEADER_BOUNDARY - 1)) / HEADER_BOUNDARY) * HEADER_BOUNDARY`  
    /// Where `current_pos` is the `writer`'s current seek position, and `HEADER_BOUNDARY` is 512.
    /// In other words, `current_pos` is rounded up to a multiple of 512; e.g. a writer at
    /// position 700 yields an archive start of 1024.
    ///
    /// Will write the following (see the layout sketch below):
    /// - MPQ Header
    /// - All files with their sector offset table
    /// - MPQ hash table
    /// - MPQ block table
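    ///
    /// Layout of the resulting archive, relative to the archive start:
    ///
    /// ```text
    /// [MPQ header][file data + sector offset tables ...][hash table][block table]
    /// ```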
    pub fn write<W>(&mut self, mut writer: W) -> Result<(), IoError>
    where
        W: Write + Seek,
    {
        let Creator {
            added_files,
            sector_size,
        } = self;
        let sector_size = *sector_size;

        let current_pos = writer.seek(SeekFrom::Current(0))?;
        // starting from the current pos, find the next valid (512-aligned) header position
        let archive_start =
            ((current_pos + (HEADER_BOUNDARY - 1)) / HEADER_BOUNDARY) * HEADER_BOUNDARY;
        writer.seek(SeekFrom::Start(archive_start))?;

        // skip writing the header for now
        writer.seek(SeekFrom::Current(HEADER_MPQ_SIZE as i64))?;

        // create a listfile
        let mut listfile = String::new();
        for file in added_files.values() {
            listfile += &file.file_name;
            listfile += "\r\n";
        }

        // add it to the file list
        {
            let key = FileKey::new("(listfile)");
            added_files.insert(
                key,
                FileRecord::new(
                    "(listfile)",
                    listfile,
                    FileOptions {
                        compress: true,
                        encrypt: true,
                        adjust_key: true,
                    },
                ),
            );
        }

        // write out all the files back-to-back
        for file in added_files.values_mut() {
            write_file(sector_size, archive_start, &mut writer, file)?;
        }

        // the hash table size must be a power of two, since lookups mask
        // the name hash with (size - 1) to get the starting slot
        let mut hashtable_size = MIN_HASH_TABLE_SIZE;
        while hashtable_size < added_files.len() {
            hashtable_size *= 2;
        }

        // write hash table and remember its position
        let hashtable_pos = write_hashtable(&mut writer, hashtable_size, &added_files)?;

        // write block table and remember its position
        let blocktable_pos = write_blocktable(&mut writer, &added_files)?;

        // write header
        let archive_end = writer.seek(SeekFrom::Current(0))?;
        write_header(
            &mut writer,
            (archive_start, archive_end),
            (hashtable_pos, hashtable_size),
            (blocktable_pos, added_files.len()),
            sector_size,
        )?;

        Ok(())
    }
}

/// Builds, encrypts and writes the hash table at the writer's current position,
/// returning the absolute offset at which it was written.
fn write_hashtable<W>(
    mut writer: W,
    hashtable_size: usize,
    added_files: &IndexMap<FileKey, FileRecord>,
) -> Result<u64, IoError>
where
    W: Write + Seek,
{
    let hashtable_pos = writer.seek(SeekFrom::Current(0))?;
    let mut hashtable = vec![HashEntry::blank(); hashtable_size];
    let hash_index_mask = hashtable_size - 1;

    for (block_index, (key, _)) in added_files.iter().enumerate() {
        let mut hash_index = (key.index as usize) & hash_index_mask;
        let hash_entry = HashEntry::new(key.hash_a, key.hash_b, block_index as u32);

        // resolve collisions by linear probing, wrapping around the end of the table
        while !hashtable[hash_index].is_blank() {
            hash_index += 1;
            if hash_index == hashtable_size {
                hash_index = 0;
            }
        }

        hashtable[hash_index] = hash_entry;
    }

    let mut buf = vec![0u8; hashtable_size * HASH_TABLE_ENTRY_SIZE as usize];

    let mut cursor = buf.as_mut_slice();
    for entry in hashtable {
        entry.write(&mut cursor)?;
    }
    encrypt_mpq_block(&mut buf, HASH_TABLE_KEY);

    writer.write_all(&buf)?;

    Ok(hashtable_pos)
}

/// Builds, encrypts and writes the block table at the writer's current position,
/// returning the absolute offset at which it was written.
fn write_blocktable<W>(
    mut writer: W,
    added_files: &IndexMap<FileKey, FileRecord>,
) -> Result<u64, IoError>
where
    W: Write + Seek,
{
    let blocktable_pos = writer.seek(SeekFrom::Current(0))?;

    let mut buf = vec![0u8; added_files.len() * BLOCK_TABLE_ENTRY_SIZE as usize];

    let mut cursor = buf.as_mut_slice();
    for file in added_files.values() {
        let flags = file.options.flags();

        let block_entry = BlockEntry::new(
            file.offset,
            file.compressed_size,
            file.contents.len() as u64,
            flags,
        );

        block_entry.write(&mut cursor)?;
    }

    encrypt_mpq_block(&mut buf, BLOCK_TABLE_KEY);
    writer.write_all(&buf)?;

    Ok(blocktable_pos)
}

/// Seeks back to `archive_start` and writes the MPQ header there.
/// Table positions are stored relative to the archive start.
fn write_header<W>(
    mut writer: W,
    (archive_start, archive_end): (u64, u64),
    (hashtable_pos, hashtable_size): (u64, usize),
    (blocktable_pos, blocktable_size): (u64, usize),
    sector_size: u64,
) -> Result<(), IoError>
where
    W: Write + Seek,
{
    let header = FileHeader::new_v1(
        (archive_end - archive_start) as u32,
        sector_size as u32,
        (hashtable_pos - archive_start) as u32,
        (blocktable_pos - archive_start) as u32,
        hashtable_size as u32,
        blocktable_size as u32,
    );

    writer.seek(SeekFrom::Start(archive_start))?;
    header.write(&mut writer)?;

    Ok(())
}

/// Writes out the specified file starting at the writer's current position.
/// If the file is marked for compression, a Sector Offset Table (SOT) is written
/// before the file data, and compression is attempted on each sector.
/// If the file is not marked for compression, no SOT is written.
/// If the file is marked for encryption, each sector (and the SOT) is encrypted
/// after compression.
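///
/// On-disk layout for a compressed file (offsets in the SOT are relative to the
/// start of the file record):
///
/// ```text
/// [SOT: (sector_count + 1) x u32][sector 0][sector 1]...[sector N-1]
/// ```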
fn write_file<W>(
    sector_size: u64,
    archive_start: u64,
    mut writer: W,
    file: &mut FileRecord,
) -> Result<(), IoError>
where
    W: Write + Seek,
{
    let options = file.options;
    let sector_count = sector_count_from_size(file.contents.len() as u64, sector_size);
    let file_start = writer.seek(SeekFrom::Current(0))?;

    // calculate the encryption key if encryption was requested
    let encryption_key = if options.encrypt {
        Some(calculate_file_key(
            &file.file_name,
            (file_start - archive_start) as u32,
            file.contents.len() as u32,
            options.adjust_key,
        ))
    } else {
        None
    };

    if options.compress {
        let mut offsets: Vec<u32> = Vec::new();

        // the first sector starts right after the SOT, which holds (sector_count + 1) u32 entries
        let first_sector_start = ((sector_count + 1) * 4) as u32;
        writer.seek(SeekFrom::Current(i64::from(first_sector_start)))?;
        offsets.push(first_sector_start);
        // write each sector and the offset of its end
        for i in 0..sector_count {
            let sector_start = i * sector_size;
            let sector_end = min((i + 1) * sector_size, file.contents.len() as u64);
            let data = &file.contents[sector_start as usize..sector_end as usize];

            let mut compressed = compress_mpq_block(data);

            // encrypt the block if encryption was requested
            if let Some(key) = encryption_key.map(|k| k + i as u32) {
                encrypt_mpq_block(compressed.to_mut(), key);
            }

            writer.write_all(&compressed)?;

            // store the end of the current sector
            // which is also the start of the next sector if there is one
            let current_offset = writer.seek(SeekFrom::Current(0))?;
            offsets.push((current_offset - file_start) as u32);
        }

        let file_end = writer.seek(SeekFrom::Current(0))?;

        // write the sector offset table
        {
            let mut buf = vec![0u8; offsets.len() * 4];
            let mut cursor = buf.as_mut_slice();
            for offset in &offsets {
                cursor.write_u32::<LE>(*offset)?;
            }

            // encrypt the SOT if requested
            if let Some(key) = encryption_key.map(|k| k - 1) {
                encrypt_mpq_block(&mut buf, key);
            }

            writer.seek(SeekFrom::Start(file_start))?;
            writer.write_all(&buf)?;
        }

        // put the writer at the file end, so that we don't overwrite this file with subsequent writes
        writer.seek(SeekFrom::Start(file_end))?;

        file.offset = file_start - archive_start;
        file.compressed_size = file_end - file_start;

        Ok(())
    } else {
        // write each sector
        for i in 0..sector_count {
            let sector_start = i * sector_size;
            let sector_end = min((i + 1) * sector_size, file.contents.len() as u64);
            let data = &file.contents[sector_start as usize..sector_end as usize];
            let mut buf = Cow::Borrowed(data);

            // encrypt the block if encryption was requested
            if let Some(key) = encryption_key.map(|k| k + i as u32) {
                encrypt_mpq_block(buf.to_mut(), key);
            }

            writer.write_all(&buf)?;
        }

        let file_end = writer.seek(SeekFrom::Current(0))?;

        file.offset = file_start - archive_start;
        file.compressed_size = file_end - file_start;

        Ok(())
    }
}