libmwemu 0.24.4

x86 32/64-bit and system-internals emulator for securely emulating malware and other binaries.
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
use crate::emu::Emu;
use crate::loaders::pe::pe32::PE32;
use crate::loaders::pe::pe64::PE64;
use crate::maps::mem64::Permission;
use crate::windows::constants;
use crate::windows::peb::{peb32, peb64};

/// Round `$size` up to the next multiple of `$align`.
///
/// `$align` must be a nonzero power of two: the bit trick
/// `(size + align - 1) & !(align - 1)` is only correct under that contract,
/// and `align == 0` would underflow. Debug builds now assert it (the original
/// comment stated the intent but nothing enforced it). `$size` and `$align`
/// must share one integer type; each is evaluated exactly once.
macro_rules! align_up {
    ($size:expr, $align:expr) => {{
        let align = $align;
        debug_assert!(
            align != 0 && (align & (align - 1)) == 0,
            "align_up! requires a nonzero power-of-two alignment"
        );
        ($size + align - 1) & !(align - 1)
    }};
}

impl Emu {
    /// Prefer PE `ImageBase` when it is in canonical user space and does not overlap existing maps;
    /// otherwise fall back to `lib64_alloc` in `LIBS64_*`.
    fn pick_pe64_dll_base(&mut self, pe64: &PE64) -> u64 {
        // Upper bound of the canonical 64-bit user-mode address space.
        const USER_MAX: u64 = 0x7FFF_FFFF_FFFF;

        let preferred = pe64.opt.image_base;
        // The image occupies at least SizeOfImage, and never less than the raw size.
        let span = u64::max(pe64.opt.size_of_image as u64, pe64.size());

        // The preferred base is usable only when it sits above the null-page guard,
        // its end neither wraps nor leaves user space, and nothing is mapped there yet.
        let usable = preferred >= 0x10000
            && preferred
                .checked_add(span)
                .map_or(false, |end| end <= USER_MAX)
            && !self.maps.overlaps(preferred, span);

        if usable {
            preferred
        } else {
            // Relocatable fallback: carve a slot out of the LIBS64 arena.
            self.maps.lib64_alloc(pe64.size()).expect("out of memory")
        }
    }

    /// Complex function called from many places and with multiple purposes.
    /// This is called from load_code() if sample is PE32, but also from load_library etc.
    /// cyclic stuff: [load_pe] -> [iat-binding]  ->  [load_library] -> [load_pe]
    /// Powered by pe32.rs implementation.
    ///
    /// * `filename`   - path of the PE32 file to load and map.
    /// * `set_entry`  - true for the main image: sets RIP, `self.base` and the LDR entry.
    /// * `force_base` - when non-zero, map at exactly this base (panics on overlap).
    ///
    /// Returns `(base, pe_hdr_off)` where `pe_hdr_off` is the DOS `e_lfanew` offset
    /// of the PE header within the image.
    pub fn load_pe32(&mut self, filename: &str, set_entry: bool, force_base: u32) -> (u32, u32) {
        // Files under the bundled "windows/x86/" tree are treated as system maps.
        let is_maps = filename.contains("windows/x86/") ;
        let map_name = self.filename_to_mapname(filename);
        let filename2 = map_name;
        let mut pe32 = PE32::load(filename);
        let base: u32;

        log::trace!("loading pe32 {}", filename);

        /* .rsrc extraction tests
        if set_entry {
            log::trace!("get_resource_by_id");
            pe32.get_resource(Some(3), Some(0), None, None);
        }*/

        // 1. base logic

        // base is forced by libmwemu
        if force_base > 0 {
            if self.maps.overlaps(force_base as u64, pe32.size() as u64) {
                panic!("the forced base address overlaps");
            } else {
                base = force_base;
            }

        // base is setted by user
        } else if !is_maps
            && self.cfg.code_base_addr != constants::CFG_DEFAULT_BASE
            && !self.cfg.emulate_winapi
        {
            // NOTE(review): truncates the configured 64-bit base to u32 — assumes the
            // user supplied a 32-bit-range address for a PE32 sample; confirm.
            base = self.cfg.code_base_addr as u32;
            if self.maps.overlaps(base as u64, pe32.size() as u64) {
                panic!("the setted base address overlaps");
            }

        // base is setted by image base (if overlapps, alloc)
        } else {
            // user's program
            if set_entry {
                // Reject an ImageBase that reaches into the 32-bit library arena or
                // collides with an existing map; in that case relocate via the allocator.
                if pe32.opt.image_base >= constants::LIBS32_MIN as u32
                    || self
                        .maps
                        .overlaps(pe32.opt.image_base as u64, pe32.mem_size() as u64)
                {
                    base = self
                        .maps
                        .alloc(pe32.mem_size() as u64 + 0xff)
                        .expect("out of memory") as u32;
                } else {
                    base = pe32.opt.image_base;
                }

            // system library
            } else {
                base = self
                    .maps
                    .lib32_alloc(pe32.mem_size() as u64)
                    .expect("out of memory") as u32;
            }
        }

        if set_entry || self.cfg.emulate_winapi {
            // 2. pe binding
            if !is_maps || self.cfg.emulate_winapi {
                pe32.iat_binding(self, base);
                pe32.delay_load_binding(self, base);
                self.base = base as u64;
            }

            // 3. entry point logic
            if self.cfg.entry_point == constants::CFG_DEFAULT_BASE {
                self.regs_mut().rip = base as u64 + pe32.opt.address_of_entry_point as u64;
                log::trace!("entry point at 0x{:x}", self.regs().rip);
            } else {
                // A user-configured entry point overrides the PE's AddressOfEntryPoint.
                self.regs_mut().rip = self.cfg.entry_point;
                log::trace!(
                    "entry point at 0x{:x} but forcing it at 0x{:x}",
                    base as u64 + pe32.opt.address_of_entry_point as u64,
                    self.regs().rip
                );
            }

            log::trace!("base: 0x{:x}", base);
        }

        let sec_allign = pe32.opt.section_alignment;
        // 4. map pe and then sections
        let pemap = self
            .maps
            .create_map(
                &format!("{}.pe", filename2),
                base.into(),
                align_up!(pe32.opt.size_of_headers, sec_allign) as u64,
                Permission::READ_WRITE,
            )
            .expect("cannot create pe map");
        pemap.memcpy(pe32.get_headers(), pe32.opt.size_of_headers as usize);

        for i in 0..pe32.num_of_sections() {
            let ptr = pe32.get_section_ptr(i);
            let sect = pe32.get_section(i);
            // IMAGE_SCN_MEM_EXECUTE / _READ / _WRITE characteristic bits.
            let charactis = sect.characteristics;
            let is_exec = charactis & 0x20000000 != 0x0;
            let is_read = charactis & 0x40000000 != 0x0;
            let is_write = charactis & 0x80000000 != 0x0;
            let permission = Permission::from_flags(is_read, is_write, is_exec);

            // NOTE(review): unlike load_pe64 (which prefers virtual_size), this takes the
            // max of virtual and raw size — an oversized raw section can make the map
            // overlap the next section; confirm whether pe32 should match the pe64 logic.
            let sz: u64 = if sect.virtual_size > sect.size_of_raw_data {
                sect.virtual_size as u64
            } else {
                sect.size_of_raw_data as u64
            };

            if sz == 0 {
                log::trace!("size of section {} is 0", sect.get_name());
                continue;
            }

            // Map names cannot carry whitespace/control chars; sanitize the section name.
            let mut sect_name = sect
                .get_name()
                .replace(" ", "")
                .replace("\t", "")
                .replace("\x0a", "")
                .replace("\x0d", "");

            if sect_name.is_empty() {
                sect_name = format!("{:x}", sect.virtual_address);
            }

            let map = match self.maps.create_map(
                &format!("{}{}", filename2, sect_name),
                base as u64 + sect.virtual_address as u64,
                align_up!(sz, sec_allign as u64),
                permission,
            ) {
                Ok(m) => m,
                Err(_e) => {
                    // Malformed PEs can declare overlapping sections; skip instead of dying.
                    log::trace!(
                        "weird pe, skipping section {} {} because overlaps",
                        filename2,
                        sect.get_name()
                    );
                    continue;
                }
            };

            // `sz` is the max of virtual/raw size above, so raw data larger than the
            // map indicates a corrupt parse — fail loudly.
            if ptr.len() > sz as usize {
                panic!(
                    "overflow {} {} {} {}",
                    filename2,
                    sect.get_name(),
                    ptr.len(),
                    sz
                );
            }
            if !ptr.is_empty() {
                map.memcpy(ptr, ptr.len());
            }
        }

        // 5. ldr table entry creation and link
        if set_entry {
            let _space_addr =
                peb32::create_ldr_entry(self, base, self.regs().rip as u32, &filename2, 0, 0x2c1950);
            let exe_name = self.cfg.exe_name.clone();
            peb32::update_ldr_entry_base(&exe_name, base as u64, self);
        }

        // 6. return values
        let pe_hdr_off = pe32.dos.e_lfanew;
        self.pe32 = Some(pe32);
        (base, pe_hdr_off)
    }

    /// Maps a 64-bit DLL image (headers + sections) at a base chosen by
    /// `pick_pe64_dll_base` and applies base relocations. Unlike `load_pe64`
    /// this never touches `self.base`, the PC or the LDR list — it only creates
    /// the memory maps and hands back the parsed PE.
    ///
    /// Returns `(base, pe64)`.
    pub fn map_dll_pe64(&mut self, filename: &str) -> (u64, PE64) {
        let map_name = self.filename_to_mapname(filename);
        // NOTE(review): the path is lowercased here but not in load_pe64 — presumably
        // for case-insensitive DLL lookup on disk; confirm intent.
        let mut pe64 = PE64::load(&filename.to_lowercase());

        let base = self.pick_pe64_dll_base(&pe64);

        let sec_allign = pe64.opt.section_alignment;

        // Map the PE headers first, read/write, rounded up to the section alignment.
        let pemap = match self.maps.create_map(
            &format!("{}.pe", map_name),
            base,
            align_up!(pe64.opt.size_of_headers, sec_allign) as u64,
            Permission::READ_WRITE,
        ) {
            Ok(m) => m,
            Err(e) => {
                panic!("cannot create pe64 map: {}", e);
            }
        };
        pemap.memcpy(pe64.get_headers(), pe64.opt.size_of_headers as usize);
        for i in 0..pe64.num_of_sections() {
            let ptr = pe64.get_section_ptr(i);
            let sect = pe64.get_section(i);
            // IMAGE_SCN_MEM_EXECUTE / _READ / _WRITE characteristic bits.
            let charistic = sect.characteristics;
            let is_exec = charistic & 0x20000000 != 0x0;
            let is_read = charistic & 0x40000000 != 0x0;
            let is_write = charistic & 0x80000000 != 0x0;
            let permission = Permission::from_flags(is_read, is_write, is_exec);

            // Virtual size wins for the address-space footprint; fall back to the
            // raw size only when virtual_size is zero.
            let map_sz: u64 = if sect.virtual_size > 0 {
                sect.virtual_size as u64
            } else {
                sect.size_of_raw_data as u64
            };

            if map_sz == 0 {
                log::trace!("size of section {} is 0", sect.get_name());
                continue;
            }

            // Map names cannot carry whitespace/control chars; sanitize the section name.
            let mut sect_name = sect
                .get_name()
                .replace(" ", "")
                .replace("\t", "")
                .replace("\x0a", "")
                .replace("\x0d", "");

            if sect_name.is_empty() {
                sect_name = format!("{:x}", sect.virtual_address);
            }

            let map = match self.maps.create_map(
                &format!("{}{}", map_name, sect_name),
                base + sect.virtual_address as u64,
                align_up!(map_sz, sec_allign as u64),
                permission,
            ) {
                Ok(m) => m,
                Err(_e) => {
                    // Malformed PEs can declare overlapping sections; skip instead of dying.
                    log::trace!(
                        "weird pe, skipping section because overlaps {} {}",
                        map_name,
                        sect.get_name()
                    );
                    continue;
                }
            };

            // Copy only the bytes that both exist on disk and fit in the virtual mapping.
            let copy_len = (sect.size_of_raw_data as usize).min(map_sz as usize).min(ptr.len());
            if copy_len > 0 {
                map.memcpy(&ptr[..copy_len], copy_len);
            }
        }

        // Relocations run after all sections are mapped so fixups can land anywhere
        // in the image.
        pe64.apply_relocations(self, base);

        (base, pe64)
    }

    /// Complex function called from many places and with multiple purposes.
    /// This is called from load_code() if sample is PE64, but also from load_library etc.
    /// cyclic stuff: [load_pe] -> [iat-binding]  ->  [load_library] -> [load_pe]
    /// Powered by pe64.rs implementation.
    ///
    /// * `filename`   - path of the PE64 file to load and map.
    /// * `set_entry`  - true for the main image: sets PC, `self.base` and PEB/LDR state.
    /// * `force_base` - when non-zero, map at exactly this base (panics on overlap).
    ///
    /// Returns `(base, pe_hdr_off)` where `pe_hdr_off` is the DOS `e_lfanew` offset.
    pub fn load_pe64(&mut self, filename: &str, set_entry: bool, force_base: u64) -> (u64, u32) {
        // Files under the bundled system trees are treated as system maps.
        let is_maps = filename.contains("windows/x86_64/") || filename.contains("windows/aarch64/") ;
        let map_name = self.filename_to_mapname(filename);
        let filename2 = map_name;
        let mut pe64 = PE64::load(filename);
        let base: u64;

        // 1. base logic

        // base is setted by libmwemu
        if force_base > 0 {
            if self.maps.overlaps(force_base, pe64.size()) {
                panic!("the forced base address overlaps");
            } else {
                base = force_base;
            }

        // base is setted by user
        } else if !is_maps && self.cfg.code_base_addr != constants::CFG_DEFAULT_BASE {
            base = self.cfg.code_base_addr;
            if self.maps.overlaps(base, pe64.size()) {
                panic!("the setted base address overlaps");
            }

        // base is setted by image base (if overlapps, alloc)
        } else {
            // user's program
            if set_entry {
                // An ImageBase inside the 64-bit library arena, or one that collides
                // with an existing map, forces relocation via the allocator.
                if pe64.opt.image_base >= constants::LIBS64_MIN {
                    base = self.maps.alloc(pe64.size() + 0xff).expect("out of memory");
                } else if self.maps.overlaps(pe64.opt.image_base, pe64.size()) {
                    base = self.maps.alloc(pe64.size() + 0xff).expect("out of memory");
                } else {
                    base = pe64.opt.image_base;
                }

            // system library
            } else {
                base = self.pick_pe64_dll_base(&pe64);
            }
        }

        // Only the main image owns `self.base` and the initial RIP. System DLLs loaded
        // via `load_library` (e.g. NtMapViewOfSection's KnownDll path under --ssdt) must
        // never clobber these — otherwise the post-LdrInitializeThunk IAT binding in
        // loaders.rs reads the DLL's base instead of the EXE's, and the EXE's IAT stays
        // unbound (call rax → rax=0 → crash).
        if set_entry {
            self.base = base;

            // 2. entry point logic (relocs + IAT run after PE maps exist; see step 4b below)
            if self.cfg.entry_point == constants::CFG_DEFAULT_BASE {
                self.set_pc(base + pe64.opt.address_of_entry_point as u64);
                log::trace!("entry point at 0x{:x}", self.pc());
            } else {
                // The -a flag overrides the PE's AddressOfEntryPoint.
                self.set_pc(self.cfg.entry_point);
                log::trace!(
                    "entry point at 0x{:x} but forcing it at 0x{:x} by -a flag",
                    base + pe64.opt.address_of_entry_point as u64,
                    self.pc()
                );
            }
            log::trace!("base: 0x{:x}", base);
        }

        let sec_allign = pe64.opt.section_alignment;
        // 4. map pe and then sections
        let pemap = match self.maps.create_map(
            &format!("{}.pe", filename2),
            base,
            align_up!(pe64.opt.size_of_headers, sec_allign) as u64,
            Permission::READ_WRITE,
        ) {
            Ok(m) => m,
            Err(e) => {
                panic!("cannot create pe64 map: {}", e);
            }
        };
        pemap.memcpy(pe64.get_headers(), pe64.opt.size_of_headers as usize);

        for i in 0..pe64.num_of_sections() {
            let ptr = pe64.get_section_ptr(i);
            let sect = pe64.get_section(i);

            // IMAGE_SCN_MEM_EXECUTE / _READ / _WRITE characteristic bits.
            let charistic = sect.characteristics;
            let is_exec = charistic & 0x20000000 != 0x0;
            let is_read = charistic & 0x40000000 != 0x0;
            let is_write = charistic & 0x80000000 != 0x0;
            let permission = Permission::from_flags(is_read, is_write, is_exec);

            // Virtual size determines how much address space the section occupies.
            // Raw size is the on-disk data size and may exceed virtual size for
            // packed/overlay sections — using raw size would create an oversized map
            // that overlaps subsequent sections.
            let map_sz: u64 = if sect.virtual_size > 0 {
                sect.virtual_size as u64
            } else {
                sect.size_of_raw_data as u64
            };

            if map_sz == 0 {
                log::trace!("size of section {} is 0", sect.get_name());
                continue;
            }

            // Map names cannot carry whitespace/control chars; sanitize the section name.
            let mut sect_name = sect
                .get_name()
                .replace(" ", "")
                .replace("\t", "")
                .replace("\x0a", "")
                .replace("\x0d", "");

            if sect_name.is_empty() {
                sect_name = format!("{:x}", sect.virtual_address);
            }

            let map = match self.maps.create_map(
                &format!("{}{}", filename2, sect_name),
                base + sect.virtual_address as u64,
                align_up!(map_sz, sec_allign as u64),
                permission,
            ) {
                Ok(m) => m,
                Err(_e) => {
                    // Malformed PEs can declare overlapping sections; skip instead of dying.
                    log::trace!(
                        "weird pe, skipping section because overlaps {} {}",
                        filename2,
                        sect.get_name()
                    );
                    continue;
                }
            };

            // Copy only as many bytes as fit in the virtual mapping.
            let copy_len = (sect.size_of_raw_data as usize).min(map_sz as usize).min(ptr.len());

            if copy_len > 0 {
                map.memcpy(&ptr[..copy_len], copy_len);
            }
        }

        // 4b. Base relocs on the mapped image (all load paths, including DLL without emulate_winapi).
        pe64.apply_relocations(self, base);

        if set_entry || self.cfg.emulate_winapi {
            if !is_maps || self.cfg.emulate_winapi {
                // In SSDT + LdrInitializeThunk bootstrap mode, skip eager IAT binding for the main image.
                // NOTE(review): `emulate_winapi && emulate_winapi` repeats the same flag —
                // the second operand was probably meant to be a different mode flag
                // (the comment above mentions SSDT); confirm the intended condition.
                if !(set_entry && self.cfg.emulate_winapi && self.cfg.emulate_winapi) {
                    pe64.iat_binding(self, base);
                    pe64.delay_load_binding(self, base);
                }
            }
        }

        // 5. ldr table entry creation and link
        if set_entry {
            // NOTE(review): same duplicated-flag pattern as above (`emulate_winapi`
            // ANDed with itself) in both conditions below — confirm intent.
            if !(self.cfg.emulate_winapi && self.cfg.emulate_winapi) {
                let _space_addr =
                    peb64::create_ldr_entry(self, base, self.pc(), &filename2, 0, 0x2c1950);
                let exe_name = self.cfg.exe_name.clone();
                peb64::update_ldr_entry_base(&exe_name, base, self);
            }
            if self.cfg.emulate_winapi && self.cfg.emulate_winapi {
                peb64::update_peb_image_base(self, base);
            }
        }

        // 6. return values
        let pe_hdr_off = pe64.dos.e_lfanew;
        self.pe64 = Some(pe64);
        (base, pe_hdr_off)
    }
}