1use core::slice;
4use std::cell::RefCell;
5use std::cmp::min;
6use std::collections::HashMap;
7use std::fmt::Debug;
8use std::fs::File;
9use std::ops::Range;
10use std::path::Path;
11use std::{io, mem};
12
13use crate::bits::Bits;
14use crate::error::{PxeNotPresent, Result};
15use crate::gxa::Gxa;
16use crate::map::{MappedFileReader, Reader};
17use crate::structs::{
18 BmpHeader64, Context, DUMP_HEADER64_EXPECTED_SIGNATURE, DUMP_HEADER64_EXPECTED_VALID_DUMP,
19 DumpType, ExceptionRecord64, FullRdmpHeader64, Header64, KdDebuggerData64, KernelRdmpHeader64,
20 LdrDataTableEntry, ListEntry, PageKind, PfnRange, PhysmemDesc, PhysmemMap, PhysmemRun,
21 UnicodeString, read_struct,
22};
23use crate::{AddrTranslationError, Gpa, Gva, KdmpParserError, Pfn, Pxe};
24
/// Result of a virtual-to-physical address translation: the backing page
/// frame, the offset of the address within that page, and attributes
/// accumulated over every level of the page-table walk.
#[derive(Debug)]
pub struct VirtTranslationDetails {
    /// Page Frame Number taken from the last (leaf) PXE of the walk.
    pub pfn: Pfn,
    /// Byte offset of the translated address inside its page.
    pub offset: u64,
    /// Page kind, derived from how many levels the walk traversed.
    pub page_kind: PageKind,
    /// True iff every PXE in the walk is writable.
    pub writable: bool,
    /// True iff every PXE in the walk is executable.
    pub executable: bool,
    /// True iff every PXE in the walk is user-accessible.
    pub user_accessible: bool,
}
46
47impl VirtTranslationDetails {
48 pub fn new(pxes: &[Pxe], gva: Gva) -> Self {
49 let writable = pxes.iter().all(Pxe::writable);
50 let executable = pxes.iter().all(Pxe::executable);
51 let user_accessible = pxes.iter().all(Pxe::user_accessible);
52 let pfn = pxes.last().map(|p| p.pfn).expect("at least one pxe");
53 let page_kind = match pxes.len() {
54 4 => PageKind::Normal,
55 3 => PageKind::Large,
56 2 => PageKind::Huge,
57 _ => unreachable!("pxes len should be between 2 and 4"),
58 };
59 let offset = page_kind.page_offset(gva.u64());
60
61 Self {
62 pfn,
63 offset,
64 page_kind,
65 writable,
66 executable,
67 user_accessible,
68 }
69 }
70
71 pub fn gpa(&self) -> Gpa {
72 self.pfn.gpa_with_offset(self.offset)
73 }
74}
75
76fn gpa_from_bitmap(bitmap_idx: u64, bit_idx: usize) -> Option<Gpa> {
77 let pfn = Pfn::new(
78 bitmap_idx
79 .checked_mul(8)?
80 .checked_add(bit_idx.try_into().ok()?)?,
81 );
82
83 Some(pfn.gpa())
84}
85
86fn gpa_from_pfn_range(pfn_range: &PfnRange, page_idx: u64) -> Option<Gpa> {
87 let offset = page_idx.checked_mul(PageKind::Normal.size())?;
88
89 Some(Pfn::new(pfn_range.page_file_number).gpa_with_offset(offset))
90}
91
/// Abstraction over the pointer width (`u32` or `u64`) used by structures
/// read out of the dump, so list/module-walking code can be written once and
/// instantiated for both 32-bit and 64-bit layouts.
trait PtrSize: Sized + Copy + Into<u64> + From<u32> {
    /// Checked addition; `None` on overflow.
    fn checked_add(self, rhs: Self) -> Option<Self>;
}
97
/// Implements [`PtrSize`] for the listed integer types by forwarding to the
/// type's inherent `checked_add`.
macro_rules! impl_checked_add {
    ($($ty:ident),*) => {
        $(impl PtrSize for $ty {
            fn checked_add(self, rhs: $ty) -> Option<Self> {
                $ty::checked_add(self, rhs)
            }
        })*
    };
}

impl_checked_add!(u32, u64);
109
/// Walks a doubly-linked list of `LDR_DATA_TABLE_ENTRY`-style entries starting
/// at the `LIST_ENTRY` head `head`, and builds a map of module address ranges
/// to module names.
///
/// Returns `Ok(None)` as soon as any address involved fails to translate
/// (best-effort extraction), and `Ok(Some(map))` once the walk comes back
/// around to `head`.
///
/// NOTE(review): the loop terminates only when `flink` leads back to `head`;
/// a corrupted/cyclic list that never does so would loop until a read fails —
/// confirm whether an iteration cap is warranted.
fn try_read_module_map<P>(parser: &mut KernelDumpParser, head: Gva) -> Result<Option<ModuleMap>>
where
    P: PtrSize,
{
    let mut modules = ModuleMap::new();
    // Read the list head; its `flink` points at the first real entry.
    let Some(entry) = parser.try_virt_read_struct::<ListEntry<P>>(head)? else {
        return Ok(None);
    };

    let mut entry_addr = Gva::new(entry.flink.into());
    while entry_addr != head {
        let Some(data) = parser.try_virt_read_struct::<LdrDataTableEntry<P>>(entry_addr)? else {
            return Ok(None);
        };

        // Prefer the full DLL name; fall back to the base name if the full
        // name isn't readable.
        let Some(dll_name) = parser
            .try_virt_read_unicode_string::<P>(&data.full_dll_name)
            .and_then(|s| {
                if s.is_none() {
                    parser.try_virt_read_unicode_string::<P>(&data.base_dll_name)
                } else {
                    Ok(s)
                }
            })?
        else {
            return Ok(None);
        };

        // Module spans [dll_base, dll_base + size_of_image).
        let dll_end_addr = data
            .dll_base
            .checked_add(data.size_of_image.into())
            .ok_or(KdmpParserError::Overflow("module address"))?;
        let at = Gva::new(data.dll_base.into())..Gva::new(dll_end_addr.into());
        let inserted = modules.insert(at, dll_name);
        // Two modules should never occupy the exact same range.
        debug_assert!(inserted.is_none());

        // Follow the forward link to the next entry.
        entry_addr = Gva::new(data.in_load_order_links.flink.into());
    }

    Ok(Some(modules))
}
160
/// Tries to build the kernel module map by walking the module list whose head
/// address is stored in the dump headers (`ps_loaded_module_list`), using
/// 64-bit pointer layout. Returns `Ok(None)` if any address fails to
/// translate.
fn try_extract_kernel_modules(parser: &mut KernelDumpParser) -> Result<Option<ModuleMap>> {
    try_read_module_map::<u64>(parser, parser.headers().ps_loaded_module_list.into())
}
166
167fn try_find_prcb(
170 parser: &mut KernelDumpParser,
171 kd_debugger_data_block: &KdDebuggerData64,
172) -> Result<Option<Gva>> {
173 let mut processor_block = kd_debugger_data_block.ki_processor_block;
174 for _ in 0..parser.headers().number_processors {
175 let Some(kprcb_addr) = parser.try_virt_read_struct::<u64>(processor_block.into())? else {
177 return Ok(None);
178 };
179
180 let kprcb_context_addr = kprcb_addr
182 .checked_add(kd_debugger_data_block.offset_prcb_context.into())
183 .ok_or(KdmpParserError::Overflow("offset_prcb"))?;
184
185 let Some(kprcb_context_addr) =
187 parser.try_virt_read_struct::<u64>(kprcb_context_addr.into())?
188 else {
189 return Ok(None);
190 };
191
192 let Some(kprcb_context) =
194 parser.try_virt_read_struct::<Context>(kprcb_context_addr.into())?
195 else {
196 return Ok(None);
197 };
198
199 let kprcb_context = Box::new(kprcb_context);
201 if kprcb_context.rsp == parser.context_record().rsp {
202 return Ok(Some(kprcb_addr.into()));
205 }
206
207 processor_block = processor_block
209 .checked_add(mem::size_of::<u64>() as _)
210 .ok_or(KdmpParserError::Overflow("kprcb ptr"))?;
211 }
212
213 Ok(None)
214}
215
/// Tries to build the user-module map of the thread that was current on the
/// processor identified by `prcb_addr`, by chasing pointers from the KPRCB to
/// the TEB, then PEB, then the loader's module list. Also merges in the
/// WOW64 (32-bit) module list when one exists.
///
/// Every step is best-effort: a failed translation yields `Ok(None)` (or the
/// 64-bit-only map once that much has been gathered).
///
/// NOTE(review): the 0x60/0x18/0x10/0x2000/0x30/0xc constants below are
/// hard-coded structure offsets (TEB->PEB, PEB->Ldr, Ldr->InLoadOrder list,
/// TEB->TEB32, TEB32->PEB32, PEB32->Ldr32); they are not read from the dump —
/// confirm they hold for the targeted Windows versions.
fn try_extract_user_modules(
    parser: &mut KernelDumpParser,
    kd_debugger_data_block: &KdDebuggerData64,
    prcb_addr: Gva,
) -> Result<Option<ModuleMap>> {
    // KPRCB -> current KTHREAD.
    let kthread_addr = prcb_addr
        .u64()
        .checked_add(kd_debugger_data_block.offset_prcb_current_thread.into())
        .ok_or(KdmpParserError::Overflow("offset prcb current thread"))?;
    let Some(kthread_addr) = parser.try_virt_read_struct::<u64>(kthread_addr.into())? else {
        return Ok(None);
    };

    // KTHREAD -> TEB.
    let teb_addr = kthread_addr
        .checked_add(kd_debugger_data_block.offset_kthread_teb.into())
        .ok_or(KdmpParserError::Overflow("offset kthread teb"))?;
    let Some(teb_addr) = parser.try_virt_read_struct::<u64>(teb_addr.into())? else {
        return Ok(None);
    };

    // A system thread has no TEB; nothing to extract.
    if teb_addr == 0 {
        return Ok(None);
    }

    // TEB -> PEB (hard-coded offset, see note above).
    let peb_offset = 0x60;
    let peb_addr = teb_addr
        .checked_add(peb_offset)
        .ok_or(KdmpParserError::Overflow("peb offset"))?;
    let Some(peb_addr) = parser.try_virt_read_struct::<u64>(peb_addr.into())? else {
        return Ok(None);
    };

    // PEB -> loader data.
    let ldr_offset = 0x18;
    let peb_ldr_addr = peb_addr
        .checked_add(ldr_offset)
        .ok_or(KdmpParserError::Overflow("ldr offset"))?;
    let Some(peb_ldr_addr) = parser.try_virt_read_struct::<u64>(peb_ldr_addr.into())? else {
        return Ok(None);
    };

    // Loader data -> in-load-order module list head.
    let in_load_order_module_list_offset = 0x10;
    let module_list_entry_addr = peb_ldr_addr
        .checked_add(in_load_order_module_list_offset)
        .ok_or(KdmpParserError::Overflow(
            "in load order module list offset",
        ))?;

    // Walk the 64-bit module list.
    let Some(mut modules) = try_read_module_map::<u64>(parser, module_list_entry_addr.into())?
    else {
        return Ok(None);
    };

    // WOW64: a 32-bit TEB may follow the 64-bit one; from there chase the
    // 32-bit PEB and loader list. Failures past this point return the 64-bit
    // map gathered so far instead of `None`.
    let teb32_offset = 0x2_000;
    let teb32_addr = teb_addr
        .checked_add(teb32_offset)
        .ok_or(KdmpParserError::Overflow("teb32 offset"))?;
    let peb32_offset = 0x30;
    let peb32_addr = teb32_addr
        .checked_add(peb32_offset)
        .ok_or(KdmpParserError::Overflow("peb32 offset"))?;
    let Some(peb32_addr) = parser.try_virt_read_struct::<u32>(peb32_addr.into())? else {
        return Ok(Some(modules));
    };

    // 32-bit PEB -> 32-bit loader data.
    let ldr_offset = 0x0c;
    let peb32_ldr_addr = peb32_addr
        .checked_add(ldr_offset)
        .ok_or(KdmpParserError::Overflow("ldr32 offset"))?;
    let Some(peb32_ldr_addr) =
        parser.try_virt_read_struct::<u32>(Gva::new(peb32_ldr_addr.into()))?
    else {
        return Ok(Some(modules));
    };

    // 32-bit loader data -> in-load-order list head.
    let in_load_order_module_list_offset = 0xc;
    let module_list_entry_addr = peb32_ldr_addr
        .checked_add(in_load_order_module_list_offset)
        .ok_or(KdmpParserError::Overflow(
            "in load order module list offset",
        ))?;

    // Walk the 32-bit module list with 32-bit pointer layout.
    let Some(modules32) =
        try_read_module_map::<u32>(parser, Gva::new(module_list_entry_addr.into()))?
    else {
        return Ok(Some(modules));
    };

    // Merge both maps; 32-bit entries win on (unlikely) identical ranges.
    modules.extend(modules32);

    Ok(Some(modules))
}
349
350fn filter_addr_translation_err<T>(res: Result<T>) -> Result<Option<T>> {
355 match res {
356 Ok(o) => Ok(Some(o)),
357 Err(KdmpParserError::AddrTranslation(..)) => Ok(None),
359 Err(e) => Err(e),
360 }
361}
362
363pub type ModuleMap = HashMap<Range<Gva>, String>;
366
/// A parser for Windows kernel crash-dumps.
pub struct KernelDumpParser {
    /// Which flavor of dump this is.
    dump_type: DumpType,
    /// The thread context record deserialized from the dump headers.
    context: Box<Context>,
    /// The dump headers.
    headers: Box<Header64>,
    /// Map of physical page addresses to the file offset of their content.
    physmem: PhysmemMap,
    /// The underlying reader; wrapped in `RefCell` so that read/seek methods
    /// can take `&self`.
    reader: RefCell<Box<dyn Reader>>,
    /// Kernel modules extracted at construction time (best-effort, may be
    /// empty).
    kernel_modules: ModuleMap,
    /// User modules of the current process at crash time (best-effort, may be
    /// empty).
    user_modules: ModuleMap,
}
390
impl Debug for KernelDumpParser {
    // Only the dump type is shown; the remaining fields are large buffers /
    // maps or a trait object that isn't `Debug`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("KernelDumpParser")
            .field("dump_type", &self.dump_type)
            .finish()
    }
}
398
399impl KernelDumpParser {
    /// Builds a parser from an arbitrary [`Reader`].
    ///
    /// Validates the header magics, builds the physical memory map for the
    /// dump's type, deserializes the context record, then — best-effort —
    /// extracts kernel and user module maps. Any translation failure during
    /// module extraction returns the parser as-is with whatever maps were
    /// gathered.
    pub fn with_reader(mut reader: impl Reader + 'static) -> Result<Self> {
        // Read and sanity-check the dump headers.
        let headers = Box::new(read_struct::<Header64>(&mut reader)?);
        if headers.signature != DUMP_HEADER64_EXPECTED_SIGNATURE {
            return Err(KdmpParserError::InvalidSignature(headers.signature));
        }

        if headers.valid_dump != DUMP_HEADER64_EXPECTED_VALID_DUMP {
            return Err(KdmpParserError::InvalidValidDump(headers.valid_dump));
        }

        // Make sure the dump type is one we support.
        let dump_type = DumpType::try_from(headers.dump_type)?;

        // Build the GPA -> file-offset map; this consumes the reader up to the
        // page data.
        let physmem = Self::build_physmem(dump_type, &headers, &mut reader)?;

        // Deserialize the context record embedded in the headers.
        let context = Box::new(read_struct(&mut io::Cursor::new(
            headers.context_record_buffer.as_slice(),
        ))?);

        let reader: RefCell<Box<dyn Reader>> = RefCell::new(Box::new(reader));
        let mut parser = Self {
            dump_type,
            context,
            headers,
            physmem,
            reader,
            kernel_modules: Default::default(),
            user_modules: Default::default(),
        };

        // Everything below is best-effort enrichment: bail out early with the
        // parser on any unreadable address.
        if let Some(kernel_modules) = try_extract_kernel_modules(&mut parser)? {
            parser.kernel_modules.extend(kernel_modules);
        }

        let Some(kd_debugger_data_block) = parser.try_virt_read_struct::<KdDebuggerData64>(
            parser.headers().kd_debugger_data_block.into(),
        )?
        else {
            return Ok(parser);
        };
        // Boxed because the structure is large.
        let kd_debugger_data_block = Box::new(kd_debugger_data_block);

        let Some(prcb_addr) = try_find_prcb(&mut parser, &kd_debugger_data_block)? else {
            return Ok(parser);
        };

        let Some(user_modules) =
            try_extract_user_modules(&mut parser, &kd_debugger_data_block, prcb_addr)?
        else {
            return Ok(parser);
        };

        parser.user_modules.extend(user_modules);

        Ok(parser)
    }
469
470 pub fn new(dump_path: impl AsRef<Path>) -> Result<Self> {
471 let size = dump_path.as_ref().metadata()?.len();
474 const FOUR_GIGS: u64 = 1_024 * 1_024 * 1_024 * 4;
475
476 match size {
477 0..=FOUR_GIGS => {
478 let mapped_file = MappedFileReader::new(dump_path.as_ref())?;
479
480 Self::with_reader(mapped_file)
481 }
482 _ => {
483 let file = File::open(dump_path)?;
484
485 Self::with_reader(file)
486 }
487 }
488 }
489
490 pub fn physmem(&self) -> impl ExactSizeIterator<Item = (Gpa, u64)> + '_ {
494 self.physmem.iter().map(|(&k, &v)| (k, v))
495 }
496
497 pub fn kernel_modules(&self) -> impl ExactSizeIterator<Item = (&Range<Gva>, &str)> + '_ {
499 self.kernel_modules.iter().map(|(k, v)| (k, v.as_str()))
500 }
501
502 pub fn user_modules(&self) -> impl ExactSizeIterator<Item = (&Range<Gva>, &str)> + '_ {
504 self.user_modules.iter().map(|(k, v)| (k, v.as_str()))
505 }
506
    /// Which type of dump this is.
    pub fn dump_type(&self) -> DumpType {
        self.dump_type
    }
511
    /// The dump headers.
    pub fn headers(&self) -> &Header64 {
        &self.headers
    }
516
    /// The exception record embedded in the dump headers.
    pub fn exception_record(&self) -> &ExceptionRecord64 {
        &self.headers.exception
    }
521
    /// The thread context record captured in the dump.
    pub fn context_record(&self) -> &Context {
        &self.context
    }
526
527 pub fn phys_translate(&self, gpa: Gpa) -> Result<u64> {
530 let offset = *self
531 .physmem
532 .get(&gpa.page_align())
533 .ok_or(AddrTranslationError::Phys(gpa))?;
534
535 offset
536 .checked_add(gpa.offset())
537 .ok_or(KdmpParserError::Overflow("w/ gpa offset"))
538 }
539
540 pub fn phys_read(&self, gpa: Gpa, buf: &mut [u8]) -> Result<usize> {
542 let mut amount_left = buf.len();
544 let mut total_read = 0;
546 let mut addr = gpa;
548 while amount_left > 0 {
550 let phy_offset = self.phys_translate(addr)?;
552 self.seek(io::SeekFrom::Start(phy_offset))?;
554 let left_in_page = (PageKind::Normal.size() - gpa.offset()) as usize;
559 let amount_wanted = min(amount_left, left_in_page);
560 let slice = &mut buf[total_read..total_read + amount_wanted];
562 let amount_read = self.read(slice)?;
564 total_read += amount_read;
566 amount_left -= amount_read;
567 if amount_read != amount_wanted {
569 return Ok(total_read);
570 }
571
572 addr = addr.next_aligned_page();
574 }
575
576 Ok(total_read)
578 }
579
580 pub fn phys_read_exact(&self, gpa: Gpa, buf: &mut [u8]) -> Result<()> {
583 let len = self.phys_read(gpa, buf)?;
585
586 if len == buf.len() {
588 Ok(())
589 }
590 else {
592 Err(KdmpParserError::PartialPhysRead)
593 }
594 }
595
    /// Reads a `T` from physical memory at `gpa` by filling its bytes
    /// directly.
    ///
    /// NOTE(review): like the other raw-struct readers in this file, this
    /// assumes any bit pattern is a valid `T` — only use it with
    /// plain-old-data types.
    pub fn phys_read_struct<T>(&self, gpa: Gpa) -> Result<T> {
        let mut t = mem::MaybeUninit::uninit();
        let size_of_t = mem::size_of_val(&t);
        // SAFETY: the slice covers exactly the bytes of `t`, which outlives
        // the borrow.
        let slice_over_t =
            unsafe { slice::from_raw_parts_mut(t.as_mut_ptr() as *mut u8, size_of_t) };

        self.phys_read_exact(gpa, slice_over_t)?;

        // SAFETY: `phys_read_exact` filled every byte of `t` (POD assumption
        // above).
        Ok(unsafe { t.assume_init() })
    }
607
    /// Translates `gva` using the directory table base found in the dump
    /// headers.
    pub fn virt_translate(&self, gva: Gva) -> Result<VirtTranslationDetails> {
        self.virt_translate_with_dtb(gva, Gpa::new(self.headers.directory_table_base))
    }
612
613 pub fn virt_translate_with_dtb(&self, gva: Gva, dtb: Gpa) -> Result<VirtTranslationDetails> {
616 let pml4_base = dtb.page_align();
618 let pml4e_gpa = Gpa::new(pml4_base.u64() + (gva.pml4e_idx() * 8));
619 let pml4e = Pxe::from(self.phys_read_struct::<u64>(pml4e_gpa)?);
620 if !pml4e.present() {
621 return Err(AddrTranslationError::Virt(gva, PxeNotPresent::Pml4e).into());
622 }
623
624 let pdpt_base = pml4e.pfn.gpa();
625 let pdpte_gpa = Gpa::new(pdpt_base.u64() + (gva.pdpe_idx() * 8));
626 let pdpte = Pxe::from(self.phys_read_struct::<u64>(pdpte_gpa)?);
627 if !pdpte.present() {
628 return Err(AddrTranslationError::Virt(gva, PxeNotPresent::Pdpte).into());
629 }
630
631 let pd_base = pdpte.pfn.gpa();
635 if pdpte.large_page() {
636 return Ok(VirtTranslationDetails::new(&[pml4e, pdpte], gva));
637 }
638
639 let pde_gpa = Gpa::new(pd_base.u64() + (gva.pde_idx() * 8));
640 let pde = Pxe::from(self.phys_read_struct::<u64>(pde_gpa)?);
641 if !pde.present() {
642 return Err(AddrTranslationError::Virt(gva, PxeNotPresent::Pde).into());
643 }
644
645 let pt_base = pde.pfn.gpa();
649 if pde.large_page() {
650 return Ok(VirtTranslationDetails::new(&[pml4e, pdpte, pde], gva));
651 }
652
653 let pte_gpa = Gpa::new(pt_base.u64() + (gva.pte_idx() * 8));
654 let pte = Pxe::from(self.phys_read_struct::<u64>(pte_gpa)?);
655 if !pte.present() {
656 if !pte.transition() {
659 return Err(AddrTranslationError::Virt(gva, PxeNotPresent::Pte).into());
660 }
661 }
662
663 Ok(VirtTranslationDetails::new(&[pml4e, pdpte, pde, pte], gva))
664 }
665
    /// Reads virtual memory at `gva` using the directory table base found in
    /// the dump headers.
    pub fn virt_read(&self, gva: Gva, buf: &mut [u8]) -> Result<usize> {
        self.virt_read_with_dtb(gva, buf, Gpa::new(self.headers.directory_table_base))
    }
670
    /// Reads virtual memory starting at `gva` into `buf`, translating through
    /// the page tables rooted at `dtb` and crossing page boundaries as
    /// needed.
    ///
    /// Returns the number of bytes read, which can be smaller than
    /// `buf.len()` when a later page fails to translate; a translation
    /// failure on the very first page is returned as an error.
    ///
    /// NOTE(review): for large/huge pages `left_in_page` can exceed the
    /// stride of `next_aligned_page`; confirm `addr` stays in sync with
    /// `total_read` across such pages.
    pub fn virt_read_with_dtb(&self, gva: Gva, buf: &mut [u8], dtb: Gpa) -> Result<usize> {
        // Bytes still wanted by the caller.
        let mut amount_left = buf.len();
        // Bytes successfully read so far.
        let mut total_read = 0;
        // Cursor into guest virtual memory.
        let mut addr = gva;
        while amount_left > 0 {
            // Translate the current address; if a page past the first is
            // unmapped, return the partial read instead of an error.
            let translation = match self.virt_translate_with_dtb(addr, dtb) {
                Ok(tr) => tr,
                Err(e) => {
                    if total_read > 0 {
                        return Ok(total_read);
                    }

                    return Err(e);
                }
            };

            // Clamp to what is left in the (possibly large/huge) page.
            let left_in_page = (translation.page_kind.size() - translation.offset) as usize;
            let amount_wanted = min(amount_left, left_in_page);
            let slice = &mut buf[total_read..total_read + amount_wanted];

            let amount_read = self.phys_read(translation.gpa(), slice)?;
            total_read += amount_read;
            amount_left -= amount_read;
            // Short physical read: return what we have.
            if amount_read != amount_wanted {
                return Ok(total_read);
            }

            // Move on to the next page.
            addr = addr.next_aligned_page();
        }

        Ok(total_read)
    }
722
    /// Like [`Self::virt_read`] but returns `Ok(None)` instead of an error on
    /// an address-translation failure.
    pub fn try_virt_read(&self, gva: Gva, buf: &mut [u8]) -> Result<Option<usize>> {
        filter_addr_translation_err(self.virt_read(gva, buf))
    }
729
    /// Like [`Self::virt_read_with_dtb`] but returns `Ok(None)` instead of an
    /// error on an address-translation failure.
    pub fn try_virt_read_with_dtb(
        &self,
        gva: Gva,
        buf: &mut [u8],
        dtb: Gpa,
    ) -> Result<Option<usize>> {
        filter_addr_translation_err(self.virt_read_with_dtb(gva, buf, dtb))
    }
742
    /// Reads exactly `buf.len()` bytes of virtual memory at `gva` using the
    /// headers' directory table base.
    pub fn virt_read_exact(&self, gva: Gva, buf: &mut [u8]) -> Result<()> {
        self.virt_read_exact_with_dtb(gva, buf, Gpa::new(self.headers.directory_table_base))
    }
747
748 pub fn virt_read_exact_with_dtb(&self, gva: Gva, buf: &mut [u8], dtb: Gpa) -> Result<()> {
751 let len = self.virt_read_with_dtb(gva, buf, dtb)?;
753
754 if len == buf.len() {
756 Ok(())
757 }
758 else {
760 Err(KdmpParserError::PartialVirtRead)
761 }
762 }
763
    /// Like [`Self::virt_read_exact`] but returns `Ok(None)` instead of an
    /// error on an address-translation failure.
    pub fn try_virt_read_exact(&self, gva: Gva, buf: &mut [u8]) -> Result<Option<()>> {
        self.try_virt_read_exact_with_dtb(gva, buf, Gpa::new(self.headers.directory_table_base))
    }
770
    /// Like [`Self::virt_read_exact_with_dtb`] but returns `Ok(None)` instead
    /// of an error on an address-translation failure.
    pub fn try_virt_read_exact_with_dtb(
        &self,
        gva: Gva,
        buf: &mut [u8],
        dtb: Gpa,
    ) -> Result<Option<()>> {
        filter_addr_translation_err(self.virt_read_exact_with_dtb(gva, buf, dtb))
    }
783
    /// Reads a `T` from virtual memory at `gva` using the headers' directory
    /// table base.
    pub fn virt_read_struct<T>(&self, gva: Gva) -> Result<T> {
        self.virt_read_struct_with_dtb(gva, Gpa::new(self.headers.directory_table_base))
    }
788
    /// Reads a `T` from virtual memory at `gva` through the page tables
    /// rooted at `dtb`, filling its bytes directly.
    ///
    /// NOTE(review): this assumes any bit pattern is a valid `T` — only use
    /// it with plain-old-data types.
    pub fn virt_read_struct_with_dtb<T>(&self, gva: Gva, dtb: Gpa) -> Result<T> {
        let mut t = mem::MaybeUninit::uninit();
        let size_of_t = mem::size_of_val(&t);
        // SAFETY: the slice covers exactly the bytes of `t`, which outlives
        // the borrow.
        let slice_over_t =
            unsafe { slice::from_raw_parts_mut(t.as_mut_ptr() as *mut u8, size_of_t) };

        self.virt_read_exact_with_dtb(gva, slice_over_t, dtb)?;

        // SAFETY: `virt_read_exact_with_dtb` filled every byte of `t` (POD
        // assumption above).
        Ok(unsafe { t.assume_init() })
    }
801
    /// Like [`Self::virt_read_struct`] but returns `Ok(None)` instead of an
    /// error on an address-translation failure.
    pub fn try_virt_read_struct<T>(&self, gva: Gva) -> Result<Option<T>> {
        self.try_virt_read_struct_with_dtb::<T>(gva, Gpa::new(self.headers.directory_table_base))
    }
807
    /// Like [`Self::virt_read_struct_with_dtb`] but returns `Ok(None)`
    /// instead of an error on an address-translation failure.
    pub fn try_virt_read_struct_with_dtb<T>(&self, gva: Gva, dtb: Gpa) -> Result<Option<T>> {
        filter_addr_translation_err(self.virt_read_struct_with_dtb::<T>(gva, dtb))
    }
814
    /// Seeks the underlying reader.
    pub fn seek(&self, pos: io::SeekFrom) -> Result<u64> {
        Ok(self.reader.borrow_mut().seek(pos)?)
    }
818
    /// Reads from the underlying reader at its current position.
    pub fn read(&self, buf: &mut [u8]) -> Result<usize> {
        Ok(self.reader.borrow_mut().read(buf)?)
    }
822
    /// Reads a `UNICODE_STRING`-style string out of virtual memory using the
    /// headers' directory table base; `Ok(None)` on translation failure.
    fn try_virt_read_unicode_string<P>(
        &self,
        unicode_str: &UnicodeString<P>,
    ) -> Result<Option<String>>
    where
        P: PtrSize,
    {
        self.try_virt_read_unicode_string_with_dtb(
            unicode_str,
            Gpa::new(self.headers.directory_table_base),
        )
    }
836
837 fn try_virt_read_unicode_string_with_dtb<P>(
840 &self,
841 unicode_str: &UnicodeString<P>,
842 dtb: Gpa,
843 ) -> Result<Option<String>>
844 where
845 P: PtrSize,
846 {
847 if (unicode_str.length % 2) != 0 {
848 return Err(KdmpParserError::InvalidUnicodeString);
849 }
850
851 let mut buffer = vec![0; unicode_str.length.into()];
852 match self.virt_read_exact_with_dtb(Gva::new(unicode_str.buffer.into()), &mut buffer, dtb) {
853 Ok(_) => {}
854 Err(KdmpParserError::AddrTranslation(_)) => return Ok(None),
856 Err(e) => return Err(e),
857 };
858
859 let n = unicode_str.length / 2;
860
861 Ok(Some(String::from_utf16(unsafe {
862 slice::from_raw_parts(buffer.as_ptr().cast(), n.into())
863 })?))
864 }
865
    /// Builds the physical memory map of a *full* dump.
    ///
    /// The headers embed a physical memory descriptor made of runs; page
    /// content follows sequentially in the file starting at the reader's
    /// current position, one page per descriptor entry.
    fn full_physmem(headers: &Header64, reader: &mut impl Reader) -> Result<PhysmemMap> {
        // File offset of the first page of content.
        let mut page_offset = reader.stream_position()?;
        let mut run_cursor = io::Cursor::new(headers.physical_memory_block_buffer);
        let physmem_desc = read_struct::<PhysmemDesc>(&mut run_cursor)?;
        let mut physmem = PhysmemMap::new();

        for run_idx in 0..physmem_desc.number_of_runs {
            let run = read_struct::<PhysmemRun>(&mut run_cursor)?;
            for page_idx in 0..run.page_count {
                // Physical address of this page within the run.
                let phys_addr = run
                    .phys_addr(page_idx)
                    .ok_or(KdmpParserError::PhysAddrOverflow(run_idx, page_idx))?;

                // A physical page must appear at most once.
                if physmem.insert(phys_addr, page_offset).is_some() {
                    return Err(KdmpParserError::DuplicateGpa(phys_addr));
                }

                // Content pages are laid out back-to-back in the file.
                page_offset = page_offset
                    .checked_add(PageKind::Normal.size())
                    .ok_or(KdmpParserError::PageOffsetOverflow(run_idx, page_idx))?;
            }
        }

        Ok(physmem)
    }
911
    /// Builds the physical memory map of a *BMP* dump.
    ///
    /// The BMP header is followed by a bitmap with one bit per physical page;
    /// set bits indicate the page's content is present in the file, and
    /// present pages are stored back-to-back starting at `first_page`.
    fn bmp_physmem(reader: &mut impl Reader) -> Result<PhysmemMap> {
        let bmp_header = read_struct::<BmpHeader64>(reader)?;
        if !bmp_header.looks_good() {
            return Err(KdmpParserError::InvalidData(
                "bmp header doesn't look right",
            ));
        }

        // Number of meaningful bits in the (possibly partial) last byte.
        let remaining_bits = bmp_header.pages % 8;
        // Bitmap length in bytes, rounded up to cover all pages.
        let bitmap_size = bmp_header.pages.next_multiple_of(8) / 8;
        let mut page_offset = bmp_header.first_page;
        let mut physmem = PhysmemMap::new();

        for bitmap_idx in 0..bitmap_size {
            let mut byte = [0u8];
            reader.read_exact(&mut byte)?;
            // Mask off the padding bits of the last byte so they aren't
            // mistaken for present pages.
            let last_byte = bitmap_idx == bitmap_size - 1;
            if last_byte && remaining_bits != 0 {
                let mask = (1u8 << remaining_bits).wrapping_sub(1);
                byte[0] &= mask;
            }

            let byte = byte[0];
            for bit_idx in 0..8 {
                // Clear bit: this page isn't present in the dump.
                if byte.bit(bit_idx) == 0 {
                    continue;
                }

                // Bit position -> PFN -> GPA.
                let pa = gpa_from_bitmap(bitmap_idx, bit_idx)
                    .ok_or(KdmpParserError::Overflow("pfn in bitmap"))?;

                let insert = physmem.insert(pa, page_offset);
                debug_assert!(insert.is_none());
                // Present pages are stored sequentially.
                page_offset = page_offset.checked_add(PageKind::Normal.size()).ok_or(
                    KdmpParserError::BitmapPageOffsetOverflow(bitmap_idx, bit_idx),
                )?;
            }
        }

        Ok(physmem)
    }
960
    /// Builds the physical memory map of the *kernel* dump flavors
    /// (`KernelMemory`, `KernelAndUserMemory`, `CompleteMemory`).
    ///
    /// These dumps describe memory with an array of [`PfnRange`]s (metadata),
    /// each mapping a run of consecutive pages to content stored sequentially
    /// in the file; `CompleteMemory` additionally bounds the walk with a
    /// total page count.
    fn kernel_physmem(dump_type: DumpType, reader: &mut impl Reader) -> Result<PhysmemMap> {
        use DumpType as D;
        let mut page_count = 0u64;
        // Pull the flavor-specific header fields: where page content starts,
        // how large the PfnRange metadata is, and (CompleteMemory only) the
        // total number of pages.
        let (mut page_offset, metadata_size, total_number_of_pages) = match dump_type {
            D::KernelMemory | D::KernelAndUserMemory => {
                let kernel_hdr = read_struct::<KernelRdmpHeader64>(reader)?;
                if !kernel_hdr.hdr.looks_good() {
                    return Err(KdmpParserError::InvalidData(
                        "RdmpHeader64 doesn't look right",
                    ));
                }

                (
                    kernel_hdr.hdr.first_page_offset,
                    kernel_hdr.hdr.metadata_size,
                    0,
                )
            }
            D::CompleteMemory => {
                let full_hdr = read_struct::<FullRdmpHeader64>(reader)?;
                if !full_hdr.hdr.looks_good() {
                    return Err(KdmpParserError::InvalidData(
                        "FullRdmpHeader64 doesn't look right",
                    ));
                }

                (
                    full_hdr.hdr.first_page_offset,
                    full_hdr.hdr.metadata_size,
                    full_hdr.total_number_of_pages,
                )
            }
            // Callers (`build_physmem`) only dispatch the three types above.
            _ => unreachable!(),
        };

        if page_offset == 0 || metadata_size == 0 {
            return Err(KdmpParserError::InvalidData(
                "no first page or metadata size",
            ));
        }

        // The metadata must be a whole number of PfnRange entries.
        let pfn_range_size = mem::size_of::<PfnRange>();
        if (metadata_size % pfn_range_size as u64) != 0 {
            return Err(KdmpParserError::InvalidData(
                "metadata size is not a multiple of 8",
            ));
        }

        let number_pfns = metadata_size / pfn_range_size as u64;
        let mut physmem = PhysmemMap::new();

        for _ in 0..number_pfns {
            // CompleteMemory dumps bound the walk by total page count.
            if dump_type == D::CompleteMemory {
                if page_count == total_number_of_pages {
                    break;
                }

                if page_count > total_number_of_pages {
                    return Err(KdmpParserError::InvalidData(
                        "page_count > total_number_of_pages",
                    ));
                }
            }

            let pfn_range = read_struct::<PfnRange>(reader)?;
            // A zero PFN terminates the metadata.
            if pfn_range.page_file_number == 0 {
                break;
            }

            // Map every page of the run to its sequential file offset.
            for page_idx in 0..pfn_range.number_of_pages {
                let gpa = gpa_from_pfn_range(&pfn_range, page_idx)
                    .ok_or(KdmpParserError::Overflow("w/ pfn_range"))?;
                let insert = physmem.insert(gpa, page_offset);
                debug_assert!(insert.is_none());
                page_offset = page_offset
                    .checked_add(PageKind::Normal.size())
                    .ok_or(KdmpParserError::Overflow("w/ page_offset"))?;
            }

            page_count = page_count
                .checked_add(pfn_range.number_of_pages)
                .ok_or(KdmpParserError::Overflow("w/ page_count"))?;
        }

        Ok(physmem)
    }
1051
    /// Dispatches to the physical-memory-map builder matching the dump type.
    /// The reader is expected to be positioned right after the dump headers.
    fn build_physmem(
        dump_type: DumpType,
        headers: &Header64,
        reader: &mut impl Reader,
    ) -> Result<PhysmemMap> {
        use DumpType as D;
        match dump_type {
            D::Full => Self::full_physmem(headers, reader),
            D::Bmp | D::LiveKernelMemory => Self::bmp_physmem(reader),
            D::KernelMemory | D::KernelAndUserMemory | D::CompleteMemory => {
                Self::kernel_physmem(dump_type, reader)
            }
        }
    }
1066}