
Source Code for Module rekall.plugins.addrspaces.amd64

# Rekall Memory Forensics
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Authors:
# Mike Auty
# Michael Cohen
# Jordi Sanchez
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#

""" This is based on Jesse Kornblum's patch to clean up the standard AS's.
"""
# pylint: disable=protected-access

import struct

from rekall import addrspace
from rekall import config
from rekall import obj
from rekall.plugins.addrspaces import intel
from rekall.plugins.addrspaces import standard
from rekall_lib import utils


config.DeclareOption("ept", group="Virtualization support",
                     type="ArrayIntParser",
                     help="The EPT physical address.")
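
# Hedged usage note (not part of the original module): "ept" is declared as an
# ArrayIntParser, so more than one EPT pointer may be supplied; VTxPagedMemory
# below walks the list when address spaces are stacked for nested guests.  A
# purely illustrative invocation might look like:
#
#   rekall -f host_memory.img --ept 0x17725901e pslist
#
# Candidate EPT values can be located with the vmscan plugin.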


class AMD64PagedMemory(intel.IA32PagedMemoryPae):
    """Standard AMD 64-bit address space.

    Provides an address space for AMD64 paged memory, aka the x86_64
    architecture, which is laid out similarly to Physical Address
    Extensions (PAE). Allows callers to map virtual addresses to
    offsets in physical memory.

    Create a new AMD64 address space to sit on top of the base address
    space and a Directory Table Base (CR3 value) of 'dtb'.

    Comments in this class mostly come from the Intel(R) 64 and IA-32
    Architectures Software Developer's Manual Volume 3A: System Programming
    Guide, Part 1, revision 031, pages 4-8 to 4-15. This book is available
    for free at http://www.intel.com/products/processor/manuals/index.htm.
    Similar information is also available from Advanced Micro Devices (AMD)
    at http://support.amd.com/us/Processor_TechDocs/24593.pdf.
    """
    order = 60

    def describe_vtop(self, vaddr, collection=None):
        """Describe the resolution process of a Virtual Address.

        See base method for docs.
        """
        if collection is None:
            collection = intel.DescriptorCollection(self.session)

        # Bits 51:12 are from CR3
        # Bits 11:3 are bits 47:39 of the linear address
        pml4e_addr = ((self.get_pml4() & 0xffffffffff000) |
                      ((vaddr & 0xff8000000000) >> 36))
        pml4e_value = self.read_pte(pml4e_addr, collection=collection)

        collection.add(intel.AddressTranslationDescriptor,
                       object_name="pml4e", object_value=pml4e_value,
                       object_address=pml4e_addr)

        if not pml4e_value & self.valid_mask:
            collection.add(intel.InvalidAddress, "Invalid PML4E\n")
            return collection

        # Bits 51:12 are from the PML4E
        # Bits 11:3 are bits 38:30 of the linear address
        pdpte_addr = ((pml4e_value & 0xffffffffff000) |
                      ((vaddr & 0x7FC0000000) >> 27))
        pdpte_value = self.read_pte(pdpte_addr, collection=collection)

        collection.add(intel.AddressTranslationDescriptor,
                       object_name="pdpte", object_value=pdpte_value,
                       object_address=pdpte_addr)

        if not pdpte_value & self.valid_mask:
            collection.add(intel.InvalidAddress, "Invalid PDPTE\n")
            return collection

        # Large page mapping.
        if pdpte_value & self.page_size_mask:
            # Bits 51:30 are from the PDPTE
            # Bits 29:0 are from the original linear address
            physical_address = ((pdpte_value & 0xfffffc0000000) |
                                (vaddr & 0x3fffffff))
            collection.add(intel.CommentDescriptor, "One Gig page\n")

            collection.add(intel.PhysicalAddressDescriptor,
                           address=physical_address)

            return collection

        # Bits 51:12 are from the PDPTE
        # Bits 11:3 are bits 29:21 of the linear address
        pde_addr = ((pdpte_value & 0xffffffffff000) |
                    ((vaddr & 0x3fe00000) >> 18))
        self._describe_pde(collection, pde_addr, vaddr)

        return collection
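
    # Illustrative note (not part of the original module): the masks above
    # extract 9-bit table indices from a canonical 48-bit virtual address.
    # The code shifts by 36 rather than 39 because each table entry is eight
    # bytes, so the index is pre-multiplied into a byte offset.  For a
    # hypothetical address:
    #
    #   >>> vaddr = 0x7f1234567890
    #   >>> (vaddr >> 39) & 0x1ff    # PML4 index (bits 47:39)
    #   254
    #   >>> (vaddr >> 30) & 0x1ff    # PDPT index (bits 38:30)
    #   72
    #   >>> (vaddr >> 21) & 0x1ff    # PD index (bits 29:21)
    #   418
    #   >>> (vaddr >> 12) & 0x1ff    # PT index (bits 20:12)
    #   359
    #   >>> "%#x" % (vaddr & 0xfff)  # page offset
    #   '0x890'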

    def get_pml4(self):
        """Returns the PML4, the base of the paging tree."""
        return self.dtb

    def get_mappings(self, start=0, end=2**64):
        """Enumerate all available ranges.

        Yields Run objects for all available ranges in the virtual address
        space.
        """
        # Pages that hold PDEs and PTEs are 0x1000 bytes each.
        # Each PDE and PTE is eight bytes. Thus there are 0x1000 / 8 = 0x200
        # PDEs and PTEs we must test.
        for pml4e_index in xrange(0, 0x200):
            vaddr = pml4e_index << 39
            if vaddr > end:
                return

            next_vaddr = (pml4e_index + 1) << 39
            if start >= next_vaddr:
                continue

            pml4e_addr = ((self.get_pml4() & 0xffffffffff000) |
                          ((vaddr & 0xff8000000000) >> 36))
            pml4e_value = self.read_pte(pml4e_addr)
            if not pml4e_value & self.valid_mask:
                continue

            tmp1 = vaddr
            for pdpte_index in xrange(0, 0x200):
                vaddr = tmp1 + (pdpte_index << 30)
                if vaddr > end:
                    return

                next_vaddr = tmp1 + ((pdpte_index + 1) << 30)
                if start >= next_vaddr:
                    continue

                # Bits 51:12 are from the PML4E
                # Bits 11:3 are bits 38:30 of the linear address
                pdpte_addr = ((pml4e_value & 0xffffffffff000) |
                              ((vaddr & 0x7FC0000000) >> 27))
                pdpte_value = self.read_pte(pdpte_addr)
                if not pdpte_value & self.valid_mask:
                    continue

                # 1 gig page.
                if pdpte_value & self.page_size_mask:
                    yield addrspace.Run(
                        start=vaddr,
                        end=vaddr + 0x40000000,
                        file_offset=((pdpte_value & 0xfffffc0000000) |
                                     (vaddr & 0x3fffffff)),
                        address_space=self.base)
                    continue

                for x in self._get_available_PDEs(
                        vaddr, pdpte_value, start, end):
                    yield x
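
    # Hedged usage sketch (not part of the original module): with a live
    # Rekall session, an AMD64 address space obtained from the session can be
    # enumerated run by run.  The parameter name below is illustrative.
    #
    #   >>> aspace = session.GetParameter("default_address_space")
    #   >>> for run in aspace.get_mappings():
    #   ...     print "%#x-%#x -> file offset %#x" % (
    #   ...         run.start, run.end, run.file_offset)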

    def _get_pte_addr(self, vaddr, pde_value):
        if pde_value & self.valid_mask:
            return (pde_value & 0xffffffffff000) | ((vaddr & 0x1ff000) >> 9)

    def _get_pde_addr(self, pdpte_value, vaddr):
        if pdpte_value & self.valid_mask:
            return ((pdpte_value & 0xffffffffff000) |
                    ((vaddr & 0x3fe00000) >> 18))

    def _get_available_PDEs(self, vaddr, pdpte_value, start, end):
        # This reads the entire PDE table at once - on Windows, where IO is
        # extremely expensive, it's about 10 times more efficient than
        # reading it one value at a time - and this loop is HOT!
        pde_table_addr = self._get_pde_addr(pdpte_value, vaddr)
        if pde_table_addr is None:
            return

        data = self.base.read(pde_table_addr, 8 * 0x200)
        pde_table = struct.unpack("<" + "Q" * 0x200, data)

        tmp2 = vaddr
        for pde_index in range(0, 0x200):
            vaddr = tmp2 + (pde_index << 21)
            if vaddr > end:
                return

            next_vaddr = tmp2 + ((pde_index + 1) << 21)
            if start >= next_vaddr:
                continue

            pde_value = pde_table[pde_index]
            if pde_value & self.valid_mask and pde_value & self.page_size_mask:
                yield addrspace.Run(
                    start=vaddr,
                    end=vaddr + 0x200000,
                    file_offset=(pde_value & 0xfffffffe00000) | (
                        vaddr & 0x1fffff),
                    address_space=self.base)
                continue

            # This reads the entire PTE table at once - on Windows, where IO
            # is extremely expensive, it's about 10 times more efficient than
            # reading it one value at a time - and this loop is HOT!
            pte_table_addr = self._get_pte_addr(vaddr, pde_value)

            # Invalid PTEs.
            if pte_table_addr is None:
                continue

            data = self.base.read(pte_table_addr, 8 * 0x200)
            pte_table = struct.unpack("<" + "Q" * 0x200, data)

            for x in self._get_available_PTEs(
                    pte_table, vaddr, start=start, end=end):
                yield x

    def _get_available_PTEs(self, pte_table, vaddr, start=0, end=2**64):
        tmp3 = vaddr
        for i, pte_value in enumerate(pte_table):
            if not pte_value & self.valid_mask:
                continue

            vaddr = tmp3 + (i << 12)
            if vaddr > end:
                return

            next_vaddr = tmp3 + ((i + 1) << 12)
            if start >= next_vaddr:
                continue

            yield addrspace.Run(start=vaddr,
                                end=vaddr + 0x1000,
                                file_offset=(
                                    pte_value & 0xffffffffff000) | (
                                        vaddr & 0xfff),
                                address_space=self.base)

    def end(self):
        return (2 ** 64) - 1


class VTxPagedMemory(AMD64PagedMemory):
    """Intel VT-x address space.

    Provides an address space that does EPT page translation to provide access
    to the guest physical address space, thus allowing plugins to operate on a
    virtual machine running on a host operating system.

    This is described in the Intel(R) 64 and IA-32 Architectures Software
    Developer's Manual Volume 3C: System Programming Guide, Part 3, pages 28-1
    to 28-12. This book is available for free at
    http://www.intel.com/products/processor/manuals/index.htm.

    This address space depends on the "ept" parameter. You can use the vmscan
    plugin to find valid ept values on a physical memory image.

    Note that support for AMD's AMD-V address space is untested at the moment.
    """

    # Virtualization is always the last AS since it has to overlay any form of
    # image AS.
    order = standard.FileAddressSpace.order + 10
    __image = True
    _ept = None

    # A page entry being present depends only on bits 2:0 for EPT translation.
    valid_mask = 7

    # This is a virtualized address space.
    virtualized = True

    def __init__(self, ept=None, **kwargs):
        # A dummy DTB is passed to the base class so the DTB checks on
        # IA32PagedMemory don't bail out. We require the DTB to never be used
        # for page translation outside of get_pml4.
        try:
            super(VTxPagedMemory, self).__init__(dtb=0xFFFFFFFF, **kwargs)
        except TypeError:
            raise addrspace.ASAssertionError()

        # Reset the DTB, in case a plugin or AS relies on us providing one.
        self.dtb = None
        ept_list = ept or self.session.GetParameter("ept")
        if not isinstance(ept_list, (list, tuple)):
            ept_list = [ept_list]

        self.as_assert(ept_list, "No EPT specified")

        this_ept = None
        if isinstance(self.base, VTxPagedMemory):
            # Find our EPT, which will be the next one after the base one.
            base_idx = ept_list.index(self.base._ept)
            try:
                this_ept = ept_list[base_idx + 1]
            except IndexError:
                pass
        else:
            this_ept = ept_list[0]

        self.as_assert(this_ept is not None, "No more EPTs specified")
        self._ept = this_ept
        self.name = "VTxPagedMemory@%#x" % self._ept

    @utils.safe_property
    def ept(self):
        return self._ept

    def get_pml4(self):
        # The PML4 for VT-x is in the EPT, not in the DTB as in
        # AMD64PagedMemory.
        return self._ept

    def __str__(self):
        return "%s@0x%08X" % (self.__class__.__name__, self._ept)

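    # Hedged construction sketch (not part of the original module; the EPT
    # value and the names of the base and session objects are illustrative):
    #
    #   >>> vtx_as = VTxPagedMemory(ept=0x17725901e, base=physical_as,
    #   ...                         session=session)
    #   >>> vtx_as.get_pml4() == vtx_as.ept
    #   True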

class XenM2PMapper(dict):
    """A mapping between machine and physical addresses."""


class XenParaVirtAMD64PagedMemory(AMD64PagedMemory):
    """XEN ParaVirtualized guest address space."""

    PAGE_SIZE = 0x1000
    P2M_PER_PAGE = P2M_TOP_PER_PAGE = P2M_MID_PER_PAGE = PAGE_SIZE / 8

    # From include/xen/interface/features.h
    XENFEAT_writable_page_tables = 0
    XENFEAT_writable_descriptor_tables = 1
    XENFEAT_auto_translated_physmap = 2
    XENFEAT_supervisor_mode_kernel = 3
    XENFEAT_pae_pgdir_above_4gb = 4
    XENFEAT_mmu_pt_update_preserve_ad = 5
    XENFEAT_hvm_callback_vector = 8
    XENFEAT_hvm_safe_pvclock = 9
    XENFEAT_hvm_pirqs = 10
    XENFEAT_dom0 = 11

    def __init__(self, **kwargs):
        super(XenParaVirtAMD64PagedMemory, self).__init__(**kwargs)
        self.page_offset = self.session.GetParameter("page_offset")
        self._xen_features = None
        self.rebuilding_map = False
        if self.page_offset:
            self._RebuildM2PMapping()

    def xen_feature(self, flag):
        """Obtains the state of a XEN feature."""
        if not self._xen_features:
            # We have to instantiate xen_features manually from the physical
            # address space since we are building a virtual one when
            # xen_feature is called.
            xen_features_p = self.session.profile.get_constant("xen_features")
            xen_features_phys = (xen_features_p -
                                 self.session.profile.GetPageOffset())
            self._xen_features = obj.Array(
                vm=self.session.physical_address_space,
                target="unsigned char",
                offset=xen_features_phys,
                session=self.session,
                profile=self.session.profile,
                count=32)

        return self._xen_features[flag]

    def _ReadP2M(self, offset, p2m_size):
        """Helper function to return p2m entries at offset.

        This function is used to speed up reading the p2m tree, because
        traversal via the Array struct is slow.

        Yields (index, mfn) tuples, up to p2m_size of them.
        """
        for index, mfn in zip(
                xrange(0, p2m_size),
                struct.unpack(
                    "<" + "Q" * p2m_size,
                    self.read(offset, 0x1000))):
            yield (index, mfn)

    def _RebuildM2PMapping(self):
        """Rebuilds the machine to physical mapping.

        A XEN ParaVirtualized kernel (the guest) maintains a special set of
        page tables. Each entry points to machine (host) memory instead of
        physical (guest) memory.

        XEN maintains both a machine-to-physical mapping and a
        physical-to-machine mapping in a set of trees. We need the former to
        translate the machine addresses in the page tables, but only the
        latter tree is available (mapped in memory) on the guest.

        When Rekall is run against the memory of a paravirtualized Linux
        kernel, we traverse the physical to machine mapping and invert it so
        we can quickly translate from machine (host) addresses to guest
        physical addresses.

        See: http://lxr.free-electrons.com/source/arch/x86/xen/p2m.c?v=3.0 for
        reference.
        """

        if self.session.GetParameter("m2p_mapping"):
            return

        if self.rebuilding_map:
            raise RuntimeError("RebuildM2PMapping recursed... aborting.")

        self.rebuilding_map = True

        self.session.logging.debug(
            "Rebuilding the machine to physical mapping...")

        try:
            p2m_top_location = self.session.profile.get_constant_object(
                "p2m_top", "Pointer", vm=self)
            p2m_missing = self.session.profile.get_constant_object(
                "p2m_missing", "Pointer", vm=self)
            p2m_mid_missing = self.session.profile.get_constant_object(
                "p2m_mid_missing", "Pointer", vm=self)
            p2m_identity = self.session.profile.get_constant_object(
                "p2m_identity", "Pointer", vm=self)

            self.session.logging.debug("p2m_top = %#0x", p2m_top_location)
            self.session.logging.debug("p2m_missing = %#0x", p2m_missing)
            self.session.logging.debug("p2m_mid_missing = %#0x",
                                       p2m_mid_missing)
            self.session.logging.debug("p2m_identity = %#0x", p2m_identity)

            # Obtained for debugging purposes as we don't have explicit
            # support for it yet, and it doesn't seem to be common.
            self.session.logging.debug(
                "XENFEAT_auto_translated_physmap = %d",
                self.xen_feature(self.XENFEAT_auto_translated_physmap))

            # A mapping of offset to symbol name
            OFF2SYM = {
                long(p2m_missing): "p2m_missing",
                long(p2m_mid_missing): "p2m_mid_missing",
                ~0: "INVALID_P2M",
            }

            new_mapping = XenM2PMapper()

            # TOP entries
            for p2m_top in self._ReadP2M(
                    p2m_top_location, self.P2M_TOP_PER_PAGE):
                p2m_top_idx, p2m_top_entry = p2m_top
                p2m_top_entry = obj.Pointer.integer_to_address(p2m_top_entry)

                self.session.report_progress(
                    "Building m2p map %.02f%%" % (
                        100 * (float(p2m_top_idx) / self.P2M_TOP_PER_PAGE)))

                self.session.logging.debug(
                    "p2m_top[%d] = %s",
                    p2m_top_idx,
                    OFF2SYM.get(p2m_top_entry, "%#0x" % p2m_top_entry))

                if p2m_top_entry == p2m_mid_missing:
                    continue

                # MID entries
                for p2m_mid in self._ReadP2M(
                        p2m_top_entry, self.P2M_MID_PER_PAGE):

                    p2m_mid_idx, p2m_mid_entry = p2m_mid
                    p2m_mid_entry = obj.Pointer.integer_to_address(
                        p2m_mid_entry)

                    if p2m_mid_entry == p2m_identity:
                        # Logging because we haven't seen IDENTITY mid_entries
                        # before.
                        self.session.logging.debug(
                            "p2m_top[%d][%d] IS IDENTITY",
                            p2m_top_idx, p2m_mid_idx)

                        # XXX: [Experimental] based on the kernel source code.
                        # get_phys_to_machine returns the IDENTITY_FRAME of
                        # the PFN as the MFN when the mid_entry was marked as
                        # being an identity.
                        # http://lxr.free-electrons.com/source/arch/x86/xen/p2m.c?v=3.8#L494
                        #
                        # We fill all the MFNs under this mid_entry as
                        # identities.
                        for idx in xrange(self.P2M_PER_PAGE):
                            pfn = (p2m_top_idx * self.P2M_MID_PER_PAGE
                                   * self.P2M_PER_PAGE
                                   + p2m_mid_idx * self.P2M_PER_PAGE
                                   + idx)
                            mfn = self.IDENTITY_FRAME(pfn)
                            new_mapping[mfn] = pfn
                        continue

                    # Uninitialized p2m_mid_entries can be skipped entirely.
                    if p2m_mid_entry == p2m_missing:
                        continue

                    self.session.logging.debug(
                        "p2m_top[%d][%d] = %s",
                        p2m_top_idx,
                        p2m_mid_idx,
                        OFF2SYM.get(p2m_mid_entry, "%#0x" % p2m_mid_entry))

                    for p2m in self._ReadP2M(p2m_mid_entry, self.P2M_PER_PAGE):
                        p2m_idx, mfn = p2m
                        pfn = (p2m_top_idx * self.P2M_MID_PER_PAGE
                               * self.P2M_PER_PAGE
                               + p2m_mid_idx * self.P2M_PER_PAGE
                               + p2m_idx)

                        if p2m_mid_entry == p2m_identity:
                            self.session.logging.debug(
                                "p2m_top[%d][%d][%d] is IDENTITY",
                                p2m_top_idx,
                                p2m_mid_idx,
                                p2m_idx)

                        # For debugging purposes. Not found commonly as far as
                        # we've seen.
                        if mfn == ~0:
                            self.session.logging.debug(
                                "p2m_top[%d][%d][%d] is INVALID",
                                p2m_top_idx,
                                p2m_mid_idx,
                                p2m_idx)
                            continue

                        new_mapping[mfn] = pfn

            self.session.logging.debug("Caching m2p_mapping (%d entries)...",
                                       len(new_mapping))
            self.session.SetCache("m2p_mapping", new_mapping)
        finally:
            self.rebuilding_map = False
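
    # Worked example (illustrative, not part of the original module): with
    # 512 entries per page at every level, the guest frame number computed
    # above is a three-level radix index.  For p2m_top_idx=1, p2m_mid_idx=2
    # and idx=3:
    #
    #   >>> 1 * 512 * 512 + 2 * 512 + 3
    #   263171
    #
    # i.e. PFN 263171 is associated with whatever MFN is stored in that leaf.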

    def IDENTITY_FRAME(self, pfn):
        """Returns the identity frame of pfn.

        From
        http://lxr.free-electrons.com/source/arch/x86/include/asm/xen/page.h?v=3.8#L36
        """

        BITS_PER_LONG = 64
        IDENTITY_BIT = 1 << (BITS_PER_LONG - 2)
        return pfn | IDENTITY_BIT
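
    # Illustrative check (not part of the original module): the identity bit
    # is bit 62 (1 << 62), so for example:
    #
    #   >>> "%#x" % (0x1234 | (1 << 62))
    #   '0x4000000000001234'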

    def m2p(self, machine_address):
        """Translates from a machine address to a physical address.

        This translates host physical addresses to guest physical.
        Requires a machine to physical mapping to have been calculated.
        """
        m2p_mapping = self.session.GetParameter("m2p_mapping", cached=True)
        if not m2p_mapping:
            self._RebuildM2PMapping()
            m2p_mapping = self.session.GetParameter("m2p_mapping", cached=True)

        machine_address = obj.Pointer.integer_to_address(machine_address)
        mfn = machine_address / 0x1000
        pfn = m2p_mapping.get(mfn)
        if pfn is None:
            return obj.NoneObject("No PFN mapping found for MFN %d" % mfn)
        return (pfn * 0x1000) | (0xFFF & machine_address)
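
    # Worked example (illustrative, not part of the original module): machine
    # address 0x12345678 lies in MFN 0x12345 at page offset 0x678.  If the
    # cached m2p_mapping maps MFN 0x12345 to PFN 0x54321, the method returns:
    #
    #   >>> "%#x" % ((0x54321 * 0x1000) | 0x678)
    #   '0x54321678'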

    def read_pte(self, vaddr, collection=None):
        mfn = super(XenParaVirtAMD64PagedMemory, self).read_pte(vaddr)
        pfn = self.m2p(mfn)
        if collection is not None:
            collection.add(
                intel.CommentDescriptor,
                ("\n(XEN resolves MFN 0x%x to PFN 0x%x)\n"
                 % (mfn, pfn)))

        return pfn

    def vtop(self, vaddr):
        vaddr = obj.Pointer.integer_to_address(vaddr)

        if not self.session.GetParameter("m2p_mapping"):
            # Simple shortcut for linux. This is required for the first set
            # of virtual to physical resolutions while we're building the
            # mapping.
            page_offset = obj.Pointer.integer_to_address(
                self.profile.GetPageOffset())

            if vaddr > page_offset:
                result = self.profile.phys_addr(vaddr)
                if result > self.base.end():
                    # Force a rebuild if the phys_addr is outside the base
                    # image.
                    self._RebuildM2PMapping()
                    return super(XenParaVirtAMD64PagedMemory,
                                 self).vtop(vaddr)
                return result

            # Try to update the mapping
            if not self.rebuilding_map:
                self._RebuildM2PMapping()

        return super(XenParaVirtAMD64PagedMemory, self).vtop(vaddr)

    def _get_available_PDEs(self, vaddr, pdpte_value, start, end):
        # This reads the entire PDE table at once - on Windows, where IO is
        # extremely expensive, it's about 10 times more efficient than
        # reading it one value at a time - and this loop is HOT!
        pde_table_addr = self._get_pde_addr(pdpte_value, vaddr)
        if pde_table_addr is None:
            return

        data = self.base.read(pde_table_addr, 8 * 0x200)
        pde_table = struct.unpack("<" + "Q" * 0x200, data)

        tmp2 = vaddr
        for pde_index in range(0, 0x200):
            vaddr = tmp2 | (pde_index << 21)
            if vaddr > end:
                return

            next_vaddr = tmp2 | ((pde_index + 1) << 21)
            if start >= next_vaddr:
                continue

            pde_value = self.m2p(pde_table[pde_index])
            if pde_value & self.valid_mask and pde_value & self.page_size_mask:
                yield addrspace.Run(
                    start=vaddr,
                    end=vaddr + 0x200000,
                    file_offset=(pde_value & 0xfffffffe00000) | (
                        vaddr & 0x1fffff),
                    address_space=self.base)
                continue

            # This reads the entire PTE table at once - on Windows, where IO
            # is extremely expensive, it's about 10 times more efficient than
            # reading it one value at a time - and this loop is HOT!
            pte_table_addr = self._get_pte_addr(vaddr, pde_value)

            # Invalid PTEs.
            if pte_table_addr is None:
                continue

            data = self.base.read(pte_table_addr, 8 * 0x200)
            pte_table = struct.unpack("<" + "Q" * 0x200, data)

            for x in self._get_available_PTEs(
                    pte_table, vaddr, start=start, end=end):
                yield x

    def _get_available_PTEs(self, pte_table, vaddr, start=0, end=2**64):
        """Returns PFNs for each PTE entry."""
        tmp3 = vaddr
        for i, pte_value in enumerate(pte_table):
            # Each of the PTE values has to be translated back to a PFN, since
            # they are MFNs.
            pte_value = self.m2p(pte_value)

            # When no translation was found, we skip the PTE, since we don't
            # know where it's pointing to.
            if pte_value == None:
                continue

            if not pte_value & self.valid_mask:
                continue

            vaddr = tmp3 | (i << 12)
            if vaddr > end:
                return

            next_vaddr = tmp3 | ((i + 1) << 12)
            if start >= next_vaddr:
                continue

            yield addrspace.Run(start=vaddr,
                                end=vaddr + 0x1000,
                                file_offset=(
                                    pte_value & 0xffffffffff000) | (
                                        vaddr & 0xfff),
                                address_space=self.base)