1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23 """An address space to read ARM memory images.
24
25 References:
26
27 ARM1176JZ-S Technical Reference Manual
28 Revision: r0p7
29 http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0333h/ch06s11s01.html
30 http://infocenter.arm.com/help/topic/com.arm.doc.ddi0333h/DDI0333H_arm1176jzs_r0p7_trm.pdf
31
32 ARM926EJ-S Revision: r0p5 Technical Reference Manual
33 Chapter 3.2 Address translation
34 http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0198e/Babjcchb.html
35 http://infocenter.arm.com/help/topic/com.arm.doc.ddi0198e/DDI0198E_arm926ejs_r0p5_trm.pdf
36 """
37
38 import struct
39
40 from rekall import addrspace
41 from rekall.plugins.addrspaces import intel
42
43
class ArmPagedMemory(addrspace.PagedReader):
    """An address space to read virtual memory on ARM systems.

    The ARM manual refers to the "Translation Table Base Register" (TTBR) as the
    equivalent of the Intel CR3 register. We just refer to it as the DTB
    (Directory Table Base) to be consistent with the other Rekall address
    spaces.

    This implementation is guided by Figure 6.6 of ARM1176JZ-S Technical
    Reference Manual, Revision: r0p7.
    http://infocenter.arm.com/help/topic/com.arm.doc.ddi0333h/DDI0333H_arm1176jzs_r0p7_trm.pdf
    """

    # A section descriptor maps a 1MB region directly: bits [19:0] of the
    # virtual address index into the section, bits [31:20] come from the
    # descriptor.
    section_index_mask = (1 << 20) - 1
    section_base_address_mask = ~section_index_mask

    # Bit 18 of a section descriptor marks it as a supersection.
    super_section_mask = 1 << 18

    # A supersection maps a 16MB region, so bits [23:0] of the virtual
    # address index into it. (Previously (2 << 24) - 1, a 32MB mask, which
    # contradicts the 16MB supersection size in the ARM1176 TRM and the
    # 1 << 24 stride used by get_mappings below.)
    super_section_index_mask = (1 << 24) - 1
    super_section_base_address_mask = ~super_section_index_mask

    # Bits [31:20] of the virtual address select one of the 4096 l1
    # descriptors.
    table_index_mask = ~section_index_mask

    # Bits [19:12] of the virtual address select one of the 256 entries of
    # a coarse l2 page table.
    l2_table_index_mask = ((1 << 20) - 1) ^ ((1 << 12) - 1)

    # A coarse page table is 1kb aligned.
    coarse_page_table_base_address_mask = ~((1 << 10) - 1)

    # A fine page table is 4kb aligned. Bits [19:10] of the virtual address
    # select one of its 1024 entries.
    fine_page_table_base_address_mask = ~((1 << 12) - 1)
    fine_l2_table_index_mask = ((1 << 20) - 1) ^ ((1 << 10) - 1)

    # NOTE(review): not referenced anywhere in this class; kept so any
    # external users of the attribute keep working.
    fine_page_table_index_mask = ((1 << 2) - 1) << 10

    # A large page maps 64kb.
    large_page_index_mask = (1 << 16) - 1
    large_page_base_address_mask = ~large_page_index_mask

    # A small page maps 4kb.
    small_page_index_mask = (1 << 12) - 1
    small_page_base_address_mask = ~small_page_index_mask

    # A tiny page maps 1kb (ARM926 fine page tables only).
    tiny_page_index_mask = (1 << 10) - 1
    tiny_page_base_address_mask = ~tiny_page_index_mask

    def __init__(self, name=None, dtb=None, **kwargs):
        """Instantiate the ARM paged address space.

        Args:
          name: Optional human readable name for this address space.
          dtb: The Translation Table Base register value. If not given it is
            taken from the session's "dtb" parameter.

        Raises:
          TypeError: If no base address space or no usable dtb is available.
        """
        super(ArmPagedMemory, self).__init__(**kwargs)

        if not self.base:
            raise TypeError("No base Address Space")

        self.dtb = dtb or self.session.GetParameter("dtb")

        # NOTE(review): '==' rather than 'is' is deliberate — the session
        # may return a NoneObject which compares equal to None without
        # being the None singleton. TODO confirm against GetParameter().
        if self.dtb == None:
            raise TypeError("No valid DTB specified. Try the find_dtb"
                            " plugin to search for the dtb.")
        self.name = (name or 'Kernel AS') + "@%#x" % self.dtb

        # The TTBR points to a 16kb aligned l1 table; the low 14 bits hold
        # control flags and must be masked out before use.
        self.dtb &= ~((1 << 14) - 1)

    def read_long_phys(self, addr):
        """Read an unsigned 32-bit little-endian integer from physical memory.

        Note this always succeeds - reads outside mapped addresses in the image
        will simply return 0.
        """
        string = self.base.read(addr, 4)
        return struct.unpack("<I", string)[0]

    def vtop(self, vaddr):
        """Translates virtual addresses into physical offsets.

        The function should return either None (no valid mapping)
        or the offset in physical memory where the address maps.

        This function is simply a wrapper around describe_vtop() which does all
        the hard work. You probably never need to override it.
        """
        vaddr = int(vaddr)

        collection = self.describe_vtop(
            vaddr, intel.PhysicalAddressDescriptorCollector(self.session))

        return collection.physical_address

    def describe_vtop(self, vaddr, collection=None):
        """Walk the page tables for vaddr, recording each translation step.

        Adds a descriptor to the collection for every step of the two level
        ARM table walk, ending in either a PhysicalAddressDescriptor or an
        InvalidAddress.

        Returns:
          The (possibly newly created) collection.
        """
        if collection is None:
            collection = intel.DescriptorCollection(self.session)

        # The l1 table has 4096 4-byte entries indexed by bits [31:20]:
        # byte offset = index * 4 = (vaddr >> 20) << 2 = masked vaddr >> 18.
        l1_descriptor_addr = (self.dtb | (
            (vaddr & self.table_index_mask) >> 18))
        l1_descriptor = self.read_long_phys(l1_descriptor_addr)
        collection.add(intel.AddressTranslationDescriptor,
                       object_name="l1 descriptor",
                       object_value=l1_descriptor,
                       object_address=l1_descriptor_addr)

        l1_descriptor_type = l1_descriptor & 0b11

        # Fault entry - no translation exists.
        if l1_descriptor_type == 0b00:
            collection.add(intel.InvalidAddress, "Invalid L1 descriptor")
            return collection

        # Section or supersection: mapped directly without an l2 table.
        if l1_descriptor_type == 0b10:
            if l1_descriptor & self.super_section_mask:
                collection.add(
                    intel.CommentDescriptor,
                    "Super section base @ {0:#x}\n",
                    l1_descriptor & self.super_section_base_address_mask)

                collection.add(
                    intel.PhysicalAddressDescriptor,
                    address=(l1_descriptor &
                             self.super_section_base_address_mask) | (
                                 vaddr & self.super_section_index_mask))
            else:
                collection.add(intel.CommentDescriptor,
                               "Section base @ {0:#x}\n",
                               l1_descriptor & self.section_base_address_mask)

                collection.add(
                    intel.PhysicalAddressDescriptor,
                    address=(l1_descriptor &
                             self.section_base_address_mask) | (
                                 vaddr & self.section_index_mask))

        # Coarse page table: 256 4-byte entries indexed by bits [19:12]:
        # byte offset = index * 4 = masked vaddr >> 10.
        elif l1_descriptor_type == 0b01:
            # Pass the base positionally so "{0:#x}" has an argument to
            # format (an address= kwarg left the placeholder unfilled).
            collection.add(
                intel.CommentDescriptor, "Coarse table base @ {0:#x}\n",
                l1_descriptor & self.coarse_page_table_base_address_mask)

            l2_addr = (
                (l1_descriptor &
                 self.coarse_page_table_base_address_mask) |
                ((vaddr & self.l2_table_index_mask) >> 10))

            l2_descriptor = self.read_long_phys(l2_addr)

            collection.add(intel.AddressTranslationDescriptor,
                           object_name="l2 descriptor",
                           object_value=l2_descriptor,
                           object_address=l2_addr)

            self._desc_l2_descriptor(collection, l2_descriptor, vaddr)

        # Fine page table: 1024 4-byte entries indexed by bits [19:10]:
        # byte offset = index * 4 = masked vaddr >> 8. (Previously >> 12,
        # which discarded the low four index bits and read the wrong entry.)
        elif l1_descriptor_type == 0b11:
            collection.add(
                intel.CommentDescriptor, "Fine table base @ {0:#x}\n",
                l1_descriptor & self.fine_page_table_base_address_mask)

            l2_addr = (
                (l1_descriptor &
                 self.fine_page_table_base_address_mask) |
                ((vaddr & self.fine_l2_table_index_mask) >> 8))

            l2_descriptor = self.read_long_phys(l2_addr)

            collection.add(intel.AddressTranslationDescriptor,
                           object_name="l2 descriptor",
                           object_value=l2_descriptor,
                           object_address=l2_addr)

            self._desc_l2_descriptor(collection, l2_descriptor, vaddr)

        return collection

    def _desc_l2_descriptor(self, collection, l2_descriptor, vaddr):
        """Describe the final translation step from an l2 descriptor."""
        l2_descriptor_type = l2_descriptor & 0b11

        # Large (64k) page.
        if l2_descriptor_type == 0b01:
            collection.add(
                intel.CommentDescriptor, "Large page base @ {0:#x}\n",
                l2_descriptor & self.large_page_base_address_mask)

            collection.add(
                intel.PhysicalAddressDescriptor,
                address=(l2_descriptor &
                         self.large_page_base_address_mask) | (
                             vaddr & self.large_page_index_mask))

        # Small (4k) page (0b10) or ARM1176 extended small page (0b11);
        # both use the same [31:12] base address field.
        # NOTE(review): in ARM926 fine tables type 0b11 is a 1kb tiny page
        # instead; that case is not distinguished here. The previous tiny
        # page branch was dead code (shadowed by this test) and was removed.
        elif l2_descriptor_type == 0b10 or l2_descriptor_type == 0b11:
            collection.add(
                intel.CommentDescriptor, "Small page base @ {0:#x}\n",
                l2_descriptor & self.small_page_base_address_mask)

            collection.add(
                intel.PhysicalAddressDescriptor,
                address=(l2_descriptor &
                         self.small_page_base_address_mask) | (
                             vaddr & self.small_page_index_mask))

        # Fault entry (0b00).
        else:
            collection.add(intel.InvalidAddress, "Invalid L2 descriptor")

    def page_fault_handler(self, descriptor, vaddr):
        """A placeholder for handling page faults."""
        _ = descriptor, vaddr
        return None

    def get_mappings(self, start=0, end=2**64):
        """Generate all valid address runs.

        Args:
          start: Runs ending at or below this virtual address are skipped.
          end: Stop once the virtual address reaches this value.

        Note that ARM requires page table entries for large sections to be
        duplicated (e.g. a supersection first_level_descriptor must be
        duplicated 16 times). We don't actually check for this here.
        """
        # ARM virtual addresses are at most 32 bits wide; without this cap
        # the default end of 2**64 would walk far past the l1 table.
        end = min(end, 1 << 32)

        vaddr = 0
        while vaddr < end:
            l1_descriptor = self.read_long_phys(self.dtb | (
                (vaddr & self.table_index_mask) >> 18))

            l1_descriptor_type = l1_descriptor & 0b11

            # Invalid descriptor - skip the whole 1MB section.
            if l1_descriptor_type == 0b00:
                vaddr += 1 << 20
                continue

            # Section or supersection.
            if l1_descriptor_type == 0b10:
                if l1_descriptor & self.super_section_mask:
                    if vaddr + (1 << 24) > start:
                        yield addrspace.Run(
                            start=vaddr,
                            end=vaddr + (1 << 24),
                            file_offset=(l1_descriptor &
                                         self.super_section_base_address_mask),
                            address_space=self.base)

                    vaddr += 1 << 24
                    continue

                if vaddr + (1 << 20) > start:
                    yield addrspace.Run(
                        start=vaddr,
                        end=vaddr + (1 << 20),
                        file_offset=(l1_descriptor &
                                     self.section_base_address_mask),
                        address_space=self.base)
                vaddr += 1 << 20
                continue

            # Coarse page table - enumerate its entries.
            if l1_descriptor_type == 0b01:
                for run in self._generate_coarse_page_table_addresses(
                        vaddr, l1_descriptor &
                        self.coarse_page_table_base_address_mask):
                    if run.end > start:
                        yield run

                vaddr += 1 << 20
                continue

            # Fine page table (type 0b11). Enumeration is not implemented;
            # skip the region. (Previously this raised
            # RuntimeError("Unreachable") even though the type is reachable.)
            vaddr += 1 << 20

    def _generate_coarse_page_table_addresses(self, base_vaddr,
                                              coarse_page_base):
        """Yield a Run for every valid entry of one coarse page table."""
        vaddr = base_vaddr
        limit = base_vaddr + (1 << 20)
        while vaddr < limit:
            l2_addr = (coarse_page_base |
                       (vaddr & self.l2_table_index_mask) >> 10)

            l2_descriptor = self.read_long_phys(l2_addr)
            l2_descriptor_type = l2_descriptor & 0b11

            # Large (64k) page.
            if l2_descriptor_type == 0b01:
                yield addrspace.Run(
                    start=vaddr,
                    end=vaddr + (1 << 16),
                    file_offset=(l2_descriptor &
                                 self.large_page_base_address_mask),
                    address_space=self.base)
                vaddr += 1 << 16
                continue

            # Small (4k) page (0b10) or extended small page (0b11).
            if l2_descriptor_type == 0b10 or l2_descriptor_type == 0b11:
                yield addrspace.Run(
                    start=vaddr,
                    end=vaddr + (1 << 12),
                    file_offset=(l2_descriptor &
                                 self.small_page_base_address_mask),
                    address_space=self.base)
                vaddr += 1 << 12
                continue

            # Invalid entry. Each coarse table entry maps 4kb, so advance a
            # whole entry. (Previously advanced 1 << 10, re-reading every
            # descriptor four times for the same result.)
            vaddr += 1 << 12
369
370
373