1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23 """This Address Space allows us to open aff4 images.
24
25 AFF4 images are produced by the Rekall memory acquisition tools (Pmem and
26 friends).
27
28 For this address space to work:
29
30 pip install pyaff4
31
32 """
33 import logging
34 import re
35 import os
36
37 from pyaff4 import data_store
38 try:
39 from pyaff4 import aff4_cloud
40 except ImportError:
41 aff4_cloud = None
42
43 from pyaff4 import aff4_directory
44 from pyaff4 import zip
45 from pyaff4 import lexicon
46 from pyaff4 import rdfvalue
47
48 from pyaff4 import plugins
49
50 from rekall import addrspace
51 from rekall import cache
52 from rekall_lib import yaml_utils
53 from rekall_lib import utils
54 from rekall.plugins.addrspaces import standard
55
56
# The pyaff4 library logs verbosely through its own "pyaff4" logger;
# restrict it to errors so it does not drown Rekall's output.
LOGGER = logging.getLogger("pyaff4")
LOGGER.setLevel(logging.ERROR)
59
60
64
65 - def read(self, offset, length):
68
70 return self.stream.Size()
71
73 return utils.SmartUnicode(self.stream.urn)
74
75
class AFF4AddressSpace(addrspace.CachingAddressSpaceMixIn,
                       addrspace.RunBasedAddressSpace):
    """Handle AFF4Map or AFF4Image type streams.

    Since AFF4 volumes may contain multiple streams, we allow the stream to be
    specified inside the volume path. For example suppose the volume located at:

    /home/mic/images/myimage.aff4

    Contains a stream called PhysicalMemory, then we can specify the filename
    as:

    /home/mic/images/myimage.aff4/PhysicalMemory

    If we just specified the path to the volume, then this address space will
    pick the first AFF4 stream which has an aff4:category of
    lexicon.AFF4_MEMORY_PHYSICAL.

    So if you have more than one physical memory stream in the same volume, you
    will need to specify the full path to the stream within the volume.
    """
    # Registration name of this address space.
    __name = "aff4"
    # NOTE(review): presumably marks this as an image-backed (non-live)
    # address space - confirm against the addrspace base class.
    __image = True

    # This address space can map files from the image into itself
    # (it maintains self.filenames / self.mapped_files below).
    __can_map_files = True

    # Sort before the plain file address space so AFF4 volumes are tried
    # first; presumably lower `order` is attempted earlier - confirm.
    order = standard.FileAddressSpace.order - 10
105 - def __init__(self, filename=None, **kwargs):
106 super(AFF4AddressSpace, self).__init__(**kwargs)
107 self.as_assert(self.base == None,
108 "Must stack on another address space")
109
110 path = filename or self.session.GetParameter("filename")
111 self.as_assert(path != None, "Filename must be specified")
112
113 self.image = None
114 self.resolver = data_store.MemoryDataStore()
115
116
117 try:
118 cache_dir = cache.GetCacheDir(self.session)
119 if cache_dir:
120 self.resolver.Set(lexicon.AFF4_CONFIG_CACHE_DIR,
121 lexicon.AFF4_FILE_NAME,
122 rdfvalue.XSDString(
123 os.path.join(cache_dir, "aff4_cache")))
124 except IOError:
125 pass
126
127
128
129 self.mapped_files = {}
130 try:
131 volume_path, stream_path = self._LocateAFF4Volume(path)
132 except IOError as e:
133 self.session.logging.debug("Unable to open AFF4 image %s", e)
134 raise addrspace.ASAssertionError("Unable to open AFF4 volume")
135
136
137
138 if not stream_path:
139 try:
140 self._AutoLoadAFF4Volume(volume_path)
141 return
142 except IOError as e:
143 raise addrspace.ASAssertionError(
144 "Unable to open AFF4 volume: %s" % e)
145
146
147
148
149 try:
150 image_urn = volume_path.Append(stream_path)
151 self._LoadMemoryImage(image_urn)
152 except IOError as e:
153 raise addrspace.ASAssertionError(
154 "Unable to open AFF4 stream %s: %s" % (
155 stream_path, e))
156
158 stream_name = []
159 path_components = list(filename.split(os.sep))
160 while path_components:
161 volume_path = os.sep.join(path_components)
162 volume_urn = rdfvalue.URN.NewURNFromFilename(volume_path)
163 volume_urn_parts = volume_urn.Parse()
164
165 if volume_urn_parts.scheme == "file":
166 if os.path.isfile(volume_path):
167 with zip.ZipFile.NewZipFile(
168 self.resolver, volume_urn) as volume:
169 if stream_name:
170 return volume.urn, os.sep.join(stream_name)
171
172 return volume.urn, None
173
174 elif os.path.isdir(volume_path):
175 with aff4_directory.AFF4Directory.NewAFF4Directory(
176 self.resolver, volume_urn) as volume:
177 if stream_name:
178 return volume.urn, os.sep.join(stream_name)
179
180 return volume.urn, None
181
182 else:
183
184 return None, None
185
186 elif volume_urn_parts.scheme == "gs" and aff4_cloud:
187 with aff4_cloud.AFF4GStore.NewAFF4GStore(
188 self.resolver, volume_urn) as volume:
189 if stream_name:
190 return volume.urn, os.sep.join(stream_name)
191
192 return volume.urn, None
193
194
195
196 else:
197 stream_name.insert(0, path_components.pop(-1))
198
199 raise IOError("Not found")
200
202 with self.resolver.AFF4FactoryOpen(path) as volume:
203 self.volume_urn = volume.urn
204
205
206 for (subject, _, value) in self.resolver.QueryPredicate(
207 lexicon.AFF4_CATEGORY):
208 if value == lexicon.AFF4_MEMORY_PHYSICAL:
209 self._LoadMemoryImage(subject)
210 break
211
212 self.as_assert(self.image is not None,
213 "No physical memory categories found.")
214
215 self.filenames = {}
216
217
218 for (subject, _, value) in self.resolver.QueryPredicate(
219 lexicon.AFF4_STREAM_ORIGINAL_FILENAME):
220
221 self.filenames[unicode(value).lower()] = subject
222
223
224
225 for subject in self.resolver.QuerySubject(re.compile(".")):
226 relative_name = self.volume_urn.RelativePath(subject)
227 if relative_name:
228 filename = self._normalize_filename(relative_name)
229 self.filenames[filename] = subject
230
232 """Normalize the filename based on the source OS."""
233 m = re.match(r"/?([a-zA-Z]:[/\\].+)", filename)
234 if m:
235
236 filename = m.group(1).replace("/", "\\")
237
238 return filename.lower()
239
240 return filename
241
243 aff4_stream = self.resolver.AFF4FactoryOpen(image_urn)
244 self.image = AFF4StreamWrapper(aff4_stream)
245
246
247 try:
248 for map_range in aff4_stream.GetRanges():
249 self.add_run(map_range.map_offset,
250 map_range.map_offset,
251 map_range.length,
252 self.image)
253
254 except AttributeError:
255 self.add_run(0, 0, aff4_stream.Size(), self.image)
256
257 self.session.logging.info("Added %s as physical memory", image_urn)
258
261
263 """Returns the offset where the filename should be mapped.
264
265 This function manages the session cache. By storing the file mappings in
266 the session cache we can guarantee repeatable mappings.
267 """
268 mapped_files = self.session.GetParameter("file_mappings", {})
269 if filename in mapped_files:
270 return utils.CaseInsensitiveDictLookup(
271 filename, mapped_files)
272
273
274 mapped_offset = (self.end() + 0x10000) & 0xFFFFFFFFFFFFF000
275 mapped_files[filename] = mapped_offset
276
277 self.session.SetCache("file_mappings", mapped_files)
278
279 return mapped_offset
280
282 """Return an address space for filename."""
283 subject = utils.CaseInsensitiveDictLookup(
284 filename, self.filenames)
285
286 if subject:
287 return AFF4StreamWrapper(self.resolver.AFF4FactoryOpen(subject))
288 return
289
291 """Map the filename into the address space.
292
293 If the filename is found in the AFF4 image, we return the offset in this
294 address space corresponding to file_offset in the mapped file.
295
296 If the file is not mapped, return None.
297 """
298 mapped_offset = None
299 filename = self._normalize_filename(filename)
300 mapped_offset = utils.CaseInsensitiveDictLookup(
301 filename, self.mapped_files)
302 if mapped_offset is None:
303
304 subject = utils.CaseInsensitiveDictLookup(
305 filename, self.filenames)
306
307
308
309 if not subject:
310
311
312 subject = utils.CaseInsensitiveDictLookup(
313 filename.replace("SysNative", "System32"),
314 self.filenames)
315
316 if subject:
317 stream = self.resolver.AFF4FactoryOpen(subject)
318 mapped_offset = self.file_mapping_offset(filename)
319 self.add_run(mapped_offset, 0, stream.Size(),
320 AFF4StreamWrapper(stream))
321
322 self.session.logging.info(
323 "Mapped %s into address %#x", stream.urn, mapped_offset)
324
325 else:
326
327 mapped_offset = -1
328
329
330 self.mapped_files[filename] = mapped_offset
331 if mapped_offset > 0:
332 return mapped_offset + file_offset
333
    # Pairs of (session parameter name, key in the image metadata).
    # NOTE(review): presumably consumed by a metadata/configuration method
    # that is not visible in this chunk - confirm before changing.
    _parameter = [
        ("dtb", "Registers.CR3"),
        ("kernel_base", "KernBase"),
        ("vm_kernel_slide", "kaslr_slide")
    ]
339
364
378
379
380
381
382
383 import rdflib.plugins.memory
384 import rdflib.plugins.parsers.hturtle
385 import rdflib.plugins.parsers.notation3
386 import rdflib.plugins.parsers.nquads
387 import rdflib.plugins.parsers.nt
388 import rdflib.plugins.parsers.rdfxml
389 import rdflib.plugins.parsers.structureddata
390 import rdflib.plugins.parsers.trig
391 import rdflib.plugins.parsers.trix
392 import rdflib.plugins.serializers.n3
393 import rdflib.plugins.serializers.nquads
394 import rdflib.plugins.serializers.nt
395 import rdflib.plugins.serializers.rdfxml
396 import rdflib.plugins.serializers.trig
397 import rdflib.plugins.serializers.trix
398 import rdflib.plugins.serializers.turtle
399 import rdflib.plugins.sleepycat
400 import rdflib.plugins.sparql.processor
401 import rdflib.plugins.sparql.results.csvresults
402 import rdflib.plugins.sparql.results.jsonresults
403 import rdflib.plugins.sparql.results.tsvresults
404 import rdflib.plugins.sparql.results.txtresults
405 import rdflib.plugins.sparql.results.xmlresults
406 import rdflib.plugins.stores.auditable
407 import rdflib.plugins.stores.concurrent
408 import rdflib.plugins.stores.sparqlstore
409