1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21 """This file implements support for parsing NTFS filesystem in Rekall.
22
23 Simply select the ntfs profile with an ntfs image - you might need to also
24 specify the --file_offset (or -o) parameter.
25
26 $ rekal -v --profile ntfs -f ~/images/ntfs1-gen2.E01
27
28 [1] Default session 13:56:54> fls
29 MFT Seq Created File Mod MFT Mod Access Size Filename
30 ----- ----- ------------------------- ------------------------- ------------------------- ------------------------- ---------- --------
31 4 4 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 36000 $AttrDef
32 8 8 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 0 $BadClus
33 6 6 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 126112 $Bitmap
34 7 7 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 8192 $Boot
35 11 11 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 0 $Extend
36 2 2 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 4685824 $LogFile
37 0 1 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 2008-12-31 22:44:02+0000 65536 $MFT
38 ...
39
40 """
41
42 import array
43 import logging
44 import re
45 import struct
46
47 from rekall import addrspace
48 from rekall import plugin
49 from rekall import obj
50 from rekall import testlib
51 from rekall.plugins import core
52 from rekall.plugins import guess_profile
53 from rekall.plugins.filesystems import lznt1
54 from rekall.plugins.overlays import basic
55 from rekall_lib import utils
56
57
58 -class Error(Exception):
60
64
68
85
86
# File attribute flag masks, shared by the $STANDARD_INFORMATION and
# $FILE_NAME vtype definitions below.
FILE_FLAGS = {
    "READ_ONLY": 0x0001,
    "HIDDEN": 0x0002,
    "SYSTEM": 0x0004,
    "ARCHIVE": 0x0020,
    "DEVICE": 0x0040,
    "NORMAL": 0x0080,
    "TEMPORARY": 0x0100,
    "SPARSE": 0x0200,
    "REPARSE_POINT": 0x0400,
    "COMPRESSED": 0x0800,
    "OFFLINE": 0x1000,
    "NOT_INDEXED": 0x2000,
    "ENCRYPTED": 0x4000,
}
102
# Rekall vtype overlay describing the NTFS on-disk structures. Each entry maps
# a struct name to [size, {field: [offset, [type, args]]}]. Callable offsets
# and "choices" values are evaluated lazily against the instance (x).
ntfs_vtypes = {
    'NTFS_BOOT_SECTOR': [512, {
        "oemname": [3, ["String", dict(length=8)]],
        "sector_size": [11, ["unsigned short"]],

        # Raw sectors-per-cluster field; see cluster_size below.
        "_cluster_size": [13, ["unsigned char"]],

        # The cluster size in bytes.
        "cluster_size": lambda x: x.m("_cluster_size") * x.sector_size,

        # The total number of clusters in the volume.
        "block_count": lambda x: x.m("_volume_size") / x.cluster_size,

        "_volume_size": [40, ["unsigned long"]],
        "_mft_cluster": [48, ["unsigned long"]],
        "_mirror_mft_cluster": [56, ["unsigned long"]],
        "_mft_record_size": [64, ["signed byte"]],
        "index_record_size": [68, ["unsigned char"]],
        "serial": [72, ["String", dict(length=8)]],

        # Boot sector signature - checked to be 0xAA55 by
        # NTFS_BOOT_SECTOR.Validate().
        "magic": [510, ["unsigned short"]],

        # The MFT is an array of MFT_ENTRY records starting at the byte
        # offset of the MFT cluster; each record is mft_record_size bytes.
        "MFT": [lambda x: x.m("_mft_cluster") * x.cluster_size,
                ["Array", dict(
                    target="MFT_ENTRY",
                    target_size=lambda x: x.mft_record_size)
                ]],
    }],

    "MFT_ENTRY": [None, {
        "magic": [0, ["String", dict(length=4)]],
        "fixup_offset": [4, ["unsigned short"]],
        "fixup_count": [6, ["unsigned short"]],
        "logfile_sequence_number": [8, ["unsigned long long"]],
        "sequence_value": [16, ["unsigned short"]],
        "link_count": [18, ["unsigned short"]],
        "attribute_offset": [20, ["unsigned short"]],
        "flags": [22, ["Flags", dict(
            target="unsigned short",
            bitmap=dict(
                ALLOCATED=0,
                DIRECTORY=1)
        )]],
        "mft_entry_size": [24, ["unsigned short"]],
        "mft_entry_allocated": [28, ["unsigned short"]],
        "base_record_reference": [32, ["unsigned long long"]],
        "next_attribute_id": [40, ["unsigned short"]],
        "record_number": [44, ["unsigned long"]],

        # The 2 byte magic written over the end of each sector by the fixup
        # scheme (verified and undone by FixupAddressSpace).
        "fixup_magic": [lambda x: x.obj_offset + x.fixup_offset,
                        ["String", dict(length=2, term=None)]],

        # The saved original bytes - one 2 byte entry per sector.
        "fixup_table": [lambda x: x.obj_offset + x.fixup_offset + 2,
                        ["Array", dict(
                            target="String",
                            target_args=dict(length=2, term=None),
                            count=lambda x: x.fixup_count-1)]],

        # The packed attribute stream following the entry header. Use
        # MFT_ENTRY.attributes instead, which handles the terminator and
        # $ATTRIBUTE_LIST indirection.
        "_attributes": [lambda x: x.obj_offset + x.attribute_offset,
                        ["ListArray",
                         dict(target="NTFS_ATTRIBUTE",
                              maximum_size=lambda x: x.mft_entry_size)]],
    }],

    "NTFS_ATTRIBUTE": [lambda x: x.length, {
        "type": [0, [
            "Enumeration", dict(
                target="unsigned int",

                # Maps the numeric attribute type to its name (e.g.
                # 128 -> "$DATA") using the profile's ATTRIBUTE_NAMES
                # constant.
                choices=lambda x: x.obj_profile.get_constant(
                    "ATTRIBUTE_NAMES")
            )]],
        "length": [4, ["unsigned int"]],
        "resident": [8, ["Enumeration", dict(
            target="unsigned char",
            choices={
                0: "RESIDENT",
                1: "NON-RESIDENT",
            }
        )]],

        # Resident attributes carry their content inline in the MFT entry.
        "is_resident": lambda x: x.resident == 0,
        "name_length": [9, ["unsigned char"]],
        "name_offset": [10, ["unsigned short"]],
        "flags": [12, ["Flags", dict(
            target="unsigned short",
            maskmap={
                "COMPRESSED" : 0x0001,
                "ENCRYPTED": 0x4000,
                "SPARSE": 0x8000,
            }
        )]],
        "attribute_id": [14, ["unsigned short"]],

        # Optional attribute name (e.g. "$I30"), name_length UTF-16 chars.
        "name": [lambda x: x.obj_offset + x.name_offset,
                 ["UnicodeString", dict(
                     length=lambda x: x.name_length * 2)]],

        # The following fields are only valid for resident attributes.
        "content_size": [16, ["unsigned int"]],
        "content_offset": [20, ["unsigned short"]],

        # The following fields are only valid for non-resident attributes.
        "runlist_vcn_start": [16, ["unsigned long long"]],
        "runlist_vcn_end": [24, ["unsigned long long"]],
        "runlist_offset": [32, ["unsigned short"]],
        "compression_unit_size": [34, ["unsigned short"]],
        "allocated_size": [40, ["unsigned long long"]],
        "actual_size": [48, ["unsigned long long"]],
        "initialized_size": [56, ["unsigned long long"]],
    }],

    "STANDARD_INFORMATION": [None, {
        "create_time": [0, ["WinFileTime"]],
        "file_altered_time": [8, ["WinFileTime"]],
        "mft_altered_time": [16, ["WinFileTime"]],
        "file_accessed_time": [24, ["WinFileTime"]],
        "flags": [32, ["Flags", dict(
            target="unsigned int",
            maskmap=FILE_FLAGS)]],
        "max_versions": [36, ["unsigned int"]],
        "version": [40, ["unsigned int"]],
        "class_id": [44, ["unsigned int"]],
        "owner_id": [48, ["unsigned int"]],
        "sid": [52, ["unsigned int"]],
        "quota": [56, ["unsigned long long"]],
        "usn": [64, ["unsigned int"]],
    }],

    "FILE_NAME": [None, {
        # Only the low 48 bits are the MFT record number; the top two bytes
        # of the 8 byte reference are the sequence number (seq_num below).
        "mftReference": [0, ["BitField", dict(
            target="unsigned long long",
            start_bit=0,
            end_bit=48)]],
        "seq_num": [6, ["short int"]],
        "created": [8, ["WinFileTime"]],
        "file_modified": [16, ["WinFileTime"]],
        "mft_modified": [24, ["WinFileTime"]],
        "file_accessed": [32, ["WinFileTime"]],
        "allocated_size": [40, ["unsigned long long"]],
        "size": [48, ["unsigned long long"]],
        # NOTE(review): uses bitmap= here while STANDARD_INFORMATION uses
        # maskmap= for the same FILE_FLAGS values - verify which is intended.
        "flags": [56, ["Flags", dict(
            target="unsigned int",
            bitmap=FILE_FLAGS)]],
        "reparse_value": [60, ["unsigned int"]],
        # Name length in UTF-16 characters (2 bytes each - see "name").
        "_length_of_name": [64, ["byte"]],
        "name_type": [65, ["Enumeration", dict(
            target="byte",
            choices={
                0: "POSIX",
                1: "Win32",
                2: "DOS",
                3: "DOS+Win32"
            })]],
        "name": [66, ["UnicodeString", dict(
            length=lambda x: x.m("_length_of_name") * 2)]],
    }],

    "STANDARD_INDEX_HEADER": [42, {
        "magicNumber": [0, ["Signature", dict(
            value="INDX",
        )]],

        "fixup_offset": [4, ["unsigned short"]],
        "fixup_count": [6, ["unsigned short"]],
        "logFileSeqNum": [8, ["unsigned long long"]],
        "vcnOfINDX": [16, ["unsigned long long"]],
        "node": [24, ["INDEX_NODE_HEADER"]],

        # INDX records use the same per-sector fixup scheme as MFT entries.
        "fixup_magic": [lambda x: x.obj_offset + x.fixup_offset,
                        ["String", dict(length=2, term=None)]],

        "fixup_table": [lambda x: x.obj_offset + x.fixup_offset + 2,
                        ["Array", dict(
                            target="String",
                            target_args=dict(length=2, term=None),
                            count=lambda x: x.fixup_count-1)]],
    }],

    "INDEX_RECORD_ENTRY": [lambda x: x.sizeOfIndexEntry.v(), {
        "mftReference": [0, ["BitField", dict(
            target="unsigned long long",
            start_bit=0,
            end_bit=48)]],
        "seq_num": [6, ["short int"]],
        "sizeOfIndexEntry": [8, ["unsigned short"]],
        "filenameOffset": [10, ["unsigned short"]],
        "flags": [12, ["unsigned int"]],
        "file": [16, ["FILE_NAME"]],
    }],

    "INDEX_ROOT": [None, {
        "type": [0, [
            "Enumeration", dict(
                target="unsigned int",

                # The attribute type this index is built over, named via the
                # profile's ATTRIBUTE_NAMES constant.
                choices=lambda x: x.obj_profile.get_constant(
                    "ATTRIBUTE_NAMES")
            )]],

        "collation_rule": [4, ["unsigned int"]],
        "idxalloc_size_b": [8, ["unsigned int"]],
        "idx_size_c": [12, ["unsigned int"]],
        "node": [16, ["INDEX_NODE_HEADER"]],
    }],

    "INDEX_NODE_HEADER": [0x10, {
        # Both offsets are relative to the start of this header.
        "offset_to_index_entry": [0, ["unsigned int"]],
        "offset_to_end_index_entry": [4, ["unsigned int"]],
    }],

    "ATTRIBUTE_LIST_ENTRY": [lambda x: x.length, {
        "type": [0, [
            "Enumeration", dict(
                target="unsigned int",
                choices=lambda x: x.obj_profile.get_constant(
                    "ATTRIBUTE_NAMES")
            )]],
        "length": [4, ["unsigned short int"]],
        "name_length": [6, ["byte"]],
        "offset_to_name": [7, ["byte"]],
        "starting_vcn": [8, ["unsigned long long"]],
        "mftReference": [16, ["BitField", dict(
            target="unsigned long long",
            start_bit=0,
            end_bit=48)]],

        "attribute_id": [24, ["byte"]],

        # Resolve the referenced attribute by looking up the target MFT
        # entry (through the "mft" object placed in obj_context) and
        # fetching the attribute with the matching type and id.
        "attribute": lambda x: x.obj_context["mft"][
            x.mftReference].get_attribute(
                x.type, x.attribute_id)
    }],
}
357 result = self.obj_profile.ListArray(
358 offset=self.offset_to_index_entry + self.obj_offset,
359 vm=self.obj_vm,
360 maximum_offset=self.offset_to_end_index_entry + self.obj_offset - 1,
361 target="INDEX_RECORD_ENTRY", context=self.obj_context,
362 )
363
364 for x in result:
365 if x.flags > 0:
366 break
367 yield x
368
371 """An address space to implement record fixup."""
372
    def __init__(self, fixup_magic, fixup_table, base_offset, length, **kwargs):
        """Read the record and reverse its sector fixups.

        NTFS overwrites the last two bytes of every 512 byte sector of a
        multi-sector record with a magic value, saving the original bytes in
        a fixup table. This address space reads the whole record into a
        buffer, verifies each sector's trailing magic and restores the saved
        bytes, so that reads see the original content.

        Args:
          fixup_magic: The expected 2 byte magic at the end of each sector.
          fixup_table: The saved original 2 byte values, one per sector.
          base_offset: Offset of the record within the base address space.
          length: Total length of the record in bytes.

        Raises:
          NTFSParseError: If a sector does not end with the fixup magic.
        """
        super(FixupAddressSpace, self).__init__(**kwargs)
        self.as_assert(self.base is not None, "Address space must be stacked.")
        self.base_offset = base_offset
        self.fixup_table = fixup_table
        self.fixup_magic = fixup_magic

        # Read the entire record and patch each sector's last two bytes back
        # to their saved values after checking the magic.
        self.buffer = array.array("c", self.base.read(base_offset, length))
        for i, fixup_value in enumerate(fixup_table):
            fixup_offset = (i+1) * 512 - 2
            if (self.buffer[fixup_offset:fixup_offset+2].tostring() !=
                    fixup_magic.v()):
                raise NTFSParseError("Fixup error")

            self.buffer[fixup_offset:fixup_offset+2] = array.array(
                "c", fixup_value.v())
390
391 - def read(self, address, length):
394
397 """An address space which is initialized from a runlist."""
398
    def __init__(self, run_list, cluster_size=None, size=0, name="", **kwargs):
        """Build the address space from a decoded NTFS runlist.

        Args:
          run_list: Iterable of (range_start, range_length) tuples in cluster
            units. A range_start of None denotes a sparse run with no
            physical backing.
          cluster_size: Cluster size in bytes; defaults to the session's
            cluster_size.
          size: Total file size in bytes. If 0 it is derived from the sum of
            the run lengths.
          name: A friendly name for this address space (e.g. the file path).
        """
        super(RunListAddressSpace, self).__init__(**kwargs)
        self.PAGE_SIZE = cluster_size or self.session.cluster_size
        # NTFS compression operates on units of 16 clusters.
        self.compression_unit_size = 16 * self.PAGE_SIZE
        self._end = size
        self.name = name

        # Translate the runlist into stored runs mapping logical file offsets
        # to physical offsets.
        file_offset = 0
        for range_start, range_length in run_list:
            if size == 0:
                self._end += range_length * self.PAGE_SIZE

            # Sparse run: no physical data, just advance the file offset.
            if range_start is None:
                file_offset += range_length

                # If the preceding run was shorter than a full compression
                # unit, this sparse filler marks it as holding compressed
                # data - flag it so the read path can decompress it.
                # (self.runs is maintained by _store_run / the base class,
                # not visible in this chunk.)
                try:
                    run = self.runs[-1][2]
                    if run.length < self.compression_unit_size:
                        run.data["compression"] = True

                except (ValueError, IndexError):
                    pass

                continue

            # Split the run into whole compression units plus a trailing
            # remainder of fewer than 16 clusters (potentially compressed).
            compressed_subrange = range_length % 16
            uncompressed_range_length = range_length - compressed_subrange
            if uncompressed_range_length:
                self._store_run(
                    file_offset, range_start, uncompressed_range_length)

                file_offset += uncompressed_range_length
                range_start += uncompressed_range_length

            if compressed_subrange:
                self._store_run(file_offset, range_start, compressed_subrange)

                file_offset += compressed_subrange
445
446 - def _store_run(self, file_offset, range_start, length):
462
500
519
521 return utils.SmartUnicode(self.name or self.__class__.__name__)
522
525
526
class MFT_ENTRY(obj.Struct):
    """An MFT Entry.

    Note that MFT entries behave as either files or directories depending on the
    attributes they have. This object wraps this behavior with convenience
    methods. Hence callers do not need to manipulate attributes directly.
    """

    def __init__(self, **kwargs):
        """Initialize the entry, applying NTFS fixups unless disabled.

        The "ApplyFixup" key in obj_context (default True) controls whether
        this entry is read through a FixupAddressSpace, which undoes the
        on-disk sector fixups before any field is parsed.
        """
        super(MFT_ENTRY, self).__init__(**kwargs)

        # Wrap our address space so all reads of this entry see the fixed-up
        # original bytes rather than the protected on-disk bytes.
        if self.obj_context.get("ApplyFixup", True):
            self.obj_vm = FixupAddressSpace(fixup_magic=self.fixup_magic,
                                            fixup_table=self.fixup_table,
                                            base_offset=self.obj_offset,
                                            length=self.mft_entry_allocated,
                                            base=self.obj_vm)
        self.logging = self.obj_session.logging.getChild("ntfs")
        # Parsing problems are common (e.g. when carving) - keep quiet.
        self.logging.setLevel(logging.ERROR)

    @utils.safe_property
    def mft_entry(self):
        # Prefer an "index" placed in the context by the caller, falling back
        # to the record number stored in the entry itself.
        return self.obj_context.get("index", self.record_number.v())

    @utils.safe_property
    def attributes(self):
        """Generate all attributes of this entry.

        Stops at the 0xFFFFFFFF end-of-attributes marker. If an
        $ATTRIBUTE_LIST is present, attributes stored in other (extension)
        MFT entries are yielded as well.
        """
        seen = set()

        for attribute in self._attributes:
            # 0xFFFFFFFF terminates the attribute stream.
            if attribute.type == 0xFFFFFFFF:
                break

            if attribute in seen:
                continue

            seen.add(attribute)
            yield attribute

            if attribute.type == "$ATTRIBUTE_LIST":
                for sub_attr in attribute.DecodeAttribute():
                    # Skip list entries that point back into this entry.
                    if sub_attr.mftReference == self.mft_entry:
                        continue

                    result = sub_attr.attribute
                    # NOTE(review): result is tested against seen but never
                    # added to it, so a duplicate extension attribute could
                    # be yielded twice - confirm whether this is intended.
                    if result in seen:
                        continue

                    yield result

    def get_attribute(self, type=None, id=None):
        """Return the first attribute matching the given type and/or id.

        Args:
          type: Optional attribute type to match (e.g. "$DATA").
          id: Optional attribute id to match.

        Returns:
          The first matching attribute, or a NoneObject if none matches.
        """
        for attribute in self.attributes:
            if type is not None and attribute.type != type:
                continue

            if id is not None and attribute.attribute_id != id:
                continue

            return attribute

        return obj.NoneObject("Attribute not found")

    def is_directory(self):
        """Does this MFT entry behave as a directory?"""
        # A directory is an entry carrying a $I30 index stream.
        for attribute in self.attributes:
            if (attribute.type in ("$INDEX_ALLOCATION", "$INDEX_ROOT") and
                    attribute.name == "$I30"):
                return True
        return False

    def list_files(self):
        """List the files contained in this directory.

        Note that any file can contain other files (i.e. be a directory) if it
        has an $I30 stream. That is, directories may also contain data and
        behave as files!

        Returns:
          An iterator over all INDEX_RECORD_ENTRY.
        """
        for attribute in self.attributes:
            if (attribute.type in ("$INDEX_ALLOCATION", "$INDEX_ROOT") and
                    attribute.name == "$I30"):
                for index_header in attribute.DecodeAttribute():
                    for x in index_header.node.Entries():
                        yield x

    def open_file(self):
        """Returns an address space which maps the content of the file's data.

        If this MFT does not contain any $DATA streams, returns a NoneObject().

        The returned address space is formed by joining all $DATA streams' run
        lists in this MFT into a contiguous mapping.
        """
        runlists = []
        data_size = 0

        # Collect the runlists of every non-resident $DATA attribute into a
        # single combined mapping.
        for attribute in self.attributes:
            if attribute.type == "$DATA":
                # Resident data lives inline in the MFT - return it directly.
                if attribute.is_resident:
                    return attribute.data

                if data_size == 0:
                    data_size = attribute.size

                # Sanity check: the decoded runs should cover the VCN range
                # declared in the attribute header.
                run_length = (attribute.runlist_vcn_end -
                              attribute.runlist_vcn_start + 1)
                run_list = list(attribute.RunList())

                if sum(x[1] for x in run_list) != run_length:
                    self.logging.error(
                        "NTFS_ATTRIBUTE %s-%s: Not all runs found!",
                        self.mft_entry, attribute)

                runlists.extend(attribute.RunList())

        if runlists:
            return RunListAddressSpace(
                run_list=runlists,
                base=self.obj_session.physical_address_space,
                session=self.obj_session,
                name=self.full_path,
                size=data_size)

        return obj.NoneObject("No data")

    @utils.safe_property
    def filename(self):
        """The best $FILE_NAME attribute of this entry.

        Prefers a Win32 name; otherwise falls back to the last name seen
        (e.g. the DOS 8.3 name). A NoneObject is returned when the entry has
        no $FILE_NAME attribute at all.
        """
        dos_name = obj.NoneObject()
        for attribute in self.attributes:
            if attribute.type == "$FILE_NAME":
                attribute = attribute.DecodeAttribute()

                # Prefer the Win32 version of the name.
                if "Win32" in str(attribute.name_type):
                    return attribute

                dos_name = attribute

        # Fall back to the DOS style name.
        return dos_name

    @utils.safe_property
    def full_path(self):
        """Returns the full path of this MFT to the root."""
        result = []
        mft = self.obj_context["mft"]
        mft_entry = self
        depth = 0
        # Bound the walk to 10 levels to guard against reference loops in
        # corrupt filesystems.
        while depth < 10:
            filename_record = mft_entry.filename
            filename = unicode(filename_record.name)
            # The root directory is named "." - stop there.
            if filename == ".":
                break

            result.append(filename)
            mft_entry = mft[filename_record.mftReference]
            # NOTE(review): "== None" (not "is None") looks deliberate - the
            # lookup presumably returns a NoneObject which compares equal to
            # None but is not the None singleton. Do not "fix" to "is None".
            if mft_entry == None:
                break

            depth += 1

        result.reverse()
        return "/".join(result)

    @utils.safe_property
    def data_size(self):
        """Search all the $DATA attributes for the allocated size."""
        for attribute in self.attributes:
            if attribute.type == "$DATA" and attribute.size > 0:
                return attribute.size

        return 0
707
710 """A class to parse and access the NTFS boot sector."""
711
712
713 mft_record_size = 0
714
716 """Parse the boot sector and calculate offsets."""
717 super(NTFS_BOOT_SECTOR, self).__init__(**kwargs)
718 if self._mft_record_size > 0:
719 self.mft_record_size = self._mft_record_size * self.cluster_size
720 else:
721 self.mft_record_size = 1 << -self._mft_record_size
722
724 """Verify the boot sector for sanity."""
725
726 if self.magic != 0xAA55:
727 raise NTFSParseError("Magic not correct.")
728
729 if self.cluster_size not in [
730 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80]:
731 raise NTFSParseError("Invalid cluster_size")
732
733 if self.sector_size == 0 or self.sector_size % 512:
734 raise NTFSParseError("invalid sector_size")
735
736 if self.block_count == 0:
737 raise NTFSParseError("Volume size is 0")
738
741 """The NTFS attribute."""
742
743
744
    # MASK[n] keeps only the low n bytes of a value that was read as a full
    # 8 byte little endian integer - used when decoding the variable-length
    # fields of runlist entries (see the runlist decoding below).
    MASK = {
        0: 0,
        1: 0xFF,
        2: 0xFFFF,
        3: 0xFFFFFF,
        4: 0xFFFFFFFF,
        5: 0xFFFFFFFFFF,
        6: 0xFFFFFFFFFFFF,
        7: 0xFFFFFFFFFFFFFF,
        8: 0xFFFFFFFFFFFFFFFF,
    }
756
757
758
    # SIGN_BIT[n] is the sign bit position of an n byte two's complement
    # value - used by the sign extension helper when decoding relative run
    # offsets, which may be negative.
    SIGN_BIT = {
        0: 0,
        1: 1 << (1 * 8 - 1),
        2: 1 << (2 * 8 - 1),
        3: 1 << (3 * 8 - 1),
        4: 1 << (4 * 8 - 1),
        5: 1 << (5 * 8 - 1),
        6: 1 << (6 * 8 - 1),
        7: 1 << (7 * 8 - 1),
        8: 1 << (8 * 8 - 1),
    }
770
772 """Sign extend a value based on the number of bytes it should take."""
773 m = self.SIGN_BIT[b]
774 x = x & self.MASK[b]
775 return (x ^ m) - m
776
777 @utils.safe_property
796
798 if self.type == "$STANDARD_INFORMATION":
799 return self.obj_profile.STANDARD_INFORMATION(
800 offset=0, vm=self.data, context=self.obj_context)
801
802 elif self.type == "$FILE_NAME":
803 return self.obj_profile.FILE_NAME(
804 offset=0, vm=self.data, context=self.obj_context)
805
806 elif self.type == "$DATA":
807 return list(self.RunList())
808
809 elif self.type == "$INDEX_ALLOCATION":
810 result = []
811 for i in xrange(0, self.size, 0x1000):
812 result.append(
813 self.obj_profile.STANDARD_INDEX_HEADER(
814 offset=i, vm=self.data, context=self.obj_context))
815
816 return result
817
818 elif self.type == "$INDEX_ROOT":
819 return [self.obj_profile.INDEX_ROOT(
820 offset=0, vm=self.data, context=self.obj_context)]
821
822 elif self.type == "$ATTRIBUTE_LIST":
823 result = self.obj_profile.ListArray(
824 offset=0, vm=self.data,
825 target="ATTRIBUTE_LIST_ENTRY",
826 maximum_size=self.content_size,
827 context=self.obj_context
828 )
829
830 return result
831
833 """Decodes the runlist for this attribute."""
834 if self.is_resident:
835 return
836
837 offset = self.obj_offset + self.runlist_offset
838 run_offset = 0
839
840 while 1:
841 idx = ord(self.obj_vm.read(offset, 1))
842 if idx == 0:
843 return
844
845 length_size = idx & 0xF
846 run_offset_size = idx >> 4
847 offset += 1
848
849 run_length = struct.unpack("<Q", self.obj_vm.read(offset, 8))[0]
850 run_length &= self.MASK[length_size]
851 offset += length_size
852
853 relative_run_offset = struct.unpack(
854 "<Q", self.obj_vm.read(offset, 8))[0]
855
856 relative_run_offset = self.sign_extend(relative_run_offset,
857 run_offset_size)
858
859 run_offset += relative_run_offset
860 offset += run_offset_size
861
862
863 if relative_run_offset == 0:
864 yield None, run_length
865 else:
866 yield run_offset, run_length
867
868 @utils.safe_property
870 return self.type, self.attribute_id
871
872 @utils.safe_property
874 """The MFT entry containing this entry."""
875
876 return self.obj_offset / 0x400
877
878 @utils.safe_property
880 """The size of this attribute's data."""
881 if self.is_resident:
882 return self.content_size
883
884
885
886 return self.actual_size
887
890 """The index header must manage its own fixups."""
891
903
904
905
906 -class NTFSProfile(basic.ProfileLLP64, basic.BasicClasses):
907 """A profile for the NTFS."""
908
910 super(NTFSProfile, self).__init__(**kwargs)
911 self.add_overlay(ntfs_vtypes)
912 self.add_classes(dict(
913 NTFS_BOOT_SECTOR=NTFS_BOOT_SECTOR,
914 MFT_ENTRY=MFT_ENTRY,
915 NTFS_ATTRIBUTE=NTFS_ATTRIBUTE,
916 INDEX_NODE_HEADER=INDEX_NODE_HEADER,
917 STANDARD_INDEX_HEADER=STANDARD_INDEX_HEADER,
918 ))
919
920
921
922 self.add_constants(dict(ATTRIBUTE_NAMES={
923 16: "$STANDARD_INFORMATION",
924 32: "$ATTRIBUTE_LIST",
925 48: "$FILE_NAME",
926 64: "$OBJECT_ID",
927 80: "$SECURITY_DESCRIPTOR",
928 96: "$VOLUME_NAME",
929 112: "$VOLUME_INFORMATION",
930 128: "$DATA",
931 144: "$INDEX_ROOT",
932 160: "$INDEX_ALLOCATION",
933 176: "$BITMAP",
934 192: "$REPARSE_POINT",
935 256: "$LOGGED_UTILITY_STREAM",
936 }))
937
938
939 -class NTFS(object):
940 """A class to manage the NTFS filesystem parser."""
941
942 - def __init__(self, address_space, session=None):
976
977 - def MFTEntryByName(self, path):
978 """Return the MFT entry by traversing the path.
979
980 We support both / and \\ as path separators. Path matching is case
981 insensitive.
982
983 Raises IOError if path is not found.
984
985 Returns:
986 a tuple of (path, MFT_ENTRY) where path is the case corrected path.
987
988 """
989 components = filter(None, re.split(r"[\\/]", path))
990 return_path = []
991
992
993 directory = self.mft[5]
994 for component in components:
995 component = component.lower()
996
997 for record in directory.list_files():
998 filename = record.file.name.v()
999 if filename.lower() == component.lower():
1000 directory = self.mft[record.mftReference]
1001 return_path.append(filename)
1002 break
1003 else:
1004 raise IOError("Path %s component not found." % component)
1005
1006 directory.obj_context["path"] = "/".join(return_path)
1007
1008 return directory
1009
1010
1011 -class NTFSPlugins(plugin.PhysicalASMixin, plugin.TypedProfileCommand,
1012 plugin.ProfileCommand):
1013 """Base class for ntfs plugins."""
1014 __abstract = True
1015
1016 mode = "mode_ntfs"
1017
1026
1029 """Mixin for commands which take filenames- delegate to inode commands."""
1030 delegate = ""
1031
1032 __args = [
1033 dict(name="path", default="/", positional=True,
1034 help="Path to print stats for."),
1035 ]
1036
1042
1045 """A mixin for plugins which work on mft entries."""
1046
1047 __args = [
1048 dict(name="mfts", type="ArrayIntParser", default=[5],
1049 required=False, positional=True,
1050 help="MFT entries to list.")
1051 ]
1052
1053
class FStat(FileBaseCommandMixin, NTFSPlugins):
    """Print information by filename."""
    # Resolves the path argument to an MFT entry, then delegates rendering
    # to the "istat" plugin (see FileBaseCommandMixin).
    name = "fstat"
    delegate = "istat"
1058
1059
1060 -class IStat(MFTPluginsMixin, NTFSPlugins):
1061 """Print information related to an MFT entry."""
1062 name = "istat"
1063
1065 for attribute in mft_entry.attributes:
1066 if attribute.type == "$STANDARD_INFORMATION":
1067 decoded_attribute = attribute.DecodeAttribute()
1068
1069 renderer.format("$STANDARD_INFORMATION Attribute Values:\n")
1070
1071 renderer.table_header([
1072 ("Key", "key", "30"),
1073 ("Value", "value", "30")], suppress_headers=True)
1074
1075 renderer.table_row("Flags", decoded_attribute.flags)
1076 renderer.table_row("Owner ID", decoded_attribute.owner_id)
1077 renderer.table_row("SID", decoded_attribute.sid)
1078 renderer.table_row("Created", decoded_attribute.create_time)
1079 renderer.table_row("File Modified",
1080 decoded_attribute.file_altered_time)
1081 renderer.table_row("MFT Modified",
1082 decoded_attribute.mft_altered_time)
1083
1084 renderer.table_row("Accessed",
1085 decoded_attribute.file_accessed_time)
1086
1088 for attribute in mft_entry.attributes:
1089 if attribute.type == "$DATA":
1090 if attribute.is_resident:
1091 return
1092
1093 renderer.format("\nClusters ({0:d}-{1:d}):\n",
1094 attribute.type, attribute.attribute_id)
1095 renderer.table_header([
1096 ("c%s" % x, "c%s" % x, "25") for x in range(4)
1097 ], suppress_headers=True, nowrap=True)
1098
1099 blocks = attribute.DecodeAttribute()
1100 for i in range(0, len(blocks), 8):
1101 ranges = []
1102 for (start, length) in blocks[i:i+8]:
1103 if start is None:
1104 ranges.append("Sparse(%s)" % length)
1105 else:
1106 ranges.append("%s-%s(%s)" % (
1107 start, start + length, length))
1108
1109 renderer.table_row(*ranges)
1110
1120
1122 if mft_entry.is_directory():
1123 renderer.format("\n$I30 Analysis:\n")
1124 renderer.table_header([
1125 ("MFT", "mft", ">10"),
1126 ("Seq", "seq", ">5"),
1127 ("Created", "created", "25"),
1128 ("File Mod", "file_mod", "25"),
1129 ("MFT Mod", "mft_mod", "25"),
1130 ("Access", "accessed", "25"),
1131 ("Size", "size", ">10"),
1132 ("Filename", "filename", ""),
1133 ])
1134
1135 for record in mft_entry.list_files():
1136 file_record = record.file
1137
1138 renderer.table_row(
1139 record.mftReference,
1140 record.seq_num,
1141 file_record.created,
1142 file_record.file_modified,
1143 file_record.mft_modified,
1144 file_record.file_accessed,
1145 file_record.size,
1146 file_record.name)
1147
1149 for mft in self.plugin_args.mfts:
1150 mft_entry = self.ntfs.mft[mft]
1151
1152 renderer.format("MFT Entry Header Values:\n")
1153 renderer.format("Entry: {0:d} Sequence: {1:d}\n",
1154 mft, mft_entry.sequence_value)
1155
1156 renderer.format("$LogFile Sequence Number: {0:d}\n",
1157 mft_entry.logfile_sequence_number)
1158 renderer.format("Links: {0:d}\n\n", mft_entry.link_count)
1159
1160 self.render_standard_info(renderer, mft_entry)
1161
1162 renderer.format("\nAttributes:\n")
1163 renderer.table_header([
1164 ("Inode", "inode", ">15"),
1165 ("Type", "type", "30"),
1166 ("Name", "name", "10"),
1167 ("Res", "resident", "5"),
1168 ("Size", "size", ">10"),
1169 ("Comment", "comment", "")])
1170
1171 for attribute in mft_entry.attributes:
1172 renderer.table_row(
1173 "%d-%d-%d" % (attribute.owner_MFT, attribute.type,
1174 attribute.attribute_id),
1175 attribute.type,
1176 attribute.name,
1177 attribute.is_resident,
1178 attribute.size, self.comment(attribute))
1179
1180 self.render_block_allocation(renderer, mft_entry)
1181 self.render_i30(renderer, mft_entry)
1182
1183
1184 -class FLS(FileBaseCommandMixin, NTFSPlugins):
1187
1188
1189 -class ILS(MFTPluginsMixin, NTFSPlugins):
1190 """List files in an NTFS image."""
1191
1192 name = "ils"
1193
1195 for mft in self.plugin_args.mfts:
1196 directory = self.ntfs.mft[mft]
1197
1198
1199 renderer.table_header([
1200 ("MFT", "mft", ">10"),
1201 ("Seq", "seq", ">5"),
1202 ("Created", "created", "25"),
1203 ("File Mod", "file_mod", "25"),
1204 ("MFT Mod", "mft_mod", "25"),
1205 ("Access", "accessed", "25"),
1206 ("Size", "size", ">10"),
1207 ("Filename", "filename", ""),
1208 ])
1209
1210 for record in directory.list_files():
1211 file_record = record.file
1212
1213 renderer.table_row(
1214 record.mftReference,
1215 record.seq_num,
1216 file_record.created,
1217 file_record.file_modified,
1218 file_record.mft_modified,
1219 file_record.file_accessed,
1220 file_record.size,
1221 file_record.name)
1222
1223
1224 -class IDump(NTFSPlugins):
1225 """Dump a part of an MFT file."""
1226 name = "idump"
1227
1228 __args = [
1229 dict(name="mft", type="IntParser", default=5,
1230 required=True, positional=True,
1231 help="MFT entry to dump."),
1232
1233 dict(name="type", type="IntParser", default=128,
1234 required=False, positional=True,
1235 help="Attribute type to dump."),
1236
1237 dict(name="id", type="IntParser", default=None,
1238 required=False, positional=True,
1239 help="Id of attribute to dump."),
1240 ]
1241
1242
1243 offset = 0
1244
1256
1257
1258 -class IExport(core.DirectoryDumperMixin, IDump):
1259 """Extracts files from NTFS.
1260
1261 For each specified MFT entry, dump the file to the specified dump
1262 directory. The filename is taken as the longest filename of this MFT entry.
1263 """
1264
1265 name = "iexport"
1266
1284
1290
1291
1292 -class TestIStat(testlib.SimpleTestCase):
1296
1297
1298 -class TestFStat(testlib.SimpleTestCase):
1302
1309