/src/sleuthkit/tsk/fs/hfs.cpp
Line | Count | Source |
1 | | /* |
2 | | ** The Sleuth Kit |
3 | | ** |
4 | | ** This software is subject to the IBM Public License ver. 1.0, |
5 | | ** which was displayed prior to download and is included in the readme.txt |
6 | | ** file accompanying the Sleuth Kit files. It may also be requested from: |
7 | | ** Crucial Security Inc. |
8 | | ** 14900 Conference Center Drive |
9 | | ** Chantilly, VA 20151 |
10 | | ** |
11 | | |
12 | | ** Copyright (c) 2009 Brian Carrier. All rights reserved. |
13 | | ** |
14 | | ** Judson Powers [jpowers@atc-nycorp.com] |
15 | | ** Matt Stillerman [matt@atc-nycorp.com] |
16 | | ** Rob Joyce [rob@atc-nycorp.com] |
17 | | ** Copyright (c) 2008, 2012 ATC-NY. All rights reserved. |
18 | | ** This file contains data developed with support from the National |
19 | | ** Institute of Justice, Office of Justice Programs, U.S. Department of Justice. |
20 | | ** |
21 | | ** Wyatt Banks [wbanks@crucialsecurity.com] |
22 | | ** Copyright (c) 2005 Crucial Security Inc. All rights reserved. |
23 | | ** |
24 | | ** Brian Carrier [carrier@sleuthkit.org] |
25 | | ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved |
26 | | ** |
27 | | ** Copyright (c) 1997,1998,1999, International Business Machines |
28 | | ** Corporation and others. All Rights Reserved. |
29 | | */ |
30 | | |
31 | | /* TCT |
32 | | * LICENSE |
33 | | * This software is distributed under the IBM Public License. |
34 | | * AUTHOR(S) |
35 | | * Wietse Venema |
36 | | * IBM T.J. Watson Research |
37 | | * P.O. Box 704 |
38 | | * Yorktown Heights, NY 10598, USA |
39 | | --*/ |
40 | | |
41 | | /* |
42 | | ** You may distribute the Sleuth Kit, or other software that incorporates |
43 | | ** part or all of the Sleuth Kit, in object code form under a license agreement, |
44 | | ** provided that: |
45 | | ** a) you comply with the terms and conditions of the IBM Public License |
46 | | ** ver 1.0; and |
47 | | ** b) the license agreement |
48 | | ** i) effectively disclaims on behalf of all Contributors all warranties |
49 | | ** and conditions, express and implied, including warranties or |
50 | | ** conditions of title and non-infringement, and implied warranties |
51 | | ** or conditions of merchantability and fitness for a particular |
52 | | ** purpose. |
53 | | ** ii) effectively excludes on behalf of all Contributors liability for |
54 | | ** damages, including direct, indirect, special, incidental and |
55 | | ** consequential damages such as lost profits. |
56 | | ** iii) states that any provisions which differ from IBM Public License |
57 | | ** ver. 1.0 are offered by that Contributor alone and not by any |
58 | | ** other party; and |
59 | | ** iv) states that the source code for the program is available from you, |
60 | | ** and informs licensees how to obtain it in a reasonable manner on or |
61 | | ** through a medium customarily used for software exchange. |
62 | | ** |
63 | | ** When the Sleuth Kit or other software that incorporates part or all of |
64 | | ** the Sleuth Kit is made available in source code form: |
65 | | ** a) it must be made available under IBM Public License ver. 1.0; and |
66 | | ** b) a copy of the IBM Public License ver. 1.0 must be included with |
67 | | ** each copy of the program. |
68 | | */ |
69 | | |
70 | | /** \file hfs.cpp |
71 | | * Contains the general internal TSK HFS metadata and data unit code |
72 | | */ |
73 | | |
74 | | #include "tsk_fs_i.h" |
75 | | #include "tsk_hfs.h" |
76 | | #include "decmpfs.h" |
77 | | |
78 | | #include <memory> |
79 | | #include <new> |
80 | | |
81 | | #include <stdarg.h> |
82 | | #ifdef TSK_WIN32 |
83 | | #include <string.h> |
84 | | #else |
85 | | #include <strings.h> |
86 | | #endif |
87 | | |
88 | 0 | #define XSWAP(a,b) { a ^= b; b ^= a; a ^= b; } |
89 | | |
90 | | // Compression Stuff |
91 | | |
92 | | #ifdef HAVE_LIBZ |
93 | | #include <zlib.h> |
94 | | #endif |
95 | | |
96 | | #include "lzvn.h" |
97 | | |
98 | | // Forward declarations: |
99 | | static uint8_t hfs_load_attrs(TSK_FS_FILE * fs_file); |
100 | | static uint8_t hfs_load_extended_attrs(TSK_FS_FILE * file, |
101 | | unsigned char *isCompressed, unsigned char *cmpType, |
102 | | uint64_t * uncSize); |
103 | | |
104 | | /* may set error up to string 1 |
105 | | * returns 0 on success, 1 on failure */ |
106 | | uint8_t |
107 | | hfs_checked_read_random(TSK_FS_INFO * fs, char *buf, size_t len, |
108 | | TSK_OFF_T offs) |
109 | 0 | { |
110 | 0 | ssize_t r; |
111 | |
|
112 | 0 | r = tsk_fs_read(fs, offs, buf, len); |
113 | 0 | if (r != (ssize_t) len) { |
114 | 0 | if (r >= 0) { |
115 | 0 | tsk_error_reset(); |
116 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
117 | 0 | } |
118 | 0 | return 1; |
119 | 0 | } |
120 | 0 | return 0; |
121 | 0 | } |
122 | | |
123 | | /********************************************************************** |
124 | | * |
125 | | * MISC FUNCS |
126 | | * |
127 | | **********************************************************************/ |
128 | | |
129 | | /* convert the HFS Time (seconds from 1/1/1904) |
130 | | * to UNIX (UTC seconds from 1/1/1970) |
132 | | * The constant is borrowed from the Linux HFS driver source |
132 | | */ |
133 | | uint32_t |
134 | | hfs_convert_2_unix_time(uint32_t hfsdate) |
135 | 0 | { |
136 | 0 | if (hfsdate < NSEC_BTWN_1904_1970) |
137 | 0 | return 0; |
138 | 0 | return (uint32_t) (hfsdate - NSEC_BTWN_1904_1970); |
139 | 0 | } |
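The offset used above, NSEC_BTWN_1904_1970, is the 2,082,844,800-second gap between the HFS+ epoch (midnight, January 1, 1904) and the Unix epoch (midnight, January 1, 1970): 24,107 days, including 17 leap days. A minimal standalone sketch of the same conversion with the constant written out (the helper name is illustrative, not TSK API):

#include <stdint.h>

/* 24107 days * 86400 seconds/day between 1904-01-01 and 1970-01-01. */
static const uint32_t kHfsToUnixEpochDelta = 2082844800U;

/* Same behavior as hfs_convert_2_unix_time(): timestamps before the Unix
 * epoch are clamped to 0 rather than wrapping negative. */
static uint32_t hfs_time_to_unix(uint32_t hfsdate)
{
    return (hfsdate < kHfsToUnixEpochDelta) ? 0 : hfsdate - kHfsToUnixEpochDelta;
}

/* Example: HFS+ timestamp 3094178400 -> 1011333600, i.e. 2002-01-18 06:00:00 UTC. */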
140 | | |
141 | | |
142 | | /** |
143 | | * Convert a cnid (metadata address) to big endian array. |
144 | | * This is used to create the key for tree lookups. |
145 | | * @param cnid Metadata address to convert |
146 | | * @param array [out] Array to write data into. |
147 | | */ |
148 | | static void |
149 | | cnid_to_array(uint32_t cnid, uint8_t array[4]) |
150 | 0 | { |
151 | 0 | array[3] = (cnid >> 0) & 0xff; |
152 | 0 | array[2] = (cnid >> 8) & 0xff; |
153 | 0 | array[1] = (cnid >> 16) & 0xff; |
154 | 0 | array[0] = (cnid >> 24) & 0xff; |
155 | 0 | } |
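B-tree keys store the CNID as a 4-byte big-endian value on disk, so building a search key from a host-order integer is just the byte packing done above; TSK reads the field back with tsk_getu32(). A standalone round-trip sketch (helper names are illustrative):

#include <stdint.h>

static void cnid_pack_be(uint32_t cnid, uint8_t out[4])
{
    out[0] = (uint8_t) (cnid >> 24);    /* most significant byte first */
    out[1] = (uint8_t) (cnid >> 16);
    out[2] = (uint8_t) (cnid >> 8);
    out[3] = (uint8_t) (cnid);
}

static uint32_t cnid_unpack_be(const uint8_t in[4])
{
    return ((uint32_t) in[0] << 24) | ((uint32_t) in[1] << 16) |
        ((uint32_t) in[2] << 8) | (uint32_t) in[3];
}

/* Example: CNID 2 (the root folder) packs to {0x00, 0x00, 0x00, 0x02}. */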
156 | | |
157 | | /********************************************************************** |
158 | | * |
159 | | * Lookup Functions |
160 | | * |
161 | | **********************************************************************/ |
162 | | |
163 | | |
164 | | |
165 | | /* Compares the given HFS+ Extents B-tree key to key constructed |
166 | | * for finding the beginning of the data fork extents for the given |
167 | | * CNID. (That is, the search key uses the given CNID and has |
168 | | * fork = 0 and start_block = 0.) |
169 | | */ |
170 | | static int |
171 | | hfs_ext_compare_keys(HFS_INFO * hfs, uint32_t cnid, |
172 | | const hfs_btree_key_ext * key) |
173 | 0 | { |
174 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); |
175 | 0 | uint32_t key_cnid; |
176 | |
|
177 | 0 | key_cnid = tsk_getu32(fs->endian, key->file_id); |
178 | 0 | if (key_cnid < cnid) |
179 | 0 | return -1; |
180 | 0 | if (key_cnid > cnid) |
181 | 0 | return 1; |
182 | | |
183 | | /* referring to the same cnids */ |
184 | | |
185 | | /* we are always looking for the data fork */ |
186 | 0 | if (key->fork_type != HFS_EXT_KEY_TYPE_DATA) |
187 | 0 | return 1; |
188 | | |
189 | | /* we are always looking for a start_block of zero |
190 | | (interested in the beginning of the extents, regardless |
191 | | of what the start_block is); all files except the bad |
192 | | blocks file should have a start_block greater than |
193 | | zero */ |
194 | 0 | if (tsk_getu32(fs->endian, key->start_block) == 0) |
195 | 0 | return 0; |
196 | 0 | return 1; |
197 | 0 | } |
198 | | |
199 | | |
200 | | /** \internal |
201 | | * Returns the length of an HFS+ B-tree INDEX key based on the tree header |
202 | | * structure and the length claimed in the record. With some trees, |
203 | | * the length given in the record is not used. |
204 | | * Note that this neither detects nor correctly handles 8-bit keys |
205 | | * (which should not be present in HFS+). |
206 | | * |
207 | | * This does not give the right answer for the Attributes File B-tree, for some |
208 | | * HFS+ file systems produced by the Apple OS, while it works for others. For |
209 | | * the Attributes file, INDEX keys should always be as stated in the record itself, |
210 | | * never the "maxKeyLen" of the B-tree header. |
211 | | * |
212 | | * In this software, this function is only invoked when dealing with the Extents file. In |
213 | | * that usage, it is not sufficiently well tested to know if it always gives the right |
214 | | * answer or not. We can only test that with a highly fragmented disk. |
215 | | * @param hfs File System |
216 | | * @param keylen Length of key as given in record |
217 | | * @param header Tree header |
218 | | * @returns Length of key |
219 | | */ |
220 | | uint16_t |
221 | | hfs_get_idxkeylen(HFS_INFO * hfs, uint16_t keylen, |
222 | | const hfs_btree_header_record * header) |
223 | 0 | { |
224 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); |
225 | | |
226 | | // if the flag is set, use the length given in the record |
227 | 0 | if (tsk_getu32(fs->endian, header->attr) & HFS_BT_HEAD_ATTR_VARIDXKEYS) |
228 | 0 | return keylen; |
229 | 0 | else |
230 | 0 | return tsk_getu16(fs->endian, header->maxKeyLen); |
231 | 0 | } |
232 | | |
233 | | |
234 | | /** |
235 | | * Convert the extents runs to TSK_FS_ATTR_RUN runs. |
236 | | * |
237 | | * @param a_fs File system to analyze |
238 | | * @param a_extents Raw extents to process (in an array of 8) |
239 | | * @param a_start_off Starting block offset of these runs |
240 | | * @returns NULL on error or if no runs are in extents (test tsk_errno) |
241 | | */ |
242 | | static TSK_FS_ATTR_RUN * |
243 | | hfs_extents_to_attr(TSK_FS_INFO * a_fs, const hfs_ext_desc * a_extents, |
244 | | TSK_OFF_T a_start_off) |
245 | 0 | { |
246 | 0 | TSK_FS_ATTR_RUN *head_run = NULL; |
247 | 0 | TSK_FS_ATTR_RUN *prev_run = NULL; |
248 | 0 | int i; |
249 | 0 | TSK_OFF_T cur_off = a_start_off; |
250 | | |
251 | | // since tsk_errno is checked as a return value, make sure it is clean. |
252 | 0 | tsk_error_reset(); |
253 | |
|
254 | 0 | if (tsk_verbose) |
255 | 0 | tsk_fprintf(stderr, |
256 | 0 | "hfs_extents_to_attr: Converting extents from offset %" PRIdOFF |
257 | 0 | " to runlist\n", a_start_off); |
258 | |
|
259 | 0 | for (i = 0; i < 8; ++i) { |
260 | 0 | TSK_FS_ATTR_RUN *cur_run; |
261 | |
|
262 | 0 | uint32_t addr = tsk_getu32(a_fs->endian, a_extents[i].start_blk); |
263 | 0 | uint32_t len = tsk_getu32(a_fs->endian, a_extents[i].blk_cnt); |
264 | |
|
265 | 0 | if (tsk_verbose) |
266 | 0 | tsk_fprintf(stderr, |
267 | 0 | "hfs_extents_to_attr: run %i at addr %" PRIu32 |
268 | 0 | " with len %" PRIu32 "\n", i, addr, len); |
269 | |
|
270 | 0 | if ((addr == 0) && (len == 0)) { |
271 | 0 | break; |
272 | 0 | } |
273 | | |
274 | | // make a non-resident run |
275 | 0 | if ((cur_run = tsk_fs_attr_run_alloc()) == NULL) { |
276 | 0 | error_returned(" - hfs_extents_to_attr"); |
277 | 0 | return NULL; |
278 | 0 | } |
279 | | |
280 | 0 | cur_run->addr = addr; |
281 | 0 | cur_run->len = len; |
282 | 0 | cur_run->offset = cur_off; |
283 | |
|
284 | 0 | if (head_run == NULL) |
285 | 0 | head_run = cur_run; |
286 | 0 | if (prev_run != NULL) |
287 | 0 | prev_run->next = cur_run; |
288 | 0 | cur_off += cur_run->len; |
289 | 0 | prev_run = cur_run; |
290 | 0 | } |
291 | | |
292 | 0 | return head_run; |
293 | 0 | } |
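The a_extents array always holds eight (start block, block count) pairs; a pair of zeros ends the list, and any further extents live in the Extents Overflow file handled by hfs_ext_find_extent_record_attr() below. A minimal sketch of the same offset bookkeeping using plain integers instead of TSK_FS_ATTR_RUN structures (illustrative only):

#include <stdint.h>
#include <stdio.h>

struct extent { uint32_t start_blk; uint32_t blk_cnt; };

/* Print each extent as a (file offset, disk address, length) run, all in
 * blocks, mirroring how hfs_extents_to_attr() chains its runs together. */
static void extents_to_runs(const struct extent ext[8], uint64_t start_off)
{
    uint64_t cur_off = start_off;
    for (int i = 0; i < 8; ++i) {
        if (ext[i].start_blk == 0 && ext[i].blk_cnt == 0)
            break;                      /* a zero pair terminates the list */
        printf("run %d: offset %llu, addr %u, len %u\n", i,
            (unsigned long long) cur_off, (unsigned) ext[i].start_blk,
            (unsigned) ext[i].blk_cnt);
        cur_off += ext[i].blk_cnt;      /* runs are contiguous in file space */
    }
}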
294 | | |
295 | | |
296 | | /** |
297 | | * Look in the extents catalog for entries for a given file. Add the runs |
298 | | * to the passed attribute structure. |
299 | | * |
300 | | * @param hfs File system being analyzed |
301 | | * @param cnid file id of file to search for |
302 | | * @param a_attr Attribute to add extents runs to |
303 | | * @param dataForkQ if true, then find extents for the data fork. If false, then find extents for the Resource fork. |
304 | | * @returns 1 on error and 0 on success |
305 | | */ |
306 | | static uint8_t |
307 | | hfs_ext_find_extent_record_attr(HFS_INFO * hfs, uint32_t cnid, |
308 | | TSK_FS_ATTR * a_attr, unsigned char dataForkQ) |
309 | 0 | { |
310 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); |
311 | 0 | uint16_t nodesize; /* size of nodes (all, regardless of the name) */ |
312 | 0 | uint32_t cur_node; /* node id of the current node */ |
313 | 0 | uint8_t is_done; |
314 | 0 | uint8_t desiredType; |
315 | |
|
316 | 0 | tsk_error_reset(); |
317 | |
|
318 | 0 | if (tsk_verbose) |
319 | 0 | tsk_fprintf(stderr, |
320 | 0 | "hfs_ext_find_extent_record_attr: Looking for extents for file %" |
321 | 0 | PRIu32 " %s\n", cnid, |
322 | 0 | dataForkQ ? "data fork" : "resource fork"); |
323 | |
|
324 | 0 | if (!hfs->has_extents_file) { |
325 | | // No extents file (which is optional), and so, no further extents are possible. |
326 | 0 | return 0; |
327 | 0 | } |
328 | | |
329 | | // Are we looking for extents of the data fork or the resource fork? |
330 | 0 | desiredType = |
331 | 0 | dataForkQ ? HFS_EXT_KEY_TYPE_DATA : HFS_EXT_KEY_TYPE_RSRC; |
332 | | |
333 | | // Load the extents attribute, if it has not been done so yet. |
334 | 0 | if (hfs->extents_file == NULL) { |
335 | 0 | ssize_t cnt; |
336 | |
|
337 | 0 | if ((hfs->extents_file = |
338 | 0 | tsk_fs_file_open_meta(fs, NULL, |
339 | 0 | HFS_EXTENTS_FILE_ID)) == NULL) { |
340 | 0 | return 1; |
341 | 0 | } |
342 | | |
343 | | /* cache the data attribute */ |
344 | 0 | hfs->extents_attr = |
345 | 0 | tsk_fs_attrlist_get(hfs->extents_file->meta->attr, |
346 | 0 | TSK_FS_ATTR_TYPE_DEFAULT); |
347 | 0 | if (!hfs->extents_attr) { |
348 | 0 | tsk_error_errstr2_concat |
349 | 0 | (" - Default Attribute not found in Extents File"); |
350 | 0 | return 1; |
351 | 0 | } |
352 | | |
353 | | // cache the extents file header |
354 | 0 | cnt = tsk_fs_attr_read(hfs->extents_attr, 14, |
355 | 0 | (char *) &(hfs->extents_header), |
356 | 0 | sizeof(hfs_btree_header_record), TSK_FS_FILE_READ_FLAG_NONE); |
357 | 0 | if (cnt != sizeof(hfs_btree_header_record)) { |
358 | 0 | if (cnt >= 0) { |
359 | 0 | tsk_error_reset(); |
360 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
361 | 0 | } |
362 | 0 | tsk_error_set_errstr2 |
363 | 0 | ("hfs_ext_find_extent_record_attr: Error reading header"); |
364 | 0 | return 1; |
365 | 0 | } |
366 | 0 | } |
367 | | |
368 | | // allocate a node buffer |
369 | 0 | nodesize = tsk_getu16(fs->endian, hfs->extents_header.nodesize); |
370 | 0 | std::unique_ptr<char[]> node{new(std::nothrow) char[nodesize]}; |
371 | 0 | if (!node) { |
372 | 0 | return 1; |
373 | 0 | } |
374 | | |
375 | | /* start at root node */ |
376 | 0 | cur_node = tsk_getu32(fs->endian, hfs->extents_header.rootNode); |
377 | | |
378 | | /* if the root node is zero, then the extents btree is empty */ |
379 | | /* if no files have overflow extents, the Extents B-tree still |
380 | | exists on disk, but is an empty B-tree containing only |
381 | | the header node */ |
382 | 0 | if (cur_node == 0) { |
383 | 0 | if (tsk_verbose) |
384 | 0 | tsk_fprintf(stderr, "hfs_ext_find_extent_record: " |
385 | 0 | "empty extents btree\n"); |
386 | 0 | return 0; |
387 | 0 | } |
388 | | |
389 | 0 | if (tsk_verbose) |
390 | 0 | tsk_fprintf(stderr, "hfs_ext_find_extent_record: starting at " |
391 | 0 | "root node %" PRIu32 "; nodesize = %" |
392 | 0 | PRIu16 "\n", cur_node, nodesize); |
393 | | |
394 | | /* Recurse down to the needed leaf nodes and then go forward */ |
395 | 0 | is_done = 0; |
396 | 0 | while (is_done == 0) { |
397 | 0 | TSK_OFF_T cur_off; /* start address of cur_node */ |
398 | 0 | uint16_t num_rec; /* number of records in this node */ |
399 | 0 | ssize_t cnt; |
400 | 0 | hfs_btree_node *node_desc; |
401 | | |
402 | | // sanity check |
403 | 0 | if (cur_node > tsk_getu32(fs->endian, |
404 | 0 | hfs->extents_header.totalNodes)) { |
405 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
406 | 0 | tsk_error_set_errstr |
407 | 0 | ("hfs_ext_find_extent_record_attr: Node %d too large for file", |
408 | 0 | cur_node); |
409 | 0 | return 1; |
410 | 0 | } |
411 | | |
412 | | // read the current node |
413 | 0 | cur_off = (TSK_OFF_T)cur_node * nodesize; |
414 | 0 | if (tsk_verbose) |
415 | 0 | tsk_fprintf(stderr, |
416 | 0 | "hfs_ext_find_extent_record: reading node %" PRIu32 |
417 | 0 | " at offset %" PRIdOFF "\n", cur_node, cur_off); |
418 | |
|
419 | 0 | cnt = tsk_fs_attr_read(hfs->extents_attr, cur_off, |
420 | 0 | node.get(), nodesize, TSK_FS_FILE_READ_FLAG_NONE); |
421 | 0 | if (cnt != nodesize) { |
422 | 0 | if (cnt >= 0) { |
423 | 0 | tsk_error_reset(); |
424 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
425 | 0 | } |
426 | 0 | tsk_error_set_errstr2 |
427 | 0 | ("hfs_ext_find_extent_record_attr: Error reading node %d at offset %" |
428 | 0 | PRIdOFF, cur_node, cur_off); |
429 | 0 | return 1; |
430 | 0 | } |
431 | | |
432 | | // process the header / descriptor |
433 | 0 | if (nodesize < sizeof(hfs_btree_node)) { |
434 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
435 | 0 | tsk_error_set_errstr |
436 | 0 | ("hfs_ext_find_extent_record_attr: Node size %d is too small to be valid", nodesize); |
437 | 0 | return 1; |
438 | 0 | } |
439 | 0 | node_desc = (hfs_btree_node *) node.get(); |
440 | 0 | num_rec = tsk_getu16(fs->endian, node_desc->num_rec); |
441 | |
|
442 | 0 | if (num_rec == 0) { |
443 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
444 | 0 | tsk_error_set_errstr |
445 | 0 | ("hfs_ext_find_extent_record: zero records in node %" |
446 | 0 | PRIu32, cur_node); |
447 | 0 | return 1; |
448 | 0 | } |
449 | | |
450 | | /* With an index node, find the record with the largest key that is smaller |
451 | | * than or equal to cnid */ |
452 | 0 | if (node_desc->type == HFS_BT_NODE_TYPE_IDX) { |
453 | 0 | uint32_t next_node = 0; |
454 | 0 | int rec; |
455 | |
|
456 | 0 | if (tsk_verbose) |
457 | 0 | tsk_fprintf(stderr, |
458 | 0 | "hfs_ext_find_extent_record: Index node %" PRIu32 |
459 | 0 | " @ %" PRIu64 " has %" PRIu16 " records\n", cur_node, |
460 | 0 | cur_off, num_rec); |
461 | |
|
462 | 0 | for (rec = 0; rec < num_rec; ++rec) { |
463 | 0 | int cmp; |
464 | 0 | size_t rec_off; |
465 | 0 | hfs_btree_key_ext *key; |
466 | | |
467 | | // Make sure node is large enough, note that (rec + 1) * 2 is an offset |
468 | | // relative to the end of node |
469 | 0 | if ((rec + 1) * 2 > (int) nodesize) { |
470 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
471 | 0 | tsk_error_set_errstr |
472 | 0 | ("hfs_ext_find_extent_record: offset of record %d in leaf node %d too small (%" |
473 | 0 | PRIu16 ")", rec, cur_node, nodesize); |
474 | 0 | return 1; |
475 | 0 | } |
476 | | // get the record offset in the node |
477 | 0 | rec_off = |
478 | 0 | tsk_getu16(fs->endian, |
479 | 0 | &node[nodesize - (rec + 1) * 2]); |
480 | 0 | if (rec_off > nodesize - sizeof(hfs_btree_key_ext)) { |
481 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
482 | 0 | tsk_error_set_errstr |
483 | 0 | ("hfs_ext_find_extent_record_attr: offset of record %d in index node %d too large (%d vs %" |
484 | 0 | PRIu16 ")", rec, cur_node, (int) rec_off, |
485 | 0 | nodesize); |
486 | 0 | return 1; |
487 | 0 | } |
488 | 0 | key = (hfs_btree_key_ext *) & node[rec_off]; |
489 | |
|
490 | 0 | cmp = hfs_ext_compare_keys(hfs, cnid, key); |
491 | |
|
492 | 0 | if (tsk_verbose) |
493 | 0 | tsk_fprintf(stderr, |
494 | 0 | "hfs_ext_find_extent_record: record %" PRIu16 |
495 | 0 | " ; keylen %" PRIu16 " (FileId: %" PRIu32 |
496 | 0 | ", ForkType: %" PRIu8 ", StartBlk: %" PRIu32 |
497 | 0 | "); compare: %d\n", rec, tsk_getu16(fs->endian, |
498 | 0 | key->key_len), tsk_getu32(fs->endian, |
499 | 0 | key->file_id), key->fork_type, |
500 | 0 | tsk_getu32(fs->endian, key->start_block), cmp); |
501 | | |
502 | | /* save the info from this record unless it is bigger than cnid */ |
503 | 0 | if (cmp <= 0 || next_node == 0) { |
504 | 0 | hfs_btree_index_record *idx_rec; |
505 | 0 | size_t keylen = |
506 | 0 | 2 + hfs_get_idxkeylen(hfs, tsk_getu16(fs->endian, |
507 | 0 | key->key_len), &(hfs->extents_header)); |
508 | 0 | if (nodesize < 4 || keylen + 4 > nodesize || rec_off >= nodesize - 4 - keylen) { |
509 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
510 | 0 | tsk_error_set_errstr |
511 | 0 | ("hfs_ext_find_extent_record_attr: offset and keylenth of record %d in index node %d too large (%" PRIuSIZE " vs %" |
512 | 0 | PRIu16 ")", rec, cur_node, |
513 | 0 | rec_off + keylen, nodesize); |
514 | 0 | return 1; |
515 | 0 | } |
516 | 0 | idx_rec = |
517 | 0 | (hfs_btree_index_record *) & node[rec_off + |
518 | 0 | keylen]; |
519 | 0 | next_node = tsk_getu32(fs->endian, idx_rec->childNode); |
520 | 0 | } |
521 | | |
522 | | // we are bigger than cnid, so move on to the next node |
523 | 0 | if (cmp > 0) { |
524 | 0 | break; |
525 | 0 | } |
526 | 0 | } |
527 | | |
528 | | // check if we found a relevant node, if not stop. |
529 | 0 | if (next_node == 0) { |
530 | 0 | if (tsk_verbose) |
531 | 0 | tsk_fprintf(stderr, |
532 | 0 | "hfs_ext_find_extent_record_attr: did not find any keys for %d in index node %d", |
533 | 0 | cnid, cur_node); |
534 | 0 | is_done = 1; |
535 | 0 | break; |
536 | 0 | } |
537 | 0 | cur_node = next_node; |
538 | 0 | } |
539 | | |
540 | | /* with a leaf, we process until we are past cnid. We move right too if we can */ |
541 | 0 | else if (node_desc->type == HFS_BT_NODE_TYPE_LEAF) { |
542 | 0 | int rec; |
543 | |
|
544 | 0 | if (tsk_verbose) |
545 | 0 | tsk_fprintf(stderr, |
546 | 0 | "hfs_ext_find_extent_record: Leaf node %" PRIu32 " @ %" |
547 | 0 | PRIu64 " has %" PRIu16 " records\n", cur_node, cur_off, |
548 | 0 | num_rec); |
549 | |
|
550 | 0 | for (rec = 0; rec < num_rec; ++rec) { |
551 | 0 | size_t rec_off; |
552 | 0 | hfs_btree_key_ext *key; |
553 | 0 | uint32_t rec_cnid; |
554 | 0 | hfs_extents *extents; |
555 | 0 | TSK_OFF_T ext_off = 0; |
556 | 0 | int keylen; |
557 | 0 | TSK_FS_ATTR_RUN *attr_run; |
558 | | |
559 | | // Make sure node is large enough, note that (rec + 1) * 2 is an offset |
560 | | // relative to the end of node |
561 | 0 | if ((rec + 1) * 2 > (int) nodesize) { |
562 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
563 | 0 | tsk_error_set_errstr |
564 | 0 | ("hfs_ext_find_extent_record_attr: offset of record %d in leaf node %d too small (%" |
565 | 0 | PRIu16 ")", rec, cur_node, nodesize); |
566 | 0 | return 1; |
567 | 0 | } |
568 | | // get the record offset in the node |
569 | 0 | rec_off = |
570 | 0 | tsk_getu16(fs->endian, |
571 | 0 | &node[nodesize - (rec + 1) * 2]); |
572 | |
|
573 | 0 | if (rec_off >= nodesize - sizeof(hfs_btree_key_ext)) { |
574 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
575 | 0 | tsk_error_set_errstr |
576 | 0 | ("hfs_ext_find_extent_record_attr: offset of record %d in leaf node %d too large (%d vs %" |
577 | 0 | PRIu16 ")", rec, cur_node, (int) rec_off, |
578 | 0 | nodesize); |
579 | 0 | return 1; |
580 | 0 | } |
581 | | |
582 | | // Check that the whole hfs_btree_key_ext structure is set |
583 | 0 | if (sizeof(hfs_btree_key_ext) > nodesize - rec_off) { |
584 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
585 | 0 | tsk_error_set_errstr |
586 | 0 | ("hfs_ext_find_extent_record_attr: record %d in leaf node %d truncated (have %d vs %" PRIuSIZE " bytes)", rec, cur_node, nodesize - (int)rec_off, |
587 | 0 | sizeof(hfs_btree_key_ext)); |
588 | 0 | return 1; |
589 | 0 | } |
590 | | |
591 | 0 | key = (hfs_btree_key_ext *) & node[rec_off]; |
592 | |
|
593 | 0 | if (tsk_verbose) |
594 | 0 | tsk_fprintf(stderr, |
595 | 0 | "hfs_ext_find_extent_record: record %" PRIu16 |
596 | 0 | "; keylen %" PRIu16 " (%" PRIu32 |
597 | 0 | ", %" PRIu8 ", %" PRIu32 ")\n", rec, |
598 | 0 | tsk_getu16(fs->endian, key->key_len), |
599 | 0 | tsk_getu32(fs->endian, key->file_id), |
600 | 0 | key->fork_type, tsk_getu32(fs->endian, |
601 | 0 | key->start_block)); |
602 | |
|
603 | 0 | rec_cnid = tsk_getu32(fs->endian, key->file_id); |
604 | | |
605 | | // see if this record is for our file |
606 | | // OLD logic, just handles the DATA fork |
607 | | // if (rec_cnid < cnid) { |
608 | | // continue; |
609 | | // } |
610 | | // else if ((rec_cnid > cnid) |
611 | | // || (key->fork_type != HFS_EXT_KEY_TYPE_DATA)) { |
612 | | // is_done = 1; |
613 | | // break; |
614 | | // } |
615 | | |
616 | | // NEW logic, handles both DATA and RSRC forks. |
617 | 0 | if (rec_cnid < cnid) { |
618 | 0 | continue; |
619 | 0 | } |
620 | 0 | if (rec_cnid > cnid) { |
621 | 0 | is_done = 1; |
622 | 0 | break; |
623 | 0 | } |
624 | | |
625 | | |
626 | 0 | if (key->fork_type != desiredType) { |
627 | 0 | if (dataForkQ) { |
628 | 0 | is_done = 1; |
629 | 0 | break; |
630 | 0 | } |
631 | 0 | else |
632 | 0 | continue; |
633 | 0 | } |
634 | | |
635 | | // OK, this is one of the extents records that we are seeking, so save it. |
636 | | // Make sure there is room for the hfs_extents struct |
637 | 0 | keylen = 2 + tsk_getu16(fs->endian, key->key_len); |
638 | 0 | if (rec_off + keylen + sizeof(hfs_extents) > nodesize) { |
639 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
640 | 0 | tsk_error_set_errstr |
641 | 0 | ("hfs_ext_find_extent_record_attr: offset and keylenth of record %d in leaf node %d too large (%d vs %" |
642 | 0 | PRIu16 ")", rec, cur_node, (int) rec_off + keylen, |
643 | 0 | nodesize); |
644 | 0 | return 1; |
645 | 0 | } |
646 | | |
647 | | // get the starting offset of this extent |
648 | 0 | ext_off = tsk_getu32(fs->endian, key->start_block); |
649 | | |
650 | | // convert the extents to the TSK format |
651 | 0 | extents = (hfs_extents *) & node[rec_off + keylen]; |
652 | |
|
653 | 0 | attr_run = |
654 | 0 | hfs_extents_to_attr(fs, extents->extents, ext_off); |
655 | 0 | if ((attr_run == NULL) && (tsk_error_get_errno() != 0)) { |
656 | 0 | tsk_error_errstr2_concat |
657 | 0 | (" - hfs_ext_find_extent_record_attr"); |
658 | 0 | return 1; |
659 | 0 | } |
660 | | |
661 | 0 | if (tsk_fs_attr_add_run(fs, a_attr, attr_run)) { |
662 | 0 | tsk_error_errstr2_concat |
663 | 0 | (" - hfs_ext_find_extent_record_attr"); |
664 | 0 | return 1; |
665 | 0 | } |
666 | 0 | } |
667 | 0 | cur_node = tsk_getu32(fs->endian, node_desc->flink); |
668 | 0 | if (cur_node == 0) { |
669 | 0 | is_done = 1; |
670 | 0 | break; |
671 | 0 | } |
672 | 0 | } |
673 | 0 | else { |
674 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
675 | 0 | tsk_error_set_errstr("hfs_ext_find_extent_record: btree node %" |
676 | 0 | PRIu32 " (%" PRIdOFF ") is neither index nor leaf (%" PRIu8 |
677 | 0 | ")", cur_node, cur_off, node_desc->type); |
678 | 0 | return 1; |
679 | 0 | } |
680 | 0 | } |
681 | | |
682 | 0 | return 0; |
683 | 0 | } |
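Both this function and hfs_cat_traverse() below locate records through the offset table that every HFS+ B-tree node keeps at its tail: one 16-bit big-endian offset per record, growing backward from the end of the node, so record rec's offset sits at nodesize - 2 * (rec + 1). A standalone sketch of that lookup with the bounds checks reduced to their essentials (helper names are illustrative, not TSK API):

#include <stdint.h>
#include <stddef.h>

/* Read a 16-bit big-endian value. */
static uint16_t get_be16(const uint8_t *p)
{
    return (uint16_t) ((p[0] << 8) | p[1]);
}

/* Return the offset of record 'rec' within a B-tree node, or 0 if the offset
 * table slot or the record itself falls outside the node. (0 is never a valid
 * record offset, since the 14-byte node descriptor occupies the node start.) */
static size_t btree_record_offset(const uint8_t *node, size_t nodesize,
    unsigned rec, size_t min_rec_size)
{
    size_t slot = ((size_t) rec + 1) * 2;   /* distance back from the node end */
    if (slot > nodesize)
        return 0;
    size_t rec_off = get_be16(node + nodesize - slot);
    if (rec_off + min_rec_size > nodesize)  /* record must fit inside the node */
        return 0;
    return rec_off;
}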
684 | | |
685 | | |
686 | | /** \internal |
687 | | * Compares two Catalog B-tree keys. |
688 | | * @param hfs File System being analyzed |
689 | | * @param key1 Key 1 to compare |
690 | | * @param key2 Key 2 to compare |
691 | | * @returns -1 if key1 is smaller, 0 if equal, and 1 if key1 is larger |
692 | | */ |
693 | | int |
694 | | hfs_cat_compare_keys(HFS_INFO * hfs, const hfs_btree_key_cat * key1, |
695 | | int keylen1, const hfs_btree_key_cat * key2) |
696 | 0 | { |
697 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); |
698 | 0 | uint32_t cnid1, cnid2; |
699 | |
|
700 | 0 | if (keylen1 < 6) { |
701 | | // Note that it would be better to return an error value here |
702 | | // but the current function interface does not support this |
703 | | // Also see issue #2365 |
704 | 0 | return -1; |
705 | 0 | } |
706 | 0 | cnid1 = tsk_getu32(fs->endian, key1->parent_cnid); |
707 | 0 | cnid2 = tsk_getu32(fs->endian, key2->parent_cnid); |
708 | |
|
709 | 0 | if (cnid1 < cnid2) |
710 | 0 | return -1; |
711 | 0 | if (cnid1 > cnid2) |
712 | 0 | return 1; |
713 | | |
714 | 0 | return hfs_unicode_compare(hfs, &key1->name, keylen1 - 6, &key2->name); |
715 | 0 | } |
716 | | |
717 | | |
718 | | /** \internal |
719 | | * |
720 | | * Traverse the HFS catalog file. Call the callback for each |
721 | | * record. |
722 | | * |
723 | | * @param hfs File system |
724 | | * @param a_cb callback |
725 | | * @param ptr Pointer to pass to callback |
726 | | * @returns 1 on error |
727 | | */ |
728 | | uint8_t |
729 | | hfs_cat_traverse(HFS_INFO * hfs, |
730 | | TSK_HFS_BTREE_CB a_cb, void *ptr) |
731 | 0 | { |
732 | 0 | TSK_FS_INFO *fs = &(hfs->fs_info); |
733 | 0 | uint32_t cur_node; /* node id of the current node */ |
734 | |
|
735 | 0 | uint16_t nodesize; |
736 | 0 | uint8_t is_done = 0; |
737 | |
|
738 | 0 | tsk_error_reset(); |
739 | |
|
740 | 0 | nodesize = tsk_getu16(fs->endian, hfs->catalog_header.nodesize); |
741 | 0 | std::unique_ptr<char[]> node{new(std::nothrow) char[nodesize]}; |
742 | 0 | if (!node) { |
743 | 0 | return 1; |
744 | 0 | } |
745 | | |
746 | | /* start at root node */ |
747 | 0 | cur_node = tsk_getu32(fs->endian, hfs->catalog_header.rootNode); |
748 | | |
749 | | /* if the root node is zero, then the extents btree is empty */ |
750 | | /* if no files have overflow extents, the Extents B-tree still |
751 | | exists on disk, but is an empty B-tree containing only |
752 | | the header node */ |
753 | 0 | if (cur_node == 0) { |
754 | 0 | if (tsk_verbose) |
755 | 0 | tsk_fprintf(stderr, "hfs_cat_traverse: " |
756 | 0 | "empty extents btree\n"); |
757 | 0 | return 1; |
758 | 0 | } |
759 | | |
760 | 0 | if (tsk_verbose) |
761 | 0 | tsk_fprintf(stderr, "hfs_cat_traverse: starting at " |
762 | 0 | "root node %" PRIu32 "; nodesize = %" |
763 | 0 | PRIu16 "\n", cur_node, nodesize); |
764 | | |
765 | | /* Recurse down to the needed leaf nodes and then go forward */ |
766 | 0 | is_done = 0; |
767 | 0 | while (is_done == 0) { |
768 | 0 | TSK_OFF_T cur_off; /* start address of cur_node */ |
769 | 0 | uint16_t num_rec; /* number of records in this node */ |
770 | 0 | ssize_t cnt; |
771 | 0 | hfs_btree_node *node_desc; |
772 | | |
773 | | // sanity check |
774 | 0 | if (cur_node > tsk_getu32(fs->endian, |
775 | 0 | hfs->catalog_header.totalNodes)) { |
776 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
777 | 0 | tsk_error_set_errstr |
778 | 0 | ("hfs_cat_traverse: Node %d too large for file", cur_node); |
779 | 0 | return 1; |
780 | 0 | } |
781 | | |
782 | | // read the current node |
783 | 0 | cur_off = (TSK_OFF_T)cur_node * nodesize; |
784 | 0 | cnt = tsk_fs_attr_read(hfs->catalog_attr, cur_off, |
785 | 0 | node.get(), nodesize, TSK_FS_FILE_READ_FLAG_NONE); |
786 | 0 | if (cnt != nodesize) { |
787 | 0 | if (cnt >= 0) { |
788 | 0 | tsk_error_reset(); |
789 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
790 | 0 | } |
791 | 0 | tsk_error_set_errstr2 |
792 | 0 | ("hfs_cat_traverse: Error reading node %d at offset %" |
793 | 0 | PRIdOFF, cur_node, cur_off); |
794 | 0 | return 1; |
795 | 0 | } |
796 | | |
797 | | // process the header / descriptor |
798 | 0 | if (nodesize < sizeof(hfs_btree_node)) { |
799 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
800 | 0 | tsk_error_set_errstr |
801 | 0 | ("hfs_cat_traverse: Node size %d is too small to be valid", nodesize); |
802 | 0 | return 1; |
803 | 0 | } |
804 | 0 | node_desc = (hfs_btree_node *) node.get(); |
805 | 0 | num_rec = tsk_getu16(fs->endian, node_desc->num_rec); |
806 | |
|
807 | 0 | if (tsk_verbose) |
808 | 0 | tsk_fprintf(stderr, "hfs_cat_traverse: node %" PRIu32 |
809 | 0 | " @ %" PRIu64 " has %" PRIu16 " records\n", |
810 | 0 | cur_node, cur_off, num_rec); |
811 | |
|
812 | 0 | if (num_rec == 0) { |
813 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
814 | 0 | tsk_error_set_errstr("hfs_cat_traverse: zero records in node %" |
815 | 0 | PRIu32, cur_node); |
816 | 0 | return 1; |
817 | 0 | } |
818 | | |
819 | | /* With an index node, find the record with the largest key that is smaller |
820 | | * than or equal to cnid */ |
821 | 0 | if (node_desc->type == HFS_BT_NODE_TYPE_IDX) { |
822 | 0 | uint32_t next_node = 0; |
823 | 0 | int rec; |
824 | |
|
825 | 0 | for (rec = 0; rec < num_rec; ++rec) { |
826 | 0 | size_t rec_off; |
827 | 0 | hfs_btree_key_cat *key; |
828 | 0 | uint8_t retval; |
829 | 0 | size_t keylen; |
830 | | |
831 | | // Make sure node is large enough, note that (rec + 1) * 2 is an offset |
832 | | // relative to the end of node |
833 | 0 | if ((rec + 1) * 2 > (int) nodesize) { |
834 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
835 | 0 | tsk_error_set_errstr |
836 | 0 | ("hfs_cat_traverse: offset of record %d in leaf node %d too small (%" |
837 | 0 | PRIu16 ")", rec, cur_node, nodesize); |
838 | 0 | return 1; |
839 | 0 | } |
840 | | // get the record offset in the node |
841 | 0 | rec_off = |
842 | 0 | tsk_getu16(fs->endian, |
843 | 0 | &node[nodesize - (rec + 1) * 2]); |
844 | | |
845 | | // Need at least 2 bytes for key_len |
846 | 0 | if (rec_off + 2 >= nodesize) { |
847 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
848 | 0 | tsk_error_set_errstr |
849 | 0 | ("hfs_cat_traverse: offset of record %d in index node %d too large (%d vs %" |
850 | 0 | PRIu16 ")", rec, cur_node, (int) rec_off, |
851 | 0 | nodesize); |
852 | 0 | return 1; |
853 | 0 | } |
854 | | |
855 | 0 | key = (hfs_btree_key_cat *) & node[rec_off]; |
856 | 0 | keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len); |
857 | | |
858 | | // Want a key of at least 6 bytes, the size of the first 2 members of hfs_btree_key_cat |
859 | 0 | if ((keylen < 6) || (keylen > nodesize - rec_off)) { |
860 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
861 | 0 | tsk_error_set_errstr |
862 | 0 | ("hfs_cat_traverse: length of key %d in index node %d out of bounds (6 < %" PRIuSIZE " < %" |
863 | 0 | PRIuSIZE ")", rec, cur_node, keylen, nodesize - rec_off); |
864 | 0 | return 1; |
865 | 0 | } |
866 | | |
867 | | /* |
868 | | if (tsk_verbose) |
869 | | tsk_fprintf(stderr, |
870 | | "hfs_cat_traverse: record %" PRIu16 |
871 | | " ; keylen %" PRIu16 " (%" PRIu32 ")\n", rec, |
872 | | tsk_getu16(fs->endian, key->key_len), |
873 | | tsk_getu32(fs->endian, key->parent_cnid)); |
874 | | */ |
875 | | |
876 | | |
877 | | /* save the info from this record unless it is too big */ |
878 | 0 | retval = |
879 | 0 | a_cb(hfs, HFS_BT_NODE_TYPE_IDX, key, keylen, nodesize, |
880 | 0 | cur_off + rec_off, ptr); |
881 | 0 | if (retval == HFS_BTREE_CB_ERR) { |
882 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
883 | 0 | tsk_error_set_errstr2 |
884 | 0 | ("hfs_cat_traverse: Callback returned error"); |
885 | 0 | return 1; |
886 | 0 | } |
887 | | // record the closest entry |
888 | 0 | else if ((retval == HFS_BTREE_CB_IDX_LT) |
889 | 0 | || (next_node == 0)) { |
890 | 0 | hfs_btree_index_record *idx_rec; |
891 | 0 | size_t keylen = |
892 | 0 | 2 + hfs_get_idxkeylen(hfs, tsk_getu16(fs->endian, |
893 | 0 | key->key_len), &(hfs->catalog_header)); |
894 | 0 | if (keylen > nodesize - rec_off) { |
895 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
896 | 0 | tsk_error_set_errstr |
897 | 0 | ("hfs_cat_traverse: offset of record and keylength %d in index node %d too large (%" PRIuSIZE " vs %" |
898 | 0 | PRIu16 ")", rec, cur_node, |
899 | 0 | (int) rec_off + keylen, nodesize); |
900 | 0 | return 1; |
901 | 0 | } |
902 | 0 | if (sizeof(hfs_btree_index_record) > nodesize - rec_off - keylen) { |
903 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
904 | 0 | tsk_error_set_errstr("hfs_cat_traverse: truncated btree index record"); |
905 | 0 | return 1; |
906 | 0 | } |
907 | 0 | idx_rec = |
908 | 0 | (hfs_btree_index_record *) & node[rec_off + |
909 | 0 | keylen]; |
910 | 0 | next_node = tsk_getu32(fs->endian, idx_rec->childNode); |
911 | 0 | } |
912 | 0 | if (retval == HFS_BTREE_CB_IDX_EQGT) { |
913 | | // move down to the next node |
914 | 0 | break; |
915 | 0 | } |
916 | 0 | } |
917 | | // check if we found a relevant node |
918 | 0 | if (next_node == 0) { |
919 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
920 | 0 | tsk_error_set_errstr |
921 | 0 | ("hfs_cat_traverse: did not find any keys in index node %d", |
922 | 0 | cur_node); |
923 | 0 | is_done = 1; |
924 | 0 | break; |
925 | 0 | } |
926 | | // TODO: Handle multinode loops |
927 | 0 | if (next_node == cur_node) { |
928 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
929 | 0 | tsk_error_set_errstr |
930 | 0 | ("hfs_cat_traverse: node %d references itself as next node", |
931 | 0 | cur_node); |
932 | 0 | is_done = 1; |
933 | 0 | break; |
934 | 0 | } |
935 | 0 | cur_node = next_node; |
936 | 0 | } |
937 | | |
938 | | /* With a leaf, we look for the specific record. */ |
939 | 0 | else if (node_desc->type == HFS_BT_NODE_TYPE_LEAF) { |
940 | 0 | int rec; |
941 | |
|
942 | 0 | for (rec = 0; rec < num_rec; ++rec) { |
943 | 0 | size_t rec_off; |
944 | 0 | hfs_btree_key_cat *key; |
945 | 0 | uint8_t retval; |
946 | 0 | size_t keylen; |
947 | | |
948 | | // Make sure node is large enough, note that (rec + 1) * 2 is an offset |
949 | | // relative to the end of node |
950 | 0 | if ((rec + 1) * 2 > (int) nodesize) { |
951 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
952 | 0 | tsk_error_set_errstr |
953 | 0 | ("hfs_cat_traverse: offset of record %d in leaf node %d too small (%" |
954 | 0 | PRIu16 ")", rec, cur_node, nodesize); |
955 | 0 | return 1; |
956 | 0 | } |
957 | | // get the record offset in the node |
958 | 0 | rec_off = |
959 | 0 | tsk_getu16(fs->endian, |
960 | 0 | &node[nodesize - (rec + 1) * 2]); |
961 | | |
962 | | // Need at least 2 bytes for key_len |
963 | 0 | if (rec_off + 2 >= nodesize) { |
964 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
965 | 0 | tsk_error_set_errstr |
966 | 0 | ("hfs_cat_traverse: offset of record %d in leaf node %d too large (%d vs %" |
967 | 0 | PRIu16 ")", rec, cur_node, (int) rec_off, |
968 | 0 | nodesize); |
969 | 0 | return 1; |
970 | 0 | } |
971 | | |
972 | 0 | key = (hfs_btree_key_cat *) & node[rec_off]; |
973 | 0 | keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len); |
974 | | |
975 | | // Want a key of at least 6 bytes, the size of the first 2 members of hfs_btree_key_cat |
976 | 0 | if ((keylen < 6) || (keylen > nodesize - rec_off)) { |
977 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
978 | 0 | tsk_error_set_errstr |
979 | 0 | ("hfs_cat_traverse: length of key %d in leaf node %d out of bounds (6 < %" PRIuSIZE " < %" |
980 | 0 | PRIu16 ")", rec, cur_node, keylen, nodesize); |
981 | 0 | return 1; |
982 | 0 | } |
983 | | |
984 | | /* |
985 | | if (tsk_verbose) |
986 | | tsk_fprintf(stderr, |
987 | | "hfs_cat_traverse: record %" PRIu16 |
988 | | "; keylen %" PRIu16 " (%" PRIu32 ")\n", rec, |
989 | | tsk_getu16(fs->endian, key->key_len), |
990 | | tsk_getu32(fs->endian, key->parent_cnid)); |
991 | | */ |
992 | | // rec_cnid = tsk_getu32(fs->endian, key->file_id); |
993 | | |
994 | | // The nodesize passed to the callback should contain the available node |
995 | | // data size relative to the start of the key. |
996 | 0 | retval = |
997 | 0 | a_cb(hfs, HFS_BT_NODE_TYPE_LEAF, key, keylen, nodesize - rec_off, |
998 | 0 | cur_off + rec_off, ptr); |
999 | 0 | if (retval == HFS_BTREE_CB_LEAF_STOP) { |
1000 | 0 | is_done = 1; |
1001 | 0 | break; |
1002 | 0 | } |
1003 | 0 | else if (retval == HFS_BTREE_CB_ERR) { |
1004 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
1005 | 0 | tsk_error_set_errstr2 |
1006 | 0 | ("hfs_cat_traverse: Callback returned error"); |
1007 | 0 | return 1; |
1008 | 0 | } |
1009 | 0 | } |
1010 | | |
1011 | | // move right to the next node if we got this far |
1012 | 0 | if (is_done == 0) { |
1013 | 0 | cur_node = tsk_getu32(fs->endian, node_desc->flink); |
1014 | 0 | if (cur_node == 0) { |
1015 | 0 | is_done = 1; |
1016 | 0 | } |
1017 | 0 | if (tsk_verbose) |
1018 | 0 | tsk_fprintf(stderr, |
1019 | 0 | "hfs_cat_traverse: moving forward to next leaf"); |
1020 | 0 | } |
1021 | 0 | } |
1022 | 0 | else { |
1023 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
1024 | 0 | tsk_error_set_errstr("hfs_cat_traverse: btree node %" PRIu32 |
1025 | 0 | " (%" PRIu64 ") is neither index nor leaf (%" PRIu8 ")", |
1026 | 0 | cur_node, cur_off, node_desc->type); |
1027 | 0 | return 1; |
1028 | 0 | } |
1029 | 0 | } |
1030 | | |
1031 | 0 | return 0; |
1032 | 0 | } |
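The walk above is steered entirely by the callback's return codes: on index nodes, HFS_BTREE_CB_IDX_LT means the current key is still below the target (keep scanning, but remember its child pointer) and HFS_BTREE_CB_IDX_EQGT stops the scan and descends; on leaf nodes, HFS_BTREE_CB_LEAF_GO continues to the next record and HFS_BTREE_CB_LEAF_STOP ends the traversal. hfs_cat_get_record_offset_cb() below is the in-tree example; a hypothetical callback that simply counts the catalog records keyed under one parent CNID could look like this (a sketch reusing the TSK declarations this file already includes; COUNT_CHILDREN_DATA and count_children_cb are not TSK API):

typedef struct {
    uint32_t parent_cnid;   /* target parent CNID, host byte order */
    uint32_t count;         /* number of matching leaf records seen */
} COUNT_CHILDREN_DATA;

static uint8_t
count_children_cb(HFS_INFO * hfs, int8_t level_type,
    const hfs_btree_key_cat * cur_key, [[maybe_unused]] int cur_keylen,
    [[maybe_unused]] size_t node_size, [[maybe_unused]] TSK_OFF_T key_off,
    void *ptr)
{
    COUNT_CHILDREN_DATA *data = (COUNT_CHILDREN_DATA *) ptr;
    uint32_t rec_parent = tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid);

    if (level_type == HFS_BT_NODE_TYPE_IDX) {
        /* Keep scanning while the index key is below the target; descend
         * as soon as it reaches or passes it. */
        return (rec_parent < data->parent_cnid)
            ? HFS_BTREE_CB_IDX_LT : HFS_BTREE_CB_IDX_EQGT;
    }

    /* Leaf records are sorted by parent CNID, so stop once we pass it. */
    if (rec_parent < data->parent_cnid)
        return HFS_BTREE_CB_LEAF_GO;
    if (rec_parent > data->parent_cnid)
        return HFS_BTREE_CB_LEAF_STOP;
    ++data->count;
    return HFS_BTREE_CB_LEAF_GO;
}

/* Usage: COUNT_CHILDREN_DATA d = { cnid, 0 };
 *        hfs_cat_traverse(hfs, count_children_cb, &d); */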
1033 | | |
1034 | | typedef struct { |
1035 | | const hfs_btree_key_cat *targ_key; |
1036 | | TSK_OFF_T off; |
1037 | | } HFS_CAT_GET_RECORD_OFFSET_DATA; |
1038 | | |
1039 | | static uint8_t |
1040 | | hfs_cat_get_record_offset_cb( |
1041 | | HFS_INFO * hfs, |
1042 | | int8_t level_type, |
1043 | | const hfs_btree_key_cat * cur_key, |
1044 | | int cur_keylen, |
1045 | | [[maybe_unused]] size_t node_size, |
1046 | | TSK_OFF_T key_off, |
1047 | | void *ptr) |
1048 | 0 | { |
1049 | 0 | HFS_CAT_GET_RECORD_OFFSET_DATA *offset_data = (HFS_CAT_GET_RECORD_OFFSET_DATA *)ptr; |
1050 | 0 | const hfs_btree_key_cat *targ_key = offset_data->targ_key; |
1051 | |
|
1052 | 0 | if (tsk_verbose) |
1053 | 0 | tsk_fprintf(stderr, |
1054 | 0 | "hfs_cat_get_record_offset_cb: %s node want: %" PRIu32 |
1055 | 0 | " vs have: %" PRIu32 "\n", |
1056 | 0 | (level_type == HFS_BT_NODE_TYPE_IDX) ? "Index" : "Leaf", |
1057 | 0 | tsk_getu32(hfs->fs_info.endian, targ_key->parent_cnid), |
1058 | 0 | tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid)); |
1059 | |
|
1060 | 0 | if (level_type == HFS_BT_NODE_TYPE_IDX) { |
1061 | 0 | int diff = hfs_cat_compare_keys(hfs, cur_key, cur_keylen, targ_key); |
1062 | 0 | if (diff < 0) |
1063 | 0 | return HFS_BTREE_CB_IDX_LT; |
1064 | 0 | else |
1065 | 0 | return HFS_BTREE_CB_IDX_EQGT; |
1066 | 0 | } |
1067 | 0 | else { |
1068 | 0 | int diff = hfs_cat_compare_keys(hfs, cur_key, cur_keylen, targ_key); |
1069 | | |
1070 | | // see if this record is for our file or if we passed the interesting entries |
1071 | 0 | if (diff < 0) { |
1072 | 0 | return HFS_BTREE_CB_LEAF_GO; |
1073 | 0 | } |
1074 | 0 | else if (diff == 0) { |
1075 | 0 | offset_data->off = |
1076 | 0 | key_off + 2 + tsk_getu16(hfs->fs_info.endian, |
1077 | 0 | cur_key->key_len); |
1078 | 0 | } |
1079 | 0 | return HFS_BTREE_CB_LEAF_STOP; |
1080 | 0 | } |
1081 | 0 | } |
1082 | | |
1083 | | |
1084 | | /** \internal |
1085 | | * Find the byte offset (from the start of the catalog file) to a record |
1086 | | * in the catalog file. |
1087 | | * @param hfs File System being analyzed |
1088 | | * @param needle Key to search for |
1089 | | * @returns Byte offset or 0 on error. 0 is also returned if catalog |
1090 | | * record was not found. Check tsk_errno to determine if error occurred. |
1091 | | */ |
1092 | | static TSK_OFF_T |
1093 | | hfs_cat_get_record_offset(HFS_INFO * hfs, const hfs_btree_key_cat * needle) |
1094 | 0 | { |
1095 | 0 | HFS_CAT_GET_RECORD_OFFSET_DATA offset_data; |
1096 | 0 | offset_data.off = 0; |
1097 | 0 | offset_data.targ_key = needle; |
1098 | 0 | if (hfs_cat_traverse(hfs, hfs_cat_get_record_offset_cb, &offset_data)) { |
1099 | 0 | return 0; |
1100 | 0 | } |
1101 | 0 | return offset_data.off; |
1102 | 0 | } |
1103 | | |
1104 | | |
1105 | | /** \internal |
1106 | | * Given a byte offset to a leaf record in the catalog file, read the data as |
1107 | | * a thread record. This will zero the buffer and read in the size of the thread |
1108 | | * data. |
1109 | | * @param hfs File System |
1110 | | * @param off Byte offset of record in catalog file (not including key) |
1111 | | * @param thread [out] Buffer to write thread data into. |
1112 | | * @returns 0 on success, 1 on failure; sets up to error string 1 */ |
1113 | | uint8_t |
1114 | | hfs_cat_read_thread_record(HFS_INFO * hfs, TSK_OFF_T off, |
1115 | | hfs_thread * thread) |
1116 | 0 | { |
1117 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); |
1118 | 0 | uint16_t uni_len; |
1119 | 0 | ssize_t cnt; |
1120 | |
|
1121 | 0 | memset(thread, 0, sizeof(hfs_thread)); |
1122 | 0 | cnt = tsk_fs_attr_read(hfs->catalog_attr, off, (char *) thread, 10, TSK_FS_FILE_READ_FLAG_NONE); |
1123 | 0 | if (cnt != 10) { |
1124 | 0 | if (cnt >= 0) { |
1125 | 0 | tsk_error_reset(); |
1126 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
1127 | 0 | } |
1128 | 0 | tsk_error_set_errstr2 |
1129 | 0 | ("hfs_cat_read_thread_record: Error reading catalog offset %" |
1130 | 0 | PRIdOFF " (header)", off); |
1131 | 0 | return 1; |
1132 | 0 | } |
1133 | | |
1134 | 0 | if ((tsk_getu16(fs->endian, thread->rec_type) != HFS_FOLDER_THREAD) |
1135 | 0 | && (tsk_getu16(fs->endian, thread->rec_type) != HFS_FILE_THREAD)) { |
1136 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
1137 | 0 | tsk_error_set_errstr |
1138 | 0 | ("hfs_cat_read_thread_record: unexpected record type %" PRIu16, |
1139 | 0 | tsk_getu16(fs->endian, thread->rec_type)); |
1140 | 0 | return 1; |
1141 | 0 | } |
1142 | | |
1143 | 0 | uni_len = tsk_getu16(fs->endian, thread->name.length); |
1144 | |
|
1145 | 0 | if (uni_len > 255) { |
1146 | 0 | tsk_error_set_errno(TSK_ERR_FS_INODE_COR); |
1147 | 0 | tsk_error_set_errstr |
1148 | 0 | ("hfs_cat_read_thread_record: invalid string length (%" PRIu16 |
1149 | 0 | ")", uni_len); |
1150 | 0 | return 1; |
1151 | 0 | } |
1152 | | |
1153 | 0 | cnt = |
1154 | 0 | tsk_fs_attr_read(hfs->catalog_attr, off + 10, |
1155 | 0 | (char *) thread->name.unicode, uni_len * 2, TSK_FS_FILE_READ_FLAG_NONE); |
1156 | 0 | if (cnt != uni_len * 2) { |
1157 | 0 | if (cnt >= 0) { |
1158 | 0 | tsk_error_reset(); |
1159 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
1160 | 0 | } |
1161 | 0 | tsk_error_set_errstr2 |
1162 | 0 | ("hfs_cat_read_thread_record: Error reading catalog offset %" |
1163 | 0 | PRIdOFF " (name)", off + 10); |
1164 | 0 | return 1; |
1165 | 0 | } |
1166 | | |
1167 | 0 | return 0; |
1168 | 0 | } |
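The fixed 10-byte read above covers the thread record up to and including the name length: a 2-byte record type, 2 reserved bytes, the 4-byte parent CNID, and the 2-byte Unicode character count; only then can the variable-length UTF-16 name (at most 255 code units, as checked above) be sized and read. Assuming the standard HFS+ thread-record layout described in Apple's TN1150, the byte offsets are:

/* Catalog thread record layout (illustrative summary):
 *   0..1   rec_type      HFS_FOLDER_THREAD or HFS_FILE_THREAD
 *   2..3   reserved
 *   4..7   parent_cnid   big-endian CNID of the parent folder
 *   8..9   name.length   number of UTF-16 code units (<= 255)
 *   10..   name.unicode  name.length * 2 bytes of UTF-16 text
 */

#include <stdint.h>

/* Total on-disk size of a thread record given its name length field. */
static inline uint32_t hfs_thread_record_size(uint16_t name_len)
{
    return 10u + 2u * (uint32_t) name_len;
}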
1169 | | |
1170 | | /** \internal |
1171 | | * Read a catalog record into a local data structure. This reads the |
1172 | | * correct amount, depending on whether it is a file or folder. |
1173 | | * @param hfs File system being analyzed |
1174 | | * @param off Byte offset (in catalog file) of record (not including key) |
1175 | | * @param record [out] Structure to read data into |
1176 | | * @returns 1 on error |
1177 | | */ |
1178 | | uint8_t |
1179 | | hfs_cat_read_file_folder_record(HFS_INFO * hfs, TSK_OFF_T off, |
1180 | | hfs_file_folder * record) |
1181 | 0 | { |
1182 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); |
1183 | 0 | ssize_t cnt; |
1184 | 0 | char rec_type[2]; |
1185 | |
|
1186 | 0 | memset(record, 0, sizeof(hfs_file_folder)); |
1187 | |
|
1188 | 0 | cnt = tsk_fs_attr_read(hfs->catalog_attr, off, rec_type, 2, TSK_FS_FILE_READ_FLAG_NONE); |
1189 | 0 | if (cnt != 2) { |
1190 | 0 | if (cnt >= 0) { |
1191 | 0 | tsk_error_reset(); |
1192 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
1193 | 0 | } |
1194 | 0 | tsk_error_set_errstr2 |
1195 | 0 | ("hfs_cat_read_file_folder_record: Error reading record type from catalog offset %" |
1196 | 0 | PRIdOFF " (header)", off); |
1197 | 0 | return 1; |
1198 | 0 | } |
1199 | | |
1200 | 0 | if (tsk_getu16(fs->endian, rec_type) == HFS_FOLDER_RECORD) { |
1201 | 0 | cnt = |
1202 | 0 | tsk_fs_attr_read(hfs->catalog_attr, off, (char *) record, |
1203 | 0 | sizeof(hfs_folder), TSK_FS_FILE_READ_FLAG_NONE); |
1204 | 0 | if (cnt != sizeof(hfs_folder)) { |
1205 | 0 | if (cnt >= 0) { |
1206 | 0 | tsk_error_reset(); |
1207 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
1208 | 0 | } |
1209 | 0 | tsk_error_set_errstr2 |
1210 | 0 | ("hfs_cat_read_file_folder_record: Error reading catalog offset %" |
1211 | 0 | PRIdOFF " (folder)", off); |
1212 | 0 | return 1; |
1213 | 0 | } |
1214 | 0 | } |
1215 | 0 | else if (tsk_getu16(fs->endian, rec_type) == HFS_FILE_RECORD) { |
1216 | 0 | cnt = |
1217 | 0 | tsk_fs_attr_read(hfs->catalog_attr, off, (char *) record, |
1218 | 0 | sizeof(hfs_file), TSK_FS_FILE_READ_FLAG_NONE); |
1219 | 0 | if (cnt != sizeof(hfs_file)) { |
1220 | 0 | if (cnt >= 0) { |
1221 | 0 | tsk_error_reset(); |
1222 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
1223 | 0 | } |
1224 | 0 | tsk_error_set_errstr2 |
1225 | 0 | ("hfs_cat_read_file_folder_record: Error reading catalog offset %" |
1226 | 0 | PRIdOFF " (file)", off); |
1227 | 0 | return 1; |
1228 | 0 | } |
1229 | 0 | } |
1230 | 0 | else { |
1231 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
1232 | 0 | tsk_error_set_errstr |
1233 | 0 | ("hfs_cat_read_file_folder_record: unexpected record type %" |
1234 | 0 | PRIu16, tsk_getu16(fs->endian, rec_type)); |
1235 | 0 | return 1; |
1236 | 0 | } |
1237 | | |
1238 | 0 | return 0; |
1239 | 0 | } |
1240 | | |
1241 | | // hfs_lookup_hard_link appears to be unnecessary - it looks up the cnid |
1242 | | // by seeing if there's a file/dir with the standard hard link name plus |
1243 | | // linknum and returns the meta_addr. But this should always be the same as linknum, |
1244 | | // and is very slow when there are many hard links, so it shouldn't be used. |
1245 | | //static TSK_INUM_T |
1246 | | //hfs_lookup_hard_link(HFS_INFO * hfs, TSK_INUM_T linknum, |
1247 | | // unsigned char is_directory) |
1248 | | //{ |
1249 | | // char fBuff[30]; |
1250 | | // TSK_FS_DIR *mdir; |
1251 | | // size_t indx; |
1252 | | // TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; |
1253 | | // |
1254 | | // memset(fBuff, 0, 30); |
1255 | | // |
1256 | | // if (is_directory) { |
1257 | | // |
1258 | | // tsk_take_lock(&(hfs->metadata_dir_cache_lock)); |
1259 | | // if (hfs->dir_meta_dir == NULL) { |
1260 | | // hfs->dir_meta_dir = |
1261 | | // tsk_fs_dir_open_meta(fs, hfs->meta_dir_inum); |
1262 | | // } |
1263 | | // tsk_release_lock(&(hfs->metadata_dir_cache_lock)); |
1264 | | // |
1265 | | // if (hfs->dir_meta_dir == NULL) { |
1266 | | // error_returned |
1267 | | // ("hfs_lookup_hard_link: could not open the dir metadata directory"); |
1268 | | // return 0; |
1269 | | // } |
1270 | | // else { |
1271 | | // mdir = hfs->dir_meta_dir; |
1272 | | // } |
1273 | | // snprintf(fBuff, 30, "dir_%" PRIuINUM, linknum); |
1274 | | // |
1275 | | // } |
1276 | | // else { |
1277 | | // |
1278 | | // tsk_take_lock(&(hfs->metadata_dir_cache_lock)); |
1279 | | // if (hfs->meta_dir == NULL) { |
1280 | | // hfs->meta_dir = tsk_fs_dir_open_meta(fs, hfs->meta_inum); |
1281 | | // } |
1282 | | // tsk_release_lock(&(hfs->metadata_dir_cache_lock)); |
1283 | | // |
1284 | | // if (hfs->meta_dir == NULL) { |
1285 | | // error_returned |
1286 | | // ("hfs_lookup_hard_link: could not open file metadata directory"); |
1287 | | // return 0; |
1288 | | // } |
1289 | | // else { |
1290 | | // mdir = hfs->meta_dir; |
1291 | | // } |
1292 | | // snprintf(fBuff, 30, "iNode%" PRIuINUM, linknum); |
1293 | | // } |
1294 | | // |
1295 | | // for (indx = 0; indx < tsk_fs_dir_getsize(mdir); ++indx) { |
1296 | | // if ((mdir->names != NULL) && mdir->names[indx].name && |
1297 | | // (fs->name_cmp(fs, mdir->names[indx].name, fBuff) == 0)) { |
1298 | | // // OK this is the one |
1299 | | // return mdir->names[indx].meta_addr; |
1300 | | // } |
1301 | | // } |
1302 | | // |
1303 | | // // OK, we did not find that linknum |
1304 | | // return 0; |
1305 | | //} |
1306 | | |
1307 | | /* |
1308 | | * Given a catalog entry, will test that entry to see if it is a hard link. |
1309 | | * If it is a hard link, the function returns the inum (or cnid) of the target file. |
1310 | | * If it is NOT a hard link, then the function returns the inum of the given entry. |
1311 | | * In both cases, the parameter is_error is set to zero. |
1312 | | * |
1313 | | * If an ERROR occurs, if it is a mild error, then is_error is set to 1, and the |
1314 | | * inum of the given entry is returned. This signals that hard link detection cannot |
1315 | | * be carried out. |
1316 | | * |
1317 | | * If the error is serious, then is_error is set to 2 or 3, depending on the kind of error, and |
1318 | | * the TSK error code is set, and the function returns zero. is_error==2 means that an error |
1319 | | * occurred in looking up the target file in the Catalog. is_error==3 means that the given |
1320 | | * entry appears to be a hard link, but the target file does not exist in the Catalog. |
1321 | | * |
1322 | | * @param hfs The file system |
1323 | | * @param entry The catalog entry to check |
1324 | | * @param is_error [out] Set to indicate whether an error occurred (see the values described above). |
1325 | | * @return The inum (or cnid) of the hard link target, or of the given catalog entry, or zero. |
1326 | | */ |
1327 | | TSK_INUM_T |
1328 | | hfs_follow_hard_link(HFS_INFO * hfs, hfs_file * cat, |
1329 | | unsigned char *is_error) |
1330 | 0 | { |
1331 | |
|
1332 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; |
1333 | 0 | TSK_INUM_T cnid; |
1334 | 0 | time_t crtime; |
1335 | 0 | uint32_t file_type; |
1336 | 0 | uint32_t file_creator; |
1337 | |
|
1338 | 0 | *is_error = 0; // default, not an error |
1339 | |
|
1340 | 0 | if (cat == NULL) { |
1341 | 0 | error_detected(TSK_ERR_FS_ARG, |
1342 | 0 | "hfs_follow_hard_link: Pointer to Catalog entry (2nd arg) is null"); |
1343 | 0 | return 0; |
1344 | 0 | } |
1345 | | |
1346 | 0 | cnid = tsk_getu32(fs->endian, cat->std.cnid); |
1347 | |
|
1348 | 0 | if (cnid < HFS_FIRST_USER_CNID) { |
1349 | | // Can't be a hard link. And, cannot look up in Catalog file either! |
1350 | 0 | return cnid; |
1351 | 0 | } |
1352 | | |
1353 | 0 | crtime = |
1354 | 0 | (time_t) hfs_convert_2_unix_time(tsk_getu32(fs->endian, |
1355 | 0 | cat->std.crtime)); |
1356 | | |
1357 | |
|
1358 | 0 | file_type = tsk_getu32(fs->endian, cat->std.u_info.file_type); |
1359 | 0 | file_creator = tsk_getu32(fs->endian, cat->std.u_info.file_cr); |
1360 | | |
1361 | | // Only proceed with the rest of this if the flags etc are right |
1362 | 0 | if (file_type == HFS_HARDLINK_FILE_TYPE |
1363 | 0 | && file_creator == HFS_HARDLINK_FILE_CREATOR) { |
1364 | | |
1365 | | // see if we have the HFS+ Private Data dir for file links; |
1366 | | // if not, it can't be a hard link. (We could warn the user, but |
1367 | | // we also rely on this when finding the HFS+ Private Data dir in |
1368 | | // the first place and we don't want a warning on every hfs_open.) |
1369 | 0 | if (hfs->meta_inum == 0) |
1370 | 0 | return cnid; |
1371 | | |
1372 | | // For this to work, we need the FS creation times. Is at least one of these set? |
1373 | 0 | if ((!hfs->has_root_crtime) && (!hfs->has_meta_dir_crtime) |
1374 | 0 | && (!hfs->has_meta_crtime)) { |
1375 | 0 | uint32_t linkNum = |
1376 | 0 | tsk_getu32(fs->endian, cat->std.perm.special.inum); |
1377 | 0 | *is_error = 1; |
1378 | 0 | if (tsk_verbose) |
1379 | 0 | tsk_fprintf(stderr, |
1380 | 0 | "WARNING: hfs_follow_hard_link: File system creation times are not set. " |
1381 | 0 | "Cannot test inode for hard link. File type and creator indicate that this" |
1382 | 0 | " is a hard link (file), with LINK ID = %" PRIu32 "\n", |
1383 | 0 | linkNum); |
1384 | 0 | return cnid; |
1385 | 0 | } |
1386 | | |
1387 | 0 | if ((!hfs->has_root_crtime) || (!hfs->has_meta_crtime)) { |
1388 | 0 | if (tsk_verbose) |
1389 | 0 | tsk_fprintf(stderr, |
1390 | 0 | "WARNING: hfs_follow_hard_link: Either the root folder or the" |
1391 | 0 | " file metadata folder is not accessible. Testing this potential hard link" |
1392 | 0 | " may be impaired.\n"); |
1393 | 0 | } |
1394 | | |
1395 | | // Now we need to check the creation time against the three FS creation times |
1396 | 0 | if ((hfs->has_meta_crtime && (crtime == hfs->meta_crtime)) || |
1397 | 0 | (hfs->has_meta_dir_crtime && (crtime == hfs->metadir_crtime)) |
1398 | 0 | || (hfs->has_root_crtime && (crtime == hfs->root_crtime))) { |
1399 | | // OK, this is a hard link to a file. |
1400 | 0 | uint32_t linkNum = |
1401 | 0 | tsk_getu32(fs->endian, cat->std.perm.special.inum); |
1402 | | |
1403 | | // We used to resolve this ID to a file in X folder using hfs_lookup_hard_link, but found |
1404 | | // that it was very inefficient and always resulted in the same linkNum value. |
1405 | | // We now just use linkNum |
1406 | 0 | return linkNum; |
1407 | 0 | } |
1408 | 0 | } |
1409 | 0 | else if (file_type == HFS_LINKDIR_FILE_TYPE |
1410 | 0 | && file_creator == HFS_LINKDIR_FILE_CREATOR) { |
1411 | | |
1412 | | // see if we have the HFS+ Private Directory Data dir for links; |
1413 | | // if not, it can't be a hard link. (We could warn the user, but |
1414 | | // we also rely on this when finding the HFS+ Private Directory Data dir in |
1415 | | // the first place and we don't want a warning on every hfs_open.) |
1416 | 0 | if (hfs->meta_dir_inum == 0) |
1417 | 0 | return cnid; |
1418 | | |
1419 | | // For this to work, we need the FS creation times. Is at least one of these set? |
1420 | 0 | if ((!hfs->has_root_crtime) && (!hfs->has_meta_dir_crtime) |
1421 | 0 | && (!hfs->has_meta_crtime)) { |
1422 | 0 | uint32_t linkNum = |
1423 | 0 | tsk_getu32(fs->endian, cat->std.perm.special.inum); |
1424 | 0 | *is_error = 1; |
1425 | |
|
1426 | 0 | if (tsk_verbose) |
1427 | 0 | tsk_fprintf(stderr, |
1428 | 0 | "WARNING: hfs_follow_hard_link: File system creation times are not set. " |
1429 | 0 | "Cannot test inode for hard link. File type and creator indicate that this" |
1430 | 0 | " is a hard link (directory), with LINK ID = %" PRIu32 |
1431 | 0 | "\n", linkNum); |
1432 | 0 | return cnid; |
1433 | 0 | } |
1434 | | |
1435 | 0 | if ((!hfs->has_root_crtime) || (!hfs->has_meta_crtime) |
1436 | 0 | || (!hfs->has_meta_dir_crtime)) { |
1437 | 0 | if (tsk_verbose) |
1438 | 0 | tsk_fprintf(stderr, |
1439 | 0 | "WARNING: hfs_follow_hard_link: Either the root folder or the" |
1440 | 0 | " file metadata folder or the directory metadata folder is"
1441 | 0 | " not accessible. Testing this potential hard linked folder " |
1442 | 0 | "may be impaired.\n"); |
1443 | 0 | } |
1444 | | |
1445 | | // Now we need to check the creation time against the three FS creation times |
1446 | 0 | if ((hfs->has_meta_crtime && (crtime == hfs->meta_crtime)) || |
1447 | 0 | (hfs->has_meta_dir_crtime && (crtime == hfs->metadir_crtime)) |
1448 | 0 | || (hfs->has_root_crtime && (crtime == hfs->root_crtime))) { |
1449 | | // OK, this is a hard link to a directory. |
1450 | 0 | uint32_t linkNum = |
1451 | 0 | tsk_getu32(fs->endian, cat->std.perm.special.inum); |
1452 | | |
1453 | | // We used to resolve this ID to a file in X folder using hfs_lookup_hard_link, but found |
1454 | | // that it was very inefficient and always resulted in the same linkNum value.
1455 | | // We now just use linkNum.
1456 | 0 | return linkNum; |
1457 | 0 | } |
1458 | 0 | } |
1459 | | |
1460 | | // It cannot be a hard link (file or directory) |
1461 | 0 | return cnid; |
1462 | 0 | } |
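// ---- Editorial sketch (hypothetical helper, not part of TSK) ----------------
// The hard-link test above reduces to a predicate over values already pulled
// out of the catalog record: the Finder type/creator pair must match the link
// markers, and the record's crtime must equal one of the volume-level creation
// times cached in HFS_INFO. The helper below only restates that logic; its
// name and parameters are editorial, the field names mirror this function.
static int
hfs_is_probable_hard_link_sketch(const HFS_INFO * hfs, uint32_t crtime,
    uint32_t file_type, uint32_t file_creator,
    uint32_t link_type, uint32_t link_creator)
{
    if ((file_type != link_type) || (file_creator != link_creator))
        return 0;
    // A genuine link's crtime equals one of the FS creation times.
    return (hfs->has_meta_crtime && (crtime == hfs->meta_crtime))
        || (hfs->has_meta_dir_crtime && (crtime == hfs->metadir_crtime))
        || (hfs->has_root_crtime && (crtime == hfs->root_crtime));
}
// -----------------------------------------------------------------------------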
1463 | | |
1464 | | |
1465 | | /** \internal |
1466 | | * Look up an entry in the catalog file and save it into the entry data structure.
1467 | | * Do not call this for the special files that do not have an entry in the catalog.
1468 | | * @param hfs File system being analyzed
1469 | | * @param inum Address (cnid) of file to open
1470 | | * @param entry [out] Structure to read data into
1471 | | * @param follow_hard_link If non-zero, resolve a hard link and return its target's entry
1472 | | * @returns 1 on error or not found, 0 on success. Check tsk_errno |
1473 | | * to differentiate between error and not found. If it is not found, then the |
1474 | | * errno will be TSK_ERR_FS_INODE_NUM. Else, it will be some other value. |
1475 | | */ |
1476 | | uint8_t |
1477 | | hfs_cat_file_lookup(HFS_INFO * hfs, TSK_INUM_T inum, HFS_ENTRY * entry, |
1478 | | unsigned char follow_hard_link) |
1479 | 0 | { |
1480 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); |
1481 | 0 | hfs_btree_key_cat key; /* current catalog key */ |
1482 | 0 | hfs_thread thread; /* thread record */ |
1483 | 0 | hfs_file_folder record; /* file/folder record */ |
1484 | 0 | TSK_OFF_T off; |
1485 | |
|
1486 | 0 | tsk_error_reset(); |
1487 | |
|
1488 | 0 | if (tsk_verbose) |
1489 | 0 | tsk_fprintf(stderr, |
1490 | 0 | "hfs_cat_file_lookup: called for inum %" PRIuINUM "\n", inum); |
1491 | | |
1492 | | // Test if this is a special file that is not located in the catalog |
1493 | 0 | if ((inum == HFS_EXTENTS_FILE_ID) || |
1494 | 0 | (inum == HFS_CATALOG_FILE_ID) || |
1495 | 0 | (inum == HFS_ALLOCATION_FILE_ID) || |
1496 | 0 | (inum == HFS_STARTUP_FILE_ID) || |
1497 | 0 | (inum == HFS_ATTRIBUTES_FILE_ID)) { |
1498 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
1499 | 0 | tsk_error_set_errstr |
1500 | 0 | ("hfs_cat_file_lookup: Called on special file: %" PRIuINUM, |
1501 | 0 | inum); |
1502 | 0 | return 1; |
1503 | 0 | } |
1504 | | |
1505 | | |
1506 | | /* first look up the thread record for the item we're searching for */ |
1507 | | |
1508 | | /* set up the thread record key */ |
1509 | 0 | memset((char *) &key, 0, sizeof(hfs_btree_key_cat)); |
1510 | 0 | cnid_to_array((uint32_t) inum, key.parent_cnid); |
1511 | |
|
1512 | 0 | if (tsk_verbose) |
1513 | 0 | tsk_fprintf(stderr, |
1514 | 0 | "hfs_cat_file_lookup: Looking up thread record (%" PRIuINUM |
1515 | 0 | ")\n", inum); |
1516 | | |
1517 | | /* look up the thread record */ |
1518 | 0 | off = hfs_cat_get_record_offset(hfs, &key); |
1519 | 0 | if (off == 0) { |
1520 | | // no parsing error, just not found |
1521 | 0 | if (tsk_error_get_errno() == 0) { |
1522 | 0 | tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); |
1523 | 0 | tsk_error_set_errstr |
1524 | 0 | ("hfs_cat_file_lookup: Error finding thread node for file (%" |
1525 | 0 | PRIuINUM ")", inum); |
1526 | 0 | } |
1527 | 0 | else { |
1528 | 0 | tsk_error_set_errstr2 |
1529 | 0 | (" hfs_cat_file_lookup: thread for file (%" PRIuINUM ")", |
1530 | 0 | inum); |
1531 | 0 | } |
1532 | 0 | return 1; |
1533 | 0 | } |
1534 | | |
1535 | | /* read the thread record */ |
1536 | 0 | if (hfs_cat_read_thread_record(hfs, off, &thread)) { |
1537 | 0 | tsk_error_set_errstr2(" hfs_cat_file_lookup: file (%" PRIuINUM ")", |
1538 | 0 | inum); |
1539 | 0 | return 1; |
1540 | 0 | } |
1541 | | |
1542 | | /* now look up the actual file/folder record */ |
1543 | | |
1544 | | /* build key */ |
1545 | 0 | memset((char *) &key, 0, sizeof(hfs_btree_key_cat)); |
1546 | 0 | memcpy((char *) key.parent_cnid, (char *) thread.parent_cnid, |
1547 | 0 | sizeof(key.parent_cnid)); |
1548 | 0 | memcpy((char *) &key.name, (char *) &thread.name, sizeof(key.name)); |
1549 | |
|
1550 | 0 | if (tsk_verbose) |
1551 | 0 | tsk_fprintf(stderr, |
1552 | 0 | "hfs_cat_file_lookup: Looking up file record (parent: %" |
1553 | 0 | PRIuINUM ")\n", (uint64_t) tsk_getu32(fs->endian, |
1554 | 0 | key.parent_cnid)); |
1555 | | |
1556 | | /* look up the record */ |
1557 | 0 | off = hfs_cat_get_record_offset(hfs, &key); |
1558 | 0 | if (off == 0) { |
1559 | | // no parsing error, just not found |
1560 | 0 | if (tsk_error_get_errno() == 0) { |
1561 | 0 | tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); |
1562 | 0 | tsk_error_set_errstr |
1563 | 0 | ("hfs_cat_file_lookup: Error finding record node %" |
1564 | 0 | PRIuINUM, inum); |
1565 | 0 | } |
1566 | 0 | else { |
1567 | 0 | tsk_error_set_errstr2(" hfs_cat_file_lookup: file (%" PRIuINUM |
1568 | 0 | ")", inum); |
1569 | 0 | } |
1570 | 0 | return 1; |
1571 | 0 | } |
1572 | | |
1573 | | /* read the record */ |
1574 | 0 | if (hfs_cat_read_file_folder_record(hfs, off, &record)) { |
1575 | 0 | tsk_error_set_errstr2(" hfs_cat_file_lookup: file (%" PRIuINUM ")", |
1576 | 0 | inum); |
1577 | 0 | return 1; |
1578 | 0 | } |
1579 | | |
1580 | | /* these memcpy can be gotten rid of, really */ |
1581 | 0 | if (tsk_getu16(fs->endian, |
1582 | 0 | record.file.std.rec_type) == HFS_FOLDER_RECORD) { |
1583 | 0 | if (tsk_verbose) |
1584 | 0 | tsk_fprintf(stderr, |
1585 | 0 | "hfs_cat_file_lookup: found folder record valence %" PRIu32 |
1586 | 0 | ", cnid %" PRIu32 "\n", tsk_getu32(fs->endian, |
1587 | 0 | record.folder.std.valence), tsk_getu32(fs->endian, |
1588 | 0 | record.folder.std.cnid)); |
1589 | 0 | memcpy((char *) &entry->cat, (char *) &record, sizeof(hfs_folder)); |
1590 | 0 | } |
1591 | 0 | else if (tsk_getu16(fs->endian, |
1592 | 0 | record.file.std.rec_type) == HFS_FILE_RECORD) { |
1593 | 0 | if (tsk_verbose) |
1594 | 0 | tsk_fprintf(stderr, |
1595 | 0 | "hfs_cat_file_lookup: found file record cnid %" PRIu32 |
1596 | 0 | "\n", tsk_getu32(fs->endian, record.file.std.cnid)); |
1597 | 0 | memcpy((char *) &entry->cat, (char *) &record, sizeof(hfs_file)); |
1598 | 0 | } |
1599 | | /* other cases already caught by hfs_cat_read_file_folder_record */ |
1600 | |
|
1601 | 0 | memcpy((char *) &entry->thread, (char *) &thread, sizeof(hfs_thread)); |
1602 | |
|
1603 | 0 | entry->flags = TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_USED; |
1604 | 0 | entry->inum = inum; |
1605 | |
|
1606 | 0 | if (follow_hard_link) { |
1607 | | // TEST to see if this is a hard link |
1608 | 0 | unsigned char is_err; |
1609 | 0 | TSK_INUM_T target_cnid = |
1610 | 0 | hfs_follow_hard_link(hfs, &(entry->cat), &is_err); |
1611 | 0 | if (is_err > 1) { |
1612 | 0 | error_returned |
1613 | 0 | ("hfs_cat_file_lookup: error occurred while following a possible hard link for " |
1614 | 0 | "inum (cnid) = %" PRIuINUM, inum); |
1615 | 0 | return 1; |
1616 | 0 | } |
1617 | 0 | if (target_cnid != inum) { |
1618 | | // This is a hard link, and we have got the cnid of the target file, so look it up. |
1619 | 0 | uint8_t res = |
1620 | 0 | hfs_cat_file_lookup(hfs, target_cnid, entry, FALSE); |
1621 | 0 | if (res != 0) { |
1622 | 0 | error_returned |
1623 | 0 | ("hfs_cat_file_lookup: error occurred while looking up the Catalog entry for " |
1624 | 0 | "the target of inum (cnid) = %" PRIuINUM " target", |
1625 | 0 | inum); |
1626 | 0 | } |
1627 | 0 | return res;
1628 | 0 | } |
1629 | | |
1630 | | // Target is NOT a hard link, so fall through to the non-hard link exit. |
1631 | 0 | } |
1632 | | |
1633 | 0 | if (tsk_verbose) |
1634 | 0 | tsk_fprintf(stderr, "hfs_cat_file_lookup exiting\n"); |
1635 | 0 | return 0; |
1636 | 0 | } |
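// ---- Editorial usage sketch (hypothetical helper, not part of TSK) -----------
// A typical caller resolves a CNID to its catalog record with
// hfs_cat_file_lookup(); because follow_hard_link is TRUE, a CNID that names a
// hard link transparently yields the link target's record instead.
static uint8_t
hfs_print_rec_type_sketch(HFS_INFO * hfs, TSK_INUM_T inum)
{
    HFS_ENTRY entry;

    if (hfs_cat_file_lookup(hfs, inum, &entry, TRUE))
        return 1;               // error, or CNID not present in the catalog

    tsk_fprintf(stderr, "CNID %" PRIuINUM " has record type %" PRIu16 "\n",
        entry.inum,
        tsk_getu16(hfs->fs_info.endian, entry.cat.std.rec_type));
    return 0;
}
// -----------------------------------------------------------------------------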
1637 | | |
1638 | | |
1639 | | static uint8_t |
1640 | | hfs_find_highest_inum_cb( |
1641 | | HFS_INFO * hfs, |
1642 | | [[maybe_unused]] int8_t level_type, |
1643 | | const hfs_btree_key_cat * cur_key, |
1644 | | int cur_keylen, |
1645 | | [[maybe_unused]] size_t node_size, |
1646 | | [[maybe_unused]] TSK_OFF_T key_off, |
1647 | | void *ptr) |
1648 | 0 | { |
1649 | 0 | if (cur_keylen < 6) { |
1650 | | // Note that it would be better to return an error value here,
1651 | | // but the current function interface does not support this.
1652 | | // Also see issue #2365.
1653 | 0 | return -1; |
1654 | 0 | } |
1655 | | // NOTE: This assumes that the biggest inum is the last one that we |
1656 | | // see. The traverse method does not currently promise that as part of
1657 | | // its callback "contract". |
1658 | 0 | *((TSK_INUM_T*) ptr) = tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid); |
1659 | 0 | return HFS_BTREE_CB_IDX_LT; |
1660 | 0 | } |
1661 | | |
1662 | | /** \internal |
1663 | | * Returns the largest inode number in file system |
1664 | | * @param hfs File system being analyzed |
1665 | | * @returns largest metadata address |
1666 | | */ |
1667 | | static TSK_INUM_T |
1668 | | hfs_find_highest_inum(HFS_INFO * hfs) |
1669 | 0 | { |
1670 | | // @@@ get actual number from Catalog file (go to far right) (we can't always trust the vol header) |
1671 | 0 | TSK_INUM_T inum; |
1672 | 0 | if (hfs_cat_traverse(hfs, hfs_find_highest_inum_cb, &inum)) { |
1673 | | /* Catalog traversal failed, fallback on legacy method : |
1674 | | if HFS_VH_ATTR_CNIDS_REUSED is set, then |
1675 | | the maximum CNID is 2^32-1; if it's not set, then nextCatalogId is |
1676 | | supposed to be larger than all CNIDs on disk. |
1677 | | */ |
1678 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); |
1679 | 0 | if (tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_CNIDS_REUSED) |
1680 | 0 | return (TSK_INUM_T) 0xffffffff; |
1681 | 0 | else |
1682 | 0 | return (TSK_INUM_T) tsk_getu32(fs->endian, |
1683 | 0 | hfs->fs->next_cat_id) - 1; |
1684 | 0 | } |
1685 | 0 | return inum; |
1686 | 0 | } |
1687 | | |
1688 | | |
1689 | | static TSK_FS_META_MODE_ENUM |
1690 | | hfs_mode_to_tsk_mode(uint16_t a_mode) |
1691 | 0 | { |
1692 | 0 | TSK_FS_META_MODE_ENUM mode = TSK_FS_META_MODE_UNSPECIFIED; |
1693 | |
|
1694 | 0 | if (a_mode & HFS_IN_ISUID) |
1695 | 0 | mode = (TSK_FS_META_MODE_ENUM) (mode | TSK_FS_META_MODE_ISUID); |
1696 | 0 | if (a_mode & HFS_IN_ISGID) |
1697 | 0 | mode = (TSK_FS_META_MODE_ENUM) (mode | TSK_FS_META_MODE_ISGID); |
1698 | 0 | if (a_mode & HFS_IN_ISVTX) |
1699 | 0 | mode = (TSK_FS_META_MODE_ENUM) (mode | TSK_FS_META_MODE_ISVTX); |
1700 | |
|
1701 | 0 | if (a_mode & HFS_IN_IRUSR) |
1702 | 0 | mode = (TSK_FS_META_MODE_ENUM) (mode | TSK_FS_META_MODE_IRUSR); |
1703 | 0 | if (a_mode & HFS_IN_IWUSR) |
1704 | 0 | mode = (TSK_FS_META_MODE_ENUM) (mode | TSK_FS_META_MODE_IWUSR); |
1705 | 0 | if (a_mode & HFS_IN_IXUSR) |
1706 | 0 | mode = (TSK_FS_META_MODE_ENUM) (mode | TSK_FS_META_MODE_IXUSR); |
1707 | |
|
1708 | 0 | if (a_mode & HFS_IN_IRGRP) |
1709 | 0 | mode = (TSK_FS_META_MODE_ENUM) (mode | TSK_FS_META_MODE_IRGRP); |
1710 | 0 | if (a_mode & HFS_IN_IWGRP) |
1711 | 0 | mode = (TSK_FS_META_MODE_ENUM) (mode | TSK_FS_META_MODE_IWGRP); |
1712 | 0 | if (a_mode & HFS_IN_IXGRP) |
1713 | 0 | mode = (TSK_FS_META_MODE_ENUM) (mode | TSK_FS_META_MODE_IXGRP); |
1714 | |
|
1715 | 0 | if (a_mode & HFS_IN_IROTH) |
1716 | 0 | mode = (TSK_FS_META_MODE_ENUM) (mode | TSK_FS_META_MODE_IROTH); |
1717 | 0 | if (a_mode & HFS_IN_IWOTH) |
1718 | 0 | mode = (TSK_FS_META_MODE_ENUM) (mode | TSK_FS_META_MODE_IWOTH); |
1719 | 0 | if (a_mode & HFS_IN_IXOTH) |
1720 | 0 | mode = (TSK_FS_META_MODE_ENUM) (mode | TSK_FS_META_MODE_IXOTH); |
1721 | |
|
1722 | 0 | return mode; |
1723 | 0 | } |
1724 | | |
1725 | | static TSK_FS_META_TYPE_ENUM |
1726 | | hfs_mode_to_tsk_meta_type(uint16_t a_mode) |
1727 | 0 | { |
1728 | 0 | switch (a_mode & HFS_IN_IFMT) { |
1729 | 0 | case HFS_IN_IFIFO: |
1730 | 0 | return TSK_FS_META_TYPE_FIFO; |
1731 | 0 | case HFS_IN_IFCHR: |
1732 | 0 | return TSK_FS_META_TYPE_CHR; |
1733 | 0 | case HFS_IN_IFDIR: |
1734 | 0 | return TSK_FS_META_TYPE_DIR; |
1735 | 0 | case HFS_IN_IFBLK: |
1736 | 0 | return TSK_FS_META_TYPE_BLK; |
1737 | 0 | case HFS_IN_IFREG: |
1738 | 0 | return TSK_FS_META_TYPE_REG; |
1739 | 0 | case HFS_IN_IFLNK: |
1740 | 0 | return TSK_FS_META_TYPE_LNK; |
1741 | 0 | case HFS_IN_IFSOCK: |
1742 | 0 | return TSK_FS_META_TYPE_SOCK; |
1743 | 0 | case HFS_IFWHT: |
1744 | 0 | return TSK_FS_META_TYPE_WHT; |
1745 | 0 | case HFS_IFXATTR: |
1746 | 0 | return TSK_FS_META_TYPE_UNDEF; |
1747 | 0 | default: |
1748 | | /* error */ |
1749 | 0 | return TSK_FS_META_TYPE_UNDEF; |
1750 | 0 | } |
1751 | 0 | } |
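// ---- Editorial example (not part of TSK) -------------------------------------
// Decoding a catalog record's BSD-style mode field with the two helpers above.
// The constant 0100644 (regular file, rw-r--r--) is only an illustration; the
// function name below is hypothetical.
static void
hfs_mode_conversion_example(void)
{
    uint16_t hfsmode = 0100644;

    // The file type comes from the upper bits (HFS_IN_IFMT mask) ...
    TSK_FS_META_TYPE_ENUM type = hfs_mode_to_tsk_meta_type(hfsmode); // TSK_FS_META_TYPE_REG
    // ... and the permission bits map one-to-one onto the TSK mode flags.
    TSK_FS_META_MODE_ENUM mode = hfs_mode_to_tsk_mode(hfsmode);      // IRUSR|IWUSR|IRGRP|IROTH

    (void) type;
    (void) mode;
}
// -----------------------------------------------------------------------------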
1752 | | |
1753 | | |
1754 | | static uint8_t |
1755 | | hfs_make_specialbase(TSK_FS_FILE * fs_file) |
1756 | 0 | { |
1757 | 0 | fs_file->meta->type = TSK_FS_META_TYPE_REG; |
1758 | 0 | fs_file->meta->mode = TSK_FS_META_MODE_UNSPECIFIED; |
1759 | 0 | fs_file->meta->nlink = 1; |
1760 | 0 | fs_file->meta->flags = (TSK_FS_META_FLAG_ENUM) |
1761 | 0 | (TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_ALLOC); |
1762 | 0 | fs_file->meta->uid = fs_file->meta->gid = 0; |
1763 | 0 | fs_file->meta->mtime = fs_file->meta->atime = fs_file->meta->ctime = |
1764 | 0 | fs_file->meta->crtime = 0; |
1765 | 0 | fs_file->meta->mtime_nano = fs_file->meta->atime_nano = |
1766 | 0 | fs_file->meta->ctime_nano = fs_file->meta->crtime_nano = 0; |
1767 | |
|
1768 | 0 | if (fs_file->meta->name2 == NULL) { |
1769 | 0 | if ((fs_file->meta->name2 = (TSK_FS_META_NAME_LIST *) |
1770 | 0 | tsk_malloc(sizeof(TSK_FS_META_NAME_LIST))) == NULL) { |
1771 | 0 | error_returned |
1772 | 0 | (" - hfs_make_specialbase, couldn't malloc space for a name list"); |
1773 | 0 | return 1; |
1774 | 0 | } |
1775 | 0 | fs_file->meta->name2->next = NULL; |
1776 | 0 | } |
1777 | | |
1778 | 0 | if (fs_file->meta->attr != NULL) { |
1779 | 0 | tsk_fs_attrlist_markunused(fs_file->meta->attr); |
1780 | 0 | } |
1781 | 0 | else { |
1782 | 0 | fs_file->meta->attr = tsk_fs_attrlist_alloc(); |
1783 | 0 | } |
1784 | 0 | return 0; |
1785 | 0 | } |
1786 | | |
1787 | | /** |
1788 | | * \internal |
1789 | | * Create an FS_INODE structure for the catalog file. |
1790 | | * |
1791 | | * @param hfs File system to analyze |
1792 | | * @param fs_file Structure to copy file information into. |
1793 | | * @return 1 on error and 0 on success |
1794 | | */ |
1795 | | static uint8_t |
1796 | | hfs_make_catalog(HFS_INFO * hfs, TSK_FS_FILE * fs_file) |
1797 | 0 | { |
1798 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; |
1799 | 0 | TSK_FS_ATTR *fs_attr; |
1800 | 0 | TSK_FS_ATTR_RUN *attr_run; |
1801 | 0 | unsigned char dummy1, dummy2; |
1802 | 0 | uint64_t dummy3; |
1803 | 0 | uint8_t result; |
1804 | |
|
1805 | 0 | if (tsk_verbose) |
1806 | 0 | tsk_fprintf(stderr, |
1807 | 0 | "hfs_make_catalog: Making virtual catalog file\n"); |
1808 | |
|
1809 | 0 | if (hfs_make_specialbase(fs_file)) { |
1810 | 0 | error_returned(" - hfs_make_catalog"); |
1811 | 0 | return 1; |
1812 | 0 | } |
1813 | | |
1814 | 0 | fs_file->meta->addr = HFS_CATALOG_FILE_ID; |
1815 | 0 | strncpy(fs_file->meta->name2->name, HFS_CATALOGNAME, |
1816 | 0 | TSK_FS_META_NAME_LIST_NSIZE); |
1817 | |
|
1818 | 0 | fs_file->meta->size = |
1819 | 0 | tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz); |
1820 | | |
1821 | | |
1822 | | // convert the runs in the volume header to attribute runs |
1823 | 0 | if (((attr_run = |
1824 | 0 | hfs_extents_to_attr(fs, hfs->fs->cat_file.extents, |
1825 | 0 | 0)) == NULL) && (tsk_error_get_errno() != 0)) { |
1826 | 0 | error_returned(" - hfs_make_catalog"); |
1827 | 0 | return 1; |
1828 | 0 | } |
1829 | | |
1830 | 0 | if ((fs_attr = |
1831 | 0 | tsk_fs_attrlist_getnew(fs_file->meta->attr, |
1832 | 0 | TSK_FS_ATTR_NONRES)) == NULL) { |
1833 | 0 | error_returned(" - hfs_make_catalog"); |
1834 | 0 | tsk_fs_attr_run_free(attr_run); |
1835 | 0 | return 1; |
1836 | 0 | } |
1837 | | |
1838 | | // initialize the data run |
1839 | 0 | if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL, |
1840 | 0 | TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA, |
1841 | 0 | tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz), |
1842 | 0 | tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz), |
1843 | 0 | tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz), TSK_FS_ATTR_FLAG_NONE, 0)) { |
1844 | 0 | error_returned(" - hfs_make_catalog"); |
1845 | 0 | tsk_fs_attr_run_free(attr_run); |
1846 | 0 | return 1; |
1847 | 0 | } |
1848 | | |
1849 | | // see if catalog file has additional runs |
1850 | 0 | if (hfs_ext_find_extent_record_attr(hfs, HFS_CATALOG_FILE_ID, fs_attr, |
1851 | 0 | TRUE)) { |
1852 | 0 | error_returned(" - hfs_make_catalog"); |
1853 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; |
1854 | 0 | return 1; |
1855 | 0 | } |
1856 | | |
1857 | 0 | result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3); |
1858 | 0 | if (result != 0) { |
1859 | 0 | if (tsk_verbose) |
1860 | 0 | tsk_fprintf(stderr, |
1861 | 0 | "WARNING: Extended attributes failed to load for the Catalog file.\n"); |
1862 | 0 | tsk_error_reset(); |
1863 | 0 | } |
1864 | |
|
1865 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; |
1866 | 0 | return 0; |
1867 | 0 | } |
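// ---- Editorial sketch (hypothetical helper, not part of TSK) ------------------
// hfs_make_catalog() above and the hfs_make_extents/blockmap/startfile/attrfile
// functions below all follow the same recipe: the first extents of a special
// file are recorded in its volume-header fork, and any further extents live in
// the Extents Overflow B-tree. A generic version of that recipe, assuming the
// volume-header fork fields use the same hfs_fork layout (logic_sz, extents)
// seen elsewhere in this file, might look like:
static uint8_t
hfs_make_special_runs_sketch(HFS_INFO * hfs, TSK_FS_FILE * fs_file,
    const hfs_fork * fork, uint32_t cnid)
{
    TSK_FS_INFO *fs = &hfs->fs_info;
    TSK_FS_ATTR *fs_attr;
    TSK_FS_ATTR_RUN *attr_run;
    uint64_t logic_sz = tsk_getu64(fs->endian, fork->logic_sz);

    // runs recorded directly in the volume header
    if (((attr_run = hfs_extents_to_attr(fs, fork->extents, 0)) == NULL)
        && (tsk_error_get_errno() != 0))
        return 1;

    if ((fs_attr = tsk_fs_attrlist_getnew(fs_file->meta->attr,
                TSK_FS_ATTR_NONRES)) == NULL) {
        tsk_fs_attr_run_free(attr_run);
        return 1;
    }

    if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL,
            TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
            logic_sz, logic_sz, logic_sz, TSK_FS_ATTR_FLAG_NONE, 0)) {
        tsk_fs_attr_run_free(attr_run);
        return 1;
    }

    // overflow runs stored in the Extents Overflow B-tree
    if (hfs_ext_find_extent_record_attr(hfs, cnid, fs_attr, TRUE))
        return 1;

    return 0;
}
// -----------------------------------------------------------------------------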
1868 | | |
1869 | | /** |
1870 | | * \internal |
1871 | | * Create an FS_FILE for the extents file |
1872 | | * |
1873 | | * @param hfs File system to analyze |
1874 | | * @param fs_file Structure to copy file information into. |
1875 | | * @return 1 on error and 0 on success |
1876 | | */ |
1877 | | static uint8_t |
1878 | | hfs_make_extents(HFS_INFO * hfs, TSK_FS_FILE * fs_file) |
1879 | 0 | { |
1880 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; |
1881 | 0 | TSK_FS_ATTR *fs_attr; |
1882 | 0 | TSK_FS_ATTR_RUN *attr_run; |
1883 | |
|
1884 | 0 | if (tsk_verbose) |
1885 | 0 | tsk_fprintf(stderr, |
1886 | 0 | "hfs_make_extents: Making virtual extents file\n"); |
1887 | |
|
1888 | 0 | if (hfs_make_specialbase(fs_file)) { |
1889 | 0 | error_returned(" - hfs_make_extents"); |
1890 | 0 | return 1; |
1891 | 0 | } |
1892 | | |
1893 | 0 | fs_file->meta->addr = HFS_EXTENTS_FILE_ID; |
1894 | 0 | strncpy(fs_file->meta->name2->name, HFS_EXTENTSNAME, |
1895 | 0 | TSK_FS_META_NAME_LIST_NSIZE); |
1896 | |
|
1897 | 0 | fs_file->meta->size = |
1898 | 0 | tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz); |
1899 | | |
1900 | |
|
1901 | 0 | if (((attr_run = |
1902 | 0 | hfs_extents_to_attr(fs, hfs->fs->ext_file.extents, |
1903 | 0 | 0)) == NULL) && (tsk_error_get_errno() != 0)) { |
1904 | 0 | error_returned(" - hfs_make_extents"); |
1905 | 0 | return 1; |
1906 | 0 | } |
1907 | | |
1908 | 0 | if ((fs_attr = |
1909 | 0 | tsk_fs_attrlist_getnew(fs_file->meta->attr, |
1910 | 0 | TSK_FS_ATTR_NONRES)) == NULL) { |
1911 | 0 | error_returned(" - hfs_make_extents"); |
1912 | 0 | tsk_fs_attr_run_free(attr_run); |
1913 | 0 | return 1; |
1914 | 0 | } |
1915 | | |
1916 | | // initialize the data run |
1917 | 0 | if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL, |
1918 | 0 | TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA, |
1919 | 0 | tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz), |
1920 | 0 | tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz), |
1921 | 0 | tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz), TSK_FS_ATTR_FLAG_NONE, 0)) { |
1922 | 0 | error_returned(" - hfs_make_extents"); |
1923 | 0 | tsk_fs_attr_run_free(attr_run); |
1924 | 0 | return 1; |
1925 | 0 | } |
1926 | | |
1927 | | //hfs_load_extended_attrs(fs_file); |
1928 | | |
1929 | | // The Extents file does not have overflow extent records for itself
1930 | | |
1931 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; |
1932 | 0 | return 0; |
1933 | 0 | } |
1934 | | |
1935 | | |
1936 | | /** |
1937 | | * \internal |
1938 | | * Create an FS_INODE structure for the blockmap / allocation file. |
1939 | | * |
1940 | | * @param hfs File system to analyze |
1941 | | * @param fs_file Structure to copy file information into. |
1942 | | * @return 1 on error and 0 on success |
1943 | | */ |
1944 | | static uint8_t |
1945 | | hfs_make_blockmap(HFS_INFO * hfs, TSK_FS_FILE * fs_file) |
1946 | 0 | { |
1947 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; |
1948 | 0 | TSK_FS_ATTR *fs_attr; |
1949 | 0 | TSK_FS_ATTR_RUN *attr_run; |
1950 | 0 | unsigned char dummy1, dummy2; |
1951 | 0 | uint64_t dummy3; |
1952 | 0 | uint8_t result; |
1953 | |
|
1954 | 0 | if (tsk_verbose) |
1955 | 0 | tsk_fprintf(stderr, |
1956 | 0 | "hfs_make_blockmap: Making virtual blockmap file\n"); |
1957 | |
|
1958 | 0 | if (hfs_make_specialbase(fs_file)) { |
1959 | 0 | error_returned(" - hfs_make_blockmap"); |
1960 | 0 | return 1; |
1961 | 0 | } |
1962 | | |
1963 | 0 | fs_file->meta->addr = HFS_ALLOCATION_FILE_ID; |
1964 | 0 | strncpy(fs_file->meta->name2->name, HFS_ALLOCATIONNAME, |
1965 | 0 | TSK_FS_META_NAME_LIST_NSIZE); |
1966 | |
|
1967 | 0 | fs_file->meta->size = |
1968 | 0 | tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz); |
1969 | |
|
1970 | 0 | if (((attr_run = |
1971 | 0 | hfs_extents_to_attr(fs, hfs->fs->alloc_file.extents, |
1972 | 0 | 0)) == NULL) && (tsk_error_get_errno() != 0)) { |
1973 | 0 | error_returned(" - hfs_make_blockmap"); |
1974 | 0 | return 1; |
1975 | 0 | } |
1976 | | |
1977 | 0 | if ((fs_attr = |
1978 | 0 | tsk_fs_attrlist_getnew(fs_file->meta->attr, |
1979 | 0 | TSK_FS_ATTR_NONRES)) == NULL) { |
1980 | 0 | error_returned(" - hfs_make_blockmap"); |
1981 | 0 | tsk_fs_attr_run_free(attr_run); |
1982 | 0 | return 1; |
1983 | 0 | } |
1984 | | |
1985 | | // initialize the data run |
1986 | 0 | if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL, |
1987 | 0 | TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA, |
1988 | 0 | tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz), |
1989 | 0 | tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz), |
1990 | 0 | tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz), TSK_FS_ATTR_FLAG_NONE, 0)) { |
1991 | 0 | error_returned(" - hfs_make_blockmap"); |
1992 | 0 | tsk_fs_attr_run_free(attr_run); |
1993 | 0 | return 1; |
1994 | 0 | } |
1995 | | |
1996 | | // see if the allocation file has additional runs
1997 | 0 | if (hfs_ext_find_extent_record_attr(hfs, HFS_ALLOCATION_FILE_ID, |
1998 | 0 | fs_attr, TRUE)) { |
1999 | 0 | error_returned(" - hfs_make_blockmap"); |
2000 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; |
2001 | 0 | return 1; |
2002 | 0 | } |
2003 | | |
2004 | | |
2005 | 0 | result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3); |
2006 | 0 | if (result != 0) { |
2007 | 0 | if (tsk_verbose) |
2008 | 0 | tsk_fprintf(stderr, |
2009 | 0 | "WARNING: Extended attributes failed to load for the Allocation file.\n"); |
2010 | 0 | tsk_error_reset(); |
2011 | 0 | } |
2012 | |
|
2013 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; |
2014 | 0 | return 0; |
2015 | 0 | } |
2016 | | |
2017 | | /** |
2018 | | * \internal |
2019 | | * Create an FS_INODE structure for the startup / boot file. |
2020 | | * |
2021 | | * @param hfs File system to analyze |
2022 | | * @param fs_file Structure to copy file information into. |
2023 | | * @return 1 on error and 0 on success |
2024 | | */ |
2025 | | static uint8_t |
2026 | | hfs_make_startfile(HFS_INFO * hfs, TSK_FS_FILE * fs_file) |
2027 | 0 | { |
2028 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; |
2029 | 0 | TSK_FS_ATTR *fs_attr; |
2030 | 0 | TSK_FS_ATTR_RUN *attr_run; |
2031 | 0 | unsigned char dummy1, dummy2; |
2032 | 0 | uint64_t dummy3; |
2033 | 0 | uint8_t result; |
2034 | |
|
2035 | 0 | if (tsk_verbose) |
2036 | 0 | tsk_fprintf(stderr, |
2037 | 0 | "hfs_make_startfile: Making virtual startup file\n"); |
2038 | |
|
2039 | 0 | if (hfs_make_specialbase(fs_file)) { |
2040 | 0 | error_returned(" - hfs_make_startfile"); |
2041 | 0 | return 1; |
2042 | 0 | } |
2043 | | |
2044 | 0 | fs_file->meta->addr = HFS_STARTUP_FILE_ID; |
2045 | 0 | strncpy(fs_file->meta->name2->name, HFS_STARTUPNAME, |
2046 | 0 | TSK_FS_META_NAME_LIST_NSIZE); |
2047 | |
|
2048 | 0 | fs_file->meta->size = |
2049 | 0 | tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz); |
2050 | |
|
2051 | 0 | if (((attr_run = |
2052 | 0 | hfs_extents_to_attr(fs, hfs->fs->start_file.extents, |
2053 | 0 | 0)) == NULL) && (tsk_error_get_errno() != 0)) { |
2054 | 0 | error_returned(" - hfs_make_startfile"); |
2055 | 0 | return 1; |
2056 | 0 | } |
2057 | | |
2058 | 0 | if ((fs_attr = |
2059 | 0 | tsk_fs_attrlist_getnew(fs_file->meta->attr, |
2060 | 0 | TSK_FS_ATTR_NONRES)) == NULL) { |
2061 | 0 | error_returned(" - hfs_make_startfile"); |
2062 | 0 | tsk_fs_attr_run_free(attr_run); |
2063 | 0 | return 1; |
2064 | 0 | } |
2065 | | |
2066 | | // initialize the data run |
2067 | 0 | if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL, |
2068 | 0 | TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA, |
2069 | 0 | tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz), |
2070 | 0 | tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz), |
2071 | 0 | tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz), TSK_FS_ATTR_FLAG_NONE, 0)) { |
2072 | 0 | error_returned(" - hfs_make_startfile"); |
2073 | 0 | tsk_fs_attr_run_free(attr_run); |
2074 | 0 | return 1; |
2075 | 0 | } |
2076 | | |
2077 | | // see if the startup file has additional runs
2078 | 0 | if (hfs_ext_find_extent_record_attr(hfs, HFS_STARTUP_FILE_ID, fs_attr, |
2079 | 0 | TRUE)) { |
2080 | 0 | error_returned(" - hfs_make_startfile"); |
2081 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; |
2082 | 0 | return 1; |
2083 | 0 | } |
2084 | | |
2085 | 0 | result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3); |
2086 | 0 | if (result != 0) { |
2087 | 0 | if (tsk_verbose) |
2088 | 0 | tsk_fprintf(stderr, |
2089 | 0 | "WARNING: Extended attributes failed to load for the Start file.\n"); |
2090 | 0 | tsk_error_reset(); |
2091 | 0 | } |
2092 | |
|
2093 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; |
2094 | 0 | return 0; |
2095 | 0 | } |
2096 | | |
2097 | | |
2098 | | /** |
2099 | | * \internal |
2100 | | * Create an FS_INODE structure for the attributes file. |
2101 | | * |
2102 | | * @param hfs File system to analyze |
2103 | | * @param fs_file Structure to copy file information into. |
2104 | | * @return 1 on error and 0 on success |
2105 | | */ |
2106 | | static uint8_t |
2107 | | hfs_make_attrfile(HFS_INFO * hfs, TSK_FS_FILE * fs_file) |
2108 | 0 | { |
2109 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; |
2110 | 0 | TSK_FS_ATTR *fs_attr; |
2111 | 0 | TSK_FS_ATTR_RUN *attr_run; |
2112 | |
|
2113 | 0 | if (tsk_verbose) |
2114 | 0 | tsk_fprintf(stderr, |
2115 | 0 | "hfs_make_attrfile: Making virtual attributes file\n"); |
2116 | |
|
2117 | 0 | if (hfs_make_specialbase(fs_file)) { |
2118 | 0 | error_returned(" - hfs_make_attrfile"); |
2119 | 0 | return 1; |
2120 | 0 | } |
2121 | | |
2122 | 0 | fs_file->meta->addr = HFS_ATTRIBUTES_FILE_ID; |
2123 | 0 | strncpy(fs_file->meta->name2->name, HFS_ATTRIBUTESNAME, |
2124 | 0 | TSK_FS_META_NAME_LIST_NSIZE); |
2125 | |
|
2126 | 0 | fs_file->meta->size = |
2127 | 0 | tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz); |
2128 | |
|
2129 | 0 | if (((attr_run = |
2130 | 0 | hfs_extents_to_attr(fs, hfs->fs->attr_file.extents, |
2131 | 0 | 0)) == NULL) && (tsk_error_get_errno() != 0)) { |
2132 | 0 | error_returned(" - hfs_make_attrfile"); |
2133 | 0 | return 1; |
2134 | 0 | } |
2135 | | |
2136 | 0 | if ((fs_attr = |
2137 | 0 | tsk_fs_attrlist_getnew(fs_file->meta->attr, |
2138 | 0 | TSK_FS_ATTR_NONRES)) == NULL) { |
2139 | 0 | error_returned(" - hfs_make_attrfile"); |
2140 | 0 | tsk_fs_attr_run_free(attr_run); |
2141 | 0 | return 1; |
2142 | 0 | } |
2143 | | |
2144 | | // initialize the data run |
2145 | 0 | if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL, |
2146 | 0 | TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA, |
2147 | 0 | tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz), |
2148 | 0 | tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz), |
2149 | 0 | tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz), TSK_FS_ATTR_FLAG_NONE, 0)) { |
2150 | 0 | error_returned(" - hfs_make_attrfile"); |
2151 | 0 | tsk_fs_attr_run_free(attr_run); |
2152 | 0 | return 1; |
2153 | 0 | } |
2154 | | |
2155 | | // see if the attributes file has additional runs
2156 | 0 | if (hfs_ext_find_extent_record_attr(hfs, HFS_ATTRIBUTES_FILE_ID, |
2157 | 0 | fs_attr, TRUE)) { |
2158 | 0 | error_returned(" - hfs_make_attrfile"); |
2159 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; |
2160 | 0 | return 1; |
2161 | 0 | } |
2162 | | |
2163 | | //hfs_load_extended_attrs(fs_file); |
2164 | | |
2165 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; |
2166 | 0 | return 0; |
2167 | 0 | } |
2168 | | |
2169 | | |
2170 | | |
2171 | | /** |
2172 | | * \internal |
2173 | | * Create an FS_FILE structure for the BadBlocks file. |
2174 | | * |
2175 | | * @param hfs File system to analyze |
2176 | | * @param fs_file Structure to copy file information into. |
2177 | | * @return 1 on error and 0 on success |
2178 | | */ |
2179 | | static uint8_t |
2180 | | hfs_make_badblockfile(HFS_INFO * hfs, TSK_FS_FILE * fs_file) |
2181 | 0 | { |
2182 | 0 | TSK_FS_ATTR *fs_attr; |
2183 | 0 | unsigned char dummy1, dummy2; |
2184 | 0 | uint64_t dummy3; |
2185 | 0 | uint8_t result; |
2186 | |
|
2187 | 0 | if (tsk_verbose) |
2188 | 0 | tsk_fprintf(stderr, |
2189 | 0 | "hfs_make_badblockfile: Making virtual badblock file\n"); |
2190 | |
|
2191 | 0 | if (hfs_make_specialbase(fs_file)) { |
2192 | 0 | error_returned(" - hfs_make_badblockfile"); |
2193 | 0 | return 1; |
2194 | 0 | } |
2195 | | |
2196 | 0 | fs_file->meta->addr = HFS_BAD_BLOCK_FILE_ID; |
2197 | 0 | strncpy(fs_file->meta->name2->name, HFS_BAD_BLOCK_FILE_NAME, |
2198 | 0 | TSK_FS_META_NAME_LIST_NSIZE); |
2199 | |
|
2200 | 0 | fs_file->meta->size = 0; |
2201 | |
|
2202 | 0 | if ((fs_attr = |
2203 | 0 | tsk_fs_attrlist_getnew(fs_file->meta->attr, |
2204 | 0 | TSK_FS_ATTR_NONRES)) == NULL) { |
2205 | 0 | error_returned(" - hfs_make_badblockfile"); |
2206 | 0 | return 1; |
2207 | 0 | } |
2208 | | |
2209 | | // add the run to the file. |
2210 | 0 | if (tsk_fs_attr_set_run(fs_file, fs_attr, NULL, NULL, |
2211 | 0 | TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA, |
2212 | 0 | fs_file->meta->size, fs_file->meta->size, fs_file->meta->size, |
2213 | 0 | TSK_FS_ATTR_FLAG_NONE, 0)) { |
2214 | 0 | error_returned(" - hfs_make_badblockfile"); |
2215 | 0 | return 1; |
2216 | 0 | } |
2217 | | |
2218 | | // see if file has additional runs |
2219 | 0 | if (hfs_ext_find_extent_record_attr(hfs, HFS_BAD_BLOCK_FILE_ID, |
2220 | 0 | fs_attr, TRUE)) { |
2221 | 0 | error_returned(" - hfs_make_badblockfile"); |
2222 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; |
2223 | 0 | return 1; |
2224 | 0 | } |
2225 | | |
2226 | | /* @@@ We have a chicken and egg problem here... The current design of |
2227 | | * fs_attr_set() requires the size to be set, but we don't know the size
2228 | | * until we look into the extents file (which adds to an attribute...).
2229 | | * This does not seem to be the best design... need a way to test this. */
2230 | 0 | fs_file->meta->size = fs_attr->nrd.initsize; |
2231 | 0 | fs_attr->size = fs_file->meta->size; |
2232 | 0 | fs_attr->nrd.allocsize = fs_file->meta->size; |
2233 | |
|
2234 | 0 | result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3); |
2235 | 0 | if (result != 0) { |
2236 | 0 | if (tsk_verbose) |
2237 | 0 | tsk_fprintf(stderr, |
2238 | 0 | "WARNING: Extended attributes failed to load for the BadBlocks file.\n"); |
2239 | 0 | tsk_error_reset(); |
2240 | 0 | } |
2241 | |
|
2242 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; |
2243 | 0 | return 0; |
2244 | 0 | } |
2245 | | |
2246 | | |
2247 | | /** \internal |
2248 | | * Copy the catalog file or folder record entry into a TSK data structure. |
2249 | | * @param a_hfs File system being analyzed |
2250 | | * @param a_hfs_entry Catalog record entry (HFS_ENTRY *) |
2251 | | * @param a_fs_file Structure to copy data into (TSK_FS_FILE *) |
2252 | | * @returns 1 on error and 0 on success.
2253 | | */ |
2254 | | static uint8_t |
2255 | | hfs_dinode_copy(HFS_INFO * a_hfs, const HFS_ENTRY * a_hfs_entry, |
2256 | | TSK_FS_FILE * a_fs_file) |
2257 | 0 | { |
2258 | | |
2259 | | // Note, a_hfs_entry->cat is really of type hfs_file. But, hfs_file_folder is a union |
2260 | | // of that type with hfs_folder. Both hfs_file and hfs_folder have the same first member.
2261 | | // So, this cast is appropriate. |
2262 | 0 | const hfs_file_folder *a_entry = |
2263 | 0 | (hfs_file_folder *) & (a_hfs_entry->cat); |
2264 | 0 | const hfs_file_fold_std *std; |
2265 | 0 | TSK_FS_META *a_fs_meta = a_fs_file->meta; |
2266 | 0 | TSK_FS_INFO *fs; |
2267 | 0 | uint16_t hfsmode; |
2268 | 0 | TSK_INUM_T iStd; // the inum (or CNID) that occurs in the standard file metadata |
2269 | |
|
2270 | 0 | if (a_entry == NULL) { |
2271 | 0 | error_detected(TSK_ERR_FS_ARG, |
2272 | 0 | "hfs_dinode_copy: a_entry = a_hfs_entry->cat is NULL"); |
2273 | 0 | return 1; |
2274 | 0 | } |
2275 | | |
2276 | 0 | fs = (TSK_FS_INFO *) & a_hfs->fs_info; |
2277 | | |
2278 | | |
2279 | | // Just a sanity check. The inum (or cnid) occurs in two places in the |
2280 | | // entry data structure. |
2281 | 0 | iStd = tsk_getu32(fs->endian, a_entry->file.std.cnid); |
2282 | 0 | if (iStd != a_hfs_entry->inum) { |
2283 | 0 | if (tsk_verbose) |
2284 | 0 | tsk_fprintf(stderr, |
2285 | 0 | "WARNING: hfs_dinode_copy: HFS_ENTRY with conflicting values for inum (or cnid).\n"); |
2286 | 0 | } |
2287 | |
|
2288 | 0 | if (a_fs_meta == NULL) { |
2289 | 0 | tsk_error_set_errno(TSK_ERR_FS_ARG); |
2290 | 0 | tsk_error_set_errstr("hfs_dinode_copy: a_fs_meta is NULL"); |
2291 | 0 | return 1; |
2292 | 0 | } |
2293 | | |
2294 | | // both files and folders start off the same |
2295 | 0 | std = &(a_entry->file.std); |
2296 | |
|
2297 | 0 | if (tsk_verbose) |
2298 | 0 | tsk_fprintf(stderr, |
2299 | 0 | "hfs_dinode_copy: called for file/folder %" PRIu32 "\n", |
2300 | 0 | tsk_getu32(fs->endian, std->cnid)); |
2301 | |
|
2302 | 0 | if (a_fs_meta->content_len < HFS_FILE_CONTENT_LEN) { |
2303 | 0 | if ((a_fs_meta = |
2304 | 0 | tsk_fs_meta_realloc(a_fs_meta, |
2305 | 0 | HFS_FILE_CONTENT_LEN)) == NULL) { |
2306 | 0 | return 1; |
2307 | 0 | } |
2308 | 0 | } |
2309 | 0 | a_fs_meta->attr_state = TSK_FS_META_ATTR_EMPTY; |
2310 | 0 | if (a_fs_meta->attr) { |
2311 | 0 | tsk_fs_attrlist_markunused(a_fs_meta->attr); |
2312 | 0 | } |
2313 | | |
2314 | | |
2315 | | /* |
2316 | | * Copy the file type specific stuff first |
2317 | | */ |
2318 | 0 | hfsmode = tsk_getu16(fs->endian, std->perm.mode); |
2319 | |
|
2320 | 0 | if (tsk_getu16(fs->endian, std->rec_type) == HFS_FOLDER_RECORD) { |
2321 | | // set the type if the mode does not specify one
2322 | 0 | if ((hfsmode & HFS_IN_IFMT) == 0) |
2323 | 0 | a_fs_meta->type = TSK_FS_META_TYPE_DIR; |
2324 | 0 | a_fs_meta->size = 0; |
2325 | 0 | memset(a_fs_meta->content_ptr, 0, HFS_FILE_CONTENT_LEN); |
2326 | 0 | } |
2327 | 0 | else if (tsk_getu16(fs->endian, std->rec_type) == HFS_FILE_RECORD) { |
2328 | 0 | hfs_fork *fork; |
2329 | | // set the type if the mode does not specify one
2330 | 0 | if ((hfsmode & HFS_IN_IFMT) == 0) |
2331 | 0 | a_fs_meta->type = TSK_FS_META_TYPE_REG; |
2332 | 0 | a_fs_meta->size = |
2333 | 0 | tsk_getu64(fs->endian, a_entry->file.data.logic_sz); |
2334 | | |
2335 | | // copy the data and resource forks |
2336 | 0 | fork = (hfs_fork *) a_fs_meta->content_ptr; |
2337 | 0 | memcpy(fork, &(a_entry->file.data), sizeof(hfs_fork)); |
2338 | 0 | memcpy(&fork[1], &(a_entry->file.resource), sizeof(hfs_fork)); |
2339 | 0 | } |
2340 | 0 | else { |
2341 | 0 | if (tsk_verbose) |
2342 | 0 | tsk_fprintf(stderr, |
2343 | 0 | "hfs_dinode_copy error: catalog entry is neither file nor folder\n"); |
2344 | 0 | return 1; |
2345 | 0 | } |
2346 | | |
2347 | | /* |
2348 | | * Copy the standard stuff. |
2349 | | * Use default values (as defined in spec) if mode is not defined. |
2350 | | */ |
2351 | 0 | if ((hfsmode & HFS_IN_IFMT) == 0) { |
2352 | 0 | a_fs_meta->mode = TSK_FS_META_MODE_UNSPECIFIED; |
2353 | 0 | a_fs_meta->uid = 99; |
2354 | 0 | a_fs_meta->gid = 99; |
2355 | 0 | } |
2356 | 0 | else { |
2357 | 0 | a_fs_meta->mode = hfs_mode_to_tsk_mode(hfsmode); |
2358 | 0 | a_fs_meta->type = hfs_mode_to_tsk_meta_type(hfsmode); |
2359 | 0 | a_fs_meta->uid = tsk_getu32(fs->endian, std->perm.owner); |
2360 | 0 | a_fs_meta->gid = tsk_getu32(fs->endian, std->perm.group); |
2361 | 0 | } |
2362 | | |
2363 | | // this field is set only for "indirect" entries |
2364 | 0 | if (tsk_getu32(fs->endian, std->perm.special.nlink)) |
2365 | 0 | a_fs_meta->nlink = tsk_getu32(fs->endian, std->perm.special.nlink); |
2366 | 0 | else |
2367 | 0 | a_fs_meta->nlink = 1; |
2368 | |
|
2369 | 0 | a_fs_meta->mtime = |
2370 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->cmtime)); |
2371 | 0 | a_fs_meta->atime = |
2372 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->atime)); |
2373 | 0 | a_fs_meta->crtime = |
2374 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->crtime)); |
2375 | 0 | a_fs_meta->ctime = |
2376 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->amtime)); |
2377 | 0 | a_fs_meta->time2.hfs.bkup_time = |
2378 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->bkup_date)); |
2379 | 0 | a_fs_meta->mtime_nano = a_fs_meta->atime_nano = a_fs_meta->ctime_nano = |
2380 | 0 | a_fs_meta->crtime_nano = 0; |
2381 | 0 | a_fs_meta->time2.hfs.bkup_time_nano = 0; |
2382 | |
|
2383 | 0 | a_fs_meta->addr = tsk_getu32(fs->endian, std->cnid); |
2384 | | |
2385 | | // All entries here are used. |
2386 | 0 | a_fs_meta->flags = (TSK_FS_META_FLAG_ENUM) (TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_USED); |
2387 | |
|
2388 | 0 | if (std->perm.o_flags & HFS_PERM_OFLAG_COMPRESSED) |
2389 | 0 | a_fs_meta->flags = (TSK_FS_META_FLAG_ENUM) (a_fs_meta->flags | TSK_FS_META_FLAG_COMP); |
2390 | | |
2391 | | // We copy this inum (or cnid) here, because this file *might* have been a hard link. In |
2392 | | // that case, we want to make sure that a_fs_file points consistently to the target of the |
2393 | | // link. |
2394 | | |
2395 | | //if (a_fs_file->name != NULL) { |
2396 | | // a_fs_file->name->meta_addr = a_fs_meta->addr; |
2397 | | //} |
2398 | | |
2399 | | /* TODO @@@ could fill in name2 with this entry's name and parent inode |
2400 | | from Catalog entry */ |
2401 | | |
2402 | | /* set the link string (if the file is a link) |
2403 | | * The size check is a sanity check so that we don't try to allocate |
2404 | | * a huge amount of memory for a bad inode value |
2405 | | */ |
2406 | 0 | if ((a_fs_meta->type == TSK_FS_META_TYPE_LNK) && |
2407 | 0 | (a_fs_meta->size >= 0) && (a_fs_meta->size < HFS_MAXPATHLEN)) { |
2408 | |
|
2409 | 0 | ssize_t bytes_read; |
2410 | |
|
2411 | 0 | a_fs_meta->link = (char*) tsk_malloc((size_t) a_fs_meta->size + 1); |
2412 | 0 | if (a_fs_meta->link == NULL) |
2413 | 0 | return 1; |
2414 | | |
2415 | 0 | bytes_read = tsk_fs_file_read(a_fs_file, (TSK_OFF_T) 0, |
2416 | 0 | a_fs_meta->link, (size_t) a_fs_meta->size, |
2417 | 0 | TSK_FS_FILE_READ_FLAG_NONE); |
2418 | 0 | a_fs_meta->link[a_fs_meta->size] = '\0'; |
2419 | |
|
2420 | 0 | if (bytes_read != a_fs_meta->size) { |
2421 | 0 | if (tsk_verbose) |
2422 | 0 | tsk_fprintf(stderr, |
2423 | 0 | "hfs_dinode_copy: failed to read contents of symbolic link; " |
2424 | 0 | "expected %u bytes but tsk_fs_file_read() returned %u\n", |
2425 | 0 | a_fs_meta->size, bytes_read); |
2426 | 0 | free(a_fs_meta->link); |
2427 | 0 | a_fs_meta->link = NULL; |
2428 | 0 | return 1; |
2429 | 0 | } |
2430 | 0 | } |
2431 | | |
2432 | 0 | return 0; |
2433 | 0 | } |
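// ---- Editorial note and minimal sketch (not part of TSK) ----------------------
// hfs_dinode_copy() above converts every timestamp with hfs_convert_2_unix_time().
// HFS+ stores times as unsigned 32-bit seconds since Jan 1, 1904 GMT, so the
// conversion amounts to subtracting the 2,082,844,800-second gap to the Unix
// epoch; a stand-alone equivalent, assuming pre-1970 dates clamp to 0, is:
static uint32_t
hfs_time_to_unix_sketch(uint32_t hfs_secs)
{
    const uint32_t secs_1904_to_1970 = 2082844800U;
    return (hfs_secs < secs_1904_to_1970) ? 0 : hfs_secs - secs_1904_to_1970;
}
// -----------------------------------------------------------------------------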
2434 | | |
2435 | | |
2436 | | /** \internal |
2437 | | * Load a catalog file entry and save it in the TSK_FS_FILE structure. |
2438 | | * |
2439 | | * @param fs File system to read from. |
2440 | | * @param a_fs_file Structure to read into. |
2441 | | * @param inum File address to load |
2442 | | * @returns 1 on error |
2443 | | */ |
2444 | | static uint8_t |
2445 | | hfs_inode_lookup(TSK_FS_INFO * fs, TSK_FS_FILE * a_fs_file, |
2446 | | TSK_INUM_T inum) |
2447 | 0 | { |
2448 | 0 | HFS_INFO *hfs = (HFS_INFO *) fs; |
2449 | 0 | HFS_ENTRY entry; |
2450 | |
|
2451 | 0 | if (a_fs_file == NULL) { |
2452 | 0 | tsk_error_set_errno(TSK_ERR_FS_ARG); |
2453 | 0 | tsk_error_set_errstr("hfs_inode_lookup: fs_file is NULL"); |
2454 | 0 | return 1; |
2455 | 0 | } |
2456 | | |
2457 | 0 | if (a_fs_file->meta == NULL) { |
2458 | 0 | a_fs_file->meta = tsk_fs_meta_alloc(HFS_FILE_CONTENT_LEN); |
2459 | 0 | } |
2460 | |
|
2461 | 0 | if (a_fs_file->meta == NULL) { |
2462 | 0 | return 1; |
2463 | 0 | } |
2464 | 0 | else { |
2465 | 0 | tsk_fs_meta_reset(a_fs_file->meta); |
2466 | 0 | } |
2467 | | |
2468 | 0 | if (tsk_verbose) |
2469 | 0 | tsk_fprintf(stderr, "hfs_inode_lookup: looking up %" PRIuINUM "\n", |
2470 | 0 | inum); |
2471 | | |
2472 | | // @@@ Will need to add orphan stuff here too |
2473 | | |
2474 | | /* First see if this is a special entry |
2475 | | * the special ones have their metadata stored in the volume header */ |
2476 | 0 | if (inum == HFS_EXTENTS_FILE_ID) { |
2477 | 0 | if (!hfs->has_extents_file) { |
2478 | 0 | error_detected(TSK_ERR_FS_INODE_NUM, |
2479 | 0 | "Extents File not present"); |
2480 | 0 | return 1; |
2481 | 0 | } |
2482 | | |
2483 | 0 | return hfs_make_extents(hfs, a_fs_file); |
2484 | 0 | } |
2485 | 0 | else if (inum == HFS_CATALOG_FILE_ID) { |
2486 | 0 | return hfs_make_catalog(hfs, a_fs_file); |
2487 | 0 | } |
2488 | 0 | else if (inum == HFS_BAD_BLOCK_FILE_ID) { |
2489 | | // Note: the Extents file and the BadBlocks file are really the same. |
2490 | 0 | if (!hfs->has_extents_file) { |
2491 | 0 | error_detected(TSK_ERR_FS_INODE_NUM, |
2492 | 0 | "BadBlocks File not present"); |
2493 | 0 | return 1; |
2494 | 0 | } |
2495 | 0 | return hfs_make_badblockfile(hfs, a_fs_file); |
2496 | 0 | } |
2497 | 0 | else if (inum == HFS_ALLOCATION_FILE_ID) { |
2498 | 0 | return hfs_make_blockmap(hfs, a_fs_file); |
2499 | 0 | } |
2500 | 0 | else if (inum == HFS_STARTUP_FILE_ID) { |
2501 | 0 | if (!hfs->has_startup_file) { |
2502 | 0 | error_detected(TSK_ERR_FS_INODE_NUM, |
2503 | 0 | "Startup File not present"); |
2504 | 0 | return 1; |
2505 | 0 | } |
2506 | 0 | return hfs_make_startfile(hfs, a_fs_file); |
2507 | 0 | } |
2508 | 0 | else if (inum == HFS_ATTRIBUTES_FILE_ID) { |
2509 | 0 | if (!hfs->has_attributes_file) { |
2510 | 0 | error_detected(TSK_ERR_FS_INODE_NUM, |
2511 | 0 | "Attributes File not present"); |
2512 | 0 | return 1; |
2513 | 0 | } |
2514 | 0 | return hfs_make_attrfile(hfs, a_fs_file); |
2515 | 0 | } |
2516 | | |
2517 | | /* Lookup inode and store it in the HFS structure */ |
2518 | 0 | if (hfs_cat_file_lookup(hfs, inum, &entry, TRUE)) { |
2519 | 0 | return 1; |
2520 | 0 | } |
2521 | | |
2522 | | /* Copy the structure in hfs to generic fs_inode */ |
2523 | 0 | if (hfs_dinode_copy(hfs, &entry, a_fs_file)) { |
2524 | 0 | return 1; |
2525 | 0 | } |
2526 | | |
2527 | | /* If this is potentially a compressed file, its |
2528 | | * actual size is unknown until we examine the |
2529 | | * extended attributes */ |
2530 | 0 | if ((a_fs_file->meta->size == 0) && |
2531 | 0 | (a_fs_file->meta->type == TSK_FS_META_TYPE_REG) && |
2532 | 0 | (a_fs_file->meta->attr_state != TSK_FS_META_ATTR_ERROR) && |
2533 | 0 | ((a_fs_file->meta->attr_state != TSK_FS_META_ATTR_STUDIED) || |
2534 | 0 | (a_fs_file->meta->attr == NULL))) { |
2535 | 0 | hfs_load_attrs(a_fs_file); |
2536 | 0 | } |
2537 | |
|
2538 | 0 | return 0; |
2539 | 0 | } |
2540 | | |
2541 | | typedef struct { |
2542 | | uint32_t offset; |
2543 | | uint32_t length; |
2544 | | } CMP_OFFSET_ENTRY; |
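// Editorial annotation (not part of TSK): for both block-table readers below,
// "offset" is the position of a compressed chunk relative to the table offset
// that each reader reports via tableOffsetOut (the start of the offset table
// for ZLIB, the start of the attribute for LZVN), and "length" is that chunk's
// compressed size in bytes.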
2545 | | |
2546 | | |
2547 | | /** |
2548 | | * \internal |
2549 | | * Reads the ZLIB compression block table from the attribute. |
2550 | | * |
2551 | | * @param rAttr the attribute to read
2552 | | * @param tableSizeOut [out] number of entries in the block table
2553 | | * @param tableOffsetOut [out] offset of the block table in the
2554 | | * resource fork
2555 | | * @return the block table on success, or nullptr on error
2556 | | */ |
2557 | | std::unique_ptr<CMP_OFFSET_ENTRY[]> |
2558 | | hfs_read_zlib_block_table( |
2559 | | const TSK_FS_ATTR *rAttr, |
2560 | | uint32_t* tableSizeOut, |
2561 | | uint32_t* tableOffsetOut) |
2562 | 0 | { |
2563 | 0 | ssize_t attrReadResult; |
2564 | 0 | hfs_resource_fork_header rfHeader; |
2565 | 0 | uint32_t dataOffset; |
2566 | 0 | uint32_t offsetTableOffset; |
2567 | 0 | char fourBytes[4]; // Size of the offset table, little endian |
2568 | 0 | uint32_t tableSize; // Size of the offset table |
2569 | 0 | size_t indx; |
2570 | | |
2571 | | // Read the resource fork header |
2572 | 0 | attrReadResult = tsk_fs_attr_read(rAttr, 0, (char *) &rfHeader, |
2573 | 0 | sizeof(hfs_resource_fork_header), TSK_FS_FILE_READ_FLAG_NONE); |
2574 | 0 | if (attrReadResult != sizeof(hfs_resource_fork_header)) { |
2575 | 0 | error_returned |
2576 | 0 | (" %s: trying to read the resource fork header", __func__); |
2577 | 0 | return nullptr; |
2578 | 0 | } |
2579 | | |
2580 | | // Begin to parse the resource fork. For now, we just need the data offset. |
2581 | 0 | dataOffset = tsk_getu32(TSK_BIG_ENDIAN, rfHeader.dataOffset); |
2582 | | |
2583 | | // The resource's data begins with an offset table, which defines blocks |
2584 | | // of (optionally) zlib-compressed data (so that the OS can do file seeks |
2585 | | // efficiently; each uncompressed block is 64KB). |
2586 | 0 | offsetTableOffset = dataOffset + 4; |
2587 | | |
2588 | | // read 4 bytes, the number of table entries, little endian |
2589 | 0 | attrReadResult = |
2590 | 0 | tsk_fs_attr_read(rAttr, offsetTableOffset, fourBytes, 4, |
2591 | 0 | TSK_FS_FILE_READ_FLAG_NONE); |
2592 | 0 | if (attrReadResult != 4) { |
2593 | 0 | error_returned |
2594 | 0 | (" %s: trying to read the offset table size, " |
2595 | 0 | "return value of %u should have been 4", __func__, attrReadResult); |
2596 | 0 | return nullptr; |
2597 | 0 | } |
2598 | 0 | tableSize = tsk_getu32(TSK_LIT_ENDIAN, fourBytes); |
2599 | | |
2600 | | // Each table entry is 8 bytes long |
2601 | 0 | std::unique_ptr<char[]> offsetTableData{new(std::nothrow) char[tableSize * 8]}; |
2602 | 0 | if (!offsetTableData) { |
2603 | 0 | error_returned |
2604 | 0 | (" %s: space for the offset table raw data", __func__); |
2605 | 0 | return nullptr; |
2606 | 0 | } |
2607 | | |
2608 | 0 | std::unique_ptr<CMP_OFFSET_ENTRY[]> offsetTable{new(std::nothrow) CMP_OFFSET_ENTRY[tableSize]}; |
2609 | 0 | if (!offsetTable) { |
2610 | 0 | error_returned |
2611 | 0 | (" %s: space for the offset table", __func__); |
2612 | 0 | return nullptr; |
2613 | 0 | } |
2614 | | |
2615 | 0 | attrReadResult = tsk_fs_attr_read(rAttr, offsetTableOffset + 4, |
2616 | 0 | offsetTableData.get(), tableSize * 8, TSK_FS_FILE_READ_FLAG_NONE); |
2617 | 0 | if (attrReadResult != (ssize_t) tableSize * 8) { |
2618 | 0 | error_returned |
2619 | 0 | (" %s: reading in the compression offset table, " |
2620 | 0 | "return value %u should have been %u", __func__, attrReadResult, |
2621 | 0 | tableSize * 8); |
2622 | 0 | return nullptr; |
2623 | 0 | } |
2624 | | |
2625 | 0 | for (indx = 0; indx < tableSize; ++indx) { |
2626 | 0 | offsetTable[indx].offset = |
2627 | 0 | tsk_getu32(TSK_LIT_ENDIAN, offsetTableData.get() + indx * 8); |
2628 | 0 | offsetTable[indx].length = |
2629 | 0 | tsk_getu32(TSK_LIT_ENDIAN, offsetTableData.get() + indx * 8 + 4); |
2630 | 0 | } |
2631 | |
|
2632 | 0 | *tableSizeOut = tableSize; |
2633 | 0 | *tableOffsetOut = offsetTableOffset; |
2634 | 0 | return offsetTable; |
2635 | 0 | } |
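// ---- Editorial sketch (hypothetical helper, not part of TSK) ------------------
// decmpfs resource-fork data is divided into fixed 64 KiB (COMPRESSION_UNIT_SIZE)
// uncompressed units, so a reader maps an uncompressed file offset to a table
// index and then to the raw chunk using the entries returned above, much as
// read_and_decompress_block() does further down.
static int
hfs_locate_compressed_chunk_sketch(const CMP_OFFSET_ENTRY * table,
    uint32_t tableSize, uint32_t tableOffset, TSK_OFF_T unc_off,
    uint32_t * chunk_off, uint32_t * chunk_len)
{
    size_t indx = (size_t) (unc_off / COMPRESSION_UNIT_SIZE);

    if (indx >= tableSize)
        return 0;               // offset lies past the last compression unit

    *chunk_off = tableOffset + table[indx].offset;  // absolute offset in the fork/attribute
    *chunk_len = table[indx].length;
    return 1;
}
// -----------------------------------------------------------------------------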
2636 | | |
2637 | | |
2638 | | /** |
2639 | | * \internal |
2640 | | * Reads the LZVN compression block table from the attribute. |
2641 | | * |
2642 | | * @param rAttr the attribute to read
2643 | | * @param tableSizeOut [out] number of entries in the block table
2644 | | * @param tableOffsetOut [out] offset of the block table in the
2645 | | * attribute (always 0 for LZVN)
2646 | | * @return the block table on success, or nullptr on error
2647 | | */ |
2648 | | std::unique_ptr<CMP_OFFSET_ENTRY[]> |
2649 | | hfs_read_lzvn_block_table( |
2650 | | const TSK_FS_ATTR *rAttr, |
2651 | | uint32_t* tableSizeOut, |
2652 | | uint32_t* tableOffsetOut) |
2653 | 0 | { |
2654 | 0 | ssize_t attrReadResult; |
2655 | 0 | char fourBytes[4]; |
2656 | 0 | uint32_t tableDataSize; |
2657 | 0 | uint32_t tableSize; // Size of the offset table |
2658 | | |
2659 | | // The offset table is a sequence of 4-byte offsets of compressed |
2660 | | // blocks. The first 4-byte value is thus the offset of the first block,
2661 | | // but it also equals 4 times the number of entries in the table.
2662 | 0 | attrReadResult = tsk_fs_attr_read(rAttr, 0, fourBytes, 4, |
2663 | 0 | TSK_FS_FILE_READ_FLAG_NONE); |
2664 | 0 | if (attrReadResult != 4) { |
2665 | 0 | error_returned |
2666 | 0 | (" %s: trying to read the offset table size, " |
2667 | 0 | "return value of %u should have been 4", __func__, attrReadResult); |
2668 | 0 | return nullptr; |
2669 | 0 | } |
2670 | | |
2671 | 0 | tableDataSize = tsk_getu32(TSK_LIT_ENDIAN, fourBytes); |
2672 | |
|
2673 | 0 | std::unique_ptr<char[]> offsetTableData(new(std::nothrow) char[tableDataSize]); |
2674 | 0 | if (!offsetTableData) { |
2675 | 0 | error_returned |
2676 | 0 | (" %s: space for the offset table raw data", __func__); |
2677 | 0 | return nullptr; |
2678 | 0 | } |
2679 | | |
2680 | | // table entries are 4 bytes, last entry is end of data |
2681 | 0 | tableSize = tableDataSize / 4 - 1; |
2682 | |
|
2683 | 0 | std::unique_ptr<CMP_OFFSET_ENTRY[]> offsetTable(new(std::nothrow) CMP_OFFSET_ENTRY[tableSize]); |
2684 | 0 | if (!offsetTable) { |
2685 | 0 | error_returned |
2686 | 0 | (" %s: space for the offset table", __func__); |
2687 | 0 | return nullptr; |
2688 | 0 | } |
2689 | | |
2690 | 0 | attrReadResult = tsk_fs_attr_read(rAttr, 0, |
2691 | 0 | offsetTableData.get(), tableDataSize, TSK_FS_FILE_READ_FLAG_NONE); |
2692 | 0 | if (attrReadResult != (ssize_t) tableDataSize) { |
2693 | 0 | error_returned |
2694 | 0 | (" %s: reading in the compression offset table, " |
2695 | 0 | "return value %u should have been %u", __func__, attrReadResult, |
2696 | 0 | tableDataSize); |
2697 | 0 | return nullptr; |
2698 | 0 | } |
2699 | | |
2700 | 0 | uint32_t a = tableDataSize; |
2701 | 0 | uint32_t b; |
2702 | 0 | size_t i; |
2703 | |
|
2704 | 0 | for (i = 0; i < tableSize; ++i) { |
2705 | 0 | b = tsk_getu32(TSK_LIT_ENDIAN, offsetTableData.get() + 4*(i+1)); |
2706 | 0 | offsetTable[i].offset = a; |
2707 | 0 | offsetTable[i].length = b - a; |
2708 | 0 | a = b; |
2709 | 0 | } |
2710 | |
|
2711 | 0 | *tableSizeOut = tableSize; |
2712 | 0 | *tableOffsetOut = 0; |
2713 | 0 | return offsetTable; |
2714 | 0 | } |
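// ---- Editorial worked example (not part of TSK) -------------------------------
// For the layout handled above: if the raw little-endian table words are
// {16, 4096, 9000, 12288}, then tableDataSize = 16 (4 words), tableSize =
// 16/4 - 1 = 3, and the loop derives the chunks
//   [16, 4096)    -> offset 16,   length 4080
//   [4096, 9000)  -> offset 4096, length 4904
//   [9000, 12288) -> offset 9000, length 3288
// i.e. each (offset, length) pair comes from consecutive differences, and the
// last word marks the end of the compressed data rather than another chunk.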
2715 | | |
2716 | | /** |
2717 | | * \internal |
2718 | | * "Decompress" a block which was stored uncompressed. |
2719 | | * |
2720 | | * @param rawBuf the compressed data |
2721 | | * @param len length of the compressed data |
2722 | | * @param uncBuf the decompressed data |
2723 | | * @param uncLen length of the decompressed data |
2724 | | * @return 1 on success, 0 on error |
2725 | | */ |
2726 | 0 | static int hfs_decompress_noncompressed_block(char* rawBuf, uint32_t len, char* uncBuf, uint64_t* uncLen) { |
2727 | 0 | // actually an uncompressed block of data; just copy |
2728 | 0 | if (tsk_verbose) |
2729 | 0 | tsk_fprintf(stderr, |
2730 | 0 | "%s: Copying an uncompressed compression unit\n", __func__); |
2731 | 0 |
|
2732 | 0 | if ((len - 1) > COMPRESSION_UNIT_SIZE) { |
2733 | 0 | error_detected(TSK_ERR_FS_READ, |
2734 | 0 | "%s: uncompressed block length %u is longer " |
2735 | 0 | "than compression unit size %u", __func__, len - 1, |
2736 | 0 | COMPRESSION_UNIT_SIZE); |
2737 | 0 | return 0; |
2738 | 0 | } |
2739 | 0 | memcpy(uncBuf, rawBuf + 1, len - 1); |
2740 | 0 | *uncLen = len - 1; |
2741 | 0 | return 1; |
2742 | 0 | } |
2743 | | |
2744 | | |
2745 | | #ifdef HAVE_LIBZ |
2746 | | /** |
2747 | | * \internal |
2748 | | * Decompress a block which was stored with ZLIB. |
2749 | | * |
2750 | | * @param rawBuf the compressed data |
2751 | | * @param len length of the compressed data |
2752 | | * @param uncBuf the decompressed data |
2753 | | * @param uncLen length of the decompressed data |
2754 | | * @return 1 on success, 0 on error |
2755 | | */ |
2756 | | static int hfs_decompress_zlib_block(char* rawBuf, uint32_t len, char* uncBuf, uint64_t* uncLen) |
2757 | 0 | { |
2758 | 0 | // see if this block is compressed |
2759 | 0 | if (len > 0 && (rawBuf[0] & 0x0F) != 0x0F) { |
2760 | 0 | // Uncompress the chunk of data |
2761 | 0 | if (tsk_verbose) |
2762 | 0 | tsk_fprintf(stderr, |
2763 | 0 | "%s: Inflating the compression unit\n", __func__); |
2764 | 0 |
|
2765 | 0 | unsigned long bytesConsumed; |
2766 | 0 | int infResult = zlib_inflate(rawBuf, (uint64_t) len, |
2767 | 0 | uncBuf, (uint64_t) COMPRESSION_UNIT_SIZE, |
2768 | 0 | uncLen, &bytesConsumed); |
2769 | 0 | if (infResult != 0) { |
2770 | 0 | error_returned |
2771 | 0 | (" %s: zlib inflation (uncompression) failed", |
2772 | 0 | __func__, infResult); |
2773 | 0 | return 0; |
2774 | 0 | } |
2775 | 0 |
|
2776 | 0 | if (bytesConsumed != len) { |
2777 | 0 | error_detected(TSK_ERR_FS_READ, |
2778 | 0 | " %s, decompressor did not consume the whole compressed data", |
2779 | 0 | __func__); |
2780 | 0 | return 0; |
2781 | 0 | } |
2782 | 0 |
|
2783 | 0 | return 1; |
2784 | 0 | } |
2785 | 0 | else { |
2786 | 0 | // actually an uncompressed block of data; just copy |
2787 | 0 | return hfs_decompress_noncompressed_block(rawBuf, len, uncBuf, uncLen); |
2788 | 0 | } |
2789 | 0 | } |
2790 | | #endif |
2791 | | |
2792 | | |
2793 | | /** |
2794 | | * \internal |
2795 | | * Decompress a block which was stored with LZVN. |
2796 | | * |
2797 | | * @param rawBuf the compressed data |
2798 | | * @param len length of the compressed data |
2799 | | * @param uncBuf the decompressed data |
2800 | | * @param uncLen length of the decompressed data |
2801 | | * @return 1 on success, 0 on error |
2802 | | */ |
2803 | | static int hfs_decompress_lzvn_block(char* rawBuf, uint32_t len, char* uncBuf, uint64_t* uncLen) |
2804 | 0 | { |
2805 | 0 | // see if this block is compressed |
2806 | 0 | if (len > 0 && rawBuf[0] != 0x06) { |
2807 | 0 | *uncLen = lzvn_decode_buffer(uncBuf, COMPRESSION_UNIT_SIZE, rawBuf, len); |
2808 | 0 | return 1; // apparently this can't fail |
2809 | 0 | } |
2810 | 0 | else { |
2811 | 0 | // actually an uncompressed block of data; just copy |
2812 | 0 | return hfs_decompress_noncompressed_block(rawBuf, len, uncBuf, uncLen); |
2813 | 0 | } |
2814 | 0 | } |
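// ---- Editorial note (not part of TSK) ------------------------------------------
// The two decompressors above key off the first byte of each stored chunk: in the
// ZLIB variant a low nibble of 0x0F marks a chunk that was stored uncompressed
// (the payload follows that marker byte), and in the LZVN variant the byte 0x06
// plays the same role. Anything else is handed to the respective decoder.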
2815 | | |
2816 | | /** |
2817 | | * \internal |
2818 | | * Decompress a block. |
2819 | | * |
2820 | | * @param rAttr the attribute to read |
2821 | | * @param rawBuf the compressed data |
2822 | | * @param uncBuf the decompressed data |
2823 | | * @param offsetTable table of compressed block offsets |
2824 | | * @param offsetTableSize size of table of compressed block offsets |
2825 | | * @param offsetTableOffset offset of table of compressed block offsets |
2826 | | * @param indx index of block to read |
2827 | | * @param decompress_block pointer to decompression function |
2828 | | * @return decompressed size on success, -1 on error |
2829 | | */ |
2830 | | static ssize_t read_and_decompress_block( |
2831 | | const TSK_FS_ATTR* rAttr, |
2832 | | char* rawBuf, |
2833 | | char* uncBuf, |
2834 | | const CMP_OFFSET_ENTRY* offsetTable, |
2835 | | uint32_t offsetTableSize, |
2836 | | uint32_t offsetTableOffset, |
2837 | | size_t indx, |
2838 | | int (*decompress_block)(char* rawBuf, |
2839 | | uint32_t len, |
2840 | | char* uncBuf, |
2841 | | uint64_t* uncLen) |
2842 | | ) |
2843 | 0 | { |
2844 | 0 | // @@@ BC: Looks like we should have bounds checks that indx < offsetTableSize, but we should confirm |
2845 | 0 | ssize_t attrReadResult; |
2846 | 0 | uint32_t offset = offsetTableOffset + offsetTable[indx].offset; |
2847 | 0 | uint32_t len = offsetTable[indx].length; |
2848 | 0 | uint64_t uncLen; |
2849 | 0 |
|
2850 | 0 | if (tsk_verbose) |
2851 | 0 | tsk_fprintf(stderr, |
2852 | 0 | "%s: Reading compression unit %d, length %d\n", |
2853 | 0 | __func__, indx, len); |
2854 | 0 |
|
2855 | 0 | /* Github #383 referenced that if len is 0, then the below code causes |
2856 | 0 | * problems. Added this check, but I don't have data to verify this on. |
2857 | 0 | * It looks like it should at least not crash, but it isn't clear if it
2858 | 0 | * will also do the right thing, or whether it should actually break here
2859 | 0 | * instead. */ |
2860 | 0 | if (len == 0) { |
2861 | 0 | return 0; |
2862 | 0 | } |
2863 | 0 |
|
2864 | 0 | if (len > COMPRESSION_UNIT_SIZE + 1) { |
2865 | 0 | error_detected(TSK_ERR_FS_READ, |
2866 | 0 | "%s: block size is too large: %u", __func__, len); |
2867 | 0 | return -1; |
2868 | 0 | } |
2869 | 0 |
|
2870 | 0 | // Read in the block of compressed data |
2871 | 0 | attrReadResult = tsk_fs_attr_read(rAttr, offset, |
2872 | 0 | rawBuf, len, TSK_FS_FILE_READ_FLAG_NONE); |
2873 | 0 | if (attrReadResult != (ssize_t) len) { |
2874 | 0 | char msg[] = |
2875 | 0 | "%s%s: reading in the compression offset table, " |
2876 | 0 | "return value %u should have been %u"; |
2877 | 0 |
|
2878 | 0 | if (attrReadResult < 0 ) { |
2879 | 0 | error_returned(msg, " ", __func__, attrReadResult, len); |
2880 | 0 | } |
2881 | 0 | else { |
2882 | 0 | error_detected(TSK_ERR_FS_READ, "", __func__, attrReadResult, len); |
2883 | 0 | } |
2884 | 0 | return -1; |
2885 | 0 | } |
2886 | 0 |
|
2887 | 0 | if (!decompress_block(rawBuf, len, uncBuf, &uncLen)) { |
2888 | 0 | return -1; |
2889 | 0 | } |
2890 | 0 |
|
2891 | 0 | // Every unit except the last decompresses to exactly COMPRESSION_UNIT_SIZE bytes;
2892 | 0 | // the last unit holds the remainder (a full unit if the size is an exact multiple)
2893 | 0 | const uint32_t expUncLen = indx == offsetTableSize - 1 ? |
2894 | 0 | ((rAttr->fs_file->meta->size - 1) % COMPRESSION_UNIT_SIZE) + 1 : |
2895 | 0 | COMPRESSION_UNIT_SIZE; |
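// Worked example with illustrative numbers (assuming COMPRESSION_UNIT_SIZE is 64 KiB,
// i.e. 65536): a 150000-byte file spans three compression units; units 0 and 1 must
// each decompress to 65536 bytes, while the last unit (indx == offsetTableSize - 1)
// must decompress to ((150000 - 1) % 65536) + 1 = 18928 bytes. If the size were
// exactly 131072, the last unit would again be a full 65536 bytes.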
2896 | 0 |
|
2897 | 0 | if (uncLen != expUncLen) { |
2898 | 0 | error_detected(TSK_ERR_FS_READ, |
2899 | 0 | "%s: compressed block decompressed to %u bytes, " |
2900 | 0 | "should have been %u bytes", __func__, uncLen, expUncLen); |
2901 | 0 | return -1; |
2902 | 0 | } |
2903 | 0 |
|
2904 | 0 | // There are now uncLen bytes of uncompressed data available from |
2905 | 0 | // this comp unit. |
2906 | 0 | return (ssize_t)uncLen; |
2907 | 0 | } |
2908 | | |
2909 | | /** |
2910 | | * \internal |
2911 | | * Attr walk callback function for compressed resources |
2912 | | * |
2913 | | * @param fs_attr the attribute to read |
2914 | | * @param flags |
2915 | | * @param a_action action callback |
2916 | | * @param ptr context for the action callback |
2917 | | * @param read_block_table pointer to block table read function |
2918 | | * @param decompress_block pointer to decompression function |
2919 | | * @return 0 on success, 1 on error |
2920 | | */ |
2921 | | static uint8_t |
2922 | | hfs_attr_walk_compressed_rsrc( |
2923 | | const TSK_FS_ATTR * fs_attr, |
2924 | | [[maybe_unused]] int flags, |
2925 | | TSK_FS_FILE_WALK_CB a_action, |
2926 | | void *ptr, |
2927 | | std::unique_ptr<CMP_OFFSET_ENTRY[]> (*read_block_table)( |
2928 | | const TSK_FS_ATTR *rAttr, |
2929 | | uint32_t* tableSizeOut, |
2930 | | uint32_t* tableOffsetOut |
2931 | | ), |
2932 | | int (*decompress_block)( |
2933 | | char* rawBuf, |
2934 | | uint32_t len, |
2935 | | char* uncBuf, |
2936 | | uint64_t* uncLen |
2937 | | ) |
2938 | | ) |
2939 | 0 | { |
2940 | 0 | TSK_FS_INFO *fs; |
2941 | 0 | TSK_FS_FILE *fs_file; |
2942 | 0 | const TSK_FS_ATTR *rAttr; // resource fork attribute |
2943 | 0 | uint32_t offsetTableOffset; |
2944 | 0 | uint32_t offsetTableSize; // The number of table entries |
2945 | 0 | size_t indx; // index for looping over the offset table |
2946 | 0 | TSK_OFF_T off = 0; // the offset in the uncompressed data stream consumed thus far |
2947 | 0 |
|
2948 | 0 | if (tsk_verbose) |
2949 | 0 | tsk_fprintf(stderr, |
2950 | 0 | "%s: Entered, because this is a compressed file with compressed data in the resource fork\n", __func__); |
2951 | 0 |
|
2952 | 0 | // clean up any error messages that are lying around |
2953 | 0 | tsk_error_reset(); |
2954 | 0 | if ((fs_attr == NULL) || (fs_attr->fs_file == NULL) |
2955 | 0 | || (fs_attr->fs_file->meta == NULL) |
2956 | 0 | || (fs_attr->fs_file->fs_info == NULL)) { |
2957 | 0 | tsk_error_set_errno(TSK_ERR_FS_ARG); |
2958 | 0 | tsk_error_set_errstr("%s: Null arguments given\n", __func__); |
2959 | 0 | return 1; |
2960 | 0 | } |
2961 | 0 |
|
2962 | 0 | // Check that the ATTR being read is the main DATA resource, 4352-0,
2963 | 0 | // because this is the only one that can be compressed in HFS+
2964 | 0 | if ((fs_attr->id != HFS_FS_ATTR_ID_DATA) || |
2965 | 0 | (fs_attr->type != TSK_FS_ATTR_TYPE_HFS_DATA)) { |
2966 | 0 | error_detected(TSK_ERR_FS_ARG, |
2967 | 0 | "%s: arg specified an attribute %u-%u that is not the data fork, " |
2968 | 0 | "Only the data fork can be compressed.", __func__, fs_attr->type, |
2969 | 0 | fs_attr->id); |
2970 | 0 | return 1; |
2971 | 0 | } |
2972 | 0 |
|
2973 | 0 | /* This MUST be a compressed attribute */ |
2974 | 0 | if (!(fs_attr->flags & TSK_FS_ATTR_COMP)) { |
2975 | 0 | error_detected(TSK_ERR_FS_FWALK, |
2976 | 0 | "%s: called with non-special attribute: %x", |
2977 | 0 | __func__, fs_attr->flags); |
2978 | 0 | return 1; |
2979 | 0 | } |
2980 | 0 |
|
2981 | 0 | fs = fs_attr->fs_file->fs_info; |
2982 | 0 | fs_file = fs_attr->fs_file; |
2983 | 0 |
|
2984 | 0 | /******** Open the Resource Fork ***********/ |
2985 | 0 |
|
2986 | 0 | // find the attribute for the resource fork |
2987 | 0 | rAttr = |
2988 | 0 | tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_HFS_RSRC, |
2989 | 0 | HFS_FS_ATTR_ID_RSRC, TRUE); |
2990 | 0 | if (rAttr == NULL) { |
2991 | 0 | error_returned |
2992 | 0 | (" %s: could not get the attribute for the resource fork of the file", __func__); |
2993 | 0 | return 1; |
2994 | 0 | } |
2995 | 0 |
|
2996 | 0 | // read the offset table from the fork header |
2997 | 0 | std::unique_ptr<CMP_OFFSET_ENTRY[]> offsetTable = read_block_table( |
2998 | 0 | rAttr, &offsetTableSize, &offsetTableOffset |
2999 | 0 | ); |
3000 | 0 | if (!offsetTable) { |
3001 | 0 | return 1; |
3002 | 0 | } |
3003 | 0 |
|
3004 | 0 | // Allocate two buffers for the raw and uncompressed data |
3005 | 0 | /* Raw data can be COMPRESSION_UNIT_SIZE+1 bytes long when the unit is
3006 | 0 | * stored uncompressed, because a 1-byte flag precedes the data to
3007 | 0 | * indicate that it is not compressed. */
3008 | 0 | std::unique_ptr<char[]> rawBuf{new(std::nothrow) char[COMPRESSION_UNIT_SIZE + 1]}; |
3009 | 0 | if (!rawBuf) { |
3010 | 0 | error_returned |
3011 | 0 | (" %s: buffers for reading and uncompressing", __func__); |
3012 | 0 | return 1; |
3013 | 0 | } |
3014 | 0 |
|
3015 | 0 | std::unique_ptr<char[]> uncBuf{new(std::nothrow) char[COMPRESSION_UNIT_SIZE]}; |
3016 | 0 | if (!uncBuf) { |
3017 | 0 | error_returned |
3018 | 0 | (" %s: buffers for reading and uncompressing", __func__); |
3019 | 0 | return 1; |
3020 | 0 | } |
3021 | 0 |
|
3022 | 0 | // FOR entry in the table DO |
3023 | 0 | for (indx = 0; indx < offsetTableSize; ++indx) { |
3024 | 0 | ssize_t uncLen; // uncompressed length |
3025 | 0 | unsigned int blockSize; |
3026 | 0 | uint64_t lumpSize; |
3027 | 0 | uint64_t remaining; |
3028 | 0 | char *lumpStart; |
3029 | 0 |
|
3030 | 0 | switch ((uncLen = read_and_decompress_block( |
3031 | 0 | rAttr, rawBuf.get(), uncBuf.get(), |
3032 | 0 | offsetTable.get(), offsetTableSize, offsetTableOffset, indx, |
3033 | 0 | decompress_block))) |
3034 | 0 | { |
3035 | 0 | case -1: |
3036 | 0 | return 1; |
3037 | 0 | case 0: |
3038 | 0 | continue; |
3039 | 0 | default: |
3040 | 0 | break; |
3041 | 0 | } |
3042 | 0 |
|
3043 | 0 | // Call the a_action callback with "Lumps" |
3044 | 0 | // that are at most the block size. |
3045 | 0 | blockSize = fs->block_size; |
3046 | 0 | remaining = uncLen; |
3047 | 0 | lumpStart = uncBuf.get(); |
3048 | 0 |
|
3049 | 0 | while (remaining > 0) { |
3050 | 0 | int retval; // action return value |
3051 | 0 | lumpSize = remaining <= blockSize ? remaining : blockSize; |
3052 | 0 |
|
3053 | 0 | // Apply the callback function |
3054 | 0 | if (tsk_verbose) |
3055 | 0 | tsk_fprintf(stderr, |
3056 | 0 | "%s: Calling action on lump of size %" |
3057 | 0 | PRIu64 " offset %" PRIu64 " in the compression unit\n", |
3058 | 0 | __func__, lumpSize, uncLen - remaining); |
3059 | 0 | if (lumpSize > SIZE_MAX) { |
3060 | 0 | error_detected(TSK_ERR_FS_FWALK, |
3061 | 0 | " %s: lumpSize is too large for the action", __func__); |
3062 | 0 | return 1; |
3063 | 0 | } |
3064 | 0 |
|
3065 | 0 | retval = a_action(fs_attr->fs_file, off, 0, lumpStart, |
3066 | 0 | (size_t) lumpSize, // cast OK because of above test |
3067 | 0 | TSK_FS_BLOCK_FLAG_COMP, ptr); |
3068 | 0 |
|
3069 | 0 | if (retval == TSK_WALK_ERROR) { |
3070 | 0 | error_detected(TSK_ERR_FS | 201, |
3071 | 0 | "%s: callback returned an error", __func__); |
3072 | 0 | return 1; |
3073 | 0 | } |
3074 | 0 | else if (retval == TSK_WALK_STOP) { |
3075 | 0 | break; |
3076 | 0 | } |
3077 | 0 |
|
3078 | 0 | // Find the next lump |
3079 | 0 | off += lumpSize; |
3080 | 0 | remaining -= lumpSize; |
3081 | 0 | lumpStart += lumpSize; |
3082 | 0 | } |
3083 | 0 | } |
3084 | 0 |
|
3085 | 0 | return 0; |
3086 | 0 | } |
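/*
 * Usage sketch (hypothetical names, not part of the original source): a minimal
 * TSK_FS_FILE_WALK_CB that this walker could drive. Each decompressed "lump" is
 * delivered with a zero block address and TSK_FS_BLOCK_FLAG_COMP, exactly as
 * passed to a_action above.
 *
 *   typedef struct { uint64_t total; } my_walk_ctx;
 *
 *   static TSK_WALK_RET_ENUM
 *   my_count_cb(TSK_FS_FILE *a_fs_file, TSK_OFF_T a_off, TSK_DADDR_T a_addr,
 *       char *a_buf, size_t a_len, TSK_FS_BLOCK_FLAG_ENUM a_flags, void *a_ptr)
 *   {
 *       my_walk_ctx *ctx = (my_walk_ctx *) a_ptr;
 *       ctx->total += a_len;        // tally the decompressed bytes seen so far
 *       return TSK_WALK_CONT;       // keep walking
 *   }
 *
 * Passing the address of a my_walk_ctx as the ptr argument makes total equal
 * the number of decompressed bytes delivered through the callback.
 */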
3087 | | |
3088 | | |
3089 | | #ifdef HAVE_LIBZ |
3090 | | /** |
3091 | | * \internal |
3092 | | * Attr walk callback function for ZLIB compressed resources |
3093 | | * |
3094 | | * @param fs_attr the attribute to read |
3095 | | * @param flags |
3096 | | * @param a_action action callback |
3097 | | * @param ptr context for the action callback |
3098 | | * @return 0 on success, 1 on error |
3099 | | */ |
3100 | | UNUSED |
3101 | | static uint8_t |
3102 | | hfs_attr_walk_zlib_rsrc(const TSK_FS_ATTR * fs_attr, |
3103 | | int flags, TSK_FS_FILE_WALK_CB a_action, void *ptr) |
3104 | 0 | { |
3105 | 0 | return hfs_attr_walk_compressed_rsrc( |
3106 | 0 | fs_attr, flags, a_action, ptr, |
3107 | 0 | hfs_read_zlib_block_table, |
3108 | 0 | hfs_decompress_zlib_block |
3109 | 0 | ); |
3110 | 0 | } |
3111 | | #endif |
3112 | | |
3113 | | /** |
3114 | | * \internal |
3115 | | * Attr walk callback function for LZVN compressed resources |
3116 | | * |
3117 | | * @param fs_attr the attribute to read |
3118 | | * @param flags |
3119 | | * @param a_action action callback |
3120 | | * @param ptr context for the action callback |
3121 | | * @return 0 on success, 1 on error |
3122 | | */ |
3123 | | UNUSED |
3124 | | static uint8_t |
3125 | | hfs_attr_walk_lzvn_rsrc(const TSK_FS_ATTR * fs_attr, |
3126 | | int flags, TSK_FS_FILE_WALK_CB a_action, void *ptr) |
3127 | 0 | { |
3128 | 0 | return hfs_attr_walk_compressed_rsrc( |
3129 | 0 | fs_attr, flags, a_action, ptr, |
3130 | 0 | hfs_read_lzvn_block_table, |
3131 | 0 | hfs_decompress_lzvn_block |
3132 | 0 | ); |
3133 | 0 | } |
3134 | | |
3135 | | |
3136 | | /** |
3137 | | * \internal |
3138 | | * Read a compressed resource |
3139 | | * |
3140 | | * @param fs_attr the attribute to read |
3141 | | * @param a_offset the offset from which to read |
3142 | | * @param a_buf the buffer into which to read |
3143 | | * @param a_len the length of the buffer |
3144 | | * @param read_block_table pointer to block table read function |
3145 | | * @param decompress_block pointer to decompression function |
3146 | | * @return number of bytes read or -1 on error (incl if offset is past EOF) |
3147 | | */ |
3148 | | static ssize_t |
3149 | | hfs_file_read_compressed_rsrc(const TSK_FS_ATTR * a_fs_attr, |
3150 | | TSK_OFF_T a_offset, char *a_buf, size_t a_len, |
3151 | | std::unique_ptr<CMP_OFFSET_ENTRY[]> (*read_block_table)( |
3152 | | const TSK_FS_ATTR *rAttr, |
3153 | | uint32_t* tableSizeOut, |
3154 | | uint32_t* tableOffsetOut), |
3155 | | int (*decompress_block)( |
3156 | | char* rawBuf, |
3157 | | uint32_t len, |
3158 | | char* uncBuf, |
3159 | | uint64_t* uncLen) |
3160 | | ) |
3161 | 0 | { |
3162 | 0 | TSK_FS_FILE *fs_file; |
3163 | 0 | const TSK_FS_ATTR *rAttr; |
3164 | 0 | uint32_t offsetTableOffset; |
3165 | 0 | uint32_t offsetTableSize; // Size of the offset table |
3166 | 0 | TSK_OFF_T indx; // index for looping over the offset table |
3167 | 0 | TSK_OFF_T startUnit = 0; |
3168 | 0 | uint32_t startUnitOffset = 0; |
3169 | 0 | TSK_OFF_T endUnit = 0; |
3170 | 0 | uint64_t bytesCopied; |
3171 | 0 |
|
3172 | 0 | if (tsk_verbose) |
3173 | 0 | tsk_fprintf(stderr, |
3174 | 0 | "%s: called because this file is compressed, with data in the resource fork\n", __func__); |
3175 | 0 |
|
3176 | 0 | // Reading zero bytes? OK at any offset, I say! |
3177 | 0 | if (a_len == 0) |
3178 | 0 | return 0; |
3179 | 0 |
|
3180 | 0 | if (a_offset < 0) { |
3181 | 0 | error_detected(TSK_ERR_FS_ARG, |
3182 | 0 | "%s: reading from file at a negative offset", |
3183 | 0 | __func__); |
3184 | 0 | return -1; |
3185 | 0 | } |
3186 | 0 |
|
3187 | 0 | if (a_len > SIZE_MAX / 2) { |
3188 | 0 | error_detected(TSK_ERR_FS_ARG, |
3189 | 0 | "%s: trying to read more than SIZE_MAX/2 is not supported.", |
3190 | 0 | __func__); |
3191 | 0 | return -1; |
3192 | 0 | } |
3193 | 0 |
|
3194 | 0 | if ((a_fs_attr == NULL) || (a_fs_attr->fs_file == NULL) |
3195 | 0 | || (a_fs_attr->fs_file->meta == NULL) |
3196 | 0 | || (a_fs_attr->fs_file->fs_info == NULL)) { |
3197 | 0 | error_detected(TSK_ERR_FS_ARG, |
3198 | 0 | "%s: NULL parameters passed", __func__); |
3199 | 0 | return -1; |
3200 | 0 | } |
3201 | 0 |
|
3202 | 0 | // This should be a compressed file. If not, that's an error! |
3203 | 0 | if (!(a_fs_attr->flags & TSK_FS_ATTR_COMP)) { |
3204 | 0 | error_detected(TSK_ERR_FS_ARG, |
3205 | 0 | "%s: called with non-special attribute: %x", |
3206 | 0 | __func__, a_fs_attr->flags); |
3207 | 0 | return -1; |
3208 | 0 | } |
3209 | 0 |
|
3210 | 0 | // Check that the ATTR being read is the main DATA resource, 4352-0, |
3211 | 0 | // because this is the only one that can be compressed in HFS+ |
3212 | 0 | if ((a_fs_attr->id != HFS_FS_ATTR_ID_DATA) || |
3213 | 0 | (a_fs_attr->type != TSK_FS_ATTR_TYPE_HFS_DATA)) { |
3214 | 0 | error_detected(TSK_ERR_FS_ARG, |
3215 | 0 | "%s: arg specified an attribute %u-%u that is not the data fork, " |
3216 | 0 | "Only the data fork can be compressed.", __func__, |
3217 | 0 | a_fs_attr->type, a_fs_attr->id); |
3218 | 0 | return -1; |
3219 | 0 | } |
3220 | 0 |
|
3221 | 0 | /******** Open the Resource Fork ***********/ |
3222 | 0 | // The file |
3223 | 0 | fs_file = a_fs_attr->fs_file; |
3224 | 0 |
|
3225 | 0 | // find the attribute for the resource fork |
3226 | 0 | rAttr = |
3227 | 0 | tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_HFS_RSRC, |
3228 | 0 | HFS_FS_ATTR_ID_RSRC, TRUE); |
3229 | 0 | if (rAttr == NULL) { |
3230 | 0 | error_returned |
3231 | 0 | (" %s: could not get the attribute for the resource fork of the file", __func__); |
3232 | 0 | return -1; |
3233 | 0 | } |
3234 | 0 |
|
3235 | 0 | // read the offset table from the fork header |
3236 | 0 | std::unique_ptr<CMP_OFFSET_ENTRY[]> offsetTable = read_block_table( |
3237 | 0 | rAttr, &offsetTableSize, &offsetTableOffset |
3238 | 0 | ); |
3239 | 0 | if (!offsetTable) { |
3240 | 0 | return -1; |
3241 | 0 | } |
3242 | 0 |
|
3243 | 0 | // Compute the range of compression units needed for the request |
3244 | 0 | startUnit = a_offset / COMPRESSION_UNIT_SIZE; |
3245 | 0 | startUnitOffset = a_offset % COMPRESSION_UNIT_SIZE; |
3246 | 0 | endUnit = (a_offset + a_len - 1) / COMPRESSION_UNIT_SIZE; |
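// Worked example with illustrative numbers (assuming COMPRESSION_UNIT_SIZE is 65536):
// reading a_len = 10000 bytes at a_offset = 70000 gives startUnit = 1,
// startUnitOffset = 4464 and endUnit = (70000 + 10000 - 1) / 65536 = 1, so only
// compression unit 1 has to be read and decompressed for this request.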
3247 | 0 |
|
3248 | 0 | if (startUnit >= offsetTableSize || endUnit >= offsetTableSize) { |
3249 | 0 | error_detected(TSK_ERR_FS_ARG, |
3250 | 0 | "%s: range of bytes requested %lld - %lld falls past the " |
3251 | 0 | "end of the uncompressed stream %llu\n", |
3252 | 0 | __func__, a_offset, a_offset + a_len, |
3253 | 0 | offsetTable[offsetTableSize-1].offset + |
3254 | 0 | offsetTable[offsetTableSize-1].length); |
3255 | 0 | return -1; |
3256 | 0 | } |
3257 | 0 |
|
3258 | 0 | if (tsk_verbose) |
3259 | 0 | tsk_fprintf(stderr, |
3260 | 0 | "%s: reading compression units: %" PRIdOFF |
3261 | 0 | " to %" PRIdOFF "\n", __func__, startUnit, endUnit); |
3262 | 0 | bytesCopied = 0; |
3263 | 0 |
|
3264 | 0 | // Allocate buffers for the raw and uncompressed data |
3265 | 0 | /* Raw data can be COMPRESSION_UNIT_SIZE+1 bytes long when the unit is
3266 | 0 | * stored uncompressed, because a 1-byte flag precedes the data to
3267 | 0 | * indicate that it is not compressed. */
3268 | 0 | std::unique_ptr<char[]> rawBuf{new(std::nothrow) char[COMPRESSION_UNIT_SIZE + 1]}; |
3269 | 0 | if (!rawBuf) { |
3270 | 0 | error_returned |
3271 | 0 | (" %s: buffers for reading and uncompressing", __func__); |
3272 | 0 | return -1; |
3273 | 0 | } |
3274 | 0 |
|
3275 | 0 | std::unique_ptr<char[]> uncBuf{new(std::nothrow) char[COMPRESSION_UNIT_SIZE]}; |
3276 | 0 | if (!uncBuf) { |
3277 | 0 | error_returned |
3278 | 0 | (" %s: buffers for reading and uncompressing", __func__); |
3279 | 0 | return -1; |
3280 | 0 | } |
3281 | 0 |
|
3282 | 0 | // Read from the indicated comp units |
3283 | 0 | for (indx = startUnit; indx <= endUnit; ++indx) { |
3284 | 0 | char *uncBufPtr = uncBuf.get(); |
3285 | 0 | size_t bytesToCopy; |
3286 | 0 |
|
3287 | 0 | const ssize_t ret = read_and_decompress_block( |
3288 | 0 | rAttr, rawBuf.get(), uncBuf.get(), |
3289 | 0 | offsetTable.get(), offsetTableSize, offsetTableOffset, (size_t)indx, |
3290 | 0 | decompress_block |
3291 | 0 | ); |
3292 | 0 |
|
3293 | 0 | switch (ret) { |
3294 | 0 | case -1: |
3295 | 0 | return -1; |
3296 | 0 | case 0: |
3297 | 0 | continue; |
3298 | 0 | default: |
3299 | 0 | break; |
3300 | 0 | } |
3301 | 0 |
|
3302 | 0 | uint64_t uncLen = ret; |
3303 | 0 |
|
3304 | 0 | // If this is the first comp unit, then we must skip over the |
3305 | 0 | // startUnitOffset bytes. |
3306 | 0 | if (indx == startUnit) { |
3307 | 0 | uncLen -= startUnitOffset; |
3308 | 0 | uncBufPtr += startUnitOffset; |
3309 | 0 | } |
3310 | 0 |
|
3311 | 0 | // How many bytes to copy from this compression unit? |
3312 | 0 |
|
3313 | 0 | if (bytesCopied + uncLen < (uint64_t) a_len) // cast OK because a_len > 0 |
3314 | 0 | bytesToCopy = (size_t) uncLen; // uncLen <= size of compression unit, which is small, so cast is OK |
3315 | 0 | else |
3316 | 0 | bytesToCopy = (size_t) (((uint64_t) a_len) - bytesCopied); // diff <= compression unit size, so cast is OK |
3317 | 0 |
|
3318 | 0 | // Copy into the output buffer, and update bookkeeping. |
3319 | 0 | memcpy(a_buf + bytesCopied, uncBufPtr, bytesToCopy); |
3320 | 0 | bytesCopied += bytesToCopy; |
3321 | 0 | } |
3322 | 0 |
|
3323 | 0 | // Well, we don't know (without a lot of work) what the |
3324 | 0 | // true uncompressed size of the stream is. All we know is the "upper bound" which |
3325 | 0 | // assumes that all of the compression units expand to their full size. If we did |
3326 | 0 | // know the true size, then we could reject requests that go beyond the end of the |
3327 | 0 | // stream. Instead, we treat the stream as if it is padded out to the full size of |
3328 | 0 | // the last compression unit with zeros. |
3329 | 0 |
|
3330 | 0 | // Have we read and copied all of the bytes requested? |
3331 | 0 | if (bytesCopied < a_len) { |
3332 | 0 | // set the remaining bytes to zero |
3333 | 0 | memset(a_buf + bytesCopied, 0, a_len - (size_t) bytesCopied); // cast OK because diff must be < compression unit size |
3334 | 0 | } |
3335 | 0 |
|
3336 | 0 | return (ssize_t) bytesCopied; // cast OK, cannot be greater than a_len which cannot be greater than SIZE_MAX/2 (rounded down). |
3337 | 0 | } |
3338 | | |
3339 | | |
3340 | | #ifdef HAVE_LIBZ |
3341 | | /** |
3342 | | * \internal |
3343 | | * Read a ZLIB compressed resource |
3344 | | * |
3345 | | * @param fs_attr the attribute to read |
3346 | | * @param a_offset the offset from which to read |
3347 | | * @param a_buf the buffer into which to read |
3348 | | * @param a_len the length of the buffer |
3349 | | * @return number of bytes read or -1 on error (incl if offset is past EOF) |
3350 | | */ |
3351 | | UNUSED |
3352 | | static ssize_t |
3353 | | hfs_file_read_zlib_rsrc(const TSK_FS_ATTR * a_fs_attr, |
3354 | | TSK_OFF_T a_offset, char *a_buf, size_t a_len) |
3355 | 0 | { |
3356 | 0 | return hfs_file_read_compressed_rsrc( |
3357 | 0 | a_fs_attr, a_offset, a_buf, a_len, |
3358 | 0 | hfs_read_zlib_block_table, |
3359 | 0 | hfs_decompress_zlib_block |
3360 | 0 | ); |
3361 | 0 | } |
3362 | | #endif |
3363 | | |
3364 | | |
3365 | | /** |
3366 | | * \internal |
3367 | | * Read an LZVN compressed resource |
3368 | | * |
3369 | | * @param fs_attr the attribute to read |
3370 | | * @param a_offset the offset from which to read |
3371 | | * @param a_buf the buffer into which to read |
3372 | | * @param a_len the length of the buffer |
3373 | | * @return number of bytes read or -1 on error (incl if offset is past EOF) |
3374 | | */ |
3375 | | UNUSED |
3376 | | static ssize_t |
3377 | | hfs_file_read_lzvn_rsrc(const TSK_FS_ATTR * a_fs_attr, |
3378 | | TSK_OFF_T a_offset, char *a_buf, size_t a_len) |
3379 | 0 | { |
3380 | 0 | return hfs_file_read_compressed_rsrc( |
3381 | 0 | a_fs_attr, a_offset, a_buf, a_len, |
3382 | 0 | hfs_read_lzvn_block_table, |
3383 | 0 | hfs_decompress_lzvn_block |
3384 | 0 | ); |
3385 | 0 | } |
3386 | | |
3387 | | |
3388 | | /** |
3389 | | * \internal |
3390 | | * "Decompress" an uncompressed attr |
3391 | | * |
3392 | | * HFS+ compression schemes allow for some blocks to be stored uncompressed. |
3393 | | * |
3394 | | * @param rawBuf source buffer |
3395 | | * @param rawSize size of source buffer |
3396 | | * @param uncSize expected uncompressed size |
3397 | | * @param dstBuf destination buffer |
3398 | | * @param dstSize size of destination buffer |
3399 | | * @param dstBufFree true iff the caller must free the destination buffer |
3400 | | * @return 1 |
3401 | | */ |
3402 | | static int hfs_decompress_noncompressed_attr( |
3403 | | char* rawBuf, |
3404 | | [[maybe_unused]] uint32_t rawSize, |
3405 | | uint64_t uncSize, |
3406 | | char** dstBuf, |
3407 | | uint64_t* dstSize, |
3408 | | int* dstBufFree) |
3409 | 0 | { |
3410 | 0 | if (tsk_verbose) |
3411 | 0 | tsk_fprintf(stderr, |
3412 | 0 | "%s: Leading byte, 0x%02x, indicates that the data is not really compressed.\n" |
3413 | 0 | "%s: Loading the default DATA attribute.", __func__, rawBuf[0], __func__); |
3414 | 0 |
|
3415 | 0 | *dstBuf = rawBuf + 1; // + 1 indicator byte |
3416 | 0 | *dstSize = uncSize; |
3417 | 0 | *dstBufFree = FALSE; |
3418 | 0 | return 1; |
3419 | 0 | } |
3420 | | |
3421 | | |
3422 | | /** |
3423 | | * \internal |
3424 | | * Decompress a ZLIB compressed attr |
3425 | | * |
3426 | | * @param rawBuf source buffer |
3427 | | * @param rawSize size of source buffer |
3428 | | * @param uncSize expected uncompressed size |
3429 | | * @param dstBuf destination buffer |
3430 | | * @param dstSize size of destination buffer |
3431 | | * @param dstBufFree true iff the caller must free the destination buffer |
3432 | | * @return 1 on success, 0 on error |
3433 | | */ |
3434 | | static int hfs_decompress_zlib_attr(char* rawBuf, uint32_t rawSize, uint64_t uncSize, char** dstBuf, uint64_t* dstSize, int* dstBufFree) |
3435 | 0 | { |
3436 | 0 | // ZLIB blocks cannot start with 0xF as the low nibble, so that's used |
3437 | 0 | // as the flag for noncompressed blocks |
3438 | 0 | if ((rawBuf[0] & 0x0F) == 0x0F) { |
3439 | 0 | return hfs_decompress_noncompressed_attr( |
3440 | 0 | rawBuf, rawSize, uncSize, dstBuf, dstSize, dstBufFree); |
3441 | 0 | } |
3442 | 0 | else { |
3443 | 0 | #ifdef HAVE_LIBZ |
3444 | 0 | char* uncBuf = NULL; |
3445 | 0 | uint64_t uLen; |
3446 | 0 | unsigned long bytesConsumed; |
3447 | 0 | int infResult; |
3448 | 0 |
|
3449 | 0 | if (tsk_verbose) |
3450 | 0 | tsk_fprintf(stderr, |
3451 | 0 | "%s: Uncompressing (inflating) data.", __func__); |
3452 | 0 | // Uncompress the remainder of the attribute, and load as 128-0 |
3453 | 0 | // Note: cast is OK because uncSize will be quite modest, < 4000. |
3454 | 0 |
|
3455 | 0 | uncBuf = (char *) tsk_malloc((size_t) uncSize + 100); // add some extra space |
3456 | 0 | if (uncBuf == NULL) { |
3457 | 0 | error_returned |
3458 | 0 | (" - %s, space for the uncompressed attr", __func__); |
3459 | 0 | return 0; |
3460 | 0 | } |
3461 | 0 |
|
3462 | 0 | infResult = zlib_inflate(rawBuf, (uint64_t) rawSize, |
3463 | 0 | uncBuf, (uint64_t) (uncSize + 100), |
3464 | 0 | &uLen, &bytesConsumed); |
3465 | 0 | if (infResult != 0) { |
3466 | 0 | error_returned |
3467 | 0 | (" %s, zlib could not uncompress attr", __func__); |
3468 | 0 | free(uncBuf); |
3469 | 0 | return 0; |
3470 | 0 | } |
3471 | 0 |
|
3472 | 0 | if (bytesConsumed != rawSize) { |
3473 | 0 | error_detected(TSK_ERR_FS_READ, |
3474 | 0 | " %s, decompressor did not consume the whole compressed data", |
3475 | 0 | __func__); |
3476 | 0 | free(uncBuf); |
3477 | 0 | return 0; |
3478 | 0 | } |
3479 | 0 |
|
3480 | 0 | *dstBuf = uncBuf; |
3481 | 0 | *dstSize = uncSize; |
3482 | 0 | *dstBufFree = TRUE; |
3483 | 0 | #else |
3484 | 0 | // ZLIB compression library is not available, so we will load a |
3485 | 0 | // zero-length default DATA attribute. Without this, icat may |
3486 | 0 | // misbehave. |
3487 | 0 |
|
3488 | 0 | if (tsk_verbose) |
3489 | 0 | tsk_fprintf(stderr, |
3490 | 0 | "%s: ZLIB not available, so loading an empty default DATA attribute.\n", __func__); |
3491 | 0 |
|
3492 | 0 | // Dummy is one byte long, so the ptr is not null, but we set the |
3493 | 0 | // length to zero bytes, so it is never read. |
3494 | 0 | static char dummy[1]; |
3495 | 0 |
|
3496 | 0 | *dstBuf = dummy; |
3497 | 0 | *dstSize = 0; |
3498 | 0 | *dstBufFree = FALSE; |
3499 | 0 | #endif |
3500 | 0 | } |
3501 | 0 |
|
3502 | 0 | return 1; |
3503 | 0 | } |
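/*
 * For readers unfamiliar with the internal zlib_inflate() helper used above:
 * its effect here is roughly that of stock zlib's uncompress2() (available in
 * zlib >= 1.2.9), which also reports how many input bytes it consumed. A sketch
 * of an equivalent call, with the same "did it consume everything?" check:
 *
 *   uLongf destLen = (uLongf) (uncSize + 100);
 *   uLong srcLen = (uLong) rawSize;
 *   if (uncompress2((Bytef *) uncBuf, &destLen,
 *           (const Bytef *) rawBuf, &srcLen) != Z_OK
 *       || srcLen != rawSize) {
 *       // report the error and free uncBuf, as the code above does
 *   }
 *
 * This is only an illustration of what the helper achieves, not a drop-in
 * replacement.
 */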
3504 | | |
3505 | | |
3506 | | /** |
3507 | | * \internal |
3508 | | * Decompress an LZVN compressed attr |
3509 | | * |
3510 | | * @param rawBuf source buffer |
3511 | | * @param rawSize size of source buffer |
3512 | | * @param uncSize expected uncompressed size |
3513 | | * @param dstBuf destination buffer |
3514 | | * @param dstSize size of destination buffer |
3515 | | * @param dstBufFree true iff the caller must free the destination buffer |
3516 | | * @return 1 on success, 0 on error |
3517 | | */ |
3518 | | static int hfs_decompress_lzvn_attr(char* rawBuf, uint32_t rawSize, uint64_t uncSize, char** dstBuf, uint64_t* dstSize, int* dstBufFree) |
3519 | 0 | { |
3520 | 0 | // LZVN blocks cannot start with 0x06, so that's used as the flag for |
3521 | 0 | // noncompressed blocks |
3522 | 0 | if (rawBuf[0] == 0x06) { |
3523 | 0 | return hfs_decompress_noncompressed_attr( |
3524 | 0 | rawBuf, rawSize, uncSize, dstBuf, dstSize, dstBufFree); |
3525 | 0 | } |
3526 | 0 |
|
3527 | 0 | char* uncBuf = (char *) tsk_malloc((size_t) uncSize);
     |   | if (uncBuf == NULL) {
     |   |     error_returned(" - %s, space for the uncompressed attr", __func__);
     |   |     return 0;
     |   | }
3528 | 0 | *dstSize = lzvn_decode_buffer(uncBuf, uncSize, rawBuf, rawSize);
3529 | 0 | *dstBuf = uncBuf; |
3530 | 0 | *dstBufFree = TRUE; |
3531 | 0 |
|
3532 | 0 | return 1; |
3533 | 0 | } |
3534 | | |
3535 | | |
3536 | | /** |
3537 | | * \internal |
3538 | | * Read a compressed attr |
3539 | | * |
3540 | | * @param fs_file the file |
3541 | | * @param cmpType compression type |
3542 | | * @param buffer buffer containing the decmpfs compression record and inline data
3543 | | * @param attributeLength length of the attribute |
3544 | | * @param uncSize uncompressed size |
3545 | | * @param decompress_attr pointer to the decompression function |
3546 | | * @return 1 on success, 0 on error |
3547 | | */ |
3548 | | static int |
3549 | | hfs_file_read_compressed_attr(TSK_FS_FILE* fs_file, |
3550 | | uint8_t cmpType, |
3551 | | char* buffer, |
3552 | | uint32_t attributeLength, |
3553 | | uint64_t uncSize, |
3554 | | int (*decompress_attr)(char* rawBuf, |
3555 | | uint32_t rawSize, |
3556 | | uint64_t uncSize, |
3557 | | char** dstBuf, |
3558 | | uint64_t* dstSize, |
3559 | | int* dstBufFree)) |
3560 | 0 | { |
3561 | 0 | // Data is inline. We will load the uncompressed data as a |
3562 | 0 | // resident attribute. |
3563 | 0 | if (tsk_verbose) |
3564 | 0 | tsk_fprintf(stderr, |
3565 | 0 | "%s: Compressed data is inline in the attribute, will load this as the default DATA attribute.\n", __func__); |
3566 | 0 |
|
3567 | 0 | if (attributeLength <= 16) { |
3568 | 0 | if (tsk_verbose) |
3569 | 0 | tsk_fprintf(stderr, |
3570 | 0 | "%s: WARNING, Compression Record of type %u is not followed by" |
3571 | 0 | " compressed data. No data will be loaded into the DATA" |
3572 | 0 | " attribute.\n", __func__, cmpType); |
3573 | 0 |
|
3574 | 0 | // oddly, this is not actually considered an error |
3575 | 0 | return 1; |
3576 | 0 | } |
3577 | 0 |
|
3578 | 0 | TSK_FS_ATTR *fs_attr_unc; |
3579 | 0 |
|
3580 | 0 | // There is data following the compression record, as there should be. |
3581 | 0 | if ((fs_attr_unc = tsk_fs_attrlist_getnew( |
3582 | 0 | fs_file->meta->attr, TSK_FS_ATTR_RES)) == NULL) |
3583 | 0 | { |
3584 | 0 | error_returned(" - %s, FS_ATTR for uncompressed data", __func__); |
3585 | 0 | return 0; |
3586 | 0 | } |
3587 | 0 |
|
3588 | 0 | char* dstBuf; |
3589 | 0 | uint64_t dstSize; |
3590 | 0 | int dstBufFree = FALSE; |
3591 | 0 |
|
3592 | 0 | if (!decompress_attr(buffer + 16, attributeLength - 16, uncSize, |
3593 | 0 | &dstBuf, &dstSize, &dstBufFree)) { |
3594 | 0 | return 0; |
3595 | 0 | } |
3596 | 0 |
|
3597 | 0 | if (dstSize != uncSize) { |
3598 | 0 | error_detected(TSK_ERR_FS_READ, |
3599 | 0 | " %s, actual uncompressed size not equal to the size in the compression record", __func__); |
3600 | 0 | goto on_error; |
3601 | 0 | } |
3602 | 0 |
|
3603 | 0 | if (tsk_verbose) |
3604 | 0 | tsk_fprintf(stderr, |
3605 | 0 | "%s: Loading decompressed data as default DATA attribute.", |
3606 | 0 | __func__); |
3607 | 0 |
|
3608 | 0 | // Load the remainder of the attribute as 128-0 |
3609 | 0 | // set the details in the fs_attr structure. |
3610 | 0 | // Note, we are loading this as a RESIDENT attribute. |
3611 | 0 | if (tsk_fs_attr_set_str(fs_file, |
3612 | 0 | fs_attr_unc, "DATA", |
3613 | 0 | TSK_FS_ATTR_TYPE_HFS_DATA, |
3614 | 0 | HFS_FS_ATTR_ID_DATA, dstBuf, |
3615 | 0 | dstSize)) |
3616 | 0 | { |
3617 | 0 | error_returned(" - %s", __func__); |
3618 | 0 | goto on_error; |
3619 | 0 | } |
3620 | 0 |
|
3621 | 0 | if (dstBufFree) { |
3622 | 0 | free(dstBuf); |
3623 | 0 | } |
3624 | 0 | return 1; |
3625 | 0 |
|
3626 | 0 | on_error: |
3627 | 0 | if (dstBufFree) { |
3628 | 0 | free(dstBuf); |
3629 | 0 | } |
3630 | 0 | return 0; |
3631 | 0 | } |
3632 | | |
3633 | | |
3634 | | /** |
3635 | | * \internal |
3636 | | * Read a ZLIB compressed attr |
3637 | | * |
3638 | | * @param fs_file the file |
3639 | | * @param buffer buffer containing the decmpfs compression record and inline data
3640 | | * @param attributeLength length of the attribute |
3641 | | * @param uncSize uncompressed size |
3642 | | * @return 1 on success, 0 on error |
3643 | | */ |
3644 | | UNUSED |
3645 | | static int hfs_file_read_zlib_attr(TSK_FS_FILE* fs_file, |
3646 | | char* buffer, |
3647 | | uint32_t attributeLength, |
3648 | | uint64_t uncSize) |
3649 | 0 | { |
3650 | 0 | return hfs_file_read_compressed_attr( |
3651 | 0 | fs_file, DECMPFS_TYPE_ZLIB_ATTR, |
3652 | 0 | buffer, attributeLength, uncSize, |
3653 | 0 | hfs_decompress_zlib_attr |
3654 | 0 | ); |
3655 | 0 | } |
3656 | | |
3657 | | |
3658 | | /** |
3659 | | * \internal |
3660 | | * Read an LZVN compressed attr |
3661 | | * |
3662 | | * @param fs_file the file |
3663 | | * @param buffer buffer containing the decmpfs compression record and inline data
3664 | | * @param attributeLength length of the attribute |
3665 | | * @param uncSize uncompressed size |
3666 | | * @return 1 on success, 0 on error |
3667 | | */ |
3668 | | UNUSED |
3669 | | static int hfs_file_read_lzvn_attr(TSK_FS_FILE* fs_file, |
3670 | | char* buffer, |
3671 | | uint32_t attributeLength, |
3672 | | uint64_t uncSize) |
3673 | 0 | { |
3674 | 0 | return hfs_file_read_compressed_attr( |
3675 | 0 | fs_file, DECMPFS_TYPE_LZVN_ATTR, |
3676 | 0 | buffer, attributeLength, uncSize, |
3677 | 0 | hfs_decompress_lzvn_attr |
3678 | 0 | ); |
3679 | 0 | } |
3680 | | |
3681 | | |
3682 | | typedef struct { |
3683 | | TSK_FS_INFO *fs; // the HFS file system |
3684 | | TSK_FS_FILE *file; // the Attributes file, if open |
3685 | | hfs_btree_header_record *header; // the Attributes btree header record. |
3686 | | // For Convenience, unpacked values. |
3687 | | TSK_ENDIAN_ENUM endian; |
3688 | | uint32_t rootNode; |
3689 | | uint16_t nodeSize; |
3690 | | uint16_t maxKeyLen; |
3691 | | } ATTR_FILE_T; |
3692 | | |
3693 | | |
3694 | | /** \internal |
3695 | | * Open the Attributes file, and read the btree header record. Fill in the fields of the ATTR_FILE_T struct. |
3696 | | * |
3697 | | * @param fs -- the HFS file system |
3698 | | * @param attr_file -- the ATTR_FILE_T struct whose fields will be filled in
3699 | | * |
3700 | | * @return 1 on error, 0 on success |
3701 | | */ |
3702 | | static uint8_t |
3703 | | open_attr_file(TSK_FS_INFO * fs, ATTR_FILE_T * attr_file) |
3704 | 0 | { |
3705 | |
|
3706 | 0 | ssize_t cnt; // will hold bytes read |
3707 | |
|
3708 | 0 | hfs_btree_header_record *hrec; |
3709 | | |
3710 | | // clean up any error messages that are lying around |
3711 | 0 | tsk_error_reset(); |
3712 | |
|
3713 | 0 | if (fs == NULL) { |
3714 | 0 | tsk_error_set_errno(TSK_ERR_FS_ARG); |
3715 | 0 | tsk_error_set_errstr("open_attr_file: fs is NULL"); |
3716 | 0 | return 1; |
3717 | 0 | } |
3718 | | |
3719 | 0 | if (attr_file == NULL) { |
3720 | 0 | tsk_error_set_errno(TSK_ERR_FS_ARG); |
3721 | 0 | tsk_error_set_errstr("open_attr_file: attr_file is NULL"); |
3722 | 0 | return 1; |
3723 | 0 | } |
3724 | | |
3725 | | // Open the Attributes File |
3726 | 0 | attr_file->file = |
3727 | 0 | tsk_fs_file_open_meta(fs, NULL, HFS_ATTRIBUTES_FILE_ID); |
3728 | |
|
3729 | 0 | if (attr_file->file == NULL) { |
3730 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
3731 | 0 | tsk_error_set_errstr |
3732 | 0 | ("open_attr_file: could not open the Attributes file"); |
3733 | 0 | return 1; |
3734 | 0 | } |
3735 | | |
3736 | | // Allocate some space for the Attributes btree header record (which |
3737 | | // is passed back to the caller) |
3738 | 0 | hrec = (hfs_btree_header_record *) |
3739 | 0 | malloc(sizeof(hfs_btree_header_record)); |
3740 | |
|
3741 | 0 | if (hrec == NULL) { |
3742 | 0 | tsk_error_set_errno(TSK_ERR_FS); |
3743 | 0 | tsk_error_set_errstr |
3744 | 0 | ("open_attr_file: could not malloc space for Attributes header record"); |
3745 | 0 | return 1; |
3746 | 0 | } |
3747 | | |
3748 | | // Read the btree header record |
3749 | 0 | cnt = tsk_fs_file_read(attr_file->file, |
3750 | 0 | 14, |
3751 | 0 | (char *) hrec, |
3752 | 0 | sizeof(hfs_btree_header_record), (TSK_FS_FILE_READ_FLAG_ENUM) 0); |
3753 | 0 | if (cnt != (ssize_t)sizeof(hfs_btree_header_record)) { |
3754 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
3755 | 0 | tsk_error_set_errstr |
3756 | 0 | ("open_attr_file: could not open the Attributes file"); |
3757 | 0 | tsk_fs_file_close(attr_file->file); |
3758 | 0 | free(hrec); |
3759 | 0 | return 1; |
3760 | 0 | } |
3761 | | |
3762 | | // Fill in the fields of the attr_file struct (which was passed in by the caller) |
3763 | 0 | attr_file->fs = fs; |
3764 | 0 | attr_file->header = hrec; |
3765 | 0 | attr_file->endian = fs->endian; |
3766 | 0 | attr_file->nodeSize = tsk_getu16(attr_file->endian, hrec->nodesize); |
3767 | 0 | attr_file->rootNode = tsk_getu32(attr_file->endian, hrec->rootNode); |
3768 | 0 | attr_file->maxKeyLen = tsk_getu16(attr_file->endian, hrec->maxKeyLen); |
3769 | |
|
3770 | 0 | return 0; |
3771 | 0 | } |
3772 | | |
3773 | | |
3774 | | /** \internal |
3775 | | * Closes and frees the data structures associated with ATTR_FILE_T |
3776 | | */ |
3777 | | static uint8_t |
3778 | | close_attr_file(ATTR_FILE_T * attr_file) |
3779 | 0 | { |
3780 | 0 | if (attr_file == NULL) { |
3781 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
3782 | 0 | tsk_error_set_errstr("close_attr_file: NULL attr_file arg"); |
3783 | 0 | return 1; |
3784 | 0 | } |
3785 | | |
3786 | 0 | if (attr_file->file != NULL) { |
3787 | 0 | tsk_fs_file_close(attr_file->file); |
3788 | 0 | attr_file->file = NULL; |
3789 | 0 | } |
3790 | |
|
3791 | 0 | free(attr_file->header); |
3792 | 0 | attr_file->header = NULL; |
3793 | |
|
3794 | 0 | attr_file->rootNode = 0; |
3795 | 0 | attr_file->nodeSize = 0; |
3796 | | // Note that we leave the fs component alone. |
3797 | 0 | return 0; |
3798 | 0 | } |
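/*
 * Typical usage of the open/close pair above (this mirrors what
 * hfs_load_extended_attrs below does):
 *
 *   ATTR_FILE_T attrFile;
 *   if (open_attr_file(fs, &attrFile))
 *       return 1;                     // error already set by open_attr_file
 *   if (attrFile.rootNode != 0) {
 *       // node N of the B-tree lives at byte offset (TSK_OFF_T) N * attrFile.nodeSize
 *       // in attrFile.file and is read with tsk_fs_file_read(), starting from
 *       // attrFile.rootNode
 *   }
 *   close_attr_file(&attrFile);
 */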
3799 | | |
3800 | | |
3801 | | static const char * |
3802 | | hfs_attrTypeName(uint32_t typeNum) |
3803 | 0 | { |
3804 | 0 | switch (typeNum) { |
3805 | 0 | case TSK_FS_ATTR_TYPE_HFS_DEFAULT: |
3806 | 0 | return "DFLT"; |
3807 | 0 | case TSK_FS_ATTR_TYPE_HFS_DATA: |
3808 | 0 | return "DATA"; |
3809 | 0 | case TSK_FS_ATTR_TYPE_HFS_EXT_ATTR: |
3810 | 0 | return "ExATTR"; |
3811 | 0 | case TSK_FS_ATTR_TYPE_HFS_COMP_REC: |
3812 | 0 | return "CMPF"; |
3813 | 0 | case TSK_FS_ATTR_TYPE_HFS_RSRC: |
3814 | 0 | return "RSRC"; |
3815 | 0 | default: |
3816 | 0 | return "UNKN"; |
3817 | 0 | } |
3818 | 0 | } |
3819 | | |
3820 | | |
3821 | | // TODO: A full function description is still missing. From the code below, this loads the
3822 | | // extended attributes (and any decmpfs compression record) of fs_file; it returns 0 on success and 1 on error.
3823 | | static uint8_t |
3824 | | hfs_load_extended_attrs(TSK_FS_FILE * fs_file, |
3825 | | unsigned char *isCompressed, unsigned char *cmpType, |
3826 | | uint64_t *uncompressedSize) |
3827 | 0 | { |
3828 | 0 | TSK_FS_INFO *fs = fs_file->fs_info; |
3829 | 0 | uint64_t fileID; |
3830 | 0 | ATTR_FILE_T attrFile; |
3831 | 0 | uint8_t *nodeData; |
3832 | 0 | TSK_ENDIAN_ENUM endian; |
3833 | 0 | hfs_btree_node *nodeDescriptor; // The node descriptor |
3834 | 0 | uint32_t nodeID; // The number or ID of the Attributes file node to read. |
3835 | 0 | hfs_btree_key_attr *keyB; // ptr to the key of the Attr file record. |
3836 | 0 | unsigned char done; // Flag to indicate that we are done looping over leaf nodes |
3837 | 0 | uint16_t attribute_counter = 2; // The ID of the next attribute to be loaded. |
3838 | 0 | HFS_INFO *hfs; |
3839 | 0 | char *buffer = NULL; // buffer to hold the attribute |
3840 | 0 | TSK_LIST *nodeIDs_processed = NULL; // Keep track of node IDs to prevent an infinite loop |
3841 | 0 | ssize_t cnt; // count of chars read from file. |
3842 | |
|
3843 | 0 | tsk_error_reset(); |
3844 | | |
3845 | | // The CNID (or inode number) of the file |
3846 | | // Note that in TSK such numbers are 64 bits, but in HFS+ they are only 32 bits. |
3847 | 0 | fileID = fs_file->meta->addr; |
3848 | |
|
3849 | 0 | if (fs == NULL) { |
3850 | 0 | error_detected(TSK_ERR_FS_ARG, |
3851 | 0 | "hfs_load_extended_attrs: NULL fs arg"); |
3852 | 0 | return 1; |
3853 | 0 | } |
3854 | | |
3855 | 0 | hfs = (HFS_INFO *) fs; |
3856 | |
|
3857 | 0 | if (!hfs->has_attributes_file) { |
3858 | | // No attributes file, and so, no extended attributes |
3859 | 0 | return 0; |
3860 | 0 | } |
3861 | | |
3862 | 0 | if (tsk_verbose) { |
3863 | 0 | tsk_fprintf(stderr, |
3864 | 0 | "hfs_load_extended_attrs: Processing file %" PRIuINUM "\n", |
3865 | 0 | fileID); |
3866 | 0 | } |
3867 | | |
3868 | | // Open the Attributes File |
3869 | 0 | if (open_attr_file(fs, &attrFile)) { |
3870 | 0 | error_returned |
3871 | 0 | ("hfs_load_extended_attrs: could not open Attributes file"); |
3872 | 0 | return 1; |
3873 | 0 | } |
3874 | | |
3875 | | // Is the Attributes file empty? |
3876 | 0 | if (attrFile.rootNode == 0) { |
3877 | 0 | if (tsk_verbose) |
3878 | 0 | tsk_fprintf(stderr, |
3879 | 0 | "hfs_load_extended_attrs: Attributes file is empty\n"); |
3880 | 0 | close_attr_file(&attrFile); |
3881 | 0 | *isCompressed = FALSE; |
3882 | 0 | *cmpType = 0; |
3883 | 0 | return 0; |
3884 | 0 | } |
3885 | | |
3886 | 0 | if (attrFile.nodeSize < sizeof(hfs_btree_node)) { |
3887 | 0 | error_returned |
3888 | 0 | ("hfs_load_extended_attrs: node size too small"); |
3889 | 0 | close_attr_file(&attrFile); |
3890 | 0 | return 1; |
3891 | 0 | } |
3892 | | |
3893 | | // A place to hold one node worth of data |
3894 | 0 | nodeData = (uint8_t *) malloc(attrFile.nodeSize); |
3895 | 0 | if (nodeData == NULL) { |
3896 | 0 | error_detected(TSK_ERR_AUX_MALLOC, |
3897 | 0 | "hfs_load_extended_attrs: Could not malloc space for an Attributes file node"); |
3898 | 0 | goto on_error; |
3899 | 0 | } |
3900 | | |
3901 | | // Initialize these |
3902 | 0 | *isCompressed = FALSE; |
3903 | 0 | *cmpType = 0; |
3904 | |
|
3905 | 0 | endian = attrFile.fs->endian; |
3906 | | |
3907 | | // Start with the root node |
3908 | 0 | nodeID = attrFile.rootNode; |
3909 | | |
3910 | | // While loop, over nodes in path from root node to the correct LEAF node. |
3911 | 0 | while (1) { |
3912 | 0 | uint16_t numRec; // Number of records in the node |
3913 | 0 | int recIndx; // index for looping over records |
3914 | |
|
3915 | 0 | if (tsk_verbose) { |
3916 | 0 | tsk_fprintf(stderr, |
3917 | 0 | "hfs_load_extended_attrs: Reading Attributes File node with ID %" |
3918 | 0 | PRIu32 "\n", nodeID); |
3919 | 0 | } |
3920 | | |
3921 | | /* Make sure we do not get into an infinite loop */ |
3922 | 0 | if (tsk_list_find(nodeIDs_processed, nodeID)) { |
3923 | 0 | error_detected(TSK_ERR_FS_READ, |
3924 | 0 | "hfs_load_extended_attrs: Infinite loop detected - trying to read node %" PRIu32 " which has already been processed", nodeID); |
3925 | 0 | goto on_error; |
3926 | 0 | } |
3927 | | |
3928 | | |
3929 | | /* Read the node */ |
3930 | 0 | cnt = tsk_fs_file_read(attrFile.file, |
3931 | 0 | (TSK_OFF_T)nodeID * attrFile.nodeSize, |
3932 | 0 | (char *) nodeData, |
3933 | 0 | attrFile.nodeSize, (TSK_FS_FILE_READ_FLAG_ENUM) 0); |
3934 | 0 | if (cnt != (ssize_t)attrFile.nodeSize) { |
3935 | 0 | error_returned |
3936 | 0 | ("hfs_load_extended_attrs: Could not read in a node from the Attributes File"); |
3937 | 0 | goto on_error; |
3938 | 0 | } |
3939 | | |
3940 | | /* Save this node ID to the list of processed nodes */ |
3941 | 0 | if (tsk_list_add(&nodeIDs_processed, nodeID)) { |
3942 | 0 | error_detected(TSK_ERR_FS_READ, |
3943 | 0 | "hfs_load_extended_attrs: Could not save nodeID to the list of processed nodes"); |
3944 | 0 | goto on_error; |
3945 | 0 | } |
3946 | | |
3947 | | /** Node has a: |
3948 | | * Descriptor |
3949 | | * Set of records |
3950 | | * Table at the end with pointers to the records |
3951 | | */ |
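// For example, with a nodeSize of 8192 bytes the 16-bit offset of record 0 is
// stored in the last two bytes of the node (bytes 8190-8191), record 1's offset
// in bytes 8188-8189, and so on; that is what the
// nodeData[attrFile.nodeSize - (2 * (recIndx + 1))] lookups below compute.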
3952 | | // Parse the Node header |
3953 | 0 | nodeDescriptor = (hfs_btree_node *) nodeData; |
3954 | | |
3955 | | // If we are at a leaf node, then we have found the right node |
3956 | 0 | if (nodeDescriptor->type == HFS_ATTR_NODE_LEAF) { |
3957 | 0 | break; |
3958 | 0 | } |
3959 | | |
3960 | | // This had better be an INDEX node, if not its an error |
3961 | 0 | else if (nodeDescriptor->type != HFS_ATTR_NODE_INDEX) { |
3962 | 0 | error_detected(TSK_ERR_FS_READ, |
3963 | 0 | "hfs_load_extended_attrs: Reached a non-INDEX and non-LEAF node in searching the Attributes File"); |
3964 | 0 | goto on_error; |
3965 | 0 | } |
3966 | | |
3967 | | // OK, we are in an INDEX node. loop over the records to find the last one whose key is |
3968 | | // smaller than or equal to the desired key |
3969 | | |
3970 | 0 | numRec = tsk_getu16(endian, nodeDescriptor->num_rec); |
3971 | 0 | if (numRec == 0) { |
3972 | | // This is wrong, there must always be at least 1 record in an INDEX node. |
3973 | 0 | error_detected(TSK_ERR_FS_READ, |
3974 | 0 | "hfs_load_extended_attrs:Attributes File index node %" |
3975 | 0 | PRIu32 " has zero records", nodeID); |
3976 | 0 | goto on_error; |
3977 | 0 | } |
3978 | | |
3979 | 0 | for (recIndx = 0; recIndx < numRec; ++recIndx) { |
3980 | 0 | uint16_t keyLength; |
3981 | 0 | int comp; // comparison result |
3982 | 0 | const char *compStr; // comparison result, as a string |
3983 | 0 | uint8_t *recData; // pointer to the data part of the record |
3984 | 0 | uint32_t keyFileID; |
3985 | |
|
3986 | 0 | if ((attrFile.nodeSize < 2) || (recIndx > ((attrFile.nodeSize - 2) / 2))) { |
3987 | 0 | error_detected(TSK_ERR_FS_READ, |
3988 | 0 | "hfs_load_extended_attrs: Unable to process attribute (recIndx exceeds attrFile.nodeSize)"); |
3989 | 0 | goto on_error; |
3990 | 0 | } |
3991 | | |
3992 | | // The offset to the record is stored in table at end of node |
3993 | 0 | uint8_t *recOffsetTblEntry = &nodeData[attrFile.nodeSize - (2 * (recIndx + 1))]; // data describing where this record is |
3994 | 0 | uint16_t recOffset = tsk_getu16(endian, recOffsetTblEntry); |
3995 | | //uint8_t * nextRecOffsetData = &nodeData[attrFile.nodeSize - 2* (recIndx+2)]; |
3996 | | |
3997 | | // make sure the record and first fields are in the buffer |
3998 | 0 | if ((attrFile.nodeSize < 14) || (recOffset >= attrFile.nodeSize - 14)) { |
3999 | 0 | error_detected(TSK_ERR_FS_READ, |
4000 | 0 | "hfs_load_extended_attrs: Unable to process attribute (offset too big)"); |
4001 | 0 | goto on_error; |
4002 | 0 | } |
4003 | | |
4004 | | // Pointer to first byte of record |
4005 | 0 | uint8_t *recordBytes = &nodeData[recOffset]; |
4006 | | |
4007 | | |
4008 | | // Cast that to the Attributes file key (n.b., the key is the first thing in the record) |
4009 | 0 | keyB = (hfs_btree_key_attr *) recordBytes; |
4010 | | |
4011 | | // Is this key less than what we are seeking? |
4012 | | //int comp = comp_attr_key(endian, keyB, fileID, attrName, startBlock); |
4013 | |
|
4014 | 0 | keyFileID = tsk_getu32(endian, keyB->file_id); |
4015 | 0 | if (keyFileID < fileID) { |
4016 | 0 | comp = -1; |
4017 | 0 | compStr = "less than"; |
4018 | 0 | } |
4019 | 0 | else if (keyFileID > fileID) { |
4020 | 0 | comp = 1; |
4021 | 0 | compStr = "greater than"; |
4022 | 0 | } |
4023 | 0 | else { |
4024 | 0 | comp = 0; |
4025 | 0 | compStr = "equal to"; |
4026 | 0 | } |
4027 | 0 | if (tsk_verbose) |
4028 | 0 | tsk_fprintf(stderr, |
4029 | 0 | "hfs_load_extended_attrs: INDEX record %d, fileID %" |
4030 | 0 | PRIu32 " is %s the file ID we are seeking, %" PRIuINUM
4031 | 0 | ".\n", recIndx, keyFileID, compStr, fileID); |
4032 | 0 | if (comp > 0) { |
4033 | | // The key of this record is greater than what we are seeking |
4034 | 0 | if (recIndx == 0) { |
4035 | | // This is the first record, so no records are appropriate |
4036 | | // Nothing in this btree will match. We can stop right here. |
4037 | 0 | goto on_exit; |
4038 | 0 | } |
4039 | | |
4040 | | // This is not the first record, so, the previous record's child is the one we want. |
4041 | 0 | break; |
4042 | 0 | } |
4043 | | |
4044 | | // CASE: key in this record matches the key we are seeking. The previous record's child |
4045 | | // is the one we want. However, if this is the first record, then we want THIS record's child. |
4046 | 0 | if (comp == 0 && recIndx != 0) { |
4047 | 0 | break; |
4048 | 0 | } |
4049 | | |
4050 | | // Extract the child node ID from the record data (stored after the key) |
4051 | 0 | keyLength = tsk_getu16(endian, keyB->key_len); |
4052 | | // make sure the fields we care about are still in the buffer |
4053 | | // +2 is because key_len doesn't include its own length |
4054 | | // +4 is because of the amount of data we read from the data |
4055 | 0 | if ((keyLength > attrFile.nodeSize - 2 - 4) || (recOffset >= attrFile.nodeSize - 2 - 4 - keyLength)) { |
4056 | 0 | error_detected(TSK_ERR_FS_READ, |
4057 | 0 | "hfs_load_extended_attrs: Unable to process attribute"); |
4058 | 0 | goto on_error; |
4059 | 0 | } |
4060 | | |
4061 | 0 | recData = &recordBytes[keyLength + 2]; |
4062 | | |
4063 | | // Data must start on an even offset from the beginning of the record. |
4064 | | // So, correct this if needed. |
4065 | 0 | if ((recData - recordBytes) % 2) { |
4066 | 0 | recData += 1; |
4067 | 0 | } |
4068 | | |
4069 | | // The next four bytes should be the Node ID of the child of this node. |
4070 | 0 | nodeID = tsk_getu32(endian, recData); |
4071 | | |
4072 | | // At this point, either comp<0 or comp=0 && recIndx=0. In the latter case we want to |
4073 | | // descend to the child of this node, so we break. |
4074 | 0 | if (recIndx == 0 && comp == 0) { |
4075 | 0 | break; |
4076 | 0 | } |
4077 | | |
4078 | | // CASE: key in this record is less than key we seek. comp < 0 |
4079 | | // So, continue looping over records in this node. |
4080 | 0 | } // END loop over records |
4081 | |
|
4082 | 0 | } // END while loop over Nodes in path from root to LEAF node |
4083 | | |
4084 | | // At this point nodeData holds the contents of a LEAF node with the right range of keys |
4085 | | // and nodeDescriptor points to the descriptor of that node. |
4086 | | |
4087 | | // Loop over successive LEAF nodes, starting with this one |
4088 | 0 | done = FALSE; |
4089 | 0 | while (!done) { |
4090 | 0 | uint16_t numRec; // number of records |
4091 | 0 | unsigned int recIndx; // index for looping over records |
4092 | |
|
4093 | 0 | if (tsk_verbose) |
4094 | 0 | tsk_fprintf(stderr, |
4095 | 0 | "hfs_load_extended_attrs: Attributes File LEAF Node %" |
4096 | 0 | PRIu32 ".\n", nodeID); |
4097 | 0 | numRec = tsk_getu16(endian, nodeDescriptor->num_rec); |
4098 | | // Note, leaf node could have one (or maybe zero) records |
4099 | | |
4100 | | // Loop over the records in this node |
4101 | 0 | for (recIndx = 0; recIndx < numRec; ++recIndx) { |
4102 | |
|
4103 | 0 | if (attrFile.nodeSize < 2 || 2*recIndx + 2 > attrFile.nodeSize) { |
4104 | 0 | error_detected(TSK_ERR_FS_READ, |
4105 | 0 | "hfs_load_extended_attrs: Unable to process attribute (recIndx exceeds attrFile.nodeSize)"); |
4106 | 0 | goto on_error; |
4107 | 0 | } |
4108 | | // The offset to the record is stored in table at end of node |
4109 | 0 | uint8_t *recOffsetTblEntry = &nodeData[attrFile.nodeSize - (2 * (recIndx + 1))]; // data describing where this record is |
4110 | 0 | uint16_t recOffset = tsk_getu16(endian, recOffsetTblEntry); |
4111 | |
|
4112 | 0 | int comp; // comparison result |
4113 | 0 | const char *compStr; // comparison result as a string |
4114 | 0 | uint32_t keyFileID; |
4115 | | |
4116 | | // make sure the record and first fields are in the buffer |
4117 | 0 | if (recOffset >= attrFile.nodeSize - 14) { |
4118 | 0 | error_detected(TSK_ERR_FS_READ, |
4119 | 0 | "hfs_load_extended_attrs: Unable to process attribute (offset too big)"); |
4120 | 0 | goto on_error; |
4121 | 0 | } |
4122 | | |
4123 | | // Pointer to first byte of record |
4124 | 0 | uint8_t *recordBytes = &nodeData[recOffset]; |
4125 | | |
4126 | | // Cast that to the Attributes file key |
4127 | 0 | keyB = (hfs_btree_key_attr *) recordBytes; |
4128 | | |
4129 | | // Compare recordBytes key to the key that we are seeking |
4130 | 0 | keyFileID = tsk_getu32(endian, keyB->file_id); |
4131 | | |
4132 | | //fprintf(stdout, " Key file ID = %lu\n", keyFileID); |
4133 | 0 | if (keyFileID < fileID) { |
4134 | 0 | comp = -1; |
4135 | 0 | compStr = "less than"; |
4136 | 0 | } |
4137 | 0 | else if (keyFileID > fileID) { |
4138 | 0 | comp = 1; |
4139 | 0 | compStr = "greater than"; |
4140 | 0 | } |
4141 | 0 | else { |
4142 | 0 | comp = 0; |
4143 | 0 | compStr = "equal to"; |
4144 | 0 | } |
4145 | |
|
4146 | 0 | if (tsk_verbose) |
4147 | 0 | tsk_fprintf(stderr, |
4148 | 0 | "hfs_load_extended_attrs: LEAF Record key file ID %" |
4149 | 0 | PRIu32 " is %s the desired file ID %" PRIuINUM "\n",
4150 | 0 | keyFileID, compStr, fileID); |
4151 | | // Are they the same? |
4152 | 0 | if (comp == 0) { |
4153 | | // Yes, so load this attribute |
4154 | |
|
4155 | 0 | uint8_t *recData; // pointer to the data part of the recordBytes |
4156 | 0 | hfs_attr_data *attrData; |
4157 | 0 | uint32_t attributeLength; |
4158 | 0 | uint32_t nameLength; |
4159 | 0 | uint32_t recordType; |
4160 | 0 | uint16_t keyLength; |
4161 | 0 | int conversionResult; |
4162 | 0 | char nameBuff[HFS_MAX_ATTR_NAME_LEN_UTF8_B+1]; |
4163 | 0 | TSK_FS_ATTR_TYPE_ENUM attrType; |
4164 | 0 | TSK_FS_ATTR *fs_attr; // Points to the attribute to be loaded. |
4165 | |
|
4166 | 0 | keyLength = tsk_getu16(endian, keyB->key_len); |
4167 | | // make sure the fields we care about are still in the buffer |
4168 | | // +2 because key_len doesn't include its own length |
4169 | | // +16 for the amount of data we'll read from data |
4170 | 0 | if (attrFile.nodeSize < 2 + 16 || keyLength > attrFile.nodeSize - 2 - 16 || recOffset + 2 + 16 + keyLength >= attrFile.nodeSize) { |
4171 | 0 | error_detected(TSK_ERR_FS_READ, |
4172 | 0 | "hfs_load_extended_attrs: Unable to process attribute"); |
4173 | 0 | goto on_error; |
4174 | 0 | } |
4175 | | |
4176 | 0 | recData = &recordBytes[keyLength + 2]; |
4177 | | |
4178 | | // Data must start on an even offset from the beginning of the record. |
4179 | | // So, correct this if needed. |
4180 | 0 | if ((recData - recordBytes) % 2) { |
4181 | 0 | recData += 1; |
4182 | 0 | } |
4183 | |
|
4184 | 0 | attrData = (hfs_attr_data *) recData; |
4185 | | |
4186 | | // Check we can process the record type before allocating memory |
4187 | 0 | recordType = tsk_getu32(endian, attrData->record_type); |
4188 | 0 | if (recordType != HFS_ATTR_RECORD_INLINE_DATA) { |
4189 | 0 | error_detected(TSK_ERR_FS_UNSUPTYPE, |
4190 | 0 | "hfs_load_extended_attrs: Unsupported record type: (%d)", |
4191 | 0 | recordType); |
4192 | 0 | goto on_error; |
4193 | 0 | } |
4194 | | |
4195 | | // This is the length of the useful data, not including the record header |
4196 | 0 | attributeLength = tsk_getu32(endian, attrData->attr_size); |
4197 | | |
4198 | | // Check the attribute fits in the node |
4199 | | //if (recordType != HFS_ATTR_RECORD_INLINE_DATA) { |
4200 | 0 | if (attributeLength + 2 + 16 + keyLength > attrFile.nodeSize || recOffset + 2 + 16 + keyLength + attributeLength >= attrFile.nodeSize) { |
4201 | 0 | error_detected(TSK_ERR_FS_READ, |
4202 | 0 | "hfs_load_extended_attrs: Unable to process attribute"); |
4203 | 0 | goto on_error; |
4204 | 0 | } |
4205 | | |
4206 | | // attr_name_len is in UTF_16 chars |
4207 | 0 | nameLength = tsk_getu16(endian, keyB->attr_name_len); |
4208 | 0 | if (2 * nameLength > HFS_MAX_ATTR_NAME_LEN_UTF16_B) { |
4209 | 0 | error_detected(TSK_ERR_FS_CORRUPT, |
4210 | 0 | "hfs_load_extended_attrs: Name length in bytes (%d) > max name length in bytes (%d).", |
4211 | 0 | 2*nameLength, HFS_MAX_ATTR_NAME_LEN_UTF16_B); |
4212 | 0 | goto on_error; |
4213 | 0 | } |
4214 | | |
4215 | 0 | if ((int32_t)(2*nameLength) > keyLength - 12) { |
4216 | 0 | error_detected(TSK_ERR_FS_CORRUPT, |
4217 | 0 | "hfs_load_extended_attrs: Name length in bytes (%d) > remaining struct length (%d).", |
4218 | 0 | 2*nameLength, keyLength - 12); |
4219 | 0 | goto on_error; |
4220 | 0 | } |
4221 | | |
4222 | 0 | buffer = (char*) tsk_malloc(attributeLength); |
4223 | 0 | if (buffer == NULL) { |
4224 | 0 | error_detected(TSK_ERR_AUX_MALLOC, |
4225 | 0 | "hfs_load_extended_attrs: Could not malloc space for the attribute."); |
4226 | 0 | goto on_error; |
4227 | 0 | } |
4228 | | |
4229 | 0 | memcpy(buffer, attrData->attr_data, attributeLength); |
4230 | | |
4231 | | // Use the "attr_name" part of the key as the attribute name |
4232 | | // but must convert to UTF8. Unfortunately, there does not seem to |
4233 | | // be any easy way to determine how long the converted string will |
4234 | | // be because UTF8 is a variable length encoding. However, the longest |
4235 | | // it will be is 3 * the max number of UTF16 code units. Add one for null |
4236 | | // termination. (thanks Judson!) |
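// For instance, U+20AC (the Euro sign) occupies one UTF-16 code unit but three
// UTF-8 bytes, which is the worst case per code unit that the factor of 3 above
// accounts for; surrogate pairs expand to at most 4 bytes for 2 code units.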
4237 | | |
4238 | |
|
4239 | 0 | conversionResult = hfs_UTF16toUTF8(fs, keyB->attr_name, |
4240 | 0 | nameLength, nameBuff, HFS_MAX_ATTR_NAME_LEN_UTF8_B+1, 0); |
4241 | 0 | if (conversionResult != 0) { |
4242 | 0 | error_returned |
4243 | 0 | ("-- hfs_load_extended_attrs could not convert the attr_name in the btree key into a UTF8 attribute name"); |
4244 | 0 | goto on_error; |
4245 | 0 | } |
4246 | | |
4247 | | // What is the type of this attribute? If it is a compression record, then |
4248 | | // use TSK_FS_ATTR_TYPE_HFS_COMP_REC. Else, use TSK_FS_ATTR_TYPE_HFS_EXT_ATTR |
4249 | | // Only "inline data" kind of record is handled. |
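// For context: the com.apple.decmpfs value read into "buffer" starts with a
// 16-byte header (a 4-byte magic field, a 4-byte little-endian compression_type,
// and an 8-byte little-endian uncompressed_size), which is why the reads of cmph
// below use TSK_LIT_ENDIAN and why hfs_file_read_compressed_attr() above skips
// the first 16 bytes.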
4250 | 0 | if (strcmp(nameBuff, "com.apple.decmpfs") == 0 && |
4251 | 0 | tsk_getu32(endian, attrData->record_type) == HFS_ATTR_RECORD_INLINE_DATA) { |
4252 | | // Now, look at the compression record |
4253 | 0 | DECMPFS_DISK_HEADER *cmph = (DECMPFS_DISK_HEADER *) buffer; |
4254 | 0 | *cmpType = |
4255 | 0 | tsk_getu32(TSK_LIT_ENDIAN, cmph->compression_type); |
4256 | 0 | uint64_t uncSize = tsk_getu64(TSK_LIT_ENDIAN, |
4257 | 0 | cmph->uncompressed_size); |
4258 | |
|
4259 | 0 | if (tsk_verbose) |
4260 | 0 | tsk_fprintf(stderr, |
4261 | 0 | "hfs_load_extended_attrs: This attribute is a compression record.\n"); |
4262 | |
|
4263 | 0 | attrType = TSK_FS_ATTR_TYPE_HFS_COMP_REC; |
4264 | 0 | *isCompressed = TRUE; // The data is governed by a compression record (but might not be compressed) |
4265 | 0 | *uncompressedSize = uncSize; |
4266 | |
|
4267 | 0 | switch (*cmpType) { |
4268 | | // Data is inline. We will load the uncompressed |
4269 | | // data as a resident attribute. |
4270 | 0 | case DECMPFS_TYPE_ZLIB_ATTR: |
4271 | 0 | if (!decmpfs_file_read_zlib_attr( |
4272 | 0 | fs_file, buffer, attributeLength, uncSize)) |
4273 | 0 | { |
4274 | 0 | goto on_error; |
4275 | 0 | } |
4276 | 0 | break; |
4277 | | |
4278 | 0 | case DECMPFS_TYPE_LZVN_ATTR: |
4279 | 0 | if (!decmpfs_file_read_lzvn_attr( |
4280 | 0 | fs_file, buffer, attributeLength, uncSize)) |
4281 | 0 | { |
4282 | 0 | goto on_error; |
4283 | 0 | } |
4284 | 0 | break; |
4285 | | |
4286 | | // Data is compressed in the resource fork |
4287 | 0 | case DECMPFS_TYPE_ZLIB_RSRC: |
4288 | 0 | case DECMPFS_TYPE_LZVN_RSRC: |
4289 | 0 | if (tsk_verbose) |
4290 | 0 | tsk_fprintf(stderr, |
4291 | 0 | "%s: Compressed data is in the file Resource Fork.\n", __func__); |
4292 | 0 | break; |
4293 | 0 | } |
4294 | 0 | } |
4295 | 0 | else { // Attribute name is NOT com.apple.decmpfs
4296 | 0 | attrType = TSK_FS_ATTR_TYPE_HFS_EXT_ATTR; |
4297 | 0 | } // END if attribute name is com.apple.decmpfs ELSE clause |
4298 | | |
4299 | 0 | if ((fs_attr = |
4300 | 0 | tsk_fs_attrlist_getnew(fs_file->meta->attr, |
4301 | 0 | TSK_FS_ATTR_RES)) == NULL) { |
4302 | 0 | error_returned(" - hfs_load_extended_attrs"); |
4303 | 0 | goto on_error; |
4304 | 0 | } |
4305 | | |
4306 | 0 | if (tsk_verbose) { |
4307 | 0 | tsk_fprintf(stderr, |
4308 | 0 | "hfs_load_extended_attrs: loading attribute %s, type %u (%s)\n", |
4309 | 0 | nameBuff, (uint32_t) attrType, |
4310 | 0 | hfs_attrTypeName((uint32_t) attrType)); |
4311 | 0 | } |
4312 | | |
4313 | | // set the details in the fs_attr structure |
4314 | 0 | if (tsk_fs_attr_set_str(fs_file, fs_attr, nameBuff, |
4315 | 0 | attrType, attribute_counter, buffer, |
4316 | 0 | attributeLength)) { |
4317 | 0 | error_returned(" - hfs_load_extended_attrs"); |
4318 | 0 | goto on_error; |
4319 | 0 | } |
4320 | | |
4321 | 0 | free(buffer); |
4322 | 0 | buffer = NULL; |
4323 | |
|
4324 | 0 | ++attribute_counter; |
4325 | 0 | } // END if comp == 0 |
4326 | 0 | if (comp == 1) { |
4327 | | // since this record key is greater than our search key, all |
4328 | | // subsequent records will also be greater. |
4329 | 0 | done = TRUE; |
4330 | 0 | break; |
4331 | 0 | } |
4332 | 0 | } // END loop over records in one LEAF node |
4333 | | |
4334 | | /* |
4335 | | * We get to this point if either: |
4336 | | * |
4337 | | * 1. We finish the loop over records and we are still loading attributes |
4338 | | * for the given file. In this case we are NOT done, and must read in |
4339 | | * the next leaf node, and process its records. The following code |
4340 | | * loads the next leaf node before we return to the top of the loop. |
4341 | | * |
4342 | | * 2. We "broke" out of the loop over records because we found a key
4343 | | * whose file ID is greater than the one we are working on. In that case |
4344 | | * we are done. The following code does not run, and we exit the |
4345 | | * while loop over successive leaf nodes. |
4346 | | */ |
4347 | | |
4348 | 0 | if (!done) { |
4349 | | // We did not finish loading the attributes when we got to the end of that node, |
4350 | | // so we must get the next node, and continue. |
4351 | | |
4352 | | // First determine the nodeID of the next LEAF node |
4353 | 0 | uint32_t newNodeID = tsk_getu32(endian, nodeDescriptor->flink); |
4354 | | |
4355 | | //fprintf(stdout, "Next Node ID = %u\n", newNodeID); |
4356 | 0 | if (tsk_verbose) |
4357 | 0 | tsk_fprintf(stderr, |
4358 | 0 | "hfs_load_extended_attrs: Processed last record of THIS node, still gathering attributes.\n"); |
4359 | | |
4360 | | // If we are at the very last leaf node in the btree, then |
4361 | | // this "flink" will be zero. We break out of this loop over LEAF nodes. |
4362 | 0 | if (newNodeID == 0) { |
4363 | 0 | if (tsk_verbose) |
4364 | 0 | tsk_fprintf(stderr, |
4365 | 0 | "hfs_load_extended_attrs: But, there are no more leaf nodes, so we are done.\n"); |
4366 | 0 | break; |
4367 | 0 | } |
4368 | | |
4369 | 0 | if (tsk_verbose) |
4370 | 0 | tsk_fprintf(stderr, |
4371 | 0 | "hfs_load_extended_attrs: Reading the next LEAF node %" |
4372 | 0 | PRIu32 ".\n", newNodeID);
4373 | |
|
4374 | 0 | nodeID = newNodeID; |
4375 | |
|
4376 | 0 | cnt = tsk_fs_file_read(attrFile.file, |
4377 | 0 | nodeID * attrFile.nodeSize, |
4378 | 0 | (char *) nodeData, |
4379 | 0 | attrFile.nodeSize, (TSK_FS_FILE_READ_FLAG_ENUM) 0); |
4380 | 0 | if (cnt != (ssize_t)attrFile.nodeSize) { |
4381 | 0 | error_returned |
4382 | 0 | ("hfs_load_extended_attrs: Could not read in the next LEAF node from the Attributes File btree"); |
4383 | 0 | goto on_error; |
4384 | 0 | } |
4385 | | |
4386 | | // Parse the Node header |
4387 | 0 | nodeDescriptor = (hfs_btree_node *) nodeData; |
4388 | | |
4389 | | // If this is NOT a leaf node, then it is an error
4390 | 0 | if (nodeDescriptor->type != HFS_ATTR_NODE_LEAF) { |
4391 | 0 | error_detected(TSK_ERR_FS_CORRUPT, |
4392 | 0 | "hfs_load_extended_attrs: found a non-LEAF node as a successor to a LEAF node"); |
4393 | 0 | goto on_error; |
4394 | 0 | } |
4395 | 0 | } // END if(! done) |
4396 | | |
4397 | | |
4398 | |
|
4399 | 0 | } // END while(! done) loop over successive LEAF nodes |
4400 | | |
4401 | 0 | on_exit: |
4402 | 0 | free(nodeData); |
4403 | 0 | tsk_list_free(nodeIDs_processed); |
4404 | 0 | close_attr_file(&attrFile); |
4405 | 0 | return 0; |
4406 | | |
4407 | 0 | on_error: |
4408 | 0 | free(buffer); |
4409 | 0 | free(nodeData); |
4410 | 0 | tsk_list_free(nodeIDs_processed); |
4411 | 0 | close_attr_file(&attrFile); |
4412 | 0 | return 1; |
4413 | 0 | } |
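/* Editorial sketch (not in the original source): the on-disk layout of the
 * com.apple.decmpfs attribute header that hfs_load_extended_attrs reads above.
 * The little-endian reads of compression_type and uncompressed_size in the
 * code correspond to the fields below; the struct and function names here are
 * hypothetical and used only for illustration. */
#include <stdint.h>

struct decmpfs_header_sketch {
    uint32_t magic;              /* 4-byte decmpfs magic */
    uint32_t compression_type;   /* selects inline-attribute vs. resource-fork storage */
    uint64_t uncompressed_size;  /* logical size of the decompressed file */
};

/* Read the first 16 bytes of the attribute as little-endian fields. */
static struct decmpfs_header_sketch
parse_decmpfs_header_sketch(const unsigned char *buf)
{
    struct decmpfs_header_sketch h;
    h.magic = (uint32_t) buf[0] | ((uint32_t) buf[1] << 8)
        | ((uint32_t) buf[2] << 16) | ((uint32_t) buf[3] << 24);
    h.compression_type = (uint32_t) buf[4] | ((uint32_t) buf[5] << 8)
        | ((uint32_t) buf[6] << 16) | ((uint32_t) buf[7] << 24);
    h.uncompressed_size = 0;
    for (int i = 0; i < 8; ++i)
        h.uncompressed_size |= (uint64_t) buf[8 + i] << (8 * i);
    return h;
}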
4414 | | |
4415 | | typedef struct RES_DESCRIPTOR { |
4416 | | char type[5]; // type is really 4 chars, but we will null-terminate |
4417 | | uint16_t id; |
4418 | | uint32_t offset; |
4419 | | uint32_t length; |
4420 | | char *name; // NULL if a name is not defined for this resource |
4421 | | struct RES_DESCRIPTOR *next; |
4422 | | } RES_DESCRIPTOR; |
4423 | | |
4424 | | void |
4425 | | free_res_descriptor(RES_DESCRIPTOR * rd) |
4426 | 0 | { |
4427 | 0 | RES_DESCRIPTOR *nxt; |
4428 | |
|
4429 | 0 | if (rd == NULL) |
4430 | 0 | return; |
4431 | 0 | nxt = rd->next; |
4432 | 0 | free(rd->name); |
4433 | 0 | free(rd); |
4434 | 0 | free_res_descriptor(nxt); // tail recursive |
4435 | 0 | } |
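/* Editorial note: free_res_descriptor above is tail recursive, but C and C++
 * compilers are not required to turn the tail call into a loop, so a very
 * long resource list could in principle grow the stack. A minimal iterative
 * equivalent, shown only as a sketch (it relies on the RES_DESCRIPTOR type
 * defined above and the standard free()): */
static void
free_res_descriptor_iterative(RES_DESCRIPTOR * rd)
{
    while (rd != NULL) {
        RES_DESCRIPTOR *nxt = rd->next;
        free(rd->name);
        free(rd);
        rd = nxt;
    }
}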
4436 | | |
4437 | | /** |
4438 | | * The purpose of this function is to parse the resource fork of a file, and to return |
4439 | | * a data structure that is, in effect, a table of contents for the resource fork. The |
4440 | | * data structure is a null-terminated linked list of entries. Each one describes one |
4441 | | * resource. If the resource fork is empty, or if there is not a resource fork at all, |
4442 | | * or an error occurs, this function returns NULL. |
4443 | | * |
4444 | | * A non-NULL answer should be freed by the caller, using free_res_descriptor. |
4445 | | * |
4446 | | */ |
4447 | | |
4448 | | static RES_DESCRIPTOR * |
4449 | | hfs_parse_resource_fork(TSK_FS_FILE * fs_file) |
4450 | 0 | { |
4451 | |
|
4452 | 0 | RES_DESCRIPTOR *result = NULL; |
4453 | 0 | RES_DESCRIPTOR *last = NULL; |
4454 | 0 | TSK_FS_INFO *fs_info; |
4455 | 0 | hfs_fork *fork_info; |
4456 | 0 | hfs_fork *resForkInfo; |
4457 | 0 | uint64_t resSize; |
4458 | 0 | const TSK_FS_ATTR *rAttr; |
4459 | 0 | hfs_resource_fork_header rfHeader; |
4460 | 0 | hfs_resource_fork_header *resHead; |
4461 | 0 | uint32_t dataOffset; |
4462 | 0 | uint32_t mapOffset; |
4463 | 0 | uint32_t mapLength; |
4464 | 0 | char *map; |
4465 | 0 | ssize_t attrReadResult; |
4466 | 0 | ssize_t attrReadResult1; |
4467 | 0 | ssize_t attrReadResult2; |
4468 | 0 | hfs_resource_fork_map_header *mapHdr; |
4469 | 0 | uint16_t typeListOffset; |
4470 | 0 | uint16_t nameListOffset; |
4471 | 0 | unsigned char hasNameList; |
4472 | 0 | char *nameListBegin = NULL; |
4473 | 0 | hfs_resource_type_list *typeList; |
4474 | 0 | uint16_t numTypes; |
4475 | 0 | hfs_resource_type_list_item *tlItem; |
4476 | 0 | int mindx; // index for looping over resource types |
4477 | |
|
4478 | 0 | if (fs_file == NULL) { |
4479 | 0 | error_detected(TSK_ERR_FS_ARG, |
4480 | 0 | "hfs_parse_resource_fork: null fs_file"); |
4481 | 0 | return NULL; |
4482 | 0 | } |
4483 | | |
4484 | | |
4485 | 0 | if (fs_file->meta == NULL) { |
4486 | 0 | error_detected(TSK_ERR_FS_ARG, |
4487 | 0 | "hfs_parse_resource_fork: fs_file has null metadata"); |
4488 | 0 | return NULL; |
4489 | 0 | } |
4490 | | |
4491 | 0 | if (fs_file->meta->content_ptr == NULL) { |
4492 | 0 | if (tsk_verbose) |
4493 | 0 | fprintf(stderr, |
4494 | 0 | "hfs_parse_resource_fork: fs_file has null fork data structures, so no resources.\n"); |
4495 | 0 | return NULL; |
4496 | 0 | } |
4497 | | |
4498 | | // Extract the fs |
4499 | 0 | fs_info = fs_file->fs_info; |
4500 | 0 | if (fs_info == NULL) { |
4501 | 0 | error_detected(TSK_ERR_FS_ARG, |
4502 | 0 | "hfs_parse_resource_fork: null fs within fs_info"); |
4503 | 0 | return NULL; |
4504 | 0 | } |
4505 | | |
4506 | | // Try to look at the Resource Fork for an HFS+ file |
4507 | | // Should be able to cast this to hfs_fork * |
4508 | 0 | fork_info = (hfs_fork *) fs_file->meta->content_ptr; // The data fork |
4509 | | // The resource fork is the second one. |
4510 | 0 | resForkInfo = &fork_info[1]; |
4511 | 0 | resSize = tsk_getu64(fs_info->endian, resForkInfo->logic_sz); |
4512 | | //uint32_t numBlocks = tsk_getu32(fs_info->endian, resForkInfo->total_blk); |
4513 | | //uint32_t clmpSize = tsk_getu32(fs_info->endian, resForkInfo->clmp_sz); |
4514 | | |
4515 | | // Hmm, certainly no resources here! |
4516 | 0 | if (resSize == 0) { |
4517 | 0 | return NULL; |
4518 | 0 | } |
4519 | | |
4520 | | // OK, resource size must be > 0 |
4521 | | |
4522 | | // find the attribute for the resource fork |
4523 | 0 | rAttr = |
4524 | 0 | tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_HFS_RSRC, |
4525 | 0 | HFS_FS_ATTR_ID_RSRC, TRUE); |
4526 | | |
4527 | |
|
4528 | 0 | if (rAttr == NULL) { |
4529 | 0 | error_returned |
4530 | 0 | ("hfs_parse_resource_fork: could not get the resource fork attribute"); |
4531 | 0 | return NULL; |
4532 | 0 | } |
4533 | | |
4534 | | // JUST read the resource fork header |
4535 | | |
4536 | | |
4537 | 0 | attrReadResult1 = |
4538 | 0 | tsk_fs_attr_read(rAttr, 0, (char *) &rfHeader, |
4539 | 0 | sizeof(hfs_resource_fork_header), TSK_FS_FILE_READ_FLAG_NONE); |
4540 | |
|
4541 | 0 | if (attrReadResult1 < 0 |
4542 | 0 | || attrReadResult1 != sizeof(hfs_resource_fork_header)) { |
4543 | 0 | error_returned |
4544 | 0 | (" hfs_parse_resource_fork: trying to read the resource fork header"); |
4545 | 0 | return NULL; |
4546 | 0 | } |
4547 | | |
4548 | | // Begin to parse the resource fork |
4549 | 0 | resHead = &rfHeader; |
4550 | 0 | dataOffset = tsk_getu32(fs_info->endian, resHead->dataOffset); |
4551 | 0 | mapOffset = tsk_getu32(fs_info->endian, resHead->mapOffset); |
4552 | | //uint32_t dataLength = tsk_getu32(fs_info->endian, resHead->dataLength); |
4553 | 0 | mapLength = tsk_getu32(fs_info->endian, resHead->mapLength); |
4554 | |
|
4555 | 0 | if (mapLength <= 0) { |
4556 | 0 | error_returned |
4557 | 0 | ("- hfs_parse_resource_fork: map length is 0"); |
4558 | 0 | return NULL; |
4559 | 0 | } |
4560 | | |
4561 | | // Read in the WHOLE map |
4562 | 0 | map = (char *) tsk_malloc(mapLength); |
4563 | 0 | if (map == NULL) { |
4564 | 0 | error_returned |
4565 | 0 | ("- hfs_parse_resource_fork: could not allocate space for the resource fork map"); |
4566 | 0 | return NULL; |
4567 | 0 | } |
4568 | | |
4569 | 0 | attrReadResult = |
4570 | 0 | tsk_fs_attr_read(rAttr, (uint64_t) mapOffset, map, |
4571 | 0 | (size_t) mapLength, TSK_FS_FILE_READ_FLAG_NONE); |
4572 | |
|
4573 | 0 | if (attrReadResult < 0 || attrReadResult != (ssize_t) mapLength) { |
4574 | 0 | error_returned |
4575 | 0 | ("- hfs_parse_resource_fork: could not read the map"); |
4576 | 0 | free(map); |
4577 | 0 | return NULL; |
4578 | 0 | } |
4579 | | |
4580 | 0 | mapHdr = (hfs_resource_fork_map_header *) map; |
4581 | |
|
4582 | 0 | typeListOffset = tsk_getu16(fs_info->endian, mapHdr->typeListOffset); |
4583 | |
|
4584 | 0 | nameListOffset = tsk_getu16(fs_info->endian, mapHdr->nameListOffset); |
4585 | |
|
4586 | 0 | if (nameListOffset >= mapLength || nameListOffset == 0) { |
4587 | 0 | hasNameList = FALSE; |
4588 | 0 | } |
4589 | 0 | else { |
4590 | 0 | hasNameList = TRUE; |
4591 | 0 | nameListBegin = map + nameListOffset; |
4592 | 0 | } |
4593 | |
|
4594 | 0 | typeList = (hfs_resource_type_list *) (map + typeListOffset); |
4595 | 0 | numTypes = tsk_getu16(fs_info->endian, typeList->typeCount) + 1; |
4596 | |
|
4597 | 0 | for (mindx = 0; mindx < numTypes; ++mindx) { |
4598 | 0 | uint16_t numRes; |
4599 | 0 | uint16_t refOff; |
4600 | 0 | int pindx; // index for looping over resources |
4601 | 0 | uint16_t rID; |
4602 | 0 | uint32_t rOffset; |
4603 | |
|
4604 | 0 | tlItem = &(typeList->type[mindx]); |
4605 | 0 | numRes = tsk_getu16(fs_info->endian, tlItem->count) + 1; |
4606 | 0 | refOff = tsk_getu16(fs_info->endian, tlItem->offset); |
4607 | | |
4608 | |
|
4609 | 0 | for (pindx = 0; pindx < numRes; ++pindx) { |
4610 | 0 | int16_t nameOffset; |
4611 | 0 | char *nameBuffer; |
4612 | 0 | RES_DESCRIPTOR *rsrc; |
4613 | 0 | char lenBuff[4]; // first 4 bytes of a resource encodes its length |
4614 | 0 | uint32_t rLen; // Resource length |
4615 | |
|
4616 | 0 | hfs_resource_refListItem *item = |
4617 | 0 | ((hfs_resource_refListItem *) (((uint8_t *) typeList) + |
4618 | 0 | refOff)) + pindx; |
4619 | 0 | nameOffset = tsk_gets16(fs_info->endian, item->resNameOffset); |
4620 | 0 | nameBuffer = NULL; |
4621 | |
|
4622 | 0 | if (hasNameList && nameOffset != -1) { |
4623 | 0 | char *name = nameListBegin + nameOffset; |
4624 | 0 | uint8_t nameLen = (uint8_t) name[0]; |
4625 | 0 | nameBuffer = (char*) tsk_malloc(nameLen + 1); |
4626 | 0 | if (nameBuffer == NULL) { |
4627 | 0 | error_returned |
4628 | 0 | ("hfs_parse_resource_fork: allocating space for the name of a resource"); |
4629 | 0 | free_res_descriptor(result); |
4630 | 0 | return NULL; |
4631 | 0 | } |
4632 | 0 | memcpy(nameBuffer, name + 1, nameLen); |
4633 | 0 | nameBuffer[nameLen] = (char) 0; |
4634 | 0 | } |
4635 | 0 | else { |
4636 | 0 | nameBuffer = (char*) tsk_malloc(7); |
4637 | 0 | if (nameBuffer == NULL) { |
4638 | 0 | error_returned |
4639 | 0 | ("hfs_parse_resource_fork: allocating space for the (null) name of a resource"); |
4640 | 0 | free_res_descriptor(result); |
4641 | 0 | return NULL; |
4642 | 0 | } |
4643 | 0 | memcpy(nameBuffer, "<none>", 6); |
4644 | 0 | nameBuffer[6] = (char) 0; |
4645 | 0 | } |
4646 | | |
4647 | 0 | rsrc = (RES_DESCRIPTOR *) tsk_malloc(sizeof(RES_DESCRIPTOR)); |
4648 | 0 | if (rsrc == NULL) { |
4649 | 0 | error_returned |
4650 | 0 | ("hfs_parse_resource_fork: space for a resource descriptor"); |
4651 | 0 | free_res_descriptor(result); |
4652 | 0 | return NULL; |
4653 | 0 | } |
4654 | | |
4655 | | // Build the linked list |
4656 | 0 | if (result == NULL) |
4657 | 0 | result = rsrc; |
4658 | 0 | if (last != NULL) |
4659 | 0 | last->next = rsrc; |
4660 | 0 | last = rsrc; |
4661 | 0 | rsrc->next = NULL; |
4662 | |
|
4663 | 0 | rID = tsk_getu16(fs_info->endian, item->resID); |
4664 | 0 | rOffset = |
4665 | 0 | tsk_getu24(fs_info->endian, |
4666 | 0 | item->resDataOffset) + dataOffset; |
4667 | | |
4668 | | // Just read the first four bytes of the resource to get its length. It MUST |
4669 | | // be at least 4 bytes long |
4670 | 0 | attrReadResult2 = tsk_fs_attr_read(rAttr, (uint64_t) rOffset, |
4671 | 0 | lenBuff, (size_t) 4, TSK_FS_FILE_READ_FLAG_NONE); |
4672 | |
|
4673 | 0 | if (attrReadResult2 != 4) { |
4674 | 0 | error_returned |
4675 | 0 | ("- hfs_parse_resource_fork: could not read the 4-byte length at beginning of resource"); |
4676 | 0 | free_res_descriptor(result); |
4677 | 0 | return NULL; |
4678 | 0 | } |
4679 | 0 | rLen = tsk_getu32(TSK_BIG_ENDIAN, lenBuff); //TODO |
4680 | |
|
4681 | 0 | rsrc->id = rID; |
4682 | 0 | rsrc->offset = rOffset + 4; |
4683 | 0 | memcpy(rsrc->type, tlItem->type, 4); |
4684 | 0 | rsrc->type[4] = (char) 0; |
4685 | 0 | rsrc->length = rLen; |
4686 | 0 | rsrc->name = nameBuffer; |
4687 | |
|
4688 | 0 | } // END loop over resources of one type |
4689 | |
|
4690 | 0 | } // END loop over resource types |
4691 | | |
4692 | 0 | return result; |
4693 | 0 | } |
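/* Editorial sketch: how a caller might consume the table of contents built by
 * hfs_parse_resource_fork. fs_file is assumed to be an already-opened
 * TSK_FS_FILE; everything else uses only types and functions defined above,
 * and the caller releases the list with free_res_descriptor. The helper name
 * is hypothetical. */
static void
dump_resource_fork_toc_sketch(TSK_FS_FILE * fs_file, FILE * hFile)
{
    RES_DESCRIPTOR *toc = hfs_parse_resource_fork(fs_file);

    for (RES_DESCRIPTOR *cur = toc; cur != NULL; cur = cur->next) {
        tsk_fprintf(hFile, "type=%s id=%u offset=%u length=%u name=%s\n",
            cur->type, (unsigned) cur->id, (unsigned) cur->offset,
            (unsigned) cur->length, cur->name ? cur->name : "<none>");
    }
    free_res_descriptor(toc);
}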
4694 | | |
4695 | | |
4696 | | static uint8_t |
4697 | | hfs_load_attrs(TSK_FS_FILE * fs_file) |
4698 | 0 | { |
4699 | 0 | TSK_FS_INFO *fs; |
4700 | 0 | HFS_INFO *hfs; |
4701 | 0 | TSK_FS_ATTR *fs_attr; |
4702 | 0 | TSK_FS_ATTR_RUN *attr_run; |
4703 | 0 | hfs_fork *forkx; |
4704 | 0 | unsigned char resource_fork_has_contents = FALSE; |
4705 | 0 | unsigned char compression_flag = FALSE; |
4706 | 0 | unsigned char isCompressed = FALSE; |
4707 | 0 | unsigned char compDataInRSRCFork = FALSE; |
4708 | 0 | unsigned char cmpType = 0; |
4709 | 0 | uint64_t uncompressedSize; |
4710 | 0 | uint64_t logicalSize; // of a fork |
4711 | | |
4712 | | // clean up any error messages that are lying around |
4713 | 0 | tsk_error_reset(); |
4714 | |
|
4715 | 0 | if ((fs_file == NULL) || (fs_file->meta == NULL) |
4716 | 0 | || (fs_file->fs_info == NULL)) { |
4717 | 0 | error_detected(TSK_ERR_FS_ARG, |
4718 | 0 | "hfs_load_attrs: fs_file, fs_file->meta, or fs_file->fs_info is NULL");
4719 | 0 | return 1; |
4720 | 0 | } |
4721 | | |
4722 | 0 | fs = (TSK_FS_INFO *) fs_file->fs_info; |
4723 | 0 | hfs = (HFS_INFO *) fs; |
4724 | |
|
4725 | 0 | if (tsk_verbose) |
4726 | 0 | tsk_fprintf(stderr, |
4727 | 0 | "hfs_load_attrs: Processing file %" PRIuINUM "\n", |
4728 | 0 | fs_file->meta->addr); |
4729 | | |
4730 | | |
4731 | | // see if we have already loaded the runs |
4732 | 0 | if (fs_file->meta->attr_state == TSK_FS_META_ATTR_STUDIED) { |
4733 | 0 | if (tsk_verbose) |
4734 | 0 | tsk_fprintf(stderr, |
4735 | 0 | "hfs_load_attrs: Attributes already loaded\n"); |
4736 | 0 | return 0; |
4737 | 0 | } |
4738 | 0 | else if (fs_file->meta->attr_state == TSK_FS_META_ATTR_ERROR) { |
4739 | 0 | if (tsk_verbose) |
4740 | 0 | tsk_fprintf(stderr, |
4741 | 0 | "hfs_load_attrs: Previous attempt to load attributes resulted in error\n"); |
4742 | 0 | return 1; |
4743 | 0 | } |
4744 | | |
4745 | | // Now (re)-initialize the attrlist that will hold the list of attributes |
4746 | 0 | if (fs_file->meta->attr != NULL) { |
4747 | 0 | tsk_fs_attrlist_markunused(fs_file->meta->attr); |
4748 | 0 | } |
4749 | 0 | else if (fs_file->meta->attr == NULL) { |
4750 | 0 | fs_file->meta->attr = tsk_fs_attrlist_alloc(); |
4751 | 0 | } |
4752 | | |
4753 | | /****************** EXTENDED ATTRIBUTES *******************************/ |
4754 | | // We do these first, so that we can detect the mode of compression, if |
4755 | | // any. We need to know that mode in order to handle the forks. |
4756 | |
|
4757 | 0 | if (tsk_verbose) |
4758 | 0 | tsk_fprintf(stderr, |
4759 | 0 | "hfs_load_attrs: loading the HFS+ extended attributes\n"); |
4760 | |
|
4761 | 0 | if (hfs_load_extended_attrs(fs_file, &isCompressed, |
4762 | 0 | &cmpType, &uncompressedSize)) { |
4763 | 0 | error_returned(" - hfs_load_attrs A"); |
4764 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; |
4765 | 0 | return 1; |
4766 | 0 | } |
4767 | | |
4768 | | // TODO: What about DECMPFS_TYPE_RAW_RSRC? |
4769 | 0 | switch (cmpType) { |
4770 | 0 | case DECMPFS_TYPE_ZLIB_RSRC: |
4771 | 0 | case DECMPFS_TYPE_LZVN_RSRC: |
4772 | 0 | compDataInRSRCFork = TRUE; |
4773 | 0 | break; |
4774 | 0 | default: |
4775 | 0 | compDataInRSRCFork = FALSE; |
4776 | 0 | break; |
4777 | 0 | } |
4778 | | |
4779 | 0 | if (isCompressed) { |
4780 | 0 | fs_file->meta->size = uncompressedSize; |
4781 | 0 | } |
4782 | | |
4783 | | // This is the flag indicating compression, from the Catalog File record. |
4784 | 0 | compression_flag = (fs_file->meta->flags & TSK_FS_META_FLAG_COMP) != 0; |
4785 | |
|
4786 | 0 | if (compression_flag && !isCompressed) { |
4787 | 0 | if (tsk_verbose) |
4788 | 0 | tsk_fprintf(stderr, |
4789 | 0 | "hfs_load_attrs: WARNING, HFS marks this as a" |
4790 | 0 | " compressed file, but no compression record was found.\n"); |
4791 | 0 | } |
4792 | 0 | if (isCompressed && !compression_flag) { |
4793 | 0 | if (tsk_verbose) |
4794 | 0 | tsk_fprintf(stderr, |
4795 | 0 | "hfs_load_attrs: WARNING, this file has a compression" |
4796 | 0 | " record, but the HFS compression flag is not set.\n"); |
4797 | 0 | } |
4798 | | |
4799 | | /************* FORKS (both) ************************************/ |
4800 | | |
4801 | | // Process the data and resource forks. We only do this if the |
4802 | | // fork data structures are non-null, so test that: |
4803 | 0 | if (fs_file->meta->content_ptr != NULL) { |
4804 | | |
4805 | | /************** DATA FORK STUFF ***************************/ |
4806 | | |
4807 | | // Get the data fork data-structure |
4808 | 0 | forkx = (hfs_fork *) fs_file->meta->content_ptr; |
4809 | | |
4810 | | // If this is a compressed file, then either this attribute is already loaded |
4811 | | // because the data was in the compression record, OR |
4812 | | // the compressed data is in the resource fork. We will load those runs when |
4813 | | // we handle the resource fork. |
4814 | 0 | if (!isCompressed) { |
4815 | | // We only load this attribute if this fork has non-zero length |
4816 | | // or if this is a REG or LNK file. Otherwise, we skip it.
4817 | 0 | logicalSize = tsk_getu64(fs->endian, forkx->logic_sz); |
4818 | |
|
4819 | 0 | if (logicalSize > 0 || |
4820 | 0 | fs_file->meta->type == TSK_FS_META_TYPE_REG || |
4821 | 0 | fs_file->meta->type == TSK_FS_META_TYPE_LNK) { |
4822 | | |
4823 | |
|
4824 | 0 | if (tsk_verbose) |
4825 | 0 | tsk_fprintf(stderr, |
4826 | 0 | "hfs_load_attrs: loading the data fork attribute\n"); |
4827 | | |
4828 | | // get an attribute structure to store the data in |
4829 | 0 | if ((fs_attr = tsk_fs_attrlist_getnew(fs_file->meta->attr, |
4830 | 0 | TSK_FS_ATTR_NONRES)) == NULL) { |
4831 | 0 | error_returned(" - hfs_load_attrs"); |
4832 | 0 | return 1; |
4833 | 0 | } |
4834 | | /* NOTE that fs_attr is now tied to fs_file->meta->attr. |
4835 | | * that means that we do not need to free it if we abort in the |
4836 | | * following code (and doing so will cause double free errors). */ |
4837 | | |
4838 | 0 | if (logicalSize > 0) { |
4839 | | |
4840 | | // Convert runs of blocks to the TSK internal form |
4841 | 0 | if (((attr_run = |
4842 | 0 | hfs_extents_to_attr(fs, forkx->extents, |
4843 | 0 | 0)) == NULL) |
4844 | 0 | && (tsk_error_get_errno() != 0)) { |
4845 | 0 | error_returned(" - hfs_load_attrs"); |
4846 | 0 | return 1; |
4847 | 0 | } |
4848 | | |
4849 | | |
4850 | | |
4851 | | // add the runs to the attribute and the attribute to the file. |
4852 | 0 | if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, |
4853 | 0 | "", TSK_FS_ATTR_TYPE_HFS_DATA, |
4854 | 0 | HFS_FS_ATTR_ID_DATA, logicalSize, logicalSize, |
4855 | 0 | (TSK_OFF_T) tsk_getu32(fs->endian, |
4856 | 0 | forkx->total_blk) * fs->block_size, |
4857 | 0 | TSK_FS_ATTR_FLAG_NONE, 0)) { |
4858 | 0 | error_returned(" - hfs_load_attrs (DATA)"); |
4859 | 0 | tsk_fs_attr_run_free(attr_run); |
4860 | 0 | return 1; |
4861 | 0 | } |
4862 | | |
4863 | | // see if extents file has additional runs |
4864 | 0 | if (hfs_ext_find_extent_record_attr(hfs, |
4865 | 0 | (uint32_t) fs_file->meta->addr, fs_attr, |
4866 | 0 | TRUE)) { |
4867 | 0 | error_returned(" - hfs_load_attrs B"); |
4868 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; |
4869 | 0 | return 1; |
4870 | 0 | } |
4871 | |
|
4872 | 0 | } |
4873 | 0 | else { |
4874 | | // logicalSize == 0, but this is either a REG or LNK file |
4875 | | // so, it should have a DATA fork attribute of zero length. |
4876 | 0 | if (tsk_fs_attr_set_run(fs_file, fs_attr, NULL, "", |
4877 | 0 | TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, |
4878 | 0 | 0, 0, 0, TSK_FS_ATTR_FLAG_NONE, 0)) { |
4879 | 0 | error_returned(" - hfs_load_attrs (non-file)"); |
4880 | 0 | return 1; |
4881 | 0 | } |
4882 | 0 | } |
4883 | |
|
4884 | 0 | } // END logicalSize>0 or REG or LNK file type |
4885 | 0 | } // END if not Compressed |
4886 | | |
4887 | | |
4888 | | |
4889 | | /************** RESOURCE FORK STUFF ************************************/ |
4890 | | |
4891 | | // Get the resource fork. |
4892 | | // Note that content_ptr points to an array of two
4893 | | // hfs_fork data structures, the second of which |
4894 | | // describes the blocks of the resource fork. |
4895 | | |
4896 | 0 | forkx = &((hfs_fork *) fs_file->meta->content_ptr)[1]; |
4897 | |
|
4898 | 0 | logicalSize = tsk_getu64(fs->endian, forkx->logic_sz); |
4899 | | |
4900 | | // Skip if the length of the resource fork is zero |
4901 | 0 | if (logicalSize > 0) { |
4902 | |
|
4903 | 0 | if (tsk_verbose) |
4904 | 0 | tsk_fprintf(stderr, |
4905 | 0 | "hfs_load_attrs: loading the resource fork\n"); |
4906 | |
|
4907 | 0 | resource_fork_has_contents = TRUE; |
4908 | | |
4909 | | // get an attribute structure to store the resource fork data in. We will |
4910 | | // reuse the fs_attr variable, since we are done with the data fork. |
4911 | 0 | if ((fs_attr = |
4912 | 0 | tsk_fs_attrlist_getnew(fs_file->meta->attr, |
4913 | 0 | TSK_FS_ATTR_NONRES)) == NULL) { |
4914 | 0 | error_returned(" - hfs_load_attrs (RSRC)"); |
4915 | 0 | return 1; |
4916 | 0 | } |
4917 | | /* NOTE that fs_attr is now tied to fs_file->meta->attr. |
4918 | | * that means that we do not need to free it if we abort in the |
4919 | | * following code (and doing so will cause double free errors). */ |
4920 | | |
4921 | | |
4922 | | // convert the resource fork to the TSK format |
4923 | 0 | if (((attr_run = |
4924 | 0 | hfs_extents_to_attr(fs, forkx->extents, |
4925 | 0 | 0)) == NULL) |
4926 | 0 | && (tsk_error_get_errno() != 0)) { |
4927 | 0 | error_returned(" - hfs_load_attrs"); |
4928 | 0 | return 1; |
4929 | 0 | } |
4930 | | |
4931 | | // add the runs to the attribute and the attribute to the file. |
4932 | 0 | if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, "RSRC", |
4933 | 0 | TSK_FS_ATTR_TYPE_HFS_RSRC, HFS_FS_ATTR_ID_RSRC, |
4934 | 0 | tsk_getu64(fs->endian, forkx->logic_sz), |
4935 | 0 | tsk_getu64(fs->endian, forkx->logic_sz), |
4936 | 0 | (TSK_OFF_T) tsk_getu32(fs->endian, |
4937 | 0 | forkx->total_blk) * fs->block_size, TSK_FS_ATTR_FLAG_NONE, 0)) { |
4938 | 0 | error_returned(" - hfs_load_attrs (RSRC)"); |
4939 | 0 | tsk_fs_attr_run_free(attr_run); |
4940 | 0 | return 1; |
4941 | 0 | } |
4942 | | |
4943 | | // see if extents file has additional runs for the resource fork. |
4944 | 0 | if (hfs_ext_find_extent_record_attr(hfs, |
4945 | 0 | (uint32_t) fs_file->meta->addr, fs_attr, FALSE)) { |
4946 | 0 | error_returned(" - hfs_load_attrs C"); |
4947 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; |
4948 | 0 | return 1; |
4949 | 0 | } |
4950 | | |
4951 | 0 | if (isCompressed && compDataInRSRCFork) { |
4952 | | // OK, we are going to load those same resource fork blocks as the "DATA" |
4953 | | // attribute, but will mark it as compressed. |
4954 | | // Get another attribute structure to store this data in. We again
4955 | | // reuse the fs_attr variable; the resource fork attribute set up above is already attached to the file.
4956 | 0 | if (tsk_verbose) |
4957 | 0 | tsk_fprintf(stderr, |
4958 | 0 | "File is compressed with data in the resource fork. " |
4959 | 0 | "Loading the default DATA attribute.\n"); |
4960 | 0 | if ((fs_attr = |
4961 | 0 | tsk_fs_attrlist_getnew(fs_file->meta->attr, |
4962 | 0 | TSK_FS_ATTR_NONRES)) == NULL) { |
4963 | 0 | error_returned |
4964 | 0 | (" - hfs_load_attrs (RSRC loading as DATA)"); |
4965 | 0 | return 1; |
4966 | 0 | } |
4967 | | /* NOTE that fs_attr is now tied to fs_file->meta->attr. |
4968 | | * that means that we do not need to free it if we abort in the |
4969 | | * following code (and doing so will cause double free errors). */ |
4970 | | |
4971 | 0 | switch (cmpType) { |
4972 | 0 | case DECMPFS_TYPE_ZLIB_RSRC: |
4973 | 0 | #ifdef HAVE_LIBZ |
4974 | 0 | fs_attr->w = decmpfs_attr_walk_zlib_rsrc; |
4975 | 0 | fs_attr->r = decmpfs_file_read_zlib_rsrc; |
4976 | | #else |
4977 | | // We don't have zlib, so the uncompressed data is not |
4978 | | // available to us; however, we must have a default DATA |
4979 | | // attribute, or icat will misbehave. |
4980 | | if (tsk_verbose) |
4981 | | tsk_fprintf(stderr, |
4982 | | "hfs_load_attrs: No zlib compression library, so setting a zero-length default DATA attribute.\n"); |
4983 | | |
4984 | | if (tsk_fs_attr_set_run(fs_file, fs_attr, NULL, "DATA", |
4985 | | TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, 0, |
4986 | | 0, 0, TSK_FS_ATTR_FLAG_NONE, 0)) { |
4987 | | error_returned(" - hfs_load_attrs (non-file)"); |
4988 | | return 1; |
4989 | | } |
4990 | | #endif |
4991 | 0 | break; |
4992 | | |
4993 | 0 | case DECMPFS_TYPE_LZVN_RSRC: |
4994 | |
|
4995 | 0 | fs_attr->w = decmpfs_attr_walk_lzvn_rsrc; |
4996 | 0 | fs_attr->r = decmpfs_file_read_lzvn_rsrc; |
4997 | |
|
4998 | 0 | break; |
4999 | 0 | } |
5000 | | |
5001 | | // convert the resource fork to the TSK format |
5002 | 0 | if (((attr_run = |
5003 | 0 | hfs_extents_to_attr(fs, forkx->extents, |
5004 | 0 | 0)) == NULL) |
5005 | 0 | && (tsk_error_get_errno() != 0)) { |
5006 | 0 | error_returned |
5007 | 0 | (" - hfs_load_attrs, RSRC fork as DATA fork"); |
5008 | 0 | return 1; |
5009 | 0 | } |
5010 | | |
5011 | 0 | if (tsk_verbose) |
5012 | 0 | tsk_fprintf(stderr, |
5013 | 0 | "hfs_load_attrs: Loading RSRC fork block runs as the default DATA attribute.\n"); |
5014 | | |
5015 | | // add the runs to the attribute and the attribute to the file. |
5016 | 0 | if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, "DECOMP", |
5017 | 0 | TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, |
5018 | 0 | logicalSize, |
5019 | 0 | logicalSize, |
5020 | 0 | (TSK_OFF_T) tsk_getu32(fs->endian, |
5021 | 0 | forkx->total_blk) * fs->block_size, |
5022 | 0 | (TSK_FS_ATTR_FLAG_ENUM) (TSK_FS_ATTR_COMP | TSK_FS_ATTR_NONRES), 0)) { |
5023 | 0 | error_returned |
5024 | 0 | (" - hfs_load_attrs (RSRC loading as DATA)"); |
5025 | 0 | tsk_fs_attr_run_free(attr_run); |
5026 | 0 | return 1; |
5027 | 0 | } |
5028 | | |
5029 | | // see if extents file has additional runs for the resource fork. |
5030 | 0 | if (hfs_ext_find_extent_record_attr(hfs, |
5031 | 0 | (uint32_t) fs_file->meta->addr, fs_attr, FALSE)) { |
5032 | 0 | error_returned |
5033 | 0 | (" - hfs_load_attrs (RSRC loading as DATA)");
5034 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; |
5035 | 0 | return 1; |
5036 | 0 | } |
5037 | | |
5038 | 0 | if (tsk_verbose) |
5039 | 0 | tsk_fprintf(stderr, |
5040 | 0 | "hfs_load_attrs: setting the \"special\" function pointers to inflate compressed data.\n"); |
5041 | 0 | } |
5042 | |
|
5043 | 0 | } // END resource fork size > 0 |
5044 | |
|
5045 | 0 | } // END the fork data structures are non-NULL |
5046 | | |
5047 | 0 | if (isCompressed && compDataInRSRCFork && !resource_fork_has_contents) { |
5048 | 0 | if (tsk_verbose) |
5049 | 0 | tsk_fprintf(stderr, |
5050 | 0 | "hfs_load_attrs: WARNING, compression record claims that compressed data" |
5051 | 0 | " is in the Resource Fork, but that fork is empty or non-existent.\n"); |
5052 | 0 | } |
5053 | | |
5054 | | // Finish up. |
5055 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; |
5056 | |
|
5057 | 0 | return 0; |
5058 | 0 | } |
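/* Editorial sketch: hfs_load_attrs is not called directly by users; the
 * generic TSK attribute machinery invokes it lazily the first time a file's
 * attributes are requested. A caller wanting the default (possibly
 * decompressed) data attribute of an HFS+ file would typically do something
 * like the following, mirroring the lookup used for the resource fork earlier
 * in this file. The wrapper name is hypothetical. */
static const TSK_FS_ATTR *
get_default_data_attr_sketch(TSK_FS_FILE * fs_file)
{
    /* This triggers hfs_load_attrs through the file system's
     * attribute-loading callback. */
    return tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_HFS_DATA,
        HFS_FS_ATTR_ID_DATA, TRUE);
}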
5059 | | |
5060 | | |
5061 | | /** \internal |
5062 | | * Get allocation status of file system block. |
5063 | | * adapted from IsAllocationBlockUsed from: |
5064 | | * http://developer.apple.com/technotes/tn/tn1150.html |
5065 | | * |
5066 | | * @param hfs File system being analyzed |
5067 | | * @param a_addr Block address
5068 | | * @returns 1 if allocated, 0 if not, -1 on error |
5069 | | */ |
5070 | | static int8_t |
5071 | | hfs_block_is_alloc(HFS_INFO * hfs, TSK_DADDR_T a_addr) |
5072 | 0 | { |
5073 | 0 | TSK_FS_INFO *fs = &(hfs->fs_info); |
5074 | 0 | TSK_OFF_T b; |
5075 | 0 | size_t b2; |
5076 | | |
5077 | | // lazy loading |
5078 | 0 | if (hfs->blockmap_file == NULL) { |
5079 | 0 | if ((hfs->blockmap_file = |
5080 | 0 | tsk_fs_file_open_meta(fs, NULL, |
5081 | 0 | HFS_ALLOCATION_FILE_ID)) == NULL) { |
5082 | 0 | tsk_error_errstr2_concat(" - Loading blockmap file"); |
5083 | 0 | return -1; |
5084 | 0 | } |
5085 | | |
5086 | | /* cache the data attribute */ |
5087 | 0 | hfs->blockmap_attr = |
5088 | 0 | tsk_fs_attrlist_get(hfs->blockmap_file->meta->attr, |
5089 | 0 | TSK_FS_ATTR_TYPE_DEFAULT); |
5090 | 0 | if (!hfs->blockmap_attr) { |
5091 | 0 | tsk_error_errstr2_concat |
5092 | 0 | (" - Data Attribute not found in Blockmap File"); |
5093 | 0 | return -1; |
5094 | 0 | } |
5095 | 0 | hfs->blockmap_cache_start = -1; |
5096 | 0 | hfs->blockmap_cache_len = 0; |
5097 | 0 | } |
5098 | | |
5099 | | // get the byte offset |
5100 | 0 | b = (TSK_OFF_T) a_addr / 8; |
5101 | 0 | if (b > hfs->blockmap_file->meta->size) { |
5102 | 0 | tsk_error_set_errno(TSK_ERR_FS_CORRUPT); |
5103 | 0 | tsk_error_set_errstr("hfs_block_is_alloc: block %" PRIuDADDR |
5104 | 0 | " is too large for bitmap (%" PRIdOFF ")", a_addr, |
5105 | 0 | hfs->blockmap_file->meta->size); |
5106 | 0 | return -1; |
5107 | 0 | } |
5108 | | |
5109 | | // see if it is in the cache |
5110 | 0 | if ((hfs->blockmap_cache_start == -1) |
5111 | 0 | || (hfs->blockmap_cache_start > b) |
5112 | 0 | || (hfs->blockmap_cache_start + hfs->blockmap_cache_len <= (size_t) b)) { |
5113 | 0 | ssize_t cnt = tsk_fs_attr_read(hfs->blockmap_attr, b, |
5114 | 0 | hfs->blockmap_cache, |
5115 | 0 | sizeof(hfs->blockmap_cache), TSK_FS_FILE_READ_FLAG_NONE); |
5116 | 0 | if (cnt < 1) { |
5117 | 0 | tsk_error_set_errstr2 |
5118 | 0 | ("hfs_block_is_alloc: Error reading block bitmap at offset %" |
5119 | 0 | PRIdOFF, b); |
5120 | 0 | return -1; |
5121 | 0 | } |
5122 | 0 | hfs->blockmap_cache_start = b; |
5123 | 0 | hfs->blockmap_cache_len = cnt; |
5124 | 0 | } |
5125 | 0 | b2 = (size_t) (b - hfs->blockmap_cache_start); |
5126 | 0 | return (hfs->blockmap_cache[b2] & (1 << (7 - (a_addr % 8)))) != 0; |
5127 | 0 | } |
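/* Editorial sketch of the bit arithmetic used by hfs_block_is_alloc above:
 * the Allocation file is a plain bitmap with one bit per allocation block,
 * most significant bit first within each byte. Given the raw bitmap bytes,
 * the test reduces to the following standalone fragment (<stdint.h> assumed;
 * the function name is hypothetical): */
static int
bitmap_bit_is_set_sketch(const uint8_t * bitmap, uint64_t block)
{
    uint64_t byte_off = block / 8;              /* which byte holds the bit   */
    unsigned bit = 7 - (unsigned) (block % 8);  /* MSB-first within that byte */
    return (bitmap[byte_off] >> bit) & 1;
}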
5128 | | |
5129 | | |
5130 | | TSK_FS_BLOCK_FLAG_ENUM |
5131 | | hfs_block_getflags(TSK_FS_INFO * a_fs, TSK_DADDR_T a_addr) |
5132 | 0 | { |
5133 | 0 | return (hfs_block_is_alloc((HFS_INFO *) a_fs, a_addr) == 1) ? |
5134 | 0 | TSK_FS_BLOCK_FLAG_ALLOC : TSK_FS_BLOCK_FLAG_UNALLOC; |
5135 | 0 | } |
5136 | | |
5137 | | |
5138 | | static uint8_t |
5139 | | hfs_block_walk(TSK_FS_INFO * fs, TSK_DADDR_T start_blk, |
5140 | | TSK_DADDR_T end_blk, TSK_FS_BLOCK_WALK_FLAG_ENUM flags, |
5141 | | TSK_FS_BLOCK_WALK_CB action, void *ptr) |
5142 | 0 | { |
5143 | 0 | const char *myname = "hfs_block_walk"; |
5144 | 0 | HFS_INFO *hfs = (HFS_INFO *) fs; |
5145 | 0 | TSK_FS_BLOCK *fs_block; |
5146 | 0 | TSK_DADDR_T addr; |
5147 | |
|
5148 | 0 | if (tsk_verbose) |
5149 | 0 | tsk_fprintf(stderr, |
5150 | 0 | "%s: start_blk: %" PRIuDADDR " end_blk: %" |
5151 | 0 | PRIuDADDR " flags: %" PRIu32 "\n", myname, start_blk, end_blk, |
5152 | 0 | flags); |
5153 | | |
5154 | | // clean up any error messages that are lying around |
5155 | 0 | tsk_error_reset(); |
5156 | | |
5157 | | /* |
5158 | | * Sanity checks. |
5159 | | */ |
5160 | 0 | if (start_blk < fs->first_block || start_blk > fs->last_block) { |
5161 | 0 | tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); |
5162 | 0 | tsk_error_set_errstr("%s: invalid start block number: %" PRIuDADDR |
5163 | 0 | "", myname, start_blk); |
5164 | 0 | return 1; |
5165 | 0 | } |
5166 | 0 | if (end_blk < fs->first_block || end_blk > fs->last_block) { |
5167 | 0 | tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); |
5168 | 0 | tsk_error_set_errstr("%s: invalid last block number: %" PRIuDADDR |
5169 | 0 | "", myname, end_blk); |
5170 | 0 | return 1; |
5171 | 0 | } |
5172 | | |
5173 | 0 | if (start_blk > end_blk) |
5174 | 0 | XSWAP(start_blk, end_blk); |
5175 | | |
5176 | | /* Sanity check on flags -- make sure at least one ALLOC is set */ |
5177 | 0 | if (((flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC) == 0) && |
5178 | 0 | ((flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC) == 0)) { |
5179 | 0 | flags = (TSK_FS_BLOCK_WALK_FLAG_ENUM) |
5180 | 0 | (flags | TSK_FS_BLOCK_WALK_FLAG_ALLOC | |
5181 | 0 | TSK_FS_BLOCK_WALK_FLAG_UNALLOC); |
5182 | 0 | } |
5183 | 0 | if (((flags & TSK_FS_BLOCK_WALK_FLAG_META) == 0) && |
5184 | 0 | ((flags & TSK_FS_BLOCK_WALK_FLAG_CONT) == 0)) { |
5185 | 0 | flags = (TSK_FS_BLOCK_WALK_FLAG_ENUM) |
5186 | 0 | (flags | TSK_FS_BLOCK_WALK_FLAG_CONT | TSK_FS_BLOCK_WALK_FLAG_META); |
5187 | 0 | } |
5188 | |
|
5189 | 0 | if ((fs_block = tsk_fs_block_alloc(fs)) == NULL) { |
5190 | 0 | return 1; |
5191 | 0 | } |
5192 | | |
5193 | | /* |
5194 | | * Iterate |
5195 | | */ |
5196 | 0 | for (addr = start_blk; addr <= end_blk; ++addr) { |
5197 | 0 | int retval; |
5198 | 0 | int myflags; |
5199 | | |
5200 | | /* identify if the block is allocated or not */ |
5201 | 0 | myflags = hfs_block_is_alloc(hfs, addr) ? |
5202 | 0 | TSK_FS_BLOCK_FLAG_ALLOC : TSK_FS_BLOCK_FLAG_UNALLOC; |
5203 | | |
5204 | | // test if we should call the callback with this one |
5205 | 0 | if ((myflags & TSK_FS_BLOCK_FLAG_ALLOC) |
5206 | 0 | && (!(flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC))) |
5207 | 0 | continue; |
5208 | 0 | else if ((myflags & TSK_FS_BLOCK_FLAG_UNALLOC) |
5209 | 0 | && (!(flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC))) |
5210 | 0 | continue; |
5211 | | |
5212 | 0 | if (flags & TSK_FS_BLOCK_WALK_FLAG_AONLY) |
5213 | 0 | myflags |= TSK_FS_BLOCK_FLAG_AONLY; |
5214 | |
|
5215 | 0 | if (tsk_fs_block_get_flag(fs, fs_block, addr, |
5216 | 0 | (TSK_FS_BLOCK_FLAG_ENUM) myflags) == NULL) { |
5217 | 0 | tsk_fs_block_free(fs_block); |
5218 | 0 | return 1; |
5219 | 0 | } |
5220 | | |
5221 | 0 | retval = action(fs_block, ptr); |
5222 | 0 | if (TSK_WALK_STOP == retval) { |
5223 | 0 | break; |
5224 | 0 | } |
5225 | 0 | else if (TSK_WALK_ERROR == retval) { |
5226 | 0 | tsk_fs_block_free(fs_block); |
5227 | 0 | return 1; |
5228 | 0 | } |
5229 | 0 | } |
5230 | | |
5231 | 0 | tsk_fs_block_free(fs_block); |
5232 | 0 | return 0; |
5233 | 0 | } |
5234 | | |
5235 | | |
5236 | | uint8_t |
5237 | | hfs_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T start_inum, |
5238 | | TSK_INUM_T end_inum, TSK_FS_META_FLAG_ENUM flags, |
5239 | | TSK_FS_META_WALK_CB action, void *ptr) |
5240 | 0 | { |
5241 | 0 | TSK_INUM_T inum; |
5242 | |
|
5243 | 0 | if (tsk_verbose) |
5244 | 0 | tsk_fprintf(stderr, |
5245 | 0 | "hfs_inode_walk: start_inum: %" PRIuINUM " end_inum: %" |
5246 | 0 | PRIuINUM " flags: %" PRIu32 "\n", start_inum, end_inum, flags); |
5247 | | |
5248 | | /* |
5249 | | * Sanity checks. |
5250 | | */ |
5251 | 0 | if (start_inum < fs->first_inum || start_inum > fs->last_inum) { |
5252 | 0 | tsk_error_reset(); |
5253 | 0 | tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); |
5254 | 0 | tsk_error_set_errstr("inode_walk: Start inode: %" PRIuINUM "", |
5255 | 0 | start_inum); |
5256 | 0 | return 1; |
5257 | 0 | } |
5258 | 0 | else if (end_inum < fs->first_inum || end_inum > fs->last_inum |
5259 | 0 | || end_inum < start_inum) { |
5260 | 0 | tsk_error_reset(); |
5261 | 0 | tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); |
5262 | 0 | tsk_error_set_errstr("inode_walk: End inode: %" PRIuINUM "", |
5263 | 0 | end_inum); |
5264 | 0 | return 1; |
5265 | 0 | } |
5266 | | |
5267 | | /* If ORPHAN is wanted, then make sure that the flags are correct */ |
5268 | 0 | if (flags & TSK_FS_META_FLAG_ORPHAN) { |
5269 | 0 | flags = (TSK_FS_META_FLAG_ENUM) (flags | TSK_FS_META_FLAG_UNALLOC); |
5270 | 0 | flags = (TSK_FS_META_FLAG_ENUM) (flags & ~TSK_FS_META_FLAG_ALLOC); |
5271 | 0 | flags = (TSK_FS_META_FLAG_ENUM) (flags | TSK_FS_META_FLAG_USED); |
5272 | 0 | flags = (TSK_FS_META_FLAG_ENUM) (flags & ~TSK_FS_META_FLAG_UNUSED); |
5273 | 0 | } |
5274 | | |
5275 | 0 | else { |
5276 | 0 | if (((flags & TSK_FS_META_FLAG_ALLOC) == 0) && |
5277 | 0 | ((flags & TSK_FS_META_FLAG_UNALLOC) == 0)) { |
5278 | 0 | flags = (TSK_FS_META_FLAG_ENUM) (flags | TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_UNALLOC); |
5279 | 0 | } |
5280 | | |
5281 | | /* If neither of the USED or UNUSED flags are set, then set them |
5282 | | * both |
5283 | | */ |
5284 | 0 | if (((flags & TSK_FS_META_FLAG_USED) == 0) && |
5285 | 0 | ((flags & TSK_FS_META_FLAG_UNUSED) == 0)) { |
5286 | 0 | flags = (TSK_FS_META_FLAG_ENUM) (flags | TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_UNUSED); |
5287 | 0 | } |
5288 | 0 | } |
5289 | |
|
5290 | 0 | std::unique_ptr<TSK_FS_FILE, decltype(&tsk_fs_file_close)> fs_file{ |
5291 | 0 | tsk_fs_file_alloc(fs), |
5292 | 0 | tsk_fs_file_close |
5293 | 0 | }; |
5294 | |
|
5295 | 0 | if (!fs_file) { |
5296 | 0 | return 1; |
5297 | 0 | } |
5298 | | |
5299 | 0 | if ((fs_file->meta = tsk_fs_meta_alloc(HFS_FILE_CONTENT_LEN)) == NULL) { |
5300 | 0 | return 1; |
5301 | 0 | } |
5302 | | |
5303 | 0 | if (start_inum > end_inum) |
5304 | 0 | XSWAP(start_inum, end_inum); |
5305 | |
|
5306 | 0 | for (inum = start_inum; inum <= end_inum; ++inum) { |
5307 | 0 | int retval; |
5308 | |
|
5309 | 0 | if (hfs_inode_lookup(fs, fs_file.get(), inum)) { |
5310 | | // deleted files may not exist in the catalog |
5311 | 0 | if (tsk_error_get_errno() == TSK_ERR_FS_INODE_NUM) { |
5312 | 0 | tsk_error_reset(); |
5313 | 0 | continue; |
5314 | 0 | } |
5315 | 0 | else { |
5316 | 0 | return 1; |
5317 | 0 | } |
5318 | 0 | } |
5319 | | |
5320 | 0 | if ((fs_file->meta->flags & flags) != fs_file->meta->flags) |
5321 | 0 | continue; |
5322 | | |
5323 | | /* call action */ |
5324 | 0 | retval = action(fs_file.get(), ptr); |
5325 | 0 | if (retval == TSK_WALK_STOP) { |
5326 | 0 | return 0; |
5327 | 0 | } |
5328 | 0 | else if (retval == TSK_WALK_ERROR) { |
5329 | 0 | return 1; |
5330 | 0 | } |
5331 | 0 | } |
5332 | | |
5333 | 0 | return 0; |
5334 | 0 | } |
5335 | | |
5336 | | /* return the name of a file at a given inode |
5337 | | * in a newly-allocated string, or NULL on error |
5338 | | */ |
5339 | | char * |
5340 | | hfs_get_inode_name(TSK_FS_INFO * fs, TSK_INUM_T inum) |
5341 | 0 | { |
5342 | 0 | HFS_INFO *hfs = (HFS_INFO *) fs; |
5343 | 0 | HFS_ENTRY entry; |
5344 | 0 | char *fn = NULL; |
5345 | |
|
5346 | 0 | if (hfs_cat_file_lookup(hfs, inum, &entry, FALSE)) |
5347 | 0 | return NULL; |
5348 | | |
5349 | 0 | fn = (char*) malloc(HFS_MAXNAMLEN + 1); |
5350 | 0 | if (fn == NULL) |
5351 | 0 | return NULL; |
5352 | | |
5353 | 0 | if (hfs_UTF16toUTF8(fs, entry.thread.name.unicode, |
5354 | 0 | tsk_getu16(fs->endian, entry.thread.name.length), fn, |
5355 | 0 | HFS_MAXNAMLEN + 1, HFS_U16U8_FLAG_REPLACE_SLASH)) { |
5356 | 0 | free(fn); |
5357 | 0 | return NULL; |
5358 | 0 | } |
5359 | | |
5360 | 0 | return fn; |
5361 | 0 | } |
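/* Editorial sketch: typical use of hfs_get_inode_name. The caller owns the
 * returned string and must free() it; the helper name below is hypothetical. */
static void
print_name_of_inum_sketch(TSK_FS_INFO * fs, TSK_INUM_T inum, FILE * hFile)
{
    char *name = hfs_get_inode_name(fs, inum);
    if (name != NULL) {
        tsk_fprintf(hFile, "%s\n", name);
        free(name);
    }
}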
5362 | | |
5363 | | /* print the name of a file at a given inode |
5364 | | * returns 0 on success, 1 on error */ |
5365 | | static uint8_t |
5366 | | print_inode_name(FILE * hFile, TSK_FS_INFO * fs, TSK_INUM_T inum) |
5367 | 0 | { |
5368 | 0 | HFS_INFO *hfs = (HFS_INFO *) fs; |
5369 | 0 | char fn[HFS_MAXNAMLEN + 1]; |
5370 | 0 | HFS_ENTRY entry; |
5371 | |
|
5372 | 0 | if (hfs_cat_file_lookup(hfs, inum, &entry, FALSE)) |
5373 | 0 | return 1; |
5374 | | |
5375 | 0 | if (hfs_UTF16toUTF8(fs, entry.thread.name.unicode, |
5376 | 0 | tsk_getu16(fs->endian, entry.thread.name.length), fn, |
5377 | 0 | HFS_MAXNAMLEN + 1, HFS_U16U8_FLAG_REPLACE_SLASH)) |
5378 | 0 | return 1; |
5379 | | |
5380 | 0 | tsk_fprintf(hFile, "%s", fn); |
5381 | |
|
5382 | 0 | return 0; |
5383 | 0 | } |
5384 | | |
5385 | | /* Tail-recursive function to print a path: prints the parent path, then
5386 | | * appends '/' and the name of the given inode. Prints nothing for the root.
5387 | | * Returns 0 on success, 1 on failure.
5388 | | */ |
5389 | | static uint8_t |
5390 | | print_parent_path(FILE * hFile, TSK_FS_INFO * fs, TSK_INUM_T inum) |
5391 | 0 | { |
5392 | 0 | HFS_INFO *hfs = (HFS_INFO *) fs; |
5393 | 0 | char fn[HFS_MAXNAMLEN + 1]; |
5394 | 0 | HFS_ENTRY entry; |
5395 | |
|
5396 | 0 | if (inum == HFS_ROOT_INUM) |
5397 | 0 | return 0; |
5398 | | |
5399 | 0 | if (inum <= HFS_ROOT_INUM) { |
5400 | 0 | tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); |
5401 | 0 | tsk_error_set_errstr("print_parent_path: out-of-range inode %" |
5402 | 0 | PRIuINUM, inum); |
5403 | 0 | return 1; |
5404 | 0 | } |
5405 | | |
5406 | 0 | if (hfs_cat_file_lookup(hfs, inum, &entry, FALSE)) |
5407 | 0 | return 1; |
5408 | | |
5409 | 0 | if (hfs_UTF16toUTF8(fs, entry.thread.name.unicode, |
5410 | 0 | tsk_getu16(fs->endian, entry.thread.name.length), fn, |
5411 | 0 | HFS_MAXNAMLEN + 1, |
5412 | 0 | HFS_U16U8_FLAG_REPLACE_SLASH | HFS_U16U8_FLAG_REPLACE_CONTROL)) |
5413 | 0 | return 1; |
5414 | | |
5415 | 0 | if (print_parent_path(hFile, fs, (TSK_INUM_T) tsk_getu32(fs->endian, |
5416 | 0 | entry.thread.parent_cnid))) |
5417 | 0 | return 1; |
5418 | | |
5419 | 0 | tsk_fprintf(hFile, "/%s", fn); |
5420 | 0 | return 0; |
5421 | 0 | } |
5422 | | |
5423 | | /* print the file name corresponding to an inode, in brackets after a space. |
5424 | | * uses Unix path conventions, and does not include the volume name. |
5425 | | * returns 0 on success, 1 on failure |
5426 | | */ |
5427 | | static uint8_t |
5428 | | print_inode_file(FILE * hFile, TSK_FS_INFO * fs, TSK_INUM_T inum) |
5429 | 0 | { |
5430 | 0 | tsk_fprintf(hFile, " ["); |
5431 | 0 | if (inum == HFS_ROOT_INUM) |
5432 | 0 | tsk_fprintf(hFile, "/"); |
5433 | 0 | else { |
5434 | 0 | if (print_parent_path(hFile, fs, inum)) { |
5435 | 0 | tsk_fprintf(hFile, "unknown]"); |
5436 | 0 | return 1; |
5437 | 0 | } |
5438 | 0 | } |
5439 | 0 | tsk_fprintf(hFile, "]"); |
5440 | 0 | return 0; |
5441 | 0 | } |
5442 | | |
5443 | | static uint8_t |
5444 | | hfs_fscheck( |
5445 | | [[maybe_unused]] TSK_FS_INFO * fs, |
5446 | | [[maybe_unused]] FILE * hFile) |
5447 | 0 | { |
5448 | 0 | tsk_error_reset(); |
5449 | 0 | tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); |
5450 | 0 | tsk_error_set_errstr("fscheck not implemented for HFS yet"); |
5451 | 0 | return 1; |
5452 | 0 | } |
5453 | | |
5454 | | |
5455 | | static uint8_t |
5456 | | hfs_fsstat(TSK_FS_INFO * fs, FILE * hFile) |
5457 | 0 | { |
5458 | | // char *myname = "hfs_fsstat"; |
5459 | 0 | HFS_INFO *hfs = (HFS_INFO *) fs; |
5460 | 0 | hfs_plus_vh *sb = hfs->fs; |
5461 | 0 | time_t mac_time; |
5462 | 0 | TSK_INUM_T inode; |
5463 | 0 | char timeBuf[128]; |
5464 | |
|
5465 | 0 | if (tsk_verbose) |
5466 | 0 | tsk_fprintf(stderr, "hfs_fsstat: called\n");
5467 | |
|
5468 | 0 | tsk_fprintf(hFile, "FILE SYSTEM INFORMATION\n"); |
5469 | 0 | tsk_fprintf(hFile, "--------------------------------------------\n"); |
5470 | |
|
5471 | 0 | tsk_fprintf(hFile, "File System Type: "); |
5472 | 0 | if (tsk_getu16(fs->endian, hfs->fs->signature) == HFS_VH_SIG_HFSPLUS) |
5473 | 0 | tsk_fprintf(hFile, "HFS+\n"); |
5474 | 0 | else if (tsk_getu16(fs->endian, hfs->fs->signature) == HFS_VH_SIG_HFSX) |
5475 | 0 | tsk_fprintf(hFile, "HFSX\n"); |
5476 | 0 | else |
5477 | 0 | tsk_fprintf(hFile, "Unknown\n"); |
5478 | | |
5479 | | // print name and number of version |
5480 | 0 | tsk_fprintf(hFile, "File System Version: "); |
5481 | 0 | switch (tsk_getu16(fs->endian, hfs->fs->version)) { |
5482 | 0 | case 4: |
5483 | 0 | tsk_fprintf(hFile, "HFS+\n"); |
5484 | 0 | break; |
5485 | 0 | case 5: |
5486 | 0 | tsk_fprintf(hFile, "HFSX\n"); |
5487 | 0 | break; |
5488 | 0 | default: |
5489 | 0 | tsk_fprintf(hFile, "Unknown (%" PRIu16 ")\n", |
5490 | 0 | tsk_getu16(fs->endian, hfs->fs->version)); |
5491 | 0 | break; |
5492 | 0 | } |
5493 | | |
5494 | 0 | if (tsk_getu16(fs->endian, hfs->fs->signature) == HFS_VH_SIG_HFSX) { |
5495 | 0 | tsk_fprintf(hFile, "Case Sensitive: %s\n", |
5496 | 0 | hfs->is_case_sensitive ? "yes" : "no"); |
5497 | 0 | } |
5498 | |
|
5499 | 0 | if (hfs->hfs_wrapper_offset > 0) { |
5500 | 0 | tsk_fprintf(hFile, |
5501 | 0 | "File system is embedded in an HFS wrapper at offset %" PRIdOFF |
5502 | 0 | "\n", hfs->hfs_wrapper_offset); |
5503 | 0 | } |
5504 | |
|
5505 | 0 | tsk_fprintf(hFile, "\nVolume Name: "); |
5506 | 0 | if (print_inode_name(hFile, fs, HFS_ROOT_INUM)) |
5507 | 0 | return 1; |
5508 | 0 | tsk_fprintf(hFile, "\n"); |
5509 | |
|
5510 | 0 | tsk_fprintf(hFile, "Volume Identifier: %08" PRIx32 "%08" PRIx32 "\n", |
5511 | 0 | tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_ID1]), |
5512 | 0 | tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_ID2])); |
5513 | | |
5514 | | |
5515 | | // print last mounted info |
5516 | 0 | tsk_fprintf(hFile, "\nLast Mounted By: "); |
5517 | 0 | if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_HFSPLUS) |
5518 | 0 | tsk_fprintf(hFile, "Mac OS X\n"); |
5519 | 0 | else if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_HFSJ) |
5520 | 0 | tsk_fprintf(hFile, "Mac OS X, Journaled\n"); |
5521 | 0 | else if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_FSK) |
5522 | 0 | tsk_fprintf(hFile, "failed journal replay\n"); |
5523 | 0 | else if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_FSCK) |
5524 | 0 | tsk_fprintf(hFile, "fsck_hfs\n"); |
5525 | 0 | else if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_OS89) |
5526 | 0 | tsk_fprintf(hFile, "Mac OS 8.1 - 9.2.2\n"); |
5527 | 0 | else |
5528 | 0 | tsk_fprintf(hFile, "Unknown (%" PRIx32 ")\n",
5529 | 0 | tsk_getu32(fs->endian, sb->last_mnt_ver)); |
5530 | | |
5531 | | /* State of the file system */ |
5532 | 0 | if ((tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_UNMOUNTED) |
5533 | 0 | && (!(tsk_getu32(fs->endian, |
5534 | 0 | hfs->fs->attr) & HFS_VH_ATTR_INCONSISTENT))) |
5535 | 0 | tsk_fprintf(hFile, "Volume Unmounted Properly\n"); |
5536 | 0 | else |
5537 | 0 | tsk_fprintf(hFile, "Volume Unmounted Improperly\n"); |
5538 | |
|
5539 | 0 | tsk_fprintf(hFile, "Mount Count: %" PRIu32 "\n", |
5540 | 0 | tsk_getu32(fs->endian, sb->write_cnt)); |
5541 | | |
5542 | | |
5543 | | // Dates |
5544 | | // (creation date is in local time zone, not UTC, according to TN 1150) |
5545 | 0 | mac_time = |
5546 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, hfs->fs->cr_date)); |
5547 | 0 | tsk_fprintf(hFile, "\nCreation Date: \t%s\n", |
5548 | 0 | tsk_fs_time_to_str(mktime(gmtime(&mac_time)), timeBuf)); |
5549 | |
|
5550 | 0 | mac_time = |
5551 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, hfs->fs->m_date)); |
5552 | 0 | tsk_fprintf(hFile, "Last Written Date: \t%s\n", |
5553 | 0 | tsk_fs_time_to_str(mac_time, timeBuf)); |
5554 | |
|
5555 | 0 | mac_time = |
5556 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, |
5557 | 0 | hfs->fs->bkup_date)); |
5558 | 0 | tsk_fprintf(hFile, "Last Backup Date: \t%s\n", |
5559 | 0 | tsk_fs_time_to_str(mac_time, timeBuf)); |
5560 | |
|
5561 | 0 | mac_time = |
5562 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, hfs->fs->chk_date)); |
5563 | 0 | tsk_fprintf(hFile, "Last Checked Date: \t%s\n", |
5564 | 0 | tsk_fs_time_to_str(mac_time, timeBuf)); |
5565 | | |
5566 | |
|
5567 | 0 | if (tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_SOFTWARE_LOCK) |
5568 | 0 | tsk_fprintf(hFile, "Software write protect enabled\n"); |
5569 | | |
5570 | | /* Print journal information */ |
5571 | 0 | if (tsk_getu32(fs->endian, sb->attr) & HFS_VH_ATTR_JOURNALED) { |
5572 | 0 | tsk_fprintf(hFile, "\nJournal Info Block: %" PRIu32 "\n", |
5573 | 0 | tsk_getu32(fs->endian, sb->jinfo_blk)); |
5574 | 0 | } |
5575 | |
|
5576 | 0 | tsk_fprintf(hFile, "\nMETADATA INFORMATION\n"); |
5577 | 0 | tsk_fprintf(hFile, "--------------------------------------------\n"); |
5578 | |
|
5579 | 0 | tsk_fprintf(hFile, "Range: %" PRIuINUM " - %" PRIuINUM "\n", |
5580 | 0 | fs->first_inum, fs->last_inum); |
5581 | |
|
5582 | 0 | inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_BOOT]); |
5583 | 0 | tsk_fprintf(hFile, "Bootable Folder ID: %" PRIuINUM, inode); |
5584 | 0 | if (inode > 0) |
5585 | 0 | print_inode_file(hFile, fs, inode); |
5586 | 0 | tsk_fprintf(hFile, "\n"); |
5587 | |
|
5588 | 0 | inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_START]); |
5589 | 0 | tsk_fprintf(hFile, "Startup App ID: %" PRIuINUM, inode); |
5590 | 0 | if (inode > 0) |
5591 | 0 | print_inode_file(hFile, fs, inode); |
5592 | 0 | tsk_fprintf(hFile, "\n"); |
5593 | |
|
5594 | 0 | inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_OPEN]); |
5595 | 0 | tsk_fprintf(hFile, "Startup Open Folder ID: %" PRIuINUM, inode); |
5596 | 0 | if (inode > 0) |
5597 | 0 | print_inode_file(hFile, fs, inode); |
5598 | 0 | tsk_fprintf(hFile, "\n"); |
5599 | |
|
5600 | 0 | inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_BOOT9]); |
5601 | 0 | tsk_fprintf(hFile, "Mac OS 8/9 Blessed System Folder ID: %" PRIuINUM, |
5602 | 0 | inode); |
5603 | 0 | if (inode > 0) |
5604 | 0 | print_inode_file(hFile, fs, inode); |
5605 | 0 | tsk_fprintf(hFile, "\n"); |
5606 | |
|
5607 | 0 | inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_BOOTX]); |
5608 | 0 | tsk_fprintf(hFile, "Mac OS X Blessed System Folder ID: %" PRIuINUM, |
5609 | 0 | inode); |
5610 | 0 | if (inode > 0) |
5611 | 0 | print_inode_file(hFile, fs, inode); |
5612 | 0 | tsk_fprintf(hFile, "\n"); |
5613 | |
|
5614 | 0 | tsk_fprintf(hFile, "Number of files: %" PRIu32 "\n", |
5615 | 0 | tsk_getu32(fs->endian, sb->file_cnt)); |
5616 | 0 | tsk_fprintf(hFile, "Number of folders: %" PRIu32 "\n", |
5617 | 0 | tsk_getu32(fs->endian, sb->fldr_cnt)); |
5618 | | |
5619 | |
|
5620 | 0 | tsk_fprintf(hFile, "\nCONTENT INFORMATION\n"); |
5621 | 0 | tsk_fprintf(hFile, "--------------------------------------------\n"); |
5622 | |
|
5623 | 0 | tsk_fprintf(hFile, "Block Range: %" PRIuDADDR " - %" PRIuDADDR "\n", |
5624 | 0 | fs->first_block, fs->last_block); |
5625 | |
|
5626 | 0 | if (fs->last_block != fs->last_block_act) |
5627 | 0 | tsk_fprintf(hFile, |
5628 | 0 | "Total Range in Image: %" PRIuDADDR " - %" PRIuDADDR "\n", |
5629 | 0 | fs->first_block, fs->last_block_act); |
5630 | |
|
5631 | 0 | tsk_fprintf(hFile, "Allocation Block Size: %u\n", fs->block_size); |
5632 | |
|
5633 | 0 | tsk_fprintf(hFile, "Number of Free Blocks: %" PRIu32 "\n", |
5634 | 0 | tsk_getu32(fs->endian, sb->free_blks)); |
5635 | |
|
5636 | 0 | if (tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_BADBLOCKS) |
5637 | 0 | tsk_fprintf(hFile, "Volume has bad blocks\n"); |
5638 | |
|
5639 | 0 | return 0; |
5640 | 0 | } |
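/* Editorial sketch (stated as an assumption, not taken from this file): the
 * hfs_convert_2_unix_time calls used for the date fields above convert an
 * HFS+ timestamp, an unsigned count of seconds since 1904-01-01 00:00:00,
 * into a Unix time_t (seconds since 1970-01-01). The two epochs are
 * 2082844800 seconds apart, so the conversion is essentially: */
static time_t
hfs_time_to_unix_sketch(uint32_t hfs_seconds)
{
    /* Timestamps before 1970 come out negative here; the real helper may
     * clamp or handle them differently. */
    return (time_t) hfs_seconds - (time_t) 2082844800u;
}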
5641 | | |
5642 | | |
5643 | | /************************* istat *******************************/ |
5644 | | |
5645 | | |
5646 | | /** |
5647 | | * Text encoding names defined in TN1150, Table 2. |
5648 | | */ |
5649 | | static const char * |
5650 | | text_encoding_name(uint32_t enc) |
5651 | 0 | { |
5652 | 0 | switch (enc) { |
5653 | 0 | case 0: |
5654 | 0 | return "MacRoman"; |
5655 | 0 | case 1: |
5656 | 0 | return "MacJapanese"; |
5657 | 0 | case 2: |
5658 | 0 | return "MacChineseTrad"; |
5659 | 0 | case 4: |
5660 | 0 | return "MacKorean"; |
5661 | 0 | case 5: |
5662 | 0 | return "MacArabic"; |
5663 | 0 | case 6: |
5664 | 0 | return "MacHebrew"; |
5665 | 0 | case 7: |
5666 | 0 | return "MacGreek"; |
5667 | 0 | case 8: |
5668 | 0 | return "MacCyrillic"; |
5669 | 0 | case 9: |
5670 | 0 | return "MacDevanagari"; |
5671 | 0 | case 10: |
5672 | 0 | return "MacGurmukhi"; |
5673 | 0 | case 11: |
5674 | 0 | return "MacGujarati"; |
5675 | 0 | case 12: |
5676 | 0 | return "MacOriya"; |
5677 | 0 | case 13: |
5678 | 0 | return "MacBengali"; |
5679 | 0 | case 14: |
5680 | 0 | return "MacTamil"; |
5681 | 0 | case 15: |
5682 | 0 | return "Telugu"; |
5683 | 0 | case 16: |
5684 | 0 | return "MacKannada"; |
5685 | 0 | case 17: |
5686 | 0 | return "MacMalayalam"; |
5687 | 0 | case 18: |
5688 | 0 | return "MacSinhalese"; |
5689 | 0 | case 19: |
5690 | 0 | return "MacBurmese"; |
5691 | 0 | case 20: |
5692 | 0 | return "MacKhmer"; |
5693 | 0 | case 21: |
5694 | 0 | return "MacThai"; |
5695 | 0 | case 22: |
5696 | 0 | return "MacLaotian"; |
5697 | 0 | case 23: |
5698 | 0 | return "MacGeorgian"; |
5699 | 0 | case 24: |
5700 | 0 | return "MacArmenian"; |
5701 | 0 | case 25: |
5702 | 0 | return "MacChineseSimp"; |
5703 | 0 | case 26: |
5704 | 0 | return "MacTibetan"; |
5705 | 0 | case 27: |
5706 | 0 | return "MacMongolian"; |
5707 | 0 | case 28: |
5708 | 0 | return "MacEthiopic"; |
5709 | 0 | case 29: |
5710 | 0 | return "MacCentralEurRoman"; |
5711 | 0 | case 30: |
5712 | 0 | return "MacVietnamese"; |
5713 | 0 | case 31: |
5714 | 0 | return "MacExtArabic"; |
5715 | 0 | case 33: |
5716 | 0 | return "MacSymbol"; |
5717 | 0 | case 34: |
5718 | 0 | return "MacDingbats"; |
5719 | 0 | case 35: |
5720 | 0 | return "MacTurkish"; |
5721 | 0 | case 36: |
5722 | 0 | return "MacCroatian"; |
5723 | 0 | case 37: |
5724 | 0 | return "MacIcelandic"; |
5725 | 0 | case 38: |
5726 | 0 | return "MacRomanian"; |
5727 | 0 | case 49: |
5728 | 0 | case 140: |
5729 | 0 | return "MacFarsi"; |
5730 | 0 | case 48: |
5731 | 0 | case 152: |
5732 | 0 | return "MacUkrainian"; |
5733 | 0 | default: |
5734 | 0 | return "Unknown encoding"; |
5735 | 0 | } |
5736 | 0 | } |
5737 | | |
5738 | 0 | #define HFS_PRINT_WIDTH 8 |
5739 | | typedef struct { |
5740 | | FILE *hFile; |
5741 | | int idx; |
5742 | | TSK_DADDR_T startBlock; |
5743 | | uint32_t blockCount; |
5744 | | unsigned char accumulating; |
5745 | | } HFS_PRINT_ADDR; |
5746 | | |
5747 | | static void |
5748 | | output_print_addr(HFS_PRINT_ADDR * print) |
5749 | 0 | { |
5750 | 0 | if (!print->accumulating) |
5751 | 0 | return; |
5752 | 0 | if (print->blockCount == 1) { |
5753 | 0 | tsk_fprintf(print->hFile, "%" PRIuDADDR " ", print->startBlock); |
5754 | 0 | print->idx += 1; |
5755 | 0 | } |
5756 | 0 | else if (print->blockCount > 1) { |
5757 | 0 | tsk_fprintf(print->hFile, "%" PRIuDADDR "-%" PRIuDADDR " ", |
5758 | 0 | print->startBlock, print->startBlock + print->blockCount - 1); |
5759 | 0 | print->idx += 2; |
5760 | 0 | } |
5761 | 0 | if (print->idx >= HFS_PRINT_WIDTH) { |
5762 | 0 | tsk_fprintf(print->hFile, "\n"); |
5763 | 0 | print->idx = 0; |
5764 | 0 | } |
5765 | 0 | } |
5766 | | |
5767 | | static TSK_WALK_RET_ENUM |
5768 | | print_addr_act( |
5769 | | [[maybe_unused]] TSK_FS_FILE * fs_file, |
5770 | | [[maybe_unused]] TSK_OFF_T a_off, |
5771 | | TSK_DADDR_T addr, |
5772 | | [[maybe_unused]] char *buf, |
5773 | | [[maybe_unused]] size_t size, |
5774 | | [[maybe_unused]] TSK_FS_BLOCK_FLAG_ENUM flags, |
5775 | | void *ptr) |
5776 | 0 | { |
5777 | 0 | HFS_PRINT_ADDR *print = (HFS_PRINT_ADDR *) ptr; |
5778 | |
|
5779 | 0 | if (print->accumulating) { |
5780 | 0 | if (addr == print->startBlock + print->blockCount) { |
5781 | 0 | ++print->blockCount; |
5782 | 0 | } |
5783 | 0 | else { |
5784 | 0 | output_print_addr(print); |
5785 | |
|
5786 | 0 | print->startBlock = addr; |
5787 | 0 | print->blockCount = 1; |
5788 | 0 | } |
5789 | 0 | } |
5790 | 0 | else { |
5791 | 0 | print->startBlock = addr; |
5792 | 0 | print->blockCount = 1; |
5793 | 0 | print->accumulating = TRUE; |
5794 | 0 | } |
5795 | |
|
5796 | 0 | return TSK_WALK_CONT; |
5797 | 0 | } |
5798 | | |
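 | | /* Illustrative sketch (added to this listing, not part of the original |
 | |  * source): shows how the HFS_PRINT_ADDR accumulator above coalesces |
 | |  * consecutive block addresses into "start-end" runs. The block numbers |
 | |  * and the helper name example_print_runs are hypothetical. */ |
 | | #if 0 |
 | | static void |
 | | example_print_runs(FILE * hFile) |
 | | { |
 | | HFS_PRINT_ADDR print; |
 | | size_t i; |
 | | // Hypothetical addresses: 100,101,102 form one run; 200 starts another. |
 | | static const TSK_DADDR_T addrs[] = { 100, 101, 102, 200 }; |
 | | print.idx = 0; |
 | | print.hFile = hFile; |
 | | print.accumulating = FALSE; |
 | | print.startBlock = 0; |
 | | print.blockCount = 0; |
 | | for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); ++i) { |
 | | // The file, buffer and size arguments are ignored by print_addr_act. |
 | | print_addr_act(NULL, 0, addrs[i], NULL, 0, |
 | | TSK_FS_BLOCK_FLAG_ALLOC, &print); |
 | | } |
 | | output_print_addr(&print); // flush the final pending run |
 | | if (print.idx != 0) |
 | | tsk_fprintf(hFile, "\n"); // expected output: "100-102 200 " plus a newline |
 | | } |
 | | #endif |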
5799 | | /** |
5800 | | * Print details on a specific file to a file handle. |
5801 | | * |
5802 | | * @param fs File system that the file is located in |
 | | * @param istat_flags Flags that control the output (e.g., TSK_FS_ISTAT_RUNLIST) |
5803 | | * @param hFile File handle to print text to |
5804 | | * @param inum Address of file in file system |
5805 | | * @param numblock Number of blocks in the file to force-print (can go beyond the file size) |
5806 | | * @param sec_skew Clock skew in seconds used to also print adjusted times |
5807 | | * |
5808 | | * @returns 1 on error and 0 on success |
5809 | | */ |
5810 | | static uint8_t |
5811 | | hfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile, TSK_INUM_T inum, |
5812 | | TSK_DADDR_T numblock, int32_t sec_skew) |
5813 | 0 | { |
5814 | 0 | HFS_INFO *hfs = (HFS_INFO *) fs; |
5815 | 0 | char hfs_mode[12]; |
5816 | 0 | HFS_PRINT_ADDR print; |
5817 | 0 | HFS_ENTRY entry; |
5818 | 0 | char timeBuf[128]; |
5819 | | // Compression ATTR, if there is one: |
5820 | 0 | const TSK_FS_ATTR *compressionAttr = NULL; |
5821 | 0 | RES_DESCRIPTOR *rd; // descriptor of a resource |
5822 | |
|
5823 | 0 | tsk_error_reset(); |
5824 | |
|
5825 | 0 | if (tsk_verbose) |
5826 | 0 | tsk_fprintf(stderr, |
5827 | 0 | "hfs_istat: inum: %" PRIuINUM " numblock: %" PRIuDADDR "\n", |
5828 | 0 | inum, numblock); |
5829 | |
|
5830 | 0 | std::unique_ptr<TSK_FS_FILE, decltype(&tsk_fs_file_close)> fs_file{ |
5831 | 0 | tsk_fs_file_open_meta(fs, NULL, inum), |
5832 | 0 | tsk_fs_file_close |
5833 | 0 | }; |
5834 | |
|
5835 | 0 | if (!fs_file) { |
5836 | 0 | error_returned("hfs_istat: getting metadata for the file"); |
5837 | 0 | return 1; |
5838 | 0 | } |
5839 | | |
5840 | 0 | if (inum >= HFS_FIRST_USER_CNID) { |
5841 | 0 | int rslt; |
5842 | 0 | tsk_fprintf(hFile, "File Path: "); |
5843 | 0 | rslt = print_parent_path(hFile, fs, inum); |
5844 | 0 | if (rslt != 0) |
5845 | 0 | tsk_fprintf(hFile, " Error in printing path\n"); |
5846 | 0 | else |
5847 | 0 | tsk_fprintf(hFile, "\n"); |
5848 | 0 | } |
5849 | 0 | else { |
5850 | | // All of the files in this inum range have names without nulls, |
5851 | | // slashes or control characters. So, it is OK to print this UTF8 |
5852 | | // string this way. |
5853 | 0 | if (fs_file->meta->name2 != NULL) |
5854 | 0 | tsk_fprintf(hFile, "File Name: %s\n", |
5855 | 0 | fs_file->meta->name2->name); |
5856 | 0 | } |
5857 | |
|
5858 | 0 | tsk_fprintf(hFile, "Catalog Record: %" PRIuINUM "\n", inum); |
5859 | 0 | tsk_fprintf(hFile, "%sAllocated\n", |
5860 | 0 | (fs_file->meta->flags & TSK_FS_META_FLAG_UNALLOC) ? "Not " : ""); |
5861 | |
|
5862 | 0 | tsk_fprintf(hFile, "Type:\t"); |
5863 | 0 | if (fs_file->meta->type == TSK_FS_META_TYPE_REG) |
5864 | 0 | tsk_fprintf(hFile, "File\n"); |
5865 | 0 | else if (TSK_FS_IS_DIR_META(fs_file->meta->type)) |
5866 | 0 | tsk_fprintf(hFile, "Folder\n"); |
5867 | 0 | else |
5868 | 0 | tsk_fprintf(hFile, "\n"); |
5869 | |
|
5870 | 0 | tsk_fs_meta_make_ls(fs_file->meta, hfs_mode, sizeof(hfs_mode)); |
5871 | 0 | tsk_fprintf(hFile, "Mode:\t%s\n", hfs_mode); |
5872 | 0 | tsk_fprintf(hFile, "Size:\t%" PRIdOFF "\n", fs_file->meta->size); |
5873 | |
|
5874 | 0 | if (fs_file->meta->link) |
5875 | 0 | tsk_fprintf(hFile, "Symbolic link to:\t%s\n", fs_file->meta->link); |
5876 | |
|
5877 | 0 | tsk_fprintf(hFile, "uid / gid: %" PRIuUID " / %" PRIuGID "\n", |
5878 | 0 | fs_file->meta->uid, fs_file->meta->gid); |
5879 | |
|
5880 | 0 | tsk_fprintf(hFile, "Link count:\t%d\n", fs_file->meta->nlink); |
5881 | |
|
5882 | 0 | if (hfs_cat_file_lookup(hfs, inum, &entry, TRUE) == 0) { |
5883 | 0 | hfs_uni_str *nm = &entry.thread.name; |
5884 | 0 | char name_buf[HFS_MAXNAMLEN + 1]; |
5885 | 0 | TSK_INUM_T par_cnid; // parent CNID |
5886 | |
|
5887 | 0 | tsk_fprintf(hFile, "\n"); |
5888 | 0 | hfs_UTF16toUTF8(fs, nm->unicode, (int) tsk_getu16(fs->endian, |
5889 | 0 | nm->length), &name_buf[0], HFS_MAXNAMLEN + 1, |
5890 | 0 | HFS_U16U8_FLAG_REPLACE_SLASH | HFS_U16U8_FLAG_REPLACE_CONTROL); |
5891 | 0 | tsk_fprintf(hFile, "File Name: %s\n", name_buf); |
5892 | | |
5893 | | // Test here to see if this is a hard link. |
5894 | 0 | par_cnid = tsk_getu32(fs->endian, &(entry.thread.parent_cnid)); |
5895 | 0 | if ((hfs->has_meta_dir_crtime && par_cnid == hfs->meta_dir_inum) || |
5896 | 0 | (hfs->has_meta_crtime && par_cnid == hfs->meta_inum)) { |
5897 | 0 | int instr = strncmp(name_buf, "iNode", 5); |
5898 | 0 | int drstr = strncmp(name_buf, "dir_", 4); |
5899 | |
|
5900 | 0 | if (instr == 0 && |
5901 | 0 | hfs->has_meta_crtime && par_cnid == hfs->meta_inum) { |
5902 | 0 | tsk_fprintf(hFile, "This is a hard link to a file\n"); |
5903 | 0 | } |
5904 | 0 | else if (drstr == 0 && |
5905 | 0 | hfs->has_meta_dir_crtime && |
5906 | 0 | par_cnid == hfs->meta_dir_inum) { |
5907 | 0 | tsk_fprintf(hFile, "This is a hard link to a folder.\n"); |
5908 | 0 | } |
5909 | 0 | } |
5910 | | |
5911 | | /* The cat.perm union contains file-type specific values. |
5912 | | * Print them if they are relevant. */ |
5913 | 0 | if ((fs_file->meta->type == TSK_FS_META_TYPE_CHR) || |
5914 | 0 | (fs_file->meta->type == TSK_FS_META_TYPE_BLK)) { |
5915 | 0 | tsk_fprintf(hFile, "Device ID:\t%" PRIu32 "\n", |
5916 | 0 | tsk_getu32(fs->endian, entry.cat.std.perm.special.raw)); |
5917 | 0 | } |
5918 | 0 | else if ((tsk_getu32(fs->endian, |
5919 | 0 | entry.cat.std.u_info.file_type) == |
5920 | 0 | HFS_HARDLINK_FILE_TYPE) |
5921 | 0 | && (tsk_getu32(fs->endian, |
5922 | 0 | entry.cat.std.u_info.file_cr) == |
5923 | 0 | HFS_HARDLINK_FILE_CREATOR)) { |
5924 | | // technically, the creation date of this item should be the same as either the |
5925 | | // creation date of the "HFS+ Private Data" folder or the creation date of the root folder |
5926 | 0 | tsk_fprintf(hFile, "Hard link inode number\t %" PRIu32 "\n", |
5927 | 0 | tsk_getu32(fs->endian, entry.cat.std.perm.special.inum)); |
5928 | 0 | } |
5929 | |
|
5930 | 0 | tsk_fprintf(hFile, "Admin flags: %" PRIu8, |
5931 | 0 | entry.cat.std.perm.a_flags); |
5932 | 0 | if (entry.cat.std.perm.a_flags != 0) { |
5933 | 0 | tsk_fprintf(hFile, " - "); |
5934 | 0 | if (entry.cat.std.perm.a_flags & HFS_PERM_AFLAG_ARCHIVED) |
5935 | 0 | tsk_fprintf(hFile, "archived "); |
5936 | 0 | if (entry.cat.std.perm.a_flags & HFS_PERM_AFLAG_IMMUTABLE) |
5937 | 0 | tsk_fprintf(hFile, "immutable "); |
5938 | 0 | if (entry.cat.std.perm.a_flags & HFS_PERM_AFLAG_APPEND) |
5939 | 0 | tsk_fprintf(hFile, "append-only "); |
5940 | 0 | } |
5941 | 0 | tsk_fprintf(hFile, "\n"); |
5942 | |
|
5943 | 0 | tsk_fprintf(hFile, "Owner flags: %" PRIu8, |
5944 | 0 | entry.cat.std.perm.o_flags); |
5945 | 0 | if (entry.cat.std.perm.o_flags != 0) { |
5946 | 0 | tsk_fprintf(hFile, " - "); |
5947 | 0 | if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_NODUMP) |
5948 | 0 | tsk_fprintf(hFile, "no-dump "); |
5949 | 0 | if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_IMMUTABLE) |
5950 | 0 | tsk_fprintf(hFile, "immutable "); |
5951 | 0 | if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_APPEND) |
5952 | 0 | tsk_fprintf(hFile, "append-only "); |
5953 | 0 | if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_OPAQUE) |
5954 | 0 | tsk_fprintf(hFile, "opaque "); |
5955 | 0 | if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_COMPRESSED) |
5956 | 0 | tsk_fprintf(hFile, "compressed "); |
5957 | 0 | } |
5958 | 0 | tsk_fprintf(hFile, "\n"); |
5959 | |
|
5960 | 0 | if (tsk_getu16(fs->endian, |
5961 | 0 | entry.cat.std.flags) & HFS_FILE_FLAG_LOCKED) |
5962 | 0 | tsk_fprintf(hFile, "Locked\n"); |
5963 | 0 | if (tsk_getu16(fs->endian, |
5964 | 0 | entry.cat.std.flags) & HFS_FILE_FLAG_ATTR) |
5965 | 0 | tsk_fprintf(hFile, "Has extended attributes\n"); |
5966 | 0 | if (tsk_getu16(fs->endian, |
5967 | 0 | entry.cat.std.flags) & HFS_FILE_FLAG_ACL) |
5968 | 0 | tsk_fprintf(hFile, "Has security data (ACLs)\n"); |
5969 | | |
5970 | | // File_type and file_cr are not relevant for Folders |
5971 | 0 | if ( !TSK_FS_IS_DIR_META(fs_file->meta->type)){ |
5972 | 0 | int windx; // loop index |
5973 | 0 | tsk_fprintf(hFile, |
5974 | 0 | "File type:\t%04" PRIx32 " ", |
5975 | 0 | tsk_getu32(fs->endian, entry.cat.std.u_info.file_type)); |
5976 | |
|
5977 | 0 | for (windx = 0; windx < 4; ++windx) { |
5978 | 0 | uint8_t cu = entry.cat.std.u_info.file_type[windx]; |
5979 | 0 | if (cu >= 32 && cu <= 126) |
5980 | 0 | tsk_fprintf(hFile, "%c", (char) cu); |
5981 | 0 | else |
5982 | 0 | tsk_fprintf(hFile, " "); |
5983 | 0 | } |
5984 | 0 | tsk_fprintf(hFile, "\n"); |
5985 | 0 | tsk_fprintf(hFile, |
5986 | 0 | "File creator:\t%04" PRIx32 " ", |
5987 | 0 | tsk_getu32(fs->endian, entry.cat.std.u_info.file_cr)); |
5988 | 0 | for (windx = 0; windx < 4; ++windx) { |
5989 | 0 | uint8_t cu = entry.cat.std.u_info.file_cr[windx]; |
5990 | 0 | if (cu >= 32 && cu <= 126) |
5991 | 0 | tsk_fprintf(hFile, "%c", (char) cu); |
5992 | 0 | else |
5993 | 0 | tsk_fprintf(hFile, " "); |
5994 | 0 | } |
5995 | 0 | tsk_fprintf(hFile, "\n"); |
5996 | 0 | } // END if(not folder) |
5997 | |
|
5998 | 0 | if (tsk_getu16(fs->endian, |
5999 | 0 | entry.cat.std.u_info.flags) & HFS_FINDER_FLAG_NAME_LOCKED) |
6000 | 0 | tsk_fprintf(hFile, "Name locked\n"); |
6001 | 0 | if (tsk_getu16(fs->endian, |
6002 | 0 | entry.cat.std.u_info.flags) & HFS_FINDER_FLAG_HAS_BUNDLE) |
6003 | 0 | tsk_fprintf(hFile, "Has bundle\n"); |
6004 | 0 | if (tsk_getu16(fs->endian, |
6005 | 0 | entry.cat.std.u_info.flags) & HFS_FINDER_FLAG_IS_INVISIBLE) |
6006 | 0 | tsk_fprintf(hFile, "Is invisible\n"); |
6007 | 0 | if (tsk_getu16(fs->endian, |
6008 | 0 | entry.cat.std.u_info.flags) & HFS_FINDER_FLAG_IS_ALIAS) |
6009 | 0 | tsk_fprintf(hFile, "Is alias\n"); |
6010 | |
|
6011 | 0 | tsk_fprintf(hFile, "Text encoding:\t%" PRIx32 " = %s\n", |
6012 | 0 | tsk_getu32(fs->endian, entry.cat.std.text_enc), |
6013 | 0 | text_encoding_name(tsk_getu32(fs->endian, |
6014 | 0 | entry.cat.std.text_enc))); |
6015 | |
|
6016 | 0 | if (tsk_getu16(fs->endian, |
6017 | 0 | entry.cat.std.rec_type) == HFS_FILE_RECORD) { |
6018 | 0 | tsk_fprintf(hFile, "Resource fork size:\t%" PRIu64 "\n", |
6019 | 0 | tsk_getu64(fs->endian, entry.cat.resource.logic_sz)); |
6020 | 0 | } |
6021 | 0 | } |
6022 | |
|
6023 | 0 | if (sec_skew != 0) { |
6024 | 0 | tsk_fprintf(hFile, "\nAdjusted times:\n"); |
6025 | 0 | if (fs_file->meta->mtime) |
6026 | 0 | fs_file->meta->mtime -= sec_skew; |
6027 | 0 | if (fs_file->meta->atime) |
6028 | 0 | fs_file->meta->atime -= sec_skew; |
6029 | 0 | if (fs_file->meta->ctime) |
6030 | 0 | fs_file->meta->ctime -= sec_skew; |
6031 | 0 | if (fs_file->meta->crtime) |
6032 | 0 | fs_file->meta->crtime -= sec_skew; |
6033 | 0 | if (fs_file->meta->time2.hfs.bkup_time) |
6034 | 0 | fs_file->meta->time2.hfs.bkup_time -= sec_skew; |
6035 | |
|
6036 | 0 | tsk_fprintf(hFile, "Created:\t%s\n", |
6037 | 0 | tsk_fs_time_to_str(fs_file->meta->crtime, timeBuf)); |
6038 | 0 | tsk_fprintf(hFile, "Content Modified:\t%s\n", |
6039 | 0 | tsk_fs_time_to_str(fs_file->meta->mtime, timeBuf)); |
6040 | 0 | tsk_fprintf(hFile, "Attributes Modified:\t%s\n", |
6041 | 0 | tsk_fs_time_to_str(fs_file->meta->ctime, timeBuf)); |
6042 | 0 | tsk_fprintf(hFile, "Accessed:\t%s\n", |
6043 | 0 | tsk_fs_time_to_str(fs_file->meta->atime, timeBuf)); |
6044 | 0 | tsk_fprintf(hFile, "Backed Up:\t%s\n", |
6045 | 0 | tsk_fs_time_to_str(fs_file->meta->time2.hfs.bkup_time, |
6046 | 0 | timeBuf)); |
6047 | |
|
6048 | 0 | if (fs_file->meta->mtime) |
6049 | 0 | fs_file->meta->mtime += sec_skew; |
6050 | 0 | if (fs_file->meta->atime) |
6051 | 0 | fs_file->meta->atime += sec_skew; |
6052 | 0 | if (fs_file->meta->ctime) |
6053 | 0 | fs_file->meta->ctime += sec_skew; |
6054 | 0 | if (fs_file->meta->crtime) |
6055 | 0 | fs_file->meta->crtime += sec_skew; |
6056 | 0 | if (fs_file->meta->time2.hfs.bkup_time) |
6057 | 0 | fs_file->meta->time2.hfs.bkup_time += sec_skew; |
6058 | |
|
6059 | 0 | tsk_fprintf(hFile, "\nOriginal times:\n"); |
6060 | 0 | } |
6061 | 0 | else { |
6062 | 0 | tsk_fprintf(hFile, "\nTimes:\n"); |
6063 | 0 | } |
6064 | |
|
6065 | 0 | tsk_fprintf(hFile, "Created:\t%s\n", |
6066 | 0 | tsk_fs_time_to_str(fs_file->meta->crtime, timeBuf)); |
6067 | 0 | tsk_fprintf(hFile, "Content Modified:\t%s\n", |
6068 | 0 | tsk_fs_time_to_str(fs_file->meta->mtime, timeBuf)); |
6069 | 0 | tsk_fprintf(hFile, "Attributes Modified:\t%s\n", |
6070 | 0 | tsk_fs_time_to_str(fs_file->meta->ctime, timeBuf)); |
6071 | 0 | tsk_fprintf(hFile, "Accessed:\t%s\n", |
6072 | 0 | tsk_fs_time_to_str(fs_file->meta->atime, timeBuf)); |
6073 | 0 | tsk_fprintf(hFile, "Backed Up:\t%s\n", |
6074 | 0 | tsk_fs_time_to_str(fs_file->meta->time2.hfs.bkup_time, timeBuf)); |
6075 | | |
6076 | | // IF this is a regular file, then print out the blocks of the DATA and RSRC forks. |
6077 | 0 | if (tsk_getu16(fs->endian, entry.cat.std.rec_type) == HFS_FILE_RECORD) { |
6078 | | // Only print DATA fork blocks if this file is NOT compressed |
6079 | | // N.B., a compressed file has no data fork, and tsk_fs_file_walk() will |
6080 | | // do the wrong thing! |
6081 | 0 | if (!(entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_COMPRESSED)) { |
6082 | |
|
6083 | 0 | if (!(istat_flags & TSK_FS_ISTAT_RUNLIST)) { |
6084 | 0 | tsk_fprintf(hFile, "\nData Fork Blocks:\n"); |
6085 | 0 | print.idx = 0; |
6086 | 0 | print.hFile = hFile; |
6087 | 0 | print.accumulating = FALSE; |
6088 | 0 | print.startBlock = 0; |
6089 | 0 | print.blockCount = 0; |
6090 | |
|
6091 | 0 | if (tsk_fs_file_walk_type(fs_file.get(), |
6092 | 0 | TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, |
6093 | 0 | (TSK_FS_FILE_WALK_FLAG_ENUM) (TSK_FS_FILE_WALK_FLAG_AONLY | |
6094 | 0 | TSK_FS_FILE_WALK_FLAG_SLACK), print_addr_act, |
6095 | 0 | (void *)&print)) { |
6096 | 0 | tsk_fprintf(hFile, "\nError reading file data fork\n"); |
6097 | 0 | tsk_error_print(hFile); |
6098 | 0 | tsk_error_reset(); |
6099 | 0 | } |
6100 | 0 | else { |
6101 | 0 | output_print_addr(&print); |
6102 | 0 | if (print.idx != 0) |
6103 | 0 | tsk_fprintf(hFile, "\n"); |
6104 | 0 | } |
6105 | 0 | } |
6106 | 0 | } |
6107 | | |
6108 | | // Only print out the blocks of the Resource fork if it has nonzero size |
6109 | 0 | if (tsk_getu64(fs->endian, entry.cat.resource.logic_sz) > 0) { |
6110 | |
|
6111 | 0 | if (! (istat_flags & TSK_FS_ISTAT_RUNLIST)) { |
6112 | 0 | tsk_fprintf(hFile, "\nResource Fork Blocks:\n"); |
6113 | |
|
6114 | 0 | print.idx = 0; |
6115 | 0 | print.hFile = hFile; |
6116 | 0 | print.accumulating = FALSE; |
6117 | 0 | print.startBlock = 0; |
6118 | 0 | print.blockCount = 0; |
6119 | |
|
6120 | 0 | if (tsk_fs_file_walk_type(fs_file.get(), |
6121 | 0 | TSK_FS_ATTR_TYPE_HFS_RSRC, HFS_FS_ATTR_ID_RSRC, |
6122 | 0 | (TSK_FS_FILE_WALK_FLAG_ENUM) (TSK_FS_FILE_WALK_FLAG_AONLY | |
6123 | 0 | TSK_FS_FILE_WALK_FLAG_SLACK), print_addr_act, |
6124 | 0 | (void *)&print)) { |
6125 | 0 | tsk_fprintf(hFile, "\nError reading file resource fork\n"); |
6126 | 0 | tsk_error_print(hFile); |
6127 | 0 | tsk_error_reset(); |
6128 | 0 | } |
6129 | 0 | else { |
6130 | 0 | output_print_addr(&print); |
6131 | 0 | if (print.idx != 0) |
6132 | 0 | tsk_fprintf(hFile, "\n"); |
6133 | 0 | } |
6134 | 0 | } |
6135 | 0 | } |
6136 | 0 | } |
6137 | | |
6138 | | // Force the loading of all attributes. |
6139 | 0 | (void) tsk_fs_file_attr_get(fs_file.get()); |
6140 | | |
6141 | | /* Print all of the attributes */ |
6142 | 0 | tsk_fprintf(hFile, "\nAttributes: \n"); |
6143 | 0 | if (fs_file->meta->attr) { |
6144 | 0 | int cnt, i; |
6145 | | |
6146 | | // cycle through the attributes |
6147 | 0 | cnt = tsk_fs_file_attr_getsize(fs_file.get()); |
6148 | 0 | for (i = 0; i < cnt; ++i) { |
6149 | 0 | const char *type; // type of the attribute as a string |
6150 | 0 | const TSK_FS_ATTR *fs_attr = |
6151 | 0 | tsk_fs_file_attr_get_idx(fs_file.get(), i); |
6152 | 0 | if (!fs_attr) |
6153 | 0 | continue; |
6154 | | |
6155 | 0 | type = hfs_attrTypeName((uint32_t) fs_attr->type); |
6156 | | |
6157 | | // We will need to do something better than this, in the end. |
6158 | | //type = "Data"; |
6159 | | |
6160 | | /* print the layout if it is non-resident and not "special" */ |
6161 | 0 | if (fs_attr->flags & TSK_FS_ATTR_NONRES) { |
6162 | | //NTFS_PRINT_ADDR print_addr; |
6163 | |
|
6164 | 0 | tsk_fprintf(hFile, |
6165 | 0 | "Type: %s (%" PRIu32 "-%" PRIu16 |
6166 | 0 | ") Name: %s Non-Resident%s%s%s size: %" |
6167 | 0 | PRIdOFF " init_size: %" PRIdOFF "\n", type, |
6168 | 0 | fs_attr->type, fs_attr->id, |
6169 | 0 | (fs_attr->name) ? fs_attr->name : "N/A", |
6170 | 0 | (fs_attr->flags & TSK_FS_ATTR_ENC) ? ", Encrypted" : |
6171 | 0 | "", |
6172 | 0 | (fs_attr->flags & TSK_FS_ATTR_COMP) ? ", Compressed" : |
6173 | 0 | "", |
6174 | 0 | (fs_attr->flags & TSK_FS_ATTR_SPARSE) ? ", Sparse" : |
6175 | 0 | "", fs_attr->size, fs_attr->nrd.initsize); |
6176 | |
|
6177 | 0 | if (istat_flags & TSK_FS_ISTAT_RUNLIST) { |
6178 | 0 | if (tsk_fs_attr_print(fs_attr, hFile)) { |
6179 | 0 | tsk_fprintf(hFile, "\nError creating run lists\n"); |
6180 | 0 | tsk_error_print(hFile); |
6181 | 0 | tsk_error_reset(); |
6182 | 0 | } |
6183 | 0 | } |
6184 | 0 | } // END: non-resident attribute case |
6185 | 0 | else { |
6186 | 0 | tsk_fprintf(hFile, |
6187 | 0 | "Type: %s (%" PRIu32 "-%" PRIu16 |
6188 | 0 | ") Name: %s Resident%s%s%s size: %" |
6189 | 0 | PRIdOFF "\n", |
6190 | 0 | type, |
6191 | 0 | fs_attr->type, |
6192 | 0 | fs_attr->id, |
6193 | 0 | (fs_attr->name) ? fs_attr->name : "N/A", |
6194 | 0 | (fs_attr->flags & TSK_FS_ATTR_ENC) ? ", Encrypted" : |
6195 | 0 | "", |
6196 | 0 | (fs_attr->flags & TSK_FS_ATTR_COMP) ? ", Compressed" : |
6197 | 0 | "", |
6198 | 0 | (fs_attr->flags & TSK_FS_ATTR_SPARSE) ? ", Sparse" : |
6199 | 0 | "", fs_attr->size); |
6200 | 0 | if (fs_attr->type == TSK_FS_ATTR_TYPE_HFS_COMP_REC) { |
6201 | 0 | if (compressionAttr == NULL) { |
6202 | 0 | compressionAttr = fs_attr; |
6203 | 0 | } |
6204 | 0 | else { |
6205 | | // Problem: there is more than one compression attribute |
6206 | 0 | error_detected(TSK_ERR_FS_CORRUPT, |
6207 | 0 | "hfs_istat: more than one compression attribute"); |
6208 | 0 | return 1; |
6209 | 0 | } |
6210 | 0 | } |
6211 | 0 | } // END: else (RESIDENT attribute case) |
6212 | 0 | } // END: for(;;) loop over attributes |
6213 | 0 | } // END: if(fs_file->meta->attr is non-NULL) |
6214 | | |
6215 | 0 | if ((entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_COMPRESSED) |
6216 | 0 | && (compressionAttr == NULL)) |
6217 | 0 | tsk_fprintf(hFile, |
6218 | 0 | "WARNING: Compression Flag is set, but there" |
6219 | 0 | " is no compression record for this file.\n"); |
6220 | 0 | if (((entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_COMPRESSED) == 0) |
6221 | 0 | && (compressionAttr != NULL)) |
6222 | 0 | tsk_fprintf(hFile, |
6223 | 0 | "WARNING: Compression Flag is NOT set, but there" |
6224 | 0 | " is a compression record for this file.\n"); |
6225 | | |
6226 | | // IF this is a compressed file |
6227 | 0 | if (compressionAttr != NULL) { |
6228 | 0 | const TSK_FS_ATTR *fs_attr = compressionAttr; |
6229 | 0 | ssize_t attrReadResult; |
6230 | 0 | DECMPFS_DISK_HEADER *cmph; |
6231 | 0 | uint32_t cmpType; |
6232 | 0 | uint64_t uncSize; |
6233 | 0 | uint64_t cmpSize = 0; |
6234 | | |
6235 | | // Read the attribute. It cannot be too large because it is stored in |
6236 | | // a btree node |
6237 | 0 | char *aBuf = (char *) tsk_malloc((size_t) fs_attr->size); |
6238 | 0 | if (aBuf == NULL) { |
6239 | 0 | error_returned("hfs_istat: space for a compression attribute"); |
6240 | 0 | return 1; |
6241 | 0 | } |
6242 | 0 | attrReadResult = tsk_fs_attr_read(fs_attr, (TSK_OFF_T) 0, |
6243 | 0 | aBuf, (size_t) fs_attr->size, |
6244 | 0 | (TSK_FS_FILE_READ_FLAG_ENUM) 0x00); |
6245 | 0 | if (attrReadResult == -1) { |
6246 | 0 | error_returned("hfs_istat: reading the compression attribute"); |
6247 | 0 | free(aBuf); |
6248 | 0 | return 1; |
6249 | 0 | } |
6250 | 0 | else if (attrReadResult < fs_attr->size) { |
6251 | 0 | error_detected(TSK_ERR_FS_READ, |
6252 | 0 | "hfs_istat: could not read the whole compression attribute"); |
6253 | 0 | free(aBuf); |
6254 | 0 | return 1; |
6255 | 0 | } |
6256 | | // Now, cast the attr into a compression header |
6257 | 0 | cmph = (DECMPFS_DISK_HEADER *) aBuf; |
6258 | 0 | cmpType = tsk_getu32(TSK_LIT_ENDIAN, cmph->compression_type); |
6259 | 0 | uncSize = tsk_getu64(TSK_LIT_ENDIAN, cmph->uncompressed_size); |
6260 | |
|
6261 | 0 | tsk_fprintf(hFile, "\nCompressed File:\n"); |
6262 | 0 | tsk_fprintf(hFile, " Uncompressed size: %" PRIu64 "\n", uncSize); |
6263 | |
|
6264 | 0 | switch (cmpType) { |
6265 | 0 | case DECMPFS_TYPE_ZLIB_ATTR: |
6266 | | // Data is inline |
6267 | 0 | { |
6268 | | // size of header, with indicator byte if uncompressed |
6269 | 0 | uint32_t off = (cmph->attr_bytes[0] & 0x0F) == 0x0F ? 17 : 16; |
6270 | 0 | cmpSize = fs_attr->size - off; |
6271 | |
|
6272 | 0 | tsk_fprintf(hFile, |
6273 | 0 | " Data follows compression record in the CMPF attribute\n" |
6274 | 0 | " %" PRIu64 " bytes of data at offset %u, %s compressed\n", |
6275 | 0 | cmpSize, off, off == 16 ? "zlib" : "not"); |
6276 | 0 | } |
6277 | 0 | break; |
6278 | | |
6279 | 0 | case DECMPFS_TYPE_LZVN_ATTR: |
6280 | | // Data is inline |
6281 | 0 | { |
6282 | | // size of header, with indicator byte if uncompressed |
6283 | 0 | uint32_t off = cmph->attr_bytes[0] == 0x06 ? 17 : 16; |
6284 | 0 | cmpSize = fs_attr->size - off; |
6285 | |
|
6286 | 0 | tsk_fprintf(hFile, |
6287 | 0 | " Data follows compression record in the CMPF attribute\n" |
6288 | 0 | " %" PRIu64 " bytes of data at offset %u, %s compressed\n", |
6289 | 0 | cmpSize, off, off == 16 ? "lzvn" : "not"); |
6290 | 0 | } |
6291 | 0 | break; |
6292 | | |
6293 | 0 | case DECMPFS_TYPE_ZLIB_RSRC: |
6294 | | // Data is zlib compressed in the resource fork |
6295 | 0 | tsk_fprintf(hFile, |
6296 | 0 | " Data is zlib compressed in the resource fork\n"); |
6297 | 0 | break; |
6298 | | |
6299 | 0 | case DECMPFS_TYPE_LZVN_RSRC: |
6300 | | // Data is lzvn compressed in the resource fork |
6301 | 0 | tsk_fprintf(hFile, |
6302 | 0 | " Data is lzvn compressed in the resource fork\n"); |
6303 | 0 | break; |
6304 | | |
6305 | 0 | default: |
6306 | 0 | tsk_fprintf(hFile, " Compression type is %u: UNKNOWN\n", |
6307 | 0 | cmpType); |
6308 | 0 | } |
6309 | | |
6310 | 0 | free(aBuf); |
6311 | |
|
6312 | 0 | if ((cmpType == DECMPFS_TYPE_ZLIB_RSRC || |
6313 | 0 | cmpType == DECMPFS_TYPE_LZVN_RSRC) |
6314 | 0 | && (tsk_getu64(fs->endian, entry.cat.resource.logic_sz) == 0)) |
6315 | 0 | tsk_fprintf(hFile, |
6316 | 0 | "WARNING: Compression record indicates compressed data" |
6317 | 0 | " in the RSRC Fork, but that fork is empty.\n"); |
6318 | 0 | } |
6319 | | |
6320 | | // This will return NULL if there is an error, or if there are no resources |
6321 | 0 | rd = hfs_parse_resource_fork(fs_file.get()); |
6322 | | // TODO: Should check the errnum here to see if there was an error |
6323 | |
|
6324 | 0 | if (rd != NULL) { |
6325 | 0 | tsk_fprintf(hFile, "\nResources:\n"); |
6326 | 0 | while (rd) { |
6327 | 0 | tsk_fprintf(hFile, |
6328 | 0 | " Type: %s \tID: %-5u \tOffset: %-5u \tSize: %-5u \tName: %s\n", |
6329 | 0 | rd->type, rd->id, rd->offset, rd->length, rd->name); |
6330 | 0 | rd = rd->next; |
6331 | 0 | } |
6332 | 0 | } |
6333 | | // This is OK to call with NULL |
6334 | 0 | free_res_descriptor(rd); |
6335 | |
|
6336 | 0 | return 0; |
6337 | 0 | } |
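 | | /* Illustrative sketch (not part of the original source): hfs_istat() is |
 | |  * normally reached through the fs->istat function pointer that hfs_open() |
 | |  * installs below. The helper name example_istat is hypothetical. */ |
 | | #if 0 |
 | | static uint8_t |
 | | example_istat(TSK_FS_INFO * fs) |
 | | { |
 | | // Print metadata for the root folder (CNID 2) to stdout, with no |
 | | // runlist flag, no forced block count and no clock skew. |
 | | return fs->istat(fs, (TSK_FS_ISTAT_FLAG_ENUM) 0, stdout, HFS_ROOT_INUM, 0, 0); |
 | | } |
 | | #endif |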
6338 | | |
6339 | | static TSK_FS_ATTR_TYPE_ENUM |
6340 | | hfs_get_default_attr_type(const TSK_FS_FILE * a_file) |
6341 | 0 | { |
6342 | | // The HFS+ special files have a default attr type of "Default" |
6343 | 0 | TSK_INUM_T inum = a_file->meta->addr; |
6344 | 0 | if (inum == 3 || // Extents File |
6345 | 0 | inum == 4 || // Catalog File |
6346 | 0 | inum == 5 || // Bad Blocks File |
6347 | 0 | inum == 6 || // Block Map (Allocation File) |
6348 | 0 | inum == 7 || // Startup File |
6349 | 0 | inum == 8 || // Attributes File |
6350 | 0 | inum == 14 || // Not sure if these two will actually work. I don't see |
6351 | 0 | inum == 15) // any code to load the attrs of these files, if they exist. |
6352 | 0 | return TSK_FS_ATTR_TYPE_DEFAULT; |
6353 | | // The "regular" files and symbolic links have a DATA fork with type "DATA" |
6354 | 0 | if (a_file->meta->type == TSK_FS_META_TYPE_REG || |
6355 | 0 | a_file->meta->type == TSK_FS_META_TYPE_LNK) |
6356 | | // This should be an HFS-specific type. |
6357 | 0 | return TSK_FS_ATTR_TYPE_HFS_DATA; |
6358 | | |
6359 | | // We've got to return *something* for every file, so we return this. |
6360 | 0 | return TSK_FS_ATTR_TYPE_DEFAULT; |
6361 | 0 | } |
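 | | /* Illustrative sketch (not part of the original source): because |
 | |  * hfs_get_default_attr_type() reports TSK_FS_ATTR_TYPE_HFS_DATA for |
 | |  * regular files, a plain tsk_fs_file_read() with no explicit attribute |
 | |  * type ends up reading the DATA fork. example_read_data_fork is a |
 | |  * hypothetical helper. */ |
 | | #if 0 |
 | | static ssize_t |
 | | example_read_data_fork(TSK_FS_INFO * fs, TSK_INUM_T inum, char *buf, size_t len) |
 | | { |
 | | TSK_FS_FILE *file = tsk_fs_file_open_meta(fs, NULL, inum); |
 | | ssize_t cnt; |
 | | if (file == NULL) |
 | | return -1; |
 | | // Reads from offset 0 of the default attribute (the DATA fork). |
 | | cnt = tsk_fs_file_read(file, 0, buf, len, TSK_FS_FILE_READ_FLAG_NONE); |
 | | tsk_fs_file_close(file); |
 | | return cnt; |
 | | } |
 | | #endif |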
6362 | | |
6363 | | static void |
6364 | | hfs_close(TSK_FS_INFO * fs) |
6365 | 0 | { |
6366 | 0 | HFS_INFO *hfs = (HFS_INFO *) fs; |
6367 | | // We'll grab this lock a bit early. |
6368 | 0 | tsk_take_lock(&(hfs->metadata_dir_cache_lock)); |
6369 | 0 | fs->tag = 0; |
6370 | |
|
6371 | 0 | free(hfs->fs); |
6372 | |
|
6373 | 0 | if (hfs->catalog_file) { |
6374 | 0 | tsk_fs_file_close(hfs->catalog_file); |
6375 | 0 | hfs->catalog_attr = NULL; |
6376 | 0 | } |
6377 | |
|
6378 | 0 | if (hfs->blockmap_file) { |
6379 | 0 | tsk_fs_file_close(hfs->blockmap_file); |
6380 | 0 | hfs->blockmap_attr = NULL; |
6381 | 0 | } |
6382 | |
|
6383 | 0 | if (hfs->meta_dir) { |
6384 | 0 | tsk_fs_dir_close(hfs->meta_dir); |
6385 | 0 | hfs->meta_dir = NULL; |
6386 | 0 | } |
6387 | |
|
6388 | 0 | if (hfs->dir_meta_dir) { |
6389 | 0 | tsk_fs_dir_close(hfs->dir_meta_dir); |
6390 | 0 | hfs->dir_meta_dir = NULL; |
6391 | 0 | } |
6392 | |
|
6393 | 0 | if (hfs->extents_file) { |
6394 | 0 | tsk_fs_file_close(hfs->extents_file); |
6395 | 0 | hfs->extents_file = NULL; |
6396 | 0 | } |
6397 | |
|
6398 | 0 | tsk_release_lock(&(hfs->metadata_dir_cache_lock)); |
6399 | 0 | tsk_deinit_lock(&(hfs->metadata_dir_cache_lock)); |
6400 | |
|
6401 | 0 | tsk_fs_free((TSK_FS_INFO *)hfs); |
6402 | 0 | } |
6403 | | |
6404 | | /* hfs_open - open an hfs file system |
6405 | | * |
6406 | | * Returns NULL on error (or if this is not an HFS+/HFSX file system) |
6407 | | */ |
6408 | | |
6409 | | TSK_FS_INFO * |
6410 | | hfs_open( |
6411 | | TSK_IMG_INFO * img_info, |
6412 | | TSK_OFF_T offset, |
6413 | | TSK_FS_TYPE_ENUM ftype, |
6414 | | [[maybe_unused]] const char* a_pass, |
6415 | | uint8_t test) |
6416 | 0 | { |
6417 | 0 | HFS_INFO *hfs; |
6418 | 0 | unsigned int len; |
6419 | 0 | TSK_FS_INFO *fs; |
6420 | 0 | ssize_t cnt; |
6421 | 0 | TSK_INUM_T inum; // The inum (or CNID) of the metadata directories |
6422 | 0 | int8_t result; // of tsk_fs_path2inum() |
6423 | |
|
6424 | 0 | tsk_error_reset(); |
6425 | |
|
6426 | 0 | if (TSK_FS_TYPE_ISHFS(ftype) == 0) { |
6427 | 0 | tsk_error_set_errno(TSK_ERR_FS_ARG); |
6428 | 0 | tsk_error_set_errstr("Invalid FS Type in hfs_open"); |
6429 | 0 | return NULL; |
6430 | 0 | } |
6431 | | |
6432 | 0 | if ((hfs = (HFS_INFO *) tsk_fs_malloc(sizeof(HFS_INFO))) == NULL) |
6433 | 0 | return NULL; |
6434 | | |
6435 | 0 | fs = &(hfs->fs_info); |
6436 | |
|
6437 | 0 | fs->ftype = TSK_FS_TYPE_HFS; |
6438 | 0 | fs->duname = "Allocation Block"; |
6439 | 0 | fs->tag = TSK_FS_INFO_TAG; |
6440 | 0 | fs->flags = TSK_FS_INFO_FLAG_NONE; |
6441 | |
|
6442 | 0 | fs->img_info = img_info; |
6443 | 0 | fs->offset = offset; |
6444 | | |
6445 | | /* |
6446 | | * Read the superblock. |
6447 | | */ |
6448 | 0 | len = sizeof(hfs_plus_vh); |
6449 | 0 | if ((hfs->fs = (hfs_plus_vh *) tsk_malloc(len)) == NULL) { |
6450 | 0 | fs->tag = 0; |
6451 | 0 | tsk_fs_free((TSK_FS_INFO *)hfs); |
6452 | 0 | return NULL; |
6453 | 0 | } |
6454 | | |
6455 | 0 | if (hfs_checked_read_random(fs, (char *) hfs->fs, len, |
6456 | 0 | (TSK_OFF_T) HFS_VH_OFF)) { |
6457 | 0 | tsk_error_set_errstr2("hfs_open: superblock"); |
6458 | 0 | fs->tag = 0; |
6459 | 0 | free(hfs->fs); |
6460 | 0 | tsk_fs_free((TSK_FS_INFO *)hfs); |
6461 | 0 | return NULL; |
6462 | 0 | } |
6463 | | |
6464 | | /* |
6465 | | * Verify we are looking at an HFS+ image |
6466 | | */ |
6467 | 0 | if (tsk_fs_guessu16(fs, hfs->fs->signature, HFS_VH_SIG_HFSPLUS) && |
6468 | 0 | tsk_fs_guessu16(fs, hfs->fs->signature, HFS_VH_SIG_HFSX) && |
6469 | 0 | tsk_fs_guessu16(fs, hfs->fs->signature, HFS_VH_SIG_HFS)) { |
6470 | |
|
6471 | 0 | fs->tag = 0; |
6472 | 0 | free(hfs->fs); |
6473 | 0 | tsk_fs_free((TSK_FS_INFO *)hfs); |
6474 | 0 | tsk_error_set_errno(TSK_ERR_FS_MAGIC); |
6475 | 0 | tsk_error_set_errstr("not an HFS+ file system (magic)"); |
6476 | 0 | return NULL; |
6477 | 0 | } |
6478 | | |
6479 | | /* |
6480 | | * Handle an HFS-wrapped HFS+ image, which is an HFS volume that contains |
6481 | | * the HFS+ volume inside of it. |
6482 | | */ |
6483 | 0 | if (tsk_getu16(fs->endian, hfs->fs->signature) == HFS_VH_SIG_HFS) { |
6484 | |
|
6485 | 0 | hfs_mdb *wrapper_sb = (hfs_mdb *) hfs->fs; |
6486 | | |
6487 | | // Verify that we are looking at a wrapper and not a normal HFS volume |
6488 | 0 | if ((tsk_getu16(fs->endian, |
6489 | 0 | wrapper_sb->drEmbedSigWord) == HFS_VH_SIG_HFSPLUS) |
6490 | 0 | || (tsk_getu16(fs->endian, |
6491 | 0 | wrapper_sb->drEmbedSigWord) == HFS_VH_SIG_HFSX)) { |
6492 | |
|
6493 | 0 | TSK_FS_INFO *fs_info2; |
6494 | | // offset in sectors to start of first HFS block |
6495 | 0 | uint16_t drAlBlSt = |
6496 | 0 | tsk_getu16(fs->endian, wrapper_sb->drAlBlSt); |
6497 | | |
6498 | | // size of each HFS block |
6499 | 0 | uint32_t drAlBlkSiz = |
6500 | 0 | tsk_getu32(fs->endian, wrapper_sb->drAlBlkSiz); |
6501 | | |
6502 | | // start of embedded FS |
6503 | 0 | uint16_t startBlock = tsk_getu16(fs->endian, |
6504 | 0 | wrapper_sb->drEmbedExtent_startBlock); |
6505 | | |
6506 | | // calculate the offset; 512 here is intentional. |
6507 | | // TN1150 says "The drAlBlSt field contains the offset, in |
6508 | | // 512-byte blocks, of the wrapper's allocation block 0 relative |
6509 | | // to the start of the volume" |
6510 | 0 | TSK_OFF_T hfsplus_offset = |
6511 | 0 | (drAlBlSt * (TSK_OFF_T) 512) + |
6512 | 0 | (drAlBlkSiz * (TSK_OFF_T) startBlock); |
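 | | // Worked example (hypothetical values): with drAlBlSt = 64, |
 | | // drAlBlkSiz = 4096 and startBlock = 3, the embedded volume starts at |
 | | // 64*512 + 4096*3 = 32768 + 12288 = 45056 bytes into the wrapper volume. |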
6513 | |
|
6514 | 0 | if (tsk_verbose) |
6515 | 0 | tsk_fprintf(stderr, |
6516 | 0 | "hfs_open: HFS+/HFSX within HFS wrapper at byte offset %" |
6517 | 0 | PRIdOFF "\n", hfsplus_offset); |
6518 | |
|
6519 | 0 | fs->tag = 0; |
6520 | 0 | free(hfs->fs); |
6521 | 0 | tsk_fs_free((TSK_FS_INFO *)hfs); |
6522 | | |
6523 | | /* just re-open with the new offset, then record the offset */ |
6524 | 0 | if (hfsplus_offset == 0) { |
6525 | 0 | tsk_error_set_errno(TSK_ERR_FS_CORRUPT); |
6526 | 0 | tsk_error_set_errstr("HFS+ offset is zero"); |
6527 | 0 | return NULL; |
6528 | 0 | } |
6529 | 0 | fs_info2 = |
6530 | 0 | hfs_open(img_info, offset + hfsplus_offset, ftype, "", test); |
6531 | |
|
6532 | 0 | if (fs_info2) |
6533 | 0 | ((HFS_INFO *) fs_info2)->hfs_wrapper_offset = |
6534 | 0 | hfsplus_offset; |
6535 | |
|
6536 | 0 | return fs_info2; |
6537 | 0 | } |
6538 | 0 | else { |
6539 | 0 | fs->tag = 0; |
6540 | 0 | free(hfs->fs); |
6541 | 0 | tsk_fs_free((TSK_FS_INFO *)hfs); |
6542 | 0 | tsk_error_set_errno(TSK_ERR_FS_MAGIC); |
6543 | 0 | tsk_error_set_errstr |
6544 | 0 | ("HFS file systems (other than wrappers around HFS+/HFSX file systems) are not supported"); |
6545 | 0 | return NULL; |
6546 | 0 | } |
6547 | 0 | } |
6548 | | |
6549 | 0 | fs->block_count = tsk_getu32(fs->endian, hfs->fs->blk_cnt); |
6550 | 0 | fs->first_block = 0; |
6551 | 0 | fs->last_block = fs->last_block_act = fs->block_count - 1; |
6552 | 0 | fs->block_size = tsk_getu32(fs->endian, hfs->fs->blk_sz); |
6553 | 0 | fs->dev_bsize = fs->img_info->sector_size; |
6554 | | |
6555 | | // determine the last block we have in this image |
6556 | 0 | if (fs->block_size <= 1) { |
6557 | 0 | fs->tag = 0; |
6558 | 0 | free(hfs->fs); |
6559 | 0 | tsk_fs_free((TSK_FS_INFO *)hfs); |
6560 | 0 | tsk_error_set_errno(TSK_ERR_FS_CORRUPT); |
6561 | 0 | tsk_error_set_errstr("HFS+ allocation block size too small"); |
6562 | 0 | return NULL; |
6563 | 0 | } |
6564 | 0 | if ((TSK_DADDR_T) ((img_info->size - offset) / fs->block_size) < |
6565 | 0 | fs->block_count) |
6566 | 0 | fs->last_block_act = |
6567 | 0 | (img_info->size - offset) / fs->block_size - 1; |
6568 | | |
6569 | | // Initialize the lock |
6570 | 0 | tsk_init_lock(&(hfs->metadata_dir_cache_lock)); |
6571 | | |
6572 | | /* |
6573 | | * Set function pointers |
6574 | | */ |
6575 | 0 | fs->inode_walk = hfs_inode_walk; |
6576 | 0 | fs->block_walk = hfs_block_walk; |
6577 | 0 | fs->block_getflags = hfs_block_getflags; |
6578 | 0 | fs->load_attrs = hfs_load_attrs; |
6579 | 0 | fs->get_default_attr_type = hfs_get_default_attr_type; |
6580 | |
|
6581 | 0 | fs->file_add_meta = hfs_inode_lookup; |
6582 | 0 | fs->dir_open_meta = hfs_dir_open_meta; |
6583 | 0 | fs->fsstat = hfs_fsstat; |
6584 | 0 | fs->fscheck = hfs_fscheck; |
6585 | 0 | fs->istat = hfs_istat; |
6586 | 0 | fs->close = hfs_close; |
6587 | | |
6588 | | // lazy loading of block map |
6589 | 0 | hfs->blockmap_file = NULL; |
6590 | 0 | hfs->blockmap_attr = NULL; |
6591 | 0 | hfs->blockmap_cache_start = -1; |
6592 | 0 | hfs->blockmap_cache_len = 0; |
6593 | |
|
6594 | 0 | fs->first_inum = HFS_ROOT_INUM; |
6595 | 0 | fs->root_inum = HFS_ROOT_INUM; |
6596 | 0 | fs->last_inum = HFS_FIRST_USER_CNID - 1; // we will later increase this |
6597 | 0 | fs->inum_count = fs->last_inum - fs->first_inum + 1; |
6598 | | |
6599 | | /* We will load the extents file data when we need it */ |
6600 | 0 | hfs->extents_file = NULL; |
6601 | 0 | hfs->extents_attr = NULL; |
6602 | |
|
6603 | 0 | if (tsk_getu32(fs->endian, |
6604 | 0 | hfs->fs->start_file.extents[0].blk_cnt) == 0) { |
6605 | 0 | if (tsk_verbose) |
6606 | 0 | tsk_fprintf(stderr, |
6607 | 0 | "hfs_open: Optional Startup File is not present.\n"); |
6608 | 0 | hfs->has_startup_file = FALSE; |
6609 | 0 | } |
6610 | 0 | else { |
6611 | 0 | if (tsk_verbose) |
6612 | 0 | tsk_fprintf(stderr, "hfs_open: Startup File is present.\n"); |
6613 | 0 | hfs->has_startup_file = TRUE; |
6614 | 0 | } |
6615 | |
|
6616 | 0 | if (tsk_getu32(fs->endian, hfs->fs->ext_file.extents[0].blk_cnt) == 0) { |
6617 | 0 | if (tsk_verbose) |
6618 | 0 | tsk_fprintf(stderr, |
6619 | 0 | "hfs_open: Optional Extents File (and Badblocks File) is not present.\n"); |
6620 | 0 | hfs->has_extents_file = FALSE; |
6621 | 0 | } |
6622 | 0 | else { |
6623 | 0 | if (tsk_verbose) |
6624 | 0 | tsk_fprintf(stderr, |
6625 | 0 | "hfs_open: Extents File (and BadBlocks File) is present.\n"); |
6626 | 0 | hfs->has_extents_file = TRUE; |
6627 | 0 | } |
6628 | |
|
6629 | 0 | if (tsk_getu32(fs->endian, hfs->fs->attr_file.extents[0].blk_cnt) == 0) { |
6630 | 0 | if (tsk_verbose) |
6631 | 0 | tsk_fprintf(stderr, |
6632 | 0 | "hfs_open: Optional Attributes File is not present.\n"); |
6633 | 0 | hfs->has_attributes_file = FALSE; |
6634 | 0 | } |
6635 | 0 | else { |
6636 | 0 | if (tsk_verbose) |
6637 | 0 | tsk_fprintf(stderr, "hfs_open: Attributes File is present.\n"); |
6638 | 0 | hfs->has_attributes_file = TRUE; |
6639 | 0 | } |
6640 | | |
6641 | | /* Load the catalog file though */ |
6642 | 0 | if ((hfs->catalog_file = |
6643 | 0 | tsk_fs_file_open_meta(fs, NULL, |
6644 | 0 | HFS_CATALOG_FILE_ID)) == NULL) { |
6645 | 0 | hfs_close(fs); |
6646 | 0 | return NULL; |
6647 | 0 | } |
6648 | | |
6649 | | /* cache the data attribute */ |
6650 | 0 | hfs->catalog_attr = |
6651 | 0 | tsk_fs_attrlist_get(hfs->catalog_file->meta->attr, |
6652 | 0 | TSK_FS_ATTR_TYPE_DEFAULT); |
6653 | 0 | if (!hfs->catalog_attr) { |
6654 | 0 | hfs_close(fs); |
6655 | 0 | tsk_error_errstr2_concat |
6656 | 0 | (" - Data Attribute not found in Catalog File"); |
6657 | 0 | return NULL; |
6658 | 0 | } |
6659 | | |
6660 | | // cache the catalog file header |
6661 | 0 | cnt = tsk_fs_attr_read(hfs->catalog_attr, 14, |
6662 | 0 | (char *) &(hfs->catalog_header), |
6663 | 0 | sizeof(hfs_btree_header_record), TSK_FS_FILE_READ_FLAG_NONE); |
6664 | 0 | if (cnt != sizeof(hfs_btree_header_record)) { |
6665 | 0 | if (cnt >= 0) { |
6666 | 0 | tsk_error_reset(); |
6667 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
6668 | 0 | } |
6669 | 0 | hfs_close(fs); |
6670 | 0 | tsk_error_set_errstr2("hfs_open: Error reading catalog header"); |
6671 | 0 | return NULL; |
6672 | 0 | } |
6673 | | |
6674 | 0 | if (tsk_getu16(fs->endian, hfs->fs->version) == HFS_VH_VER_HFSPLUS) |
6675 | 0 | hfs->is_case_sensitive = 0; |
6676 | 0 | else if (tsk_getu16(fs->endian, hfs->fs->version) == HFS_VH_VER_HFSX) { |
6677 | 0 | if (hfs->catalog_header.compType == HFS_BT_HEAD_COMP_SENS) |
6678 | 0 | hfs->is_case_sensitive = 1; |
6679 | 0 | else if (hfs->catalog_header.compType == HFS_BT_HEAD_COMP_INSENS) |
6680 | 0 | hfs->is_case_sensitive = 0; |
6681 | 0 | else { |
6682 | 0 | if (tsk_verbose) |
6683 | 0 | tsk_fprintf(stderr, |
6684 | 0 | "hfs_open: invalid value (0x%02" PRIx8 |
6685 | 0 | ") for key compare type; using case-insensitive\n", |
6686 | 0 | hfs->catalog_header.compType); |
6687 | 0 | hfs->is_case_sensitive = 0; |
6688 | 0 | } |
6689 | 0 | } |
6690 | 0 | else { |
6691 | 0 | if (tsk_verbose) |
6692 | 0 | tsk_fprintf(stderr, |
6693 | 0 | "hfs_open: unknown HFS+/HFSX version (%" PRIu16 ")\n", |
6694 | 0 | tsk_getu16(fs->endian, hfs->fs->version)); |
6695 | 0 | hfs->is_case_sensitive = 0; |
6696 | 0 | } |
6697 | | |
6698 | | // update the numbers. |
6699 | 0 | fs->last_inum = hfs_find_highest_inum(hfs); |
6700 | 0 | fs->inum_count = fs->last_inum + 1; |
6701 | |
|
6702 | 0 | snprintf((char *) fs->fs_id, 17, "%08" PRIx32 "%08" PRIx32, |
6703 | 0 | tsk_getu32(fs->endian, hfs->fs->finder_info[HFS_VH_FI_ID1]), |
6704 | 0 | tsk_getu32(fs->endian, hfs->fs->finder_info[HFS_VH_FI_ID2])); |
6705 | 0 | fs->fs_id_used = 16; |
6706 | | |
6707 | | /* journal */ |
6708 | 0 | fs->jblk_walk = hfs_jblk_walk; |
6709 | 0 | fs->jentry_walk = hfs_jentry_walk; |
6710 | 0 | fs->jopen = hfs_jopen; |
6711 | 0 | fs->name_cmp = hfs_name_cmp; |
6712 | 0 | fs->journ_inum = 0; |
6713 | | |
6714 | | /* Creation Times */ |
6715 | | |
6716 | | // First, the root |
6717 | 0 | { |
6718 | 0 | std::unique_ptr<TSK_FS_FILE, decltype(&tsk_fs_file_close)> file{ |
6719 | 0 | tsk_fs_file_open_meta(fs, NULL, 2), |
6720 | 0 | tsk_fs_file_close |
6721 | 0 | }; |
6722 | |
|
6723 | 0 | if (file) { |
6724 | 0 | hfs->root_crtime = file->meta->crtime; |
6725 | 0 | hfs->has_root_crtime = TRUE; |
6726 | 0 | } |
6727 | 0 | else { |
6728 | 0 | hfs->has_root_crtime = FALSE; |
6729 | 0 | } |
6730 | 0 | } |
6731 | | |
6732 | | // disable hard link traversal while finding the hard |
6733 | | // link directories themselves (to prevent problems if |
6734 | | // there are hard links in the root directory) |
6735 | 0 | hfs->meta_inum = 0; |
6736 | 0 | hfs->meta_dir_inum = 0; |
6737 | | |
6738 | | // Now the (file) metadata directory |
6739 | | |
6740 | | // The metadata directory is a sub-directory of the root. Its name begins with four nulls, followed |
6741 | | // by "HFS+ Private Data". The file system parsing code replaces nulls in filenames with UTF8_NULL_REPLACE. |
6742 | | // In the released version of TSK, this replacement is the character '^'. |
6743 | | // NOTE: There is a standard Unicode replacement which is 0xfffd in UTF16 and 0xEF 0xBF 0xBD in UTF8. |
6744 | | // Systems that require the standard definition can redefine UTF8_NULL_REPLACE and UTF16_NULL_REPLACE |
6745 | | // in tsk_hfs.h |
6746 | 0 | hfs->has_meta_crtime = FALSE; |
6747 | 0 | result = |
6748 | 0 | tsk_fs_path2inum(fs, |
6749 | 0 | "/" UTF8_NULL_REPLACE UTF8_NULL_REPLACE UTF8_NULL_REPLACE |
6750 | 0 | UTF8_NULL_REPLACE "HFS+ Private Data", &inum, NULL); |
6751 | 0 | if (result == 0) { |
6752 | 0 | std::unique_ptr<TSK_FS_FILE, decltype(&tsk_fs_file_close)> file_tmp{ |
6753 | 0 | tsk_fs_file_open_meta(fs, NULL, inum), |
6754 | 0 | tsk_fs_file_close |
6755 | 0 | }; |
6756 | |
|
6757 | 0 | if (file_tmp) { |
6758 | 0 | hfs->meta_crtime = file_tmp->meta->crtime; |
6759 | 0 | hfs->has_meta_crtime = TRUE; |
6760 | 0 | hfs->meta_inum = inum; |
6761 | 0 | } |
6762 | 0 | } |
6763 | | |
6764 | | // Now, the directory metadata directory |
6765 | | |
6766 | | // The "directory" metadata directory, where hardlinked directories actually live, is a subdirectory |
6767 | | // of the root. The beginning of the name of this directory is ".HFS+ Private Directory Data" which |
6768 | | // is followed by a carriage return (ASCII 13). |
6769 | 0 | hfs->has_meta_dir_crtime = FALSE; |
6770 | 0 | result = |
6771 | 0 | tsk_fs_path2inum(fs, "/.HFS+ Private Directory Data\r", &inum, |
6772 | 0 | NULL); |
6773 | 0 | if (result == 0) { |
6774 | 0 | std::unique_ptr<TSK_FS_FILE, decltype(&tsk_fs_file_close)> file_tmp{ |
6775 | 0 | tsk_fs_file_open_meta(fs, NULL, inum), |
6776 | 0 | tsk_fs_file_close |
6777 | 0 | }; |
6778 | |
|
6779 | 0 | if (file_tmp) { |
6780 | 0 | hfs->metadir_crtime = file_tmp->meta->crtime; |
6781 | 0 | hfs->has_meta_dir_crtime = TRUE; |
6782 | 0 | hfs->meta_dir_inum = inum; |
6783 | 0 | } |
6784 | 0 | } |
6785 | |
|
6786 | 0 | if (hfs->has_root_crtime && hfs->has_meta_crtime |
6787 | 0 | && hfs->has_meta_dir_crtime) { |
6788 | 0 | if (tsk_verbose) |
6789 | 0 | tsk_fprintf(stderr, |
6790 | 0 | "hfs_open: Creation times for key folders have been read and cached.\n"); |
6791 | 0 | } |
6792 | 0 | if (!hfs->has_root_crtime) { |
6793 | 0 | if (tsk_verbose) |
6794 | 0 | tsk_fprintf(stderr, |
6795 | 0 | "hfs_open: Warning: Could not open the root directory. " |
6796 | 0 | "Hard link detection and some other functions will be impaired\n"); |
6797 | 0 | } |
6798 | 0 | else if (tsk_verbose) { |
6799 | 0 | tsk_fprintf(stderr, |
6800 | 0 | "hfs_open: The root directory is accessible.\n"); |
6801 | 0 | } |
6802 | |
|
6803 | 0 | if (tsk_verbose) { |
6804 | 0 | if (hfs->has_meta_crtime) |
6805 | 0 | tsk_fprintf(stderr, |
6806 | 0 | "hfs_open: \"/^^^^HFS+ Private Data\" metadata folder is accessible.\n"); |
6807 | 0 | else |
6808 | 0 | tsk_fprintf(stderr, |
6809 | 0 | "hfs_open: Optional \"^^^^HFS+ Private Data\" metadata folder is not accessible, or does not exist.\n"); |
6810 | 0 | if (hfs->has_meta_dir_crtime) |
6811 | 0 | tsk_fprintf(stderr, |
6812 | 0 | "hfs_open: \"/HFS+ Private Directory Data^\" metadata folder is accessible.\n"); |
6813 | 0 | else |
6814 | 0 | tsk_fprintf(stderr, |
6815 | 0 | "hfs_open: Optional \"/HFS+ Private Directory Data^\" metadata folder is not accessible, or does not exist.\n"); |
6816 | 0 | } |
6817 | | |
6818 | | // These caches will be set, if they are needed. |
6819 | 0 | hfs->meta_dir = NULL; |
6820 | 0 | hfs->dir_meta_dir = NULL; |
6821 | |
|
6822 | 0 | return fs; |
6823 | 0 | } |
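 | | /* Illustrative sketch (not part of the original source): hfs_open() is |
 | |  * normally reached through the generic tsk_fs_open_img() dispatcher |
 | |  * rather than called directly. The image path and the helper name |
 | |  * example_open_hfs are hypothetical. */ |
 | | #if 0 |
 | | static void |
 | | example_open_hfs(void) |
 | | { |
 | | // Open a raw disk image and the HFS+ file system at byte offset 0. |
 | | // TSK_FS_TYPE_DETECT could be used instead to auto-detect the type. |
 | | TSK_IMG_INFO *img = tsk_img_open_sing(_TSK_T("image.dd"), TSK_IMG_TYPE_DETECT, 0); |
 | | if (img == NULL) |
 | | return; |
 | | TSK_FS_INFO *fs = tsk_fs_open_img(img, 0, TSK_FS_TYPE_HFS); |
 | | if (fs != NULL) { |
 | | printf("Block size: %u\n", fs->block_size); |
 | | tsk_fs_close(fs); |
 | | } |
 | | tsk_img_close(img); |
 | | } |
 | | #endif |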
6824 | | |
6825 | | |
6826 | | /* |
6827 | | * Error Handling |
6828 | | */ |
6829 | | |
6830 | | /** |
6831 | | * Call this when an error is first detected. It sets the error code and it also |
6832 | | * sets the primary error string, describing the lowest level of error. (Actually, |
6833 | | * it appends to the error string.) |
6834 | | * |
6835 | | * If the error code is already set, then this appends to the primary error |
6836 | | * string a hex representation of the new error code, plus the new error message. |
6837 | | * |
6838 | | * @param errnum The desired error code |
6839 | | * @param errstr The format string for the error message |
6840 | | */ |
6841 | | void |
6842 | | error_detected(uint32_t errnum, const char *errstr, ...) |
6843 | 0 | { |
6844 | 0 | va_list args; |
6845 | |
|
6846 | 0 | va_start(args, errstr); |
6847 | |
|
6848 | 0 | { |
6849 | 0 | TSK_ERROR_INFO *errInfo = tsk_error_get_info(); |
6850 | 0 | char *loc_errstr = errInfo->errstr; |
6851 | |
|
6852 | 0 | if (errInfo->t_errno == 0) |
6853 | 0 | errInfo->t_errno = errnum; |
6854 | 0 | else { |
6855 | | //This should not happen! We don't want to wipe out the existing error |
6856 | | //code, so we write the new code into the error string, in hex. |
6857 | 0 | size_t sl = strlen(loc_errstr); |
6858 | 0 | snprintf(loc_errstr + sl, TSK_ERROR_STRING_MAX_LENGTH - sl, |
6859 | 0 | " Next errnum: 0x%x ", errnum); |
6860 | 0 | } |
6861 | 0 | if (errstr != NULL) { |
6862 | 0 | size_t sl = strlen(loc_errstr); |
6863 | 0 | vsnprintf(loc_errstr + sl, TSK_ERROR_STRING_MAX_LENGTH - sl, |
6864 | 0 | errstr, args); |
6865 | 0 | } |
6866 | 0 | } |
6867 | |
|
6868 | 0 | va_end(args); |
6869 | |
|
6870 | 0 | } |
6871 | | |
6872 | | /** |
6873 | | * Call this when a called TSK function returns an error. Presumably, that |
6874 | | * function will have set the error code and the primary error string. This |
6875 | | * *appends* to the secondary error string. It should be called to describe |
6876 | | * the context of the call. If no error code has been set, then this sets a |
6877 | | * default code so that it is not zero. |
6878 | | * |
6879 | | * @param errstr The format string for the error message |
6880 | | */ |
6881 | | void |
6882 | | error_returned(const char *errstr, ...) |
6883 | 0 | { |
6884 | 0 | va_list args; |
6885 | 0 | va_start(args, errstr); |
6886 | |
|
6887 | 0 | { |
6888 | 0 | TSK_ERROR_INFO *errInfo = tsk_error_get_info(); |
6889 | 0 | char *loc_errstr2 = errInfo->errstr2; |
6890 | |
|
6891 | 0 | if (errInfo->t_errno == 0) |
6892 | 0 | errInfo->t_errno = TSK_ERR_AUX_GENERIC; |
6893 | 0 | if (errstr != NULL) { |
6894 | 0 | size_t sl = strlen(loc_errstr2); |
6895 | 0 | vsnprintf(loc_errstr2 + sl, TSK_ERROR_STRING_MAX_LENGTH - sl, |
6896 | 0 | errstr, args); |
6897 | 0 | } |
6898 | 0 | } |
6899 | 0 | va_end(args); |
6900 | 0 | } |
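 | | /* Illustrative sketch (not part of the original source): the intended |
 | |  * division of labor is that the lowest-level routine calls |
 | |  * error_detected() with a specific code, and its callers add context |
 | |  * with error_returned(). Both helper names below are hypothetical. */ |
 | | #if 0 |
 | | static uint8_t |
 | | example_low_level(void) |
 | | { |
 | | // Lowest level: set the error code and the primary error string. |
 | | error_detected(TSK_ERR_FS_CORRUPT, "example_low_level: bad node size"); |
 | | return 1; |
 | | } |
 | | static uint8_t |
 | | example_caller(void) |
 | | { |
 | | if (example_low_level()) { |
 | | // Caller: append context to the secondary error string. |
 | | error_returned("example_caller: while loading the catalog"); |
 | | return 1; |
 | | } |
 | | return 0; |
 | | } |
 | | #endif |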