/src/sleuthkit/tsk/fs/hfs.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | ** The Sleuth Kit |
3 | | ** |
4 | | ** This software is subject to the IBM Public License ver. 1.0, |
5 | | ** which was displayed prior to download and is included in the readme.txt |
6 | | ** file accompanying the Sleuth Kit files. It may also be requested from: |
7 | | ** Crucial Security Inc. |
8 | | ** 14900 Conference Center Drive |
9 | | ** Chantilly, VA 20151 |
10 | | ** |
11 | | |
12 | | ** Copyright (c) 2009 Brian Carrier. All rights reserved. |
13 | | ** |
14 | | ** Judson Powers [jpowers@atc-nycorp.com] |
15 | | ** Matt Stillerman [matt@atc-nycorp.com] |
16 | | ** Rob Joyce [rob@atc-nycorp.com] |
17 | | ** Copyright (c) 2008, 2012 ATC-NY. All rights reserved. |
18 | | ** This file contains data developed with support from the National |
19 | | ** Institute of Justice, Office of Justice Programs, U.S. Department of Justice. |
20 | | ** |
21 | | ** Wyatt Banks [wbanks@crucialsecurity.com] |
22 | | ** Copyright (c) 2005 Crucial Security Inc. All rights reserved. |
23 | | ** |
24 | | ** Brian Carrier [carrier@sleuthkit.org] |
25 | | ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved |
26 | | ** |
27 | | ** Copyright (c) 1997,1998,1999, International Business Machines |
28 | | ** Corporation and others. All Rights Reserved. |
29 | | */ |
30 | | |
31 | | /* TCT |
32 | | * LICENSE |
33 | | * This software is distributed under the IBM Public License. |
34 | | * AUTHOR(S) |
35 | | * Wietse Venema |
36 | | * IBM T.J. Watson Research |
37 | | * P.O. Box 704 |
38 | | * Yorktown Heights, NY 10598, USA |
39 | | --*/ |
40 | | |
41 | | /* |
42 | | ** You may distribute the Sleuth Kit, or other software that incorporates |
43 |  | ** part or all of the Sleuth Kit, in object code form under a license agreement, |
44 | | ** provided that: |
45 | | ** a) you comply with the terms and conditions of the IBM Public License |
46 | | ** ver 1.0; and |
47 | | ** b) the license agreement |
48 | | ** i) effectively disclaims on behalf of all Contributors all warranties |
49 | | ** and conditions, express and implied, including warranties or |
50 | | ** conditions of title and non-infringement, and implied warranties |
51 | | ** or conditions of merchantability and fitness for a particular |
52 | | ** purpose. |
53 | | ** ii) effectively excludes on behalf of all Contributors liability for |
54 | | ** damages, including direct, indirect, special, incidental and |
55 | | ** consequential damages such as lost profits. |
56 | | ** iii) states that any provisions which differ from IBM Public License |
57 | | ** ver. 1.0 are offered by that Contributor alone and not by any |
58 | | ** other party; and |
59 | | ** iv) states that the source code for the program is available from you, |
60 | | ** and informs licensees how to obtain it in a reasonable manner on or |
61 | | ** through a medium customarily used for software exchange. |
62 | | ** |
63 | | ** When the Sleuth Kit or other software that incorporates part or all of |
64 | | ** the Sleuth Kit is made available in source code form: |
65 | | ** a) it must be made available under IBM Public License ver. 1.0; and |
66 | | ** b) a copy of the IBM Public License ver. 1.0 must be included with |
67 | | ** each copy of the program. |
68 | | */ |
69 | | |
70 | | /** \file hfs.c |
71 | | * Contains the general internal TSK HFS metadata and data unit code |
72 | | */ |
73 | | |
74 | | #include "tsk_fs_i.h" |
75 | | #include "tsk_hfs.h" |
76 | | #include "decmpfs.h" |
77 | | |
78 | | #include <stdarg.h> |
79 | | #ifdef TSK_WIN32 |
80 | | #include <string.h> |
81 | | #else |
82 | | #include <strings.h> |
83 | | #endif |
84 | | |
/* Swap two integer lvalues in place using the XOR trick (no temporary).
 * WARNING: if both arguments designate the same object, the result is
 * zero, not a no-op — only use with distinct lvalues. */
#define XSWAP(a,b) { a ^= b; b ^= a; a ^= b; }
86 | | |
87 | | // Compression Stuff |
88 | | |
89 | | #ifdef HAVE_LIBZ |
90 | | #include <zlib.h> |
91 | | #endif |
92 | | |
93 | | #include "lzvn.h" |
94 | | |
95 | | // Forward declarations: |
96 | | static uint8_t hfs_load_attrs(TSK_FS_FILE * fs_file); |
97 | | static uint8_t hfs_load_extended_attrs(TSK_FS_FILE * file, |
98 | | unsigned char *isCompressed, unsigned char *cmpType, |
99 | | uint64_t * uncSize); |
100 | | void error_detected(uint32_t errnum, const char *errstr, ...); |
101 | | void error_returned(const char *errstr, ...); |
102 | | |
103 | | /* may set error up to string 1 |
104 | | * returns 0 on success, 1 on failure */ |
105 | | uint8_t |
106 | | hfs_checked_read_random(TSK_FS_INFO * fs, char *buf, size_t len, |
107 | | TSK_OFF_T offs) |
108 | 2 | { |
109 | 2 | ssize_t r; |
110 | | |
111 | 2 | r = tsk_fs_read(fs, offs, buf, len); |
112 | 2 | if (r != (ssize_t) len) { |
113 | 0 | if (r >= 0) { |
114 | 0 | tsk_error_reset(); |
115 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
116 | 0 | } |
117 | 0 | return 1; |
118 | 0 | } |
119 | 2 | return 0; |
120 | 2 | } |
121 | | |
122 | | /********************************************************************** |
123 | | * |
124 | | * MISC FUNCS |
125 | | * |
126 | | **********************************************************************/ |
127 | | |
128 | | /* convert the HFS Time (seconds from 1/1/1904) |
129 | | * to UNIX (UTC seconds from 1/1/1970) |
130 | | * The number is borrowed from linux HFS driver source |
131 | | */ |
132 | | uint32_t |
133 | | hfs_convert_2_unix_time(uint32_t hfsdate) |
134 | 0 | { |
135 | 0 | if (hfsdate < NSEC_BTWN_1904_1970) |
136 | 0 | return 0; |
137 | 0 | return (uint32_t) (hfsdate - NSEC_BTWN_1904_1970); |
138 | 0 | } |
139 | | |
140 | | |
/**
 * Convert a cnid (metadata address) to a big-endian byte array.
 * This is used to create the key for tree lookups.
 * @param cnid Metadata address to convert
 * @param array [out] Array to write data into.
 */
static void
cnid_to_array(uint32_t cnid, uint8_t array[4])
{
    int i;

    /* Most significant byte first (big-endian, as stored on disk). */
    for (i = 0; i < 4; i++) {
        array[i] = (uint8_t) ((cnid >> (8 * (3 - i))) & 0xff);
    }
}
155 | | |
156 | | /********************************************************************** |
157 | | * |
158 | | * Lookup Functions |
159 | | * |
160 | | **********************************************************************/ |
161 | | |
162 | | |
163 | | |
164 | | /* Compares the given HFS+ Extents B-tree key to key constructed |
165 | | * for finding the beginning of the data fork extents for the given |
166 | | * CNID. (That is, the search key uses the given CNID and has |
167 | | * fork = 0 and start_block = 0.) |
168 | | */ |
169 | | static int |
170 | | hfs_ext_compare_keys(HFS_INFO * hfs, uint32_t cnid, |
171 | | const hfs_btree_key_ext * key) |
172 | 0 | { |
173 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); |
174 | 0 | uint32_t key_cnid; |
175 | |
|
176 | 0 | key_cnid = tsk_getu32(fs->endian, key->file_id); |
177 | 0 | if (key_cnid < cnid) |
178 | 0 | return -1; |
179 | 0 | if (key_cnid > cnid) |
180 | 0 | return 1; |
181 | | |
182 | | /* referring to the same cnids */ |
183 | | |
184 | | /* we are always looking for the data fork */ |
185 | 0 | if (key->fork_type != HFS_EXT_KEY_TYPE_DATA) |
186 | 0 | return 1; |
187 | | |
188 | | /* we are always looking for a start_block of zero |
189 | | (interested in the beginning of the extents, regardless |
190 | | of what the start_block is); all files except the bad |
191 | | blocks file should have a start_block greater than |
192 | | zero */ |
193 | 0 | if (tsk_getu32(fs->endian, key->start_block) == 0) |
194 | 0 | return 0; |
195 | 0 | return 1; |
196 | 0 | } |
197 | | |
198 | | |
199 | | /** \internal |
200 | | * Returns the length of an HFS+ B-tree INDEX key based on the tree header |
201 | | * structure and the length claimed in the record. With some trees, |
202 | | * the length given in the record is not used. |
203 | | * Note that this neither detects nor correctly handles 8-bit keys |
204 | | * (which should not be present in HFS+). |
205 | | * |
206 | | * This does not give the right answer for the Attributes File B-tree, for some |
207 | | * HFS+ file systems produced by the Apple OS, while it works for others. For |
208 | | * the Attributes file, INDEX keys should always be as stated in the record itself, |
209 | | * never the "maxKeyLen" of the B-tree header. |
210 | | * |
211 | | * In this software, this function is only invoked when dealing with the Extents file. In |
212 | | * that usage, it is not sufficiently well tested to know if it always gives the right |
213 | | * answer or not. We can only test that with a highly fragmented disk. |
214 | | * @param hfs File System |
215 | | * @param keylen Length of key as given in record |
216 | | * @param header Tree header |
217 | | * @returns Length of key |
218 | | */ |
219 | | uint16_t |
220 | | hfs_get_idxkeylen(HFS_INFO * hfs, uint16_t keylen, |
221 | | const hfs_btree_header_record * header) |
222 | 0 | { |
223 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); |
224 | | |
225 | | // if the flag is set, use the length given in the record |
226 | 0 | if (tsk_getu32(fs->endian, header->attr) & HFS_BT_HEAD_ATTR_VARIDXKEYS) |
227 | 0 | return keylen; |
228 | 0 | else |
229 | 0 | return tsk_getu16(fs->endian, header->maxKeyLen); |
230 | 0 | } |
231 | | |
232 | | |
233 | | /** |
234 | | * Convert the extents runs to TSK_FS_ATTR_RUN runs. |
235 | | * |
236 | | * @param a_fs File system to analyze |
237 | | * @param a_extents Raw extents to process (in an array of 8) |
238 | | * @param a_start_off Starting block offset of these runs |
239 | | * @returns NULL on error or if no runs are in extents (test tsk_errno) |
240 | | */ |
241 | | static TSK_FS_ATTR_RUN * |
242 | | hfs_extents_to_attr(TSK_FS_INFO * a_fs, const hfs_ext_desc * a_extents, |
243 | | TSK_OFF_T a_start_off) |
244 | 2 | { |
245 | 2 | TSK_FS_ATTR_RUN *head_run = NULL; |
246 | 2 | TSK_FS_ATTR_RUN *prev_run = NULL; |
247 | 2 | int i; |
248 | 2 | TSK_OFF_T cur_off = a_start_off; |
249 | | |
250 | | // since tsk_errno is checked as a return value, make sure it is clean. |
251 | 2 | tsk_error_reset(); |
252 | | |
253 | 2 | if (tsk_verbose) |
254 | 0 | tsk_fprintf(stderr, |
255 | 0 | "hfs_extents_to_attr: Converting extents from offset %" PRIdOFF |
256 | 0 | " to runlist\n", a_start_off); |
257 | | |
258 | 3 | for (i = 0; i < 8; ++i) { |
259 | 3 | TSK_FS_ATTR_RUN *cur_run; |
260 | | |
261 | 3 | uint32_t addr = tsk_getu32(a_fs->endian, a_extents[i].start_blk); |
262 | 3 | uint32_t len = tsk_getu32(a_fs->endian, a_extents[i].blk_cnt); |
263 | | |
264 | 3 | if (tsk_verbose) |
265 | 0 | tsk_fprintf(stderr, |
266 | 0 | "hfs_extents_to_attr: run %i at addr %" PRIu32 |
267 | 0 | " with len %" PRIu32 "\n", i, addr, len); |
268 | | |
269 | 3 | if ((addr == 0) && (len == 0)) { |
270 | 2 | break; |
271 | 2 | } |
272 | | |
273 | | // make a non-resident run |
274 | 1 | if ((cur_run = tsk_fs_attr_run_alloc()) == NULL) { |
275 | 0 | error_returned(" - hfs_extents_to_attr"); |
276 | 0 | return NULL; |
277 | 0 | } |
278 | | |
279 | 1 | cur_run->addr = addr; |
280 | 1 | cur_run->len = len; |
281 | 1 | cur_run->offset = cur_off; |
282 | | |
283 | 1 | if (head_run == NULL) |
284 | 1 | head_run = cur_run; |
285 | 1 | if (prev_run != NULL) |
286 | 0 | prev_run->next = cur_run; |
287 | 1 | cur_off += cur_run->len; |
288 | 1 | prev_run = cur_run; |
289 | 1 | } |
290 | | |
291 | 2 | return head_run; |
292 | 2 | } |
293 | | |
294 | | |
/**
 * Look in the extents catalog for entries for a given file. Add the runs
 * to the passed attribute structure.
 *
 * @param hfs File system being analyzed
 * @param cnid file id of file to search for
 * @param a_attr Attribute to add extents runs to
 * @param dataForkQ if true, then find extents for the data fork. If false, then find extents for the Resource fork.
 * @returns 1 on error and 0 on success
 */
static uint8_t
hfs_ext_find_extent_record_attr(HFS_INFO * hfs, uint32_t cnid,
    TSK_FS_ATTR * a_attr, unsigned char dataForkQ)
{
    TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
    uint16_t nodesize;          /* size of nodes (all, regardless of the name) */
    uint32_t cur_node;          /* node id of the current node */
    char *node = NULL;          /* buffer for one full B-tree node */
    uint8_t is_done;
    uint8_t desiredType;        /* fork type key value we are looking for */

    tsk_error_reset();

    if (tsk_verbose)
        tsk_fprintf(stderr,
            "hfs_ext_find_extent_record_attr: Looking for extents for file %"
            PRIu32 " %s\n", cnid,
            dataForkQ ? "data fork" : "resource fork");

    if (!hfs->has_extents_file) {
        // No extents file (which is optional), and so, no further extents are possible.
        return 0;
    }

    // Are we looking for extents of the data fork or the resource fork?
    desiredType =
        dataForkQ ? HFS_EXT_KEY_TYPE_DATA : HFS_EXT_KEY_TYPE_RSRC;

    // Load the extents attribute, if it has not been done so yet.
    // The opened file, attribute, and header are cached in hfs for reuse.
    if (hfs->extents_file == NULL) {
        ssize_t cnt;

        if ((hfs->extents_file =
                tsk_fs_file_open_meta(fs, NULL,
                    HFS_EXTENTS_FILE_ID)) == NULL) {
            return 1;
        }

        /* cache the data attribute */
        hfs->extents_attr =
            tsk_fs_attrlist_get(hfs->extents_file->meta->attr,
            TSK_FS_ATTR_TYPE_DEFAULT);
        if (!hfs->extents_attr) {
            tsk_error_errstr2_concat
                (" - Default Attribute not found in Extents File");
            return 1;
        }

        // cache the extents file header
        // (read at offset 14: the header record follows the node
        // descriptor at the start of the header node)
        cnt = tsk_fs_attr_read(hfs->extents_attr, 14,
            (char *) &(hfs->extents_header),
            sizeof(hfs_btree_header_record), 0);
        if (cnt != sizeof(hfs_btree_header_record)) {
            if (cnt >= 0) {
                // short read: no error was set by the read itself
                tsk_error_reset();
                tsk_error_set_errno(TSK_ERR_FS_READ);
            }
            tsk_error_set_errstr2
                ("hfs_ext_find_extent_record_attr: Error reading header");
            return 1;
        }
    }

    // allocate a node buffer
    nodesize = tsk_getu16(fs->endian, hfs->extents_header.nodesize);
    if ((node = (char *) tsk_malloc(nodesize)) == NULL) {
        return 1;
    }

    /* start at root node */
    cur_node = tsk_getu32(fs->endian, hfs->extents_header.rootNode);

    /* if the root node is zero, then the extents btree is empty */
    /* if no files have overflow extents, the Extents B-tree still
       exists on disk, but is an empty B-tree containing only
       the header node */
    if (cur_node == 0) {
        if (tsk_verbose)
            tsk_fprintf(stderr, "hfs_ext_find_extent_record: "
                "empty extents btree\n");
        free(node);
        return 0;
    }

    if (tsk_verbose)
        tsk_fprintf(stderr, "hfs_ext_find_extent_record: starting at "
            "root node %" PRIu32 "; nodesize = %"
            PRIu16 "\n", cur_node, nodesize);

    /* Recurse down to the needed leaf nodes and then go forward */
    is_done = 0;
    while (is_done == 0) {
        TSK_OFF_T cur_off;      /* start address of cur_node */
        uint16_t num_rec;       /* number of records in this node */
        ssize_t cnt;
        hfs_btree_node *node_desc;

        // sanity check: node number must be within the tree
        if (cur_node > tsk_getu32(fs->endian,
                hfs->extents_header.totalNodes)) {
            tsk_error_set_errno(TSK_ERR_FS_GENFS);
            tsk_error_set_errstr
                ("hfs_ext_find_extent_record_attr: Node %d too large for file",
                cur_node);
            free(node);
            return 1;
        }

        // read the current node
        cur_off = (TSK_OFF_T)cur_node * nodesize;
        if (tsk_verbose)
            tsk_fprintf(stderr,
                "hfs_ext_find_extent_record: reading node %" PRIu32
                " at offset %" PRIdOFF "\n", cur_node, cur_off);

        cnt = tsk_fs_attr_read(hfs->extents_attr, cur_off,
            node, nodesize, 0);
        if (cnt != nodesize) {
            if (cnt >= 0) {
                tsk_error_reset();
                tsk_error_set_errno(TSK_ERR_FS_READ);
            }
            tsk_error_set_errstr2
                ("hfs_ext_find_extent_record_attr: Error reading node %d at offset %"
                PRIdOFF, cur_node, cur_off);
            free(node);
            return 1;
        }

        // process the header / descriptor
        if (nodesize < sizeof(hfs_btree_node)) {
            tsk_error_set_errno(TSK_ERR_FS_GENFS);
            tsk_error_set_errstr
                ("hfs_ext_find_extent_record_attr: Node size %d is too small to be valid", nodesize);
            free(node);
            return 1;
        }
        node_desc = (hfs_btree_node *) node;
        num_rec = tsk_getu16(fs->endian, node_desc->num_rec);

        if (num_rec == 0) {
            tsk_error_set_errno(TSK_ERR_FS_GENFS);
            tsk_error_set_errstr
                ("hfs_ext_find_extent_record: zero records in node %"
                PRIu32, cur_node);
            free(node);
            return 1;
        }


        /* With an index node, find the record with the largest key that is smaller
         * to or equal to cnid */
        if (node_desc->type == HFS_BT_NODE_TYPE_IDX) {
            uint32_t next_node = 0;
            int rec;

            if (tsk_verbose)
                tsk_fprintf(stderr,
                    "hfs_ext_find_extent_record: Index node %" PRIu32
                    " @ %" PRIu64 " has %" PRIu16 " records\n", cur_node,
                    cur_off, num_rec);

            for (rec = 0; rec < num_rec; ++rec) {
                int cmp;
                size_t rec_off;
                hfs_btree_key_ext *key;

                // Make sure node is large enough, note that (rec + 1) * 2 is an offset
                // relative to the end of node (the record-offset table grows
                // backward from the node's last byte)
                if ((rec + 1) * 2 > (int) nodesize) {
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
                    tsk_error_set_errstr
                        ("hfs_ext_find_extent_record: offset of record %d in leaf node %d too small (%"
                        PRIu16 ")", rec, cur_node, nodesize);
                    free(node);
                    return 1;
                }
                // get the record offset in the node
                rec_off =
                    tsk_getu16(fs->endian,
                    &node[nodesize - (rec + 1) * 2]);
                if (rec_off > nodesize - sizeof(hfs_btree_key_ext)) {
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
                    tsk_error_set_errstr
                        ("hfs_ext_find_extent_record_attr: offset of record %d in index node %d too large (%d vs %"
                        PRIu16 ")", rec, cur_node, (int) rec_off,
                        nodesize);
                    free(node);
                    return 1;
                }
                key = (hfs_btree_key_ext *) & node[rec_off];

                cmp = hfs_ext_compare_keys(hfs, cnid, key);

                if (tsk_verbose)
                    tsk_fprintf(stderr,
                        "hfs_ext_find_extent_record: record %" PRIu16
                        " ; keylen %" PRIu16 " (FileId: %" PRIu32
                        ", ForkType: %" PRIu8 ", StartBlk: %" PRIu32
                        "); compare: %d\n", rec, tsk_getu16(fs->endian,
                            key->key_len), tsk_getu32(fs->endian,
                            key->file_id), key->fork_type,
                        tsk_getu32(fs->endian, key->start_block), cmp);

                /* save the info from this record unless it is bigger than cnid
                 * (the "next_node == 0" clause makes sure we always have a
                 * child to descend into, even if every key sorts after cnid) */
                if ((cmp <= 0) || (next_node == 0)) {
                    hfs_btree_index_record *idx_rec;
                    int keylen =
                        2 + hfs_get_idxkeylen(hfs, tsk_getu16(fs->endian,
                            key->key_len), &(hfs->extents_header));
                    // bounds check: key plus the 4-byte child pointer must fit
                    if ((nodesize < 4) || (keylen > nodesize - 4) || (rec_off >= nodesize - 4 - keylen)) {
                        tsk_error_set_errno(TSK_ERR_FS_GENFS);
                        tsk_error_set_errstr
                            ("hfs_ext_find_extent_record_attr: offset and keylenth of record %d in index node %d too large (%d vs %"
                            PRIu16 ")", rec, cur_node,
                            (int) rec_off + keylen, nodesize);
                        free(node);
                        return 1;
                    }
                    idx_rec =
                        (hfs_btree_index_record *) & node[rec_off +
                        keylen];
                    next_node = tsk_getu32(fs->endian, idx_rec->childNode);
                }

                // we are bigger than cnid, so move on to the next node
                if (cmp > 0) {
                    break;
                }
            }

            // check if we found a relevant node, if not stop.
            if (next_node == 0) {
                if (tsk_verbose)
                    tsk_fprintf(stderr,
                        "hfs_ext_find_extent_record_attr: did not find any keys for %d in index node %d",
                        cnid, cur_node);
                is_done = 1;
                break;
            }
            cur_node = next_node;
        }

        /* with a leaf, we process until we are past cnid. We move right too if we can */
        else if (node_desc->type == HFS_BT_NODE_TYPE_LEAF) {
            int rec;

            if (tsk_verbose)
                tsk_fprintf(stderr,
                    "hfs_ext_find_extent_record: Leaf node %" PRIu32 " @ %"
                    PRIu64 " has %" PRIu16 " records\n", cur_node, cur_off,
                    num_rec);

            for (rec = 0; rec < num_rec; ++rec) {
                size_t rec_off;
                hfs_btree_key_ext *key;
                uint32_t rec_cnid;
                hfs_extents *extents;
                TSK_OFF_T ext_off = 0;
                int keylen;
                TSK_FS_ATTR_RUN *attr_run;

                // Make sure node is large enough, note that (rec + 1) * 2 is an offset
                // relative to the end of node
                if ((rec + 1) * 2 > (int) nodesize) {
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
                    tsk_error_set_errstr
                        ("hfs_ext_find_extent_record_attr: offset of record %d in leaf node %d too small (%"
                        PRIu16 ")", rec, cur_node, nodesize);
                    free(node);
                    return 1;
                }
                // get the record offset in the node
                rec_off =
                    tsk_getu16(fs->endian,
                    &node[nodesize - (rec + 1) * 2]);

                if (rec_off >= nodesize - sizeof(hfs_btree_key_ext)) {
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
                    tsk_error_set_errstr
                        ("hfs_ext_find_extent_record_attr: offset of record %d in leaf node %d too large (%d vs %"
                        PRIu16 ")", rec, cur_node, (int) rec_off,
                        nodesize);
                    free(node);
                    return 1;
                }

                // Check that the whole hfs_btree_key_ext structure is set
                if (sizeof(hfs_btree_key_ext) > nodesize - rec_off) {
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
                    tsk_error_set_errstr
                        ("hfs_ext_find_extent_record_attr: record %d in leaf node %d truncated (have %d vs %"
                        PRIu16 " bytes)", rec, cur_node, nodesize - (int)rec_off,
                        sizeof(hfs_btree_key_ext));
                    free(node);
                    return 1;
                }

                key = (hfs_btree_key_ext *) & node[rec_off];

                if (tsk_verbose)
                    tsk_fprintf(stderr,
                        "hfs_ext_find_extent_record: record %" PRIu16
                        "; keylen %" PRIu16 " (%" PRIu32
                        ", %" PRIu8 ", %" PRIu32 ")\n", rec,
                        tsk_getu16(fs->endian, key->key_len),
                        tsk_getu32(fs->endian, key->file_id),
                        key->fork_type, tsk_getu32(fs->endian,
                            key->start_block));

                rec_cnid = tsk_getu32(fs->endian, key->file_id);

                // see if this record is for our file
                // OLD logic, just handles the DATA fork
                // if (rec_cnid < cnid) {
                //     continue;
                // }
                // else if ((rec_cnid > cnid)
                //     || (key->fork_type != HFS_EXT_KEY_TYPE_DATA)) {
                //     is_done = 1;
                //     break;
                // }

                // NEW logic, handles both DATA and RSRC forks.
                // Records are sorted by CNID, so once we pass ours we stop.
                if (rec_cnid < cnid) {
                    continue;
                }
                if (rec_cnid > cnid) {
                    is_done = 1;
                    break;
                }


                // Same CNID but wrong fork: DATA sorts before RSRC, so when
                // looking for the data fork a mismatch means we are past it;
                // when looking for the resource fork we may still reach it.
                if (key->fork_type != desiredType) {
                    if (dataForkQ) {
                        is_done = 1;
                        break;
                    }
                    else
                        continue;
                }

                // OK, this is one of the extents records that we are seeking, so save it.
                // Make sure there is room for the hfs_extents struct
                keylen = 2 + tsk_getu16(fs->endian, key->key_len);
                if (rec_off + keylen + sizeof(hfs_extents) > nodesize) {
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
                    tsk_error_set_errstr
                        ("hfs_ext_find_extent_record_attr: offset and keylenth of record %d in leaf node %d too large (%d vs %"
                        PRIu16 ")", rec, cur_node, (int) rec_off + keylen,
                        nodesize);
                    free(node);
                    return 1;
                }

                // get the starting offset of this extent
                ext_off = tsk_getu32(fs->endian, key->start_block);

                // convert the extents to the TSK format
                extents = (hfs_extents *) & node[rec_off + keylen];

                attr_run =
                    hfs_extents_to_attr(fs, extents->extents, ext_off);
                // NULL with a clean tsk_errno means "no runs", not an error
                if ((attr_run == NULL) && (tsk_error_get_errno() != 0)) {
                    tsk_error_errstr2_concat
                        (" - hfs_ext_find_extent_record_attr");
                    free(node);
                    return 1;
                }

                if (tsk_fs_attr_add_run(fs, a_attr, attr_run)) {
                    tsk_error_errstr2_concat
                        (" - hfs_ext_find_extent_record_attr");
                    free(node);
                    return 1;
                }
            }
            // follow the forward link to the next leaf; 0 means last leaf
            cur_node = tsk_getu32(fs->endian, node_desc->flink);
            if (cur_node == 0) {
                is_done = 1;
                break;
            }
        }
        else {
            tsk_error_set_errno(TSK_ERR_FS_GENFS);
            tsk_error_set_errstr("hfs_ext_find_extent_record: btree node %"
                PRIu32 " (%" PRIdOFF ") is neither index nor leaf (%" PRIu8
                ")", cur_node, cur_off, node_desc->type);
            free(node);
            return 1;
        }
    }
    free(node);
    return 0;
}
700 | | |
701 | | |
702 | | /** \internal |
703 | | * Compares two Catalog B-tree keys. |
704 | | * @param hfs File System being analyzed |
705 | | * @param key1 Key 1 to compare |
706 | | * @param key2 Key 2 to compare |
707 | | * @returns -1 if key1 is smaller, 0 if equal, and 1 if key1 is larger |
708 | | */ |
709 | | int |
710 | | hfs_cat_compare_keys(HFS_INFO * hfs, const hfs_btree_key_cat * key1, |
711 | | int keylen1, const hfs_btree_key_cat * key2) |
712 | 0 | { |
713 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); |
714 | 0 | uint32_t cnid1, cnid2; |
715 | |
|
716 | 0 | if (keylen1 < 6) { |
717 | | // Note that it would be better to return an error value here |
718 | | // but the current function interface does not support this |
719 | | // Also see issue #2365 |
720 | 0 | return -1; |
721 | 0 | } |
722 | 0 | cnid1 = tsk_getu32(fs->endian, key1->parent_cnid); |
723 | 0 | cnid2 = tsk_getu32(fs->endian, key2->parent_cnid); |
724 | |
|
725 | 0 | if (cnid1 < cnid2) |
726 | 0 | return -1; |
727 | 0 | if (cnid1 > cnid2) |
728 | 0 | return 1; |
729 | | |
730 | 0 | return hfs_unicode_compare(hfs, &key1->name, keylen1 - 6, &key2->name); |
731 | 0 | } |
732 | | |
733 | | |
734 | | /** \internal |
735 | | * |
736 | | * Traverse the HFS catalog file. Call the callback for each |
737 | | * record. |
738 | | * |
739 | | * @param hfs File system |
740 | | * @param a_cb callback |
741 | | * @param ptr Pointer to pass to callback |
742 | | * @returns 1 on error |
743 | | */ |
744 | | uint8_t |
745 | | hfs_cat_traverse(HFS_INFO * hfs, |
746 | | TSK_HFS_BTREE_CB a_cb, void *ptr) |
747 | 0 | { |
748 | 0 | TSK_FS_INFO *fs = &(hfs->fs_info); |
749 | 0 | uint32_t cur_node; /* node id of the current node */ |
750 | 0 | char *node; |
751 | |
|
752 | 0 | uint16_t nodesize; |
753 | 0 | uint8_t is_done = 0; |
754 | |
|
755 | 0 | tsk_error_reset(); |
756 | |
|
757 | 0 | nodesize = tsk_getu16(fs->endian, hfs->catalog_header.nodesize); |
758 | 0 | if ((node = (char *) tsk_malloc(nodesize)) == NULL) |
759 | 0 | return 1; |
760 | | |
761 | | /* start at root node */ |
762 | 0 | cur_node = tsk_getu32(fs->endian, hfs->catalog_header.rootNode); |
763 | | |
764 | | /* if the root node is zero, then the extents btree is empty */ |
765 | | /* if no files have overflow extents, the Extents B-tree still |
766 | | exists on disk, but is an empty B-tree containing only |
767 | | the header node */ |
768 | 0 | if (cur_node == 0) { |
769 | 0 | if (tsk_verbose) |
770 | 0 | tsk_fprintf(stderr, "hfs_cat_traverse: " |
771 | 0 | "empty extents btree\n"); |
772 | 0 | free(node); |
773 | 0 | return 1; |
774 | 0 | } |
775 | | |
776 | 0 | if (tsk_verbose) |
777 | 0 | tsk_fprintf(stderr, "hfs_cat_traverse: starting at " |
778 | 0 | "root node %" PRIu32 "; nodesize = %" |
779 | 0 | PRIu16 "\n", cur_node, nodesize); |
780 | | |
781 | | /* Recurse down to the needed leaf nodes and then go forward */ |
782 | 0 | is_done = 0; |
783 | 0 | while (is_done == 0) { |
784 | 0 | TSK_OFF_T cur_off; /* start address of cur_node */ |
785 | 0 | uint16_t num_rec; /* number of records in this node */ |
786 | 0 | ssize_t cnt; |
787 | 0 | hfs_btree_node *node_desc; |
788 | | |
789 | | // sanity check |
790 | 0 | if (cur_node > tsk_getu32(fs->endian, |
791 | 0 | hfs->catalog_header.totalNodes)) { |
792 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
793 | 0 | tsk_error_set_errstr |
794 | 0 | ("hfs_cat_traverse: Node %d too large for file", cur_node); |
795 | 0 | free(node); |
796 | 0 | return 1; |
797 | 0 | } |
798 | | |
799 | | // read the current node |
800 | 0 | cur_off = (TSK_OFF_T)cur_node * nodesize; |
801 | 0 | cnt = tsk_fs_attr_read(hfs->catalog_attr, cur_off, |
802 | 0 | node, nodesize, 0); |
803 | 0 | if (cnt != nodesize) { |
804 | 0 | if (cnt >= 0) { |
805 | 0 | tsk_error_reset(); |
806 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
807 | 0 | } |
808 | 0 | tsk_error_set_errstr2 |
809 | 0 | ("hfs_cat_traverse: Error reading node %d at offset %" |
810 | 0 | PRIdOFF, cur_node, cur_off); |
811 | 0 | free(node); |
812 | 0 | return 1; |
813 | 0 | } |
814 | | |
815 | | // process the header / descriptor |
816 | 0 | if (nodesize < sizeof(hfs_btree_node)) { |
817 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
818 | 0 | tsk_error_set_errstr |
819 | 0 | ("hfs_cat_traverse: Node size %d is too small to be valid", nodesize); |
820 | 0 | free(node); |
821 | 0 | return 1; |
822 | 0 | } |
823 | 0 | node_desc = (hfs_btree_node *) node; |
824 | 0 | num_rec = tsk_getu16(fs->endian, node_desc->num_rec); |
825 | |
|
826 | 0 | if (tsk_verbose) |
827 | 0 | tsk_fprintf(stderr, "hfs_cat_traverse: node %" PRIu32 |
828 | 0 | " @ %" PRIu64 " has %" PRIu16 " records\n", |
829 | 0 | cur_node, cur_off, num_rec); |
830 | |
|
831 | 0 | if (num_rec == 0) { |
832 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
833 | 0 | tsk_error_set_errstr("hfs_cat_traverse: zero records in node %" |
834 | 0 | PRIu32, cur_node); |
835 | 0 | free(node); |
836 | 0 | return 1; |
837 | 0 | } |
838 | | |
839 | | /* With an index node, find the record with the largest key that is smaller |
840 | | * to or equal to cnid */ |
841 | 0 | if (node_desc->type == HFS_BT_NODE_TYPE_IDX) { |
842 | 0 | uint32_t next_node = 0; |
843 | 0 | int rec; |
844 | |
|
845 | 0 | for (rec = 0; rec < num_rec; ++rec) { |
846 | 0 | size_t rec_off; |
847 | 0 | hfs_btree_key_cat *key; |
848 | 0 | uint8_t retval; |
849 | 0 | int keylen; |
850 | | |
851 | | // Make sure node is large enough, note that (rec + 1) * 2 is an offset |
852 | | // relative to the end of node |
853 | 0 | if ((rec + 1) * 2 > (int) nodesize) { |
854 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
855 | 0 | tsk_error_set_errstr |
856 | 0 | ("hfs_cat_traverse: offset of record %d in leaf node %d too small (%" |
857 | 0 | PRIu16 ")", rec, cur_node, nodesize); |
858 | 0 | free(node); |
859 | 0 | return 1; |
860 | 0 | } |
861 | | // get the record offset in the node |
862 | 0 | rec_off = |
863 | 0 | tsk_getu16(fs->endian, |
864 | 0 | &node[nodesize - (rec + 1) * 2]); |
865 | | |
866 | | // Need at least 2 bytes for key_len |
867 | 0 | if (rec_off >= nodesize - 2) { |
868 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
869 | 0 | tsk_error_set_errstr |
870 | 0 | ("hfs_cat_traverse: offset of record %d in index node %d too large (%d vs %" |
871 | 0 | PRIu16 ")", rec, cur_node, (int) rec_off, |
872 | 0 | nodesize); |
873 | 0 | free(node); |
874 | 0 | return 1; |
875 | 0 | } |
876 | | |
877 | 0 | key = (hfs_btree_key_cat *) & node[rec_off]; |
878 | 0 | keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len); |
879 | | |
880 | | // Want a key of at least 6 bytes, the size of the first 2 members of hfs_btree_key_cat |
881 | 0 | if ((keylen < 6) || (keylen > nodesize - rec_off)) { |
882 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
883 | 0 | tsk_error_set_errstr |
884 | 0 | ("hfs_cat_traverse: length of key %d in index node %d out of bounds (6 < %d < %" |
885 | 0 | PRIu16 ")", rec, cur_node, keylen, (nodesize - rec_off)); |
886 | 0 | free(node); |
887 | 0 | return 1; |
888 | 0 | } |
889 | | |
890 | | /* |
891 | | if (tsk_verbose) |
892 | | tsk_fprintf(stderr, |
893 | | "hfs_cat_traverse: record %" PRIu16 |
894 | | " ; keylen %" PRIu16 " (%" PRIu32 ")\n", rec, |
895 | | tsk_getu16(fs->endian, key->key_len), |
896 | | tsk_getu32(fs->endian, key->parent_cnid)); |
897 | | */ |
898 | | |
899 | | |
900 | | /* save the info from this record unless it is too big */ |
901 | 0 | retval = |
902 | 0 | a_cb(hfs, HFS_BT_NODE_TYPE_IDX, key, keylen, nodesize, |
903 | 0 | cur_off + rec_off, ptr); |
904 | 0 | if (retval == HFS_BTREE_CB_ERR) { |
905 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
906 | 0 | tsk_error_set_errstr2 |
907 | 0 | ("hfs_cat_traverse: Callback returned error"); |
908 | 0 | free(node); |
909 | 0 | return 1; |
910 | 0 | } |
911 | | // record the closest entry |
912 | 0 | else if ((retval == HFS_BTREE_CB_IDX_LT) |
913 | 0 | || (next_node == 0)) { |
914 | 0 | hfs_btree_index_record *idx_rec; |
915 | 0 | int keylen = |
916 | 0 | 2 + hfs_get_idxkeylen(hfs, tsk_getu16(fs->endian, |
917 | 0 | key->key_len), &(hfs->catalog_header)); |
918 | 0 | if (keylen > nodesize - rec_off) { |
919 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
920 | 0 | tsk_error_set_errstr |
921 | 0 | ("hfs_cat_traverse: offset of record and keylength %d in index node %d too large (%d vs %" |
922 | 0 | PRIu16 ")", rec, cur_node, |
923 | 0 | (int) rec_off + keylen, nodesize); |
924 | 0 | free(node); |
925 | 0 | return 1; |
926 | 0 | } |
927 | 0 | if (sizeof(hfs_btree_index_record) > nodesize - rec_off - keylen) { |
928 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
929 | 0 | tsk_error_set_errstr("hfs_cat_traverse: truncated btree index record"); |
930 | 0 | free(node); |
931 | 0 | return 1; |
932 | 0 | } |
933 | 0 | idx_rec = |
934 | 0 | (hfs_btree_index_record *) & node[rec_off + |
935 | 0 | keylen]; |
936 | 0 | next_node = tsk_getu32(fs->endian, idx_rec->childNode); |
937 | 0 | } |
938 | 0 | if (retval == HFS_BTREE_CB_IDX_EQGT) { |
939 | | // move down to the next node |
940 | 0 | break; |
941 | 0 | } |
942 | 0 | } |
943 | | // check if we found a relevant node |
944 | 0 | if (next_node == 0) { |
945 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
946 | 0 | tsk_error_set_errstr |
947 | 0 | ("hfs_cat_traverse: did not find any keys in index node %d", |
948 | 0 | cur_node); |
949 | 0 | is_done = 1; |
950 | 0 | break; |
951 | 0 | } |
952 | | // TODO: Handle multinode loops |
953 | 0 | if (next_node == cur_node) { |
954 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
955 | 0 | tsk_error_set_errstr |
956 | 0 | ("hfs_cat_traverse: node %d references itself as next node", |
957 | 0 | cur_node); |
958 | 0 | is_done = 1; |
959 | 0 | break; |
960 | 0 | } |
961 | 0 | cur_node = next_node; |
962 | 0 | } |
963 | | |
964 | | /* With a leaf, we look for the specific record. */ |
965 | 0 | else if (node_desc->type == HFS_BT_NODE_TYPE_LEAF) { |
966 | 0 | int rec; |
967 | |
|
968 | 0 | for (rec = 0; rec < num_rec; ++rec) { |
969 | 0 | size_t rec_off; |
970 | 0 | hfs_btree_key_cat *key; |
971 | 0 | uint8_t retval; |
972 | 0 | int keylen; |
973 | | |
974 | | // Make sure node is large enough, note that (rec + 1) * 2 is an offset |
975 | | // relative to the end of node |
976 | 0 | if ((rec + 1) * 2 > (int) nodesize) { |
977 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
978 | 0 | tsk_error_set_errstr |
979 | 0 | ("hfs_cat_traverse: offset of record %d in leaf node %d too small (%" |
980 | 0 | PRIu16 ")", rec, cur_node, nodesize); |
981 | 0 | free(node); |
982 | 0 | return 1; |
983 | 0 | } |
984 | | // get the record offset in the node |
985 | 0 | rec_off = |
986 | 0 | tsk_getu16(fs->endian, |
987 | 0 | &node[nodesize - (rec + 1) * 2]); |
988 | | |
989 | | // Need at least 2 bytes for key_len |
990 | 0 | if (rec_off >= nodesize - 2) { |
991 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
992 | 0 | tsk_error_set_errstr |
993 | 0 | ("hfs_cat_traverse: offset of record %d in leaf node %d too large (%d vs %" |
994 | 0 | PRIu16 ")", rec, cur_node, (int) rec_off, |
995 | 0 | nodesize); |
996 | 0 | free(node); |
997 | 0 | return 1; |
998 | 0 | } |
999 | | |
1000 | 0 | key = (hfs_btree_key_cat *) & node[rec_off]; |
1001 | 0 | keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len); |
1002 | | |
1003 | | // Want a key of at least 6 bytes, the size of the first 2 members of hfs_btree_key_cat |
1004 | 0 | if ((keylen < 6) || (keylen > nodesize - rec_off)) { |
1005 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
1006 | 0 | tsk_error_set_errstr |
1007 | 0 | ("hfs_cat_traverse: length of key %d in leaf node %d out of bounds (6 < %d < %" |
1008 | 0 | PRIu16 ")", rec, cur_node, keylen, nodesize); |
1009 | 0 | free(node); |
1010 | 0 | return 1; |
1011 | 0 | } |
1012 | | |
1013 | | /* |
1014 | | if (tsk_verbose) |
1015 | | tsk_fprintf(stderr, |
1016 | | "hfs_cat_traverse: record %" PRIu16 |
1017 | | "; keylen %" PRIu16 " (%" PRIu32 ")\n", rec, |
1018 | | tsk_getu16(fs->endian, key->key_len), |
1019 | | tsk_getu32(fs->endian, key->parent_cnid)); |
1020 | | */ |
1021 | | // rec_cnid = tsk_getu32(fs->endian, key->file_id); |
1022 | | |
1023 | | // The nodesize passed to the callback should contain the available node |
1024 | | // data size relative from the start of the key. |
1025 | 0 | retval = |
1026 | 0 | a_cb(hfs, HFS_BT_NODE_TYPE_LEAF, key, keylen, nodesize - rec_off, |
1027 | 0 | cur_off + rec_off, ptr); |
1028 | 0 | if (retval == HFS_BTREE_CB_LEAF_STOP) { |
1029 | 0 | is_done = 1; |
1030 | 0 | break; |
1031 | 0 | } |
1032 | 0 | else if (retval == HFS_BTREE_CB_ERR) { |
1033 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
1034 | 0 | tsk_error_set_errstr2 |
1035 | 0 | ("hfs_cat_traverse: Callback returned error"); |
1036 | 0 | free(node); |
1037 | 0 | return 1; |
1038 | 0 | } |
1039 | 0 | } |
1040 | | |
1041 | | // move right to the next node if we got this far |
1042 | 0 | if (is_done == 0) { |
1043 | 0 | cur_node = tsk_getu32(fs->endian, node_desc->flink); |
1044 | 0 | if (cur_node == 0) { |
1045 | 0 | is_done = 1; |
1046 | 0 | } |
1047 | 0 | if (tsk_verbose) |
1048 | 0 | tsk_fprintf(stderr, |
1049 | 0 | "hfs_cat_traverse: moving forward to next leaf"); |
1050 | 0 | } |
1051 | 0 | } |
1052 | 0 | else { |
1053 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
1054 | 0 | tsk_error_set_errstr("hfs_cat_traverse: btree node %" PRIu32 |
1055 | 0 | " (%" PRIu64 ") is neither index nor leaf (%" PRIu8 ")", |
1056 | 0 | cur_node, cur_off, node_desc->type); |
1057 | 0 | free(node); |
1058 | 0 | return 1; |
1059 | 0 | } |
1060 | 0 | } |
1061 | 0 | free(node); |
1062 | 0 | return 0; |
1063 | 0 | } |
1064 | | |
/* Context structure passed (via the void *ptr argument) through
 * hfs_cat_traverse() to hfs_cat_get_record_offset_cb(). */
typedef struct {
    const hfs_btree_key_cat *targ_key;  /* key being searched for */
    TSK_OFF_T off;              /* byte offset (in catalog file) of the data of
                                 * the matching leaf record; 0 if not found */
} HFS_CAT_GET_RECORD_OFFSET_DATA;
1069 | | |
1070 | | static uint8_t |
1071 | | hfs_cat_get_record_offset_cb(HFS_INFO * hfs, int8_t level_type, |
1072 | | const hfs_btree_key_cat * cur_key, int cur_keylen, size_t node_size, |
1073 | | TSK_OFF_T key_off, void *ptr) |
1074 | 0 | { |
1075 | 0 | HFS_CAT_GET_RECORD_OFFSET_DATA *offset_data = (HFS_CAT_GET_RECORD_OFFSET_DATA *)ptr; |
1076 | 0 | const hfs_btree_key_cat *targ_key = offset_data->targ_key; |
1077 | |
|
1078 | 0 | if (tsk_verbose) |
1079 | 0 | tsk_fprintf(stderr, |
1080 | 0 | "hfs_cat_get_record_offset_cb: %s node want: %" PRIu32 |
1081 | 0 | " vs have: %" PRIu32 "\n", |
1082 | 0 | (level_type == HFS_BT_NODE_TYPE_IDX) ? "Index" : "Leaf", |
1083 | 0 | tsk_getu32(hfs->fs_info.endian, targ_key->parent_cnid), |
1084 | 0 | tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid)); |
1085 | |
|
1086 | 0 | if (level_type == HFS_BT_NODE_TYPE_IDX) { |
1087 | 0 | int diff = hfs_cat_compare_keys(hfs, cur_key, cur_keylen, targ_key); |
1088 | 0 | if (diff < 0) |
1089 | 0 | return HFS_BTREE_CB_IDX_LT; |
1090 | 0 | else |
1091 | 0 | return HFS_BTREE_CB_IDX_EQGT; |
1092 | 0 | } |
1093 | 0 | else { |
1094 | 0 | int diff = hfs_cat_compare_keys(hfs, cur_key, cur_keylen, targ_key); |
1095 | | |
1096 | | // see if this record is for our file or if we passed the interesting entries |
1097 | 0 | if (diff < 0) { |
1098 | 0 | return HFS_BTREE_CB_LEAF_GO; |
1099 | 0 | } |
1100 | 0 | else if (diff == 0) { |
1101 | 0 | offset_data->off = |
1102 | 0 | key_off + 2 + tsk_getu16(hfs->fs_info.endian, |
1103 | 0 | cur_key->key_len); |
1104 | 0 | } |
1105 | 0 | return HFS_BTREE_CB_LEAF_STOP; |
1106 | 0 | } |
1107 | 0 | } |
1108 | | |
1109 | | |
1110 | | /** \internal |
1111 | | * Find the byte offset (from the start of the catalog file) to a record |
1112 | | * in the catalog file. |
1113 | | * @param hfs File System being analyzed |
1114 | | * @param needle Key to search for |
1115 | | * @returns Byte offset or 0 on error. 0 is also returned if catalog |
1116 | | * record was not found. Check tsk_errno to determine if error occurred. |
1117 | | */ |
1118 | | static TSK_OFF_T |
1119 | | hfs_cat_get_record_offset(HFS_INFO * hfs, const hfs_btree_key_cat * needle) |
1120 | 0 | { |
1121 | 0 | HFS_CAT_GET_RECORD_OFFSET_DATA offset_data; |
1122 | 0 | offset_data.off = 0; |
1123 | 0 | offset_data.targ_key = needle; |
1124 | 0 | if (hfs_cat_traverse(hfs, hfs_cat_get_record_offset_cb, &offset_data)) { |
1125 | 0 | return 0; |
1126 | 0 | } |
1127 | 0 | return offset_data.off; |
1128 | 0 | } |
1129 | | |
1130 | | |
/** \internal
 * Given a byte offset to a leaf record in the catalog file, read the data as
 * a thread record. This will zero the buffer and read in only as much thread
 * data as is actually present (the fixed header plus the name characters).
 * @param hfs File System
 * @param off Byte offset of record in catalog file (not including key)
 * @param thread [out] Buffer to write thread data into.
 * @returns 0 on success, 1 on failure; sets the TSK error code/string on failure */
uint8_t
hfs_cat_read_thread_record(HFS_INFO * hfs, TSK_OFF_T off,
    hfs_thread * thread)
{
    TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
    uint16_t uni_len;
    ssize_t cnt;

    /* Zero the whole output first; we only read the bytes that exist on disk. */
    memset(thread, 0, sizeof(hfs_thread));

    /* Read the 10-byte fixed portion: rec_type (2), reserved (4), parent
     * cnid (4) -- up to but not including the name characters. */
    cnt = tsk_fs_attr_read(hfs->catalog_attr, off, (char *) thread, 10, 0);
    if (cnt != 10) {
        if (cnt >= 0) {
            tsk_error_reset();
            tsk_error_set_errno(TSK_ERR_FS_READ);
        }
        tsk_error_set_errstr2
            ("hfs_cat_read_thread_record: Error reading catalog offset %"
            PRIdOFF " (header)", off);
        return 1;
    }

    /* Only folder-thread and file-thread records are valid here. */
    if ((tsk_getu16(fs->endian, thread->rec_type) != HFS_FOLDER_THREAD)
        && (tsk_getu16(fs->endian, thread->rec_type) != HFS_FILE_THREAD)) {
        tsk_error_set_errno(TSK_ERR_FS_GENFS);
        tsk_error_set_errstr
            ("hfs_cat_read_thread_record: unexpected record type %" PRIu16,
            tsk_getu16(fs->endian, thread->rec_type));
        return 1;
    }

    /* Length of the name, in UTF-16 code units (not bytes). */
    uni_len = tsk_getu16(fs->endian, thread->name.length);

    /* HFS+ file names are at most 255 UTF-16 units; a larger value means
     * the record is corrupt and would overflow thread->name.unicode. */
    if (uni_len > 255) {
        tsk_error_set_errno(TSK_ERR_FS_INODE_COR);
        tsk_error_set_errstr
            ("hfs_cat_read_thread_record: invalid string length (%" PRIu16
            ")", uni_len);
        return 1;
    }

    /* Read the name characters that follow the fixed header. */
    cnt =
        tsk_fs_attr_read(hfs->catalog_attr, off + 10,
        (char *) thread->name.unicode, uni_len * 2, 0);
    if (cnt != uni_len * 2) {
        if (cnt >= 0) {
            tsk_error_reset();
            tsk_error_set_errno(TSK_ERR_FS_READ);
        }
        tsk_error_set_errstr2
            ("hfs_cat_read_thread_record: Error reading catalog offset %"
            PRIdOFF " (name)", off + 10);
        return 1;
    }

    return 0;
}
1195 | | |
1196 | | /** \internal |
1197 | | * Read a catalog record into a local data structure. This reads the |
1198 | | * correct amount, depending on if it is a file or folder. |
1199 | | * @param hfs File system being analyzed |
1200 | | * @param off Byte offset (in catalog file) of record (not including key) |
1201 | | * @param record [out] Structure to read data into |
1202 | | * @returns 1 on error |
1203 | | */ |
1204 | | uint8_t |
1205 | | hfs_cat_read_file_folder_record(HFS_INFO * hfs, TSK_OFF_T off, |
1206 | | hfs_file_folder * record) |
1207 | 0 | { |
1208 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); |
1209 | 0 | ssize_t cnt; |
1210 | 0 | char rec_type[2]; |
1211 | |
|
1212 | 0 | memset(record, 0, sizeof(hfs_file_folder)); |
1213 | |
|
1214 | 0 | cnt = tsk_fs_attr_read(hfs->catalog_attr, off, rec_type, 2, 0); |
1215 | 0 | if (cnt != 2) { |
1216 | 0 | if (cnt >= 0) { |
1217 | 0 | tsk_error_reset(); |
1218 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
1219 | 0 | } |
1220 | 0 | tsk_error_set_errstr2 |
1221 | 0 | ("hfs_cat_read_file_folder_record: Error reading record type from catalog offset %" |
1222 | 0 | PRIdOFF " (header)", off); |
1223 | 0 | return 1; |
1224 | 0 | } |
1225 | | |
1226 | 0 | if (tsk_getu16(fs->endian, rec_type) == HFS_FOLDER_RECORD) { |
1227 | 0 | cnt = |
1228 | 0 | tsk_fs_attr_read(hfs->catalog_attr, off, (char *) record, |
1229 | 0 | sizeof(hfs_folder), 0); |
1230 | 0 | if (cnt != sizeof(hfs_folder)) { |
1231 | 0 | if (cnt >= 0) { |
1232 | 0 | tsk_error_reset(); |
1233 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
1234 | 0 | } |
1235 | 0 | tsk_error_set_errstr2 |
1236 | 0 | ("hfs_cat_read_file_folder_record: Error reading catalog offset %" |
1237 | 0 | PRIdOFF " (folder)", off); |
1238 | 0 | return 1; |
1239 | 0 | } |
1240 | 0 | } |
1241 | 0 | else if (tsk_getu16(fs->endian, rec_type) == HFS_FILE_RECORD) { |
1242 | 0 | cnt = |
1243 | 0 | tsk_fs_attr_read(hfs->catalog_attr, off, (char *) record, |
1244 | 0 | sizeof(hfs_file), 0); |
1245 | 0 | if (cnt != sizeof(hfs_file)) { |
1246 | 0 | if (cnt >= 0) { |
1247 | 0 | tsk_error_reset(); |
1248 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
1249 | 0 | } |
1250 | 0 | tsk_error_set_errstr2 |
1251 | 0 | ("hfs_cat_read_file_folder_record: Error reading catalog offset %" |
1252 | 0 | PRIdOFF " (file)", off); |
1253 | 0 | return 1; |
1254 | 0 | } |
1255 | 0 | } |
1256 | 0 | else { |
1257 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
1258 | 0 | tsk_error_set_errstr |
1259 | 0 | ("hfs_cat_read_file_folder_record: unexpected record type %" |
1260 | 0 | PRIu16, tsk_getu16(fs->endian, rec_type)); |
1261 | 0 | return 1; |
1262 | 0 | } |
1263 | | |
1264 | 0 | return 0; |
1265 | 0 | } |
1266 | | |
1267 | | // hfs_lookup_hard_link appears to be unnecessary - it looks up the cnid |
1268 | | // by seeing if there's a file/dir with the standard hard link name plus |
1269 | | // linknum and returns the meta_addr. But this should always be the same as linknum, |
1270 | | // and is very slow when there are many hard links, so it shouldn't be used. |
1271 | | //static TSK_INUM_T |
1272 | | //hfs_lookup_hard_link(HFS_INFO * hfs, TSK_INUM_T linknum, |
1273 | | // unsigned char is_directory) |
1274 | | //{ |
1275 | | // char fBuff[30]; |
1276 | | // TSK_FS_DIR *mdir; |
1277 | | // size_t indx; |
1278 | | // TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; |
1279 | | // |
1280 | | // memset(fBuff, 0, 30); |
1281 | | // |
1282 | | // if (is_directory) { |
1283 | | // |
1284 | | // tsk_take_lock(&(hfs->metadata_dir_cache_lock)); |
1285 | | // if (hfs->dir_meta_dir == NULL) { |
1286 | | // hfs->dir_meta_dir = |
1287 | | // tsk_fs_dir_open_meta(fs, hfs->meta_dir_inum); |
1288 | | // } |
1289 | | // tsk_release_lock(&(hfs->metadata_dir_cache_lock)); |
1290 | | // |
1291 | | // if (hfs->dir_meta_dir == NULL) { |
1292 | | // error_returned |
1293 | | // ("hfs_lookup_hard_link: could not open the dir metadata directory"); |
1294 | | // return 0; |
1295 | | // } |
1296 | | // else { |
1297 | | // mdir = hfs->dir_meta_dir; |
1298 | | // } |
1299 | | // snprintf(fBuff, 30, "dir_%" PRIuINUM, linknum); |
1300 | | // |
1301 | | // } |
1302 | | // else { |
1303 | | // |
1304 | | // tsk_take_lock(&(hfs->metadata_dir_cache_lock)); |
1305 | | // if (hfs->meta_dir == NULL) { |
1306 | | // hfs->meta_dir = tsk_fs_dir_open_meta(fs, hfs->meta_inum); |
1307 | | // } |
1308 | | // tsk_release_lock(&(hfs->metadata_dir_cache_lock)); |
1309 | | // |
1310 | | // if (hfs->meta_dir == NULL) { |
1311 | | // error_returned |
1312 | | // ("hfs_lookup_hard_link: could not open file metadata directory"); |
1313 | | // return 0; |
1314 | | // } |
1315 | | // else { |
1316 | | // mdir = hfs->meta_dir; |
1317 | | // } |
1318 | | // snprintf(fBuff, 30, "iNode%" PRIuINUM, linknum); |
1319 | | // } |
1320 | | // |
1321 | | // for (indx = 0; indx < tsk_fs_dir_getsize(mdir); ++indx) { |
1322 | | // if ((mdir->names != NULL) && mdir->names[indx].name && |
1323 | | // (fs->name_cmp(fs, mdir->names[indx].name, fBuff) == 0)) { |
1324 | | // // OK this is the one |
1325 | | // return mdir->names[indx].meta_addr; |
1326 | | // } |
1327 | | // } |
1328 | | // |
1329 | | // // OK, we did not find that linknum |
1330 | | // return 0; |
1331 | | //} |
1332 | | |
1333 | | /* |
1334 | | * Given a catalog entry, will test that entry to see if it is a hard link. |
1335 | | * If it is a hard link, the function returns the inum (or cnid) of the target file. |
1336 | | * If it is NOT a hard link, then then function returns the inum of the given entry. |
1337 | | * In both cases, the parameter is_error is set to zero. |
1338 | | * |
1339 | | * If an ERROR occurs, if it is a mild error, then is_error is set to 1, and the |
1340 | | * inum of the given entry is returned. This signals that hard link detection cannot |
1341 | | * be carried out. |
1342 | | * |
1343 | | * If the error is serious, then is_error is set to 2 or 3, depending on the kind of error, and |
1344 | | * the TSK error code is set, and the function returns zero. is_error==2 means that an error |
1345 | | * occurred in looking up the target file in the Catalog. is_error==3 means that the given |
1346 | | * entry appears to be a hard link, but the target file does not exist in the Catalog. |
1347 | | * |
1348 | | * @param hfs The file system |
1349 | | * @param entry The catalog entry to check |
1350 | | * @param is_error A Boolean that is returned indicating an error, or no error.\ |
1351 | | * @return The inum (or cnid) of the hard link target, or of the given catalog entry, or zero. |
1352 | | */ |
1353 | | TSK_INUM_T |
1354 | | hfs_follow_hard_link(HFS_INFO * hfs, hfs_file * cat, |
1355 | | unsigned char *is_error) |
1356 | 0 | { |
1357 | |
|
1358 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; |
1359 | 0 | TSK_INUM_T cnid; |
1360 | 0 | time_t crtime; |
1361 | 0 | uint32_t file_type; |
1362 | 0 | uint32_t file_creator; |
1363 | |
|
1364 | 0 | *is_error = 0; // default, not an error |
1365 | |
|
1366 | 0 | if (cat == NULL) { |
1367 | 0 | error_detected(TSK_ERR_FS_ARG, |
1368 | 0 | "hfs_follow_hard_link: Pointer to Catalog entry (2nd arg) is null"); |
1369 | 0 | return 0; |
1370 | 0 | } |
1371 | | |
1372 | 0 | cnid = tsk_getu32(fs->endian, cat->std.cnid); |
1373 | |
|
1374 | 0 | if (cnid < HFS_FIRST_USER_CNID) { |
1375 | | // Can't be a hard link. And, cannot look up in Catalog file either! |
1376 | 0 | return cnid; |
1377 | 0 | } |
1378 | | |
1379 | 0 | crtime = |
1380 | 0 | (time_t) hfs_convert_2_unix_time(tsk_getu32(fs->endian, |
1381 | 0 | cat->std.crtime)); |
1382 | | |
1383 | |
|
1384 | 0 | file_type = tsk_getu32(fs->endian, cat->std.u_info.file_type); |
1385 | 0 | file_creator = tsk_getu32(fs->endian, cat->std.u_info.file_cr); |
1386 | | |
1387 | | // Only proceed with the rest of this if the flags etc are right |
1388 | 0 | if (file_type == HFS_HARDLINK_FILE_TYPE |
1389 | 0 | && file_creator == HFS_HARDLINK_FILE_CREATOR) { |
1390 | | |
1391 | | // see if we have the HFS+ Private Data dir for file links; |
1392 | | // if not, it can't be a hard link. (We could warn the user, but |
1393 | | // we also rely on this when finding the HFS+ Private Data dir in |
1394 | | // the first place and we don't want a warning on every hfs_open.) |
1395 | 0 | if (hfs->meta_inum == 0) |
1396 | 0 | return cnid; |
1397 | | |
1398 | | // For this to work, we need the FS creation times. Is at least one of these set? |
1399 | 0 | if ((!hfs->has_root_crtime) && (!hfs->has_meta_dir_crtime) |
1400 | 0 | && (!hfs->has_meta_crtime)) { |
1401 | 0 | uint32_t linkNum = |
1402 | 0 | tsk_getu32(fs->endian, cat->std.perm.special.inum); |
1403 | 0 | *is_error = 1; |
1404 | 0 | if (tsk_verbose) |
1405 | 0 | tsk_fprintf(stderr, |
1406 | 0 | "WARNING: hfs_follow_hard_link: File system creation times are not set. " |
1407 | 0 | "Cannot test inode for hard link. File type and creator indicate that this" |
1408 | 0 | " is a hard link (file), with LINK ID = %" PRIu32 "\n", |
1409 | 0 | linkNum); |
1410 | 0 | return cnid; |
1411 | 0 | } |
1412 | | |
1413 | 0 | if ((!hfs->has_root_crtime) || (!hfs->has_meta_crtime)) { |
1414 | 0 | if (tsk_verbose) |
1415 | 0 | tsk_fprintf(stderr, |
1416 | 0 | "WARNING: hfs_follow_hard_link: Either the root folder or the" |
1417 | 0 | " file metadata folder is not accessible. Testing this potential hard link" |
1418 | 0 | " may be impaired.\n"); |
1419 | 0 | } |
1420 | | |
1421 | | // Now we need to check the creation time against the three FS creation times |
1422 | 0 | if ((hfs->has_meta_crtime && (crtime == hfs->meta_crtime)) || |
1423 | 0 | (hfs->has_meta_dir_crtime && (crtime == hfs->metadir_crtime)) |
1424 | 0 | || (hfs->has_root_crtime && (crtime == hfs->root_crtime))) { |
1425 | | // OK, this is a hard link to a file. |
1426 | 0 | uint32_t linkNum = |
1427 | 0 | tsk_getu32(fs->endian, cat->std.perm.special.inum); |
1428 | | |
1429 | | // We used to resolve this ID to a file in X folder using hfs_lookup_hard_link, but found |
1430 | | // that it was very ineffecient and always resulted in the same linkNum value. |
1431 | | // We now just use linkNum |
1432 | 0 | return linkNum; |
1433 | 0 | } |
1434 | 0 | } |
1435 | 0 | else if (file_type == HFS_LINKDIR_FILE_TYPE |
1436 | 0 | && file_creator == HFS_LINKDIR_FILE_CREATOR) { |
1437 | | |
1438 | | // see if we have the HFS+ Private Directory Data dir for links; |
1439 | | // if not, it can't be a hard link. (We could warn the user, but |
1440 | | // we also rely on this when finding the HFS+ Private Directory Data dir in |
1441 | | // the first place and we don't want a warning on every hfs_open.) |
1442 | 0 | if (hfs->meta_dir_inum == 0) |
1443 | 0 | return cnid; |
1444 | | |
1445 | | // For this to work, we need the FS creation times. Is at least one of these set? |
1446 | 0 | if ((!hfs->has_root_crtime) && (!hfs->has_meta_dir_crtime) |
1447 | 0 | && (!hfs->has_meta_crtime)) { |
1448 | 0 | uint32_t linkNum = |
1449 | 0 | tsk_getu32(fs->endian, cat->std.perm.special.inum); |
1450 | 0 | *is_error = 1; |
1451 | |
|
1452 | 0 | if (tsk_verbose) |
1453 | 0 | tsk_fprintf(stderr, |
1454 | 0 | "WARNING: hfs_follow_hard_link: File system creation times are not set. " |
1455 | 0 | "Cannot test inode for hard link. File type and creator indicate that this" |
1456 | 0 | " is a hard link (directory), with LINK ID = %" PRIu32 |
1457 | 0 | "\n", linkNum); |
1458 | 0 | return cnid; |
1459 | 0 | } |
1460 | | |
1461 | 0 | if ((!hfs->has_root_crtime) || (!hfs->has_meta_crtime) |
1462 | 0 | || (!hfs->has_meta_dir_crtime)) { |
1463 | 0 | if (tsk_verbose) |
1464 | 0 | tsk_fprintf(stderr, |
1465 | 0 | "WARNING: hfs_follow_hard_link: Either the root folder or the" |
1466 | 0 | " file metadata folder or the directory metatdata folder is" |
1467 | 0 | " not accessible. Testing this potential hard linked folder " |
1468 | 0 | "may be impaired.\n"); |
1469 | 0 | } |
1470 | | |
1471 | | // Now we need to check the creation time against the three FS creation times |
1472 | 0 | if ((hfs->has_meta_crtime && (crtime == hfs->meta_crtime)) || |
1473 | 0 | (hfs->has_meta_dir_crtime && (crtime == hfs->metadir_crtime)) |
1474 | 0 | || (hfs->has_root_crtime && (crtime == hfs->root_crtime))) { |
1475 | | // OK, this is a hard link to a directory. |
1476 | 0 | uint32_t linkNum = |
1477 | 0 | tsk_getu32(fs->endian, cat->std.perm.special.inum); |
1478 | | |
1479 | | // We used to resolve this ID to a file in X folder using hfs_lookup_hard_link, but found |
1480 | | // that it was very ineffecient and always resulted in the same linkNum value. |
1481 | | // We now just use linkNum |
1482 | 0 | return linkNum; |
1483 | 0 | } |
1484 | 0 | } |
1485 | | |
1486 | | // It cannot be a hard link (file or directory) |
1487 | 0 | return cnid; |
1488 | 0 | } |
1489 | | |
1490 | | |
1491 | | /** \internal |
1492 | | * Lookup an entry in the catalog file and save it into the entry. Do not |
1493 | | * call this for the special files that do not have an entry in the catalog. |
1494 | | * data structure. |
1495 | | * @param hfs File system being analyzed |
1496 | | * @param inum Address (cnid) of file to open |
1497 | | * @param entry [out] Structure to read data into |
1498 | | * @returns 1 on error or not found, 0 on success. Check tsk_errno |
1499 | | * to differentiate between error and not found. If it is not found, then the |
1500 | | * errno will be TSK_ERR_FS_INODE_NUM. Else, it will be some other value. |
1501 | | */ |
1502 | | uint8_t |
1503 | | hfs_cat_file_lookup(HFS_INFO * hfs, TSK_INUM_T inum, HFS_ENTRY * entry, |
1504 | | unsigned char follow_hard_link) |
1505 | 0 | { |
1506 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); |
1507 | 0 | hfs_btree_key_cat key; /* current catalog key */ |
1508 | 0 | hfs_thread thread; /* thread record */ |
1509 | 0 | hfs_file_folder record; /* file/folder record */ |
1510 | 0 | TSK_OFF_T off; |
1511 | |
|
1512 | 0 | tsk_error_reset(); |
1513 | |
|
1514 | 0 | if (tsk_verbose) |
1515 | 0 | tsk_fprintf(stderr, |
1516 | 0 | "hfs_cat_file_lookup: called for inum %" PRIuINUM "\n", inum); |
1517 | | |
1518 | | // Test if this is a special file that is not located in the catalog |
1519 | 0 | if ((inum == HFS_EXTENTS_FILE_ID) || |
1520 | 0 | (inum == HFS_CATALOG_FILE_ID) || |
1521 | 0 | (inum == HFS_ALLOCATION_FILE_ID) || |
1522 | 0 | (inum == HFS_STARTUP_FILE_ID) || |
1523 | 0 | (inum == HFS_ATTRIBUTES_FILE_ID)) { |
1524 | 0 | tsk_error_set_errno(TSK_ERR_FS_GENFS); |
1525 | 0 | tsk_error_set_errstr |
1526 | 0 | ("hfs_cat_file_lookup: Called on special file: %" PRIuINUM, |
1527 | 0 | inum); |
1528 | 0 | return 1; |
1529 | 0 | } |
1530 | | |
1531 | | |
1532 | | /* first look up the thread record for the item we're searching for */ |
1533 | | |
1534 | | /* set up the thread record key */ |
1535 | 0 | memset((char *) &key, 0, sizeof(hfs_btree_key_cat)); |
1536 | 0 | cnid_to_array((uint32_t) inum, key.parent_cnid); |
1537 | |
|
1538 | 0 | if (tsk_verbose) |
1539 | 0 | tsk_fprintf(stderr, |
1540 | 0 | "hfs_cat_file_lookup: Looking up thread record (%" PRIuINUM |
1541 | 0 | ")\n", inum); |
1542 | | |
1543 | | /* look up the thread record */ |
1544 | 0 | off = hfs_cat_get_record_offset(hfs, &key); |
1545 | 0 | if (off == 0) { |
1546 | | // no parsing error, just not found |
1547 | 0 | if (tsk_error_get_errno() == 0) { |
1548 | 0 | tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); |
1549 | 0 | tsk_error_set_errstr |
1550 | 0 | ("hfs_cat_file_lookup: Error finding thread node for file (%" |
1551 | 0 | PRIuINUM ")", inum); |
1552 | 0 | } |
1553 | 0 | else { |
1554 | 0 | tsk_error_set_errstr2 |
1555 | 0 | (" hfs_cat_file_lookup: thread for file (%" PRIuINUM ")", |
1556 | 0 | inum); |
1557 | 0 | } |
1558 | 0 | return 1; |
1559 | 0 | } |
1560 | | |
1561 | | /* read the thread record */ |
1562 | 0 | if (hfs_cat_read_thread_record(hfs, off, &thread)) { |
1563 | 0 | tsk_error_set_errstr2(" hfs_cat_file_lookup: file (%" PRIuINUM ")", |
1564 | 0 | inum); |
1565 | 0 | return 1; |
1566 | 0 | } |
1567 | | |
1568 | | /* now look up the actual file/folder record */ |
1569 | | |
1570 | | /* build key */ |
1571 | 0 | memset((char *) &key, 0, sizeof(hfs_btree_key_cat)); |
1572 | 0 | memcpy((char *) key.parent_cnid, (char *) thread.parent_cnid, |
1573 | 0 | sizeof(key.parent_cnid)); |
1574 | 0 | memcpy((char *) &key.name, (char *) &thread.name, sizeof(key.name)); |
1575 | |
|
1576 | 0 | if (tsk_verbose) |
1577 | 0 | tsk_fprintf(stderr, |
1578 | 0 | "hfs_cat_file_lookup: Looking up file record (parent: %" |
1579 | 0 | PRIuINUM ")\n", (uint64_t) tsk_getu32(fs->endian, |
1580 | 0 | key.parent_cnid)); |
1581 | | |
1582 | | /* look up the record */ |
1583 | 0 | off = hfs_cat_get_record_offset(hfs, &key); |
1584 | 0 | if (off == 0) { |
1585 | | // no parsing error, just not found |
1586 | 0 | if (tsk_error_get_errno() == 0) { |
1587 | 0 | tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); |
1588 | 0 | tsk_error_set_errstr |
1589 | 0 | ("hfs_cat_file_lookup: Error finding record node %" |
1590 | 0 | PRIuINUM, inum); |
1591 | 0 | } |
1592 | 0 | else { |
1593 | 0 | tsk_error_set_errstr2(" hfs_cat_file_lookup: file (%" PRIuINUM |
1594 | 0 | ")", inum); |
1595 | 0 | } |
1596 | 0 | return 1; |
1597 | 0 | } |
1598 | | |
1599 | | /* read the record */ |
1600 | 0 | if (hfs_cat_read_file_folder_record(hfs, off, &record)) { |
1601 | 0 | tsk_error_set_errstr2(" hfs_cat_file_lookup: file (%" PRIuINUM ")", |
1602 | 0 | inum); |
1603 | 0 | return 1; |
1604 | 0 | } |
1605 | | |
1606 | | /* these memcpy can be gotten rid of, really */ |
1607 | 0 | if (tsk_getu16(fs->endian, |
1608 | 0 | record.file.std.rec_type) == HFS_FOLDER_RECORD) { |
1609 | 0 | if (tsk_verbose) |
1610 | 0 | tsk_fprintf(stderr, |
1611 | 0 | "hfs_cat_file_lookup: found folder record valence %" PRIu32 |
1612 | 0 | ", cnid %" PRIu32 "\n", tsk_getu32(fs->endian, |
1613 | 0 | record.folder.std.valence), tsk_getu32(fs->endian, |
1614 | 0 | record.folder.std.cnid)); |
1615 | 0 | memcpy((char *) &entry->cat, (char *) &record, sizeof(hfs_folder)); |
1616 | 0 | } |
1617 | 0 | else if (tsk_getu16(fs->endian, |
1618 | 0 | record.file.std.rec_type) == HFS_FILE_RECORD) { |
1619 | 0 | if (tsk_verbose) |
1620 | 0 | tsk_fprintf(stderr, |
1621 | 0 | "hfs_cat_file_lookup: found file record cnid %" PRIu32 |
1622 | 0 | "\n", tsk_getu32(fs->endian, record.file.std.cnid)); |
1623 | 0 | memcpy((char *) &entry->cat, (char *) &record, sizeof(hfs_file)); |
1624 | 0 | } |
1625 | | /* other cases already caught by hfs_cat_read_file_folder_record */ |
1626 | |
|
1627 | 0 | memcpy((char *) &entry->thread, (char *) &thread, sizeof(hfs_thread)); |
1628 | |
|
1629 | 0 | entry->flags = TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_USED; |
1630 | 0 | entry->inum = inum; |
1631 | |
|
1632 | 0 | if (follow_hard_link) { |
1633 | | // TEST to see if this is a hard link |
1634 | 0 | unsigned char is_err; |
1635 | 0 | TSK_INUM_T target_cnid = |
1636 | 0 | hfs_follow_hard_link(hfs, &(entry->cat), &is_err); |
1637 | 0 | if (is_err > 1) { |
1638 | 0 | error_returned |
1639 | 0 | ("hfs_cat_file_lookup: error occurred while following a possible hard link for " |
1640 | 0 | "inum (cnid) = %" PRIuINUM, inum); |
1641 | 0 | return 1; |
1642 | 0 | } |
1643 | 0 | if (target_cnid != inum) { |
1644 | | // This is a hard link, and we have got the cnid of the target file, so look it up. |
1645 | 0 | uint8_t res = |
1646 | 0 | hfs_cat_file_lookup(hfs, target_cnid, entry, FALSE); |
1647 | 0 | if (res != 0) { |
1648 | 0 | error_returned |
1649 | 0 | ("hfs_cat_file_lookup: error occurred while looking up the Catalog entry for " |
1650 | 0 | "the target of inum (cnid) = %" PRIuINUM " target", |
1651 | 0 | inum); |
1652 | 0 | } |
1653 | 0 | return 1; |
1654 | 0 | } |
1655 | | |
1656 | | // Target is NOT a hard link, so fall through to the non-hard link exit. |
1657 | 0 | } |
1658 | | |
1659 | 0 | if (tsk_verbose) |
1660 | 0 | tsk_fprintf(stderr, "hfs_cat_file_lookup exiting\n"); |
1661 | 0 | return 0; |
1662 | 0 | } |
1663 | | |
1664 | | |
1665 | | static uint8_t |
1666 | | hfs_find_highest_inum_cb(HFS_INFO * hfs, int8_t level_type, |
1667 | | const hfs_btree_key_cat * cur_key, int cur_keylen, size_t node_size, |
1668 | | TSK_OFF_T key_off, void *ptr) |
1669 | 0 | { |
1670 | 0 | if (cur_keylen < 6) { |
1671 | | // Note that it would be better to return an error value here |
1672 | | // but the current function interface does not support this |
1673 | | // Also see issue #2365 |
1674 | 0 | return -1; |
1675 | 0 | } |
1676 | | // NOTE: This assumes that the biggest inum is the last one that we |
1677 | | // see. the traverse method does not currently promise that as part of |
1678 | | // its callback "contract". |
1679 | 0 | *((TSK_INUM_T*) ptr) = tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid); |
1680 | 0 | return HFS_BTREE_CB_IDX_LT; |
1681 | 0 | } |
1682 | | |
1683 | | /** \internal |
1684 | | * Returns the largest inode number in file system |
1685 | | * @param hfs File system being analyzed |
1686 | | * @returns largest metadata address |
1687 | | */ |
1688 | | static TSK_INUM_T |
1689 | | hfs_find_highest_inum(HFS_INFO * hfs) |
1690 | 0 | { |
1691 | | // @@@ get actual number from Catalog file (go to far right) (we can't always trust the vol header) |
1692 | 0 | TSK_INUM_T inum; |
1693 | 0 | if (hfs_cat_traverse(hfs, hfs_find_highest_inum_cb, &inum)) { |
1694 | | /* Catalog traversal failed, fallback on legacy method : |
1695 | | if HFS_VH_ATTR_CNIDS_REUSED is set, then |
1696 | | the maximum CNID is 2^32-1; if it's not set, then nextCatalogId is |
1697 | | supposed to be larger than all CNIDs on disk. |
1698 | | */ |
1699 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); |
1700 | 0 | if (tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_CNIDS_REUSED) |
1701 | 0 | return (TSK_INUM_T) 0xffffffff; |
1702 | 0 | else |
1703 | 0 | return (TSK_INUM_T) tsk_getu32(fs->endian, |
1704 | 0 | hfs->fs->next_cat_id) - 1; |
1705 | 0 | } |
1706 | 0 | return inum; |
1707 | 0 | } |
1708 | | |
1709 | | |
1710 | | static TSK_FS_META_MODE_ENUM |
1711 | | hfs_mode_to_tsk_mode(uint16_t a_mode) |
1712 | 0 | { |
1713 | 0 | TSK_FS_META_MODE_ENUM mode = 0; |
1714 | |
|
1715 | 0 | if (a_mode & HFS_IN_ISUID) |
1716 | 0 | mode |= TSK_FS_META_MODE_ISUID; |
1717 | 0 | if (a_mode & HFS_IN_ISGID) |
1718 | 0 | mode |= TSK_FS_META_MODE_ISGID; |
1719 | 0 | if (a_mode & HFS_IN_ISVTX) |
1720 | 0 | mode |= TSK_FS_META_MODE_ISVTX; |
1721 | |
|
1722 | 0 | if (a_mode & HFS_IN_IRUSR) |
1723 | 0 | mode |= TSK_FS_META_MODE_IRUSR; |
1724 | 0 | if (a_mode & HFS_IN_IWUSR) |
1725 | 0 | mode |= TSK_FS_META_MODE_IWUSR; |
1726 | 0 | if (a_mode & HFS_IN_IXUSR) |
1727 | 0 | mode |= TSK_FS_META_MODE_IXUSR; |
1728 | |
|
1729 | 0 | if (a_mode & HFS_IN_IRGRP) |
1730 | 0 | mode |= TSK_FS_META_MODE_IRGRP; |
1731 | 0 | if (a_mode & HFS_IN_IWGRP) |
1732 | 0 | mode |= TSK_FS_META_MODE_IWGRP; |
1733 | 0 | if (a_mode & HFS_IN_IXGRP) |
1734 | 0 | mode |= TSK_FS_META_MODE_IXGRP; |
1735 | |
|
1736 | 0 | if (a_mode & HFS_IN_IROTH) |
1737 | 0 | mode |= TSK_FS_META_MODE_IROTH; |
1738 | 0 | if (a_mode & HFS_IN_IWOTH) |
1739 | 0 | mode |= TSK_FS_META_MODE_IWOTH; |
1740 | 0 | if (a_mode & HFS_IN_IXOTH) |
1741 | 0 | mode |= TSK_FS_META_MODE_IXOTH; |
1742 | |
|
1743 | 0 | return mode; |
1744 | 0 | } |
1745 | | |
1746 | | static TSK_FS_META_TYPE_ENUM |
1747 | | hfs_mode_to_tsk_meta_type(uint16_t a_mode) |
1748 | 0 | { |
1749 | 0 | switch (a_mode & HFS_IN_IFMT) { |
1750 | 0 | case HFS_IN_IFIFO: |
1751 | 0 | return TSK_FS_META_TYPE_FIFO; |
1752 | 0 | case HFS_IN_IFCHR: |
1753 | 0 | return TSK_FS_META_TYPE_CHR; |
1754 | 0 | case HFS_IN_IFDIR: |
1755 | 0 | return TSK_FS_META_TYPE_DIR; |
1756 | 0 | case HFS_IN_IFBLK: |
1757 | 0 | return TSK_FS_META_TYPE_BLK; |
1758 | 0 | case HFS_IN_IFREG: |
1759 | 0 | return TSK_FS_META_TYPE_REG; |
1760 | 0 | case HFS_IN_IFLNK: |
1761 | 0 | return TSK_FS_META_TYPE_LNK; |
1762 | 0 | case HFS_IN_IFSOCK: |
1763 | 0 | return TSK_FS_META_TYPE_SOCK; |
1764 | 0 | case HFS_IFWHT: |
1765 | 0 | return TSK_FS_META_TYPE_WHT; |
1766 | 0 | case HFS_IFXATTR: |
1767 | 0 | return TSK_FS_META_TYPE_UNDEF; |
1768 | 0 | default: |
1769 | | /* error */ |
1770 | 0 | return TSK_FS_META_TYPE_UNDEF; |
1771 | 0 | } |
1772 | 0 | } |
1773 | | |
1774 | | |
1775 | | static uint8_t |
1776 | | hfs_make_specialbase(TSK_FS_FILE * fs_file) |
1777 | 2 | { |
1778 | 2 | fs_file->meta->type = TSK_FS_META_TYPE_REG; |
1779 | 2 | fs_file->meta->mode = 0; |
1780 | 2 | fs_file->meta->nlink = 1; |
1781 | 2 | fs_file->meta->flags = |
1782 | 2 | (TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_ALLOC); |
1783 | 2 | fs_file->meta->uid = fs_file->meta->gid = 0; |
1784 | 2 | fs_file->meta->mtime = fs_file->meta->atime = fs_file->meta->ctime = |
1785 | 2 | fs_file->meta->crtime = 0; |
1786 | 2 | fs_file->meta->mtime_nano = fs_file->meta->atime_nano = |
1787 | 2 | fs_file->meta->ctime_nano = fs_file->meta->crtime_nano = 0; |
1788 | | |
1789 | 2 | if (fs_file->meta->name2 == NULL) { |
1790 | 2 | if ((fs_file->meta->name2 = (TSK_FS_META_NAME_LIST *) |
1791 | 2 | tsk_malloc(sizeof(TSK_FS_META_NAME_LIST))) == NULL) { |
1792 | 0 | error_returned |
1793 | 0 | (" - hfs_make_specialbase, couldn't malloc space for a name list"); |
1794 | 0 | return 1; |
1795 | 0 | } |
1796 | 2 | fs_file->meta->name2->next = NULL; |
1797 | 2 | } |
1798 | | |
1799 | 2 | if (fs_file->meta->attr != NULL) { |
1800 | 0 | tsk_fs_attrlist_markunused(fs_file->meta->attr); |
1801 | 0 | } |
1802 | 2 | else { |
1803 | 2 | fs_file->meta->attr = tsk_fs_attrlist_alloc(); |
1804 | 2 | } |
1805 | 2 | return 0; |
1806 | 2 | } |
1807 | | |
1808 | | /** |
1809 | | * \internal |
1810 | | * Create an FS_INODE structure for the catalog file. |
1811 | | * |
1812 | | * @param hfs File system to analyze |
1813 | | * @param fs_file Structure to copy file information into. |
1814 | | * @return 1 on error and 0 on success |
1815 | | */ |
static uint8_t
hfs_make_catalog(HFS_INFO * hfs, TSK_FS_FILE * fs_file)
{
    TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs;
    TSK_FS_ATTR *fs_attr;
    TSK_FS_ATTR_RUN *attr_run;
    // dummy1-3 receive xattr outputs (compression info) that are unused here
    unsigned char dummy1, dummy2;
    uint64_t dummy3;
    uint8_t result;

    if (tsk_verbose)
        tsk_fprintf(stderr,
            "hfs_make_catalog: Making virtual catalog file\n");

    // fill in the fields common to all of the virtual special files
    if (hfs_make_specialbase(fs_file)) {
        error_returned(" - hfs_make_catalog");
        return 1;
    }

    fs_file->meta->addr = HFS_CATALOG_FILE_ID;
    strncpy(fs_file->meta->name2->name, HFS_CATALOGNAME,
        TSK_FS_META_NAME_LIST_NSIZE);

    // logical size comes from the catalog file fork in the volume header
    fs_file->meta->size =
        tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz);


    // convert the runs in the volume header to attribute runs
    // NOTE: a NULL return with errno == 0 just means an empty extent list,
    // which tsk_fs_attr_set_run() accepts below
    if (((attr_run =
                hfs_extents_to_attr(fs, hfs->fs->cat_file.extents,
                    0)) == NULL) && (tsk_error_get_errno() != 0)) {
        error_returned(" - hfs_make_catalog");
        return 1;
    }

    if ((fs_attr =
            tsk_fs_attrlist_getnew(fs_file->meta->attr,
                TSK_FS_ATTR_NONRES)) == NULL) {
        error_returned(" - hfs_make_catalog");
        tsk_fs_attr_run_free(attr_run);
        return 1;
    }

    // initialize the data run; on success, ownership of attr_run
    // transfers to fs_attr, so it is only freed on the error paths above
    if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL,
            TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
            tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz),
            tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz),
            tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz), 0, 0)) {
        error_returned(" - hfs_make_catalog");
        tsk_fs_attr_run_free(attr_run);
        return 1;
    }

    // see if catalog file has additional runs in the Extents B-tree
    if (hfs_ext_find_extent_record_attr(hfs, HFS_CATALOG_FILE_ID, fs_attr,
            TRUE)) {
        error_returned(" - hfs_make_catalog");
        fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
        return 1;
    }

    // a failure to load extended attributes is treated as non-fatal
    result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3);
    if (result != 0) {
        if (tsk_verbose)
            tsk_fprintf(stderr,
                "WARNING: Extended attributes failed to load for the Catalog file.\n");
        tsk_error_reset();
    }

    fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
    return 0;
}
1889 | | |
1890 | | /** |
1891 | | * \internal |
1892 | | * Create an FS_FILE for the extents file |
1893 | | * |
1894 | | * @param hfs File system to analyze |
1895 | | * @param fs_file Structure to copy file information into. |
1896 | | * @return 1 on error and 0 on success |
1897 | | */ |
1898 | | static uint8_t |
1899 | | hfs_make_extents(HFS_INFO * hfs, TSK_FS_FILE * fs_file) |
1900 | 1 | { |
1901 | 1 | TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; |
1902 | 1 | TSK_FS_ATTR *fs_attr; |
1903 | 1 | TSK_FS_ATTR_RUN *attr_run; |
1904 | | |
1905 | 1 | if (tsk_verbose) |
1906 | 0 | tsk_fprintf(stderr, |
1907 | 0 | "hfs_make_extents: Making virtual extents file\n"); |
1908 | | |
1909 | 1 | if (hfs_make_specialbase(fs_file)) { |
1910 | 0 | error_returned(" - hfs_make_extents"); |
1911 | 0 | return 1; |
1912 | 0 | } |
1913 | | |
1914 | 1 | fs_file->meta->addr = HFS_EXTENTS_FILE_ID; |
1915 | 1 | strncpy(fs_file->meta->name2->name, HFS_EXTENTSNAME, |
1916 | 1 | TSK_FS_META_NAME_LIST_NSIZE); |
1917 | | |
1918 | 1 | fs_file->meta->size = |
1919 | 1 | tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz); |
1920 | | |
1921 | | |
1922 | 1 | if (((attr_run = |
1923 | 1 | hfs_extents_to_attr(fs, hfs->fs->ext_file.extents, |
1924 | 1 | 0)) == NULL) && (tsk_error_get_errno() != 0)) { |
1925 | 0 | error_returned(" - hfs_make_extents"); |
1926 | 0 | return 1; |
1927 | 0 | } |
1928 | | |
1929 | 1 | if ((fs_attr = |
1930 | 1 | tsk_fs_attrlist_getnew(fs_file->meta->attr, |
1931 | 1 | TSK_FS_ATTR_NONRES)) == NULL) { |
1932 | 0 | error_returned(" - hfs_make_extents"); |
1933 | 0 | tsk_fs_attr_run_free(attr_run); |
1934 | 0 | return 1; |
1935 | 0 | } |
1936 | | |
1937 | | // initialize the data run |
1938 | 1 | if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL, |
1939 | 1 | TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA, |
1940 | 1 | tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz), |
1941 | 1 | tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz), |
1942 | 1 | tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz), 0, 0)) { |
1943 | 0 | error_returned(" - hfs_make_extents"); |
1944 | 0 | tsk_fs_attr_run_free(attr_run); |
1945 | 0 | return 1; |
1946 | 0 | } |
1947 | | |
1948 | | //hfs_load_extended_attrs(fs_file); |
1949 | | |
1950 | | // Extents doesn't have an entry in itself |
1951 | | |
1952 | 1 | fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; |
1953 | 1 | return 0; |
1954 | 1 | } |
1955 | | |
1956 | | |
1957 | | /** |
1958 | | * \internal |
1959 | | * Create an FS_INODE structure for the blockmap / allocation file. |
1960 | | * |
1961 | | * @param hfs File system to analyze |
1962 | | * @param fs_file Structure to copy file information into. |
1963 | | * @return 1 on error and 0 on success |
1964 | | */ |
static uint8_t
hfs_make_blockmap(HFS_INFO * hfs, TSK_FS_FILE * fs_file)
{
    TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs;
    TSK_FS_ATTR *fs_attr;
    TSK_FS_ATTR_RUN *attr_run;
    // dummy1-3 receive xattr outputs (compression info) that are unused here
    unsigned char dummy1, dummy2;
    uint64_t dummy3;
    uint8_t result;

    if (tsk_verbose)
        tsk_fprintf(stderr,
            "hfs_make_blockmap: Making virtual blockmap file\n");

    // fill in the fields common to all of the virtual special files
    if (hfs_make_specialbase(fs_file)) {
        error_returned(" - hfs_make_blockmap");
        return 1;
    }

    fs_file->meta->addr = HFS_ALLOCATION_FILE_ID;
    strncpy(fs_file->meta->name2->name, HFS_ALLOCATIONNAME,
        TSK_FS_META_NAME_LIST_NSIZE);

    // logical size comes from the Allocation file fork in the volume header
    fs_file->meta->size =
        tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz);

    // convert the runs in the volume header to attribute runs; a NULL
    // result with errno == 0 just means an empty extent list
    if (((attr_run =
                hfs_extents_to_attr(fs, hfs->fs->alloc_file.extents,
                    0)) == NULL) && (tsk_error_get_errno() != 0)) {
        error_returned(" - hfs_make_blockmap");
        return 1;
    }

    if ((fs_attr =
            tsk_fs_attrlist_getnew(fs_file->meta->attr,
                TSK_FS_ATTR_NONRES)) == NULL) {
        error_returned(" - hfs_make_blockmap");
        tsk_fs_attr_run_free(attr_run);
        return 1;
    }

    // initialize the data run; on success, fs_attr takes ownership of
    // attr_run, so it is only freed on the error paths
    if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL,
            TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
            tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz),
            tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz),
            tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz), 0, 0)) {
        error_returned(" - hfs_make_blockmap");
        tsk_fs_attr_run_free(attr_run);
        return 1;
    }

    // see if the Allocation file has additional runs in the Extents B-tree
    if (hfs_ext_find_extent_record_attr(hfs, HFS_ALLOCATION_FILE_ID,
            fs_attr, TRUE)) {
        error_returned(" - hfs_make_blockmap");
        fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
        return 1;
    }


    // a failure to load extended attributes is treated as non-fatal
    result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3);
    if (result != 0) {
        if (tsk_verbose)
            tsk_fprintf(stderr,
                "WARNING: Extended attributes failed to load for the Allocation file.\n");
        tsk_error_reset();
    }

    fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
    return 0;
}
2037 | | |
2038 | | /** |
2039 | | * \internal |
2040 | | * Create an FS_INODE structure for the startup / boot file. |
2041 | | * |
2042 | | * @param hfs File system to analyze |
2043 | | * @param fs_file Structure to copy file information into. |
2044 | | * @return 1 on error and 0 on success |
2045 | | */ |
static uint8_t
hfs_make_startfile(HFS_INFO * hfs, TSK_FS_FILE * fs_file)
{
    TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs;
    TSK_FS_ATTR *fs_attr;
    TSK_FS_ATTR_RUN *attr_run;
    // dummy1-3 receive xattr outputs (compression info) that are unused here
    unsigned char dummy1, dummy2;
    uint64_t dummy3;
    uint8_t result;

    if (tsk_verbose)
        tsk_fprintf(stderr,
            "hfs_make_startfile: Making virtual startup file\n");

    // fill in the fields common to all of the virtual special files
    if (hfs_make_specialbase(fs_file)) {
        error_returned(" - hfs_make_startfile");
        return 1;
    }

    fs_file->meta->addr = HFS_STARTUP_FILE_ID;
    strncpy(fs_file->meta->name2->name, HFS_STARTUPNAME,
        TSK_FS_META_NAME_LIST_NSIZE);

    // logical size comes from the Startup file fork in the volume header
    fs_file->meta->size =
        tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz);

    // convert the runs in the volume header to attribute runs; a NULL
    // result with errno == 0 just means an empty extent list
    if (((attr_run =
                hfs_extents_to_attr(fs, hfs->fs->start_file.extents,
                    0)) == NULL) && (tsk_error_get_errno() != 0)) {
        error_returned(" - hfs_make_startfile");
        return 1;
    }

    if ((fs_attr =
            tsk_fs_attrlist_getnew(fs_file->meta->attr,
                TSK_FS_ATTR_NONRES)) == NULL) {
        error_returned(" - hfs_make_startfile");
        tsk_fs_attr_run_free(attr_run);
        return 1;
    }

    // initialize the data run; on success, fs_attr takes ownership of
    // attr_run, so it is only freed on the error paths
    if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL,
            TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
            tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz),
            tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz),
            tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz), 0, 0)) {
        error_returned(" - hfs_make_startfile");
        tsk_fs_attr_run_free(attr_run);
        return 1;
    }

    // see if the Startup file has additional runs in the Extents B-tree
    if (hfs_ext_find_extent_record_attr(hfs, HFS_STARTUP_FILE_ID, fs_attr,
            TRUE)) {
        error_returned(" - hfs_make_startfile");
        fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
        return 1;
    }

    // a failure to load extended attributes is treated as non-fatal
    result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3);
    if (result != 0) {
        if (tsk_verbose)
            tsk_fprintf(stderr,
                "WARNING: Extended attributes failed to load for the Start file.\n");
        tsk_error_reset();
    }

    fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
    return 0;
}
2117 | | |
2118 | | |
2119 | | /** |
2120 | | * \internal |
2121 | | * Create an FS_INODE structure for the attributes file. |
2122 | | * |
2123 | | * @param hfs File system to analyze |
2124 | | * @param fs_file Structure to copy file information into. |
2125 | | * @return 1 on error and 0 on success |
2126 | | */ |
2127 | | static uint8_t |
2128 | | hfs_make_attrfile(HFS_INFO * hfs, TSK_FS_FILE * fs_file) |
2129 | 0 | { |
2130 | 0 | TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; |
2131 | 0 | TSK_FS_ATTR *fs_attr; |
2132 | 0 | TSK_FS_ATTR_RUN *attr_run; |
2133 | |
|
2134 | 0 | if (tsk_verbose) |
2135 | 0 | tsk_fprintf(stderr, |
2136 | 0 | "hfs_make_attrfile: Making virtual attributes file\n"); |
2137 | |
|
2138 | 0 | if (hfs_make_specialbase(fs_file)) { |
2139 | 0 | error_returned(" - hfs_make_attrfile"); |
2140 | 0 | return 1; |
2141 | 0 | } |
2142 | | |
2143 | 0 | fs_file->meta->addr = HFS_ATTRIBUTES_FILE_ID; |
2144 | 0 | strncpy(fs_file->meta->name2->name, HFS_ATTRIBUTESNAME, |
2145 | 0 | TSK_FS_META_NAME_LIST_NSIZE); |
2146 | |
|
2147 | 0 | fs_file->meta->size = |
2148 | 0 | tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz); |
2149 | |
|
2150 | 0 | if (((attr_run = |
2151 | 0 | hfs_extents_to_attr(fs, hfs->fs->attr_file.extents, |
2152 | 0 | 0)) == NULL) && (tsk_error_get_errno() != 0)) { |
2153 | 0 | error_returned(" - hfs_make_attrfile"); |
2154 | 0 | return 1; |
2155 | 0 | } |
2156 | | |
2157 | 0 | if ((fs_attr = |
2158 | 0 | tsk_fs_attrlist_getnew(fs_file->meta->attr, |
2159 | 0 | TSK_FS_ATTR_NONRES)) == NULL) { |
2160 | 0 | error_returned(" - hfs_make_attrfile"); |
2161 | 0 | tsk_fs_attr_run_free(attr_run); |
2162 | 0 | return 1; |
2163 | 0 | } |
2164 | | |
2165 | | // initialize the data run |
2166 | 0 | if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL, |
2167 | 0 | TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA, |
2168 | 0 | tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz), |
2169 | 0 | tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz), |
2170 | 0 | tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz), 0, 0)) { |
2171 | 0 | error_returned(" - hfs_make_attrfile"); |
2172 | 0 | tsk_fs_attr_run_free(attr_run); |
2173 | 0 | return 1; |
2174 | 0 | } |
2175 | | |
2176 | | // see if catalog file has additional runs |
2177 | 0 | if (hfs_ext_find_extent_record_attr(hfs, HFS_ATTRIBUTES_FILE_ID, |
2178 | 0 | fs_attr, TRUE)) { |
2179 | 0 | error_returned(" - hfs_make_attrfile"); |
2180 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; |
2181 | 0 | return 1; |
2182 | 0 | } |
2183 | | |
2184 | | //hfs_load_extended_attrs(fs_file); |
2185 | | |
2186 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; |
2187 | 0 | return 0; |
2188 | 0 | } |
2189 | | |
2190 | | |
2191 | | |
2192 | | /** |
2193 | | * \internal |
2194 | | * Create an FS_FILE structure for the BadBlocks file. |
2195 | | * |
2196 | | * @param hfs File system to analyze |
2197 | | * @param fs_file Structure to copy file information into. |
2198 | | * @return 1 on error and 0 on success |
2199 | | */ |
static uint8_t
hfs_make_badblockfile(HFS_INFO * hfs, TSK_FS_FILE * fs_file)
{
    TSK_FS_ATTR *fs_attr;
    // dummy1-3 receive xattr outputs (compression info) that are unused here
    unsigned char dummy1, dummy2;
    uint64_t dummy3;
    uint8_t result;

    if (tsk_verbose)
        tsk_fprintf(stderr,
            "hfs_make_badblockfile: Making virtual badblock file\n");

    // fill in the fields common to all of the virtual special files
    if (hfs_make_specialbase(fs_file)) {
        error_returned(" - hfs_make_badblockfile");
        return 1;
    }

    fs_file->meta->addr = HFS_BAD_BLOCK_FILE_ID;
    strncpy(fs_file->meta->name2->name, HFS_BAD_BLOCK_FILE_NAME,
        TSK_FS_META_NAME_LIST_NSIZE);

    // the bad block file has no fork in the volume header; size starts at
    // zero and is corrected below once the extents have been loaded
    fs_file->meta->size = 0;

    if ((fs_attr =
            tsk_fs_attrlist_getnew(fs_file->meta->attr,
                TSK_FS_ATTR_NONRES)) == NULL) {
        error_returned(" - hfs_make_badblockfile");
        return 1;
    }

    // add the (initially empty) run to the file.
    if (tsk_fs_attr_set_run(fs_file, fs_attr, NULL, NULL,
            TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
            fs_file->meta->size, fs_file->meta->size, fs_file->meta->size,
            0, 0)) {
        error_returned(" - hfs_make_badblockfile");
        return 1;
    }

    // see if file has additional runs in the Extents B-tree (this is
    // where the bad-block extents actually live)
    if (hfs_ext_find_extent_record_attr(hfs, HFS_BAD_BLOCK_FILE_ID,
            fs_attr, TRUE)) {
        error_returned(" - hfs_make_badblockfile");
        fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
        return 1;
    }

    /* @@@ We have a chicken and egg problem here... The current design of
     * fs_attr_set() requires the size to be set, but we dont' know the size
     * until we look into the extents file (which adds to an attribute...).
     * This does not seem to be the best design... neeed a way to test this. */
    // patch the real size back into both the meta and the attribute
    fs_file->meta->size = fs_attr->nrd.initsize;
    fs_attr->size = fs_file->meta->size;
    fs_attr->nrd.allocsize = fs_file->meta->size;

    // a failure to load extended attributes is treated as non-fatal
    result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3);
    if (result != 0) {
        if (tsk_verbose)
            tsk_fprintf(stderr,
                "WARNING: Extended attributes failed to load for the BadBlocks file.\n");
        tsk_error_reset();
    }

    fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
    return 0;
}
2266 | | |
2267 | | |
2268 | | /** \internal |
2269 | | * Copy the catalog file or folder record entry into a TSK data structure. |
2270 | | * @param a_hfs File system being analyzed |
2271 | | * @param a_hfs_entry Catalog record entry (HFS_ENTRY *) |
2272 | | * @param a_fs_file Structure to copy data into (TSK_FS_FILE *) |
2273 | | * Returns 1 on error. |
2274 | | */ |
2275 | | static uint8_t |
2276 | | hfs_dinode_copy(HFS_INFO * a_hfs, const HFS_ENTRY * a_hfs_entry, |
2277 | | TSK_FS_FILE * a_fs_file) |
2278 | 0 | { |
2279 | | |
2280 | | // Note, a_hfs_entry->cat is really of type hfs_file. But, hfs_file_folder is a union |
2281 | | // of that type with hfs_folder. Both of hfs_file and hfs_folder have the same first member. |
2282 | | // So, this cast is appropriate. |
2283 | 0 | const hfs_file_folder *a_entry = |
2284 | 0 | (hfs_file_folder *) & (a_hfs_entry->cat); |
2285 | 0 | const hfs_file_fold_std *std; |
2286 | 0 | TSK_FS_META *a_fs_meta = a_fs_file->meta; |
2287 | 0 | TSK_FS_INFO *fs; |
2288 | 0 | uint16_t hfsmode; |
2289 | 0 | TSK_INUM_T iStd; // the inum (or CNID) that occurs in the standard file metadata |
2290 | |
|
2291 | 0 | if (a_entry == NULL) { |
2292 | 0 | error_detected(TSK_ERR_FS_ARG, |
2293 | 0 | "hfs_dinode_copy: a_entry = a_hfs_entry->cat is NULL"); |
2294 | 0 | return 1; |
2295 | 0 | } |
2296 | | |
2297 | 0 | fs = (TSK_FS_INFO *) & a_hfs->fs_info; |
2298 | | |
2299 | | |
2300 | | // Just a sanity check. The inum (or cnid) occurs in two places in the |
2301 | | // entry data structure. |
2302 | 0 | iStd = tsk_getu32(fs->endian, a_entry->file.std.cnid); |
2303 | 0 | if (iStd != a_hfs_entry->inum) { |
2304 | 0 | if (tsk_verbose) |
2305 | 0 | tsk_fprintf(stderr, |
2306 | 0 | "WARNING: hfs_dinode_copy: HFS_ENTRY with conflicting values for inum (or cnid).\n"); |
2307 | 0 | } |
2308 | |
|
2309 | 0 | if (a_fs_meta == NULL) { |
2310 | 0 | tsk_error_set_errno(TSK_ERR_FS_ARG); |
2311 | 0 | tsk_error_set_errstr("hfs_dinode_copy: a_fs_meta is NULL"); |
2312 | 0 | return 1; |
2313 | 0 | } |
2314 | | |
2315 | | // both files and folders start off the same |
2316 | 0 | std = &(a_entry->file.std); |
2317 | |
|
2318 | 0 | if (tsk_verbose) |
2319 | 0 | tsk_fprintf(stderr, |
2320 | 0 | "hfs_dinode_copy: called for file/folder %" PRIu32 "\n", |
2321 | 0 | tsk_getu32(fs->endian, std->cnid)); |
2322 | |
|
2323 | 0 | if (a_fs_meta->content_len < HFS_FILE_CONTENT_LEN) { |
2324 | 0 | if ((a_fs_meta = |
2325 | 0 | tsk_fs_meta_realloc(a_fs_meta, |
2326 | 0 | HFS_FILE_CONTENT_LEN)) == NULL) { |
2327 | 0 | return 1; |
2328 | 0 | } |
2329 | 0 | } |
2330 | 0 | a_fs_meta->attr_state = TSK_FS_META_ATTR_EMPTY; |
2331 | 0 | if (a_fs_meta->attr) { |
2332 | 0 | tsk_fs_attrlist_markunused(a_fs_meta->attr); |
2333 | 0 | } |
2334 | | |
2335 | | |
2336 | | /* |
2337 | | * Copy the file type specific stuff first |
2338 | | */ |
2339 | 0 | hfsmode = tsk_getu16(fs->endian, std->perm.mode); |
2340 | |
|
2341 | 0 | if (tsk_getu16(fs->endian, std->rec_type) == HFS_FOLDER_RECORD) { |
2342 | | // set the type of mode is not set |
2343 | 0 | if ((hfsmode & HFS_IN_IFMT) == 0) |
2344 | 0 | a_fs_meta->type = TSK_FS_META_TYPE_DIR; |
2345 | 0 | a_fs_meta->size = 0; |
2346 | 0 | memset(a_fs_meta->content_ptr, 0, HFS_FILE_CONTENT_LEN); |
2347 | 0 | } |
2348 | 0 | else if (tsk_getu16(fs->endian, std->rec_type) == HFS_FILE_RECORD) { |
2349 | 0 | hfs_fork *fork; |
2350 | | // set the type of mode is not set |
2351 | 0 | if ((hfsmode & HFS_IN_IFMT) == 0) |
2352 | 0 | a_fs_meta->type = TSK_FS_META_TYPE_REG; |
2353 | 0 | a_fs_meta->size = |
2354 | 0 | tsk_getu64(fs->endian, a_entry->file.data.logic_sz); |
2355 | | |
2356 | | // copy the data and resource forks |
2357 | 0 | fork = (hfs_fork *) a_fs_meta->content_ptr; |
2358 | 0 | memcpy(fork, &(a_entry->file.data), sizeof(hfs_fork)); |
2359 | 0 | memcpy(&fork[1], &(a_entry->file.resource), sizeof(hfs_fork)); |
2360 | 0 | } |
2361 | 0 | else { |
2362 | 0 | if (tsk_verbose) |
2363 | 0 | tsk_fprintf(stderr, |
2364 | 0 | "hfs_dinode_copy error: catalog entry is neither file nor folder\n"); |
2365 | 0 | return 1; |
2366 | 0 | } |
2367 | | |
2368 | | /* |
2369 | | * Copy the standard stuff. |
2370 | | * Use default values (as defined in spec) if mode is not defined. |
2371 | | */ |
2372 | 0 | if ((hfsmode & HFS_IN_IFMT) == 0) { |
2373 | 0 | a_fs_meta->mode = 0; |
2374 | 0 | a_fs_meta->uid = 99; |
2375 | 0 | a_fs_meta->gid = 99; |
2376 | 0 | } |
2377 | 0 | else { |
2378 | 0 | a_fs_meta->mode = hfs_mode_to_tsk_mode(hfsmode); |
2379 | 0 | a_fs_meta->type = hfs_mode_to_tsk_meta_type(hfsmode); |
2380 | 0 | a_fs_meta->uid = tsk_getu32(fs->endian, std->perm.owner); |
2381 | 0 | a_fs_meta->gid = tsk_getu32(fs->endian, std->perm.group); |
2382 | 0 | } |
2383 | | |
2384 | | // this field is set only for "indirect" entries |
2385 | 0 | if (tsk_getu32(fs->endian, std->perm.special.nlink)) |
2386 | 0 | a_fs_meta->nlink = tsk_getu32(fs->endian, std->perm.special.nlink); |
2387 | 0 | else |
2388 | 0 | a_fs_meta->nlink = 1; |
2389 | |
|
2390 | 0 | a_fs_meta->mtime = |
2391 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->cmtime)); |
2392 | 0 | a_fs_meta->atime = |
2393 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->atime)); |
2394 | 0 | a_fs_meta->crtime = |
2395 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->crtime)); |
2396 | 0 | a_fs_meta->ctime = |
2397 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->amtime)); |
2398 | 0 | a_fs_meta->time2.hfs.bkup_time = |
2399 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->bkup_date)); |
2400 | 0 | a_fs_meta->mtime_nano = a_fs_meta->atime_nano = a_fs_meta->ctime_nano = |
2401 | 0 | a_fs_meta->crtime_nano = 0; |
2402 | 0 | a_fs_meta->time2.hfs.bkup_time_nano = 0; |
2403 | |
|
2404 | 0 | a_fs_meta->addr = tsk_getu32(fs->endian, std->cnid); |
2405 | | |
2406 | | // All entries here are used. |
2407 | 0 | a_fs_meta->flags = TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_USED; |
2408 | |
|
2409 | 0 | if (std->perm.o_flags & HFS_PERM_OFLAG_COMPRESSED) |
2410 | 0 | a_fs_meta->flags |= TSK_FS_META_FLAG_COMP; |
2411 | | |
2412 | | // We copy this inum (or cnid) here, because this file *might* have been a hard link. In |
2413 | | // that case, we want to make sure that a_fs_file points consistently to the target of the |
2414 | | // link. |
2415 | | |
2416 | | //if (a_fs_file->name != NULL) { |
2417 | | // a_fs_file->name->meta_addr = a_fs_meta->addr; |
2418 | | //} |
2419 | | |
2420 | | /* TODO @@@ could fill in name2 with this entry's name and parent inode |
2421 | | from Catalog entry */ |
2422 | | |
2423 | | /* set the link string (if the file is a link) |
2424 | | * The size check is a sanity check so that we don't try to allocate |
2425 | | * a huge amount of memory for a bad inode value |
2426 | | */ |
2427 | 0 | if ((a_fs_meta->type == TSK_FS_META_TYPE_LNK) && |
2428 | 0 | (a_fs_meta->size >= 0) && (a_fs_meta->size < HFS_MAXPATHLEN)) { |
2429 | |
|
2430 | 0 | ssize_t bytes_read; |
2431 | |
|
2432 | 0 | a_fs_meta->link = tsk_malloc((size_t) a_fs_meta->size + 1); |
2433 | 0 | if (a_fs_meta->link == NULL) |
2434 | 0 | return 1; |
2435 | | |
2436 | 0 | bytes_read = tsk_fs_file_read(a_fs_file, (TSK_OFF_T) 0, |
2437 | 0 | a_fs_meta->link, (size_t) a_fs_meta->size, |
2438 | 0 | TSK_FS_FILE_READ_FLAG_NONE); |
2439 | 0 | a_fs_meta->link[a_fs_meta->size] = '\0'; |
2440 | |
|
2441 | 0 | if (bytes_read != a_fs_meta->size) { |
2442 | 0 | if (tsk_verbose) |
2443 | 0 | tsk_fprintf(stderr, |
2444 | 0 | "hfs_dinode_copy: failed to read contents of symbolic link; " |
2445 | 0 | "expected %u bytes but tsk_fs_file_read() returned %u\n", |
2446 | 0 | a_fs_meta->size, bytes_read); |
2447 | 0 | free(a_fs_meta->link); |
2448 | 0 | a_fs_meta->link = NULL; |
2449 | 0 | return 1; |
2450 | 0 | } |
2451 | 0 | } |
2452 | | |
2453 | 0 | return 0; |
2454 | 0 | } |
2455 | | |
2456 | | |
2457 | | /** \internal |
2458 | | * Load a catalog file entry and save it in the TSK_FS_FILE structure. |
2459 | | * |
2460 | | * @param fs File system to read from. |
2461 | | * @param a_fs_file Structure to read into. |
2462 | | * @param inum File address to load |
2463 | | * @returns 1 on error |
2464 | | */ |
2465 | | static uint8_t |
2466 | | hfs_inode_lookup(TSK_FS_INFO * fs, TSK_FS_FILE * a_fs_file, |
2467 | | TSK_INUM_T inum) |
2468 | 2 | { |
2469 | 2 | HFS_INFO *hfs = (HFS_INFO *) fs; |
2470 | 2 | HFS_ENTRY entry; |
2471 | | |
2472 | 2 | if (a_fs_file == NULL) { |
2473 | 0 | tsk_error_set_errno(TSK_ERR_FS_ARG); |
2474 | 0 | tsk_error_set_errstr("hfs_inode_lookup: fs_file is NULL"); |
2475 | 0 | return 1; |
2476 | 0 | } |
2477 | | |
2478 | 2 | if (a_fs_file->meta == NULL) { |
2479 | 2 | a_fs_file->meta = tsk_fs_meta_alloc(HFS_FILE_CONTENT_LEN); |
2480 | 2 | } |
2481 | | |
2482 | 2 | if (a_fs_file->meta == NULL) { |
2483 | 0 | return 1; |
2484 | 0 | } |
2485 | 2 | else { |
2486 | 2 | tsk_fs_meta_reset(a_fs_file->meta); |
2487 | 2 | } |
2488 | | |
2489 | 2 | if (tsk_verbose) |
2490 | 0 | tsk_fprintf(stderr, "hfs_inode_lookup: looking up %" PRIuINUM "\n", |
2491 | 0 | inum); |
2492 | | |
2493 | | // @@@ Will need to add orphan stuff here too |
2494 | | |
2495 | | /* First see if this is a special entry |
2496 | | * the special ones have their metadata stored in the volume header */ |
2497 | 2 | if (inum == HFS_EXTENTS_FILE_ID) { |
2498 | 1 | if (!hfs->has_extents_file) { |
2499 | 0 | error_detected(TSK_ERR_FS_INODE_NUM, |
2500 | 0 | "Extents File not present"); |
2501 | 0 | return 1; |
2502 | 0 | } |
2503 | | |
2504 | 1 | return hfs_make_extents(hfs, a_fs_file); |
2505 | 1 | } |
2506 | 1 | else if (inum == HFS_CATALOG_FILE_ID) { |
2507 | 1 | return hfs_make_catalog(hfs, a_fs_file); |
2508 | 1 | } |
2509 | 0 | else if (inum == HFS_BAD_BLOCK_FILE_ID) { |
2510 | | // Note: the Extents file and the BadBlocks file are really the same. |
2511 | 0 | if (!hfs->has_extents_file) { |
2512 | 0 | error_detected(TSK_ERR_FS_INODE_NUM, |
2513 | 0 | "BadBlocks File not present"); |
2514 | 0 | return 1; |
2515 | 0 | } |
2516 | 0 | return hfs_make_badblockfile(hfs, a_fs_file); |
2517 | 0 | } |
2518 | 0 | else if (inum == HFS_ALLOCATION_FILE_ID) { |
2519 | 0 | return hfs_make_blockmap(hfs, a_fs_file); |
2520 | 0 | } |
2521 | 0 | else if (inum == HFS_STARTUP_FILE_ID) { |
2522 | 0 | if (!hfs->has_startup_file) { |
2523 | 0 | error_detected(TSK_ERR_FS_INODE_NUM, |
2524 | 0 | "Startup File not present"); |
2525 | 0 | return 1; |
2526 | 0 | } |
2527 | 0 | return hfs_make_startfile(hfs, a_fs_file); |
2528 | 0 | } |
2529 | 0 | else if (inum == HFS_ATTRIBUTES_FILE_ID) { |
2530 | 0 | if (!hfs->has_attributes_file) { |
2531 | 0 | error_detected(TSK_ERR_FS_INODE_NUM, |
2532 | 0 | "Attributes File not present"); |
2533 | 0 | return 1; |
2534 | 0 | } |
2535 | 0 | return hfs_make_attrfile(hfs, a_fs_file); |
2536 | 0 | } |
2537 | | |
2538 | | /* Lookup inode and store it in the HFS structure */ |
2539 | 0 | if (hfs_cat_file_lookup(hfs, inum, &entry, TRUE)) { |
2540 | 0 | return 1; |
2541 | 0 | } |
2542 | | |
2543 | | /* Copy the structure in hfs to generic fs_inode */ |
2544 | 0 | if (hfs_dinode_copy(hfs, &entry, a_fs_file)) { |
2545 | 0 | return 1; |
2546 | 0 | } |
2547 | | |
2548 | | /* If this is potentially a compressed file, its |
2549 | | * actual size is unknown until we examine the |
2550 | | * extended attributes */ |
2551 | 0 | if ((a_fs_file->meta->size == 0) && |
2552 | 0 | (a_fs_file->meta->type == TSK_FS_META_TYPE_REG) && |
2553 | 0 | (a_fs_file->meta->attr_state != TSK_FS_META_ATTR_ERROR) && |
2554 | 0 | ((a_fs_file->meta->attr_state != TSK_FS_META_ATTR_STUDIED) || |
2555 | 0 | (a_fs_file->meta->attr == NULL))) { |
2556 | 0 | hfs_load_attrs(a_fs_file); |
2557 | 0 | } |
2558 | |
|
2559 | 0 | return 0; |
2560 | 0 | } |
2561 | | |
/* One entry of a compression-unit offset table: the byte offset and byte
 * length of a single compressed block inside the resource fork's data
 * area. Tables of these are built by hfs_read_zlib_block_table() and
 * hfs_read_lzvn_block_table() below. */
typedef struct {
    uint32_t offset;
    uint32_t length;
} CMP_OFFSET_ENTRY;
2566 | | |
2567 | | |
2568 | | /** |
2569 | | * \internal |
2570 | | * Reads the ZLIB compression block table from the attribute. |
2571 | | * |
2572 | | * @param rAtttr the attribute to read |
2573 | | * @param offsetTableOut block table |
2574 | | * @param tableSizeOut size of block table |
2575 | | * @param tableOffsetOut the offset of the block table in the resource fork |
2576 | | * @return 1 on success, 0 on error |
2577 | | */ |
2578 | | static int |
2579 | 0 | hfs_read_zlib_block_table(const TSK_FS_ATTR *rAttr, CMP_OFFSET_ENTRY** offsetTableOut, uint32_t* tableSizeOut, uint32_t* tableOffsetOut) { |
2580 | 0 | ssize_t attrReadResult; |
2581 | 0 | hfs_resource_fork_header rfHeader; |
2582 | 0 | uint32_t dataOffset; |
2583 | 0 | uint32_t offsetTableOffset; |
2584 | 0 | char fourBytes[4]; // Size of the offset table, little endian |
2585 | 0 | uint32_t tableSize; // Size of the offset table |
2586 | 0 | char *offsetTableData = NULL; |
2587 | 0 | CMP_OFFSET_ENTRY *offsetTable = NULL; |
2588 | 0 | size_t indx; |
2589 | 0 |
|
2590 | 0 | // Read the resource fork header |
2591 | 0 | attrReadResult = tsk_fs_attr_read(rAttr, 0, (char *) &rfHeader, |
2592 | 0 | sizeof(hfs_resource_fork_header), TSK_FS_FILE_READ_FLAG_NONE); |
2593 | 0 | if (attrReadResult != sizeof(hfs_resource_fork_header)) { |
2594 | 0 | error_returned |
2595 | 0 | (" %s: trying to read the resource fork header", __func__); |
2596 | 0 | return 0; |
2597 | 0 | } |
2598 | 0 |
|
2599 | 0 | // Begin to parse the resource fork. For now, we just need the data offset. |
2600 | 0 | dataOffset = tsk_getu32(TSK_BIG_ENDIAN, rfHeader.dataOffset); |
2601 | 0 |
|
2602 | 0 | // The resource's data begins with an offset table, which defines blocks |
2603 | 0 | // of (optionally) zlib-compressed data (so that the OS can do file seeks |
2604 | 0 | // efficiently; each uncompressed block is 64KB). |
2605 | 0 | offsetTableOffset = dataOffset + 4; |
2606 | 0 |
|
2607 | 0 | // read 4 bytes, the number of table entries, little endian |
2608 | 0 | attrReadResult = |
2609 | 0 | tsk_fs_attr_read(rAttr, offsetTableOffset, fourBytes, 4, |
2610 | 0 | TSK_FS_FILE_READ_FLAG_NONE); |
2611 | 0 | if (attrReadResult != 4) { |
2612 | 0 | error_returned |
2613 | 0 | (" %s: trying to read the offset table size, " |
2614 | 0 | "return value of %u should have been 4", __func__, attrReadResult); |
2615 | 0 | return 0; |
2616 | 0 | } |
2617 | 0 | tableSize = tsk_getu32(TSK_LIT_ENDIAN, fourBytes); |
2618 | 0 |
|
2619 | 0 | // Each table entry is 8 bytes long |
2620 | 0 | offsetTableData = tsk_malloc(tableSize * 8); |
2621 | 0 | if (offsetTableData == NULL) { |
2622 | 0 | error_returned |
2623 | 0 | (" %s: space for the offset table raw data", __func__); |
2624 | 0 | return 0; |
2625 | 0 | } |
2626 | 0 |
|
2627 | 0 | offsetTable = |
2628 | 0 | (CMP_OFFSET_ENTRY *) tsk_malloc(tableSize * |
2629 | 0 | sizeof(CMP_OFFSET_ENTRY)); |
2630 | 0 | if (offsetTable == NULL) { |
2631 | 0 | error_returned |
2632 | 0 | (" %s: space for the offset table", __func__); |
2633 | 0 | goto on_error; |
2634 | 0 | } |
2635 | 0 |
|
2636 | 0 | attrReadResult = tsk_fs_attr_read(rAttr, offsetTableOffset + 4, |
2637 | 0 | offsetTableData, tableSize * 8, TSK_FS_FILE_READ_FLAG_NONE); |
2638 | 0 | if (attrReadResult != (ssize_t) tableSize * 8) { |
2639 | 0 | error_returned |
2640 | 0 | (" %s: reading in the compression offset table, " |
2641 | 0 | "return value %u should have been %u", __func__, attrReadResult, |
2642 | 0 | tableSize * 8); |
2643 | 0 | goto on_error; |
2644 | 0 | } |
2645 | 0 |
|
2646 | 0 | for (indx = 0; indx < tableSize; ++indx) { |
2647 | 0 | offsetTable[indx].offset = |
2648 | 0 | tsk_getu32(TSK_LIT_ENDIAN, offsetTableData + indx * 8); |
2649 | 0 | offsetTable[indx].length = |
2650 | 0 | tsk_getu32(TSK_LIT_ENDIAN, offsetTableData + indx * 8 + 4); |
2651 | 0 | } |
2652 | 0 |
|
2653 | 0 | free(offsetTableData); |
2654 | 0 |
|
2655 | 0 | *offsetTableOut = offsetTable; |
2656 | 0 | *tableSizeOut = tableSize; |
2657 | 0 | *tableOffsetOut = offsetTableOffset; |
2658 | 0 | return 1; |
2659 | 0 |
|
2660 | 0 | on_error: |
2661 | 0 | free(offsetTable); |
2662 | 0 | free(offsetTableData); |
2663 | 0 | return 0; |
2664 | 0 | } |
2665 | | |
2666 | | |
2667 | | /** |
2668 | | * \internal |
2669 | | * Reads the LZVN compression block table from the attribute. |
2670 | | * |
2671 | | * @param rAtttr the attribute to read |
2672 | | * @param offsetTableOut block table |
2673 | | * @param tableSizeOut size of block table |
2674 | | * @param tableOffsetOut the offset of the block table in the resource fork |
2675 | | * @return 1 on success, 0 on error |
2676 | | */ |
2677 | | static int |
2678 | 0 | hfs_read_lzvn_block_table(const TSK_FS_ATTR *rAttr, CMP_OFFSET_ENTRY** offsetTableOut, uint32_t* tableSizeOut, uint32_t* tableOffsetOut) { |
2679 | 0 | ssize_t attrReadResult; |
2680 | 0 | char fourBytes[4]; |
2681 | 0 | uint32_t tableDataSize; |
2682 | 0 | uint32_t tableSize; // Size of the offset table |
2683 | 0 | char *offsetTableData = NULL; |
2684 | 0 | CMP_OFFSET_ENTRY *offsetTable = NULL; |
2685 | 0 |
|
2686 | 0 | // The offset table is a sequence of 4-byte offsets of compressed |
2687 | 0 | // blocks. The first 4 bytes is thus the offset of the first block, |
2688 | 0 | // but also 4 times the number of entries in the table. |
2689 | 0 | attrReadResult = tsk_fs_attr_read(rAttr, 0, fourBytes, 4, |
2690 | 0 | TSK_FS_FILE_READ_FLAG_NONE); |
2691 | 0 | if (attrReadResult != 4) { |
2692 | 0 | error_returned |
2693 | 0 | (" %s: trying to read the offset table size, " |
2694 | 0 | "return value of %u should have been 4", __func__, attrReadResult); |
2695 | 0 | return 0; |
2696 | 0 | } |
2697 | 0 |
|
2698 | 0 | tableDataSize = tsk_getu32(TSK_LIT_ENDIAN, fourBytes); |
2699 | 0 |
|
2700 | 0 | offsetTableData = tsk_malloc(tableDataSize); |
2701 | 0 | if (offsetTableData == NULL) { |
2702 | 0 | error_returned |
2703 | 0 | (" %s: space for the offset table raw data", __func__); |
2704 | 0 | return 0; |
2705 | 0 | } |
2706 | 0 |
|
2707 | 0 | // table entries are 4 bytes, last entry is end of data |
2708 | 0 | tableSize = tableDataSize / 4 - 1; |
2709 | 0 |
|
2710 | 0 | offsetTable = |
2711 | 0 | (CMP_OFFSET_ENTRY *) tsk_malloc(tableSize * |
2712 | 0 | sizeof(CMP_OFFSET_ENTRY)); |
2713 | 0 | if (offsetTable == NULL) { |
2714 | 0 | error_returned |
2715 | 0 | (" %s: space for the offset table", __func__); |
2716 | 0 | goto on_error; |
2717 | 0 | } |
2718 | 0 |
|
2719 | 0 | attrReadResult = tsk_fs_attr_read(rAttr, 0, |
2720 | 0 | offsetTableData, tableDataSize, TSK_FS_FILE_READ_FLAG_NONE); |
2721 | 0 | if (attrReadResult != (ssize_t) tableDataSize) { |
2722 | 0 | error_returned |
2723 | 0 | (" %s: reading in the compression offset table, " |
2724 | 0 | "return value %u should have been %u", __func__, attrReadResult, |
2725 | 0 | tableDataSize); |
2726 | 0 | goto on_error; |
2727 | 0 | } |
2728 | 0 |
|
2729 | 0 | uint32_t a = tableDataSize; |
2730 | 0 | uint32_t b; |
2731 | 0 | size_t i; |
2732 | 0 |
|
2733 | 0 | for (i = 0; i < tableSize; ++i) { |
2734 | 0 | b = tsk_getu32(TSK_LIT_ENDIAN, offsetTableData + 4*(i+1)); |
2735 | 0 | offsetTable[i].offset = a; |
2736 | 0 | offsetTable[i].length = b - a; |
2737 | 0 | a = b; |
2738 | 0 | } |
2739 | 0 |
|
2740 | 0 | free(offsetTableData); |
2741 | 0 |
|
2742 | 0 | *offsetTableOut = offsetTable; |
2743 | 0 | *tableSizeOut = tableSize; |
2744 | 0 | *tableOffsetOut = 0; |
2745 | 0 | return 1; |
2746 | 0 |
|
2747 | 0 | on_error: |
2748 | 0 | free(offsetTable); |
2749 | 0 | free(offsetTableData); |
2750 | 0 | return 0; |
2751 | 0 | } |
2752 | | |
2753 | | /** |
2754 | | * \internal |
2755 | | * "Decompress" a block which was stored uncompressed. |
2756 | | * |
2757 | | * @param rawBuf the compressed data |
2758 | | * @param len length of the compressed data |
2759 | | * @param uncBuf the decompressed data |
2760 | | * @param uncLen length of the decompressed data |
2761 | | * @return 1 on success, 0 on error |
2762 | | */ |
2763 | 0 | static int hfs_decompress_noncompressed_block(char* rawBuf, uint32_t len, char* uncBuf, uint64_t* uncLen) { |
2764 | 0 | // actually an uncompressed block of data; just copy |
2765 | 0 | if (tsk_verbose) |
2766 | 0 | tsk_fprintf(stderr, |
2767 | 0 | "%s: Copying an uncompressed compression unit\n", __func__); |
2768 | 0 |
|
2769 | 0 | if ((len - 1) > COMPRESSION_UNIT_SIZE) { |
2770 | 0 | error_detected(TSK_ERR_FS_READ, |
2771 | 0 | "%s: uncompressed block length %u is longer " |
2772 | 0 | "than compression unit size %u", __func__, len - 1, |
2773 | 0 | COMPRESSION_UNIT_SIZE); |
2774 | 0 | return 0; |
2775 | 0 | } |
2776 | 0 | memcpy(uncBuf, rawBuf + 1, len - 1); |
2777 | 0 | *uncLen = len - 1; |
2778 | 0 | return 1; |
2779 | 0 | } |
2780 | | |
2781 | | |
2782 | | #ifdef HAVE_LIBZ |
2783 | | /** |
2784 | | * \internal |
2785 | | * Decompress a block which was stored with ZLIB. |
2786 | | * |
2787 | | * @param rawBuf the compressed data |
2788 | | * @param len length of the compressed data |
2789 | | * @param uncBuf the decompressed data |
2790 | | * @param uncLen length of the decompressed data |
2791 | | * @return 1 on success, 0 on error |
2792 | | */ |
2793 | | static int hfs_decompress_zlib_block(char* rawBuf, uint32_t len, char* uncBuf, uint64_t* uncLen) |
2794 | 0 | { |
2795 | 0 | // see if this block is compressed |
2796 | 0 | if (len > 0 && (rawBuf[0] & 0x0F) != 0x0F) { |
2797 | 0 | // Uncompress the chunk of data |
2798 | 0 | if (tsk_verbose) |
2799 | 0 | tsk_fprintf(stderr, |
2800 | 0 | "%s: Inflating the compression unit\n", __func__); |
2801 | 0 |
|
2802 | 0 | unsigned long bytesConsumed; |
2803 | 0 | int infResult = zlib_inflate(rawBuf, (uint64_t) len, |
2804 | 0 | uncBuf, (uint64_t) COMPRESSION_UNIT_SIZE, |
2805 | 0 | uncLen, &bytesConsumed); |
2806 | 0 | if (infResult != 0) { |
2807 | 0 | error_returned |
2808 | 0 | (" %s: zlib inflation (uncompression) failed", |
2809 | 0 | __func__, infResult); |
2810 | 0 | return 0; |
2811 | 0 | } |
2812 | 0 |
|
2813 | 0 | if (bytesConsumed != len) { |
2814 | 0 | error_detected(TSK_ERR_FS_READ, |
2815 | 0 | " %s, decompressor did not consume the whole compressed data", |
2816 | 0 | __func__); |
2817 | 0 | return 0; |
2818 | 0 | } |
2819 | 0 |
|
2820 | 0 | return 1; |
2821 | 0 | } |
2822 | 0 | else { |
2823 | 0 | // actually an uncompressed block of data; just copy |
2824 | 0 | return hfs_decompress_noncompressed_block(rawBuf, len, uncBuf, uncLen); |
2825 | 0 | } |
2826 | 0 | } |
2827 | | #endif |
2828 | | |
2829 | | |
2830 | | /** |
2831 | | * \internal |
2832 | | * Decompress a block which was stored with LZVN. |
2833 | | * |
2834 | | * @param rawBuf the compressed data |
2835 | | * @param len length of the compressed data |
2836 | | * @param uncBuf the decompressed data |
2837 | | * @param uncLen length of the decompressed data |
2838 | | * @return 1 on success, 0 on error |
2839 | | */ |
2840 | | static int hfs_decompress_lzvn_block(char* rawBuf, uint32_t len, char* uncBuf, uint64_t* uncLen) |
2841 | 0 | { |
2842 | 0 | // see if this block is compressed |
2843 | 0 | if (len > 0 && rawBuf[0] != 0x06) { |
2844 | 0 | *uncLen = lzvn_decode_buffer(uncBuf, COMPRESSION_UNIT_SIZE, rawBuf, len); |
2845 | 0 | return 1; // apparently this can't fail |
2846 | 0 | } |
2847 | 0 | else { |
2848 | 0 | // actually an uncompressed block of data; just copy |
2849 | 0 | return hfs_decompress_noncompressed_block(rawBuf, len, uncBuf, uncLen); |
2850 | 0 | } |
2851 | 0 | } |
2852 | | |
2853 | | /** |
2854 | | * \internal |
2855 | | * Decompress a block. |
2856 | | * |
2857 | | * @param rAttr the attribute to read |
2858 | | * @param rawBuf the compressed data |
2859 | | * @param uncBuf the decompressed data |
2860 | | * @param offsetTable table of compressed block offsets |
2861 | | * @param offsetTableSize size of table of compressed block offsets |
2862 | | * @param offsetTableOffset offset of table of compressed block offsets |
2863 | | * @param indx index of block to read |
2864 | | * @param decompress_block pointer to decompression function |
2865 | | * @return decompressed size on success, -1 on error |
2866 | | */ |
2867 | | static ssize_t read_and_decompress_block( |
2868 | | const TSK_FS_ATTR* rAttr, |
2869 | | char* rawBuf, |
2870 | | char* uncBuf, |
2871 | | const CMP_OFFSET_ENTRY* offsetTable, |
2872 | | uint32_t offsetTableSize, |
2873 | | uint32_t offsetTableOffset, |
2874 | | size_t indx, |
2875 | | int (*decompress_block)(char* rawBuf, |
2876 | | uint32_t len, |
2877 | | char* uncBuf, |
2878 | | uint64_t* uncLen) |
2879 | | ) |
2880 | 0 | { |
2881 | 0 | // @@@ BC: Looks like we should have bounds checks that indx < offsetTableSize, but we should confirm |
2882 | 0 | ssize_t attrReadResult; |
2883 | 0 | uint32_t offset = offsetTableOffset + offsetTable[indx].offset; |
2884 | 0 | uint32_t len = offsetTable[indx].length; |
2885 | 0 | uint64_t uncLen; |
2886 | 0 |
|
2887 | 0 | if (tsk_verbose) |
2888 | 0 | tsk_fprintf(stderr, |
2889 | 0 | "%s: Reading compression unit %d, length %d\n", |
2890 | 0 | __func__, indx, len); |
2891 | 0 |
|
2892 | 0 | /* Github #383 referenced that if len is 0, then the below code causes |
2893 | 0 | * problems. Added this check, but I don't have data to verify this on. |
2894 | 0 | * it looks like it should at least not crash, but it isn't clear if it |
2895 | 0 | * will also do the right thing and if should actually break here |
2896 | 0 | * instead. */ |
2897 | 0 | if (len == 0) { |
2898 | 0 | return 0; |
2899 | 0 | } |
2900 | 0 |
|
2901 | 0 | if (len > COMPRESSION_UNIT_SIZE + 1) { |
2902 | 0 | error_detected(TSK_ERR_FS_READ, |
2903 | 0 | "%s: block size is too large: %u", __func__, len); |
2904 | 0 | return -1; |
2905 | 0 | } |
2906 | 0 |
|
2907 | 0 | // Read in the block of compressed data |
2908 | 0 | attrReadResult = tsk_fs_attr_read(rAttr, offset, |
2909 | 0 | rawBuf, len, TSK_FS_FILE_READ_FLAG_NONE); |
2910 | 0 | if (attrReadResult != (ssize_t) len) { |
2911 | 0 | char msg[] = |
2912 | 0 | "%s%s: reading in the compression offset table, " |
2913 | 0 | "return value %u should have been %u"; |
2914 | 0 |
|
2915 | 0 | if (attrReadResult < 0 ) { |
2916 | 0 | error_returned(msg, " ", __func__, attrReadResult, len); |
2917 | 0 | } |
2918 | 0 | else { |
2919 | 0 | error_detected(TSK_ERR_FS_READ, "", __func__, attrReadResult, len); |
2920 | 0 | } |
2921 | 0 | return -1; |
2922 | 0 | } |
2923 | 0 |
|
2924 | 0 | if (!decompress_block(rawBuf, len, uncBuf, &uncLen)) { |
2925 | 0 | return -1; |
2926 | 0 | } |
2927 | 0 |
|
2928 | 0 | // If size is a multiple of COMPRESSION_UNIT_SIZE, |
2929 | 0 | // expected uncompressed length is COMPRESSION_UNIT_SIZE |
2930 | 0 | const uint32_t expUncLen = indx == offsetTableSize - 1 ? |
2931 | 0 | ((rAttr->fs_file->meta->size - 1) % COMPRESSION_UNIT_SIZE) + 1 : |
2932 | 0 | COMPRESSION_UNIT_SIZE; |
2933 | 0 |
|
2934 | 0 | if (uncLen != expUncLen) { |
2935 | 0 | error_detected(TSK_ERR_FS_READ, |
2936 | 0 | "%s: compressed block decompressed to %u bytes, " |
2937 | 0 | "should have been %u bytes", __func__, uncLen, expUncLen); |
2938 | 0 | return -1; |
2939 | 0 | } |
2940 | 0 |
|
2941 | 0 | // There are now uncLen bytes of uncompressed data available from |
2942 | 0 | // this comp unit. |
2943 | 0 | return (ssize_t)uncLen; |
2944 | 0 | } |
2945 | | |
/**
 * \internal
 * Attr walk callback function for compressed resources.
 *
 * Reads the compression-unit offset table from the file's resource fork,
 * then decompresses each unit and feeds it to the action callback in
 * block-size "lumps".
 *
 * @param fs_attr the attribute to read (must be the HFS+ data fork of a
 *        compressed file)
 * @param flags walk flags (not referenced in this function body)
 * @param a_action action callback
 * @param ptr context for the action callback
 * @param read_block_table pointer to block table read function
 * @param decompress_block pointer to decompression function
 * @return 0 on success, 1 on error
 */
static uint8_t
hfs_attr_walk_compressed_rsrc(const TSK_FS_ATTR * fs_attr,
    int flags, TSK_FS_FILE_WALK_CB a_action, void *ptr,
    int (*read_block_table)(const TSK_FS_ATTR *rAttr,
                            CMP_OFFSET_ENTRY** offsetTableOut,
                            uint32_t* tableSizeOut,
                            uint32_t* tableOffsetOut),
    int (*decompress_block)(char* rawBuf,
                            uint32_t len,
                            char* uncBuf,
                            uint64_t* uncLen))
{
    TSK_FS_INFO *fs;
    TSK_FS_FILE *fs_file;
    const TSK_FS_ATTR *rAttr;   // resource fork attribute
    char *rawBuf = NULL;        // compressed data
    char *uncBuf = NULL;        // uncompressed data
    uint32_t offsetTableOffset;
    uint32_t offsetTableSize;   // The number of table entries
    CMP_OFFSET_ENTRY *offsetTable = NULL;
    size_t indx;                // index for looping over the offset table
    TSK_OFF_T off = 0;          // the offset in the uncompressed data stream consumed thus far

    if (tsk_verbose)
        tsk_fprintf(stderr,
            "%s: Entered, because this is a compressed file with compressed data in the resource fork\n", __func__);

    // clean up any error messages that are lying around
    tsk_error_reset();
    if ((fs_attr == NULL) || (fs_attr->fs_file == NULL)
        || (fs_attr->fs_file->meta == NULL)
        || (fs_attr->fs_file->fs_info == NULL)) {
        tsk_error_set_errno(TSK_ERR_FS_ARG);
        tsk_error_set_errstr("%s: Null arguments given\n", __func__);
        return 1;
    }

    // Check that the ATTR being read is the main DATA resource, 128-0,
    // because this is the only one that can be compressed in HFS+
    if ((fs_attr->id != HFS_FS_ATTR_ID_DATA) ||
        (fs_attr->type != TSK_FS_ATTR_TYPE_HFS_DATA)) {
        error_detected(TSK_ERR_FS_ARG,
            "%s: arg specified an attribute %u-%u that is not the data fork, "
            "Only the data fork can be compressed.", __func__, fs_attr->type,
            fs_attr->id);
        return 1;
    }

    /* This MUST be a compressed attribute */
    if (!(fs_attr->flags & TSK_FS_ATTR_COMP)) {
        error_detected(TSK_ERR_FS_FWALK,
            "%s: called with non-special attribute: %x",
            __func__, fs_attr->flags);
        return 1;
    }

    fs = fs_attr->fs_file->fs_info;
    fs_file = fs_attr->fs_file;

    /******** Open the Resource Fork ***********/

    // find the attribute for the resource fork
    rAttr =
        tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_HFS_RSRC,
        HFS_FS_ATTR_ID_RSRC, TRUE);
    if (rAttr == NULL) {
        error_returned
            (" %s: could not get the attribute for the resource fork of the file", __func__);
        return 1;
    }

    // read the offset table from the fork header
    // (on failure the reader frees its own allocations)
    if (!read_block_table(rAttr, &offsetTable, &offsetTableSize, &offsetTableOffset)) {
        return 1;
    }

    // Allocate two buffers for the raw and uncompressed data
    /* Raw data can be COMPRESSION_UNIT_SIZE+1 if the data is not
     * compressed and there is a 1-byte flag that indicates that
     * the data is not compressed. */
    rawBuf = (char *) tsk_malloc(COMPRESSION_UNIT_SIZE + 1);
    if (rawBuf == NULL) {
        error_returned
            (" %s: buffers for reading and uncompressing", __func__);
        goto on_error;
    }

    uncBuf = (char *) tsk_malloc(COMPRESSION_UNIT_SIZE);
    if (uncBuf == NULL) {
        error_returned
            (" %s: buffers for reading and uncompressing", __func__);
        goto on_error;
    }

    // FOR entry in the table DO
    for (indx = 0; indx < offsetTableSize; ++indx) {
        ssize_t uncLen;         // uncompressed length
        unsigned int blockSize;
        uint64_t lumpSize;
        uint64_t remaining;
        char *lumpStart;

        // -1 = hard error; 0 = empty unit, skip; >0 = bytes now in uncBuf
        switch ((uncLen = read_and_decompress_block(
                    rAttr, rawBuf, uncBuf,
                    offsetTable, offsetTableSize, offsetTableOffset, indx,
                    decompress_block)))
        {
        case -1:
            goto on_error;
        case  0:
            continue;
        default:
            break;
        }

        // Call the a_action callback with "Lumps"
        // that are at most the block size.
        blockSize = fs->block_size;
        remaining = uncLen;
        lumpStart = uncBuf;

        while (remaining > 0) {
            int retval;         // action return value
            lumpSize = remaining <= blockSize ? remaining : blockSize;

            // Apply the callback function
            if (tsk_verbose)
                tsk_fprintf(stderr,
                    "%s: Calling action on lump of size %"
                    PRIu64 " offset %" PRIu64 " in the compression unit\n",
                    __func__, lumpSize, uncLen - remaining);
            // NOTE(review): this guard only has effect where SIZE_MAX is
            // smaller than UINT64_MAX (e.g. 32-bit builds); on LP64 it is
            // always false.
            if (lumpSize > SIZE_MAX) {
                error_detected(TSK_ERR_FS_FWALK,
                    " %s: lumpSize is too large for the action", __func__);
                goto on_error;
            }

            retval = a_action(fs_attr->fs_file, off, 0, lumpStart,
                (size_t) lumpSize,      // cast OK because of above test
                TSK_FS_BLOCK_FLAG_COMP, ptr);

            if (retval == TSK_WALK_ERROR) {
                error_detected(TSK_ERR_FS | 201,
                    "%s: callback returned an error", __func__);
                goto on_error;
            }
            else if (retval == TSK_WALK_STOP) {
                break;
            }

            // Find the next lump
            off += lumpSize;
            remaining -= lumpSize;
            lumpStart += lumpSize;
        }
    }

    // Done, so free up the allocated resources.
    free(offsetTable);
    free(rawBuf);
    free(uncBuf);
    return 0;

on_error:
    free(offsetTable);
    free(rawBuf);
    free(uncBuf);
    return 1;
}
3127 | | |
3128 | | |
3129 | | #ifdef HAVE_LIBZ |
3130 | | /** |
3131 | | * \internal |
3132 | | * Attr walk callback function for ZLIB compressed resources |
3133 | | * |
3134 | | * @param fs_attr the attribute to read |
3135 | | * @param flags |
3136 | | * @param a_action action callback |
3137 | | * @param ptr context for the action callback |
3138 | | * @return 0 on success, 1 on error |
3139 | | */ |
3140 | | static uint8_t |
3141 | | hfs_attr_walk_zlib_rsrc(const TSK_FS_ATTR * fs_attr, |
3142 | | int flags, TSK_FS_FILE_WALK_CB a_action, void *ptr) |
3143 | 0 | { |
3144 | 0 | return hfs_attr_walk_compressed_rsrc( |
3145 | 0 | fs_attr, flags, a_action, ptr, |
3146 | 0 | hfs_read_zlib_block_table, |
3147 | 0 | hfs_decompress_zlib_block |
3148 | 0 | ); |
3149 | 0 | } |
3150 | | #endif |
3151 | | |
3152 | | /** |
3153 | | * \internal |
3154 | | * Attr walk callback function for LZVN compressed resources |
3155 | | * |
3156 | | * @param fs_attr the attribute to read |
3157 | | * @param flags |
3158 | | * @param a_action action callback |
3159 | | * @param ptr context for the action callback |
3160 | | * @return 0 on success, 1 on error |
3161 | | */ |
3162 | | static uint8_t |
3163 | | hfs_attr_walk_lzvn_rsrc(const TSK_FS_ATTR * fs_attr, |
3164 | | int flags, TSK_FS_FILE_WALK_CB a_action, void *ptr) |
3165 | 0 | { |
3166 | 0 | return hfs_attr_walk_compressed_rsrc( |
3167 | 0 | fs_attr, flags, a_action, ptr, |
3168 | 0 | hfs_read_lzvn_block_table, |
3169 | 0 | hfs_decompress_lzvn_block |
3170 | 0 | ); |
3171 | 0 | } |
3172 | | |
3173 | | |
3174 | | /** |
3175 | | * \internal |
3176 | | * Read a compressed resource |
3177 | | * |
3178 | | * @param fs_attr the attribute to read |
3179 | | * @param a_offset the offset from which to read |
3180 | | * @param a_buf the buffer into which to read |
3181 | | * @param a_len the length of the buffer |
3182 | | * @param read_block_table pointer to block table read function |
3183 | | * @param decompress_block pointer to decompression function |
3184 | | * @return number of bytes read or -1 on error (incl if offset is past EOF) |
3185 | | */ |
3186 | | static ssize_t |
3187 | | hfs_file_read_compressed_rsrc(const TSK_FS_ATTR * a_fs_attr, |
3188 | | TSK_OFF_T a_offset, char *a_buf, size_t a_len, |
3189 | | int (*read_block_table)(const TSK_FS_ATTR *rAttr, |
3190 | | CMP_OFFSET_ENTRY** offsetTableOut, |
3191 | | uint32_t* tableSizeOut, |
3192 | | uint32_t* tableOffsetOut), |
3193 | | int (*decompress_block)(char* rawBuf, |
3194 | | uint32_t len, |
3195 | | char* uncBuf, |
3196 | | uint64_t* uncLen)) |
3197 | 0 | { |
3198 | 0 | TSK_FS_FILE *fs_file; |
3199 | 0 | const TSK_FS_ATTR *rAttr; |
3200 | 0 | char *rawBuf = NULL; |
3201 | 0 | char *uncBuf = NULL; |
3202 | 0 | uint32_t offsetTableOffset; |
3203 | 0 | uint32_t offsetTableSize; // Size of the offset table |
3204 | 0 | CMP_OFFSET_ENTRY *offsetTable = NULL; |
3205 | 0 | TSK_OFF_T indx; // index for looping over the offset table |
3206 | 0 | TSK_OFF_T startUnit = 0; |
3207 | 0 | uint32_t startUnitOffset = 0; |
3208 | 0 | TSK_OFF_T endUnit = 0; |
3209 | 0 | uint64_t bytesCopied; |
3210 | 0 |
|
3211 | 0 | if (tsk_verbose) |
3212 | 0 | tsk_fprintf(stderr, |
3213 | 0 | "%s: called because this file is compressed, with data in the resource fork\n", __func__); |
3214 | 0 |
|
3215 | 0 | // Reading zero bytes? OK at any offset, I say! |
3216 | 0 | if (a_len == 0) |
3217 | 0 | return 0; |
3218 | 0 |
|
3219 | 0 | if (a_offset < 0) { |
3220 | 0 | error_detected(TSK_ERR_FS_ARG, |
3221 | 0 | "%s: reading from file at a negative offset", |
3222 | 0 | __func__); |
3223 | 0 | return -1; |
3224 | 0 | } |
3225 | 0 |
|
3226 | 0 | if (a_len > SIZE_MAX / 2) { |
3227 | 0 | error_detected(TSK_ERR_FS_ARG, |
3228 | 0 | "%s: trying to read more than SIZE_MAX/2 is not supported.", |
3229 | 0 | __func__); |
3230 | 0 | return -1; |
3231 | 0 | } |
3232 | 0 |
|
3233 | 0 | if ((a_fs_attr == NULL) || (a_fs_attr->fs_file == NULL) |
3234 | 0 | || (a_fs_attr->fs_file->meta == NULL) |
3235 | 0 | || (a_fs_attr->fs_file->fs_info == NULL)) { |
3236 | 0 | error_detected(TSK_ERR_FS_ARG, |
3237 | 0 | "%s: NULL parameters passed", __func__); |
3238 | 0 | return -1; |
3239 | 0 | } |
3240 | 0 |
|
3241 | 0 | // This should be a compressed file. If not, that's an error! |
3242 | 0 | if (!(a_fs_attr->flags & TSK_FS_ATTR_COMP)) { |
3243 | 0 | error_detected(TSK_ERR_FS_ARG, |
3244 | 0 | "%s: called with non-special attribute: %x", |
3245 | 0 | __func__, a_fs_attr->flags); |
3246 | 0 | return -1; |
3247 | 0 | } |
3248 | 0 |
|
3249 | 0 | // Check that the ATTR being read is the main DATA resource, 4352-0, |
3250 | 0 | // because this is the only one that can be compressed in HFS+ |
3251 | 0 | if ((a_fs_attr->id != HFS_FS_ATTR_ID_DATA) || |
3252 | 0 | (a_fs_attr->type != TSK_FS_ATTR_TYPE_HFS_DATA)) { |
3253 | 0 | error_detected(TSK_ERR_FS_ARG, |
3254 | 0 | "%s: arg specified an attribute %u-%u that is not the data fork, " |
3255 | 0 | "Only the data fork can be compressed.", __func__, |
3256 | 0 | a_fs_attr->type, a_fs_attr->id); |
3257 | 0 | return -1; |
3258 | 0 | } |
3259 | 0 |
|
3260 | 0 | /******** Open the Resource Fork ***********/ |
3261 | 0 | // The file |
3262 | 0 | fs_file = a_fs_attr->fs_file; |
3263 | 0 |
|
3264 | 0 | // find the attribute for the resource fork |
3265 | 0 | rAttr = |
3266 | 0 | tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_HFS_RSRC, |
3267 | 0 | HFS_FS_ATTR_ID_RSRC, TRUE); |
3268 | 0 | if (rAttr == NULL) { |
3269 | 0 | error_returned |
3270 | 0 | (" %s: could not get the attribute for the resource fork of the file", __func__); |
3271 | 0 | return -1; |
3272 | 0 | } |
3273 | 0 |
|
3274 | 0 | // read the offset table from the fork header |
3275 | 0 | if (!read_block_table(rAttr, &offsetTable, &offsetTableSize, &offsetTableOffset)) { |
3276 | 0 | return -1; |
3277 | 0 | } |
3278 | 0 |
|
3279 | 0 | // Compute the range of compression units needed for the request |
3280 | 0 | startUnit = a_offset / COMPRESSION_UNIT_SIZE; |
3281 | 0 | startUnitOffset = a_offset % COMPRESSION_UNIT_SIZE; |
3282 | 0 | endUnit = (a_offset + a_len - 1) / COMPRESSION_UNIT_SIZE; |
3283 | 0 |
|
3284 | 0 | if (startUnit >= offsetTableSize || endUnit >= offsetTableSize) { |
3285 | 0 | error_detected(TSK_ERR_FS_ARG, |
3286 | 0 | "%s: range of bytes requested %lld - %lld falls past the " |
3287 | 0 | "end of the uncompressed stream %llu\n", |
3288 | 0 | __func__, a_offset, a_offset + a_len, |
3289 | 0 | offsetTable[offsetTableSize-1].offset + |
3290 | 0 | offsetTable[offsetTableSize-1].length); |
3291 | 0 | goto on_error; |
3292 | 0 | } |
3293 | 0 |
|
3294 | 0 | if (tsk_verbose) |
3295 | 0 | tsk_fprintf(stderr, |
3296 | 0 | "%s: reading compression units: %" PRIdOFF |
3297 | 0 | " to %" PRIdOFF "\n", __func__, startUnit, endUnit); |
3298 | 0 | bytesCopied = 0; |
3299 | 0 |
|
3300 | 0 | // Allocate buffers for the raw and uncompressed data |
3301 | 0 | /* Raw data can be COMPRESSION_UNIT_SIZE+1 if the zlib data is not |
3302 | 0 | * compressed and there is a 1-byte flag that indicates that |
3303 | 0 | * the data is not compressed. */ |
3304 | 0 | rawBuf = (char *) tsk_malloc(COMPRESSION_UNIT_SIZE + 1); |
3305 | 0 | if (rawBuf == NULL) { |
3306 | 0 | error_returned |
3307 | 0 | (" %s: buffers for reading and uncompressing", __func__); |
3308 | 0 | goto on_error; |
3309 | 0 | } |
3310 | 0 |
|
3311 | 0 | uncBuf = (char *) tsk_malloc(COMPRESSION_UNIT_SIZE); |
3312 | 0 | if (uncBuf == NULL) { |
3313 | 0 | error_returned |
3314 | 0 | (" %s: buffers for reading and uncompressing", __func__); |
3315 | 0 | goto on_error; |
3316 | 0 | } |
3317 | 0 |
|
3318 | 0 | // Read from the indicated comp units |
3319 | 0 | for (indx = startUnit; indx <= endUnit; ++indx) { |
3320 | 0 | uint64_t uncLen; |
3321 | 0 | char *uncBufPtr = uncBuf; |
3322 | 0 | size_t bytesToCopy; |
3323 | 0 |
|
3324 | 0 | switch ((uncLen = read_and_decompress_block( |
3325 | 0 | rAttr, rawBuf, uncBuf, |
3326 | 0 | offsetTable, offsetTableSize, offsetTableOffset, (size_t)indx, |
3327 | 0 | decompress_block))) |
3328 | 0 | { |
3329 | 0 | case -1: |
3330 | 0 | goto on_error; |
3331 | 0 | case 0: |
3332 | 0 | continue; |
3333 | 0 | default: |
3334 | 0 | break; |
3335 | 0 | } |
3336 | 0 |
|
3337 | 0 | // If this is the first comp unit, then we must skip over the |
3338 | 0 | // startUnitOffset bytes. |
3339 | 0 | if (indx == startUnit) { |
3340 | 0 | uncLen -= startUnitOffset; |
3341 | 0 | uncBufPtr += startUnitOffset; |
3342 | 0 | } |
3343 | 0 |
|
3344 | 0 | // How many bytes to copy from this compression unit? |
3345 | 0 |
|
3346 | 0 | if (bytesCopied + uncLen < (uint64_t) a_len) // cast OK because a_len > 0 |
3347 | 0 | bytesToCopy = (size_t) uncLen; // uncLen <= size of compression unit, which is small, so cast is OK |
3348 | 0 | else |
3349 | 0 | bytesToCopy = (size_t) (((uint64_t) a_len) - bytesCopied); // diff <= compression unit size, so cast is OK |
3350 | 0 |
|
3351 | 0 | // Copy into the output buffer, and update bookkeeping. |
3352 | 0 | memcpy(a_buf + bytesCopied, uncBufPtr, bytesToCopy); |
3353 | 0 | bytesCopied += bytesToCopy; |
3354 | 0 | } |
3355 | 0 |
|
3356 | 0 | // Well, we don't know (without a lot of work) what the |
3357 | 0 | // true uncompressed size of the stream is. All we know is the "upper bound" which |
3358 | 0 | // assumes that all of the compression units expand to their full size. If we did |
3359 | 0 | // know the true size, then we could reject requests that go beyond the end of the |
3360 | 0 | // stream. Instead, we treat the stream as if it is padded out to the full size of |
3361 | 0 | // the last compression unit with zeros. |
3362 | 0 |
|
3363 | 0 | // Have we read and copied all of the bytes requested? |
3364 | 0 | if (bytesCopied < a_len) { |
3365 | 0 | // set the remaining bytes to zero |
3366 | 0 | memset(a_buf + bytesCopied, 0, a_len - (size_t) bytesCopied); // cast OK because diff must be < compression unit size |
3367 | 0 | } |
3368 | 0 |
|
3369 | 0 | free(offsetTable); |
3370 | 0 | free(rawBuf); |
3371 | 0 | free(uncBuf); |
3372 | 0 |
|
3373 | 0 | return (ssize_t) bytesCopied; // cast OK, cannot be greater than a_len which cannot be greater than SIZE_MAX/2 (rounded down). |
3374 | 0 |
|
3375 | 0 | on_error: |
3376 | 0 | free(offsetTable); |
3377 | 0 | free(rawBuf); |
3378 | 0 | free(uncBuf); |
3379 | 0 | return -1; |
3380 | 0 | } |
3381 | | |
3382 | | |
3383 | | #ifdef HAVE_LIBZ |
3384 | | /** |
3385 | | * \internal |
3386 | | * Read a ZLIB compressed resource |
3387 | | * |
3388 | | * @param fs_attr the attribute to read |
3389 | | * @param a_offset the offset from which to read |
3390 | | * @param a_buf the buffer into which to read |
3391 | | * @param a_len the length of the buffer |
3392 | | * @return number of bytes read or -1 on error (incl if offset is past EOF) |
3393 | | */ |
3394 | | static ssize_t |
3395 | | hfs_file_read_zlib_rsrc(const TSK_FS_ATTR * a_fs_attr, |
3396 | | TSK_OFF_T a_offset, char *a_buf, size_t a_len) |
3397 | 0 | { |
3398 | 0 | return hfs_file_read_compressed_rsrc( |
3399 | 0 | a_fs_attr, a_offset, a_buf, a_len, |
3400 | 0 | hfs_read_zlib_block_table, |
3401 | 0 | hfs_decompress_zlib_block |
3402 | 0 | ); |
3403 | 0 | } |
3404 | | #endif |
3405 | | |
3406 | | |
3407 | | /** |
3408 | | * \internal |
3409 | | * Read an LZVN compressed resource |
3410 | | * |
3411 | | * @param fs_attr the attribute to read |
3412 | | * @param a_offset the offset from which to read |
3413 | | * @param a_buf the buffer into which to read |
3414 | | * @param a_len the length of the buffer |
3415 | | * @return number of bytes read or -1 on error (incl if offset is past EOF) |
3416 | | */ |
3417 | | static ssize_t |
3418 | | hfs_file_read_lzvn_rsrc(const TSK_FS_ATTR * a_fs_attr, |
3419 | | TSK_OFF_T a_offset, char *a_buf, size_t a_len) |
3420 | 0 | { |
3421 | 0 | return hfs_file_read_compressed_rsrc( |
3422 | 0 | a_fs_attr, a_offset, a_buf, a_len, |
3423 | 0 | hfs_read_lzvn_block_table, |
3424 | 0 | hfs_decompress_lzvn_block |
3425 | 0 | ); |
3426 | 0 | } |
3427 | | |
3428 | | |
3429 | | /** |
3430 | | * \internal |
3431 | | * "Decompress" an uncompressed attr |
3432 | | * |
3433 | | * HFS+ compression schemes allow for some blocks to be stored uncompressed. |
3434 | | * |
3435 | | * @param rawBuf source buffer |
3436 | | * @param rawSize size of source buffer |
3437 | | * @param uncSize expected uncompressed size |
3438 | | * @param dstBuf destination buffer |
3439 | | * @param dstSize size of destination buffer |
3440 | | * @param dstBufFree true iff the caller must free the destination buffer |
3441 | | * @return 1 |
3442 | | */ |
3443 | 0 | static int hfs_decompress_noncompressed_attr(char* rawBuf, uint32_t rawSize, uint64_t uncSize, char** dstBuf, uint64_t* dstSize, int* dstBufFree) { |
3444 | 0 | if (tsk_verbose) |
3445 | 0 | tsk_fprintf(stderr, |
3446 | 0 | "%s: Leading byte, 0x%02x, indicates that the data is not really compressed.\n" |
3447 | 0 | "%s: Loading the default DATA attribute.", __func__, rawBuf[0], __func__); |
3448 | 0 |
|
3449 | 0 | *dstBuf = rawBuf + 1; // + 1 indicator byte |
3450 | 0 | *dstSize = uncSize; |
3451 | 0 | *dstBufFree = FALSE; |
3452 | 0 | return 1; |
3453 | 0 | } |
3454 | | |
3455 | | |
3456 | | /** |
3457 | | * \internal |
3458 | | * Decompress a ZLIB compressed attr |
3459 | | * |
3460 | | * @param rawBuf source buffer |
3461 | | * @param rawSize size of source buffer |
3462 | | * @param uncSize expected uncompressed size |
3463 | | * @param dstBuf destination buffer |
3464 | | * @param dstSize size of destination buffer |
3465 | | * @param dstBufFree true iff the caller must free the destination buffer |
3466 | | * @return 1 on success, 0 on error |
3467 | | */ |
3468 | | static int hfs_decompress_zlib_attr(char* rawBuf, uint32_t rawSize, uint64_t uncSize, char** dstBuf, uint64_t* dstSize, int* dstBufFree) |
3469 | 0 | { |
3470 | 0 | // ZLIB blocks cannot start with 0xF as the low nibble, so that's used |
3471 | 0 | // as the flag for noncompressed blocks |
3472 | 0 | if ((rawBuf[0] & 0x0F) == 0x0F) { |
3473 | 0 | return hfs_decompress_noncompressed_attr( |
3474 | 0 | rawBuf, rawSize, uncSize, dstBuf, dstSize, dstBufFree); |
3475 | 0 | } |
3476 | 0 | else { |
3477 | 0 | #ifdef HAVE_LIBZ |
3478 | 0 | char* uncBuf = NULL; |
3479 | 0 | uint64_t uLen; |
3480 | 0 | unsigned long bytesConsumed; |
3481 | 0 | int infResult; |
3482 | 0 |
|
3483 | 0 | if (tsk_verbose) |
3484 | 0 | tsk_fprintf(stderr, |
3485 | 0 | "%s: Uncompressing (inflating) data.", __func__); |
3486 | 0 | // Uncompress the remainder of the attribute, and load as 128-0 |
3487 | 0 | // Note: cast is OK because uncSize will be quite modest, < 4000. |
3488 | 0 |
|
3489 | 0 | uncBuf = (char *) tsk_malloc((size_t) uncSize + 100); // add some extra space |
3490 | 0 | if (uncBuf == NULL) { |
3491 | 0 | error_returned |
3492 | 0 | (" - %s, space for the uncompressed attr", __func__); |
3493 | 0 | return 0; |
3494 | 0 | } |
3495 | 0 |
|
3496 | 0 | infResult = zlib_inflate(rawBuf, (uint64_t) rawSize, |
3497 | 0 | uncBuf, (uint64_t) (uncSize + 100), |
3498 | 0 | &uLen, &bytesConsumed); |
3499 | 0 | if (infResult != 0) { |
3500 | 0 | error_returned |
3501 | 0 | (" %s, zlib could not uncompress attr", __func__); |
3502 | 0 | free(uncBuf); |
3503 | 0 | return 0; |
3504 | 0 | } |
3505 | 0 |
|
3506 | 0 | if (bytesConsumed != rawSize) { |
3507 | 0 | error_detected(TSK_ERR_FS_READ, |
3508 | 0 | " %s, decompressor did not consume the whole compressed data", |
3509 | 0 | __func__); |
3510 | 0 | free(uncBuf); |
3511 | 0 | return 0; |
3512 | 0 | } |
3513 | 0 |
|
3514 | 0 | *dstBuf = uncBuf; |
3515 | 0 | *dstSize = uncSize; |
3516 | 0 | *dstBufFree = TRUE; |
3517 | 0 | #else |
3518 | 0 | // ZLIB compression library is not available, so we will load a |
3519 | 0 | // zero-length default DATA attribute. Without this, icat may |
3520 | 0 | // misbehave. |
3521 | 0 |
|
3522 | 0 | if (tsk_verbose) |
3523 | 0 | tsk_fprintf(stderr, |
3524 | 0 | "%s: ZLIB not available, so loading an empty default DATA attribute.\n", __func__); |
3525 | 0 |
|
3526 | 0 | // Dummy is one byte long, so the ptr is not null, but we set the |
3527 | 0 | // length to zero bytes, so it is never read. |
3528 | 0 | static uint8_t dummy[1]; |
3529 | 0 |
|
3530 | 0 | *dstBuf = dummy; |
3531 | 0 | *dstSize = 0; |
3532 | 0 | *dstBufFree = FALSE; |
3533 | 0 | #endif |
3534 | 0 | } |
3535 | 0 |
|
3536 | 0 | return 1; |
3537 | 0 | } |
3538 | | |
3539 | | |
3540 | | /** |
3541 | | * \internal |
3542 | | * Decompress an LZVN compressed attr |
3543 | | * |
3544 | | * @param rawBuf source buffer |
3545 | | * @param rawSize size of source buffer |
3546 | | * @param uncSize expected uncompressed size |
3547 | | * @param dstBuf destination buffer |
3548 | | * @param dstSize size of destination buffer |
3549 | | * @param dstBufFree true iff the caller must free the destination buffer |
3550 | | * @return 1 on success, 0 on error |
3551 | | */ |
3552 | | static int hfs_decompress_lzvn_attr(char* rawBuf, uint32_t rawSize, uint64_t uncSize, char** dstBuf, uint64_t* dstSize, int* dstBufFree) |
3553 | 0 | { |
3554 | 0 | // LZVN blocks cannot start with 0x06, so that's used as the flag for |
3555 | 0 | // noncompressed blocks |
3556 | 0 | if (rawBuf[0] == 0x06) { |
3557 | 0 | return hfs_decompress_noncompressed_attr( |
3558 | 0 | rawBuf, rawSize, uncSize, dstBuf, dstSize, dstBufFree); |
3559 | 0 | } |
3560 | 0 |
|
3561 | 0 | char* uncBuf = (char *) tsk_malloc((size_t) uncSize); |
3562 | 0 | *dstSize = lzvn_decode_buffer(uncBuf, uncSize, rawBuf, rawSize); |
3563 | 0 | *dstBuf = uncBuf; |
3564 | 0 | *dstBufFree = TRUE; |
3565 | 0 |
|
3566 | 0 | return 1; |
3567 | 0 | } |
3568 | | |
3569 | | |
3570 | | /** |
3571 | | * \internal |
3572 | | * Read a compressed attr |
3573 | | * |
3574 | | * @param fs_file the file |
3575 | | * @param cmpType compression type |
3576 | | * @param buffer destination buffer |
3577 | | * @param attributeLength length of the attribute |
3578 | | * @param uncSize uncompressed size |
3579 | | * @param decompress_attr pointer to the decompression function |
3580 | | * @return 1 on success, 0 on error |
3581 | | */ |
3582 | | static int |
3583 | | hfs_file_read_compressed_attr(TSK_FS_FILE* fs_file, |
3584 | | uint8_t cmpType, |
3585 | | char* buffer, |
3586 | | uint32_t attributeLength, |
3587 | | uint64_t uncSize, |
3588 | | int (*decompress_attr)(char* rawBuf, |
3589 | | uint32_t rawSize, |
3590 | | uint64_t uncSize, |
3591 | | char** dstBuf, |
3592 | | uint64_t* dstSize, |
3593 | | int* dstBufFree)) |
3594 | 0 | { |
3595 | 0 | // Data is inline. We will load the uncompressed data as a |
3596 | 0 | // resident attribute. |
3597 | 0 | if (tsk_verbose) |
3598 | 0 | tsk_fprintf(stderr, |
3599 | 0 | "%s: Compressed data is inline in the attribute, will load this as the default DATA attribute.\n", __func__); |
3600 | 0 |
|
3601 | 0 | if (attributeLength <= 16) { |
3602 | 0 | if (tsk_verbose) |
3603 | 0 | tsk_fprintf(stderr, |
3604 | 0 | "%s: WARNING, Compression Record of type %u is not followed by" |
3605 | 0 | " compressed data. No data will be loaded into the DATA" |
3606 | 0 | " attribute.\n", __func__, cmpType); |
3607 | 0 |
|
3608 | 0 | // oddly, this is not actually considered an error |
3609 | 0 | return 1; |
3610 | 0 | } |
3611 | 0 |
|
3612 | 0 | TSK_FS_ATTR *fs_attr_unc; |
3613 | 0 |
|
3614 | 0 | // There is data following the compression record, as there should be. |
3615 | 0 | if ((fs_attr_unc = tsk_fs_attrlist_getnew( |
3616 | 0 | fs_file->meta->attr, TSK_FS_ATTR_RES)) == NULL) |
3617 | 0 | { |
3618 | 0 | error_returned(" - %s, FS_ATTR for uncompressed data", __func__); |
3619 | 0 | return 0; |
3620 | 0 | } |
3621 | 0 |
|
3622 | 0 | char* dstBuf; |
3623 | 0 | uint64_t dstSize; |
3624 | 0 | int dstBufFree = FALSE; |
3625 | 0 |
|
3626 | 0 | if (!decompress_attr(buffer + 16, attributeLength - 16, uncSize, |
3627 | 0 | &dstBuf, &dstSize, &dstBufFree)) { |
3628 | 0 | return 0; |
3629 | 0 | } |
3630 | 0 |
|
3631 | 0 | if (dstSize != uncSize) { |
3632 | 0 | error_detected(TSK_ERR_FS_READ, |
3633 | 0 | " %s, actual uncompressed size not equal to the size in the compression record", __func__); |
3634 | 0 | goto on_error; |
3635 | 0 | } |
3636 | 0 |
|
3637 | 0 | if (tsk_verbose) |
3638 | 0 | tsk_fprintf(stderr, |
3639 | 0 | "%s: Loading decompressed data as default DATA attribute.", |
3640 | 0 | __func__); |
3641 | 0 |
|
3642 | 0 | // Load the remainder of the attribute as 128-0 |
3643 | 0 | // set the details in the fs_attr structure. |
3644 | 0 | // Note, we are loading this as a RESIDENT attribute. |
3645 | 0 | if (tsk_fs_attr_set_str(fs_file, |
3646 | 0 | fs_attr_unc, "DATA", |
3647 | 0 | TSK_FS_ATTR_TYPE_HFS_DATA, |
3648 | 0 | HFS_FS_ATTR_ID_DATA, dstBuf, |
3649 | 0 | dstSize)) |
3650 | 0 | { |
3651 | 0 | error_returned(" - %s", __func__); |
3652 | 0 | goto on_error; |
3653 | 0 | } |
3654 | 0 |
|
3655 | 0 | if (dstBufFree) { |
3656 | 0 | free(dstBuf); |
3657 | 0 | } |
3658 | 0 | return 1; |
3659 | 0 |
|
3660 | 0 | on_error: |
3661 | 0 | if (dstBufFree) { |
3662 | 0 | free(dstBuf); |
3663 | 0 | } |
3664 | 0 | return 0; |
3665 | 0 | } |
3666 | | |
3667 | | |
3668 | | /** |
3669 | | * \internal |
3670 | | * Read a ZLIB compressed attr |
3671 | | * |
3672 | | * @param fs_file the file |
3673 | | * @param buffer destination buffer |
3674 | | * @param attributeLength length of the attribute |
3675 | | * @param uncSize uncompressed size |
3676 | | * @return 1 on success, 0 on error |
3677 | | */ |
3678 | | static int hfs_file_read_zlib_attr(TSK_FS_FILE* fs_file, |
3679 | | char* buffer, |
3680 | | uint32_t attributeLength, |
3681 | | uint64_t uncSize) |
3682 | 0 | { |
3683 | 0 | return hfs_file_read_compressed_attr( |
3684 | 0 | fs_file, DECMPFS_TYPE_ZLIB_ATTR, |
3685 | 0 | buffer, attributeLength, uncSize, |
3686 | 0 | hfs_decompress_zlib_attr |
3687 | 0 | ); |
3688 | 0 | } |
3689 | | |
3690 | | |
3691 | | /** |
3692 | | * \internal |
3693 | | * Read an LZVN compressed attr |
3694 | | * |
3695 | | * @param fs_file the file |
3696 | | * @param buffer destination buffer |
3697 | | * @param attributeLength length of the attribute |
3698 | | * @param uncSize uncompressed size |
3699 | | * @return 1 on success, 0 on error |
3700 | | */ |
3701 | | static int hfs_file_read_lzvn_attr(TSK_FS_FILE* fs_file, |
3702 | | char* buffer, |
3703 | | uint32_t attributeLength, |
3704 | | uint64_t uncSize) |
3705 | 0 | { |
3706 | 0 | return hfs_file_read_compressed_attr( |
3707 | 0 | fs_file, DECMPFS_TYPE_LZVN_ATTR, |
3708 | 0 | buffer, attributeLength, uncSize, |
3709 | 0 | hfs_decompress_lzvn_attr |
3710 | 0 | ); |
3711 | 0 | } |
3712 | | |
3713 | | |
// Bundles an open handle to the HFS+ Attributes file together with the
// fields of its B-tree header record that this module needs repeatedly.
// Populated by open_attr_file(); released by close_attr_file().
typedef struct {
    TSK_FS_INFO *fs;            // the HFS file system
    TSK_FS_FILE *file;          // the Attributes file, if open
    hfs_btree_header_record *header;    // the Attributes btree header record.
    // For Convenience, unpacked values.
    TSK_ENDIAN_ENUM endian;     // byte order, copied from fs->endian
    uint32_t rootNode;          // node ID of the btree root; 0 means the tree is empty
    uint16_t nodeSize;          // size in bytes of each btree node
    uint16_t maxKeyLen;         // maximum key length, from the header record
} ATTR_FILE_T;
3724 | | |
3725 | | |
3726 | | /** \internal |
3727 | | * Open the Attributes file, and read the btree header record. Fill in the fields of the ATTR_FILE_T struct. |
3728 | | * |
3729 | | * @param fs -- the HFS file system |
3730 | | * @param header -- the header record struct |
3731 | | * |
3732 | | * @return 1 on error, 0 on success |
3733 | | */ |
3734 | | static uint8_t |
3735 | | open_attr_file(TSK_FS_INFO * fs, ATTR_FILE_T * attr_file) |
3736 | 0 | { |
3737 | |
|
3738 | 0 | ssize_t cnt; // will hold bytes read |
3739 | |
|
3740 | 0 | hfs_btree_header_record *hrec; |
3741 | | |
3742 | | // clean up any error messages that are lying around |
3743 | 0 | tsk_error_reset(); |
3744 | |
|
3745 | 0 | if (fs == NULL) { |
3746 | 0 | tsk_error_set_errno(TSK_ERR_FS_ARG); |
3747 | 0 | tsk_error_set_errstr("open_attr_file: fs is NULL"); |
3748 | 0 | return 1; |
3749 | 0 | } |
3750 | | |
3751 | 0 | if (attr_file == NULL) { |
3752 | 0 | tsk_error_set_errno(TSK_ERR_FS_ARG); |
3753 | 0 | tsk_error_set_errstr("open_attr_file: attr_file is NULL"); |
3754 | 0 | return 1; |
3755 | 0 | } |
3756 | | |
3757 | | // Open the Attributes File |
3758 | 0 | attr_file->file = |
3759 | 0 | tsk_fs_file_open_meta(fs, NULL, HFS_ATTRIBUTES_FILE_ID); |
3760 | |
|
3761 | 0 | if (attr_file->file == NULL) { |
3762 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
3763 | 0 | tsk_error_set_errstr |
3764 | 0 | ("open_attr_file: could not open the Attributes file"); |
3765 | 0 | return 1; |
3766 | 0 | } |
3767 | | |
3768 | | // Allocate some space for the Attributes btree header record (which |
3769 | | // is passed back to the caller) |
3770 | 0 | hrec = (hfs_btree_header_record *) |
3771 | 0 | malloc(sizeof(hfs_btree_header_record)); |
3772 | |
|
3773 | 0 | if (hrec == NULL) { |
3774 | 0 | tsk_error_set_errno(TSK_ERR_FS); |
3775 | 0 | tsk_error_set_errstr |
3776 | 0 | ("open_attr_file: could not malloc space for Attributes header record"); |
3777 | 0 | return 1; |
3778 | 0 | } |
3779 | | |
3780 | | // Read the btree header record |
3781 | 0 | cnt = tsk_fs_file_read(attr_file->file, |
3782 | 0 | 14, |
3783 | 0 | (char *) hrec, |
3784 | 0 | sizeof(hfs_btree_header_record), (TSK_FS_FILE_READ_FLAG_ENUM) 0); |
3785 | 0 | if (cnt != (ssize_t)sizeof(hfs_btree_header_record)) { |
3786 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
3787 | 0 | tsk_error_set_errstr |
3788 | 0 | ("open_attr_file: could not open the Attributes file"); |
3789 | 0 | tsk_fs_file_close(attr_file->file); |
3790 | 0 | free(hrec); |
3791 | 0 | return 1; |
3792 | 0 | } |
3793 | | |
3794 | | // Fill in the fields of the attr_file struct (which was passed in by the caller) |
3795 | 0 | attr_file->fs = fs; |
3796 | 0 | attr_file->header = hrec; |
3797 | 0 | attr_file->endian = fs->endian; |
3798 | 0 | attr_file->nodeSize = tsk_getu16(attr_file->endian, hrec->nodesize); |
3799 | 0 | attr_file->rootNode = tsk_getu32(attr_file->endian, hrec->rootNode); |
3800 | 0 | attr_file->maxKeyLen = tsk_getu16(attr_file->endian, hrec->maxKeyLen); |
3801 | |
|
3802 | 0 | return 0; |
3803 | 0 | } |
3804 | | |
3805 | | |
3806 | | /** \internal |
3807 | | * Closes and frees the data structures associated with ATTR_FILE_T |
3808 | | */ |
3809 | | static uint8_t |
3810 | | close_attr_file(ATTR_FILE_T * attr_file) |
3811 | 0 | { |
3812 | 0 | if (attr_file == NULL) { |
3813 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
3814 | 0 | tsk_error_set_errstr("close_attr_file: NULL attr_file arg"); |
3815 | 0 | return 1; |
3816 | 0 | } |
3817 | | |
3818 | 0 | if (attr_file->file != NULL) { |
3819 | 0 | tsk_fs_file_close(attr_file->file); |
3820 | 0 | attr_file->file = NULL; |
3821 | 0 | } |
3822 | |
|
3823 | 0 | free(attr_file->header); |
3824 | 0 | attr_file->header = NULL; |
3825 | |
|
3826 | 0 | attr_file->rootNode = 0; |
3827 | 0 | attr_file->nodeSize = 0; |
3828 | | // Note that we leave the fs component alone. |
3829 | 0 | return 0; |
3830 | 0 | } |
3831 | | |
3832 | | |
3833 | | static const char * |
3834 | | hfs_attrTypeName(uint32_t typeNum) |
3835 | 0 | { |
3836 | 0 | switch (typeNum) { |
3837 | 0 | case TSK_FS_ATTR_TYPE_HFS_DEFAULT: |
3838 | 0 | return "DFLT"; |
3839 | 0 | case TSK_FS_ATTR_TYPE_HFS_DATA: |
3840 | 0 | return "DATA"; |
3841 | 0 | case TSK_FS_ATTR_TYPE_HFS_EXT_ATTR: |
3842 | 0 | return "ExATTR"; |
3843 | 0 | case TSK_FS_ATTR_TYPE_HFS_COMP_REC: |
3844 | 0 | return "CMPF"; |
3845 | 0 | case TSK_FS_ATTR_TYPE_HFS_RSRC: |
3846 | 0 | return "RSRC"; |
3847 | 0 | default: |
3848 | 0 | return "UNKN"; |
3849 | 0 | } |
3850 | 0 | } |
3851 | | |
3852 | | |
3853 | | // TODO: Function description missing here no idea what it is supposed to return |
3854 | | // in which circumstances. |
3855 | | static uint8_t |
3856 | | hfs_load_extended_attrs(TSK_FS_FILE * fs_file, |
3857 | | unsigned char *isCompressed, unsigned char *cmpType, |
3858 | | uint64_t *uncompressedSize) |
3859 | 0 | { |
3860 | 0 | TSK_FS_INFO *fs = fs_file->fs_info; |
3861 | 0 | uint64_t fileID; |
3862 | 0 | ATTR_FILE_T attrFile; |
3863 | 0 | uint8_t *nodeData; |
3864 | 0 | TSK_ENDIAN_ENUM endian; |
3865 | 0 | hfs_btree_node *nodeDescriptor; // The node descriptor |
3866 | 0 | uint32_t nodeID; // The number or ID of the Attributes file node to read. |
3867 | 0 | hfs_btree_key_attr *keyB; // ptr to the key of the Attr file record. |
3868 | 0 | unsigned char done; // Flag to indicate that we are done looping over leaf nodes |
3869 | 0 | uint16_t attribute_counter = 2; // The ID of the next attribute to be loaded. |
3870 | 0 | HFS_INFO *hfs; |
3871 | 0 | char *buffer = NULL; // buffer to hold the attribute |
3872 | 0 | TSK_LIST *nodeIDs_processed = NULL; // Keep track of node IDs to prevent an infinite loop |
3873 | 0 | ssize_t cnt; // count of chars read from file. |
3874 | |
|
3875 | 0 | tsk_error_reset(); |
3876 | | |
3877 | | // The CNID (or inode number) of the file |
3878 | | // Note that in TSK such numbers are 64 bits, but in HFS+ they are only 32 bits. |
3879 | 0 | fileID = fs_file->meta->addr; |
3880 | |
|
3881 | 0 | if (fs == NULL) { |
3882 | 0 | error_detected(TSK_ERR_FS_ARG, |
3883 | 0 | "hfs_load_extended_attrs: NULL fs arg"); |
3884 | 0 | return 1; |
3885 | 0 | } |
3886 | | |
3887 | 0 | hfs = (HFS_INFO *) fs; |
3888 | |
|
3889 | 0 | if (!hfs->has_attributes_file) { |
3890 | | // No attributes file, and so, no extended attributes |
3891 | 0 | return 0; |
3892 | 0 | } |
3893 | | |
3894 | 0 | if (tsk_verbose) { |
3895 | 0 | tsk_fprintf(stderr, |
3896 | 0 | "hfs_load_extended_attrs: Processing file %" PRIuINUM "\n", |
3897 | 0 | fileID); |
3898 | 0 | } |
3899 | | |
3900 | | // Open the Attributes File |
3901 | 0 | if (open_attr_file(fs, &attrFile)) { |
3902 | 0 | error_returned |
3903 | 0 | ("hfs_load_extended_attrs: could not open Attributes file"); |
3904 | 0 | return 1; |
3905 | 0 | } |
3906 | | |
3907 | | // Is the Attributes file empty? |
3908 | 0 | if (attrFile.rootNode == 0) { |
3909 | 0 | if (tsk_verbose) |
3910 | 0 | tsk_fprintf(stderr, |
3911 | 0 | "hfs_load_extended_attrs: Attributes file is empty\n"); |
3912 | 0 | close_attr_file(&attrFile); |
3913 | 0 | *isCompressed = FALSE; |
3914 | 0 | *cmpType = 0; |
3915 | 0 | return 0; |
3916 | 0 | } |
3917 | | |
3918 | 0 | if (attrFile.nodeSize < sizeof(hfs_btree_node)) { |
3919 | 0 | error_returned |
3920 | 0 | ("hfs_load_extended_attrs: node size too small"); |
3921 | 0 | close_attr_file(&attrFile); |
3922 | 0 | return 1; |
3923 | 0 | } |
3924 | | |
3925 | | // A place to hold one node worth of data |
3926 | 0 | nodeData = (uint8_t *) malloc(attrFile.nodeSize); |
3927 | 0 | if (nodeData == NULL) { |
3928 | 0 | error_detected(TSK_ERR_AUX_MALLOC, |
3929 | 0 | "hfs_load_extended_attrs: Could not malloc space for an Attributes file node"); |
3930 | 0 | goto on_error; |
3931 | 0 | } |
3932 | | |
3933 | | // Initialize these |
3934 | 0 | *isCompressed = FALSE; |
3935 | 0 | *cmpType = 0; |
3936 | |
|
3937 | 0 | endian = attrFile.fs->endian; |
3938 | | |
3939 | | // Start with the root node |
3940 | 0 | nodeID = attrFile.rootNode; |
3941 | | |
3942 | | // While loop, over nodes in path from root node to the correct LEAF node. |
3943 | 0 | while (1) { |
3944 | 0 | uint16_t numRec; // Number of records in the node |
3945 | 0 | int recIndx; // index for looping over records |
3946 | |
|
3947 | 0 | if (tsk_verbose) { |
3948 | 0 | tsk_fprintf(stderr, |
3949 | 0 | "hfs_load_extended_attrs: Reading Attributes File node with ID %" |
3950 | 0 | PRIu32 "\n", nodeID); |
3951 | 0 | } |
3952 | | |
3953 | | /* Make sure we do not get into an infinite loop */ |
3954 | 0 | if (tsk_list_find(nodeIDs_processed, nodeID)) { |
3955 | 0 | error_detected(TSK_ERR_FS_READ, |
3956 | 0 | "hfs_load_extended_attrs: Infinite loop detected - trying to read node %" PRIu32 " which has already been processed", nodeID); |
3957 | 0 | goto on_error; |
3958 | 0 | } |
3959 | | |
3960 | | |
3961 | | /* Read the node */ |
3962 | 0 | cnt = tsk_fs_file_read(attrFile.file, |
3963 | 0 | (TSK_OFF_T)nodeID * attrFile.nodeSize, |
3964 | 0 | (char *) nodeData, |
3965 | 0 | attrFile.nodeSize, (TSK_FS_FILE_READ_FLAG_ENUM) 0); |
3966 | 0 | if (cnt != (ssize_t)attrFile.nodeSize) { |
3967 | 0 | error_returned |
3968 | 0 | ("hfs_load_extended_attrs: Could not read in a node from the Attributes File"); |
3969 | 0 | goto on_error; |
3970 | 0 | } |
3971 | | |
3972 | | /* Save this node ID to the list of processed nodes */ |
3973 | 0 | if (tsk_list_add(&nodeIDs_processed, nodeID)) { |
3974 | 0 | error_detected(TSK_ERR_FS_READ, |
3975 | 0 | "hfs_load_extended_attrs: Could not save nodeID to the list of processed nodes"); |
3976 | 0 | goto on_error; |
3977 | 0 | } |
3978 | | |
3979 | | /** Node has a: |
3980 | | * Descriptor |
3981 | | * Set of records |
3982 | | * Table at the end with pointers to the records |
3983 | | */ |
3984 | | // Parse the Node header |
3985 | 0 | nodeDescriptor = (hfs_btree_node *) nodeData; |
3986 | | |
3987 | | // If we are at a leaf node, then we have found the right node |
3988 | 0 | if (nodeDescriptor->type == HFS_ATTR_NODE_LEAF) { |
3989 | 0 | break; |
3990 | 0 | } |
3991 | | |
3992 | | // This had better be an INDEX node, if not its an error |
3993 | 0 | else if (nodeDescriptor->type != HFS_ATTR_NODE_INDEX) { |
3994 | 0 | error_detected(TSK_ERR_FS_READ, |
3995 | 0 | "hfs_load_extended_attrs: Reached a non-INDEX and non-LEAF node in searching the Attributes File"); |
3996 | 0 | goto on_error; |
3997 | 0 | } |
3998 | | |
3999 | | // OK, we are in an INDEX node. loop over the records to find the last one whose key is |
4000 | | // smaller than or equal to the desired key |
4001 | | |
4002 | 0 | numRec = tsk_getu16(endian, nodeDescriptor->num_rec); |
4003 | 0 | if (numRec == 0) { |
4004 | | // This is wrong, there must always be at least 1 record in an INDEX node. |
4005 | 0 | error_detected(TSK_ERR_FS_READ, |
4006 | 0 | "hfs_load_extended_attrs:Attributes File index node %" |
4007 | 0 | PRIu32 " has zero records", nodeID); |
4008 | 0 | goto on_error; |
4009 | 0 | } |
4010 | | |
4011 | 0 | for (recIndx = 0; recIndx < numRec; ++recIndx) { |
4012 | 0 | uint16_t keyLength; |
4013 | 0 | int comp; // comparison result |
4014 | 0 | char *compStr; // comparison result, as a string |
4015 | 0 | uint8_t *recData; // pointer to the data part of the record |
4016 | 0 | uint32_t keyFileID; |
4017 | |
|
4018 | 0 | if ((attrFile.nodeSize < 2) || (recIndx > ((attrFile.nodeSize - 2) / 2))) { |
4019 | 0 | error_detected(TSK_ERR_FS_READ, |
4020 | 0 | "hfs_load_extended_attrs: Unable to process attribute (recIndx exceeds attrFile.nodeSize)"); |
4021 | 0 | goto on_error; |
4022 | 0 | } |
4023 | | |
4024 | | // The offset to the record is stored in table at end of node |
4025 | 0 | uint8_t *recOffsetTblEntry = &nodeData[attrFile.nodeSize - (2 * (recIndx + 1))]; // data describing where this record is |
4026 | 0 | uint16_t recOffset = tsk_getu16(endian, recOffsetTblEntry); |
4027 | | //uint8_t * nextRecOffsetData = &nodeData[attrFile.nodeSize - 2* (recIndx+2)]; |
4028 | | |
4029 | | // make sure the record and first fields are in the buffer |
4030 | 0 | if ((attrFile.nodeSize < 14) || (recOffset >= attrFile.nodeSize - 14)) { |
4031 | 0 | error_detected(TSK_ERR_FS_READ, |
4032 | 0 | "hfs_load_extended_attrs: Unable to process attribute (offset too big)"); |
4033 | 0 | goto on_error; |
4034 | 0 | } |
4035 | | |
4036 | | // Pointer to first byte of record |
4037 | 0 | uint8_t *recordBytes = &nodeData[recOffset]; |
4038 | | |
4039 | | |
4040 | | // Cast that to the Attributes file key (n.b., the key is the first thing in the record) |
4041 | 0 | keyB = (hfs_btree_key_attr *) recordBytes; |
4042 | | |
4043 | | // Is this key less than what we are seeking? |
4044 | | //int comp = comp_attr_key(endian, keyB, fileID, attrName, startBlock); |
4045 | |
|
4046 | 0 | keyFileID = tsk_getu32(endian, keyB->file_id); |
4047 | 0 | if (keyFileID < fileID) { |
4048 | 0 | comp = -1; |
4049 | 0 | compStr = "less than"; |
4050 | 0 | } |
4051 | 0 | else if (keyFileID > fileID) { |
4052 | 0 | comp = 1; |
4053 | 0 | compStr = "greater than"; |
4054 | 0 | } |
4055 | 0 | else { |
4056 | 0 | comp = 0; |
4057 | 0 | compStr = "equal to"; |
4058 | 0 | } |
4059 | 0 | if (tsk_verbose) |
4060 | 0 | tsk_fprintf(stderr, |
4061 | 0 | "hfs_load_extended_attrs: INDEX record %d, fileID %" |
4062 | 0 | PRIu32 " is %s the file ID we are seeking, %" PRIu32 |
4063 | 0 | ".\n", recIndx, keyFileID, compStr, fileID); |
4064 | 0 | if (comp > 0) { |
4065 | | // The key of this record is greater than what we are seeking |
4066 | 0 | if (recIndx == 0) { |
4067 | | // This is the first record, so no records are appropriate |
4068 | | // Nothing in this btree will match. We can stop right here. |
4069 | 0 | goto on_exit; |
4070 | 0 | } |
4071 | | |
4072 | | // This is not the first record, so, the previous record's child is the one we want. |
4073 | 0 | break; |
4074 | 0 | } |
4075 | | |
4076 | | // CASE: key in this record matches the key we are seeking. The previous record's child |
4077 | | // is the one we want. However, if this is the first record, then we want THIS record's child. |
4078 | 0 | if (comp == 0 && recIndx != 0) { |
4079 | 0 | break; |
4080 | 0 | } |
4081 | | |
4082 | | // Extract the child node ID from the record data (stored after the key) |
4083 | 0 | keyLength = tsk_getu16(endian, keyB->key_len); |
4084 | | // make sure the fields we care about are still in the buffer |
4085 | | // +2 is because key_len doesn't include its own length |
4086 | | // +4 is because of the amount of data we read from the data |
4087 | 0 | if ((keyLength > attrFile.nodeSize - 2 - 4) || (recOffset >= attrFile.nodeSize - 2 - 4 - keyLength)) { |
4088 | 0 | error_detected(TSK_ERR_FS_READ, |
4089 | 0 | "hfs_load_extended_attrs: Unable to process attribute"); |
4090 | 0 | goto on_error; |
4091 | 0 | } |
4092 | | |
4093 | 0 | recData = &recordBytes[keyLength + 2]; |
4094 | | |
4095 | | // Data must start on an even offset from the beginning of the record. |
4096 | | // So, correct this if needed. |
4097 | 0 | if ((recData - recordBytes) % 2) { |
4098 | 0 | recData += 1; |
4099 | 0 | } |
4100 | | |
4101 | | // The next four bytes should be the Node ID of the child of this node. |
4102 | 0 | nodeID = tsk_getu32(endian, recData); |
4103 | | |
4104 | | // At this point, either comp<0 or comp=0 && recIndx=0. In the latter case we want to |
4105 | | // descend to the child of this node, so we break. |
4106 | 0 | if (recIndx == 0 && comp == 0) { |
4107 | 0 | break; |
4108 | 0 | } |
4109 | | |
4110 | | // CASE: key in this record is less than key we seek. comp < 0 |
4111 | | // So, continue looping over records in this node. |
4112 | 0 | } // END loop over records |
4113 | |
|
4114 | 0 | } // END while loop over Nodes in path from root to LEAF node |
4115 | | |
4116 | | // At this point nodeData holds the contents of a LEAF node with the right range of keys |
4117 | | // and nodeDescriptor points to the descriptor of that node. |
4118 | | |
4119 | | // Loop over successive LEAF nodes, starting with this one |
4120 | 0 | done = FALSE; |
4121 | 0 | while (!done) { |
4122 | 0 | uint16_t numRec; // number of records |
4123 | 0 | unsigned int recIndx; // index for looping over records |
4124 | |
|
4125 | 0 | if (tsk_verbose) |
4126 | 0 | tsk_fprintf(stderr, |
4127 | 0 | "hfs_load_extended_attrs: Attributes File LEAF Node %" |
4128 | 0 | PRIu32 ".\n", nodeID); |
4129 | 0 | numRec = tsk_getu16(endian, nodeDescriptor->num_rec); |
4130 | | // Note, leaf node could have one (or maybe zero) records |
4131 | | |
4132 | | // Loop over the records in this node |
4133 | 0 | for (recIndx = 0; recIndx < numRec; ++recIndx) { |
4134 | |
|
4135 | 0 | if ((attrFile.nodeSize < 2) || (recIndx > ((attrFile.nodeSize - 2) / 2))) { |
4136 | 0 | error_detected(TSK_ERR_FS_READ, |
4137 | 0 | "hfs_load_extended_attrs: Unable to process attribute (recIndx exceeds attrFile.nodeSize)"); |
4138 | 0 | goto on_error; |
4139 | 0 | } |
4140 | | // The offset to the record is stored in table at end of node |
4141 | 0 | uint8_t *recOffsetTblEntry = &nodeData[attrFile.nodeSize - (2 * (recIndx + 1))]; // data describing where this record is |
4142 | 0 | uint16_t recOffset = tsk_getu16(endian, recOffsetTblEntry); |
4143 | |
|
4144 | 0 | int comp; // comparison result |
4145 | 0 | char *compStr; // comparison result as a string |
4146 | 0 | uint32_t keyFileID; |
4147 | | |
4148 | | // make sure the record and first fields are in the buffer |
4149 | 0 | if (recOffset >= attrFile.nodeSize - 14) { |
4150 | 0 | error_detected(TSK_ERR_FS_READ, |
4151 | 0 | "hfs_load_extended_attrs: Unable to process attribute (offset too big)"); |
4152 | 0 | goto on_error; |
4153 | 0 | } |
4154 | | |
4155 | | // Pointer to first byte of record |
4156 | 0 | uint8_t *recordBytes = &nodeData[recOffset]; |
4157 | | |
4158 | | // Cast that to the Attributes file key |
4159 | 0 | keyB = (hfs_btree_key_attr *) recordBytes; |
4160 | | |
4161 | | // Compare recordBytes key to the key that we are seeking |
4162 | 0 | keyFileID = tsk_getu32(endian, keyB->file_id); |
4163 | | |
4164 | | //fprintf(stdout, " Key file ID = %lu\n", keyFileID); |
4165 | 0 | if (keyFileID < fileID) { |
4166 | 0 | comp = -1; |
4167 | 0 | compStr = "less than"; |
4168 | 0 | } |
4169 | 0 | else if (keyFileID > fileID) { |
4170 | 0 | comp = 1; |
4171 | 0 | compStr = "greater than"; |
4172 | 0 | } |
4173 | 0 | else { |
4174 | 0 | comp = 0; |
4175 | 0 | compStr = "equal to"; |
4176 | 0 | } |
4177 | |
|
4178 | 0 | if (tsk_verbose) |
4179 | 0 | tsk_fprintf(stderr, |
4180 | 0 | "hfs_load_extended_attrs: LEAF Record key file ID %" |
4181 | 0 | PRIu32 " is %s the desired file ID %" PRIu32 "\n", |
4182 | 0 | keyFileID, compStr, fileID); |
4183 | | // Are they the same? |
4184 | 0 | if (comp == 0) { |
4185 | | // Yes, so load this attribute |
4186 | |
|
4187 | 0 | uint8_t *recData; // pointer to the data part of the recordBytes |
4188 | 0 | hfs_attr_data *attrData; |
4189 | 0 | uint32_t attributeLength; |
4190 | 0 | uint32_t nameLength; |
4191 | 0 | uint32_t recordType; |
4192 | 0 | uint16_t keyLength; |
4193 | 0 | int conversionResult; |
4194 | 0 | char nameBuff[HFS_MAX_ATTR_NAME_LEN_UTF8_B+1]; |
4195 | 0 | TSK_FS_ATTR_TYPE_ENUM attrType; |
4196 | 0 | TSK_FS_ATTR *fs_attr; // Points to the attribute to be loaded. |
4197 | |
|
4198 | 0 | keyLength = tsk_getu16(endian, keyB->key_len); |
4199 | | // make sure the fields we care about are still in the buffer |
4200 | | // +2 because key_len doesn't include its own length |
4201 | | // +16 for the amount of data we'll read from data |
4202 | 0 | if ((attrFile.nodeSize < 2 + 16) || (keyLength > attrFile.nodeSize - 2 - 16) || (recOffset >= attrFile.nodeSize - 2 - 16 - keyLength)) { |
4203 | 0 | error_detected(TSK_ERR_FS_READ, |
4204 | 0 | "hfs_load_extended_attrs: Unable to process attribute"); |
4205 | 0 | goto on_error; |
4206 | 0 | } |
4207 | | |
4208 | 0 | recData = &recordBytes[keyLength + 2]; |
4209 | | |
4210 | | // Data must start on an even offset from the beginning of the record. |
4211 | | // So, correct this if needed. |
4212 | 0 | if ((recData - recordBytes) % 2) { |
4213 | 0 | recData += 1; |
4214 | 0 | } |
4215 | |
|
4216 | 0 | attrData = (hfs_attr_data *) recData; |
4217 | | |
4218 | | // Check we can process the record type before allocating memory |
4219 | 0 | recordType = tsk_getu32(endian, attrData->record_type); |
4220 | 0 | if (recordType != HFS_ATTR_RECORD_INLINE_DATA) { |
4221 | 0 | error_detected(TSK_ERR_FS_UNSUPTYPE, |
4222 | 0 | "hfs_load_extended_attrs: Unsupported record type: (%d)", |
4223 | 0 | recordType); |
4224 | 0 | goto on_error; |
4225 | 0 | } |
4226 | | |
4227 | | // This is the length of the useful data, not including the record header |
4228 | 0 | attributeLength = tsk_getu32(endian, attrData->attr_size); |
4229 | | |
4230 | | // Check the attribute fits in the node |
4231 | | //if (recordType != HFS_ATTR_RECORD_INLINE_DATA) { |
4232 | 0 | if ((attributeLength > attrFile.nodeSize - 2 - 16 - keyLength) || (recOffset >= attrFile.nodeSize - 2 - 16 - keyLength - attributeLength)) { |
4233 | 0 | error_detected(TSK_ERR_FS_READ, |
4234 | 0 | "hfs_load_extended_attrs: Unable to process attribute"); |
4235 | 0 | goto on_error; |
4236 | 0 | } |
4237 | | |
4238 | | // attr_name_len is in UTF_16 chars |
4239 | 0 | nameLength = tsk_getu16(endian, keyB->attr_name_len); |
4240 | 0 | if (2 * nameLength > HFS_MAX_ATTR_NAME_LEN_UTF16_B) { |
4241 | 0 | error_detected(TSK_ERR_FS_CORRUPT, |
4242 | 0 | "hfs_load_extended_attrs: Name length in bytes (%d) > max name length in bytes (%d).", |
4243 | 0 | 2*nameLength, HFS_MAX_ATTR_NAME_LEN_UTF16_B); |
4244 | 0 | goto on_error; |
4245 | 0 | } |
4246 | | |
4247 | 0 | if ((int32_t)(2*nameLength) > keyLength - 12) { |
4248 | 0 | error_detected(TSK_ERR_FS_CORRUPT, |
4249 | 0 | "hfs_load_extended_attrs: Name length in bytes (%d) > remaining struct length (%d).", |
4250 | 0 | 2*nameLength, keyLength - 12); |
4251 | 0 | goto on_error; |
4252 | 0 | } |
4253 | | |
4254 | 0 | buffer = tsk_malloc(attributeLength); |
4255 | 0 | if (buffer == NULL) { |
4256 | 0 | error_detected(TSK_ERR_AUX_MALLOC, |
4257 | 0 | "hfs_load_extended_attrs: Could not malloc space for the attribute."); |
4258 | 0 | goto on_error; |
4259 | 0 | } |
4260 | | |
4261 | 0 | memcpy(buffer, attrData->attr_data, attributeLength); |
4262 | | |
4263 | | // Use the "attr_name" part of the key as the attribute name |
4264 | | // but must convert to UTF8. Unfortunately, there does not seem to |
4265 | | // be any easy way to determine how long the converted string will |
4266 | | // be because UTF8 is a variable length encoding. However, the longest |
4267 | | // it will be is 3 * the max number of UTF16 code units. Add one for null |
4268 | | // termination. (thanks Judson!) |
4269 | | |
4270 | |
|
4271 | 0 | conversionResult = hfs_UTF16toUTF8(fs, keyB->attr_name, |
4272 | 0 | nameLength, nameBuff, HFS_MAX_ATTR_NAME_LEN_UTF8_B+1, 0); |
4273 | 0 | if (conversionResult != 0) { |
4274 | 0 | error_returned |
4275 | 0 | ("-- hfs_load_extended_attrs could not convert the attr_name in the btree key into a UTF8 attribute name"); |
4276 | 0 | goto on_error; |
4277 | 0 | } |
4278 | | |
4279 | | // What is the type of this attribute? If it is a compression record, then |
4280 | | // use TSK_FS_ATTR_TYPE_HFS_COMP_REC. Else, use TSK_FS_ATTR_TYPE_HFS_EXT_ATTR |
4281 | | // Only "inline data" kind of record is handled. |
4282 | 0 | if (strcmp(nameBuff, "com.apple.decmpfs") == 0 && |
4283 | 0 | tsk_getu32(endian, attrData->record_type) == HFS_ATTR_RECORD_INLINE_DATA) { |
4284 | | // Now, look at the compression record |
4285 | 0 | DECMPFS_DISK_HEADER *cmph = (DECMPFS_DISK_HEADER *) buffer; |
4286 | 0 | *cmpType = |
4287 | 0 | tsk_getu32(TSK_LIT_ENDIAN, cmph->compression_type); |
4288 | 0 | uint64_t uncSize = tsk_getu64(TSK_LIT_ENDIAN, |
4289 | 0 | cmph->uncompressed_size); |
4290 | |
|
4291 | 0 | if (tsk_verbose) |
4292 | 0 | tsk_fprintf(stderr, |
4293 | 0 | "hfs_load_extended_attrs: This attribute is a compression record.\n"); |
4294 | |
|
4295 | 0 | attrType = TSK_FS_ATTR_TYPE_HFS_COMP_REC; |
4296 | 0 | *isCompressed = TRUE; // The data is governed by a compression record (but might not be compressed) |
4297 | 0 | *uncompressedSize = uncSize; |
4298 | |
|
4299 | 0 | switch (*cmpType) { |
4300 | | // Data is inline. We will load the uncompressed |
4301 | | // data as a resident attribute. |
4302 | 0 | case DECMPFS_TYPE_ZLIB_ATTR: |
4303 | 0 | if (!decmpfs_file_read_zlib_attr( |
4304 | 0 | fs_file, buffer, attributeLength, uncSize)) |
4305 | 0 | { |
4306 | 0 | goto on_error; |
4307 | 0 | } |
4308 | 0 | break; |
4309 | | |
4310 | 0 | case DECMPFS_TYPE_LZVN_ATTR: |
4311 | 0 | if (!decmpfs_file_read_lzvn_attr( |
4312 | 0 | fs_file, buffer, attributeLength, uncSize)) |
4313 | 0 | { |
4314 | 0 | goto on_error; |
4315 | 0 | } |
4316 | 0 | break; |
4317 | | |
4318 | | // Data is compressed in the resource fork |
4319 | 0 | case DECMPFS_TYPE_ZLIB_RSRC: |
4320 | 0 | case DECMPFS_TYPE_LZVN_RSRC: |
4321 | 0 | if (tsk_verbose) |
4322 | 0 | tsk_fprintf(stderr, |
4323 | 0 | "%s: Compressed data is in the file Resource Fork.\n", __func__); |
4324 | 0 | break; |
4325 | 0 | } |
4326 | 0 | } |
4327 | 0 | else { // Attrbute name is NOT com.apple.decmpfs |
4328 | 0 | attrType = TSK_FS_ATTR_TYPE_HFS_EXT_ATTR; |
4329 | 0 | } // END if attribute name is com.apple.decmpfs ELSE clause |
4330 | | |
4331 | 0 | if ((fs_attr = |
4332 | 0 | tsk_fs_attrlist_getnew(fs_file->meta->attr, |
4333 | 0 | TSK_FS_ATTR_RES)) == NULL) { |
4334 | 0 | error_returned(" - hfs_load_extended_attrs"); |
4335 | 0 | goto on_error; |
4336 | 0 | } |
4337 | | |
4338 | 0 | if (tsk_verbose) { |
4339 | 0 | tsk_fprintf(stderr, |
4340 | 0 | "hfs_load_extended_attrs: loading attribute %s, type %u (%s)\n", |
4341 | 0 | nameBuff, (uint32_t) attrType, |
4342 | 0 | hfs_attrTypeName((uint32_t) attrType)); |
4343 | 0 | } |
4344 | | |
4345 | | // set the details in the fs_attr structure |
4346 | 0 | if (tsk_fs_attr_set_str(fs_file, fs_attr, nameBuff, |
4347 | 0 | attrType, attribute_counter, buffer, |
4348 | 0 | attributeLength)) { |
4349 | 0 | error_returned(" - hfs_load_extended_attrs"); |
4350 | 0 | goto on_error; |
4351 | 0 | } |
4352 | | |
4353 | 0 | free(buffer); |
4354 | 0 | buffer = NULL; |
4355 | |
|
4356 | 0 | ++attribute_counter; |
4357 | 0 | } // END if comp == 0 |
4358 | 0 | if (comp == 1) { |
4359 | | // since this record key is greater than our search key, all |
4360 | | // subsequent records will also be greater. |
4361 | 0 | done = TRUE; |
4362 | 0 | break; |
4363 | 0 | } |
4364 | 0 | } // END loop over records in one LEAF node |
4365 | | |
4366 | | /* |
4367 | | * We get to this point if either: |
4368 | | * |
4369 | | * 1. We finish the loop over records and we are still loading attributes |
4370 | | * for the given file. In this case we are NOT done, and must read in |
4371 | | * the next leaf node, and process its records. The following code |
4372 | | * loads the next leaf node before we return to the top of the loop. |
4373 | | * |
4374 | | * 2. We "broke" out of the loop over records because we found a key that |
4375 | | * whose file ID is greater than the one we are working on. In that case |
4376 | | * we are done. The following code does not run, and we exit the |
4377 | | * while loop over successive leaf nodes. |
4378 | | */ |
4379 | | |
4380 | 0 | if (!done) { |
4381 | | // We did not finish loading the attributes when we got to the end of that node, |
4382 | | // so we must get the next node, and continue. |
4383 | | |
4384 | | // First determine the nodeID of the next LEAF node |
4385 | 0 | uint32_t newNodeID = tsk_getu32(endian, nodeDescriptor->flink); |
4386 | | |
4387 | | //fprintf(stdout, "Next Node ID = %u\n", newNodeID); |
4388 | 0 | if (tsk_verbose) |
4389 | 0 | tsk_fprintf(stderr, |
4390 | 0 | "hfs_load_extended_attrs: Processed last record of THIS node, still gathering attributes.\n"); |
4391 | | |
4392 | | // If we are at the very last leaf node in the btree, then |
4393 | | // this "flink" will be zero. We break out of this loop over LEAF nodes. |
4394 | 0 | if (newNodeID == 0) { |
4395 | 0 | if (tsk_verbose) |
4396 | 0 | tsk_fprintf(stderr, |
4397 | 0 | "hfs_load_extended_attrs: But, there are no more leaf nodes, so we are done.\n"); |
4398 | 0 | break; |
4399 | 0 | } |
4400 | | |
4401 | 0 | if (tsk_verbose) |
4402 | 0 | tsk_fprintf(stderr, |
4403 | 0 | "hfs_load_extended_attrs: Reading the next LEAF node %" |
4404 | 0 | PRIu32 ".\n", nodeID); |
4405 | |
|
4406 | 0 | nodeID = newNodeID; |
4407 | |
|
4408 | 0 | cnt = tsk_fs_file_read(attrFile.file, |
4409 | 0 | nodeID * attrFile.nodeSize, |
4410 | 0 | (char *) nodeData, |
4411 | 0 | attrFile.nodeSize, (TSK_FS_FILE_READ_FLAG_ENUM) 0); |
4412 | 0 | if (cnt != (ssize_t)attrFile.nodeSize) { |
4413 | 0 | error_returned |
4414 | 0 | ("hfs_load_extended_attrs: Could not read in the next LEAF node from the Attributes File btree"); |
4415 | 0 | goto on_error; |
4416 | 0 | } |
4417 | | |
4418 | | // Parse the Node header |
4419 | 0 | nodeDescriptor = (hfs_btree_node *) nodeData; |
4420 | | |
4421 | | // If we are NOT leaf node, then this is an error |
4422 | 0 | if (nodeDescriptor->type != HFS_ATTR_NODE_LEAF) { |
4423 | 0 | error_detected(TSK_ERR_FS_CORRUPT, |
4424 | 0 | "hfs_load_extended_attrs: found a non-LEAF node as a successor to a LEAF node"); |
4425 | 0 | goto on_error; |
4426 | 0 | } |
4427 | 0 | } // END if(! done) |
4428 | | |
4429 | | |
4430 | |
|
4431 | 0 | } // END while(! done) loop over successive LEAF nodes |
4432 | | |
4433 | 0 | on_exit: |
4434 | 0 | free(nodeData); |
4435 | 0 | tsk_list_free(nodeIDs_processed); |
4436 | 0 | close_attr_file(&attrFile); |
4437 | 0 | return 0; |
4438 | | |
4439 | 0 | on_error: |
4440 | 0 | free(buffer); |
4441 | 0 | free(nodeData); |
4442 | 0 | tsk_list_free(nodeIDs_processed); |
4443 | 0 | close_attr_file(&attrFile); |
4444 | 0 | return 1; |
4445 | 0 | } |
4446 | | |
// One entry in the "table of contents" of a file's resource fork, as built
// by hfs_parse_resource_fork().  Entries form a NULL-terminated singly
// linked list; free the whole list with free_res_descriptor().
typedef struct RES_DESCRIPTOR {
    char type[5];               // type is really 4 chars, but we will null-terminate
    uint16_t id;                // resource ID from the reference-list entry
    uint32_t offset;            // offset of the resource data within the fork (past its 4-byte length prefix)
    uint32_t length;            // length in bytes of the resource data
    char *name;                 // NULL if a name is not defined for this resource
                                // (NOTE(review): hfs_parse_resource_fork actually stores "<none>" for unnamed resources)
    struct RES_DESCRIPTOR *next;    // next list entry, or NULL at end of list
} RES_DESCRIPTOR;
4455 | | |
4456 | | void |
4457 | | free_res_descriptor(RES_DESCRIPTOR * rd) |
4458 | 0 | { |
4459 | 0 | RES_DESCRIPTOR *nxt; |
4460 | |
|
4461 | 0 | if (rd == NULL) |
4462 | 0 | return; |
4463 | 0 | nxt = rd->next; |
4464 | 0 | free(rd->name); |
4465 | 0 | free(rd); |
4466 | 0 | free_res_descriptor(nxt); // tail recursive |
4467 | 0 | } |
4468 | | |
4469 | | /** |
4470 | | * The purpose of this function is to parse the resource fork of a file, and to return |
4471 | | * a data structure that is, in effect, a table of contents for the resource fork. The |
4472 | | * data structure is a null-terminated linked list of entries. Each one describes one |
4473 | | * resource. If the resource fork is empty, or if there is not a resource fork at all, |
4474 | | * or an error occurs, this function returns NULL. |
4475 | | * |
4476 | | * A non-NULL answer should be freed by the caller, using free_res_descriptor. |
4477 | | * |
4478 | | */ |
4479 | | |
4480 | | static RES_DESCRIPTOR * |
4481 | | hfs_parse_resource_fork(TSK_FS_FILE * fs_file) |
4482 | 0 | { |
4483 | |
|
4484 | 0 | RES_DESCRIPTOR *result = NULL; |
4485 | 0 | RES_DESCRIPTOR *last = NULL; |
4486 | 0 | TSK_FS_INFO *fs_info; |
4487 | 0 | hfs_fork *fork_info; |
4488 | 0 | hfs_fork *resForkInfo; |
4489 | 0 | uint64_t resSize; |
4490 | 0 | const TSK_FS_ATTR *rAttr; |
4491 | 0 | hfs_resource_fork_header rfHeader; |
4492 | 0 | hfs_resource_fork_header *resHead; |
4493 | 0 | uint32_t dataOffset; |
4494 | 0 | uint32_t mapOffset; |
4495 | 0 | uint32_t mapLength; |
4496 | 0 | char *map; |
4497 | 0 | ssize_t attrReadResult; |
4498 | 0 | ssize_t attrReadResult1; |
4499 | 0 | ssize_t attrReadResult2; |
4500 | 0 | hfs_resource_fork_map_header *mapHdr; |
4501 | 0 | uint16_t typeListOffset; |
4502 | 0 | uint16_t nameListOffset; |
4503 | 0 | unsigned char hasNameList; |
4504 | 0 | char *nameListBegin = NULL; |
4505 | 0 | hfs_resource_type_list *typeList; |
4506 | 0 | uint16_t numTypes; |
4507 | 0 | hfs_resource_type_list_item *tlItem; |
4508 | 0 | int mindx; // index for looping over resource types |
4509 | |
|
4510 | 0 | if (fs_file == NULL) { |
4511 | 0 | error_detected(TSK_ERR_FS_ARG, |
4512 | 0 | "hfs_parse_resource_fork: null fs_file"); |
4513 | 0 | return NULL; |
4514 | 0 | } |
4515 | | |
4516 | | |
4517 | 0 | if (fs_file->meta == NULL) { |
4518 | 0 | error_detected(TSK_ERR_FS_ARG, |
4519 | 0 | "hfs_parse_resource_fork: fs_file has null metadata"); |
4520 | 0 | return NULL; |
4521 | 0 | } |
4522 | | |
4523 | 0 | if (fs_file->meta->content_ptr == NULL) { |
4524 | 0 | if (tsk_verbose) |
4525 | 0 | fprintf(stderr, |
4526 | 0 | "hfs_parse_resource_fork: fs_file has null fork data structures, so no resources.\n"); |
4527 | 0 | return NULL; |
4528 | 0 | } |
4529 | | |
4530 | | // Extract the fs |
4531 | 0 | fs_info = fs_file->fs_info; |
4532 | 0 | if (fs_info == NULL) { |
4533 | 0 | error_detected(TSK_ERR_FS_ARG, |
4534 | 0 | "hfs_parse_resource_fork: null fs within fs_info"); |
4535 | 0 | return NULL; |
4536 | 0 | } |
4537 | | |
4538 | | // Try to look at the Resource Fork for an HFS+ file |
4539 | | // Should be able to cast this to hfs_fork * |
4540 | 0 | fork_info = (hfs_fork *) fs_file->meta->content_ptr; // The data fork |
4541 | | // The resource fork is the second one. |
4542 | 0 | resForkInfo = &fork_info[1]; |
4543 | 0 | resSize = tsk_getu64(fs_info->endian, resForkInfo->logic_sz); |
4544 | | //uint32_t numBlocks = tsk_getu32(fs_info->endian, resForkInfo->total_blk); |
4545 | | //uint32_t clmpSize = tsk_getu32(fs_info->endian, resForkInfo->clmp_sz); |
4546 | | |
4547 | | // Hmm, certainly no resources here! |
4548 | 0 | if (resSize == 0) { |
4549 | 0 | return NULL; |
4550 | 0 | } |
4551 | | |
4552 | | // OK, resource size must be > 0 |
4553 | | |
4554 | | // find the attribute for the resource fork |
4555 | 0 | rAttr = |
4556 | 0 | tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_HFS_RSRC, |
4557 | 0 | HFS_FS_ATTR_ID_RSRC, TRUE); |
4558 | | |
4559 | |
|
4560 | 0 | if (rAttr == NULL) { |
4561 | 0 | error_returned |
4562 | 0 | ("hfs_parse_resource_fork: could not get the resource fork attribute"); |
4563 | 0 | return NULL; |
4564 | 0 | } |
4565 | | |
4566 | | // JUST read the resource fork header |
4567 | | |
4568 | | |
4569 | 0 | attrReadResult1 = |
4570 | 0 | tsk_fs_attr_read(rAttr, 0, (char *) &rfHeader, |
4571 | 0 | sizeof(hfs_resource_fork_header), TSK_FS_FILE_READ_FLAG_NONE); |
4572 | |
|
4573 | 0 | if (attrReadResult1 < 0 |
4574 | 0 | || attrReadResult1 != sizeof(hfs_resource_fork_header)) { |
4575 | 0 | error_returned |
4576 | 0 | (" hfs_parse_resource_fork: trying to read the resource fork header"); |
4577 | 0 | return NULL; |
4578 | 0 | } |
4579 | | |
4580 | | // Begin to parse the resource fork |
4581 | 0 | resHead = &rfHeader; |
4582 | 0 | dataOffset = tsk_getu32(fs_info->endian, resHead->dataOffset); |
4583 | 0 | mapOffset = tsk_getu32(fs_info->endian, resHead->mapOffset); |
4584 | | //uint32_t dataLength = tsk_getu32(fs_info->endian, resHead->dataLength); |
4585 | 0 | mapLength = tsk_getu32(fs_info->endian, resHead->mapLength); |
4586 | | |
4587 | | // Read in the WHOLE map |
4588 | 0 | map = (char *) tsk_malloc(mapLength); |
4589 | 0 | if (map == NULL) { |
4590 | 0 | error_returned |
4591 | 0 | ("- hfs_parse_resource_fork: could not allocate space for the resource fork map"); |
4592 | 0 | return NULL; |
4593 | 0 | } |
4594 | | |
4595 | 0 | attrReadResult = |
4596 | 0 | tsk_fs_attr_read(rAttr, (uint64_t) mapOffset, map, |
4597 | 0 | (size_t) mapLength, TSK_FS_FILE_READ_FLAG_NONE); |
4598 | |
|
4599 | 0 | if (attrReadResult < 0 || attrReadResult != (ssize_t) mapLength) { |
4600 | 0 | error_returned |
4601 | 0 | ("- hfs_parse_resource_fork: could not read the map"); |
4602 | 0 | free(map); |
4603 | 0 | return NULL; |
4604 | 0 | } |
4605 | | |
4606 | 0 | mapHdr = (hfs_resource_fork_map_header *) map; |
4607 | |
|
4608 | 0 | typeListOffset = tsk_getu16(fs_info->endian, mapHdr->typeListOffset); |
4609 | |
|
4610 | 0 | nameListOffset = tsk_getu16(fs_info->endian, mapHdr->nameListOffset); |
4611 | |
|
4612 | 0 | if (nameListOffset >= mapLength || nameListOffset == 0) { |
4613 | 0 | hasNameList = FALSE; |
4614 | 0 | } |
4615 | 0 | else { |
4616 | 0 | hasNameList = TRUE; |
4617 | 0 | nameListBegin = map + nameListOffset; |
4618 | 0 | } |
4619 | |
|
4620 | 0 | typeList = (hfs_resource_type_list *) (map + typeListOffset); |
4621 | 0 | numTypes = tsk_getu16(fs_info->endian, typeList->typeCount) + 1; |
4622 | |
|
4623 | 0 | for (mindx = 0; mindx < numTypes; ++mindx) { |
4624 | 0 | uint16_t numRes; |
4625 | 0 | uint16_t refOff; |
4626 | 0 | int pindx; // index for looping over resources |
4627 | 0 | uint16_t rID; |
4628 | 0 | uint32_t rOffset; |
4629 | |
|
4630 | 0 | tlItem = &(typeList->type[mindx]); |
4631 | 0 | numRes = tsk_getu16(fs_info->endian, tlItem->count) + 1; |
4632 | 0 | refOff = tsk_getu16(fs_info->endian, tlItem->offset); |
4633 | | |
4634 | |
|
4635 | 0 | for (pindx = 0; pindx < numRes; ++pindx) { |
4636 | 0 | int16_t nameOffset; |
4637 | 0 | char *nameBuffer; |
4638 | 0 | RES_DESCRIPTOR *rsrc; |
4639 | 0 | char lenBuff[4]; // first 4 bytes of a resource encodes its length |
4640 | 0 | uint32_t rLen; // Resource length |
4641 | |
|
4642 | 0 | hfs_resource_refListItem *item = |
4643 | 0 | ((hfs_resource_refListItem *) (((uint8_t *) typeList) + |
4644 | 0 | refOff)) + pindx; |
4645 | 0 | nameOffset = tsk_gets16(fs_info->endian, item->resNameOffset); |
4646 | 0 | nameBuffer = NULL; |
4647 | |
|
4648 | 0 | if (hasNameList && nameOffset != -1) { |
4649 | 0 | char *name = nameListBegin + nameOffset; |
4650 | 0 | uint8_t nameLen = (uint8_t) name[0]; |
4651 | 0 | nameBuffer = tsk_malloc(nameLen + 1); |
4652 | 0 | if (nameBuffer == NULL) { |
4653 | 0 | error_returned |
4654 | 0 | ("hfs_parse_resource_fork: allocating space for the name of a resource"); |
4655 | 0 | free_res_descriptor(result); |
4656 | 0 | return NULL; |
4657 | 0 | } |
4658 | 0 | memcpy(nameBuffer, name + 1, nameLen); |
4659 | 0 | nameBuffer[nameLen] = (char) 0; |
4660 | 0 | } |
4661 | 0 | else { |
4662 | 0 | nameBuffer = tsk_malloc(7); |
4663 | 0 | if (nameBuffer == NULL) { |
4664 | 0 | error_returned |
4665 | 0 | ("hfs_parse_resource_fork: allocating space for the (null) name of a resource"); |
4666 | 0 | free_res_descriptor(result); |
4667 | 0 | return NULL; |
4668 | 0 | } |
4669 | 0 | memcpy(nameBuffer, "<none>", 6); |
4670 | 0 | nameBuffer[6] = (char) 0; |
4671 | 0 | } |
4672 | | |
4673 | 0 | rsrc = (RES_DESCRIPTOR *) tsk_malloc(sizeof(RES_DESCRIPTOR)); |
4674 | 0 | if (rsrc == NULL) { |
4675 | 0 | error_returned |
4676 | 0 | ("hfs_parse_resource_fork: space for a resource descriptor"); |
4677 | 0 | free_res_descriptor(result); |
4678 | 0 | return NULL; |
4679 | 0 | } |
4680 | | |
4681 | | // Build the linked list |
4682 | 0 | if (result == NULL) |
4683 | 0 | result = rsrc; |
4684 | 0 | if (last != NULL) |
4685 | 0 | last->next = rsrc; |
4686 | 0 | last = rsrc; |
4687 | 0 | rsrc->next = NULL; |
4688 | |
|
4689 | 0 | rID = tsk_getu16(fs_info->endian, item->resID); |
4690 | 0 | rOffset = |
4691 | 0 | tsk_getu24(fs_info->endian, |
4692 | 0 | item->resDataOffset) + dataOffset; |
4693 | | |
4694 | | // Just read the first four bytes of the resource to get its length. It MUST |
4695 | | // be at least 4 bytes long |
4696 | 0 | attrReadResult2 = tsk_fs_attr_read(rAttr, (uint64_t) rOffset, |
4697 | 0 | lenBuff, (size_t) 4, TSK_FS_FILE_READ_FLAG_NONE); |
4698 | |
|
4699 | 0 | if (attrReadResult2 != 4) { |
4700 | 0 | error_returned |
4701 | 0 | ("- hfs_parse_resource_fork: could not read the 4-byte length at beginning of resource"); |
4702 | 0 | free_res_descriptor(result); |
4703 | 0 | return NULL; |
4704 | 0 | } |
4705 | 0 | rLen = tsk_getu32(TSK_BIG_ENDIAN, lenBuff); //TODO |
4706 | |
|
4707 | 0 | rsrc->id = rID; |
4708 | 0 | rsrc->offset = rOffset + 4; |
4709 | 0 | memcpy(rsrc->type, tlItem->type, 4); |
4710 | 0 | rsrc->type[4] = (char) 0; |
4711 | 0 | rsrc->length = rLen; |
4712 | 0 | rsrc->name = nameBuffer; |
4713 | |
|
4714 | 0 | } // END loop over resources of one type |
4715 | |
|
4716 | 0 | } // END loop over resource types |
4717 | | |
4718 | 0 | return result; |
4719 | 0 | } |
4720 | | |
4721 | | |
4722 | | static uint8_t |
4723 | | hfs_load_attrs(TSK_FS_FILE * fs_file) |
4724 | 0 | { |
4725 | 0 | TSK_FS_INFO *fs; |
4726 | 0 | HFS_INFO *hfs; |
4727 | 0 | TSK_FS_ATTR *fs_attr; |
4728 | 0 | TSK_FS_ATTR_RUN *attr_run; |
4729 | 0 | hfs_fork *forkx; |
4730 | 0 | unsigned char resource_fork_has_contents = FALSE; |
4731 | 0 | unsigned char compression_flag = FALSE; |
4732 | 0 | unsigned char isCompressed = FALSE; |
4733 | 0 | unsigned char compDataInRSRCFork = FALSE; |
4734 | 0 | unsigned char cmpType = 0; |
4735 | 0 | uint64_t uncompressedSize; |
4736 | 0 | uint64_t logicalSize; // of a fork |
4737 | | |
4738 | | // clean up any error messages that are lying around |
4739 | 0 | tsk_error_reset(); |
4740 | |
|
4741 | 0 | if ((fs_file == NULL) || (fs_file->meta == NULL) |
4742 | 0 | || (fs_file->fs_info == NULL)) { |
4743 | 0 | error_detected(TSK_ERR_FS_ARG, |
4744 | 0 | "hfs_load_attrs: fs_file or meta is NULL"); |
4745 | 0 | return 1; |
4746 | 0 | } |
4747 | | |
4748 | 0 | fs = (TSK_FS_INFO *) fs_file->fs_info; |
4749 | 0 | hfs = (HFS_INFO *) fs; |
4750 | |
|
4751 | 0 | if (tsk_verbose) |
4752 | 0 | tsk_fprintf(stderr, |
4753 | 0 | "hfs_load_attrs: Processing file %" PRIuINUM "\n", |
4754 | 0 | fs_file->meta->addr); |
4755 | | |
4756 | | |
4757 | | // see if we have already loaded the runs |
4758 | 0 | if (fs_file->meta->attr_state == TSK_FS_META_ATTR_STUDIED) { |
4759 | 0 | if (tsk_verbose) |
4760 | 0 | tsk_fprintf(stderr, |
4761 | 0 | "hfs_load_attrs: Attributes already loaded\n"); |
4762 | 0 | return 0; |
4763 | 0 | } |
4764 | 0 | else if (fs_file->meta->attr_state == TSK_FS_META_ATTR_ERROR) { |
4765 | 0 | if (tsk_verbose) |
4766 | 0 | tsk_fprintf(stderr, |
4767 | 0 | "hfs_load_attrs: Previous attempt to load attributes resulted in error\n"); |
4768 | 0 | return 1; |
4769 | 0 | } |
4770 | | |
4771 | | // Now (re)-initialize the attrlist that will hold the list of attributes |
4772 | 0 | if (fs_file->meta->attr != NULL) { |
4773 | 0 | tsk_fs_attrlist_markunused(fs_file->meta->attr); |
4774 | 0 | } |
4775 | 0 | else if (fs_file->meta->attr == NULL) { |
4776 | 0 | fs_file->meta->attr = tsk_fs_attrlist_alloc(); |
4777 | 0 | } |
4778 | | |
4779 | | /****************** EXTENDED ATTRIBUTES *******************************/ |
4780 | | // We do these first, so that we can detect the mode of compression, if |
4781 | | // any. We need to know that mode in order to handle the forks. |
4782 | |
|
4783 | 0 | if (tsk_verbose) |
4784 | 0 | tsk_fprintf(stderr, |
4785 | 0 | "hfs_load_attrs: loading the HFS+ extended attributes\n"); |
4786 | |
|
4787 | 0 | if (hfs_load_extended_attrs(fs_file, &isCompressed, |
4788 | 0 | &cmpType, &uncompressedSize)) { |
4789 | 0 | error_returned(" - hfs_load_attrs A"); |
4790 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; |
4791 | 0 | return 1; |
4792 | 0 | } |
4793 | | |
4794 | | // TODO: What about DECMPFS_TYPE_RAW_RSRC? |
4795 | 0 | switch (cmpType) { |
4796 | 0 | case DECMPFS_TYPE_ZLIB_RSRC: |
4797 | 0 | case DECMPFS_TYPE_LZVN_RSRC: |
4798 | 0 | compDataInRSRCFork = TRUE; |
4799 | 0 | break; |
4800 | 0 | default: |
4801 | 0 | compDataInRSRCFork = FALSE; |
4802 | 0 | break; |
4803 | 0 | } |
4804 | | |
4805 | 0 | if (isCompressed) { |
4806 | 0 | fs_file->meta->size = uncompressedSize; |
4807 | 0 | } |
4808 | | |
4809 | | // This is the flag indicating compression, from the Catalog File record. |
4810 | 0 | compression_flag = (fs_file->meta->flags & TSK_FS_META_FLAG_COMP) != 0; |
4811 | |
|
4812 | 0 | if (compression_flag && !isCompressed) { |
4813 | 0 | if (tsk_verbose) |
4814 | 0 | tsk_fprintf(stderr, |
4815 | 0 | "hfs_load_attrs: WARNING, HFS marks this as a" |
4816 | 0 | " compressed file, but no compression record was found.\n"); |
4817 | 0 | } |
4818 | 0 | if (isCompressed && !compression_flag) { |
4819 | 0 | if (tsk_verbose) |
4820 | 0 | tsk_fprintf(stderr, |
4821 | 0 | "hfs_load_attrs: WARNING, this file has a compression" |
4822 | 0 | " record, but the HFS compression flag is not set.\n"); |
4823 | 0 | } |
4824 | | |
4825 | | /************* FORKS (both) ************************************/ |
4826 | | |
4827 | | // Process the data and resource forks. We only do this if the |
4828 | | // fork data structures are non-null, so test that: |
4829 | 0 | if (fs_file->meta->content_ptr != NULL) { |
4830 | | |
4831 | | /************** DATA FORK STUFF ***************************/ |
4832 | | |
4833 | | // Get the data fork data-structure |
4834 | 0 | forkx = (hfs_fork *) fs_file->meta->content_ptr; |
4835 | | |
4836 | | // If this is a compressed file, then either this attribute is already loaded |
4837 | | // because the data was in the compression record, OR |
4838 | | // the compressed data is in the resource fork. We will load those runs when |
4839 | | // we handle the resource fork. |
4840 | 0 | if (!isCompressed) { |
4841 | | // We only load this attribute if this fork has non-zero length |
4842 | | // or if this is a REG or LNK file. Otherwise, we skip |
4843 | 0 | logicalSize = tsk_getu64(fs->endian, forkx->logic_sz); |
4844 | |
|
4845 | 0 | if (logicalSize > 0 || |
4846 | 0 | fs_file->meta->type == TSK_FS_META_TYPE_REG || |
4847 | 0 | fs_file->meta->type == TSK_FS_META_TYPE_LNK) { |
4848 | | |
4849 | |
|
4850 | 0 | if (tsk_verbose) |
4851 | 0 | tsk_fprintf(stderr, |
4852 | 0 | "hfs_load_attrs: loading the data fork attribute\n"); |
4853 | | |
4854 | | // get an attribute structure to store the data in |
4855 | 0 | if ((fs_attr = tsk_fs_attrlist_getnew(fs_file->meta->attr, |
4856 | 0 | TSK_FS_ATTR_NONRES)) == NULL) { |
4857 | 0 | error_returned(" - hfs_load_attrs"); |
4858 | 0 | return 1; |
4859 | 0 | } |
4860 | | /* NOTE that fs_attr is now tied to fs_file->meta->attr. |
4861 | | * that means that we do not need to free it if we abort in the |
4862 | | * following code (and doing so will cause double free errors). */ |
4863 | | |
4864 | 0 | if (logicalSize > 0) { |
4865 | | |
4866 | | // Convert runs of blocks to the TSK internal form |
4867 | 0 | if (((attr_run = |
4868 | 0 | hfs_extents_to_attr(fs, forkx->extents, |
4869 | 0 | 0)) == NULL) |
4870 | 0 | && (tsk_error_get_errno() != 0)) { |
4871 | 0 | error_returned(" - hfs_load_attrs"); |
4872 | 0 | return 1; |
4873 | 0 | } |
4874 | | |
4875 | | |
4876 | | |
4877 | | // add the runs to the attribute and the attribute to the file. |
4878 | 0 | if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, |
4879 | 0 | "", TSK_FS_ATTR_TYPE_HFS_DATA, |
4880 | 0 | HFS_FS_ATTR_ID_DATA, logicalSize, logicalSize, |
4881 | 0 | (TSK_OFF_T) tsk_getu32(fs->endian, |
4882 | 0 | forkx->total_blk) * fs->block_size, 0, |
4883 | 0 | 0)) { |
4884 | 0 | error_returned(" - hfs_load_attrs (DATA)"); |
4885 | 0 | tsk_fs_attr_run_free(attr_run); |
4886 | 0 | return 1; |
4887 | 0 | } |
4888 | | |
4889 | | // see if extents file has additional runs |
4890 | 0 | if (hfs_ext_find_extent_record_attr(hfs, |
4891 | 0 | (uint32_t) fs_file->meta->addr, fs_attr, |
4892 | 0 | TRUE)) { |
4893 | 0 | error_returned(" - hfs_load_attrs B"); |
4894 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; |
4895 | 0 | return 1; |
4896 | 0 | } |
4897 | |
|
4898 | 0 | } |
4899 | 0 | else { |
4900 | | // logicalSize == 0, but this is either a REG or LNK file |
4901 | | // so, it should have a DATA fork attribute of zero length. |
4902 | 0 | if (tsk_fs_attr_set_run(fs_file, fs_attr, NULL, "", |
4903 | 0 | TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, |
4904 | 0 | 0, 0, 0, 0, 0)) { |
4905 | 0 | error_returned(" - hfs_load_attrs (non-file)"); |
4906 | 0 | return 1; |
4907 | 0 | } |
4908 | 0 | } |
4909 | |
|
4910 | 0 | } // END logicalSize>0 or REG or LNK file type |
4911 | 0 | } // END if not Compressed |
4912 | | |
4913 | | |
4914 | | |
4915 | | /************** RESOURCE FORK STUFF ************************************/ |
4916 | | |
4917 | | // Get the resource fork. |
4918 | | //Note that content_ptr points to an array of two |
4919 | | // hfs_fork data structures, the second of which |
4920 | | // describes the blocks of the resource fork. |
4921 | | |
4922 | 0 | forkx = &((hfs_fork *) fs_file->meta->content_ptr)[1]; |
4923 | |
|
4924 | 0 | logicalSize = tsk_getu64(fs->endian, forkx->logic_sz); |
4925 | | |
4926 | | // Skip if the length of the resource fork is zero |
4927 | 0 | if (logicalSize > 0) { |
4928 | |
|
4929 | 0 | if (tsk_verbose) |
4930 | 0 | tsk_fprintf(stderr, |
4931 | 0 | "hfs_load_attrs: loading the resource fork\n"); |
4932 | |
|
4933 | 0 | resource_fork_has_contents = TRUE; |
4934 | | |
4935 | | // get an attribute structure to store the resource fork data in. We will |
4936 | | // reuse the fs_attr variable, since we are done with the data fork. |
4937 | 0 | if ((fs_attr = |
4938 | 0 | tsk_fs_attrlist_getnew(fs_file->meta->attr, |
4939 | 0 | TSK_FS_ATTR_NONRES)) == NULL) { |
4940 | 0 | error_returned(" - hfs_load_attrs (RSRC)"); |
4941 | 0 | return 1; |
4942 | 0 | } |
4943 | | /* NOTE that fs_attr is now tied to fs_file->meta->attr. |
4944 | | * that means that we do not need to free it if we abort in the |
4945 | | * following code (and doing so will cause double free errors). */ |
4946 | | |
4947 | | |
4948 | | // convert the resource fork to the TSK format |
4949 | 0 | if (((attr_run = |
4950 | 0 | hfs_extents_to_attr(fs, forkx->extents, |
4951 | 0 | 0)) == NULL) |
4952 | 0 | && (tsk_error_get_errno() != 0)) { |
4953 | 0 | error_returned(" - hfs_load_attrs"); |
4954 | 0 | return 1; |
4955 | 0 | } |
4956 | | |
4957 | | // add the runs to the attribute and the attribute to the file. |
4958 | 0 | if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, "RSRC", |
4959 | 0 | TSK_FS_ATTR_TYPE_HFS_RSRC, HFS_FS_ATTR_ID_RSRC, |
4960 | 0 | tsk_getu64(fs->endian, forkx->logic_sz), |
4961 | 0 | tsk_getu64(fs->endian, forkx->logic_sz), |
4962 | 0 | (TSK_OFF_T) tsk_getu32(fs->endian, |
4963 | 0 | forkx->total_blk) * fs->block_size, 0, 0)) { |
4964 | 0 | error_returned(" - hfs_load_attrs (RSRC)"); |
4965 | 0 | tsk_fs_attr_run_free(attr_run); |
4966 | 0 | return 1; |
4967 | 0 | } |
4968 | | |
4969 | | // see if extents file has additional runs for the resource fork. |
4970 | 0 | if (hfs_ext_find_extent_record_attr(hfs, |
4971 | 0 | (uint32_t) fs_file->meta->addr, fs_attr, FALSE)) { |
4972 | 0 | error_returned(" - hfs_load_attrs C"); |
4973 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; |
4974 | 0 | return 1; |
4975 | 0 | } |
4976 | | |
4977 | 0 | if (isCompressed && compDataInRSRCFork) { |
4978 | | // OK, we are going to load those same resource fork blocks as the "DATA" |
4979 | | // attribute, but will mark it as compressed. |
4980 | | // get an attribute structure to store the resource fork data in. We will |
4981 | | // reuse the fs_attr variable, since we are done with the data fork. |
4982 | 0 | if (tsk_verbose) |
4983 | 0 | tsk_fprintf(stderr, |
4984 | 0 | "File is compressed with data in the resource fork. " |
4985 | 0 | "Loading the default DATA attribute.\n"); |
4986 | 0 | if ((fs_attr = |
4987 | 0 | tsk_fs_attrlist_getnew(fs_file->meta->attr, |
4988 | 0 | TSK_FS_ATTR_NONRES)) == NULL) { |
4989 | 0 | error_returned |
4990 | 0 | (" - hfs_load_attrs (RSRC loading as DATA)"); |
4991 | 0 | return 1; |
4992 | 0 | } |
4993 | | /* NOTE that fs_attr is now tied to fs_file->meta->attr. |
4994 | | * that means that we do not need to free it if we abort in the |
4995 | | * following code (and doing so will cause double free errors). */ |
4996 | | |
4997 | 0 | switch (cmpType) { |
4998 | 0 | case DECMPFS_TYPE_ZLIB_RSRC: |
4999 | 0 | #ifdef HAVE_LIBZ |
5000 | 0 | fs_attr->w = decmpfs_attr_walk_zlib_rsrc; |
5001 | 0 | fs_attr->r = decmpfs_file_read_zlib_rsrc; |
5002 | | #else |
5003 | | // We don't have zlib, so the uncompressed data is not |
5004 | | // available to us; however, we must have a default DATA |
5005 | | // attribute, or icat will misbehave. |
5006 | | if (tsk_verbose) |
5007 | | tsk_fprintf(stderr, |
5008 | | "hfs_load_attrs: No zlib compression library, so setting a zero-length default DATA attribute.\n"); |
5009 | | |
5010 | | if (tsk_fs_attr_set_run(fs_file, fs_attr, NULL, "DATA", |
5011 | | TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, 0, |
5012 | | 0, 0, 0, 0)) { |
5013 | | error_returned(" - hfs_load_attrs (non-file)"); |
5014 | | return 1; |
5015 | | } |
5016 | | #endif |
5017 | 0 | break; |
5018 | | |
5019 | 0 | case DECMPFS_TYPE_LZVN_RSRC: |
5020 | |
|
5021 | 0 | fs_attr->w = decmpfs_attr_walk_lzvn_rsrc; |
5022 | 0 | fs_attr->r = decmpfs_file_read_lzvn_rsrc; |
5023 | |
|
5024 | 0 | break; |
5025 | 0 | } |
5026 | | |
5027 | | // convert the resource fork to the TSK format |
5028 | 0 | if (((attr_run = |
5029 | 0 | hfs_extents_to_attr(fs, forkx->extents, |
5030 | 0 | 0)) == NULL) |
5031 | 0 | && (tsk_error_get_errno() != 0)) { |
5032 | 0 | error_returned |
5033 | 0 | (" - hfs_load_attrs, RSRC fork as DATA fork"); |
5034 | 0 | return 1; |
5035 | 0 | } |
5036 | | |
5037 | 0 | if (tsk_verbose) |
5038 | 0 | tsk_fprintf(stderr, |
5039 | 0 | "hfs_load_attrs: Loading RSRC fork block runs as the default DATA attribute.\n"); |
5040 | | |
5041 | | // add the runs to the attribute and the attribute to the file. |
5042 | 0 | if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, "DECOMP", |
5043 | 0 | TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, |
5044 | 0 | logicalSize, |
5045 | 0 | logicalSize, |
5046 | 0 | (TSK_OFF_T) tsk_getu32(fs->endian, |
5047 | 0 | forkx->total_blk) * fs->block_size, |
5048 | 0 | TSK_FS_ATTR_COMP | TSK_FS_ATTR_NONRES, 0)) { |
5049 | 0 | error_returned |
5050 | 0 | (" - hfs_load_attrs (RSRC loading as DATA)"); |
5051 | 0 | tsk_fs_attr_run_free(attr_run); |
5052 | 0 | return 1; |
5053 | 0 | } |
5054 | | |
5055 | | // see if extents file has additional runs for the resource fork. |
5056 | 0 | if (hfs_ext_find_extent_record_attr(hfs, |
5057 | 0 | (uint32_t) fs_file->meta->addr, fs_attr, FALSE)) { |
5058 | 0 | error_returned |
5059 | 0 | (" - hfs_load_attrs (RSRC loading as DATA"); |
5060 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; |
5061 | 0 | return 1; |
5062 | 0 | } |
5063 | | |
5064 | 0 | if (tsk_verbose) |
5065 | 0 | tsk_fprintf(stderr, |
5066 | 0 | "hfs_load_attrs: setting the \"special\" function pointers to inflate compressed data.\n"); |
5067 | 0 | } |
5068 | |
|
5069 | 0 | } // END resource fork size > 0 |
5070 | |
|
5071 | 0 | } // END the fork data structures are non-NULL |
5072 | | |
5073 | 0 | if (isCompressed && compDataInRSRCFork && !resource_fork_has_contents) { |
5074 | 0 | if (tsk_verbose) |
5075 | 0 | tsk_fprintf(stderr, |
5076 | 0 | "hfs_load_attrs: WARNING, compression record claims that compressed data" |
5077 | 0 | " is in the Resource Fork, but that fork is empty or non-existent.\n"); |
5078 | 0 | } |
5079 | | |
5080 | | // Finish up. |
5081 | 0 | fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; |
5082 | |
|
5083 | 0 | return 0; |
5084 | 0 | } |
5085 | | |
5086 | | |
5087 | | /** \internal |
5088 | | * Get allocation status of file system block. |
5089 | | * adapted from IsAllocationBlockUsed from: |
5090 | | * http://developer.apple.com/technotes/tn/tn1150.html |
5091 | | * |
5092 | | * @param hfs File system being analyzed |
5093 | | * @param b Block address |
5094 | | * @returns 1 if allocated, 0 if not, -1 on error |
5095 | | */ |
5096 | | static int8_t |
5097 | | hfs_block_is_alloc(HFS_INFO * hfs, TSK_DADDR_T a_addr) |
5098 | 0 | { |
5099 | 0 | TSK_FS_INFO *fs = &(hfs->fs_info); |
5100 | 0 | TSK_OFF_T b; |
5101 | 0 | size_t b2; |
5102 | | |
5103 | | // lazy loading |
5104 | 0 | if (hfs->blockmap_file == NULL) { |
5105 | 0 | if ((hfs->blockmap_file = |
5106 | 0 | tsk_fs_file_open_meta(fs, NULL, |
5107 | 0 | HFS_ALLOCATION_FILE_ID)) == NULL) { |
5108 | 0 | tsk_error_errstr2_concat(" - Loading blockmap file"); |
5109 | 0 | return -1; |
5110 | 0 | } |
5111 | | |
5112 | | /* cache the data attribute */ |
5113 | 0 | hfs->blockmap_attr = |
5114 | 0 | tsk_fs_attrlist_get(hfs->blockmap_file->meta->attr, |
5115 | 0 | TSK_FS_ATTR_TYPE_DEFAULT); |
5116 | 0 | if (!hfs->blockmap_attr) { |
5117 | 0 | tsk_error_errstr2_concat |
5118 | 0 | (" - Data Attribute not found in Blockmap File"); |
5119 | 0 | return -1; |
5120 | 0 | } |
5121 | 0 | hfs->blockmap_cache_start = -1; |
5122 | 0 | hfs->blockmap_cache_len = 0; |
5123 | 0 | } |
5124 | | |
5125 | | // get the byte offset |
5126 | 0 | b = (TSK_OFF_T) a_addr / 8; |
5127 | 0 | if (b > hfs->blockmap_file->meta->size) { |
5128 | 0 | tsk_error_set_errno(TSK_ERR_FS_CORRUPT); |
5129 | 0 | tsk_error_set_errstr("hfs_block_is_alloc: block %" PRIuDADDR |
5130 | 0 | " is too large for bitmap (%" PRIdOFF ")", a_addr, |
5131 | 0 | hfs->blockmap_file->meta->size); |
5132 | 0 | return -1; |
5133 | 0 | } |
5134 | | |
5135 | | // see if it is in the cache |
5136 | 0 | if ((hfs->blockmap_cache_start == -1) |
5137 | 0 | || (hfs->blockmap_cache_start > b) |
5138 | 0 | || (hfs->blockmap_cache_start + hfs->blockmap_cache_len <= (size_t) b)) { |
5139 | 0 | ssize_t cnt = tsk_fs_attr_read(hfs->blockmap_attr, b, |
5140 | 0 | hfs->blockmap_cache, |
5141 | 0 | sizeof(hfs->blockmap_cache), 0); |
5142 | 0 | if (cnt < 1) { |
5143 | 0 | tsk_error_set_errstr2 |
5144 | 0 | ("hfs_block_is_alloc: Error reading block bitmap at offset %" |
5145 | 0 | PRIdOFF, b); |
5146 | 0 | return -1; |
5147 | 0 | } |
5148 | 0 | hfs->blockmap_cache_start = b; |
5149 | 0 | hfs->blockmap_cache_len = cnt; |
5150 | 0 | } |
5151 | 0 | b2 = (size_t) (b - hfs->blockmap_cache_start); |
5152 | 0 | return (hfs->blockmap_cache[b2] & (1 << (7 - (a_addr % 8)))) != 0; |
5153 | 0 | } |
5154 | | |
5155 | | |
5156 | | TSK_FS_BLOCK_FLAG_ENUM |
5157 | | hfs_block_getflags(TSK_FS_INFO * a_fs, TSK_DADDR_T a_addr) |
5158 | 0 | { |
5159 | 0 | return (hfs_block_is_alloc((HFS_INFO *) a_fs, a_addr) == 1) ? |
5160 | 0 | TSK_FS_BLOCK_FLAG_ALLOC : TSK_FS_BLOCK_FLAG_UNALLOC; |
5161 | 0 | } |
5162 | | |
5163 | | |
5164 | | static uint8_t |
5165 | | hfs_block_walk(TSK_FS_INFO * fs, TSK_DADDR_T start_blk, |
5166 | | TSK_DADDR_T end_blk, TSK_FS_BLOCK_WALK_FLAG_ENUM flags, |
5167 | | TSK_FS_BLOCK_WALK_CB action, void *ptr) |
5168 | 0 | { |
5169 | 0 | char *myname = "hfs_block_walk"; |
5170 | 0 | HFS_INFO *hfs = (HFS_INFO *) fs; |
5171 | 0 | TSK_FS_BLOCK *fs_block; |
5172 | 0 | TSK_DADDR_T addr; |
5173 | |
|
5174 | 0 | if (tsk_verbose) |
5175 | 0 | tsk_fprintf(stderr, |
5176 | 0 | "%s: start_blk: %" PRIuDADDR " end_blk: %" |
5177 | 0 | PRIuDADDR " flags: %" PRIu32 "\n", myname, start_blk, end_blk, |
5178 | 0 | flags); |
5179 | | |
5180 | | // clean up any error messages that are lying around |
5181 | 0 | tsk_error_reset(); |
5182 | | |
5183 | | /* |
5184 | | * Sanity checks. |
5185 | | */ |
5186 | 0 | if (start_blk < fs->first_block || start_blk > fs->last_block) { |
5187 | 0 | tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); |
5188 | 0 | tsk_error_set_errstr("%s: invalid start block number: %" PRIuDADDR |
5189 | 0 | "", myname, start_blk); |
5190 | 0 | return 1; |
5191 | 0 | } |
5192 | 0 | if (end_blk < fs->first_block || end_blk > fs->last_block) { |
5193 | 0 | tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); |
5194 | 0 | tsk_error_set_errstr("%s: invalid last block number: %" PRIuDADDR |
5195 | 0 | "", myname, end_blk); |
5196 | 0 | return 1; |
5197 | 0 | } |
5198 | | |
5199 | 0 | if (start_blk > end_blk) |
5200 | 0 | XSWAP(start_blk, end_blk); |
5201 | | |
5202 | | /* Sanity check on flags -- make sure at least one ALLOC is set */ |
5203 | 0 | if (((flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC) == 0) && |
5204 | 0 | ((flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC) == 0)) { |
5205 | 0 | flags |= |
5206 | 0 | (TSK_FS_BLOCK_WALK_FLAG_ALLOC | |
5207 | 0 | TSK_FS_BLOCK_WALK_FLAG_UNALLOC); |
5208 | 0 | } |
5209 | 0 | if (((flags & TSK_FS_BLOCK_WALK_FLAG_META) == 0) && |
5210 | 0 | ((flags & TSK_FS_BLOCK_WALK_FLAG_CONT) == 0)) { |
5211 | 0 | flags |= |
5212 | 0 | (TSK_FS_BLOCK_WALK_FLAG_CONT | TSK_FS_BLOCK_WALK_FLAG_META); |
5213 | 0 | } |
5214 | |
|
5215 | 0 | if ((fs_block = tsk_fs_block_alloc(fs)) == NULL) { |
5216 | 0 | return 1; |
5217 | 0 | } |
5218 | | |
5219 | | /* |
5220 | | * Iterate |
5221 | | */ |
5222 | 0 | for (addr = start_blk; addr <= end_blk; ++addr) { |
5223 | 0 | int retval; |
5224 | 0 | int myflags; |
5225 | | |
5226 | | /* identify if the block is allocated or not */ |
5227 | 0 | myflags = hfs_block_is_alloc(hfs, addr) ? |
5228 | 0 | TSK_FS_BLOCK_FLAG_ALLOC : TSK_FS_BLOCK_FLAG_UNALLOC; |
5229 | | |
5230 | | // test if we should call the callback with this one |
5231 | 0 | if ((myflags & TSK_FS_BLOCK_FLAG_ALLOC) |
5232 | 0 | && (!(flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC))) |
5233 | 0 | continue; |
5234 | 0 | else if ((myflags & TSK_FS_BLOCK_FLAG_UNALLOC) |
5235 | 0 | && (!(flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC))) |
5236 | 0 | continue; |
5237 | | |
5238 | 0 | if (flags & TSK_FS_BLOCK_WALK_FLAG_AONLY) |
5239 | 0 | myflags |= TSK_FS_BLOCK_FLAG_AONLY; |
5240 | |
|
5241 | 0 | if (tsk_fs_block_get_flag(fs, fs_block, addr, |
5242 | 0 | (TSK_FS_BLOCK_FLAG_ENUM) myflags) == NULL) { |
5243 | 0 | tsk_fs_block_free(fs_block); |
5244 | 0 | return 1; |
5245 | 0 | } |
5246 | | |
5247 | 0 | retval = action(fs_block, ptr); |
5248 | 0 | if (TSK_WALK_STOP == retval) { |
5249 | 0 | break; |
5250 | 0 | } |
5251 | 0 | else if (TSK_WALK_ERROR == retval) { |
5252 | 0 | tsk_fs_block_free(fs_block); |
5253 | 0 | return 1; |
5254 | 0 | } |
5255 | 0 | } |
5256 | | |
5257 | 0 | tsk_fs_block_free(fs_block); |
5258 | 0 | return 0; |
5259 | 0 | } |
5260 | | |
5261 | | |
5262 | | uint8_t |
5263 | | hfs_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T start_inum, |
5264 | | TSK_INUM_T end_inum, TSK_FS_META_FLAG_ENUM flags, |
5265 | | TSK_FS_META_WALK_CB action, void *ptr) |
5266 | 0 | { |
5267 | 0 | TSK_INUM_T inum; |
5268 | 0 | TSK_FS_FILE *fs_file; |
5269 | |
|
5270 | 0 | if (tsk_verbose) |
5271 | 0 | tsk_fprintf(stderr, |
5272 | 0 | "hfs_inode_walk: start_inum: %" PRIuINUM " end_inum: %" |
5273 | 0 | PRIuINUM " flags: %" PRIu32 "\n", start_inum, end_inum, flags); |
5274 | | |
5275 | | /* |
5276 | | * Sanity checks. |
5277 | | */ |
5278 | 0 | if (start_inum < fs->first_inum || start_inum > fs->last_inum) { |
5279 | 0 | tsk_error_reset(); |
5280 | 0 | tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); |
5281 | 0 | tsk_error_set_errstr("inode_walk: Start inode: %" PRIuINUM "", |
5282 | 0 | start_inum); |
5283 | 0 | return 1; |
5284 | 0 | } |
5285 | 0 | else if (end_inum < fs->first_inum || end_inum > fs->last_inum |
5286 | 0 | || end_inum < start_inum) { |
5287 | 0 | tsk_error_reset(); |
5288 | 0 | tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); |
5289 | 0 | tsk_error_set_errstr("inode_walk: End inode: %" PRIuINUM "", |
5290 | 0 | end_inum); |
5291 | 0 | return 1; |
5292 | 0 | } |
5293 | | |
5294 | | /* If ORPHAN is wanted, then make sure that the flags are correct */ |
5295 | 0 | if (flags & TSK_FS_META_FLAG_ORPHAN) { |
5296 | 0 | flags |= TSK_FS_META_FLAG_UNALLOC; |
5297 | 0 | flags &= ~TSK_FS_META_FLAG_ALLOC; |
5298 | 0 | flags |= TSK_FS_META_FLAG_USED; |
5299 | 0 | flags &= ~TSK_FS_META_FLAG_UNUSED; |
5300 | 0 | } |
5301 | | |
5302 | 0 | else { |
5303 | 0 | if (((flags & TSK_FS_META_FLAG_ALLOC) == 0) && |
5304 | 0 | ((flags & TSK_FS_META_FLAG_UNALLOC) == 0)) { |
5305 | 0 | flags |= (TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_UNALLOC); |
5306 | 0 | } |
5307 | | |
5308 | | /* If neither of the USED or UNUSED flags are set, then set them |
5309 | | * both |
5310 | | */ |
5311 | 0 | if (((flags & TSK_FS_META_FLAG_USED) == 0) && |
5312 | 0 | ((flags & TSK_FS_META_FLAG_UNUSED) == 0)) { |
5313 | 0 | flags |= (TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_UNUSED); |
5314 | 0 | } |
5315 | 0 | } |
5316 | |
|
5317 | 0 | if ((fs_file = tsk_fs_file_alloc(fs)) == NULL) |
5318 | 0 | return 1; |
5319 | | |
5320 | 0 | if ((fs_file->meta = tsk_fs_meta_alloc(HFS_FILE_CONTENT_LEN)) == NULL) |
5321 | 0 | return 1; |
5322 | | |
5323 | 0 | if (start_inum > end_inum) |
5324 | 0 | XSWAP(start_inum, end_inum); |
5325 | |
|
5326 | 0 | for (inum = start_inum; inum <= end_inum; ++inum) { |
5327 | 0 | int retval; |
5328 | |
|
5329 | 0 | if (hfs_inode_lookup(fs, fs_file, inum)) { |
5330 | | // deleted files may not exist in the catalog |
5331 | 0 | if (tsk_error_get_errno() == TSK_ERR_FS_INODE_NUM) { |
5332 | 0 | tsk_error_reset(); |
5333 | 0 | continue; |
5334 | 0 | } |
5335 | 0 | else { |
5336 | 0 | return 1; |
5337 | 0 | } |
5338 | 0 | } |
5339 | | |
5340 | 0 | if ((fs_file->meta->flags & flags) != fs_file->meta->flags) |
5341 | 0 | continue; |
5342 | | |
5343 | | /* call action */ |
5344 | 0 | retval = action(fs_file, ptr); |
5345 | 0 | if (retval == TSK_WALK_STOP) { |
5346 | 0 | tsk_fs_file_close(fs_file); |
5347 | 0 | return 0; |
5348 | 0 | } |
5349 | 0 | else if (retval == TSK_WALK_ERROR) { |
5350 | 0 | tsk_fs_file_close(fs_file); |
5351 | 0 | return 1; |
5352 | 0 | } |
5353 | 0 | } |
5354 | | |
5355 | 0 | tsk_fs_file_close(fs_file); |
5356 | 0 | return 0; |
5357 | 0 | } |
5358 | | |
5359 | | /* return the name of a file at a given inode |
5360 | | * in a newly-allocated string, or NULL on error |
5361 | | */ |
5362 | | char * |
5363 | | hfs_get_inode_name(TSK_FS_INFO * fs, TSK_INUM_T inum) |
5364 | 0 | { |
5365 | 0 | HFS_INFO *hfs = (HFS_INFO *) fs; |
5366 | 0 | HFS_ENTRY entry; |
5367 | 0 | char *fn = NULL; |
5368 | |
|
5369 | 0 | if (hfs_cat_file_lookup(hfs, inum, &entry, FALSE)) |
5370 | 0 | return NULL; |
5371 | | |
5372 | 0 | fn = malloc(HFS_MAXNAMLEN + 1); |
5373 | 0 | if (fn == NULL) |
5374 | 0 | return NULL; |
5375 | | |
5376 | 0 | if (hfs_UTF16toUTF8(fs, entry.thread.name.unicode, |
5377 | 0 | tsk_getu16(fs->endian, entry.thread.name.length), fn, |
5378 | 0 | HFS_MAXNAMLEN + 1, HFS_U16U8_FLAG_REPLACE_SLASH)) { |
5379 | 0 | free(fn); |
5380 | 0 | return NULL; |
5381 | 0 | } |
5382 | | |
5383 | 0 | return fn; |
5384 | 0 | } |
5385 | | |
5386 | | /* print the name of a file at a given inode |
5387 | | * returns 0 on success, 1 on error */ |
5388 | | static uint8_t |
5389 | | print_inode_name(FILE * hFile, TSK_FS_INFO * fs, TSK_INUM_T inum) |
5390 | 0 | { |
5391 | 0 | HFS_INFO *hfs = (HFS_INFO *) fs; |
5392 | 0 | char fn[HFS_MAXNAMLEN + 1]; |
5393 | 0 | HFS_ENTRY entry; |
5394 | |
|
5395 | 0 | if (hfs_cat_file_lookup(hfs, inum, &entry, FALSE)) |
5396 | 0 | return 1; |
5397 | | |
5398 | 0 | if (hfs_UTF16toUTF8(fs, entry.thread.name.unicode, |
5399 | 0 | tsk_getu16(fs->endian, entry.thread.name.length), fn, |
5400 | 0 | HFS_MAXNAMLEN + 1, HFS_U16U8_FLAG_REPLACE_SLASH)) |
5401 | 0 | return 1; |
5402 | | |
5403 | 0 | tsk_fprintf(hFile, "%s", fn); |
5404 | |
|
5405 | 0 | return 0; |
5406 | 0 | } |
5407 | | |
5408 | | /* tail recursive function to print a path... prints the parent path, then |
5409 | | * appends / and the name of the given inode. prints nothing for root |
5410 | | * returns 0 on success, 1 on failure |
5411 | | */ |
5412 | | static uint8_t |
5413 | | print_parent_path(FILE * hFile, TSK_FS_INFO * fs, TSK_INUM_T inum) |
5414 | 0 | { |
5415 | 0 | HFS_INFO *hfs = (HFS_INFO *) fs; |
5416 | 0 | char fn[HFS_MAXNAMLEN + 1]; |
5417 | 0 | HFS_ENTRY entry; |
5418 | |
|
5419 | 0 | if (inum == HFS_ROOT_INUM) |
5420 | 0 | return 0; |
5421 | | |
5422 | 0 | if (inum <= HFS_ROOT_INUM) { |
5423 | 0 | tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); |
5424 | 0 | tsk_error_set_errstr("print_parent_path: out-of-range inode %" |
5425 | 0 | PRIuINUM, inum); |
5426 | 0 | return 1; |
5427 | 0 | } |
5428 | | |
5429 | 0 | if (hfs_cat_file_lookup(hfs, inum, &entry, FALSE)) |
5430 | 0 | return 1; |
5431 | | |
5432 | 0 | if (hfs_UTF16toUTF8(fs, entry.thread.name.unicode, |
5433 | 0 | tsk_getu16(fs->endian, entry.thread.name.length), fn, |
5434 | 0 | HFS_MAXNAMLEN + 1, |
5435 | 0 | HFS_U16U8_FLAG_REPLACE_SLASH | HFS_U16U8_FLAG_REPLACE_CONTROL)) |
5436 | 0 | return 1; |
5437 | | |
5438 | 0 | if (print_parent_path(hFile, fs, (TSK_INUM_T) tsk_getu32(fs->endian, |
5439 | 0 | entry.thread.parent_cnid))) |
5440 | 0 | return 1; |
5441 | | |
5442 | 0 | tsk_fprintf(hFile, "/%s", fn); |
5443 | 0 | return 0; |
5444 | 0 | } |
5445 | | |
5446 | | /* print the file name corresponding to an inode, in brackets after a space. |
5447 | | * uses Unix path conventions, and does not include the volume name. |
5448 | | * returns 0 on success, 1 on failure |
5449 | | */ |
5450 | | static uint8_t |
5451 | | print_inode_file(FILE * hFile, TSK_FS_INFO * fs, TSK_INUM_T inum) |
5452 | 0 | { |
5453 | 0 | tsk_fprintf(hFile, " ["); |
5454 | 0 | if (inum == HFS_ROOT_INUM) |
5455 | 0 | tsk_fprintf(hFile, "/"); |
5456 | 0 | else { |
5457 | 0 | if (print_parent_path(hFile, fs, inum)) { |
5458 | 0 | tsk_fprintf(hFile, "unknown]"); |
5459 | 0 | return 1; |
5460 | 0 | } |
5461 | 0 | } |
5462 | 0 | tsk_fprintf(hFile, "]"); |
5463 | 0 | return 0; |
5464 | 0 | } |
5465 | | |
/* Consistency-check entry point for HFS. Not implemented: always sets
 * TSK_ERR_FS_UNSUPFUNC and returns 1 (failure). */
static uint8_t
hfs_fscheck(TSK_FS_INFO * fs, FILE * hFile)
{
    tsk_error_reset();
    tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC);
    tsk_error_set_errstr("fscheck not implemented for HFS yet");
    return 1;
}
5474 | | |
5475 | | |
5476 | | static uint8_t |
5477 | | hfs_fsstat(TSK_FS_INFO * fs, FILE * hFile) |
5478 | 0 | { |
5479 | | // char *myname = "hfs_fsstat"; |
5480 | 0 | HFS_INFO *hfs = (HFS_INFO *) fs; |
5481 | 0 | hfs_plus_vh *sb = hfs->fs; |
5482 | 0 | time_t mac_time; |
5483 | 0 | TSK_INUM_T inode; |
5484 | 0 | char timeBuf[128]; |
5485 | |
|
5486 | 0 | if (tsk_verbose) |
5487 | 0 | tsk_fprintf(stderr, "hfs_fstat: called\n"); |
5488 | |
|
5489 | 0 | tsk_fprintf(hFile, "FILE SYSTEM INFORMATION\n"); |
5490 | 0 | tsk_fprintf(hFile, "--------------------------------------------\n"); |
5491 | |
|
5492 | 0 | tsk_fprintf(hFile, "File System Type: "); |
5493 | 0 | if (tsk_getu16(fs->endian, hfs->fs->signature) == HFS_VH_SIG_HFSPLUS) |
5494 | 0 | tsk_fprintf(hFile, "HFS+\n"); |
5495 | 0 | else if (tsk_getu16(fs->endian, hfs->fs->signature) == HFS_VH_SIG_HFSX) |
5496 | 0 | tsk_fprintf(hFile, "HFSX\n"); |
5497 | 0 | else |
5498 | 0 | tsk_fprintf(hFile, "Unknown\n"); |
5499 | | |
5500 | | // print name and number of version |
5501 | 0 | tsk_fprintf(hFile, "File System Version: "); |
5502 | 0 | switch (tsk_getu16(fs->endian, hfs->fs->version)) { |
5503 | 0 | case 4: |
5504 | 0 | tsk_fprintf(hFile, "HFS+\n"); |
5505 | 0 | break; |
5506 | 0 | case 5: |
5507 | 0 | tsk_fprintf(hFile, "HFSX\n"); |
5508 | 0 | break; |
5509 | 0 | default: |
5510 | 0 | tsk_fprintf(hFile, "Unknown (%" PRIu16 ")\n", |
5511 | 0 | tsk_getu16(fs->endian, hfs->fs->version)); |
5512 | 0 | break; |
5513 | 0 | } |
5514 | | |
5515 | 0 | if (tsk_getu16(fs->endian, hfs->fs->signature) == HFS_VH_SIG_HFSX) { |
5516 | 0 | tsk_fprintf(hFile, "Case Sensitive: %s\n", |
5517 | 0 | hfs->is_case_sensitive ? "yes" : "no"); |
5518 | 0 | } |
5519 | |
|
5520 | 0 | if (hfs->hfs_wrapper_offset > 0) { |
5521 | 0 | tsk_fprintf(hFile, |
5522 | 0 | "File system is embedded in an HFS wrapper at offset %" PRIdOFF |
5523 | 0 | "\n", hfs->hfs_wrapper_offset); |
5524 | 0 | } |
5525 | |
|
5526 | 0 | tsk_fprintf(hFile, "\nVolume Name: "); |
5527 | 0 | if (print_inode_name(hFile, fs, HFS_ROOT_INUM)) |
5528 | 0 | return 1; |
5529 | 0 | tsk_fprintf(hFile, "\n"); |
5530 | |
|
5531 | 0 | tsk_fprintf(hFile, "Volume Identifier: %08" PRIx32 "%08" PRIx32 "\n", |
5532 | 0 | tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_ID1]), |
5533 | 0 | tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_ID2])); |
5534 | | |
5535 | | |
5536 | | // print last mounted info |
5537 | 0 | tsk_fprintf(hFile, "\nLast Mounted By: "); |
5538 | 0 | if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_HFSPLUS) |
5539 | 0 | tsk_fprintf(hFile, "Mac OS X\n"); |
5540 | 0 | else if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_HFSJ) |
5541 | 0 | tsk_fprintf(hFile, "Mac OS X, Journaled\n"); |
5542 | 0 | else if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_FSK) |
5543 | 0 | tsk_fprintf(hFile, "failed journal replay\n"); |
5544 | 0 | else if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_FSCK) |
5545 | 0 | tsk_fprintf(hFile, "fsck_hfs\n"); |
5546 | 0 | else if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_OS89) |
5547 | 0 | tsk_fprintf(hFile, "Mac OS 8.1 - 9.2.2\n"); |
5548 | 0 | else |
5549 | 0 | tsk_fprintf(hFile, "Unknown (%" PRIx32 "\n", |
5550 | 0 | tsk_getu32(fs->endian, sb->last_mnt_ver)); |
5551 | | |
5552 | | /* State of the file system */ |
5553 | 0 | if ((tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_UNMOUNTED) |
5554 | 0 | && (!(tsk_getu32(fs->endian, |
5555 | 0 | hfs->fs->attr) & HFS_VH_ATTR_INCONSISTENT))) |
5556 | 0 | tsk_fprintf(hFile, "Volume Unmounted Properly\n"); |
5557 | 0 | else |
5558 | 0 | tsk_fprintf(hFile, "Volume Unmounted Improperly\n"); |
5559 | |
|
5560 | 0 | tsk_fprintf(hFile, "Mount Count: %" PRIu32 "\n", |
5561 | 0 | tsk_getu32(fs->endian, sb->write_cnt)); |
5562 | | |
5563 | | |
5564 | | // Dates |
5565 | | // (creation date is in local time zone, not UTC, according to TN 1150) |
5566 | 0 | mac_time = |
5567 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, hfs->fs->cr_date)); |
5568 | 0 | tsk_fprintf(hFile, "\nCreation Date: \t%s\n", |
5569 | 0 | tsk_fs_time_to_str(mktime(gmtime(&mac_time)), timeBuf)); |
5570 | |
|
5571 | 0 | mac_time = |
5572 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, hfs->fs->m_date)); |
5573 | 0 | tsk_fprintf(hFile, "Last Written Date: \t%s\n", |
5574 | 0 | tsk_fs_time_to_str(mac_time, timeBuf)); |
5575 | |
|
5576 | 0 | mac_time = |
5577 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, |
5578 | 0 | hfs->fs->bkup_date)); |
5579 | 0 | tsk_fprintf(hFile, "Last Backup Date: \t%s\n", |
5580 | 0 | tsk_fs_time_to_str(mac_time, timeBuf)); |
5581 | |
|
5582 | 0 | mac_time = |
5583 | 0 | hfs_convert_2_unix_time(tsk_getu32(fs->endian, hfs->fs->chk_date)); |
5584 | 0 | tsk_fprintf(hFile, "Last Checked Date: \t%s\n", |
5585 | 0 | tsk_fs_time_to_str(mac_time, timeBuf)); |
5586 | | |
5587 | |
|
5588 | 0 | if (tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_SOFTWARE_LOCK) |
5589 | 0 | tsk_fprintf(hFile, "Software write protect enabled\n"); |
5590 | | |
5591 | | /* Print journal information */ |
5592 | 0 | if (tsk_getu32(fs->endian, sb->attr) & HFS_VH_ATTR_JOURNALED) { |
5593 | 0 | tsk_fprintf(hFile, "\nJournal Info Block: %" PRIu32 "\n", |
5594 | 0 | tsk_getu32(fs->endian, sb->jinfo_blk)); |
5595 | 0 | } |
5596 | |
|
5597 | 0 | tsk_fprintf(hFile, "\nMETADATA INFORMATION\n"); |
5598 | 0 | tsk_fprintf(hFile, "--------------------------------------------\n"); |
5599 | |
|
5600 | 0 | tsk_fprintf(hFile, "Range: %" PRIuINUM " - %" PRIuINUM "\n", |
5601 | 0 | fs->first_inum, fs->last_inum); |
5602 | |
|
5603 | 0 | inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_BOOT]); |
5604 | 0 | tsk_fprintf(hFile, "Bootable Folder ID: %" PRIuINUM, inode); |
5605 | 0 | if (inode > 0) |
5606 | 0 | print_inode_file(hFile, fs, inode); |
5607 | 0 | tsk_fprintf(hFile, "\n"); |
5608 | |
|
5609 | 0 | inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_START]); |
5610 | 0 | tsk_fprintf(hFile, "Startup App ID: %" PRIuINUM, inode); |
5611 | 0 | if (inode > 0) |
5612 | 0 | print_inode_file(hFile, fs, inode); |
5613 | 0 | tsk_fprintf(hFile, "\n"); |
5614 | |
|
5615 | 0 | inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_OPEN]); |
5616 | 0 | tsk_fprintf(hFile, "Startup Open Folder ID: %" PRIuINUM, inode); |
5617 | 0 | if (inode > 0) |
5618 | 0 | print_inode_file(hFile, fs, inode); |
5619 | 0 | tsk_fprintf(hFile, "\n"); |
5620 | |
|
5621 | 0 | inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_BOOT9]); |
5622 | 0 | tsk_fprintf(hFile, "Mac OS 8/9 Blessed System Folder ID: %" PRIuINUM, |
5623 | 0 | inode); |
5624 | 0 | if (inode > 0) |
5625 | 0 | print_inode_file(hFile, fs, inode); |
5626 | 0 | tsk_fprintf(hFile, "\n"); |
5627 | |
|
5628 | 0 | inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_BOOTX]); |
5629 | 0 | tsk_fprintf(hFile, "Mac OS X Blessed System Folder ID: %" PRIuINUM, |
5630 | 0 | inode); |
5631 | 0 | if (inode > 0) |
5632 | 0 | print_inode_file(hFile, fs, inode); |
5633 | 0 | tsk_fprintf(hFile, "\n"); |
5634 | |
|
5635 | 0 | tsk_fprintf(hFile, "Number of files: %" PRIu32 "\n", |
5636 | 0 | tsk_getu32(fs->endian, sb->file_cnt)); |
5637 | 0 | tsk_fprintf(hFile, "Number of folders: %" PRIu32 "\n", |
5638 | 0 | tsk_getu32(fs->endian, sb->fldr_cnt)); |
5639 | | |
5640 | |
|
5641 | 0 | tsk_fprintf(hFile, "\nCONTENT INFORMATION\n"); |
5642 | 0 | tsk_fprintf(hFile, "--------------------------------------------\n"); |
5643 | |
|
5644 | 0 | tsk_fprintf(hFile, "Block Range: %" PRIuDADDR " - %" PRIuDADDR "\n", |
5645 | 0 | fs->first_block, fs->last_block); |
5646 | |
|
5647 | 0 | if (fs->last_block != fs->last_block_act) |
5648 | 0 | tsk_fprintf(hFile, |
5649 | 0 | "Total Range in Image: %" PRIuDADDR " - %" PRIuDADDR "\n", |
5650 | 0 | fs->first_block, fs->last_block_act); |
5651 | |
|
5652 | 0 | tsk_fprintf(hFile, "Allocation Block Size: %u\n", fs->block_size); |
5653 | |
|
5654 | 0 | tsk_fprintf(hFile, "Number of Free Blocks: %" PRIu32 "\n", |
5655 | 0 | tsk_getu32(fs->endian, sb->free_blks)); |
5656 | |
|
5657 | 0 | if (tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_BADBLOCKS) |
5658 | 0 | tsk_fprintf(hFile, "Volume has bad blocks\n"); |
5659 | |
|
5660 | 0 | return 0; |
5661 | 0 | } |
5662 | | |
5663 | | |
5664 | | /************************* istat *******************************/ |
5665 | | |
5666 | | |
5667 | | /** |
5668 | | * Text encoding names defined in TN1150, Table 2. |
5669 | | */ |
5670 | | static char * |
5671 | | text_encoding_name(uint32_t enc) |
5672 | 0 | { |
5673 | 0 | switch (enc) { |
5674 | 0 | case 0: |
5675 | 0 | return "MacRoman"; |
5676 | 0 | case 1: |
5677 | 0 | return "MacJapanese"; |
5678 | 0 | case 2: |
5679 | 0 | return "MacChineseTrad"; |
5680 | 0 | case 4: |
5681 | 0 | return "MacKorean"; |
5682 | 0 | case 5: |
5683 | 0 | return "MacArabic"; |
5684 | 0 | case 6: |
5685 | 0 | return "MacHebrew"; |
5686 | 0 | case 7: |
5687 | 0 | return "MacGreek"; |
5688 | 0 | case 8: |
5689 | 0 | return "MacCyrillic"; |
5690 | 0 | case 9: |
5691 | 0 | return "MacDevanagari"; |
5692 | 0 | case 10: |
5693 | 0 | return "MacGurmukhi"; |
5694 | 0 | case 11: |
5695 | 0 | return "MacGujarati"; |
5696 | 0 | case 12: |
5697 | 0 | return "MacOriya"; |
5698 | 0 | case 13: |
5699 | 0 | return "MacBengali"; |
5700 | 0 | case 14: |
5701 | 0 | return "MacTamil"; |
5702 | 0 | case 15: |
5703 | 0 | return "Telugu"; |
5704 | 0 | case 16: |
5705 | 0 | return "MacKannada"; |
5706 | 0 | case 17: |
5707 | 0 | return "MacMalayalam"; |
5708 | 0 | case 18: |
5709 | 0 | return "MacSinhalese"; |
5710 | 0 | case 19: |
5711 | 0 | return "MacBurmese"; |
5712 | 0 | case 20: |
5713 | 0 | return "MacKhmer"; |
5714 | 0 | case 21: |
5715 | 0 | return "MacThai"; |
5716 | 0 | case 22: |
5717 | 0 | return "MacLaotian"; |
5718 | 0 | case 23: |
5719 | 0 | return "MacGeorgian"; |
5720 | 0 | case 24: |
5721 | 0 | return "MacArmenian"; |
5722 | 0 | case 25: |
5723 | 0 | return "MacChineseSimp"; |
5724 | 0 | case 26: |
5725 | 0 | return "MacTibetan"; |
5726 | 0 | case 27: |
5727 | 0 | return "MacMongolian"; |
5728 | 0 | case 28: |
5729 | 0 | return "MacEthiopic"; |
5730 | 0 | case 29: |
5731 | 0 | return "MacCentralEurRoman"; |
5732 | 0 | case 30: |
5733 | 0 | return "MacVietnamese"; |
5734 | 0 | case 31: |
5735 | 0 | return "MacExtArabic"; |
5736 | 0 | case 33: |
5737 | 0 | return "MacSymbol"; |
5738 | 0 | case 34: |
5739 | 0 | return "MacDingbats"; |
5740 | 0 | case 35: |
5741 | 0 | return "MacTurkish"; |
5742 | 0 | case 36: |
5743 | 0 | return "MacCroatian"; |
5744 | 0 | case 37: |
5745 | 0 | return "MacIcelandic"; |
5746 | 0 | case 38: |
5747 | 0 | return "MacRomanian"; |
5748 | 0 | case 49: |
5749 | 0 | case 140: |
5750 | 0 | return "MacFarsi"; |
5751 | 0 | case 48: |
5752 | 0 | case 152: |
5753 | 0 | return "MacUkrainian"; |
5754 | 0 | default: |
5755 | 0 | return "Unknown encoding"; |
5756 | 0 | } |
5757 | 0 | } |
5758 | | |
/* Number of address entries printed per output line by output_print_addr(). */
#define HFS_PRINT_WIDTH 8
/* Accumulator used by print_addr_act() / output_print_addr() to coalesce
 * consecutive block addresses into "start-end" range strings. */
typedef struct {
    FILE *hFile;                /* handle that the addresses are printed to */
    int idx;                    /* entries printed so far on the current line */
    TSK_DADDR_T startBlock;     /* first block of the run being accumulated */
    uint32_t blockCount;        /* number of contiguous blocks in the run */
    unsigned char accumulating; /* non-zero while a run is in progress */
} HFS_PRINT_ADDR;
5767 | | |
5768 | | static void |
5769 | | output_print_addr(HFS_PRINT_ADDR * print) |
5770 | 0 | { |
5771 | 0 | if (!print->accumulating) |
5772 | 0 | return; |
5773 | 0 | if (print->blockCount == 1) { |
5774 | 0 | tsk_fprintf(print->hFile, "%" PRIuDADDR " ", print->startBlock); |
5775 | 0 | print->idx += 1; |
5776 | 0 | } |
5777 | 0 | else if (print->blockCount > 1) { |
5778 | 0 | tsk_fprintf(print->hFile, "%" PRIuDADDR "-%" PRIuDADDR " ", |
5779 | 0 | print->startBlock, print->startBlock + print->blockCount - 1); |
5780 | 0 | print->idx += 2; |
5781 | 0 | } |
5782 | 0 | if (print->idx >= HFS_PRINT_WIDTH) { |
5783 | 0 | tsk_fprintf(print->hFile, "\n"); |
5784 | 0 | print->idx = 0; |
5785 | 0 | } |
5786 | 0 | } |
5787 | | |
5788 | | static TSK_WALK_RET_ENUM |
5789 | | print_addr_act(TSK_FS_FILE * fs_file, TSK_OFF_T a_off, TSK_DADDR_T addr, |
5790 | | char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM flags, void *ptr) |
5791 | 0 | { |
5792 | 0 | HFS_PRINT_ADDR *print = (HFS_PRINT_ADDR *) ptr; |
5793 | |
|
5794 | 0 | if (print->accumulating) { |
5795 | 0 | if (addr == print->startBlock + print->blockCount) { |
5796 | 0 | ++print->blockCount; |
5797 | 0 | } |
5798 | 0 | else { |
5799 | 0 | output_print_addr(print); |
5800 | |
|
5801 | 0 | print->startBlock = addr; |
5802 | 0 | print->blockCount = 1; |
5803 | 0 | } |
5804 | 0 | } |
5805 | 0 | else { |
5806 | 0 | print->startBlock = addr; |
5807 | 0 | print->blockCount = 1; |
5808 | 0 | print->accumulating = TRUE; |
5809 | 0 | } |
5810 | |
|
5811 | 0 | return TSK_WALK_CONT; |
5812 | 0 | } |
5813 | | |
5814 | | /** |
5815 | | * Print details on a specific file to a file handle. |
5816 | | * |
5817 | | * @param fs File system file is located in |
5818 | | * @param hFile File name to print text to |
5819 | | * @param inum Address of file in file system |
5820 | | * @param numblock The number of blocks in file to force print (can go beyond file size) |
5821 | | * @param sec_skew Clock skew in seconds to also print times in |
5822 | | * |
5823 | | * @returns 1 on error and 0 on success |
5824 | | */ |
5825 | | static uint8_t |
5826 | | hfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile, TSK_INUM_T inum, |
5827 | | TSK_DADDR_T numblock, int32_t sec_skew) |
5828 | 0 | { |
5829 | 0 | HFS_INFO *hfs = (HFS_INFO *) fs; |
5830 | 0 | TSK_FS_FILE *fs_file; |
5831 | 0 | char hfs_mode[12]; |
5832 | 0 | HFS_PRINT_ADDR print; |
5833 | 0 | HFS_ENTRY entry; |
5834 | 0 | char timeBuf[128]; |
5835 | | // Compression ATTR, if there is one: |
5836 | 0 | const TSK_FS_ATTR *compressionAttr = NULL; |
5837 | 0 | RES_DESCRIPTOR *rd; // descriptor of a resource |
5838 | |
|
5839 | 0 | tsk_error_reset(); |
5840 | |
|
5841 | 0 | if (tsk_verbose) |
5842 | 0 | tsk_fprintf(stderr, |
5843 | 0 | "hfs_istat: inum: %" PRIuINUM " numblock: %" PRIu32 "\n", |
5844 | 0 | inum, numblock); |
5845 | |
|
5846 | 0 | if ((fs_file = tsk_fs_file_open_meta(fs, NULL, inum)) == NULL) { |
5847 | 0 | error_returned("hfs_istat: getting metadata for the file"); |
5848 | 0 | return 1; |
5849 | 0 | } |
5850 | | |
5851 | 0 | if (inum >= HFS_FIRST_USER_CNID) { |
5852 | 0 | int rslt; |
5853 | 0 | tsk_fprintf(hFile, "File Path: "); |
5854 | 0 | rslt = print_parent_path(hFile, fs, inum); |
5855 | 0 | if (rslt != 0) |
5856 | 0 | tsk_fprintf(hFile, " Error in printing path\n"); |
5857 | 0 | else |
5858 | 0 | tsk_fprintf(hFile, "\n"); |
5859 | 0 | } |
5860 | 0 | else { |
5861 | | // All of the files in this inum range have names without nulls, |
5862 | | // slashes or control characters. So, it is OK to print this UTF8 |
5863 | | // string this way. |
5864 | 0 | if (fs_file->meta->name2 != NULL) |
5865 | 0 | tsk_fprintf(hFile, "File Name: %s\n", |
5866 | 0 | fs_file->meta->name2->name); |
5867 | 0 | } |
5868 | |
|
5869 | 0 | tsk_fprintf(hFile, "Catalog Record: %" PRIuINUM "\n", inum); |
5870 | 0 | tsk_fprintf(hFile, "%sAllocated\n", |
5871 | 0 | (fs_file->meta->flags & TSK_FS_META_FLAG_UNALLOC) ? "Not " : ""); |
5872 | |
|
5873 | 0 | tsk_fprintf(hFile, "Type:\t"); |
5874 | 0 | if (fs_file->meta->type == TSK_FS_META_TYPE_REG) |
5875 | 0 | tsk_fprintf(hFile, "File\n"); |
5876 | 0 | else if (TSK_FS_IS_DIR_META(fs_file->meta->type)) |
5877 | 0 | tsk_fprintf(hFile, "Folder\n"); |
5878 | 0 | else |
5879 | 0 | tsk_fprintf(hFile, "\n"); |
5880 | |
|
5881 | 0 | tsk_fs_meta_make_ls(fs_file->meta, hfs_mode, sizeof(hfs_mode)); |
5882 | 0 | tsk_fprintf(hFile, "Mode:\t%s\n", hfs_mode); |
5883 | 0 | tsk_fprintf(hFile, "Size:\t%" PRIdOFF "\n", fs_file->meta->size); |
5884 | |
|
5885 | 0 | if (fs_file->meta->link) |
5886 | 0 | tsk_fprintf(hFile, "Symbolic link to:\t%s\n", fs_file->meta->link); |
5887 | |
|
5888 | 0 | tsk_fprintf(hFile, "uid / gid: %" PRIuUID " / %" PRIuGID "\n", |
5889 | 0 | fs_file->meta->uid, fs_file->meta->gid); |
5890 | |
|
5891 | 0 | tsk_fprintf(hFile, "Link count:\t%d\n", fs_file->meta->nlink); |
5892 | |
|
5893 | 0 | if (hfs_cat_file_lookup(hfs, inum, &entry, TRUE) == 0) { |
5894 | 0 | hfs_uni_str *nm = &entry.thread.name; |
5895 | 0 | char name_buf[HFS_MAXNAMLEN + 1]; |
5896 | 0 | TSK_INUM_T par_cnid; // parent CNID |
5897 | |
|
5898 | 0 | tsk_fprintf(hFile, "\n"); |
5899 | 0 | hfs_UTF16toUTF8(fs, nm->unicode, (int) tsk_getu16(fs->endian, |
5900 | 0 | nm->length), &name_buf[0], HFS_MAXNAMLEN + 1, |
5901 | 0 | HFS_U16U8_FLAG_REPLACE_SLASH | HFS_U16U8_FLAG_REPLACE_CONTROL); |
5902 | 0 | tsk_fprintf(hFile, "File Name: %s\n", name_buf); |
5903 | | |
5904 | | // Test here to see if this is a hard link. |
5905 | 0 | par_cnid = tsk_getu32(fs->endian, &(entry.thread.parent_cnid)); |
5906 | 0 | if ((hfs->has_meta_dir_crtime && par_cnid == hfs->meta_dir_inum) || |
5907 | 0 | (hfs->has_meta_crtime && par_cnid == hfs->meta_inum)) { |
5908 | 0 | int instr = strncmp(name_buf, "iNode", 5); |
5909 | 0 | int drstr = strncmp(name_buf, "dir_", 4); |
5910 | |
|
5911 | 0 | if (instr == 0 && |
5912 | 0 | hfs->has_meta_crtime && par_cnid == hfs->meta_inum) { |
5913 | 0 | tsk_fprintf(hFile, "This is a hard link to a file\n"); |
5914 | 0 | } |
5915 | 0 | else if (drstr == 0 && |
5916 | 0 | hfs->has_meta_dir_crtime && |
5917 | 0 | par_cnid == hfs->meta_dir_inum) { |
5918 | 0 | tsk_fprintf(hFile, "This is a hard link to a folder.\n"); |
5919 | 0 | } |
5920 | 0 | } |
5921 | | |
5922 | | /* The cat.perm union contains file-type specific values. |
5923 | | * Print them if they are relevant. */ |
5924 | 0 | if ((fs_file->meta->type == TSK_FS_META_TYPE_CHR) || |
5925 | 0 | (fs_file->meta->type == TSK_FS_META_TYPE_BLK)) { |
5926 | 0 | tsk_fprintf(hFile, "Device ID:\t%" PRIu32 "\n", |
5927 | 0 | tsk_getu32(fs->endian, entry.cat.std.perm.special.raw)); |
5928 | 0 | } |
5929 | 0 | else if ((tsk_getu32(fs->endian, |
5930 | 0 | entry.cat.std.u_info.file_type) == |
5931 | 0 | HFS_HARDLINK_FILE_TYPE) |
5932 | 0 | && (tsk_getu32(fs->endian, |
5933 | 0 | entry.cat.std.u_info.file_cr) == |
5934 | 0 | HFS_HARDLINK_FILE_CREATOR)) { |
5935 | | // technically, the creation date of this item should be the same as either the |
5936 | | // creation date of the "HFS+ Private Data" folder or the creation date of the root folder |
5937 | 0 | tsk_fprintf(hFile, "Hard link inode number\t %" PRIu32 "\n", |
5938 | 0 | tsk_getu32(fs->endian, entry.cat.std.perm.special.inum)); |
5939 | 0 | } |
5940 | |
|
5941 | 0 | tsk_fprintf(hFile, "Admin flags: %" PRIu8, |
5942 | 0 | entry.cat.std.perm.a_flags); |
5943 | 0 | if (entry.cat.std.perm.a_flags != 0) { |
5944 | 0 | tsk_fprintf(hFile, " - "); |
5945 | 0 | if (entry.cat.std.perm.a_flags & HFS_PERM_AFLAG_ARCHIVED) |
5946 | 0 | tsk_fprintf(hFile, "archived "); |
5947 | 0 | if (entry.cat.std.perm.a_flags & HFS_PERM_AFLAG_IMMUTABLE) |
5948 | 0 | tsk_fprintf(hFile, "immutable "); |
5949 | 0 | if (entry.cat.std.perm.a_flags & HFS_PERM_AFLAG_APPEND) |
5950 | 0 | tsk_fprintf(hFile, "append-only "); |
5951 | 0 | } |
5952 | 0 | tsk_fprintf(hFile, "\n"); |
5953 | |
|
5954 | 0 | tsk_fprintf(hFile, "Owner flags: %" PRIu8, |
5955 | 0 | entry.cat.std.perm.o_flags); |
5956 | 0 | if (entry.cat.std.perm.o_flags != 0) { |
5957 | 0 | tsk_fprintf(hFile, " - "); |
5958 | 0 | if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_NODUMP) |
5959 | 0 | tsk_fprintf(hFile, "no-dump "); |
5960 | 0 | if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_IMMUTABLE) |
5961 | 0 | tsk_fprintf(hFile, "immutable "); |
5962 | 0 | if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_APPEND) |
5963 | 0 | tsk_fprintf(hFile, "append-only "); |
5964 | 0 | if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_OPAQUE) |
5965 | 0 | tsk_fprintf(hFile, "opaque "); |
5966 | 0 | if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_COMPRESSED) |
5967 | 0 | tsk_fprintf(hFile, "compressed "); |
5968 | 0 | } |
5969 | 0 | tsk_fprintf(hFile, "\n"); |
5970 | |
|
5971 | 0 | if (tsk_getu16(fs->endian, |
5972 | 0 | entry.cat.std.flags) & HFS_FILE_FLAG_LOCKED) |
5973 | 0 | tsk_fprintf(hFile, "Locked\n"); |
5974 | 0 | if (tsk_getu16(fs->endian, |
5975 | 0 | entry.cat.std.flags) & HFS_FILE_FLAG_ATTR) |
5976 | 0 | tsk_fprintf(hFile, "Has extended attributes\n"); |
5977 | 0 | if (tsk_getu16(fs->endian, |
5978 | 0 | entry.cat.std.flags) & HFS_FILE_FLAG_ACL) |
5979 | 0 | tsk_fprintf(hFile, "Has security data (ACLs)\n"); |
5980 | | |
5981 | | // File_type and file_cr are not relevant for Folders |
5982 | 0 | if ( !TSK_FS_IS_DIR_META(fs_file->meta->type)){ |
5983 | 0 | int windx; // loop index |
5984 | 0 | tsk_fprintf(hFile, |
5985 | 0 | "File type:\t%04" PRIx32 " ", |
5986 | 0 | tsk_getu32(fs->endian, entry.cat.std.u_info.file_type)); |
5987 | |
|
5988 | 0 | for (windx = 0; windx < 4; ++windx) { |
5989 | 0 | uint8_t cu = entry.cat.std.u_info.file_type[windx]; |
5990 | 0 | if (cu >= 32 && cu <= 126) |
5991 | 0 | tsk_fprintf(hFile, "%c", (char) cu); |
5992 | 0 | else |
5993 | 0 | tsk_fprintf(hFile, " "); |
5994 | 0 | } |
5995 | 0 | tsk_fprintf(hFile, "\n"); |
5996 | 0 | tsk_fprintf(hFile, |
5997 | 0 | "File creator:\t%04" PRIx32 " ", |
5998 | 0 | tsk_getu32(fs->endian, entry.cat.std.u_info.file_cr)); |
5999 | 0 | for (windx = 0; windx < 4; ++windx) { |
6000 | 0 | uint8_t cu = entry.cat.std.u_info.file_cr[windx]; |
6001 | 0 | if (cu >= 32 && cu <= 126) |
6002 | 0 | tsk_fprintf(hFile, "%c", (char) cu); |
6003 | 0 | else |
6004 | 0 | tsk_fprintf(hFile, " "); |
6005 | 0 | } |
6006 | 0 | tsk_fprintf(hFile, "\n"); |
6007 | 0 | } // END if(not folder) |
6008 | |
|
6009 | 0 | if (tsk_getu16(fs->endian, |
6010 | 0 | entry.cat.std.u_info.flags) & HFS_FINDER_FLAG_NAME_LOCKED) |
6011 | 0 | tsk_fprintf(hFile, "Name locked\n"); |
6012 | 0 | if (tsk_getu16(fs->endian, |
6013 | 0 | entry.cat.std.u_info.flags) & HFS_FINDER_FLAG_HAS_BUNDLE) |
6014 | 0 | tsk_fprintf(hFile, "Has bundle\n"); |
6015 | 0 | if (tsk_getu16(fs->endian, |
6016 | 0 | entry.cat.std.u_info.flags) & HFS_FINDER_FLAG_IS_INVISIBLE) |
6017 | 0 | tsk_fprintf(hFile, "Is invisible\n"); |
6018 | 0 | if (tsk_getu16(fs->endian, |
6019 | 0 | entry.cat.std.u_info.flags) & HFS_FINDER_FLAG_IS_ALIAS) |
6020 | 0 | tsk_fprintf(hFile, "Is alias\n"); |
6021 | |
|
6022 | 0 | tsk_fprintf(hFile, "Text encoding:\t%" PRIx32 " = %s\n", |
6023 | 0 | tsk_getu32(fs->endian, entry.cat.std.text_enc), |
6024 | 0 | text_encoding_name(tsk_getu32(fs->endian, |
6025 | 0 | entry.cat.std.text_enc))); |
6026 | |
|
6027 | 0 | if (tsk_getu16(fs->endian, |
6028 | 0 | entry.cat.std.rec_type) == HFS_FILE_RECORD) { |
6029 | 0 | tsk_fprintf(hFile, "Resource fork size:\t%" PRIu64 "\n", |
6030 | 0 | tsk_getu64(fs->endian, entry.cat.resource.logic_sz)); |
6031 | 0 | } |
6032 | 0 | } |
6033 | |
|
6034 | 0 | if (sec_skew != 0) { |
6035 | 0 | tsk_fprintf(hFile, "\nAdjusted times:\n"); |
6036 | 0 | if (fs_file->meta->mtime) |
6037 | 0 | fs_file->meta->mtime -= sec_skew; |
6038 | 0 | if (fs_file->meta->atime) |
6039 | 0 | fs_file->meta->atime -= sec_skew; |
6040 | 0 | if (fs_file->meta->ctime) |
6041 | 0 | fs_file->meta->ctime -= sec_skew; |
6042 | 0 | if (fs_file->meta->crtime) |
6043 | 0 | fs_file->meta->crtime -= sec_skew; |
6044 | 0 | if (fs_file->meta->time2.hfs.bkup_time) |
6045 | 0 | fs_file->meta->time2.hfs.bkup_time -= sec_skew; |
6046 | |
|
6047 | 0 | tsk_fprintf(hFile, "Created:\t%s\n", |
6048 | 0 | tsk_fs_time_to_str(fs_file->meta->crtime, timeBuf)); |
6049 | 0 | tsk_fprintf(hFile, "Content Modified:\t%s\n", |
6050 | 0 | tsk_fs_time_to_str(fs_file->meta->mtime, timeBuf)); |
6051 | 0 | tsk_fprintf(hFile, "Attributes Modified:\t%s\n", |
6052 | 0 | tsk_fs_time_to_str(fs_file->meta->ctime, timeBuf)); |
6053 | 0 | tsk_fprintf(hFile, "Accessed:\t%s\n", |
6054 | 0 | tsk_fs_time_to_str(fs_file->meta->atime, timeBuf)); |
6055 | 0 | tsk_fprintf(hFile, "Backed Up:\t%s\n", |
6056 | 0 | tsk_fs_time_to_str(fs_file->meta->time2.hfs.bkup_time, |
6057 | 0 | timeBuf)); |
6058 | |
|
6059 | 0 | if (fs_file->meta->mtime) |
6060 | 0 | fs_file->meta->mtime += sec_skew; |
6061 | 0 | if (fs_file->meta->atime) |
6062 | 0 | fs_file->meta->atime += sec_skew; |
6063 | 0 | if (fs_file->meta->ctime) |
6064 | 0 | fs_file->meta->ctime += sec_skew; |
6065 | 0 | if (fs_file->meta->crtime) |
6066 | 0 | fs_file->meta->crtime += sec_skew; |
6067 | 0 | if (fs_file->meta->time2.hfs.bkup_time) |
6068 | 0 | fs_file->meta->time2.hfs.bkup_time += sec_skew; |
6069 | |
|
6070 | 0 | tsk_fprintf(hFile, "\nOriginal times:\n"); |
6071 | 0 | } |
6072 | 0 | else { |
6073 | 0 | tsk_fprintf(hFile, "\nTimes:\n"); |
6074 | 0 | } |
6075 | |
|
6076 | 0 | tsk_fprintf(hFile, "Created:\t%s\n", |
6077 | 0 | tsk_fs_time_to_str(fs_file->meta->crtime, timeBuf)); |
6078 | 0 | tsk_fprintf(hFile, "Content Modified:\t%s\n", |
6079 | 0 | tsk_fs_time_to_str(fs_file->meta->mtime, timeBuf)); |
6080 | 0 | tsk_fprintf(hFile, "Attributes Modified:\t%s\n", |
6081 | 0 | tsk_fs_time_to_str(fs_file->meta->ctime, timeBuf)); |
6082 | 0 | tsk_fprintf(hFile, "Accessed:\t%s\n", |
6083 | 0 | tsk_fs_time_to_str(fs_file->meta->atime, timeBuf)); |
6084 | 0 | tsk_fprintf(hFile, "Backed Up:\t%s\n", |
6085 | 0 | tsk_fs_time_to_str(fs_file->meta->time2.hfs.bkup_time, timeBuf)); |
6086 | | |
6087 | | // IF this is a regular file, then print out the blocks of the DATA and RSRC forks. |
6088 | 0 | if (tsk_getu16(fs->endian, entry.cat.std.rec_type) == HFS_FILE_RECORD) { |
6089 | | // Only print DATA fork blocks if this file is NOT compressed |
6090 | | // N.B., a compressed file has no data fork, and tsk_fs_file_walk() will |
6091 | | // do the wrong thing! |
6092 | 0 | if (!(entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_COMPRESSED)) { |
6093 | |
|
6094 | 0 | if (!(istat_flags & TSK_FS_ISTAT_RUNLIST)) { |
6095 | 0 | tsk_fprintf(hFile, "\nData Fork Blocks:\n"); |
6096 | 0 | print.idx = 0; |
6097 | 0 | print.hFile = hFile; |
6098 | 0 | print.accumulating = FALSE; |
6099 | 0 | print.startBlock = 0; |
6100 | 0 | print.blockCount = 0; |
6101 | |
|
6102 | 0 | if (tsk_fs_file_walk_type(fs_file, |
6103 | 0 | TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, |
6104 | 0 | (TSK_FS_FILE_WALK_FLAG_AONLY | |
6105 | 0 | TSK_FS_FILE_WALK_FLAG_SLACK), print_addr_act, |
6106 | 0 | (void *)&print)) { |
6107 | 0 | tsk_fprintf(hFile, "\nError reading file data fork\n"); |
6108 | 0 | tsk_error_print(hFile); |
6109 | 0 | tsk_error_reset(); |
6110 | 0 | } |
6111 | 0 | else { |
6112 | 0 | output_print_addr(&print); |
6113 | 0 | if (print.idx != 0) |
6114 | 0 | tsk_fprintf(hFile, "\n"); |
6115 | 0 | } |
6116 | 0 | } |
6117 | 0 | } |
6118 | | |
6119 | | // Only print out the blocks of the Resource fork if it has nonzero size |
6120 | 0 | if (tsk_getu64(fs->endian, entry.cat.resource.logic_sz) > 0) { |
6121 | |
|
6122 | 0 | if (! (istat_flags & TSK_FS_ISTAT_RUNLIST)) { |
6123 | 0 | tsk_fprintf(hFile, "\nResource Fork Blocks:\n"); |
6124 | |
|
6125 | 0 | print.idx = 0; |
6126 | 0 | print.hFile = hFile; |
6127 | 0 | print.accumulating = FALSE; |
6128 | 0 | print.startBlock = 0; |
6129 | 0 | print.blockCount = 0; |
6130 | |
|
6131 | 0 | if (tsk_fs_file_walk_type(fs_file, |
6132 | 0 | TSK_FS_ATTR_TYPE_HFS_RSRC, HFS_FS_ATTR_ID_RSRC, |
6133 | 0 | (TSK_FS_FILE_WALK_FLAG_AONLY | |
6134 | 0 | TSK_FS_FILE_WALK_FLAG_SLACK), print_addr_act, |
6135 | 0 | (void *)&print)) { |
6136 | 0 | tsk_fprintf(hFile, "\nError reading file resource fork\n"); |
6137 | 0 | tsk_error_print(hFile); |
6138 | 0 | tsk_error_reset(); |
6139 | 0 | } |
6140 | 0 | else { |
6141 | 0 | output_print_addr(&print); |
6142 | 0 | if (print.idx != 0) |
6143 | 0 | tsk_fprintf(hFile, "\n"); |
6144 | 0 | } |
6145 | 0 | } |
6146 | 0 | } |
6147 | 0 | } |
6148 | | |
6149 | | // Force the loading of all attributes. |
6150 | 0 | (void) tsk_fs_file_attr_get(fs_file); |
6151 | | |
6152 | | /* Print all of the attributes */ |
6153 | 0 | tsk_fprintf(hFile, "\nAttributes: \n"); |
6154 | 0 | if (fs_file->meta->attr) { |
6155 | 0 | int cnt, i; |
6156 | | |
6157 | | // cycle through the attributes |
6158 | 0 | cnt = tsk_fs_file_attr_getsize(fs_file); |
6159 | 0 | for (i = 0; i < cnt; ++i) { |
6160 | 0 | const char *type; // type of the attribute as a string |
6161 | 0 | const TSK_FS_ATTR *fs_attr = |
6162 | 0 | tsk_fs_file_attr_get_idx(fs_file, i); |
6163 | 0 | if (!fs_attr) |
6164 | 0 | continue; |
6165 | | |
6166 | 0 | type = hfs_attrTypeName((uint32_t) fs_attr->type); |
6167 | | |
6168 | | // We will need to do something better than this, in the end. |
6169 | | //type = "Data"; |
6170 | | |
6171 | | /* print the layout if it is non-resident and not "special" */ |
6172 | 0 | if (fs_attr->flags & TSK_FS_ATTR_NONRES) { |
6173 | | //NTFS_PRINT_ADDR print_addr; |
6174 | |
|
6175 | 0 | tsk_fprintf(hFile, |
6176 | 0 | "Type: %s (%" PRIu32 "-%" PRIu16 |
6177 | 0 | ") Name: %s Non-Resident%s%s%s size: %" |
6178 | 0 | PRIdOFF " init_size: %" PRIdOFF "\n", type, |
6179 | 0 | fs_attr->type, fs_attr->id, |
6180 | 0 | (fs_attr->name) ? fs_attr->name : "N/A", |
6181 | 0 | (fs_attr->flags & TSK_FS_ATTR_ENC) ? ", Encrypted" : |
6182 | 0 | "", |
6183 | 0 | (fs_attr->flags & TSK_FS_ATTR_COMP) ? ", Compressed" : |
6184 | 0 | "", |
6185 | 0 | (fs_attr->flags & TSK_FS_ATTR_SPARSE) ? ", Sparse" : |
6186 | 0 | "", fs_attr->size, fs_attr->nrd.initsize); |
6187 | |
|
6188 | 0 | if (istat_flags & TSK_FS_ISTAT_RUNLIST) { |
6189 | 0 | if (tsk_fs_attr_print(fs_attr, hFile)) { |
6190 | 0 | tsk_fprintf(hFile, "\nError creating run lists\n"); |
6191 | 0 | tsk_error_print(hFile); |
6192 | 0 | tsk_error_reset(); |
6193 | 0 | } |
6194 | 0 | } |
6195 | 0 | } // END: non-resident attribute case |
6196 | 0 | else { |
6197 | 0 | tsk_fprintf(hFile, |
6198 | 0 | "Type: %s (%" PRIu32 "-%" PRIu16 |
6199 | 0 | ") Name: %s Resident%s%s%s size: %" |
6200 | 0 | PRIdOFF "\n", |
6201 | 0 | type, |
6202 | 0 | fs_attr->type, |
6203 | 0 | fs_attr->id, |
6204 | 0 | (fs_attr->name) ? fs_attr->name : "N/A", |
6205 | 0 | (fs_attr->flags & TSK_FS_ATTR_ENC) ? ", Encrypted" : |
6206 | 0 | "", |
6207 | 0 | (fs_attr->flags & TSK_FS_ATTR_COMP) ? ", Compressed" : |
6208 | 0 | "", |
6209 | 0 | (fs_attr->flags & TSK_FS_ATTR_SPARSE) ? ", Sparse" : |
6210 | 0 | "", fs_attr->size); |
6211 | 0 | if (fs_attr->type == TSK_FS_ATTR_TYPE_HFS_COMP_REC) { |
6212 | 0 | if (compressionAttr == NULL) { |
6213 | 0 | compressionAttr = fs_attr; |
6214 | 0 | } |
6215 | 0 | else { |
6216 | | // Problem: there is more than one compression attribute |
6217 | 0 | error_detected(TSK_ERR_FS_CORRUPT, |
6218 | 0 | "hfs_istat: more than one compression attribute"); |
6219 | 0 | return 1; |
6220 | 0 | } |
6221 | 0 | } |
6222 | 0 | } // END: else (RESIDENT attribute case) |
6223 | 0 | } // END: for(;;) loop over attributes |
6224 | 0 | } // END: if(fs_file->meta->attr is non-NULL) |
6225 | | |
6226 | 0 | if ((entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_COMPRESSED) |
6227 | 0 | && (compressionAttr == NULL)) |
6228 | 0 | tsk_fprintf(hFile, |
6229 | 0 | "WARNING: Compression Flag is set, but there" |
6230 | 0 | " is no compression record for this file.\n"); |
6231 | 0 | if (((entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_COMPRESSED) == 0) |
6232 | 0 | && (compressionAttr != NULL)) |
6233 | 0 | tsk_fprintf(hFile, |
6234 | 0 | "WARNING: Compression Flag is NOT set, but there" |
6235 | 0 | " is a compression record for this file.\n"); |
6236 | | |
6237 | | // IF this is a compressed file |
6238 | 0 | if (compressionAttr != NULL) { |
6239 | 0 | const TSK_FS_ATTR *fs_attr = compressionAttr; |
6240 | 0 | ssize_t attrReadResult; |
6241 | 0 | DECMPFS_DISK_HEADER *cmph; |
6242 | 0 | uint32_t cmpType; |
6243 | 0 | uint64_t uncSize; |
6244 | 0 | uint64_t cmpSize = 0; |
6245 | | |
6246 | | // Read the attribute. It cannot be too large because it is stored in |
6247 | | // a btree node |
6248 | 0 | char *aBuf = (char *) tsk_malloc((size_t) fs_attr->size); |
6249 | 0 | if (aBuf == NULL) { |
6250 | 0 | error_returned("hfs_istat: space for a compression attribute"); |
6251 | 0 | return 1; |
6252 | 0 | } |
6253 | 0 | attrReadResult = tsk_fs_attr_read(fs_attr, (TSK_OFF_T) 0, |
6254 | 0 | aBuf, (size_t) fs_attr->size, |
6255 | 0 | (TSK_FS_FILE_READ_FLAG_ENUM) 0x00); |
6256 | 0 | if (attrReadResult == -1) { |
6257 | 0 | error_returned("hfs_istat: reading the compression attribute"); |
6258 | 0 | free(aBuf); |
6259 | 0 | return 1; |
6260 | 0 | } |
6261 | 0 | else if (attrReadResult < fs_attr->size) { |
6262 | 0 | error_detected(TSK_ERR_FS_READ, |
6263 | 0 | "hfs_istat: could not read the whole compression attribute"); |
6264 | 0 | free(aBuf); |
6265 | 0 | return 1; |
6266 | 0 | } |
6267 | | // Now, cast the attr into a compression header |
6268 | 0 | cmph = (DECMPFS_DISK_HEADER *) aBuf; |
6269 | 0 | cmpType = tsk_getu32(TSK_LIT_ENDIAN, cmph->compression_type); |
6270 | 0 | uncSize = tsk_getu64(TSK_LIT_ENDIAN, cmph->uncompressed_size); |
6271 | |
|
6272 | 0 | tsk_fprintf(hFile, "\nCompressed File:\n"); |
6273 | 0 | tsk_fprintf(hFile, " Uncompressed size: %llu\n", uncSize); |
6274 | |
|
6275 | 0 | switch (cmpType) { |
6276 | 0 | case DECMPFS_TYPE_ZLIB_ATTR: |
6277 | | // Data is inline |
6278 | 0 | { |
6279 | | // size of header, with indicator byte if uncompressed |
6280 | 0 | uint32_t off = (cmph->attr_bytes[0] & 0x0F) == 0x0F ? 17 : 16; |
6281 | 0 | cmpSize = fs_attr->size - off; |
6282 | |
|
6283 | 0 | tsk_fprintf(hFile, |
6284 | 0 | " Data follows compression record in the CMPF attribute\n" |
6285 | 0 | " %" PRIu64 " bytes of data at offset %u, %s compressed\n", |
6286 | 0 | cmpSize, off, off == 16 ? "zlib" : "not"); |
6287 | 0 | } |
6288 | 0 | break; |
6289 | | |
6290 | 0 | case DECMPFS_TYPE_LZVN_ATTR: |
6291 | | // Data is inline |
6292 | 0 | { |
6293 | | // size of header, with indicator byte if uncompressed |
6294 | 0 | uint32_t off = cmph->attr_bytes[0] == 0x06 ? 17 : 16; |
6295 | 0 | cmpSize = fs_attr->size - off; |
6296 | |
|
6297 | 0 | tsk_fprintf(hFile, |
6298 | 0 | " Data follows compression record in the CMPF attribute\n" |
6299 | 0 | " %" PRIu64 " bytes of data at offset %u, %s compressed\n", |
6300 | 0 | cmpSize, off, off == 16 ? "lzvn" : "not"); |
6301 | 0 | } |
6302 | 0 | break; |
6303 | | |
6304 | 0 | case DECMPFS_TYPE_ZLIB_RSRC: |
6305 | | // Data is zlib compressed in the resource fork |
6306 | 0 | tsk_fprintf(hFile, |
6307 | 0 | " Data is zlib compressed in the resource fork\n"); |
6308 | 0 | break; |
6309 | | |
6310 | 0 | case DECMPFS_TYPE_LZVN_RSRC: |
6311 | | // Data is lzvn compressed in the resource fork |
6312 | 0 | tsk_fprintf(hFile, |
6313 | 0 | " Data is lzvn compressed in the resource fork\n"); |
6314 | 0 | break; |
6315 | | |
6316 | 0 | default: |
6317 | 0 | tsk_fprintf(hFile, " Compression type is %u: UNKNOWN\n", |
6318 | 0 | cmpType); |
6319 | 0 | } |
6320 | | |
6321 | 0 | free(aBuf); |
6322 | |
|
6323 | 0 | if ((cmpType == DECMPFS_TYPE_ZLIB_RSRC || |
6324 | 0 | cmpType == DECMPFS_TYPE_LZVN_RSRC) |
6325 | 0 | && (tsk_getu64(fs->endian, entry.cat.resource.logic_sz) == 0)) |
6326 | 0 | tsk_fprintf(hFile, |
6327 | 0 | "WARNING: Compression record indicates compressed data" |
6328 | 0 | " in the RSRC Fork, but that fork is empty.\n"); |
6329 | 0 | } |
6330 | | |
6331 | | // This will return NULL if there is an error, or if there are no resources |
6332 | 0 | rd = hfs_parse_resource_fork(fs_file); |
6333 | | // TODO: Should check the errnum here to see if there was an error |
6334 | |
|
6335 | 0 | if (rd != NULL) { |
6336 | 0 | tsk_fprintf(hFile, "\nResources:\n"); |
6337 | 0 | while (rd) { |
6338 | 0 | tsk_fprintf(hFile, |
6339 | 0 | " Type: %s \tID: %-5u \tOffset: %-5u \tSize: %-5u \tName: %s\n", |
6340 | 0 | rd->type, rd->id, rd->offset, rd->length, rd->name); |
6341 | 0 | rd = rd->next; |
6342 | 0 | } |
6343 | 0 | } |
6344 | | // This is OK to call with NULL |
6345 | 0 | free_res_descriptor(rd); |
6346 | |
|
6347 | 0 | tsk_fs_file_close(fs_file); |
6348 | 0 | return 0; |
6349 | 0 | } |
6350 | | |
6351 | | |
6352 | | |
6353 | | static TSK_FS_ATTR_TYPE_ENUM |
6354 | | hfs_get_default_attr_type(const TSK_FS_FILE * a_file) |
6355 | 0 | { |
6356 | | // The HFS+ special files have a default attr type of "Default" |
6357 | 0 | TSK_INUM_T inum = a_file->meta->addr; |
6358 | 0 | if (inum == 3 || // Extents File |
6359 | 0 | inum == 4 || // Catalog File |
6360 | 0 | inum == 5 || // Bad Blocks File |
6361 | 0 | inum == 6 || // Block Map (Allocation File) |
6362 | 0 | inum == 7 || // Startup File |
6363 | 0 | inum == 8 || // Attributes File |
6364 | 0 | inum == 14 || // Not sure if these two will actually work. I don't see |
6365 | 0 | inum == 15) // any code to load the attrs of these files, if they exist. |
6366 | 0 | return TSK_FS_ATTR_TYPE_DEFAULT; |
6367 | | // The "regular" files and symbolic links have a DATA fork with type "DATA" |
6368 | 0 | if (a_file->meta->type == TSK_FS_META_TYPE_REG || |
6369 | 0 | a_file->meta->type == TSK_FS_META_TYPE_LNK) |
6370 | | // This should be an HFS-specific type. |
6371 | 0 | return TSK_FS_ATTR_TYPE_HFS_DATA; |
6372 | | |
6373 | | // We've got to return *something* for every file, so we return this. |
6374 | 0 | return TSK_FS_ATTR_TYPE_DEFAULT; |
6375 | 0 | } |
6376 | | |
/* hfs_close - tear down an HFS_INFO, releasing cached B-tree files,
 * metadata-directory handles, and the structure itself.
 *
 * Note the ordering: the cache lock is taken before any state is torn
 * down, and it is released and de-initialized only after every cached
 * resource has been freed. */
static void
hfs_close(TSK_FS_INFO * fs)
{
    HFS_INFO *hfs = (HFS_INFO *) fs;
    // We'll grab this lock a bit early.
    tsk_take_lock(&(hfs->metadata_dir_cache_lock));
    fs->tag = 0;  // invalidate the struct so stale pointers are detectable

    free(hfs->fs);  // the cached volume header (superblock)

    // closing catalog_file frees its attr; just clear our alias to it
    if (hfs->catalog_file) {
        tsk_fs_file_close(hfs->catalog_file);
        hfs->catalog_attr = NULL;
    }

    if (hfs->blockmap_file) {
        tsk_fs_file_close(hfs->blockmap_file);
        hfs->blockmap_attr = NULL;
    }

    // cached "HFS+ Private Data" directory (file hard links)
    if (hfs->meta_dir) {
        tsk_fs_dir_close(hfs->meta_dir);
        hfs->meta_dir = NULL;
    }

    // cached ".HFS+ Private Directory Data" directory (folder hard links)
    if (hfs->dir_meta_dir) {
        tsk_fs_dir_close(hfs->dir_meta_dir);
        hfs->dir_meta_dir = NULL;
    }

    if (hfs->extents_file) {
        tsk_fs_file_close(hfs->extents_file);
        hfs->extents_file = NULL;
    }

    tsk_release_lock(&(hfs->metadata_dir_cache_lock));
    tsk_deinit_lock(&(hfs->metadata_dir_cache_lock));

    tsk_fs_free((TSK_FS_INFO *)hfs);
}
6417 | | |
6418 | | /* hfs_open - open an hfs file system |
6419 | | * |
6420 | | * Return NULL on error (or not an HFS or HFS+ file system) |
6421 | | * */ |
6422 | | |
6423 | | TSK_FS_INFO * |
6424 | | hfs_open(TSK_IMG_INFO * img_info, TSK_OFF_T offset, |
6425 | | TSK_FS_TYPE_ENUM ftype, uint8_t test) |
6426 | 2 | { |
6427 | 2 | HFS_INFO *hfs; |
6428 | 2 | unsigned int len; |
6429 | 2 | TSK_FS_INFO *fs; |
6430 | 2 | ssize_t cnt; |
6431 | 2 | TSK_FS_FILE *file; // The root directory, or the metadata directories |
6432 | 2 | TSK_INUM_T inum; // The inum (or CNID) of the metadata directories |
6433 | 2 | int8_t result; // of tsk_fs_path2inum() |
6434 | | |
6435 | 2 | tsk_error_reset(); |
6436 | | |
6437 | 2 | if (TSK_FS_TYPE_ISHFS(ftype) == 0) { |
6438 | 0 | tsk_error_set_errno(TSK_ERR_FS_ARG); |
6439 | 0 | tsk_error_set_errstr("Invalid FS Type in hfs_open"); |
6440 | 0 | return NULL; |
6441 | 0 | } |
6442 | | |
6443 | 2 | if ((hfs = (HFS_INFO *) tsk_fs_malloc(sizeof(HFS_INFO))) == NULL) |
6444 | 0 | return NULL; |
6445 | | |
6446 | 2 | fs = &(hfs->fs_info); |
6447 | | |
6448 | 2 | fs->ftype = TSK_FS_TYPE_HFS; |
6449 | 2 | fs->duname = "Allocation Block"; |
6450 | 2 | fs->tag = TSK_FS_INFO_TAG; |
6451 | 2 | fs->flags = 0; |
6452 | | |
6453 | 2 | fs->img_info = img_info; |
6454 | 2 | fs->offset = offset; |
6455 | | |
6456 | | /* |
6457 | | * Read the superblock. |
6458 | | */ |
6459 | 2 | len = sizeof(hfs_plus_vh); |
6460 | 2 | if ((hfs->fs = (hfs_plus_vh *) tsk_malloc(len)) == NULL) { |
6461 | 0 | fs->tag = 0; |
6462 | 0 | tsk_fs_free((TSK_FS_INFO *)hfs); |
6463 | 0 | return NULL; |
6464 | 0 | } |
6465 | | |
6466 | 2 | if (hfs_checked_read_random(fs, (char *) hfs->fs, len, |
6467 | 2 | (TSK_OFF_T) HFS_VH_OFF)) { |
6468 | 0 | tsk_error_set_errstr2("hfs_open: superblock"); |
6469 | 0 | fs->tag = 0; |
6470 | 0 | free(hfs->fs); |
6471 | 0 | tsk_fs_free((TSK_FS_INFO *)hfs); |
6472 | 0 | return NULL; |
6473 | 0 | } |
6474 | | |
6475 | | /* |
6476 | | * Verify we are looking at an HFS+ image |
6477 | | */ |
6478 | 2 | if (tsk_fs_guessu16(fs, hfs->fs->signature, HFS_VH_SIG_HFSPLUS) && |
6479 | 2 | tsk_fs_guessu16(fs, hfs->fs->signature, HFS_VH_SIG_HFSX) && |
6480 | 2 | tsk_fs_guessu16(fs, hfs->fs->signature, HFS_VH_SIG_HFS)) { |
6481 | | |
6482 | 1 | fs->tag = 0; |
6483 | 1 | free(hfs->fs); |
6484 | 1 | tsk_fs_free((TSK_FS_INFO *)hfs); |
6485 | 1 | tsk_error_set_errno(TSK_ERR_FS_MAGIC); |
6486 | 1 | tsk_error_set_errstr("not an HFS+ file system (magic)"); |
6487 | 1 | return NULL; |
6488 | 1 | } |
6489 | | |
6490 | | /* |
6491 | | * Handle an HFS-wrapped HFS+ image, which is a HFS volume that contains |
6492 | | * the HFS+ volume inside of it. |
6493 | | */ |
6494 | 1 | if (tsk_getu16(fs->endian, hfs->fs->signature) == HFS_VH_SIG_HFS) { |
6495 | |
|
6496 | 0 | hfs_mdb *wrapper_sb = (hfs_mdb *) hfs->fs; |
6497 | | |
6498 | | // Verify that we are setting a wrapper and not a normal HFS volume |
6499 | 0 | if ((tsk_getu16(fs->endian, |
6500 | 0 | wrapper_sb->drEmbedSigWord) == HFS_VH_SIG_HFSPLUS) |
6501 | 0 | || (tsk_getu16(fs->endian, |
6502 | 0 | wrapper_sb->drEmbedSigWord) == HFS_VH_SIG_HFSX)) { |
6503 | |
|
6504 | 0 | TSK_FS_INFO *fs_info2; |
6505 | | // offset in sectors to start of first HFS block |
6506 | 0 | uint16_t drAlBlSt = |
6507 | 0 | tsk_getu16(fs->endian, wrapper_sb->drAlBlSt); |
6508 | | |
6509 | | // size of each HFS block |
6510 | 0 | uint32_t drAlBlkSiz = |
6511 | 0 | tsk_getu32(fs->endian, wrapper_sb->drAlBlkSiz); |
6512 | | |
6513 | | // start of embedded FS |
6514 | 0 | uint16_t startBlock = tsk_getu16(fs->endian, |
6515 | 0 | wrapper_sb->drEmbedExtent_startBlock); |
6516 | | |
6517 | | // calculate the offset; 512 here is intentional. |
6518 | | // TN1150 says "The drAlBlSt field contains the offset, in |
6519 | | // 512-byte blocks, of the wrapper's allocation block 0 relative |
6520 | | // to the start of the volume" |
6521 | 0 | TSK_OFF_T hfsplus_offset = |
6522 | 0 | (drAlBlSt * (TSK_OFF_T) 512) + |
6523 | 0 | (drAlBlkSiz * (TSK_OFF_T) startBlock); |
6524 | |
|
6525 | 0 | if (tsk_verbose) |
6526 | 0 | tsk_fprintf(stderr, |
6527 | 0 | "hfs_open: HFS+/HFSX within HFS wrapper at byte offset %" |
6528 | 0 | PRIdOFF "\n", hfsplus_offset); |
6529 | |
|
6530 | 0 | fs->tag = 0; |
6531 | 0 | free(hfs->fs); |
6532 | 0 | tsk_fs_free((TSK_FS_INFO *)hfs); |
6533 | | |
6534 | | /* just re-open with the new offset, then record the offset */ |
6535 | 0 | if (hfsplus_offset == 0) { |
6536 | 0 | tsk_error_set_errno(TSK_ERR_FS_CORRUPT); |
6537 | 0 | tsk_error_set_errstr("HFS+ offset is zero"); |
6538 | 0 | return NULL; |
6539 | 0 | } |
6540 | 0 | fs_info2 = |
6541 | 0 | hfs_open(img_info, offset + hfsplus_offset, ftype, test); |
6542 | |
|
6543 | 0 | if (fs_info2) |
6544 | 0 | ((HFS_INFO *) fs_info2)->hfs_wrapper_offset = |
6545 | 0 | hfsplus_offset; |
6546 | |
|
6547 | 0 | return fs_info2; |
6548 | 0 | } |
6549 | 0 | else { |
6550 | 0 | fs->tag = 0; |
6551 | 0 | free(hfs->fs); |
6552 | 0 | tsk_fs_free((TSK_FS_INFO *)hfs); |
6553 | 0 | tsk_error_set_errno(TSK_ERR_FS_MAGIC); |
6554 | 0 | tsk_error_set_errstr |
6555 | 0 | ("HFS file systems (other than wrappers HFS+/HFSX file systems) are not supported"); |
6556 | 0 | return NULL; |
6557 | 0 | } |
6558 | 0 | } |
6559 | | |
6560 | 1 | fs->block_count = tsk_getu32(fs->endian, hfs->fs->blk_cnt); |
6561 | 1 | fs->first_block = 0; |
6562 | 1 | fs->last_block = fs->last_block_act = fs->block_count - 1; |
6563 | | |
6564 | | /* this isn't really accurate; fs->block_size reports only the size |
6565 | | of the allocation block; the size of the device block has to be |
6566 | | found from the device (allocation block size should always be |
6567 | | larger than device block size and an even multiple of the device |
6568 | | block size) */ |
6569 | 1 | fs->dev_bsize = fs->block_size = |
6570 | 1 | tsk_getu32(fs->endian, hfs->fs->blk_sz); |
6571 | | |
6572 | | // determine the last block we have in this image |
6573 | 1 | if (fs->block_size <= 1) { |
6574 | 0 | fs->tag = 0; |
6575 | 0 | free(hfs->fs); |
6576 | 0 | tsk_fs_free((TSK_FS_INFO *)hfs); |
6577 | 0 | tsk_error_set_errno(TSK_ERR_FS_CORRUPT); |
6578 | 0 | tsk_error_set_errstr("HFS+ allocation block size too small"); |
6579 | 0 | return NULL; |
6580 | 0 | } |
6581 | 1 | if ((TSK_DADDR_T) ((img_info->size - offset) / fs->block_size) < |
6582 | 1 | fs->block_count) |
6583 | 0 | fs->last_block_act = |
6584 | 0 | (img_info->size - offset) / fs->block_size - 1; |
6585 | | |
6586 | | // Initialize the lock |
6587 | 1 | tsk_init_lock(&(hfs->metadata_dir_cache_lock)); |
6588 | | |
6589 | | /* |
6590 | | * Set function pointers |
6591 | | */ |
6592 | 1 | fs->inode_walk = hfs_inode_walk; |
6593 | 1 | fs->block_walk = hfs_block_walk; |
6594 | 1 | fs->block_getflags = hfs_block_getflags; |
6595 | 1 | fs->load_attrs = hfs_load_attrs; |
6596 | 1 | fs->get_default_attr_type = hfs_get_default_attr_type; |
6597 | | |
6598 | 1 | fs->file_add_meta = hfs_inode_lookup; |
6599 | 1 | fs->dir_open_meta = hfs_dir_open_meta; |
6600 | 1 | fs->fsstat = hfs_fsstat; |
6601 | 1 | fs->fscheck = hfs_fscheck; |
6602 | 1 | fs->istat = hfs_istat; |
6603 | 1 | fs->close = hfs_close; |
6604 | | |
6605 | | // lazy loading of block map |
6606 | 1 | hfs->blockmap_file = NULL; |
6607 | 1 | hfs->blockmap_attr = NULL; |
6608 | 1 | hfs->blockmap_cache_start = -1; |
6609 | 1 | hfs->blockmap_cache_len = 0; |
6610 | | |
6611 | 1 | fs->first_inum = HFS_ROOT_INUM; |
6612 | 1 | fs->root_inum = HFS_ROOT_INUM; |
6613 | 1 | fs->last_inum = HFS_FIRST_USER_CNID - 1; // we will later increase this |
6614 | 1 | fs->inum_count = fs->last_inum - fs->first_inum + 1; |
6615 | | |
6616 | | /* We will load the extents file data when we need it */ |
6617 | 1 | hfs->extents_file = NULL; |
6618 | 1 | hfs->extents_attr = NULL; |
6619 | | |
6620 | 1 | if (tsk_getu32(fs->endian, |
6621 | 1 | hfs->fs->start_file.extents[0].blk_cnt) == 0) { |
6622 | 1 | if (tsk_verbose) |
6623 | 0 | tsk_fprintf(stderr, |
6624 | 0 | "hfs_open: Optional Startup File is not present.\n"); |
6625 | 1 | hfs->has_startup_file = FALSE; |
6626 | 1 | } |
6627 | 0 | else { |
6628 | 0 | if (tsk_verbose) |
6629 | 0 | tsk_fprintf(stderr, "hfs_open: Startup File is present.\n"); |
6630 | 0 | hfs->has_startup_file = TRUE; |
6631 | 0 | } |
6632 | | |
6633 | 1 | if (tsk_getu32(fs->endian, hfs->fs->ext_file.extents[0].blk_cnt) == 0) { |
6634 | 0 | if (tsk_verbose) |
6635 | 0 | tsk_fprintf(stderr, |
6636 | 0 | "hfs_open: Optional Extents File (and Badblocks File) is not present.\n"); |
6637 | 0 | hfs->has_extents_file = FALSE; |
6638 | 0 | } |
6639 | 1 | else { |
6640 | 1 | if (tsk_verbose) |
6641 | 0 | tsk_fprintf(stderr, |
6642 | 0 | "hfs_open: Extents File (and BadBlocks File) is present.\n"); |
6643 | 1 | hfs->has_extents_file = TRUE; |
6644 | 1 | } |
6645 | | |
6646 | 1 | if (tsk_getu32(fs->endian, hfs->fs->attr_file.extents[0].blk_cnt) == 0) { |
6647 | 1 | if (tsk_verbose) |
6648 | 0 | tsk_fprintf(stderr, |
6649 | 0 | "hfs_open: Optional Attributes File is not present.\n"); |
6650 | 1 | hfs->has_attributes_file = FALSE; |
6651 | 1 | } |
6652 | 0 | else { |
6653 | 0 | if (tsk_verbose) |
6654 | 0 | tsk_fprintf(stderr, "hfs_open: Attributes File is present.\n"); |
6655 | 0 | hfs->has_attributes_file = TRUE; |
6656 | 0 | } |
6657 | | |
6658 | | /* Load the catalog file though */ |
6659 | 1 | if ((hfs->catalog_file = |
6660 | 1 | tsk_fs_file_open_meta(fs, NULL, |
6661 | 1 | HFS_CATALOG_FILE_ID)) == NULL) { |
6662 | 1 | hfs_close(fs); |
6663 | 1 | return NULL; |
6664 | 1 | } |
6665 | | |
6666 | | /* cache the data attribute */ |
6667 | 0 | hfs->catalog_attr = |
6668 | 0 | tsk_fs_attrlist_get(hfs->catalog_file->meta->attr, |
6669 | 0 | TSK_FS_ATTR_TYPE_DEFAULT); |
6670 | 0 | if (!hfs->catalog_attr) { |
6671 | 0 | hfs_close(fs); |
6672 | 0 | tsk_error_errstr2_concat |
6673 | 0 | (" - Data Attribute not found in Catalog File"); |
6674 | 0 | return NULL; |
6675 | 0 | } |
6676 | | |
6677 | | // cache the catalog file header |
6678 | 0 | cnt = tsk_fs_attr_read(hfs->catalog_attr, 14, |
6679 | 0 | (char *) &(hfs->catalog_header), |
6680 | 0 | sizeof(hfs_btree_header_record), 0); |
6681 | 0 | if (cnt != sizeof(hfs_btree_header_record)) { |
6682 | 0 | if (cnt >= 0) { |
6683 | 0 | tsk_error_reset(); |
6684 | 0 | tsk_error_set_errno(TSK_ERR_FS_READ); |
6685 | 0 | } |
6686 | 0 | hfs_close(fs); |
6687 | 0 | tsk_error_set_errstr2("hfs_open: Error reading catalog header"); |
6688 | 0 | return NULL; |
6689 | 0 | } |
6690 | | |
6691 | 0 | if (tsk_getu16(fs->endian, hfs->fs->version) == HFS_VH_VER_HFSPLUS) |
6692 | 0 | hfs->is_case_sensitive = 0; |
6693 | 0 | else if (tsk_getu16(fs->endian, hfs->fs->version) == HFS_VH_VER_HFSX) { |
6694 | 0 | if (hfs->catalog_header.compType == HFS_BT_HEAD_COMP_SENS) |
6695 | 0 | hfs->is_case_sensitive = 1; |
6696 | 0 | else if (hfs->catalog_header.compType == HFS_BT_HEAD_COMP_INSENS) |
6697 | 0 | hfs->is_case_sensitive = 0; |
6698 | 0 | else { |
6699 | 0 | if (tsk_verbose) |
6700 | 0 | tsk_fprintf(stderr, |
6701 | 0 | "hfs_open: invalid value (0x%02" PRIx8 |
6702 | 0 | ") for key compare type; using case-insensitive\n", |
6703 | 0 | hfs->catalog_header.compType); |
6704 | 0 | hfs->is_case_sensitive = 0; |
6705 | 0 | } |
6706 | 0 | } |
6707 | 0 | else { |
6708 | 0 | if (tsk_verbose) |
6709 | 0 | tsk_fprintf(stderr, |
6710 | 0 | "hfs_open: unknown HFS+/HFSX version (%" PRIu16 "\n", |
6711 | 0 | tsk_getu16(fs->endian, hfs->fs->version)); |
6712 | 0 | hfs->is_case_sensitive = 0; |
6713 | 0 | } |
6714 | | |
6715 | | // update the numbers. |
6716 | 0 | fs->last_inum = hfs_find_highest_inum(hfs); |
6717 | 0 | fs->inum_count = fs->last_inum + 1; |
6718 | |
|
6719 | 0 | snprintf((char *) fs->fs_id, 17, "%08" PRIx32 "%08" PRIx32, |
6720 | 0 | tsk_getu32(fs->endian, hfs->fs->finder_info[HFS_VH_FI_ID1]), |
6721 | 0 | tsk_getu32(fs->endian, hfs->fs->finder_info[HFS_VH_FI_ID2])); |
6722 | 0 | fs->fs_id_used = 16; |
6723 | | |
6724 | | /* journal */ |
6725 | 0 | fs->jblk_walk = hfs_jblk_walk; |
6726 | 0 | fs->jentry_walk = hfs_jentry_walk; |
6727 | 0 | fs->jopen = hfs_jopen; |
6728 | 0 | fs->name_cmp = hfs_name_cmp; |
6729 | 0 | fs->journ_inum = 0; |
6730 | | |
6731 | | /* Creation Times */ |
6732 | | |
6733 | | // First, the root |
6734 | 0 | file = tsk_fs_file_open_meta(fs, NULL, 2); |
6735 | 0 | if (file != NULL) { |
6736 | 0 | hfs->root_crtime = file->meta->crtime; |
6737 | 0 | hfs->has_root_crtime = TRUE; |
6738 | 0 | tsk_fs_file_close(file); |
6739 | 0 | } |
6740 | 0 | else { |
6741 | 0 | hfs->has_root_crtime = FALSE; |
6742 | 0 | } |
6743 | 0 | file = NULL; |
6744 | | |
6745 | | // disable hard link traversal while finding the hard |
6746 | | // link directories themselves (to prevent problems if |
6747 | | // there are hard links in the root directory) |
6748 | 0 | hfs->meta_inum = 0; |
6749 | 0 | hfs->meta_dir_inum = 0; |
6750 | | |
6751 | | // Now the (file) metadata directory |
6752 | | |
6753 | | // The metadata directory is a sub-directory of the root. Its name begins with four nulls, followed |
6754 | | // by "HFS+ Private Data". The file system parsing code replaces nulls in filenames with UTF8_NULL_REPLACE. |
6755 | | // In the released version of TSK, this replacement is the character '^'. |
6756 | | // NOTE: There is a standard Unicode replacement which is 0xfffd in UTF16 and 0xEF 0xBF 0xBD in UTF8. |
6757 | | // Systems that require the standard definition can redefine UTF8_NULL_REPLACE and UTF16_NULL_REPLACE |
6758 | | // in tsk_hfs.h |
6759 | 0 | hfs->has_meta_crtime = FALSE; |
6760 | 0 | result = |
6761 | 0 | tsk_fs_path2inum(fs, |
6762 | 0 | "/" UTF8_NULL_REPLACE UTF8_NULL_REPLACE UTF8_NULL_REPLACE |
6763 | 0 | UTF8_NULL_REPLACE "HFS+ Private Data", &inum, NULL); |
6764 | 0 | if (result == 0) { |
6765 | 0 | TSK_FS_FILE *file_tmp = tsk_fs_file_open_meta(fs, NULL, inum); |
6766 | 0 | if (file_tmp != NULL) { |
6767 | 0 | hfs->meta_crtime = file_tmp->meta->crtime; |
6768 | 0 | hfs->has_meta_crtime = TRUE; |
6769 | 0 | hfs->meta_inum = inum; |
6770 | 0 | tsk_fs_file_close(file_tmp); |
6771 | 0 | } |
6772 | 0 | } |
6773 | | |
6774 | | // Now, the directory metadata directory |
6775 | | |
6776 | | // The "directory" metadata directory, where hardlinked directories actually live, is a subdirectory |
6777 | | // of the root. The beginning of the name of this directory is ".HFS+ Private Directory Data" which |
6778 | | // is followed by a carriage return (ASCII 13). |
6779 | 0 | hfs->has_meta_dir_crtime = FALSE; |
6780 | 0 | result = |
6781 | 0 | tsk_fs_path2inum(fs, "/.HFS+ Private Directory Data\r", &inum, |
6782 | 0 | NULL); |
6783 | 0 | if (result == 0) { |
6784 | 0 | TSK_FS_FILE *file_tmp = tsk_fs_file_open_meta(fs, NULL, inum); |
6785 | 0 | if (file_tmp != NULL) { |
6786 | 0 | hfs->metadir_crtime = file_tmp->meta->crtime; |
6787 | 0 | hfs->has_meta_dir_crtime = TRUE; |
6788 | 0 | hfs->meta_dir_inum = inum; |
6789 | 0 | tsk_fs_file_close(file_tmp); |
6790 | 0 | } |
6791 | 0 | } |
6792 | |
|
6793 | 0 | if (hfs->has_root_crtime && hfs->has_meta_crtime |
6794 | 0 | && hfs->has_meta_dir_crtime) { |
6795 | 0 | if (tsk_verbose) |
6796 | 0 | tsk_fprintf(stderr, |
6797 | 0 | "hfs_open: Creation times for key folders have been read and cached.\n"); |
6798 | 0 | } |
6799 | 0 | if (!hfs->has_root_crtime) { |
6800 | 0 | if (tsk_verbose) |
6801 | 0 | tsk_fprintf(stderr, |
6802 | 0 | "hfs_open: Warning: Could not open the root directory. " |
6803 | 0 | "Hard link detection and some other functions will be impaired\n"); |
6804 | 0 | } |
6805 | 0 | else if (tsk_verbose) { |
6806 | 0 | tsk_fprintf(stderr, |
6807 | 0 | "hfs_open: The root directory is accessible.\n"); |
6808 | 0 | } |
6809 | |
|
6810 | 0 | if (tsk_verbose) { |
6811 | 0 | if (hfs->has_meta_crtime) |
6812 | 0 | tsk_fprintf(stderr, |
6813 | 0 | "hfs_open: \"/^^^^HFS+ Private Data\" metadata folder is accessible.\n"); |
6814 | 0 | else |
6815 | 0 | tsk_fprintf(stderr, |
6816 | 0 | "hfs_open: Optional \"^^^^HFS+ Private Data\" metadata folder is not accessible, or does not exist.\n"); |
6817 | 0 | if (hfs->has_meta_dir_crtime) |
6818 | 0 | tsk_fprintf(stderr, |
6819 | 0 | "hfs_open: \"/HFS+ Private Directory Data^\" metadata folder is accessible.\n"); |
6820 | 0 | else |
6821 | 0 | tsk_fprintf(stderr, |
6822 | 0 | "hfs_open: Optional \"/HFS+ Private Directory Data^\" metadata folder is not accessible, or does not exist.\n"); |
6823 | 0 | } |
6824 | | |
6825 | | // These caches will be set, if they are needed. |
6826 | 0 | hfs->meta_dir = NULL; |
6827 | 0 | hfs->dir_meta_dir = NULL; |
6828 | |
|
6829 | 0 | return fs; |
6830 | 0 | } |
6831 | | |
6832 | | |
6833 | | /* |
6834 | | * Error Handling |
6835 | | */ |
6836 | | |
6837 | | /** |
6838 | | * Call this when an error is first detected. It sets the error code and it also |
6839 | | * sets the primary error string, describing the lowest level of error. (Actually, |
6840 | | * it appends to the error string.) |
6841 | | * |
6842 | | * If the error code is already set, then this appends to the primary error |
6843 | | * string an hex representation of the new error code, plus the new error message. |
6844 | | * |
6845 | | * @param errnum The desired error code |
6846 | | * @param errstr The format string for the error message |
6847 | | */ |
6848 | | void |
6849 | | error_detected(uint32_t errnum, const char *errstr, ...) |
6850 | 0 | { |
6851 | 0 | va_list args; |
6852 | |
|
6853 | 0 | va_start(args, errstr); |
6854 | |
|
6855 | 0 | { |
6856 | 0 | TSK_ERROR_INFO *errInfo = tsk_error_get_info(); |
6857 | 0 | char *loc_errstr = errInfo->errstr; |
6858 | |
|
6859 | 0 | if (errInfo->t_errno == 0) |
6860 | 0 | errInfo->t_errno = errnum; |
6861 | 0 | else { |
6862 | | //This should not happen! We don't want to wipe out the existing error |
6863 | | //code, so we write the new code into the error string, in hex. |
6864 | 0 | size_t sl = strlen(errstr); |
6865 | 0 | snprintf(loc_errstr + sl, TSK_ERROR_STRING_MAX_LENGTH - sl, |
6866 | 0 | " Next errnum: 0x%x ", errnum); |
6867 | 0 | } |
6868 | 0 | if (errstr != NULL) { |
6869 | 0 | size_t sl = strlen(loc_errstr); |
6870 | 0 | vsnprintf(loc_errstr + sl, TSK_ERROR_STRING_MAX_LENGTH - sl, |
6871 | 0 | errstr, args); |
6872 | 0 | } |
6873 | 0 | } |
6874 | |
|
6875 | 0 | va_end(args); |
6876 | |
|
6877 | 0 | } |
6878 | | |
6879 | | /** |
6880 | | * Call this when a called TSK function returns an error. Presumably, that |
6881 | | * function will have set the error code and the primary error string. This |
6882 | | * *appends* to the secondary error string. It should be called to describe |
6883 | | * the context of the call. If no error code has been set, then this sets a |
6884 | | * default code so that it is not zero. |
6885 | | * |
6886 | | * @param errstr The format string for the error message |
6887 | | */ |
6888 | | void |
6889 | | error_returned(const char *errstr, ...) |
6890 | 1 | { |
6891 | 1 | va_list args; |
6892 | 1 | va_start(args, errstr); |
6893 | | |
6894 | 1 | { |
6895 | 1 | TSK_ERROR_INFO *errInfo = tsk_error_get_info(); |
6896 | 1 | char *loc_errstr2 = errInfo->errstr2; |
6897 | | |
6898 | 1 | if (errInfo->t_errno == 0) |
6899 | 0 | errInfo->t_errno = TSK_ERR_AUX_GENERIC; |
6900 | 1 | if (errstr != NULL) { |
6901 | 1 | size_t sl = strlen(loc_errstr2); |
6902 | 1 | vsnprintf(loc_errstr2 + sl, TSK_ERROR_STRING_MAX_LENGTH - sl, |
6903 | 1 | errstr, args); |
6904 | 1 | } |
6905 | 1 | } |
6906 | 1 | va_end(args); |
6907 | 1 | } |