Coverage Report

Created: 2024-02-11 06:27

/src/sleuthkit/tsk/fs/hfs.c
Line
Count
Source
1
/*
2
** The Sleuth Kit
3
**
4
** This software is subject to the IBM Public License ver. 1.0,
5
** which was displayed prior to download and is included in the readme.txt
6
** file accompanying the Sleuth Kit files.  It may also be requested from:
7
** Crucial Security Inc.
8
** 14900 Conference Center Drive
9
** Chantilly, VA 20151
10
**
11
12
** Copyright (c) 2009 Brian Carrier.  All rights reserved.
13
**
14
** Judson Powers [jpowers@atc-nycorp.com]
15
** Matt Stillerman [matt@atc-nycorp.com]
16
** Rob Joyce [rob@atc-nycorp.com]
17
** Copyright (c) 2008, 2012 ATC-NY.  All rights reserved.
18
** This file contains data developed with support from the National
19
** Institute of Justice, Office of Justice Programs, U.S. Department of Justice.
20
**
21
** Wyatt Banks [wbanks@crucialsecurity.com]
22
** Copyright (c) 2005 Crucial Security Inc.  All rights reserved.
23
**
24
** Brian Carrier [carrier@sleuthkit.org]
25
** Copyright (c) 2003-2005 Brian Carrier.  All rights reserved
26
**
27
** Copyright (c) 1997,1998,1999, International Business Machines
28
** Corporation and others. All Rights Reserved.
29
*/
30
31
/* TCT
32
 * LICENSE
33
 *      This software is distributed under the IBM Public License.
34
 * AUTHOR(S)
35
 *      Wietse Venema
36
 *      IBM T.J. Watson Research
37
 *      P.O. Box 704
38
 *      Yorktown Heights, NY 10598, USA
39
 --*/
40
41
/*
42
** You may distribute the Sleuth Kit, or other software that incorporates
43
** part or all of the Sleuth Kit, in object code form under a license agreement,
44
** provided that:
45
** a) you comply with the terms and conditions of the IBM Public License
46
**    ver 1.0; and
47
** b) the license agreement
48
**     i) effectively disclaims on behalf of all Contributors all warranties
49
**        and conditions, express and implied, including warranties or
50
**        conditions of title and non-infringement, and implied warranties
51
**        or conditions of merchantability and fitness for a particular
52
**        purpose.
53
**    ii) effectively excludes on behalf of all Contributors liability for
54
**        damages, including direct, indirect, special, incidental and
55
**        consequential damages such as lost profits.
56
**   iii) states that any provisions which differ from IBM Public License
57
**        ver. 1.0 are offered by that Contributor alone and not by any
58
**        other party; and
59
**    iv) states that the source code for the program is available from you,
60
**        and informs licensees how to obtain it in a reasonable manner on or
61
**        through a medium customarily used for software exchange.
62
**
63
** When the Sleuth Kit or other software that incorporates part or all of
64
** the Sleuth Kit is made available in source code form:
65
**     a) it must be made available under IBM Public License ver. 1.0; and
66
**     b) a copy of the IBM Public License ver. 1.0 must be included with
67
**        each copy of the program.
68
*/
69
70
/** \file hfs.c
71
 * Contains the general internal TSK HFS metadata and data unit code
72
 */
73
74
#include "tsk_fs_i.h"
75
#include "tsk_hfs.h"
76
#include "decmpfs.h"
77
78
#include <stdarg.h>
79
#ifdef TSK_WIN32
80
#include <string.h>
81
#else
82
#include <strings.h>
83
#endif
84
85
0
#define XSWAP(a,b) { a ^= b; b ^= a; a ^= b; }
86
87
// Compression Stuff
88
89
#ifdef HAVE_LIBZ
90
#include <zlib.h>
91
#endif
92
93
#include "lzvn.h"
94
95
// Forward declarations:
96
static uint8_t hfs_load_attrs(TSK_FS_FILE * fs_file);
97
static uint8_t hfs_load_extended_attrs(TSK_FS_FILE * file,
98
    unsigned char *isCompressed, unsigned char *cmpType,
99
    uint64_t * uncSize);
100
void error_detected(uint32_t errnum, const char *errstr, ...);
101
void error_returned(const char *errstr, ...);
102
103
/* may set error up to string 1
104
 * returns 0 on success, 1 on failure */
105
uint8_t
106
hfs_checked_read_random(TSK_FS_INFO * fs, char *buf, size_t len,
107
    TSK_OFF_T offs)
108
2
{
109
2
    ssize_t r;
110
111
2
    r = tsk_fs_read(fs, offs, buf, len);
112
2
    if (r != (ssize_t) len) {
113
0
        if (r >= 0) {
114
0
            tsk_error_reset();
115
0
            tsk_error_set_errno(TSK_ERR_FS_READ);
116
0
        }
117
0
        return 1;
118
0
    }
119
2
    return 0;
120
2
}
121
122
/**********************************************************************
123
 *
124
 *  MISC FUNCS
125
 *
126
 **********************************************************************/
127
128
/* convert the HFS Time (seconds from 1/1/1904)
129
 * to UNIX (UTC seconds from 1/1/1970)
130
 * The number is borrowed from linux HFS driver source
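 * That offset works out to 24107 days, i.e. 24107 * 86400 = 2082844800 seconds;
 * for example, an HFS date of 3682844800 converts to the Unix time
 * 3682844800 - 2082844800 = 1600000000 (mid-September 2020).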
131
 */
132
uint32_t
133
hfs_convert_2_unix_time(uint32_t hfsdate)
134
0
{
135
0
    if (hfsdate < NSEC_BTWN_1904_1970)
136
0
        return 0;
137
0
    return (uint32_t) (hfsdate - NSEC_BTWN_1904_1970);
138
0
}
139
140
141
/**
142
 * Convert a cnid (metadata address) to big endian array.
143
 * This is used to create the key for tree lookups.
144
 * @param cnid Metadata address to convert
145
 * @param array [out] Array to write data into.
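 * For example, cnid 0x00030201 is written out as { 0x00, 0x03, 0x02, 0x01 },
 * most significant byte first, matching the big-endian on-disk key format.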
146
 */
147
static void
148
cnid_to_array(uint32_t cnid, uint8_t array[4])
149
0
{
150
0
    array[3] = (cnid >> 0) & 0xff;
151
0
    array[2] = (cnid >> 8) & 0xff;
152
0
    array[1] = (cnid >> 16) & 0xff;
153
0
    array[0] = (cnid >> 24) & 0xff;
154
0
}
155
156
/**********************************************************************
157
 *
158
 * Lookup Functions
159
 *
160
 **********************************************************************/
161
162
163
164
/* Compares the given HFS+ Extents B-tree key to key constructed
165
 * for finding the beginning of the data fork extents for the given
166
 * CNID. (That is, the search key uses the given CNID and has
167
 * fork = 0 and start_block = 0.)
168
 */
169
static int
170
hfs_ext_compare_keys(HFS_INFO * hfs, uint32_t cnid,
171
    const hfs_btree_key_ext * key)
172
0
{
173
0
    TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
174
0
    uint32_t key_cnid;
175
176
0
    key_cnid = tsk_getu32(fs->endian, key->file_id);
177
0
    if (key_cnid < cnid)
178
0
        return -1;
179
0
    if (key_cnid > cnid)
180
0
        return 1;
181
182
    /* referring to the same cnids */
183
184
    /* we are always looking for the data fork */
185
0
    if (key->fork_type != HFS_EXT_KEY_TYPE_DATA)
186
0
        return 1;
187
188
    /* we are always looking for a start_block of zero
189
       (interested in the beginning of the extents, regardless
190
       of what the start_block is); all files except the bad
191
       blocks file should have a start_block greater than
192
       zero */
193
0
    if (tsk_getu32(fs->endian, key->start_block) == 0)
194
0
        return 0;
195
0
    return 1;
196
0
}
197
198
199
/** \internal
200
 * Returns the length of an HFS+ B-tree INDEX key based on the tree header
201
 * structure and the length claimed in the record.  With some trees,
202
 * the length given in the record is not used.
203
 * Note that this neither detects nor correctly handles 8-bit keys
204
 * (which should not be present in HFS+).
205
 *
206
 * This does not give the right answer for the Attributes File B-tree, for some
207
 * HFS+ file systems produced by the Apple OS, while it works for others.  For
208
 * the Attributes file, INDEX keys should always be as stated in the record itself,
209
 * never the "maxKeyLen" of the B-tree header.
210
 *
211
 * In this software, this function is only invoked when dealing with the Extents file.  In
212
 * that usage, it is not sufficiently well tested to know if it always gives the right
213
 * answer or not.  We can only test that with a highly fragmented disk.
214
 * @param hfs File System
215
 * @param keylen Length of key as given in record
216
 * @param header Tree header
217
 * @returns Length of key
218
 */
219
uint16_t
220
hfs_get_idxkeylen(HFS_INFO * hfs, uint16_t keylen,
221
    const hfs_btree_header_record * header)
222
0
{
223
0
    TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
224
225
    // if the flag is set, use the length given in the record
226
0
    if (tsk_getu32(fs->endian, header->attr) & HFS_BT_HEAD_ATTR_VARIDXKEYS)
227
0
        return keylen;
228
0
    else
229
0
        return tsk_getu16(fs->endian, header->maxKeyLen);
230
0
}
231
232
233
/**
234
 * Convert the extents runs to TSK_FS_ATTR_RUN runs.
235
 *
236
 * @param a_fs File system to analyze
237
 * @param a_extents Raw extents to process (in an array of 8)
238
 * @param a_start_off Starting block offset of these runs
239
 * @returns NULL on error or if no runs are in extents (test tsk_errno)
240
 */
241
static TSK_FS_ATTR_RUN *
242
hfs_extents_to_attr(TSK_FS_INFO * a_fs, const hfs_ext_desc * a_extents,
243
    TSK_OFF_T a_start_off)
244
2
{
245
2
    TSK_FS_ATTR_RUN *head_run = NULL;
246
2
    TSK_FS_ATTR_RUN *prev_run = NULL;
247
2
    int i;
248
2
    TSK_OFF_T cur_off = a_start_off;
249
250
    // since tsk_errno is checked as a return value, make sure it is clean.
251
2
    tsk_error_reset();
252
253
2
    if (tsk_verbose)
254
0
        tsk_fprintf(stderr,
255
0
            "hfs_extents_to_attr: Converting extents from offset %" PRIdOFF
256
0
            " to runlist\n", a_start_off);
257
258
3
    for (i = 0; i < 8; ++i) {
259
3
        TSK_FS_ATTR_RUN *cur_run;
260
261
3
        uint32_t addr = tsk_getu32(a_fs->endian, a_extents[i].start_blk);
262
3
        uint32_t len = tsk_getu32(a_fs->endian, a_extents[i].blk_cnt);
263
264
3
        if (tsk_verbose)
265
0
            tsk_fprintf(stderr,
266
0
                "hfs_extents_to_attr: run %i at addr %" PRIu32
267
0
                " with len %" PRIu32 "\n", i, addr, len);
268
269
3
        if ((addr == 0) && (len == 0)) {
270
2
            break;
271
2
        }
272
273
        // make a non-resident run
274
1
        if ((cur_run = tsk_fs_attr_run_alloc()) == NULL) {
275
0
            error_returned(" - hfs_extents_to_attr");
276
0
            return NULL;
277
0
        }
278
279
1
        cur_run->addr = addr;
280
1
        cur_run->len = len;
281
1
        cur_run->offset = cur_off;
282
283
1
        if (head_run == NULL)
284
1
            head_run = cur_run;
285
1
        if (prev_run != NULL)
286
0
            prev_run->next = cur_run;
287
1
        cur_off += cur_run->len;
288
1
        prev_run = cur_run;
289
1
    }
290
291
2
    return head_run;
292
2
}
293
294
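/* Illustrative sketch (not part of the original file): the run list built by
 * hfs_extents_to_attr() is a singly linked chain of TSK_FS_ATTR_RUN entries,
 * each holding a starting block address, a length, and a logical offset, all
 * counted in file-system blocks.  A consumer might walk it like this: */
static void
hfs_example_print_runs(const TSK_FS_ATTR_RUN * run)
{
    for (; run != NULL; run = run->next) {
        tsk_fprintf(stderr,
            "run: logical offset %llu -> block %llu, len %llu\n",
            (unsigned long long) run->offset,
            (unsigned long long) run->addr,
            (unsigned long long) run->len);
    }
}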
295
/**
296
 * Look in the extents catalog for entries for a given file. Add the runs
297
 * to the passed attribute structure.
298
 *
299
 * @param hfs File system being analyzed
300
 * @param cnid file id of file to search for
301
 * @param a_attr Attribute to add extents runs to
302
 * @param dataForkQ  if true, then find extents for the data fork.  If false, then find extents for the Resource fork.
303
 * @returns 1 on error and 0 on success
304
 */
305
static uint8_t
306
hfs_ext_find_extent_record_attr(HFS_INFO * hfs, uint32_t cnid,
307
    TSK_FS_ATTR * a_attr, unsigned char dataForkQ)
308
1
{
309
1
    TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
310
1
    uint16_t nodesize;          /* size of nodes (all, regardless of the name) */
311
1
    uint32_t cur_node;          /* node id of the current node */
312
1
    char *node = NULL;
313
1
    uint8_t is_done;
314
1
    uint8_t desiredType;
315
316
1
    tsk_error_reset();
317
318
1
    if (tsk_verbose)
319
0
        tsk_fprintf(stderr,
320
0
            "hfs_ext_find_extent_record_attr: Looking for extents for file %"
321
0
            PRIu32 " %s\n", cnid,
322
0
            dataForkQ ? "data fork" : "resource fork");
323
324
1
    if (!hfs->has_extents_file) {
325
        // No extents file (which is optional), and so, no further extents are possible.
326
0
        return 0;
327
0
    }
328
329
    // Are we looking for extents of the data fork or the resource fork?
330
1
    desiredType =
331
1
        dataForkQ ? HFS_EXT_KEY_TYPE_DATA : HFS_EXT_KEY_TYPE_RSRC;
332
333
    // Load the extents attribute, if that has not been done yet.
334
1
    if (hfs->extents_file == NULL) {
335
1
        ssize_t cnt;
336
337
1
        if ((hfs->extents_file =
338
1
                tsk_fs_file_open_meta(fs, NULL,
339
1
                    HFS_EXTENTS_FILE_ID)) == NULL) {
340
0
            return 1;
341
0
        }
342
343
        /* cache the data attribute */
344
1
        hfs->extents_attr =
345
1
            tsk_fs_attrlist_get(hfs->extents_file->meta->attr,
346
1
            TSK_FS_ATTR_TYPE_DEFAULT);
347
1
        if (!hfs->extents_attr) {
348
0
            tsk_error_errstr2_concat
349
0
                (" - Default Attribute not found in Extents File");
350
0
            return 1;
351
0
        }
352
353
        // cache the extents file header
354
1
        cnt = tsk_fs_attr_read(hfs->extents_attr, 14,
355
1
            (char *) &(hfs->extents_header),
356
1
            sizeof(hfs_btree_header_record), 0);
357
1
        if (cnt != sizeof(hfs_btree_header_record)) {
358
1
            if (cnt >= 0) {
359
0
                tsk_error_reset();
360
0
                tsk_error_set_errno(TSK_ERR_FS_READ);
361
0
            }
362
1
            tsk_error_set_errstr2
363
1
                ("hfs_ext_find_extent_record_attr: Error reading header");
364
1
            return 1;
365
1
        }
366
1
    }
367
368
    // allocate a node buffer
369
0
    nodesize = tsk_getu16(fs->endian, hfs->extents_header.nodesize);
370
0
    if ((node = (char *) tsk_malloc(nodesize)) == NULL) {
371
0
        return 1;
372
0
    }
373
374
    /* start at root node */
375
0
    cur_node = tsk_getu32(fs->endian, hfs->extents_header.rootNode);
376
377
    /* if the root node is zero, then the extents btree is empty */
378
    /* if no files have overflow extents, the Extents B-tree still
379
       exists on disk, but is an empty B-tree containing only
380
       the header node */
381
0
    if (cur_node == 0) {
382
0
        if (tsk_verbose)
383
0
            tsk_fprintf(stderr, "hfs_ext_find_extent_record: "
384
0
                "empty extents btree\n");
385
0
        free(node);
386
0
        return 0;
387
0
    }
388
389
0
    if (tsk_verbose)
390
0
        tsk_fprintf(stderr, "hfs_ext_find_extent_record: starting at "
391
0
            "root node %" PRIu32 "; nodesize = %"
392
0
            PRIu16 "\n", cur_node, nodesize);
393
394
    /* Recurse down to the needed leaf nodes and then go forward */
395
0
    is_done = 0;
396
0
    while (is_done == 0) {
397
0
        TSK_OFF_T cur_off;      /* start address of cur_node */
398
0
        uint16_t num_rec;       /* number of records in this node */
399
0
        ssize_t cnt;
400
0
        hfs_btree_node *node_desc;
401
402
        // sanity check
403
0
        if (cur_node > tsk_getu32(fs->endian,
404
0
                hfs->extents_header.totalNodes)) {
405
0
            tsk_error_set_errno(TSK_ERR_FS_GENFS);
406
0
            tsk_error_set_errstr
407
0
                ("hfs_ext_find_extent_record_attr: Node %d too large for file",
408
0
                cur_node);
409
0
            free(node);
410
0
            return 1;
411
0
        }
412
413
        // read the current node
414
0
        cur_off = (TSK_OFF_T)cur_node * nodesize;
415
0
        if (tsk_verbose)
416
0
            tsk_fprintf(stderr,
417
0
                "hfs_ext_find_extent_record: reading node %" PRIu32
418
0
                " at offset %" PRIdOFF "\n", cur_node, cur_off);
419
420
0
        cnt = tsk_fs_attr_read(hfs->extents_attr, cur_off,
421
0
            node, nodesize, 0);
422
0
        if (cnt != nodesize) {
423
0
            if (cnt >= 0) {
424
0
                tsk_error_reset();
425
0
                tsk_error_set_errno(TSK_ERR_FS_READ);
426
0
            }
427
0
            tsk_error_set_errstr2
428
0
                ("hfs_ext_find_extent_record_attr: Error reading node %d at offset %"
429
0
                PRIdOFF, cur_node, cur_off);
430
0
            free(node);
431
0
            return 1;
432
0
        }
433
434
        // process the header / descriptor
435
0
        if (nodesize < sizeof(hfs_btree_node)) {
436
0
            tsk_error_set_errno(TSK_ERR_FS_GENFS);
437
0
            tsk_error_set_errstr
438
0
                ("hfs_ext_find_extent_record_attr: Node size %d is too small to be valid", nodesize);
439
0
            free(node);
440
0
            return 1;
441
0
        }
442
0
        node_desc = (hfs_btree_node *) node;
443
0
        num_rec = tsk_getu16(fs->endian, node_desc->num_rec);
444
445
0
        if (num_rec == 0) {
446
0
            tsk_error_set_errno(TSK_ERR_FS_GENFS);
447
0
            tsk_error_set_errstr
448
0
                ("hfs_ext_find_extent_record: zero records in node %"
449
0
                PRIu32, cur_node);
450
0
            free(node);
451
0
            return 1;
452
0
        }
453
454
455
        /* With an index node, find the record with the largest key that is smaller
456
         * than or equal to cnid */
457
0
        if (node_desc->type == HFS_BT_NODE_TYPE_IDX) {
458
0
            uint32_t next_node = 0;
459
0
            int rec;
460
461
0
            if (tsk_verbose)
462
0
                tsk_fprintf(stderr,
463
0
                    "hfs_ext_find_extent_record: Index node %" PRIu32
464
0
                    " @ %" PRIu64 " has %" PRIu16 " records\n", cur_node,
465
0
                    cur_off, num_rec);
466
467
0
            for (rec = 0; rec < num_rec; ++rec) {
468
0
                int cmp;
469
0
                size_t rec_off;
470
0
                hfs_btree_key_ext *key;
471
472
                // Make sure node is large enough, note that (rec + 1) * 2 is an offset
473
                // relative to the end of node
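                // Node layout (HFS+ B-tree node format): the record offset table
                // is a stack of big-endian 16-bit values packed at the very end of
                // the node and filled back-to-front, so record r's offset lives at
                // node[nodesize - (r + 1) * 2]:
                //
                //   [ descriptor | rec 0 | rec 1 | ... | free | off(free) ... off(1) off(0) ]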
474
0
                if ((rec + 1) * 2 > (int) nodesize) {
475
0
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
476
0
                    tsk_error_set_errstr
477
0
                        ("hfs_ext_find_extent_record: offset of record %d in leaf node %d too small (%"
478
0
                        PRIu16 ")", rec, cur_node, nodesize);
479
0
                    free(node);
480
0
                    return 1;
481
0
                }
482
                // get the record offset in the node
483
0
                rec_off =
484
0
                    tsk_getu16(fs->endian,
485
0
                    &node[nodesize - (rec + 1) * 2]);
486
0
                if (rec_off > nodesize - sizeof(hfs_btree_key_ext)) {
487
0
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
488
0
                    tsk_error_set_errstr
489
0
                        ("hfs_ext_find_extent_record_attr: offset of record %d in index node %d too large (%d vs %"
490
0
                        PRIu16 ")", rec, cur_node, (int) rec_off,
491
0
                        nodesize);
492
0
                    free(node);
493
0
                    return 1;
494
0
                }
495
0
                key = (hfs_btree_key_ext *) & node[rec_off];
496
497
0
                cmp = hfs_ext_compare_keys(hfs, cnid, key);
498
499
0
                if (tsk_verbose)
500
0
                    tsk_fprintf(stderr,
501
0
                        "hfs_ext_find_extent_record: record %" PRIu16
502
0
                        " ; keylen %" PRIu16 " (FileId: %" PRIu32
503
0
                        ", ForkType: %" PRIu8 ", StartBlk: %" PRIu32
504
0
                        "); compare: %d\n", rec, tsk_getu16(fs->endian,
505
0
                            key->key_len), tsk_getu32(fs->endian,
506
0
                            key->file_id), key->fork_type,
507
0
                        tsk_getu32(fs->endian, key->start_block), cmp);
508
509
                /* save the info from this record unless it is bigger than cnid */
510
0
                if ((cmp <= 0) || (next_node == 0)) {
511
0
                    hfs_btree_index_record *idx_rec;
512
0
                    int keylen =
513
0
                        2 + hfs_get_idxkeylen(hfs, tsk_getu16(fs->endian,
514
0
                            key->key_len), &(hfs->extents_header));
515
0
                    if ((nodesize < 4) || (keylen > nodesize - 4) || (rec_off >= nodesize - 4 - keylen)) {
516
0
                        tsk_error_set_errno(TSK_ERR_FS_GENFS);
517
0
                        tsk_error_set_errstr
518
0
                            ("hfs_ext_find_extent_record_attr: offset and keylenth of record %d in index node %d too large (%d vs %"
519
0
                            PRIu16 ")", rec, cur_node,
520
0
                            (int) rec_off + keylen, nodesize);
521
0
                        free(node);
522
0
                        return 1;
523
0
                    }
524
0
                    idx_rec =
525
0
                        (hfs_btree_index_record *) & node[rec_off +
526
0
                        keylen];
527
0
                    next_node = tsk_getu32(fs->endian, idx_rec->childNode);
528
0
                }
529
530
                // we are bigger than cnid, so move on to the next node
531
0
                if (cmp > 0) {
532
0
                    break;
533
0
                }
534
0
            }
535
536
            // check if we found a relevant node, if not stop.
537
0
            if (next_node == 0) {
538
0
                if (tsk_verbose)
539
0
                    tsk_fprintf(stderr,
540
0
                        "hfs_ext_find_extent_record_attr: did not find any keys for %d in index node %d",
541
0
                        cnid, cur_node);
542
0
                is_done = 1;
543
0
                break;
544
0
            }
545
0
            cur_node = next_node;
546
0
        }
547
548
        /* with a leaf, we process until we are past cnid.  We move right too if we can */
549
0
        else if (node_desc->type == HFS_BT_NODE_TYPE_LEAF) {
550
0
            int rec;
551
552
0
            if (tsk_verbose)
553
0
                tsk_fprintf(stderr,
554
0
                    "hfs_ext_find_extent_record: Leaf node %" PRIu32 " @ %"
555
0
                    PRIu64 " has %" PRIu16 " records\n", cur_node, cur_off,
556
0
                    num_rec);
557
558
0
            for (rec = 0; rec < num_rec; ++rec) {
559
0
                size_t rec_off;
560
0
                hfs_btree_key_ext *key;
561
0
                uint32_t rec_cnid;
562
0
                hfs_extents *extents;
563
0
                TSK_OFF_T ext_off = 0;
564
0
                int keylen;
565
0
                TSK_FS_ATTR_RUN *attr_run;
566
567
                // Make sure node is large enough, note that (rec + 1) * 2 is an offset
568
                // relative to the end of node
569
0
                if ((rec + 1) * 2 > (int) nodesize) {
570
0
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
571
0
                    tsk_error_set_errstr
572
0
                        ("hfs_ext_find_extent_record_attr: offset of record %d in leaf node %d too small (%"
573
0
                        PRIu16 ")", rec, cur_node, nodesize);
574
0
                    free(node);
575
0
                    return 1;
576
0
                }
577
                // get the record offset in the node
578
0
                rec_off =
579
0
                    tsk_getu16(fs->endian,
580
0
                    &node[nodesize - (rec + 1) * 2]);
581
582
0
                if (rec_off >= nodesize - sizeof(hfs_btree_key_ext)) {
583
0
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
584
0
                    tsk_error_set_errstr
585
0
                        ("hfs_ext_find_extent_record_attr: offset of record %d in leaf node %d too large (%d vs %"
586
0
                        PRIu16 ")", rec, cur_node, (int) rec_off,
587
0
                        nodesize);
588
0
                    free(node);
589
0
                    return 1;
590
0
                }
591
592
                // Check that the whole hfs_btree_key_ext structure is set
593
0
                if (sizeof(hfs_btree_key_ext) > nodesize - rec_off) {
594
0
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
595
0
                    tsk_error_set_errstr
596
0
                    ("hfs_ext_find_extent_record_attr: record %d in leaf node %d truncated (have %d vs %"
597
0
                        PRIu16 " bytes)", rec, cur_node, nodesize - (int)rec_off,
598
0
                        sizeof(hfs_btree_key_ext));
599
0
                    free(node);
600
0
                    return 1;
601
0
                }
602
603
0
                key = (hfs_btree_key_ext *) & node[rec_off];
604
605
0
                if (tsk_verbose)
606
0
                    tsk_fprintf(stderr,
607
0
                        "hfs_ext_find_extent_record: record %" PRIu16
608
0
                        "; keylen %" PRIu16 " (%" PRIu32
609
0
                        ", %" PRIu8 ", %" PRIu32 ")\n", rec,
610
0
                        tsk_getu16(fs->endian, key->key_len),
611
0
                        tsk_getu32(fs->endian, key->file_id),
612
0
                        key->fork_type, tsk_getu32(fs->endian,
613
0
                            key->start_block));
614
615
0
                rec_cnid = tsk_getu32(fs->endian, key->file_id);
616
617
                // see if this record is for our file
618
                // OLD logic, just handles the DATA fork
619
//                if (rec_cnid < cnid) {
620
//                    continue;
621
//                }
622
//                else if ((rec_cnid > cnid)
623
//                    || (key->fork_type != HFS_EXT_KEY_TYPE_DATA)) {
624
//                    is_done = 1;
625
//                    break;
626
//                }
627
628
                // NEW logic, handles both DATA and RSRC forks.
629
0
                if (rec_cnid < cnid) {
630
0
                    continue;
631
0
                }
632
0
                if (rec_cnid > cnid) {
633
0
                    is_done = 1;
634
0
                    break;
635
0
                }
636
637
638
0
                if (key->fork_type != desiredType) {
639
0
                    if (dataForkQ) {
640
0
                        is_done = 1;
641
0
                        break;
642
0
                    }
643
0
                    else
644
0
                        continue;
645
0
                }
646
647
                // OK, this is one of the extents records that we are seeking, so save it.
648
                // Make sure there is room for the hfs_extents struct
649
0
                keylen = 2 + tsk_getu16(fs->endian, key->key_len);
650
0
                if (rec_off + keylen + sizeof(hfs_extents) > nodesize) {
651
0
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
652
0
                    tsk_error_set_errstr
653
0
                        ("hfs_ext_find_extent_record_attr: offset and keylenth of record %d in leaf node %d too large (%d vs %"
654
0
                        PRIu16 ")", rec, cur_node, (int) rec_off + keylen,
655
0
                        nodesize);
656
0
                    free(node);
657
0
                    return 1;
658
0
                }
659
660
                // get the starting offset of this extent
661
0
                ext_off = tsk_getu32(fs->endian, key->start_block);
662
663
                // convert the extents to the TSK format
664
0
                extents = (hfs_extents *) & node[rec_off + keylen];
665
666
0
                attr_run =
667
0
                    hfs_extents_to_attr(fs, extents->extents, ext_off);
668
0
                if ((attr_run == NULL) && (tsk_error_get_errno() != 0)) {
669
0
                    tsk_error_errstr2_concat
670
0
                        (" - hfs_ext_find_extent_record_attr");
671
0
                    free(node);
672
0
                    return 1;
673
0
                }
674
675
0
                if (tsk_fs_attr_add_run(fs, a_attr, attr_run)) {
676
0
                    tsk_error_errstr2_concat
677
0
                        (" - hfs_ext_find_extent_record_attr");
678
0
                    free(node);
679
0
                    return 1;
680
0
                }
681
0
            }
682
0
            cur_node = tsk_getu32(fs->endian, node_desc->flink);
683
0
            if (cur_node == 0) {
684
0
                is_done = 1;
685
0
                break;
686
0
            }
687
0
        }
688
0
        else {
689
0
            tsk_error_set_errno(TSK_ERR_FS_GENFS);
690
0
            tsk_error_set_errstr("hfs_ext_find_extent_record: btree node %"
691
0
                PRIu32 " (%" PRIdOFF ") is neither index nor leaf (%" PRIu8
692
0
                ")", cur_node, cur_off, node_desc->type);
693
0
            free(node);
694
0
            return 1;
695
0
        }
696
0
    }
697
0
    free(node);
698
0
    return 0;
699
0
}
700
701
702
/** \internal
703
 * Compares two Catalog B-tree keys.
704
 * @param hfs File System being analyzed
705
 * @param key1 Key 1 to compare
706
 * @param key2 Key 2 to compare
707
 * @returns -1 if key1 is smaller, 0 if equal, and 1 if key1 is larger
708
 */
709
int
710
hfs_cat_compare_keys(HFS_INFO * hfs, const hfs_btree_key_cat * key1,
711
    int keylen1, const hfs_btree_key_cat * key2)
712
0
{
713
0
    TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
714
0
    uint32_t cnid1, cnid2;
715
716
0
    if (keylen1 < 6) {
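        // (6 bytes = the 2-byte key_len field plus the 4-byte parent_cnid,
        // i.e. the fixed portion of hfs_btree_key_cat that precedes the name)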
717
        // Note that it would be better to return an error value here
718
        // but the current function interface does not support this
719
        // Also see issue #2365
720
0
        return -1;
721
0
    }
722
0
    cnid1 = tsk_getu32(fs->endian, key1->parent_cnid);
723
0
    cnid2 = tsk_getu32(fs->endian, key2->parent_cnid);
724
725
0
    if (cnid1 < cnid2)
726
0
        return -1;
727
0
    if (cnid1 > cnid2)
728
0
        return 1;
729
730
0
    return hfs_unicode_compare(hfs, &key1->name, keylen1 - 6, &key2->name);
731
0
}
732
733
734
/** \internal
735
 *
736
 * Traverse the HFS catalog file.  Call the callback for each
737
 * record.
738
 *
739
 * @param hfs File system
740
 * @param a_cb callback
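 *   (for index records the callback returns HFS_BTREE_CB_IDX_LT or
 *   HFS_BTREE_CB_IDX_EQGT; for leaf records it returns HFS_BTREE_CB_LEAF_GO
 *   or HFS_BTREE_CB_LEAF_STOP; HFS_BTREE_CB_ERR aborts the traversal)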
741
 * @param ptr Pointer to pass to callback
742
 * @returns 1 on error
743
 */
744
uint8_t
745
hfs_cat_traverse(HFS_INFO * hfs,
746
    TSK_HFS_BTREE_CB a_cb, void *ptr)
747
0
{
748
0
    TSK_FS_INFO *fs = &(hfs->fs_info);
749
0
    uint32_t cur_node;          /* node id of the current node */
750
0
    char *node;
751
752
0
    uint16_t nodesize;
753
0
    uint8_t is_done = 0;
754
755
0
    tsk_error_reset();
756
757
0
    nodesize = tsk_getu16(fs->endian, hfs->catalog_header.nodesize);
758
0
    if ((node = (char *) tsk_malloc(nodesize)) == NULL)
759
0
        return 1;
760
761
    /* start at root node */
762
0
    cur_node = tsk_getu32(fs->endian, hfs->catalog_header.rootNode);
763
764
    /* if the root node is zero, then the catalog btree is empty */
765
    /* (this check is inherited from the Extents B-tree code above; for the
766
       catalog, an empty tree is unexpected, so it is treated as an error
767
       rather than as a normal condition) */
768
0
    if (cur_node == 0) {
769
0
        if (tsk_verbose)
770
0
            tsk_fprintf(stderr, "hfs_cat_traverse: "
771
0
                "empty extents btree\n");
772
0
        free(node);
773
0
        return 1;
774
0
    }
775
776
0
    if (tsk_verbose)
777
0
        tsk_fprintf(stderr, "hfs_cat_traverse: starting at "
778
0
            "root node %" PRIu32 "; nodesize = %"
779
0
            PRIu16 "\n", cur_node, nodesize);
780
781
    /* Recurse down to the needed leaf nodes and then go forward */
782
0
    is_done = 0;
783
0
    while (is_done == 0) {
784
0
        TSK_OFF_T cur_off;      /* start address of cur_node */
785
0
        uint16_t num_rec;       /* number of records in this node */
786
0
        ssize_t cnt;
787
0
        hfs_btree_node *node_desc;
788
789
        // sanity check
790
0
        if (cur_node > tsk_getu32(fs->endian,
791
0
                hfs->catalog_header.totalNodes)) {
792
0
            tsk_error_set_errno(TSK_ERR_FS_GENFS);
793
0
            tsk_error_set_errstr
794
0
                ("hfs_cat_traverse: Node %d too large for file", cur_node);
795
0
            free(node);
796
0
            return 1;
797
0
        }
798
799
        // read the current node
800
0
        cur_off = (TSK_OFF_T)cur_node * nodesize;
801
0
        cnt = tsk_fs_attr_read(hfs->catalog_attr, cur_off,
802
0
            node, nodesize, 0);
803
0
        if (cnt != nodesize) {
804
0
            if (cnt >= 0) {
805
0
                tsk_error_reset();
806
0
                tsk_error_set_errno(TSK_ERR_FS_READ);
807
0
            }
808
0
            tsk_error_set_errstr2
809
0
                ("hfs_cat_traverse: Error reading node %d at offset %"
810
0
                PRIdOFF, cur_node, cur_off);
811
0
            free(node);
812
0
            return 1;
813
0
        }
814
815
        // process the header / descriptor
816
0
        if (nodesize < sizeof(hfs_btree_node)) {
817
0
            tsk_error_set_errno(TSK_ERR_FS_GENFS);
818
0
            tsk_error_set_errstr
819
0
            ("hfs_cat_traverse: Node size %d is too small to be valid", nodesize);
820
0
            free(node);
821
0
            return 1;
822
0
        }
823
0
        node_desc = (hfs_btree_node *) node;
824
0
        num_rec = tsk_getu16(fs->endian, node_desc->num_rec);
825
826
0
        if (tsk_verbose)
827
0
            tsk_fprintf(stderr, "hfs_cat_traverse: node %" PRIu32
828
0
                " @ %" PRIu64 " has %" PRIu16 " records\n",
829
0
                cur_node, cur_off, num_rec);
830
831
0
        if (num_rec == 0) {
832
0
            tsk_error_set_errno(TSK_ERR_FS_GENFS);
833
0
            tsk_error_set_errstr("hfs_cat_traverse: zero records in node %"
834
0
                PRIu32, cur_node);
835
0
            free(node);
836
0
            return 1;
837
0
        }
838
839
        /* With an index node, find the record with the largest key that is smaller
840
         * than or equal to cnid */
841
0
        if (node_desc->type == HFS_BT_NODE_TYPE_IDX) {
842
0
            uint32_t next_node = 0;
843
0
            int rec;
844
845
0
            for (rec = 0; rec < num_rec; ++rec) {
846
0
                size_t rec_off;
847
0
                hfs_btree_key_cat *key;
848
0
                uint8_t retval;
849
0
                int keylen;
850
851
                // Make sure node is large enough, note that (rec + 1) * 2 is an offset
852
                // relative to the end of node
853
0
                if ((rec + 1) * 2 > (int) nodesize) {
854
0
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
855
0
                    tsk_error_set_errstr
856
0
                        ("hfs_cat_traverse: offset of record %d in leaf node %d too small (%"
857
0
                        PRIu16 ")", rec, cur_node, nodesize);
858
0
                    free(node);
859
0
                    return 1;
860
0
                }
861
                // get the record offset in the node
862
0
                rec_off =
863
0
                    tsk_getu16(fs->endian,
864
0
                    &node[nodesize - (rec + 1) * 2]);
865
866
                // Need at least 2 bytes for key_len
867
0
                if (rec_off >= nodesize - 2) {
868
0
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
869
0
                    tsk_error_set_errstr
870
0
                        ("hfs_cat_traverse: offset of record %d in index node %d too large (%d vs %"
871
0
                        PRIu16 ")", rec, cur_node, (int) rec_off,
872
0
                        nodesize);
873
0
                    free(node);
874
0
                    return 1;
875
0
                }
876
877
0
                key = (hfs_btree_key_cat *) & node[rec_off];
878
0
                keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len);
879
880
                // Want a key of at least 6 bytes, the size of the first 2 members of hfs_btree_key_cat
881
0
                if ((keylen < 6) || (keylen > nodesize - rec_off)) {
882
0
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
883
0
                    tsk_error_set_errstr
884
0
                        ("hfs_cat_traverse: length of key %d in index node %d out of bounds (6 < %d < %"
885
0
                        PRIu16 ")", rec, cur_node, keylen, (nodesize - rec_off));
886
0
                    free(node);
887
0
                    return 1;
888
0
                }
889
890
                /*
891
                   if (tsk_verbose)
892
                   tsk_fprintf(stderr,
893
                   "hfs_cat_traverse: record %" PRIu16
894
                   " ; keylen %" PRIu16 " (%" PRIu32 ")\n", rec,
895
                   tsk_getu16(fs->endian, key->key_len),
896
                   tsk_getu32(fs->endian, key->parent_cnid));
897
                 */
898
899
900
                /* save the info from this record unless it is too big */
901
0
                retval =
902
0
                    a_cb(hfs, HFS_BT_NODE_TYPE_IDX, key, keylen, nodesize,
903
0
                    cur_off + rec_off, ptr);
904
0
                if (retval == HFS_BTREE_CB_ERR) {
905
0
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
906
0
                    tsk_error_set_errstr2
907
0
                        ("hfs_cat_traverse: Callback returned error");
908
0
                    free(node);
909
0
                    return 1;
910
0
                }
911
                // record the closest entry
912
0
                else if ((retval == HFS_BTREE_CB_IDX_LT)
913
0
                    || (next_node == 0)) {
914
0
                    hfs_btree_index_record *idx_rec;
915
0
                    int keylen =
916
0
                        2 + hfs_get_idxkeylen(hfs, tsk_getu16(fs->endian,
917
0
                            key->key_len), &(hfs->catalog_header));
918
0
                    if (keylen > nodesize - rec_off) {
919
0
                        tsk_error_set_errno(TSK_ERR_FS_GENFS);
920
0
                        tsk_error_set_errstr
921
0
                            ("hfs_cat_traverse: offset of record and keylength %d in index node %d too large (%d vs %"
922
0
                            PRIu16 ")", rec, cur_node,
923
0
                            (int) rec_off + keylen, nodesize);
924
0
                        free(node);
925
0
                        return 1;
926
0
                    }
927
0
                    if (sizeof(hfs_btree_index_record) > nodesize - rec_off - keylen) {
928
0
                        tsk_error_set_errno(TSK_ERR_FS_GENFS);
929
0
                        tsk_error_set_errstr("hfs_cat_traverse: truncated btree index record");
930
0
                        free(node);
931
0
                        return 1;
932
0
                    }
933
0
                    idx_rec =
934
0
                        (hfs_btree_index_record *) & node[rec_off +
935
0
                        keylen];
936
0
                    next_node = tsk_getu32(fs->endian, idx_rec->childNode);
937
0
                }
938
0
                if (retval == HFS_BTREE_CB_IDX_EQGT) {
939
                    // move down to the next node
940
0
                    break;
941
0
                }
942
0
            }
943
            // check if we found a relevant node
944
0
            if (next_node == 0) {
945
0
                tsk_error_set_errno(TSK_ERR_FS_GENFS);
946
0
                tsk_error_set_errstr
947
0
                    ("hfs_cat_traverse: did not find any keys in index node %d",
948
0
                    cur_node);
949
0
                is_done = 1;
950
0
                break;
951
0
            }
952
            // TODO: Handle multinode loops
953
0
            if (next_node == cur_node) {
954
0
                tsk_error_set_errno(TSK_ERR_FS_GENFS);
955
0
                tsk_error_set_errstr
956
0
                    ("hfs_cat_traverse: node %d references itself as next node",
957
0
                    cur_node);
958
0
                is_done = 1;
959
0
                break;
960
0
            }
961
0
            cur_node = next_node;
962
0
        }
963
964
        /* With a leaf, we look for the specific record. */
965
0
        else if (node_desc->type == HFS_BT_NODE_TYPE_LEAF) {
966
0
            int rec;
967
968
0
            for (rec = 0; rec < num_rec; ++rec) {
969
0
                size_t rec_off;
970
0
                hfs_btree_key_cat *key;
971
0
                uint8_t retval;
972
0
                int keylen;
973
974
                // Make sure node is large enough, note that (rec + 1) * 2 is an offset
975
                // relative to the end of node
976
0
                if ((rec + 1) * 2 > (int) nodesize) {
977
0
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
978
0
                    tsk_error_set_errstr
979
0
                        ("hfs_cat_traverse: offset of record %d in leaf node %d too small (%"
980
0
                        PRIu16 ")", rec, cur_node, nodesize);
981
0
                    free(node);
982
0
                    return 1;
983
0
                }
984
                // get the record offset in the node
985
0
                rec_off =
986
0
                    tsk_getu16(fs->endian,
987
0
                    &node[nodesize - (rec + 1) * 2]);
988
989
                // Need at least 2 bytes for key_len
990
0
                if (rec_off >= nodesize - 2) {
991
0
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
992
0
                    tsk_error_set_errstr
993
0
                        ("hfs_cat_traverse: offset of record %d in leaf node %d too large (%d vs %"
994
0
                        PRIu16 ")", rec, cur_node, (int) rec_off,
995
0
                        nodesize);
996
0
                    free(node);
997
0
                    return 1;
998
0
                }
999
1000
0
                key = (hfs_btree_key_cat *) & node[rec_off];
1001
0
                keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len);
1002
1003
                // Want a key of at least 6 bytes, the size of the first 2 members of hfs_btree_key_cat
1004
0
                if ((keylen < 6) || (keylen > nodesize - rec_off)) {
1005
0
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
1006
0
                    tsk_error_set_errstr
1007
0
                        ("hfs_cat_traverse: length of key %d in leaf node %d out of bounds (6 < %d < %"
1008
0
                        PRIu16 ")", rec, cur_node, keylen, nodesize);
1009
0
                    free(node);
1010
0
                    return 1;
1011
0
                }
1012
1013
                /*
1014
                   if (tsk_verbose)
1015
                   tsk_fprintf(stderr,
1016
                   "hfs_cat_traverse: record %" PRIu16
1017
                   "; keylen %" PRIu16 " (%" PRIu32 ")\n", rec,
1018
                   tsk_getu16(fs->endian, key->key_len),
1019
                   tsk_getu32(fs->endian, key->parent_cnid));
1020
                 */
1021
                //                rec_cnid = tsk_getu32(fs->endian, key->file_id);
1022
1023
                // The nodesize passed to the callback should contain the available node
1024
                // data size, measured from the start of the key.
1025
0
                retval =
1026
0
                    a_cb(hfs, HFS_BT_NODE_TYPE_LEAF, key, keylen, nodesize - rec_off,
1027
0
                    cur_off + rec_off, ptr);
1028
0
                if (retval == HFS_BTREE_CB_LEAF_STOP) {
1029
0
                    is_done = 1;
1030
0
                    break;
1031
0
                }
1032
0
                else if (retval == HFS_BTREE_CB_ERR) {
1033
0
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
1034
0
                    tsk_error_set_errstr2
1035
0
                        ("hfs_cat_traverse: Callback returned error");
1036
0
                    free(node);
1037
0
                    return 1;
1038
0
                }
1039
0
            }
1040
1041
            // move right to the next node if we got this far
1042
0
            if (is_done == 0) {
1043
0
                cur_node = tsk_getu32(fs->endian, node_desc->flink);
1044
0
                if (cur_node == 0) {
1045
0
                    is_done = 1;
1046
0
                }
1047
0
                if (tsk_verbose)
1048
0
                    tsk_fprintf(stderr,
1049
0
                        "hfs_cat_traverse: moving forward to next leaf");
1050
0
            }
1051
0
        }
1052
0
        else {
1053
0
            tsk_error_set_errno(TSK_ERR_FS_GENFS);
1054
0
            tsk_error_set_errstr("hfs_cat_traverse: btree node %" PRIu32
1055
0
                " (%" PRIu64 ") is neither index nor leaf (%" PRIu8 ")",
1056
0
                cur_node, cur_off, node_desc->type);
1057
0
            free(node);
1058
0
            return 1;
1059
0
        }
1060
0
    }
1061
0
    free(node);
1062
0
    return 0;
1063
0
}
1064
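/* Context threaded through hfs_cat_traverse() by hfs_cat_get_record_offset():
 * the catalog key being sought and, once found, the byte offset of its record data. */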
1065
typedef struct {
1066
    const hfs_btree_key_cat *targ_key;
1067
    TSK_OFF_T off;
1068
} HFS_CAT_GET_RECORD_OFFSET_DATA;
1069
1070
static uint8_t
1071
hfs_cat_get_record_offset_cb(HFS_INFO * hfs, int8_t level_type,
1072
    const hfs_btree_key_cat * cur_key, int cur_keylen, size_t node_size,
1073
    TSK_OFF_T key_off, void *ptr)
1074
0
{
1075
0
    HFS_CAT_GET_RECORD_OFFSET_DATA *offset_data = (HFS_CAT_GET_RECORD_OFFSET_DATA *)ptr;
1076
0
    const hfs_btree_key_cat *targ_key = offset_data->targ_key;
1077
1078
0
    if (tsk_verbose)
1079
0
        tsk_fprintf(stderr,
1080
0
            "hfs_cat_get_record_offset_cb: %s node want: %" PRIu32
1081
0
            " vs have: %" PRIu32 "\n",
1082
0
            (level_type == HFS_BT_NODE_TYPE_IDX) ? "Index" : "Leaf",
1083
0
            tsk_getu32(hfs->fs_info.endian, targ_key->parent_cnid),
1084
0
            tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid));
1085
1086
0
    if (level_type == HFS_BT_NODE_TYPE_IDX) {
1087
0
        int diff = hfs_cat_compare_keys(hfs, cur_key, cur_keylen, targ_key);
1088
0
        if (diff < 0)
1089
0
            return HFS_BTREE_CB_IDX_LT;
1090
0
        else
1091
0
            return HFS_BTREE_CB_IDX_EQGT;
1092
0
    }
1093
0
    else {
1094
0
        int diff = hfs_cat_compare_keys(hfs, cur_key, cur_keylen, targ_key);
1095
1096
        // see if this record is for our file or if we passed the interesting entries
1097
0
        if (diff < 0) {
1098
0
            return HFS_BTREE_CB_LEAF_GO;
1099
0
        }
1100
0
        else if (diff == 0) {
1101
0
            offset_data->off =
1102
0
                key_off + 2 + tsk_getu16(hfs->fs_info.endian,
1103
0
                cur_key->key_len);
1104
0
        }
1105
0
        return HFS_BTREE_CB_LEAF_STOP;
1106
0
    }
1107
0
}
1108
1109
1110
/** \internal
1111
 * Find the byte offset (from the start of the catalog file) to a record
1112
 * in the catalog file.
1113
 * @param hfs File System being analyzed
1114
 * @param needle Key to search for
1115
 * @returns Byte offset or 0 on error. 0 is also returned if catalog
1116
 * record was not found. Check tsk_errno to determine if error occurred.
1117
 */
1118
static TSK_OFF_T
1119
hfs_cat_get_record_offset(HFS_INFO * hfs, const hfs_btree_key_cat * needle)
1120
0
{
1121
0
    HFS_CAT_GET_RECORD_OFFSET_DATA offset_data;
1122
0
    offset_data.off = 0;
1123
0
    offset_data.targ_key = needle;
1124
0
    if (hfs_cat_traverse(hfs, hfs_cat_get_record_offset_cb, &offset_data)) {
1125
0
        return 0;
1126
0
    }
1127
0
    return offset_data.off;
1128
0
}
1129
1130
1131
/** \internal
1132
 * Given a byte offset to a leaf record in the catalog file, read the data as
1133
 * a thread record. This will zero the buffer and read in the size of the thread
1134
 * data.
1135
 * @param hfs File System
1136
 * @param off Byte offset of record in catalog file (not including key)
1137
 * @param thread [out] Buffer to write thread data into.
1138
 * @returns 0 on success, 1 on failure; sets up to error string 1 */
1139
uint8_t
1140
hfs_cat_read_thread_record(HFS_INFO * hfs, TSK_OFF_T off,
1141
    hfs_thread * thread)
1142
0
{
1143
0
    TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
1144
0
    uint16_t uni_len;
1145
0
    ssize_t cnt;
1146
1147
0
    memset(thread, 0, sizeof(hfs_thread));
1148
0
    cnt = tsk_fs_attr_read(hfs->catalog_attr, off, (char *) thread, 10, 0);
1149
0
    if (cnt != 10) {
1150
0
        if (cnt >= 0) {
1151
0
            tsk_error_reset();
1152
0
            tsk_error_set_errno(TSK_ERR_FS_READ);
1153
0
        }
1154
0
        tsk_error_set_errstr2
1155
0
            ("hfs_cat_read_thread_record: Error reading catalog offset %"
1156
0
            PRIdOFF " (header)", off);
1157
0
        return 1;
1158
0
    }
1159
1160
0
    if ((tsk_getu16(fs->endian, thread->rec_type) != HFS_FOLDER_THREAD)
1161
0
        && (tsk_getu16(fs->endian, thread->rec_type) != HFS_FILE_THREAD)) {
1162
0
        tsk_error_set_errno(TSK_ERR_FS_GENFS);
1163
0
        tsk_error_set_errstr
1164
0
            ("hfs_cat_read_thread_record: unexpected record type %" PRIu16,
1165
0
            tsk_getu16(fs->endian, thread->rec_type));
1166
0
        return 1;
1167
0
    }
1168
1169
0
    uni_len = tsk_getu16(fs->endian, thread->name.length);
1170
1171
0
    if (uni_len > 255) {
1172
0
        tsk_error_set_errno(TSK_ERR_FS_INODE_COR);
1173
0
        tsk_error_set_errstr
1174
0
            ("hfs_cat_read_thread_record: invalid string length (%" PRIu16
1175
0
            ")", uni_len);
1176
0
        return 1;
1177
0
    }
1178
1179
0
    cnt =
1180
0
        tsk_fs_attr_read(hfs->catalog_attr, off + 10,
1181
0
        (char *) thread->name.unicode, uni_len * 2, 0);
1182
0
    if (cnt != uni_len * 2) {
1183
0
        if (cnt >= 0) {
1184
0
            tsk_error_reset();
1185
0
            tsk_error_set_errno(TSK_ERR_FS_READ);
1186
0
        }
1187
0
        tsk_error_set_errstr2
1188
0
            ("hfs_cat_read_thread_record: Error reading catalog offset %"
1189
0
            PRIdOFF " (name)", off + 10);
1190
0
        return 1;
1191
0
    }
1192
1193
0
    return 0;
1194
0
}
1195
1196
/** \internal
1197
 * Read a catalog record into a local data structure.  This reads the
1198
 * correct amount, depending on whether it is a file or folder.
1199
 * @param hfs File system being analyzed
1200
 * @param off Byte offset (in catalog file) of record (not including key)
1201
 * @param record [out] Structure to read data into
1202
 * @returns 1 on error
1203
 */
1204
uint8_t
1205
hfs_cat_read_file_folder_record(HFS_INFO * hfs, TSK_OFF_T off,
1206
    hfs_file_folder * record)
1207
0
{
1208
0
    TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
1209
0
    ssize_t cnt;
1210
0
    char rec_type[2];
1211
1212
0
    memset(record, 0, sizeof(hfs_file_folder));
1213
1214
0
    cnt = tsk_fs_attr_read(hfs->catalog_attr, off, rec_type, 2, 0);
1215
0
    if (cnt != 2) {
1216
0
        if (cnt >= 0) {
1217
0
            tsk_error_reset();
1218
0
            tsk_error_set_errno(TSK_ERR_FS_READ);
1219
0
        }
1220
0
        tsk_error_set_errstr2
1221
0
            ("hfs_cat_read_file_folder_record: Error reading record type from catalog offset %"
1222
0
            PRIdOFF " (header)", off);
1223
0
        return 1;
1224
0
    }
1225
1226
0
    if (tsk_getu16(fs->endian, rec_type) == HFS_FOLDER_RECORD) {
1227
0
        cnt =
1228
0
            tsk_fs_attr_read(hfs->catalog_attr, off, (char *) record,
1229
0
            sizeof(hfs_folder), 0);
1230
0
        if (cnt != sizeof(hfs_folder)) {
1231
0
            if (cnt >= 0) {
1232
0
                tsk_error_reset();
1233
0
                tsk_error_set_errno(TSK_ERR_FS_READ);
1234
0
            }
1235
0
            tsk_error_set_errstr2
1236
0
                ("hfs_cat_read_file_folder_record: Error reading catalog offset %"
1237
0
                PRIdOFF " (folder)", off);
1238
0
            return 1;
1239
0
        }
1240
0
    }
1241
0
    else if (tsk_getu16(fs->endian, rec_type) == HFS_FILE_RECORD) {
1242
0
        cnt =
1243
0
            tsk_fs_attr_read(hfs->catalog_attr, off, (char *) record,
1244
0
            sizeof(hfs_file), 0);
1245
0
        if (cnt != sizeof(hfs_file)) {
1246
0
            if (cnt >= 0) {
1247
0
                tsk_error_reset();
1248
0
                tsk_error_set_errno(TSK_ERR_FS_READ);
1249
0
            }
1250
0
            tsk_error_set_errstr2
1251
0
                ("hfs_cat_read_file_folder_record: Error reading catalog offset %"
1252
0
                PRIdOFF " (file)", off);
1253
0
            return 1;
1254
0
        }
1255
0
    }
1256
0
    else {
1257
0
        tsk_error_set_errno(TSK_ERR_FS_GENFS);
1258
0
        tsk_error_set_errstr
1259
0
            ("hfs_cat_read_file_folder_record: unexpected record type %"
1260
0
            PRIu16, tsk_getu16(fs->endian, rec_type));
1261
0
        return 1;
1262
0
    }
1263
1264
0
    return 0;
1265
0
}
1266
1267
// hfs_lookup_hard_link appears to be unnecessary - it looks up the cnid
1268
// by seeing if there's a file/dir with the standard hard link name plus
1269
// linknum and returns the meta_addr. But this should always be the same as linknum,
1270
// and is very slow when there are many hard links, so it shouldn't be used.
1271
//static TSK_INUM_T
1272
//hfs_lookup_hard_link(HFS_INFO * hfs, TSK_INUM_T linknum,
1273
//    unsigned char is_directory)
1274
//{
1275
//    char fBuff[30];
1276
//    TSK_FS_DIR *mdir;
1277
//    size_t indx;
1278
//    TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs;
1279
//
1280
//    memset(fBuff, 0, 30);
1281
//
1282
//    if (is_directory) {
1283
//
1284
//        tsk_take_lock(&(hfs->metadata_dir_cache_lock));
1285
//        if (hfs->dir_meta_dir == NULL) {
1286
//            hfs->dir_meta_dir =
1287
//                tsk_fs_dir_open_meta(fs, hfs->meta_dir_inum);
1288
//        }
1289
//        tsk_release_lock(&(hfs->metadata_dir_cache_lock));
1290
//
1291
//        if (hfs->dir_meta_dir == NULL) {
1292
//            error_returned
1293
//                ("hfs_lookup_hard_link: could not open the dir metadata directory");
1294
//            return 0;
1295
//        }
1296
//        else {
1297
//            mdir = hfs->dir_meta_dir;
1298
//        }
1299
//        snprintf(fBuff, 30, "dir_%" PRIuINUM, linknum);
1300
//
1301
//    }
1302
//    else {
1303
//
1304
//        tsk_take_lock(&(hfs->metadata_dir_cache_lock));
1305
//        if (hfs->meta_dir == NULL) {
1306
//            hfs->meta_dir = tsk_fs_dir_open_meta(fs, hfs->meta_inum);
1307
//        }
1308
//        tsk_release_lock(&(hfs->metadata_dir_cache_lock));
1309
//
1310
//        if (hfs->meta_dir == NULL) {
1311
//            error_returned
1312
//                ("hfs_lookup_hard_link: could not open file metadata directory");
1313
//            return 0;
1314
//        }
1315
//        else {
1316
//            mdir = hfs->meta_dir;
1317
//        }
1318
//        snprintf(fBuff, 30, "iNode%" PRIuINUM, linknum);
1319
//    }
1320
//
1321
//    for (indx = 0; indx < tsk_fs_dir_getsize(mdir); ++indx) {
1322
//        if ((mdir->names != NULL) && mdir->names[indx].name &&
1323
//            (fs->name_cmp(fs, mdir->names[indx].name, fBuff) == 0)) {
1324
//            // OK this is the one
1325
//            return mdir->names[indx].meta_addr;
1326
//        }
1327
//    }
1328
//
1329
//    // OK, we did not find that linknum
1330
//    return 0;
1331
//}
1332
1333
/*
1334
 * Given a catalog entry, tests that entry to see if it is a hard link.
1335
 * If it is a hard link, the function returns the inum (or cnid) of the target file.
1336
 * If it is NOT a hard link, then the function returns the inum of the given entry.
1337
 * In both cases, the parameter is_error is set to zero.
1338
 *
1339
 * If an error occurs and it is a mild one, then is_error is set to 1, and the
1340
 * inum of the given entry is returned.  This signals that hard link detection cannot
1341
 * be carried out.
1342
 *
1343
 * If the error is serious, then is_error is set to 2 or 3, depending on the kind of error, and
1344
 * the TSK error code is set, and the function returns zero.  is_error==2 means that an error
1345
 * occurred in looking up the target file in the Catalog.  is_error==3 means that the given
1346
 * entry appears to be a hard link, but the target file does not exist in the Catalog.
1347
 *
1348
 * @param hfs The file system
1349
 * @param cat The catalog entry to check
1350
 * @param is_error [out] Set to 0 if there is no error, or to 1, 2, or 3 as described above.
1351
 * @return The inum (or cnid) of the hard link target, or of the given catalog entry, or zero.
1352
 */
1353
TSK_INUM_T
1354
hfs_follow_hard_link(HFS_INFO * hfs, hfs_file * cat,
1355
    unsigned char *is_error)
1356
0
{
1357
1358
0
    TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs;
1359
0
    TSK_INUM_T cnid;
1360
0
    time_t crtime;
1361
0
    uint32_t file_type;
1362
0
    uint32_t file_creator;
1363
1364
0
    *is_error = 0;              // default, not an error
1365
1366
0
    if (cat == NULL) {
1367
0
        error_detected(TSK_ERR_FS_ARG,
1368
0
            "hfs_follow_hard_link: Pointer to Catalog entry (2nd arg) is null");
1369
0
        return 0;
1370
0
    }
1371
1372
0
    cnid = tsk_getu32(fs->endian, cat->std.cnid);
1373
1374
0
    if (cnid < HFS_FIRST_USER_CNID) {
1375
        // Can't be a hard link.  And, cannot look up in Catalog file either!
1376
0
        return cnid;
1377
0
    }
1378
1379
0
    crtime =
1380
0
        (time_t) hfs_convert_2_unix_time(tsk_getu32(fs->endian,
1381
0
            cat->std.crtime));
1382
1383
1384
0
    file_type = tsk_getu32(fs->endian, cat->std.u_info.file_type);
1385
0
    file_creator = tsk_getu32(fs->endian, cat->std.u_info.file_cr);
1386
1387
    // Only proceed with the rest of this if the flags etc are right
1388
0
    if (file_type == HFS_HARDLINK_FILE_TYPE
1389
0
        && file_creator == HFS_HARDLINK_FILE_CREATOR) {
1390
1391
        // see if we have the HFS+ Private Data dir for file links;
1392
        // if not, it can't be a hard link.  (We could warn the user, but
1393
        // we also rely on this when finding the HFS+ Private Data dir in
1394
        // the first place and we don't want a warning on every hfs_open.)
1395
0
        if (hfs->meta_inum == 0)
1396
0
            return cnid;
1397
1398
        // For this to work, we need the FS creation times.  Is at least one of these set?
1399
0
        if ((!hfs->has_root_crtime) && (!hfs->has_meta_dir_crtime)
1400
0
            && (!hfs->has_meta_crtime)) {
1401
0
            uint32_t linkNum =
1402
0
                tsk_getu32(fs->endian, cat->std.perm.special.inum);
1403
0
            *is_error = 1;
1404
0
            if (tsk_verbose)
1405
0
                tsk_fprintf(stderr,
1406
0
                    "WARNING: hfs_follow_hard_link: File system creation times are not set. "
1407
0
                    "Cannot test inode for hard link. File type and creator indicate that this"
1408
0
                    " is a hard link (file), with LINK ID = %" PRIu32 "\n",
1409
0
                    linkNum);
1410
0
            return cnid;
1411
0
        }
1412
1413
0
        if ((!hfs->has_root_crtime) || (!hfs->has_meta_crtime)) {
1414
0
            if (tsk_verbose)
1415
0
                tsk_fprintf(stderr,
1416
0
                    "WARNING: hfs_follow_hard_link: Either the root folder or the"
1417
0
                    " file metadata folder is not accessible.  Testing this potential hard link"
1418
0
                    " may be impaired.\n");
1419
0
        }
1420
1421
        // Now we need to check the creation time against the three FS creation times
1422
0
        if ((hfs->has_meta_crtime && (crtime == hfs->meta_crtime)) ||
1423
0
            (hfs->has_meta_dir_crtime && (crtime == hfs->metadir_crtime))
1424
0
            || (hfs->has_root_crtime && (crtime == hfs->root_crtime))) {
1425
            // OK, this is a hard link to a file.
1426
0
            uint32_t linkNum =
1427
0
                tsk_getu32(fs->endian, cat->std.perm.special.inum);
1428
1429
            // We used to resolve this ID to a file in X folder using hfs_lookup_hard_link, but found 
1430
            // that it was very inefficient and always resulted in the same linkNum value.
1431
            // We now just use linkNum
1432
0
            return linkNum;
1433
0
        }
1434
0
    }
1435
0
    else if (file_type == HFS_LINKDIR_FILE_TYPE
1436
0
        && file_creator == HFS_LINKDIR_FILE_CREATOR) {
1437
1438
        // see if we have the HFS+ Private Directory Data dir for links;
1439
        // if not, it can't be a hard link.  (We could warn the user, but
1440
        // we also rely on this when finding the HFS+ Private Directory Data dir in
1441
        // the first place and we don't want a warning on every hfs_open.)
1442
0
        if (hfs->meta_dir_inum == 0)
1443
0
            return cnid;
1444
1445
        // For this to work, we need the FS creation times.  Is at least one of these set?
1446
0
        if ((!hfs->has_root_crtime) && (!hfs->has_meta_dir_crtime)
1447
0
            && (!hfs->has_meta_crtime)) {
1448
0
            uint32_t linkNum =
1449
0
                tsk_getu32(fs->endian, cat->std.perm.special.inum);
1450
0
            *is_error = 1;
1451
1452
0
            if (tsk_verbose)
1453
0
                tsk_fprintf(stderr,
1454
0
                    "WARNING: hfs_follow_hard_link: File system creation times are not set. "
1455
0
                    "Cannot test inode for hard link. File type and creator indicate that this"
1456
0
                    " is a hard link (directory), with LINK ID = %" PRIu32
1457
0
                    "\n", linkNum);
1458
0
            return cnid;
1459
0
        }
1460
1461
0
        if ((!hfs->has_root_crtime) || (!hfs->has_meta_crtime)
1462
0
            || (!hfs->has_meta_dir_crtime)) {
1463
0
            if (tsk_verbose)
1464
0
                tsk_fprintf(stderr,
1465
0
                    "WARNING: hfs_follow_hard_link: Either the root folder or the"
1466
0
                    " file metadata folder or the directory metatdata folder is"
1467
0
                    " not accessible.  Testing this potential hard linked folder "
1468
0
                    "may be impaired.\n");
1469
0
        }
1470
1471
        // Now we need to check the creation time against the three FS creation times
1472
0
        if ((hfs->has_meta_crtime && (crtime == hfs->meta_crtime)) ||
1473
0
            (hfs->has_meta_dir_crtime && (crtime == hfs->metadir_crtime))
1474
0
            || (hfs->has_root_crtime && (crtime == hfs->root_crtime))) {
1475
            // OK, this is a hard link to a directory.
1476
0
            uint32_t linkNum =
1477
0
                tsk_getu32(fs->endian, cat->std.perm.special.inum);
1478
1479
            // We used to resolve this ID to a file in X folder using hfs_lookup_hard_link, but found 
1480
            // that it was very inefficient and always resulted in the same linkNum value.
1481
            // We now just use linkNum
1482
0
            return linkNum;
1483
0
        }
1484
0
    }
1485
1486
    // It cannot be a hard link (file or directory)
1487
0
    return cnid;
1488
0
}
1489
1490
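/*
 * Illustrative sketch -- not part of hfs.c.  The hard link test above reduces
 * to two checks: the Finder type/creator pair stored in the catalog record,
 * and a creation date equal to one of the volume's metadata-folder creation
 * dates.  The helper below is hypothetical (the sketch_ prefix marks names
 * that do not exist in TSK); the four-character codes follow Apple TN1150 and
 * correspond to the HFS_HARDLINK_* and HFS_LINKDIR_* constants used above.
 */
#include <stdint.h>

static uint32_t
sketch_resolve_link(uint32_t cnid, uint32_t file_type, uint32_t file_creator,
    uint32_t crtime, uint32_t link_num,
    uint32_t root_crtime, uint32_t meta_crtime, uint32_t metadir_crtime)
{
    /* 'hlnk'/'hfs+' marks a file hard link, 'fdrp'/'MACS' a directory link */
    int looks_like_link =
        (file_type == 0x686c6e6b && file_creator == 0x6866732b) ||
        (file_type == 0x66647270 && file_creator == 0x4d414353);

    if (looks_like_link &&
        (crtime == root_crtime || crtime == meta_crtime
            || crtime == metadir_crtime))
        return link_num;        /* CNID of the link target (iNodeNNN / dir_NNN) */

    return cnid;                /* not a link: keep the entry's own CNID */
}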
1491
/** \internal
1492
 * Lookup an entry in the catalog file and save it into the entry.  Do not
1493
 * call this for the special files that do not have an entry in the catalog
1494
 * data structure.
1495
 * @param hfs File system being analyzed
1496
 * @param inum Address (cnid) of file to open
1497
 * @param entry [out] Structure to read data into
1498
 * @returns 1 on error or not found, 0 on success. Check tsk_errno
1499
 * to differentiate between error and not found.  If it is not found, then the
1500
 * errno will be TSK_ERR_FS_INODE_NUM.  Else, it will be some other value.
1501
 */
1502
uint8_t
1503
hfs_cat_file_lookup(HFS_INFO * hfs, TSK_INUM_T inum, HFS_ENTRY * entry,
1504
    unsigned char follow_hard_link)
1505
0
{
1506
0
    TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
1507
0
    hfs_btree_key_cat key;      /* current catalog key */
1508
0
    hfs_thread thread;          /* thread record */
1509
0
    hfs_file_folder record;     /* file/folder record */
1510
0
    TSK_OFF_T off;
1511
1512
0
    tsk_error_reset();
1513
1514
0
    if (tsk_verbose)
1515
0
        tsk_fprintf(stderr,
1516
0
            "hfs_cat_file_lookup: called for inum %" PRIuINUM "\n", inum);
1517
1518
    // Test if this is a special file that is not located in the catalog
1519
0
    if ((inum == HFS_EXTENTS_FILE_ID) ||
1520
0
        (inum == HFS_CATALOG_FILE_ID) ||
1521
0
        (inum == HFS_ALLOCATION_FILE_ID) ||
1522
0
        (inum == HFS_STARTUP_FILE_ID) ||
1523
0
        (inum == HFS_ATTRIBUTES_FILE_ID)) {
1524
0
        tsk_error_set_errno(TSK_ERR_FS_GENFS);
1525
0
        tsk_error_set_errstr
1526
0
            ("hfs_cat_file_lookup: Called on special file: %" PRIuINUM,
1527
0
            inum);
1528
0
        return 1;
1529
0
    }
1530
1531
1532
    /* first look up the thread record for the item we're searching for */
1533
1534
    /* set up the thread record key */
1535
0
    memset((char *) &key, 0, sizeof(hfs_btree_key_cat));
1536
0
    cnid_to_array((uint32_t) inum, key.parent_cnid);
1537
1538
0
    if (tsk_verbose)
1539
0
        tsk_fprintf(stderr,
1540
0
            "hfs_cat_file_lookup: Looking up thread record (%" PRIuINUM
1541
0
            ")\n", inum);
1542
1543
    /* look up the thread record */
1544
0
    off = hfs_cat_get_record_offset(hfs, &key);
1545
0
    if (off == 0) {
1546
        // no parsing error, just not found
1547
0
        if (tsk_error_get_errno() == 0) {
1548
0
            tsk_error_set_errno(TSK_ERR_FS_INODE_NUM);
1549
0
            tsk_error_set_errstr
1550
0
                ("hfs_cat_file_lookup: Error finding thread node for file (%"
1551
0
                PRIuINUM ")", inum);
1552
0
        }
1553
0
        else {
1554
0
            tsk_error_set_errstr2
1555
0
                (" hfs_cat_file_lookup: thread for file (%" PRIuINUM ")",
1556
0
                inum);
1557
0
        }
1558
0
        return 1;
1559
0
    }
1560
1561
    /* read the thread record */
1562
0
    if (hfs_cat_read_thread_record(hfs, off, &thread)) {
1563
0
        tsk_error_set_errstr2(" hfs_cat_file_lookup: file (%" PRIuINUM ")",
1564
0
            inum);
1565
0
        return 1;
1566
0
    }
1567
1568
    /* now look up the actual file/folder record */
1569
1570
    /* build key */
1571
0
    memset((char *) &key, 0, sizeof(hfs_btree_key_cat));
1572
0
    memcpy((char *) key.parent_cnid, (char *) thread.parent_cnid,
1573
0
        sizeof(key.parent_cnid));
1574
0
    memcpy((char *) &key.name, (char *) &thread.name, sizeof(key.name));
1575
1576
0
    if (tsk_verbose)
1577
0
        tsk_fprintf(stderr,
1578
0
            "hfs_cat_file_lookup: Looking up file record (parent: %"
1579
0
            PRIuINUM ")\n", (uint64_t) tsk_getu32(fs->endian,
1580
0
                key.parent_cnid));
1581
1582
    /* look up the record */
1583
0
    off = hfs_cat_get_record_offset(hfs, &key);
1584
0
    if (off == 0) {
1585
        // no parsing error, just not found
1586
0
        if (tsk_error_get_errno() == 0) {
1587
0
            tsk_error_set_errno(TSK_ERR_FS_INODE_NUM);
1588
0
            tsk_error_set_errstr
1589
0
                ("hfs_cat_file_lookup: Error finding record node %"
1590
0
                PRIuINUM, inum);
1591
0
        }
1592
0
        else {
1593
0
            tsk_error_set_errstr2(" hfs_cat_file_lookup: file (%" PRIuINUM
1594
0
                ")", inum);
1595
0
        }
1596
0
        return 1;
1597
0
    }
1598
1599
    /* read the record */
1600
0
    if (hfs_cat_read_file_folder_record(hfs, off, &record)) {
1601
0
        tsk_error_set_errstr2(" hfs_cat_file_lookup: file (%" PRIuINUM ")",
1602
0
            inum);
1603
0
        return 1;
1604
0
    }
1605
1606
    /* these memcpy can be gotten rid of, really */
1607
0
    if (tsk_getu16(fs->endian,
1608
0
            record.file.std.rec_type) == HFS_FOLDER_RECORD) {
1609
0
        if (tsk_verbose)
1610
0
            tsk_fprintf(stderr,
1611
0
                "hfs_cat_file_lookup: found folder record valence %" PRIu32
1612
0
                ", cnid %" PRIu32 "\n", tsk_getu32(fs->endian,
1613
0
                    record.folder.std.valence), tsk_getu32(fs->endian,
1614
0
                    record.folder.std.cnid));
1615
0
        memcpy((char *) &entry->cat, (char *) &record, sizeof(hfs_folder));
1616
0
    }
1617
0
    else if (tsk_getu16(fs->endian,
1618
0
            record.file.std.rec_type) == HFS_FILE_RECORD) {
1619
0
        if (tsk_verbose)
1620
0
            tsk_fprintf(stderr,
1621
0
                "hfs_cat_file_lookup: found file record cnid %" PRIu32
1622
0
                "\n", tsk_getu32(fs->endian, record.file.std.cnid));
1623
0
        memcpy((char *) &entry->cat, (char *) &record, sizeof(hfs_file));
1624
0
    }
1625
    /* other cases already caught by hfs_cat_read_file_folder_record */
1626
1627
0
    memcpy((char *) &entry->thread, (char *) &thread, sizeof(hfs_thread));
1628
1629
0
    entry->flags = TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_USED;
1630
0
    entry->inum = inum;
1631
1632
0
    if (follow_hard_link) {
1633
        // TEST to see if this is a hard link
1634
0
        unsigned char is_err;
1635
0
        TSK_INUM_T target_cnid =
1636
0
            hfs_follow_hard_link(hfs, &(entry->cat), &is_err);
1637
0
        if (is_err > 1) {
1638
0
            error_returned
1639
0
                ("hfs_cat_file_lookup: error occurred while following a possible hard link for "
1640
0
                "inum (cnid) =  %" PRIuINUM, inum);
1641
0
            return 1;
1642
0
        }
1643
0
        if (target_cnid != inum) {
1644
            // This is a hard link, and we have got the cnid of the target file, so look it up.
1645
0
            uint8_t res =
1646
0
                hfs_cat_file_lookup(hfs, target_cnid, entry, FALSE);
1647
0
            if (res != 0) {
1648
0
                error_returned
1649
0
                    ("hfs_cat_file_lookup: error occurred while looking up the Catalog entry for "
1650
0
                    "the target of inum (cnid) = %" PRIuINUM " target",
1651
0
                    inum);
1652
0
            }
1653
0
            return 1;
1654
0
        }
1655
1656
        // Target is NOT a hard link, so fall through to the non-hard link exit.
1657
0
    }
1658
1659
0
    if (tsk_verbose)
1660
0
        tsk_fprintf(stderr, "hfs_cat_file_lookup exiting\n");
1661
0
    return 0;
1662
0
}
1663
1664
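/*
 * Illustrative sketch -- not part of hfs.c.  It shows how a caller inside
 * this module could use hfs_cat_file_lookup() and separate "record not
 * present" from a real parse/IO error, per the contract in the comment above.
 * sketch_cat_lookup is a hypothetical name; everything else it touches is
 * declared in the TSK headers already included by this file.
 */
static int
sketch_cat_lookup(HFS_INFO * hfs, TSK_INUM_T inum)
{
    HFS_ENTRY entry;

    if (hfs_cat_file_lookup(hfs, inum, &entry, TRUE) == 0)
        return 1;               /* found: entry.cat and entry.thread are valid */

    if (tsk_error_get_errno() == TSK_ERR_FS_INODE_NUM) {
        tsk_error_reset();      /* not in the Catalog; not a hard failure */
        return 0;
    }
    return -1;                  /* real error; tsk_errno / errstr describe it */
}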
1665
static uint8_t
1666
hfs_find_highest_inum_cb(HFS_INFO * hfs, int8_t level_type,
1667
    const hfs_btree_key_cat * cur_key, int cur_keylen, size_t node_size,
1668
    TSK_OFF_T key_off, void *ptr)
1669
0
{
1670
0
    if (cur_keylen < 6) {
1671
        // Note that it would be better to return an error value here
1672
        // but the current function interface does not support this
1673
        // Also see issue #2365
1674
0
        return -1;
1675
0
    }
1676
    // NOTE: This assumes that the biggest inum is the last one that we
1677
    // see.  the traverse method does not currently promise that as part of
1678
    // its callback "contract".
1679
0
    *((TSK_INUM_T*) ptr) = tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid);
1680
0
    return HFS_BTREE_CB_IDX_LT;
1681
0
}
1682
1683
/** \internal
1684
* Returns the largest inode number in file system
1685
* @param hfs File system being analyzed
1686
* @returns largest metadata address
1687
*/
1688
static TSK_INUM_T
1689
hfs_find_highest_inum(HFS_INFO * hfs)
1690
0
{
1691
    // @@@ get actual number from Catalog file (go to far right) (we can't always trust the vol header)
1692
0
    TSK_INUM_T inum;
1693
0
    if (hfs_cat_traverse(hfs, hfs_find_highest_inum_cb, &inum)) {
1694
      /* Catalog traversal failed, fallback on legacy method :
1695
         if HFS_VH_ATTR_CNIDS_REUSED is set, then
1696
         the maximum CNID is 2^32-1; if it's not set, then nextCatalogId is
1697
         supposed to be larger than all CNIDs on disk.
1698
       */
1699
0
        TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
1700
0
        if (tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_CNIDS_REUSED)
1701
0
            return (TSK_INUM_T) 0xffffffff;
1702
0
        else
1703
0
            return (TSK_INUM_T) tsk_getu32(fs->endian,
1704
0
                hfs->fs->next_cat_id) - 1;
1705
0
    }
1706
0
    return inum;
1707
0
}
1708
1709
1710
static TSK_FS_META_MODE_ENUM
1711
hfs_mode_to_tsk_mode(uint16_t a_mode)
1712
0
{
1713
0
    TSK_FS_META_MODE_ENUM mode = 0;
1714
1715
0
    if (a_mode & HFS_IN_ISUID)
1716
0
        mode |= TSK_FS_META_MODE_ISUID;
1717
0
    if (a_mode & HFS_IN_ISGID)
1718
0
        mode |= TSK_FS_META_MODE_ISGID;
1719
0
    if (a_mode & HFS_IN_ISVTX)
1720
0
        mode |= TSK_FS_META_MODE_ISVTX;
1721
1722
0
    if (a_mode & HFS_IN_IRUSR)
1723
0
        mode |= TSK_FS_META_MODE_IRUSR;
1724
0
    if (a_mode & HFS_IN_IWUSR)
1725
0
        mode |= TSK_FS_META_MODE_IWUSR;
1726
0
    if (a_mode & HFS_IN_IXUSR)
1727
0
        mode |= TSK_FS_META_MODE_IXUSR;
1728
1729
0
    if (a_mode & HFS_IN_IRGRP)
1730
0
        mode |= TSK_FS_META_MODE_IRGRP;
1731
0
    if (a_mode & HFS_IN_IWGRP)
1732
0
        mode |= TSK_FS_META_MODE_IWGRP;
1733
0
    if (a_mode & HFS_IN_IXGRP)
1734
0
        mode |= TSK_FS_META_MODE_IXGRP;
1735
1736
0
    if (a_mode & HFS_IN_IROTH)
1737
0
        mode |= TSK_FS_META_MODE_IROTH;
1738
0
    if (a_mode & HFS_IN_IWOTH)
1739
0
        mode |= TSK_FS_META_MODE_IWOTH;
1740
0
    if (a_mode & HFS_IN_IXOTH)
1741
0
        mode |= TSK_FS_META_MODE_IXOTH;
1742
1743
0
    return mode;
1744
0
}
1745
1746
static TSK_FS_META_TYPE_ENUM
1747
hfs_mode_to_tsk_meta_type(uint16_t a_mode)
1748
0
{
1749
0
    switch (a_mode & HFS_IN_IFMT) {
1750
0
    case HFS_IN_IFIFO:
1751
0
        return TSK_FS_META_TYPE_FIFO;
1752
0
    case HFS_IN_IFCHR:
1753
0
        return TSK_FS_META_TYPE_CHR;
1754
0
    case HFS_IN_IFDIR:
1755
0
        return TSK_FS_META_TYPE_DIR;
1756
0
    case HFS_IN_IFBLK:
1757
0
        return TSK_FS_META_TYPE_BLK;
1758
0
    case HFS_IN_IFREG:
1759
0
        return TSK_FS_META_TYPE_REG;
1760
0
    case HFS_IN_IFLNK:
1761
0
        return TSK_FS_META_TYPE_LNK;
1762
0
    case HFS_IN_IFSOCK:
1763
0
        return TSK_FS_META_TYPE_SOCK;
1764
0
    case HFS_IFWHT:
1765
0
        return TSK_FS_META_TYPE_WHT;
1766
0
    case HFS_IFXATTR:
1767
0
        return TSK_FS_META_TYPE_UNDEF;
1768
0
    default:
1769
        /* error */
1770
0
        return TSK_FS_META_TYPE_UNDEF;
1771
0
    }
1772
0
}
1773
1774
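/*
 * Illustrative sketch -- not part of hfs.c.  The two translators above are a
 * straight bit mapping of the BSD-style mode stored in the catalog record.
 * For example, an HFS+ mode of 0100644 (regular file, rw-r--r--) yields
 * TSK_FS_META_TYPE_REG and the IRUSR|IWUSR|IRGRP|IROTH bits.
 * sketch_mode_example is a hypothetical name.
 */
static void
sketch_mode_example(void)
{
    uint16_t hfsmode = 0100644;                 /* S_IFREG | rw-r--r-- */
    TSK_FS_META_TYPE_ENUM type = hfs_mode_to_tsk_meta_type(hfsmode);
    TSK_FS_META_MODE_ENUM mode = hfs_mode_to_tsk_mode(hfsmode);

    /* type == TSK_FS_META_TYPE_REG; mode has the three read bits and the
     * owner write bit set, everything else clear */
    (void) type;
    (void) mode;
}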
1775
static uint8_t
1776
hfs_make_specialbase(TSK_FS_FILE * fs_file)
1777
2
{
1778
2
    fs_file->meta->type = TSK_FS_META_TYPE_REG;
1779
2
    fs_file->meta->mode = 0;
1780
2
    fs_file->meta->nlink = 1;
1781
2
    fs_file->meta->flags =
1782
2
        (TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_ALLOC);
1783
2
    fs_file->meta->uid = fs_file->meta->gid = 0;
1784
2
    fs_file->meta->mtime = fs_file->meta->atime = fs_file->meta->ctime =
1785
2
        fs_file->meta->crtime = 0;
1786
2
    fs_file->meta->mtime_nano = fs_file->meta->atime_nano =
1787
2
        fs_file->meta->ctime_nano = fs_file->meta->crtime_nano = 0;
1788
1789
2
    if (fs_file->meta->name2 == NULL) {
1790
2
        if ((fs_file->meta->name2 = (TSK_FS_META_NAME_LIST *)
1791
2
                tsk_malloc(sizeof(TSK_FS_META_NAME_LIST))) == NULL) {
1792
0
            error_returned
1793
0
                (" - hfs_make_specialbase, couldn't malloc space for a name list");
1794
0
            return 1;
1795
0
        }
1796
2
        fs_file->meta->name2->next = NULL;
1797
2
    }
1798
1799
2
    if (fs_file->meta->attr != NULL) {
1800
0
        tsk_fs_attrlist_markunused(fs_file->meta->attr);
1801
0
    }
1802
2
    else {
1803
2
        fs_file->meta->attr = tsk_fs_attrlist_alloc();
1804
2
    }
1805
2
    return 0;
1806
2
}
1807
1808
/**
1809
 * \internal
1810
 * Create an FS_INODE structure for the catalog file.
1811
 *
1812
 * @param hfs File system to analyze
1813
 * @param fs_file Structure to copy file information into.
1814
 * @return 1 on error and 0 on success
1815
 */
1816
static uint8_t
1817
hfs_make_catalog(HFS_INFO * hfs, TSK_FS_FILE * fs_file)
1818
1
{
1819
1
    TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs;
1820
1
    TSK_FS_ATTR *fs_attr;
1821
1
    TSK_FS_ATTR_RUN *attr_run;
1822
1
    unsigned char dummy1, dummy2;
1823
1
    uint64_t dummy3;
1824
1
    uint8_t result;
1825
1826
1
    if (tsk_verbose)
1827
0
        tsk_fprintf(stderr,
1828
0
            "hfs_make_catalog: Making virtual catalog file\n");
1829
1830
1
    if (hfs_make_specialbase(fs_file)) {
1831
0
        error_returned(" - hfs_make_catalog");
1832
0
        return 1;
1833
0
    }
1834
1835
1
    fs_file->meta->addr = HFS_CATALOG_FILE_ID;
1836
1
    strncpy(fs_file->meta->name2->name, HFS_CATALOGNAME,
1837
1
        TSK_FS_META_NAME_LIST_NSIZE);
1838
1839
1
    fs_file->meta->size =
1840
1
        tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz);
1841
1842
1843
    // convert the runs in the volume header to attribute runs
1844
1
    if (((attr_run =
1845
1
                hfs_extents_to_attr(fs, hfs->fs->cat_file.extents,
1846
1
                    0)) == NULL) && (tsk_error_get_errno() != 0)) {
1847
0
        error_returned(" - hfs_make_catalog");
1848
0
        return 1;
1849
0
    }
1850
1851
1
    if ((fs_attr =
1852
1
            tsk_fs_attrlist_getnew(fs_file->meta->attr,
1853
1
                TSK_FS_ATTR_NONRES)) == NULL) {
1854
0
        error_returned(" - hfs_make_catalog");
1855
0
        tsk_fs_attr_run_free(attr_run);
1856
0
        return 1;
1857
0
    }
1858
1859
    // initialize the data run
1860
1
    if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL,
1861
1
            TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
1862
1
            tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz),
1863
1
            tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz),
1864
1
            tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz), 0, 0)) {
1865
0
        error_returned(" - hfs_make_catalog");
1866
0
        tsk_fs_attr_run_free(attr_run);
1867
0
        return 1;
1868
0
    }
1869
1870
    // see if catalog file has additional runs
1871
1
    if (hfs_ext_find_extent_record_attr(hfs, HFS_CATALOG_FILE_ID, fs_attr,
1872
1
            TRUE)) {
1873
1
        error_returned(" - hfs_make_catalog");
1874
1
        fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
1875
1
        return 1;
1876
1
    }
1877
1878
0
    result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3);
1879
0
    if (result != 0) {
1880
0
        if (tsk_verbose)
1881
0
            tsk_fprintf(stderr,
1882
0
                "WARNING: Extended attributes failed to load for the Catalog file.\n");
1883
0
        tsk_error_reset();
1884
0
    }
1885
1886
0
    fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
1887
0
    return 0;
1888
1
}
1889
1890
/**
1891
* \internal
1892
 * Create an FS_FILE for the extents file
1893
 *
1894
 * @param hfs File system to analyze
1895
 * @param fs_file Structure to copy file information into.
1896
 * @return 1 on error and 0 on success
1897
 */
1898
static uint8_t
1899
hfs_make_extents(HFS_INFO * hfs, TSK_FS_FILE * fs_file)
1900
1
{
1901
1
    TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs;
1902
1
    TSK_FS_ATTR *fs_attr;
1903
1
    TSK_FS_ATTR_RUN *attr_run;
1904
1905
1
    if (tsk_verbose)
1906
0
        tsk_fprintf(stderr,
1907
0
            "hfs_make_extents: Making virtual extents file\n");
1908
1909
1
    if (hfs_make_specialbase(fs_file)) {
1910
0
        error_returned(" - hfs_make_extents");
1911
0
        return 1;
1912
0
    }
1913
1914
1
    fs_file->meta->addr = HFS_EXTENTS_FILE_ID;
1915
1
    strncpy(fs_file->meta->name2->name, HFS_EXTENTSNAME,
1916
1
        TSK_FS_META_NAME_LIST_NSIZE);
1917
1918
1
    fs_file->meta->size =
1919
1
        tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz);
1920
1921
1922
1
    if (((attr_run =
1923
1
                hfs_extents_to_attr(fs, hfs->fs->ext_file.extents,
1924
1
                    0)) == NULL) && (tsk_error_get_errno() != 0)) {
1925
0
        error_returned(" - hfs_make_extents");
1926
0
        return 1;
1927
0
    }
1928
1929
1
    if ((fs_attr =
1930
1
            tsk_fs_attrlist_getnew(fs_file->meta->attr,
1931
1
                TSK_FS_ATTR_NONRES)) == NULL) {
1932
0
        error_returned(" - hfs_make_extents");
1933
0
        tsk_fs_attr_run_free(attr_run);
1934
0
        return 1;
1935
0
    }
1936
1937
    // initialize the data run
1938
1
    if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL,
1939
1
            TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
1940
1
            tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz),
1941
1
            tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz),
1942
1
            tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz), 0, 0)) {
1943
0
        error_returned(" - hfs_make_extents");
1944
0
        tsk_fs_attr_run_free(attr_run);
1945
0
        return 1;
1946
0
    }
1947
1948
    //hfs_load_extended_attrs(fs_file);
1949
1950
    // Extents doesn't have an entry in itself
1951
1952
1
    fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
1953
1
    return 0;
1954
1
}
1955
1956
1957
/**
1958
 * \internal
1959
 * Create an FS_INODE structure for the blockmap / allocation file.
1960
 *
1961
 * @param hfs File system to analyze
1962
 * @param fs_file Structure to copy file information into.
1963
 * @return 1 on error and 0 on success
1964
 */
1965
static uint8_t
1966
hfs_make_blockmap(HFS_INFO * hfs, TSK_FS_FILE * fs_file)
1967
0
{
1968
0
    TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs;
1969
0
    TSK_FS_ATTR *fs_attr;
1970
0
    TSK_FS_ATTR_RUN *attr_run;
1971
0
    unsigned char dummy1, dummy2;
1972
0
    uint64_t dummy3;
1973
0
    uint8_t result;
1974
1975
0
    if (tsk_verbose)
1976
0
        tsk_fprintf(stderr,
1977
0
            "hfs_make_blockmap: Making virtual blockmap file\n");
1978
1979
0
    if (hfs_make_specialbase(fs_file)) {
1980
0
        error_returned(" - hfs_make_blockmap");
1981
0
        return 1;
1982
0
    }
1983
1984
0
    fs_file->meta->addr = HFS_ALLOCATION_FILE_ID;
1985
0
    strncpy(fs_file->meta->name2->name, HFS_ALLOCATIONNAME,
1986
0
        TSK_FS_META_NAME_LIST_NSIZE);
1987
1988
0
    fs_file->meta->size =
1989
0
        tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz);
1990
1991
0
    if (((attr_run =
1992
0
                hfs_extents_to_attr(fs, hfs->fs->alloc_file.extents,
1993
0
                    0)) == NULL) && (tsk_error_get_errno() != 0)) {
1994
0
        error_returned(" - hfs_make_blockmap");
1995
0
        return 1;
1996
0
    }
1997
1998
0
    if ((fs_attr =
1999
0
            tsk_fs_attrlist_getnew(fs_file->meta->attr,
2000
0
                TSK_FS_ATTR_NONRES)) == NULL) {
2001
0
        error_returned(" - hfs_make_blockmap");
2002
0
        tsk_fs_attr_run_free(attr_run);
2003
0
        return 1;
2004
0
    }
2005
2006
    // initialize the data run
2007
0
    if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL,
2008
0
            TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
2009
0
            tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz),
2010
0
            tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz),
2011
0
            tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz), 0, 0)) {
2012
0
        error_returned(" - hfs_make_blockmap");
2013
0
        tsk_fs_attr_run_free(attr_run);
2014
0
        return 1;
2015
0
    }
2016
2017
    // see if the allocation file has additional runs
2018
0
    if (hfs_ext_find_extent_record_attr(hfs, HFS_ALLOCATION_FILE_ID,
2019
0
            fs_attr, TRUE)) {
2020
0
        error_returned(" - hfs_make_blockmap");
2021
0
        fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
2022
0
        return 1;
2023
0
    }
2024
2025
2026
0
    result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3);
2027
0
    if (result != 0) {
2028
0
        if (tsk_verbose)
2029
0
            tsk_fprintf(stderr,
2030
0
                "WARNING: Extended attributes failed to load for the Allocation file.\n");
2031
0
        tsk_error_reset();
2032
0
    }
2033
2034
0
    fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
2035
0
    return 0;
2036
0
}
2037
2038
/**
2039
* \internal
2040
 * Create an FS_INODE structure for the startup / boot file.
2041
 *
2042
 * @param hfs File system to analyze
2043
 * @param fs_file Structure to copy file information into.
2044
 * @return 1 on error and 0 on success
2045
 */
2046
static uint8_t
2047
hfs_make_startfile(HFS_INFO * hfs, TSK_FS_FILE * fs_file)
2048
0
{
2049
0
    TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs;
2050
0
    TSK_FS_ATTR *fs_attr;
2051
0
    TSK_FS_ATTR_RUN *attr_run;
2052
0
    unsigned char dummy1, dummy2;
2053
0
    uint64_t dummy3;
2054
0
    uint8_t result;
2055
2056
0
    if (tsk_verbose)
2057
0
        tsk_fprintf(stderr,
2058
0
            "hfs_make_startfile: Making virtual startup file\n");
2059
2060
0
    if (hfs_make_specialbase(fs_file)) {
2061
0
        error_returned(" - hfs_make_startfile");
2062
0
        return 1;
2063
0
    }
2064
2065
0
    fs_file->meta->addr = HFS_STARTUP_FILE_ID;
2066
0
    strncpy(fs_file->meta->name2->name, HFS_STARTUPNAME,
2067
0
        TSK_FS_META_NAME_LIST_NSIZE);
2068
2069
0
    fs_file->meta->size =
2070
0
        tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz);
2071
2072
0
    if (((attr_run =
2073
0
                hfs_extents_to_attr(fs, hfs->fs->start_file.extents,
2074
0
                    0)) == NULL) && (tsk_error_get_errno() != 0)) {
2075
0
        error_returned(" - hfs_make_startfile");
2076
0
        return 1;
2077
0
    }
2078
2079
0
    if ((fs_attr =
2080
0
            tsk_fs_attrlist_getnew(fs_file->meta->attr,
2081
0
                TSK_FS_ATTR_NONRES)) == NULL) {
2082
0
        error_returned(" - hfs_make_startfile");
2083
0
        tsk_fs_attr_run_free(attr_run);
2084
0
        return 1;
2085
0
    }
2086
2087
    // initialize the data run
2088
0
    if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL,
2089
0
            TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
2090
0
            tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz),
2091
0
            tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz),
2092
0
            tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz), 0, 0)) {
2093
0
        error_returned(" - hfs_make_startfile");
2094
0
        tsk_fs_attr_run_free(attr_run);
2095
0
        return 1;
2096
0
    }
2097
2098
    // see if the startup file has additional runs
2099
0
    if (hfs_ext_find_extent_record_attr(hfs, HFS_STARTUP_FILE_ID, fs_attr,
2100
0
            TRUE)) {
2101
0
        error_returned(" - hfs_make_startfile");
2102
0
        fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
2103
0
        return 1;
2104
0
    }
2105
2106
0
    result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3);
2107
0
    if (result != 0) {
2108
0
        if (tsk_verbose)
2109
0
            tsk_fprintf(stderr,
2110
0
                "WARNING: Extended attributes failed to load for the Start file.\n");
2111
0
        tsk_error_reset();
2112
0
    }
2113
2114
0
    fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
2115
0
    return 0;
2116
0
}
2117
2118
2119
/**
2120
 * \internal
2121
 * Create an FS_INODE structure for the attributes file.
2122
 *
2123
 * @param hfs File system to analyze
2124
 * @param fs_file Structure to copy file information into.
2125
 * @return 1 on error and 0 on success
2126
 */
2127
static uint8_t
2128
hfs_make_attrfile(HFS_INFO * hfs, TSK_FS_FILE * fs_file)
2129
0
{
2130
0
    TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs;
2131
0
    TSK_FS_ATTR *fs_attr;
2132
0
    TSK_FS_ATTR_RUN *attr_run;
2133
2134
0
    if (tsk_verbose)
2135
0
        tsk_fprintf(stderr,
2136
0
            "hfs_make_attrfile: Making virtual attributes file\n");
2137
2138
0
    if (hfs_make_specialbase(fs_file)) {
2139
0
        error_returned(" - hfs_make_attrfile");
2140
0
        return 1;
2141
0
    }
2142
2143
0
    fs_file->meta->addr = HFS_ATTRIBUTES_FILE_ID;
2144
0
    strncpy(fs_file->meta->name2->name, HFS_ATTRIBUTESNAME,
2145
0
        TSK_FS_META_NAME_LIST_NSIZE);
2146
2147
0
    fs_file->meta->size =
2148
0
        tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz);
2149
2150
0
    if (((attr_run =
2151
0
                hfs_extents_to_attr(fs, hfs->fs->attr_file.extents,
2152
0
                    0)) == NULL) && (tsk_error_get_errno() != 0)) {
2153
0
        error_returned(" - hfs_make_attrfile");
2154
0
        return 1;
2155
0
    }
2156
2157
0
    if ((fs_attr =
2158
0
            tsk_fs_attrlist_getnew(fs_file->meta->attr,
2159
0
                TSK_FS_ATTR_NONRES)) == NULL) {
2160
0
        error_returned(" - hfs_make_attrfile");
2161
0
        tsk_fs_attr_run_free(attr_run);
2162
0
        return 1;
2163
0
    }
2164
2165
    // initialize the data run
2166
0
    if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL,
2167
0
            TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
2168
0
            tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz),
2169
0
            tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz),
2170
0
            tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz), 0, 0)) {
2171
0
        error_returned(" - hfs_make_attrfile");
2172
0
        tsk_fs_attr_run_free(attr_run);
2173
0
        return 1;
2174
0
    }
2175
2176
    // see if the attributes file has additional runs
2177
0
    if (hfs_ext_find_extent_record_attr(hfs, HFS_ATTRIBUTES_FILE_ID,
2178
0
            fs_attr, TRUE)) {
2179
0
        error_returned(" - hfs_make_attrfile");
2180
0
        fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
2181
0
        return 1;
2182
0
    }
2183
2184
    //hfs_load_extended_attrs(fs_file);
2185
2186
0
    fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
2187
0
    return 0;
2188
0
}
2189
2190
2191
2192
/**
2193
 * \internal
2194
 * Create an FS_FILE structure for the BadBlocks file.
2195
 *
2196
 * @param hfs File system to analyze
2197
 * @param fs_file Structure to copy file information into.
2198
 * @return 1 on error and 0 on success
2199
 */
2200
static uint8_t
2201
hfs_make_badblockfile(HFS_INFO * hfs, TSK_FS_FILE * fs_file)
2202
0
{
2203
0
    TSK_FS_ATTR *fs_attr;
2204
0
    unsigned char dummy1, dummy2;
2205
0
    uint64_t dummy3;
2206
0
    uint8_t result;
2207
2208
0
    if (tsk_verbose)
2209
0
        tsk_fprintf(stderr,
2210
0
            "hfs_make_badblockfile: Making virtual badblock file\n");
2211
2212
0
    if (hfs_make_specialbase(fs_file)) {
2213
0
        error_returned(" - hfs_make_badblockfile");
2214
0
        return 1;
2215
0
    }
2216
2217
0
    fs_file->meta->addr = HFS_BAD_BLOCK_FILE_ID;
2218
0
    strncpy(fs_file->meta->name2->name, HFS_BAD_BLOCK_FILE_NAME,
2219
0
        TSK_FS_META_NAME_LIST_NSIZE);
2220
2221
0
    fs_file->meta->size = 0;
2222
2223
0
    if ((fs_attr =
2224
0
            tsk_fs_attrlist_getnew(fs_file->meta->attr,
2225
0
                TSK_FS_ATTR_NONRES)) == NULL) {
2226
0
        error_returned(" - hfs_make_badblockfile");
2227
0
        return 1;
2228
0
    }
2229
2230
    // add the run to the file.
2231
0
    if (tsk_fs_attr_set_run(fs_file, fs_attr, NULL, NULL,
2232
0
            TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
2233
0
            fs_file->meta->size, fs_file->meta->size, fs_file->meta->size,
2234
0
            0, 0)) {
2235
0
        error_returned(" - hfs_make_badblockfile");
2236
0
        return 1;
2237
0
    }
2238
2239
    // see if file has additional runs
2240
0
    if (hfs_ext_find_extent_record_attr(hfs, HFS_BAD_BLOCK_FILE_ID,
2241
0
            fs_attr, TRUE)) {
2242
0
        error_returned(" - hfs_make_badblockfile");
2243
0
        fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
2244
0
        return 1;
2245
0
    }
2246
2247
    /* @@@ We have a chicken and egg problem here...  The current design of
2248
     * fs_attr_set() requires the size to be set, but we don't know the size
2249
     * until we look into the extents file (which adds to an attribute...).
2250
     * This does not seem to be the best design...  need a way to test this. */
2251
0
    fs_file->meta->size = fs_attr->nrd.initsize;
2252
0
    fs_attr->size = fs_file->meta->size;
2253
0
    fs_attr->nrd.allocsize = fs_file->meta->size;
2254
2255
0
    result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3);
2256
0
    if (result != 0) {
2257
0
        if (tsk_verbose)
2258
0
            tsk_fprintf(stderr,
2259
0
                "WARNING: Extended attributes failed to load for the BadBlocks file.\n");
2260
0
        tsk_error_reset();
2261
0
    }
2262
2263
0
    fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
2264
0
    return 0;
2265
0
}
2266
2267
2268
/** \internal
2269
 * Copy the catalog file or folder record entry into a TSK data structure.
2270
 * @param a_hfs File system being analyzed
2271
 * @param a_hfs_entry Catalog record entry (HFS_ENTRY *)
2272
 * @param a_fs_file Structure to copy data into (TSK_FS_FILE *)
2273
 * Returns 1 on error.
2274
 */
2275
static uint8_t
2276
hfs_dinode_copy(HFS_INFO * a_hfs, const HFS_ENTRY * a_hfs_entry,
2277
    TSK_FS_FILE * a_fs_file)
2278
0
{
2279
2280
    // Note, a_hfs_entry->cat is really of type hfs_file.  But, hfs_file_folder is a union
2281
    // of that type with hfs_folder.  Both of hfs_file and hfs_folder have the same first member.
2282
    // So, this cast is appropriate.
2283
0
    const hfs_file_folder *a_entry =
2284
0
        (hfs_file_folder *) & (a_hfs_entry->cat);
2285
0
    const hfs_file_fold_std *std;
2286
0
    TSK_FS_META *a_fs_meta = a_fs_file->meta;
2287
0
    TSK_FS_INFO *fs;
2288
0
    uint16_t hfsmode;
2289
0
    TSK_INUM_T iStd;            // the inum (or CNID) that occurs in the standard file metadata
2290
2291
0
    if (a_entry == NULL) {
2292
0
        error_detected(TSK_ERR_FS_ARG,
2293
0
            "hfs_dinode_copy: a_entry = a_hfs_entry->cat is NULL");
2294
0
        return 1;
2295
0
    }
2296
2297
0
    fs = (TSK_FS_INFO *) & a_hfs->fs_info;
2298
2299
2300
    // Just a sanity check.  The inum (or cnid) occurs in two places in the
2301
    // entry data structure.
2302
0
    iStd = tsk_getu32(fs->endian, a_entry->file.std.cnid);
2303
0
    if (iStd != a_hfs_entry->inum) {
2304
0
        if (tsk_verbose)
2305
0
            tsk_fprintf(stderr,
2306
0
                "WARNING: hfs_dinode_copy:  HFS_ENTRY with conflicting values for inum (or cnid).\n");
2307
0
    }
2308
2309
0
    if (a_fs_meta == NULL) {
2310
0
        tsk_error_set_errno(TSK_ERR_FS_ARG);
2311
0
        tsk_error_set_errstr("hfs_dinode_copy: a_fs_meta is NULL");
2312
0
        return 1;
2313
0
    }
2314
2315
    // both files and folders start off the same
2316
0
    std = &(a_entry->file.std);
2317
2318
0
    if (tsk_verbose)
2319
0
        tsk_fprintf(stderr,
2320
0
            "hfs_dinode_copy: called for file/folder %" PRIu32 "\n",
2321
0
            tsk_getu32(fs->endian, std->cnid));
2322
2323
0
    if (a_fs_meta->content_len < HFS_FILE_CONTENT_LEN) {
2324
0
        if ((a_fs_meta =
2325
0
                tsk_fs_meta_realloc(a_fs_meta,
2326
0
                    HFS_FILE_CONTENT_LEN)) == NULL) {
2327
0
            return 1;
2328
0
        }
2329
0
    }
2330
0
    a_fs_meta->attr_state = TSK_FS_META_ATTR_EMPTY;
2331
0
    if (a_fs_meta->attr) {
2332
0
        tsk_fs_attrlist_markunused(a_fs_meta->attr);
2333
0
    }
2334
2335
2336
    /*
2337
     * Copy the file type specific stuff first
2338
     */
2339
0
    hfsmode = tsk_getu16(fs->endian, std->perm.mode);
2340
2341
0
    if (tsk_getu16(fs->endian, std->rec_type) == HFS_FOLDER_RECORD) {
2342
        // set the type if the mode does not specify it
2343
0
        if ((hfsmode & HFS_IN_IFMT) == 0)
2344
0
            a_fs_meta->type = TSK_FS_META_TYPE_DIR;
2345
0
        a_fs_meta->size = 0;
2346
0
        memset(a_fs_meta->content_ptr, 0, HFS_FILE_CONTENT_LEN);
2347
0
    }
2348
0
    else if (tsk_getu16(fs->endian, std->rec_type) == HFS_FILE_RECORD) {
2349
0
        hfs_fork *fork;
2350
        // set the type if the mode does not specify it
2351
0
        if ((hfsmode & HFS_IN_IFMT) == 0)
2352
0
            a_fs_meta->type = TSK_FS_META_TYPE_REG;
2353
0
        a_fs_meta->size =
2354
0
            tsk_getu64(fs->endian, a_entry->file.data.logic_sz);
2355
2356
        // copy the data and resource forks
2357
0
        fork = (hfs_fork *) a_fs_meta->content_ptr;
2358
0
        memcpy(fork, &(a_entry->file.data), sizeof(hfs_fork));
2359
0
        memcpy(&fork[1], &(a_entry->file.resource), sizeof(hfs_fork));
2360
0
    }
2361
0
    else {
2362
0
        if (tsk_verbose)
2363
0
            tsk_fprintf(stderr,
2364
0
                "hfs_dinode_copy error: catalog entry is neither file nor folder\n");
2365
0
        return 1;
2366
0
    }
2367
2368
    /*
2369
     * Copy the standard stuff.
2370
     * Use default values (as defined in spec) if mode is not defined.
2371
     */
2372
0
    if ((hfsmode & HFS_IN_IFMT) == 0) {
2373
0
        a_fs_meta->mode = 0;
2374
0
        a_fs_meta->uid = 99;
2375
0
        a_fs_meta->gid = 99;
2376
0
    }
2377
0
    else {
2378
0
        a_fs_meta->mode = hfs_mode_to_tsk_mode(hfsmode);
2379
0
        a_fs_meta->type = hfs_mode_to_tsk_meta_type(hfsmode);
2380
0
        a_fs_meta->uid = tsk_getu32(fs->endian, std->perm.owner);
2381
0
        a_fs_meta->gid = tsk_getu32(fs->endian, std->perm.group);
2382
0
    }
2383
2384
    // this field is set only for "indirect" entries
2385
0
    if (tsk_getu32(fs->endian, std->perm.special.nlink))
2386
0
        a_fs_meta->nlink = tsk_getu32(fs->endian, std->perm.special.nlink);
2387
0
    else
2388
0
        a_fs_meta->nlink = 1;
2389
2390
0
    a_fs_meta->mtime =
2391
0
        hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->cmtime));
2392
0
    a_fs_meta->atime =
2393
0
        hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->atime));
2394
0
    a_fs_meta->crtime =
2395
0
        hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->crtime));
2396
0
    a_fs_meta->ctime =
2397
0
        hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->amtime));
2398
0
    a_fs_meta->time2.hfs.bkup_time =
2399
0
        hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->bkup_date));
2400
0
    a_fs_meta->mtime_nano = a_fs_meta->atime_nano = a_fs_meta->ctime_nano =
2401
0
        a_fs_meta->crtime_nano = 0;
2402
0
    a_fs_meta->time2.hfs.bkup_time_nano = 0;
2403
2404
0
    a_fs_meta->addr = tsk_getu32(fs->endian, std->cnid);
2405
2406
    // All entries here are used.
2407
0
    a_fs_meta->flags = TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_USED;
2408
2409
0
    if (std->perm.o_flags & HFS_PERM_OFLAG_COMPRESSED)
2410
0
        a_fs_meta->flags |= TSK_FS_META_FLAG_COMP;
2411
2412
    // We copy this inum (or cnid) here, because this file *might* have been a hard link.  In
2413
    // that case, we want to make sure that a_fs_file points consistently to the target of the
2414
    // link.
2415
2416
    //if (a_fs_file->name != NULL) {
2417
    //    a_fs_file->name->meta_addr = a_fs_meta->addr;
2418
    //}
2419
2420
    /* TODO @@@ could fill in name2 with this entry's name and parent inode
2421
       from Catalog entry */
2422
2423
    /* set the link string (if the file is a link)
2424
     * The size check is a sanity check so that we don't try to allocate
2425
     * a huge amount of memory for a bad inode value
2426
     */
2427
0
    if ((a_fs_meta->type == TSK_FS_META_TYPE_LNK) &&
2428
0
        (a_fs_meta->size >= 0) && (a_fs_meta->size < HFS_MAXPATHLEN)) {
2429
2430
0
        ssize_t bytes_read;
2431
2432
0
        a_fs_meta->link = tsk_malloc((size_t) a_fs_meta->size + 1);
2433
0
        if (a_fs_meta->link == NULL)
2434
0
            return 1;
2435
2436
0
        bytes_read = tsk_fs_file_read(a_fs_file, (TSK_OFF_T) 0,
2437
0
            a_fs_meta->link, (size_t) a_fs_meta->size,
2438
0
            TSK_FS_FILE_READ_FLAG_NONE);
2439
0
        a_fs_meta->link[a_fs_meta->size] = '\0';
2440
2441
0
        if (bytes_read != a_fs_meta->size) {
2442
0
            if (tsk_verbose)
2443
0
                tsk_fprintf(stderr,
2444
0
                    "hfs_dinode_copy: failed to read contents of symbolic link; "
2445
0
                    "expected %u bytes but tsk_fs_file_read() returned %u\n",
2446
0
                    a_fs_meta->size, bytes_read);
2447
0
            free(a_fs_meta->link);
2448
0
            a_fs_meta->link = NULL;
2449
0
            return 1;
2450
0
        }
2451
0
    }
2452
2453
0
    return 0;
2454
0
}
2455
2456
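/*
 * Illustrative sketch -- not part of hfs.c.  The timestamps copied above are
 * HFS+ dates: unsigned seconds since 1904-01-01 00:00:00 GMT.  Converting to
 * Unix time means subtracting the 2,082,844,800-second gap between the two
 * epochs, which is what hfs_convert_2_unix_time() (defined earlier in this
 * module) boils down to.  The names below are hypothetical.
 */
#include <stdint.h>
#include <time.h>

#define SKETCH_HFS_UNIX_EPOCH_DELTA 2082844800U

static time_t
sketch_hfs_to_unix_time(uint32_t hfs_time)
{
    if (hfs_time < SKETCH_HFS_UNIX_EPOCH_DELTA)
        return 0;               /* predates 1970; the sketch clamps to zero */
    return (time_t) (hfs_time - SKETCH_HFS_UNIX_EPOCH_DELTA);
}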
2457
/** \internal
2458
 * Load a catalog file entry and save it in the TSK_FS_FILE structure.
2459
 *
2460
 * @param fs File system to read from.
2461
 * @param a_fs_file Structure to read into.
2462
 * @param inum File address to load
2463
 * @returns 1 on error
2464
 */
2465
static uint8_t
2466
hfs_inode_lookup(TSK_FS_INFO * fs, TSK_FS_FILE * a_fs_file,
2467
    TSK_INUM_T inum)
2468
2
{
2469
2
    HFS_INFO *hfs = (HFS_INFO *) fs;
2470
2
    HFS_ENTRY entry;
2471
2472
2
    if (a_fs_file == NULL) {
2473
0
        tsk_error_set_errno(TSK_ERR_FS_ARG);
2474
0
        tsk_error_set_errstr("hfs_inode_lookup: fs_file is NULL");
2475
0
        return 1;
2476
0
    }
2477
2478
2
    if (a_fs_file->meta == NULL) {
2479
2
        a_fs_file->meta = tsk_fs_meta_alloc(HFS_FILE_CONTENT_LEN);
2480
2
    }
2481
2482
2
    if (a_fs_file->meta == NULL) {
2483
0
        return 1;
2484
0
    }
2485
2
    else {
2486
2
        tsk_fs_meta_reset(a_fs_file->meta);
2487
2
    }
2488
2489
2
    if (tsk_verbose)
2490
0
        tsk_fprintf(stderr, "hfs_inode_lookup: looking up %" PRIuINUM "\n",
2491
0
            inum);
2492
2493
    // @@@ Will need to add orphan stuff here too
2494
2495
    /* First see if this is a special entry
2496
     * the special ones have their metadata stored in the volume header */
2497
2
    if (inum == HFS_EXTENTS_FILE_ID) {
2498
1
        if (!hfs->has_extents_file) {
2499
0
            error_detected(TSK_ERR_FS_INODE_NUM,
2500
0
                "Extents File not present");
2501
0
            return 1;
2502
0
        }
2503
2504
1
        return hfs_make_extents(hfs, a_fs_file);
2505
1
    }
2506
1
    else if (inum == HFS_CATALOG_FILE_ID) {
2507
1
        return hfs_make_catalog(hfs, a_fs_file);
2508
1
    }
2509
0
    else if (inum == HFS_BAD_BLOCK_FILE_ID) {
2510
        // Note: the Extents file and the BadBlocks file are really the same.
2511
0
        if (!hfs->has_extents_file) {
2512
0
            error_detected(TSK_ERR_FS_INODE_NUM,
2513
0
                "BadBlocks File not present");
2514
0
            return 1;
2515
0
        }
2516
0
        return hfs_make_badblockfile(hfs, a_fs_file);
2517
0
    }
2518
0
    else if (inum == HFS_ALLOCATION_FILE_ID) {
2519
0
        return hfs_make_blockmap(hfs, a_fs_file);
2520
0
    }
2521
0
    else if (inum == HFS_STARTUP_FILE_ID) {
2522
0
        if (!hfs->has_startup_file) {
2523
0
            error_detected(TSK_ERR_FS_INODE_NUM,
2524
0
                "Startup File not present");
2525
0
            return 1;
2526
0
        }
2527
0
        return hfs_make_startfile(hfs, a_fs_file);
2528
0
    }
2529
0
    else if (inum == HFS_ATTRIBUTES_FILE_ID) {
2530
0
        if (!hfs->has_attributes_file) {
2531
0
            error_detected(TSK_ERR_FS_INODE_NUM,
2532
0
                "Attributes File not present");
2533
0
            return 1;
2534
0
        }
2535
0
        return hfs_make_attrfile(hfs, a_fs_file);
2536
0
    }
2537
2538
    /* Lookup inode and store it in the HFS structure */
2539
0
    if (hfs_cat_file_lookup(hfs, inum, &entry, TRUE)) {
2540
0
        return 1;
2541
0
    }
2542
2543
    /* Copy the structure in hfs to generic fs_inode */
2544
0
    if (hfs_dinode_copy(hfs, &entry, a_fs_file)) {
2545
0
        return 1;
2546
0
    }
2547
2548
    /* If this is potentially a compressed file, its
2549
     * actual size is unknown until we examine the
2550
     * extended attributes */
2551
0
    if ((a_fs_file->meta->size == 0) &&
2552
0
        (a_fs_file->meta->type == TSK_FS_META_TYPE_REG) &&
2553
0
        (a_fs_file->meta->attr_state != TSK_FS_META_ATTR_ERROR) &&
2554
0
        ((a_fs_file->meta->attr_state != TSK_FS_META_ATTR_STUDIED) ||
2555
0
            (a_fs_file->meta->attr == NULL))) {
2556
0
        hfs_load_attrs(a_fs_file);
2557
0
    }
2558
2559
0
    return 0;
2560
0
}
2561
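/*
 * Illustrative sketch -- not part of hfs.c.  hfs_inode_lookup() is installed
 * as this file system's inode_lookup callback, so outside code normally
 * reaches it through the public libtsk API rather than directly.  A
 * hypothetical caller that loads one catalog record by CNID could look like
 * the function below (sketch_open_cnid is a made-up name; it assumes a
 * non-Windows build where TSK_TCHAR is char).
 */
#include <tsk/libtsk.h>

static int
sketch_open_cnid(const char *image_path, TSK_INUM_T cnid)
{
    TSK_IMG_INFO *img = tsk_img_open_sing(image_path, TSK_IMG_TYPE_DETECT, 0);
    if (img == NULL)
        return 1;

    TSK_FS_INFO *fs = tsk_fs_open_img(img, 0, TSK_FS_TYPE_HFS_DETECT);
    if (fs == NULL) {
        tsk_img_close(img);
        return 1;
    }

    /* for an HFS+ volume this ends up in hfs_inode_lookup() above */
    TSK_FS_FILE *file = tsk_fs_file_open_meta(fs, NULL, cnid);
    int ok = (file != NULL);
    if (file != NULL)
        tsk_fs_file_close(file);

    tsk_fs_close(fs);
    tsk_img_close(img);
    return ok ? 0 : 1;
}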
2562
typedef struct {
2563
    uint32_t offset;
2564
    uint32_t length;
2565
} CMP_OFFSET_ENTRY;
2566
2567
2568
/**
2569
 * \internal
2570
 * Reads the ZLIB compression block table from the attribute.
2571
 *
2572
 * @param rAttr the attribute to read
2573
 * @param offsetTableOut block table
2574
 * @param tableSizeOut size of block table
2575
 * @param tableOffsetOut the offset of the block table in the resource fork
2576
 * @return 1 on success, 0 on error
2577
 */
2578
static int
2579
0
hfs_read_zlib_block_table(const TSK_FS_ATTR *rAttr, CMP_OFFSET_ENTRY** offsetTableOut, uint32_t* tableSizeOut, uint32_t* tableOffsetOut) {
2580
0
    ssize_t attrReadResult;
2581
0
    hfs_resource_fork_header rfHeader;
2582
0
    uint32_t dataOffset;
2583
0
    uint32_t offsetTableOffset;
2584
0
    char fourBytes[4];          // Size of the offset table, little endian
2585
0
    uint32_t tableSize;         // Size of the offset table
2586
0
    char *offsetTableData = NULL;
2587
0
    CMP_OFFSET_ENTRY *offsetTable = NULL;
2588
0
    size_t indx;
2589
0
2590
0
    // Read the resource fork header
2591
0
    attrReadResult = tsk_fs_attr_read(rAttr, 0, (char *) &rfHeader,
2592
0
        sizeof(hfs_resource_fork_header), TSK_FS_FILE_READ_FLAG_NONE);
2593
0
    if (attrReadResult != sizeof(hfs_resource_fork_header)) {
2594
0
        error_returned
2595
0
            (" %s: trying to read the resource fork header", __func__);
2596
0
        return 0;
2597
0
    }
2598
0
2599
0
    // Begin to parse the resource fork. For now, we just need the data offset.
2600
0
    dataOffset = tsk_getu32(TSK_BIG_ENDIAN, rfHeader.dataOffset);
2601
0
2602
0
    // The resource's data begins with an offset table, which defines blocks
2603
0
    // of (optionally) zlib-compressed data (so that the OS can do file seeks
2604
0
    // efficiently; each uncompressed block is 64KB).
2605
0
    offsetTableOffset = dataOffset + 4;
2606
0
2607
0
    // read 4 bytes, the number of table entries, little endian
2608
0
    attrReadResult =
2609
0
        tsk_fs_attr_read(rAttr, offsetTableOffset, fourBytes, 4,
2610
0
        TSK_FS_FILE_READ_FLAG_NONE);
2611
0
    if (attrReadResult != 4) {
2612
0
        error_returned
2613
0
            (" %s: trying to read the offset table size, "
2614
0
            "return value of %u should have been 4", __func__, attrReadResult);
2615
0
        return 0;
2616
0
    }
2617
0
    tableSize = tsk_getu32(TSK_LIT_ENDIAN, fourBytes);
2618
0
2619
0
    // Each table entry is 8 bytes long
2620
0
    offsetTableData = tsk_malloc(tableSize * 8);
2621
0
    if (offsetTableData == NULL) {
2622
0
        error_returned
2623
0
            (" %s: space for the offset table raw data", __func__);
2624
0
        return 0;
2625
0
    }
2626
0
2627
0
    offsetTable =
2628
0
        (CMP_OFFSET_ENTRY *) tsk_malloc(tableSize *
2629
0
        sizeof(CMP_OFFSET_ENTRY));
2630
0
    if (offsetTable == NULL) {
2631
0
        error_returned
2632
0
            (" %s: space for the offset table", __func__);
2633
0
        goto on_error;
2634
0
    }
2635
0
2636
0
    attrReadResult = tsk_fs_attr_read(rAttr, offsetTableOffset + 4,
2637
0
        offsetTableData, tableSize * 8, TSK_FS_FILE_READ_FLAG_NONE);
2638
0
    if (attrReadResult != (ssize_t) tableSize * 8) {
2639
0
        error_returned
2640
0
            (" %s: reading in the compression offset table, "
2641
0
            "return value %u should have been %u", __func__, attrReadResult,
2642
0
            tableSize * 8);
2643
0
        goto on_error;
2644
0
    }
2645
0
2646
0
    for (indx = 0; indx < tableSize; ++indx) {
2647
0
        offsetTable[indx].offset =
2648
0
            tsk_getu32(TSK_LIT_ENDIAN, offsetTableData + indx * 8);
2649
0
        offsetTable[indx].length =
2650
0
            tsk_getu32(TSK_LIT_ENDIAN, offsetTableData + indx * 8 + 4);
2651
0
    }
2652
0
2653
0
    free(offsetTableData);
2654
0
2655
0
    *offsetTableOut = offsetTable;
2656
0
    *tableSizeOut = tableSize;
2657
0
    *tableOffsetOut = offsetTableOffset;
2658
0
    return 1;
2659
0
2660
0
on_error:
2661
0
    free(offsetTable);
2662
0
    free(offsetTableData);
2663
0
    return 0;
2664
0
}
2665
2666
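/*
 * Illustrative sketch -- not part of hfs.c.  It restates the byte layout the
 * reader above expects, parsed here from an in-memory copy of the resource
 * fork: the fork header's dataOffset field is big-endian, while the chunk
 * count at dataOffset + 4 and the (offset, length) pairs after it are
 * little-endian, with each chunk offset taken relative to dataOffset + 4
 * (this is how read_and_decompress_block() below uses the table).  All
 * sketch_ names are hypothetical.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t sketch_be32(const uint8_t *p) {
    return ((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16)
        | ((uint32_t) p[2] << 8) | (uint32_t) p[3];
}

static uint32_t sketch_le32(const uint8_t *p) {
    return (uint32_t) p[0] | ((uint32_t) p[1] << 8)
        | ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
}

/* Fills offs[]/lens[] (absolute fork offsets) and returns the chunk count,
 * or 0 if the buffer is too small for the structures it claims to hold. */
static uint32_t
sketch_parse_zlib_table(const uint8_t *rsrc, size_t rsrc_len,
    uint32_t *offs, uint32_t *lens, uint32_t max_entries)
{
    if (rsrc_len < 4)
        return 0;
    uint32_t table_off = sketch_be32(rsrc) + 4;     /* dataOffset + 4 */
    if ((size_t) table_off + 4 > rsrc_len)
        return 0;

    uint32_t n = sketch_le32(rsrc + table_off);     /* number of chunks */
    if (n > max_entries || (size_t) table_off + 4 + (size_t) n * 8 > rsrc_len)
        return 0;

    for (uint32_t i = 0; i < n; ++i) {
        const uint8_t *e = rsrc + table_off + 4 + 8 * i;
        offs[i] = table_off + sketch_le32(e);       /* relative -> absolute */
        lens[i] = sketch_le32(e + 4);
    }
    return n;
}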
2667
/**
2668
 * \internal
2669
 * Reads the LZVN compression block table from the attribute.
2670
 *
2671
 * @param rAttr the attribute to read
2672
 * @param offsetTableOut block table
2673
 * @param tableSizeOut size of block table
2674
 * @param tableOffsetOut the offset of the block table in the resource fork
2675
 * @return 1 on success, 0 on error
2676
 */
2677
static int
2678
0
hfs_read_lzvn_block_table(const TSK_FS_ATTR *rAttr, CMP_OFFSET_ENTRY** offsetTableOut, uint32_t* tableSizeOut, uint32_t* tableOffsetOut) {
2679
0
    ssize_t attrReadResult;
2680
0
    char fourBytes[4];
2681
0
    uint32_t tableDataSize;
2682
0
    uint32_t tableSize;         // Size of the offset table
2683
0
    char *offsetTableData = NULL;
2684
0
    CMP_OFFSET_ENTRY *offsetTable = NULL;
2685
0
2686
0
    // The offset table is a sequence of 4-byte offsets of compressed
2687
0
    // blocks. The first 4 bytes is thus the offset of the first block,
2688
0
    // but also 4 times the number of entries in the table.
2689
0
    attrReadResult = tsk_fs_attr_read(rAttr, 0, fourBytes, 4,
2690
0
                                      TSK_FS_FILE_READ_FLAG_NONE);
2691
0
    if (attrReadResult != 4) {
2692
0
        error_returned
2693
0
            (" %s: trying to read the offset table size, "
2694
0
            "return value of %u should have been 4", __func__, attrReadResult);
2695
0
        return 0;
2696
0
    }
2697
0
2698
0
    tableDataSize = tsk_getu32(TSK_LIT_ENDIAN, fourBytes);
2699
0
2700
0
    offsetTableData = tsk_malloc(tableDataSize);
2701
0
    if (offsetTableData == NULL) {
2702
0
        error_returned
2703
0
            (" %s: space for the offset table raw data", __func__);
2704
0
        return 0;
2705
0
    }
2706
0
2707
0
    // table entries are 4 bytes, last entry is end of data
2708
0
    tableSize = tableDataSize / 4 - 1;
2709
0
2710
0
    offsetTable =
2711
0
        (CMP_OFFSET_ENTRY *) tsk_malloc(tableSize *
2712
0
        sizeof(CMP_OFFSET_ENTRY));
2713
0
    if (offsetTable == NULL) {
2714
0
        error_returned
2715
0
            (" %s: space for the offset table", __func__);
2716
0
        goto on_error;
2717
0
    }
2718
0
2719
0
    attrReadResult = tsk_fs_attr_read(rAttr, 0,
2720
0
        offsetTableData, tableDataSize, TSK_FS_FILE_READ_FLAG_NONE);
2721
0
    if (attrReadResult != (ssize_t) tableDataSize) {
2722
0
        error_returned
2723
0
            (" %s: reading in the compression offset table, "
2724
0
            "return value %u should have been %u", __func__, attrReadResult,
2725
0
            tableDataSize);
2726
0
        goto on_error;
2727
0
    }
2728
0
2729
0
    uint32_t a = tableDataSize;
2730
0
    uint32_t b;
2731
0
    size_t i;
2732
0
2733
0
    for (i = 0; i < tableSize; ++i) {
2734
0
        b = tsk_getu32(TSK_LIT_ENDIAN, offsetTableData + 4*(i+1));
2735
0
        offsetTable[i].offset = a;
2736
0
        offsetTable[i].length = b - a;
2737
0
        a = b;
2738
0
    }
2739
0
2740
0
    free(offsetTableData);
2741
0
2742
0
    *offsetTableOut = offsetTable;
2743
0
    *tableSizeOut = tableSize;
2744
0
    *tableOffsetOut = 0;
2745
0
    return 1;
2746
0
2747
0
on_error:
2748
0
    free(offsetTable);
2749
0
    free(offsetTableData);
2750
0
    return 0;
2751
0
}
2752
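/*
 * Illustrative sketch -- not part of hfs.c.  A worked example of the LZVN
 * offset table handled above: the table is N+1 little-endian 32-bit fork
 * offsets, the first of which is also the byte size of the table itself, and
 * chunk i occupies the range [offset[i], offset[i+1]).  For instance the raw
 * bytes
 *
 *     0C 00 00 00  34 12 00 00  F0 56 00 00
 *
 * decode to offsets {0x0C, 0x1234, 0x56F0}: two chunks, the first at offset
 * 0x0C with length 0x1228 and the second at 0x1234 with length 0x44BC.
 * sketch_lzvn_chunk_count is a hypothetical name.
 */
#include <stdint.h>

static uint32_t
sketch_lzvn_chunk_count(const uint8_t *table)
{
    /* first word = table size in bytes = 4 * (number of chunks + 1) */
    uint32_t table_bytes = (uint32_t) table[0] | ((uint32_t) table[1] << 8)
        | ((uint32_t) table[2] << 16) | ((uint32_t) table[3] << 24);
    return table_bytes / 4 - 1;
}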
2753
/**
2754
 * \internal
2755
 * "Decompress" a block which was stored uncompressed.
2756
 *
2757
 * @param rawBuf the compressed data
2758
 * @param len length of the compressed data
2759
 * @param uncBuf the decompressed data
2760
 * @param uncLen length of the decompressed data
2761
 * @return 1 on success, 0 on error
2762
 */
2763
0
static int hfs_decompress_noncompressed_block(char* rawBuf, uint32_t len, char* uncBuf, uint64_t* uncLen) {
2764
0
    // actually an uncompressed block of data; just copy
2765
0
    if (tsk_verbose)
2766
0
        tsk_fprintf(stderr,
2767
0
           "%s: Copying an uncompressed compression unit\n", __func__);
2768
0
2769
0
    if ((len - 1) > COMPRESSION_UNIT_SIZE) {
2770
0
        error_detected(TSK_ERR_FS_READ,
2771
0
            "%s: uncompressed block length %u is longer "
2772
0
            "than compression unit size %u", __func__, len - 1,
2773
0
            COMPRESSION_UNIT_SIZE);
2774
0
        return 0;
2775
0
    }
2776
0
    memcpy(uncBuf, rawBuf + 1, len - 1);
2777
0
    *uncLen = len - 1;
2778
0
    return 1;
2779
0
}
2780
2781
2782
#ifdef HAVE_LIBZ
2783
/**
2784
 * \internal
2785
 * Decompress a block which was stored with ZLIB.
2786
 *
2787
 * @param rawBuf the compressed data
2788
 * @param len length of the compressed data
2789
 * @param uncBuf the decompressed data
2790
 * @param uncLen length of the decompressed data
2791
 * @return 1 on success, 0 on error
2792
 */
2793
static int hfs_decompress_zlib_block(char* rawBuf, uint32_t len, char* uncBuf, uint64_t* uncLen)
2794
0
{
2795
0
    // see if this block is compressed
2796
0
    if (len > 0 && (rawBuf[0] & 0x0F) != 0x0F) {
2797
0
        // Uncompress the chunk of data
2798
0
        if (tsk_verbose)
2799
0
            tsk_fprintf(stderr,
2800
0
                        "%s: Inflating the compression unit\n", __func__);
2801
0
2802
0
        unsigned long bytesConsumed;
2803
0
        int infResult = zlib_inflate(rawBuf, (uint64_t) len,
2804
0
            uncBuf, (uint64_t) COMPRESSION_UNIT_SIZE,
2805
0
            uncLen, &bytesConsumed);
2806
0
        if (infResult != 0) {
2807
0
            error_returned
2808
0
                  (" %s: zlib inflation (uncompression) failed",
2809
0
                  __func__, infResult);
2810
0
            return 0;
2811
0
        }
2812
0
2813
0
        if (bytesConsumed != len) {
2814
0
            error_detected(TSK_ERR_FS_READ,
2815
0
                " %s, decompressor did not consume the whole compressed data",
2816
0
                __func__);
2817
0
            return 0;
2818
0
        }
2819
0
2820
0
        return 1;
2821
0
    }
2822
0
    else {
2823
0
        // actually an uncompressed block of data; just copy
2824
0
        return hfs_decompress_noncompressed_block(rawBuf, len, uncBuf, uncLen);
2825
0
    }
2826
0
}
2827
#endif
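
zlib_inflate() is a TSK helper whose internals are not shown in this file. For orientation, one decmpfs chunk can be inflated with the stock zlib API roughly as follows; this is a hedged sketch under that assumption, not TSK's actual implementation, and the function name and error handling are illustrative:

    #include <string.h>
    #include <zlib.h>

    /* Inflate one chunk in a single call; returns 0 on success, -1 on error. */
    int inflate_chunk(unsigned char *src, size_t srcLen,
                      unsigned char *dst, size_t dstLen,
                      size_t *outLen, size_t *consumed)
    {
        z_stream strm;
        memset(&strm, 0, sizeof(strm));          /* zalloc/zfree/opaque = Z_NULL */
        if (inflateInit(&strm) != Z_OK)
            return -1;

        strm.next_in   = src;
        strm.avail_in  = (uInt)srcLen;
        strm.next_out  = dst;
        strm.avail_out = (uInt)dstLen;

        int rc = inflate(&strm, Z_FINISH);       /* whole chunk fits in one call */
        *outLen   = strm.total_out;
        *consumed = srcLen - strm.avail_in;

        inflateEnd(&strm);
        return (rc == Z_STREAM_END) ? 0 : -1;
    }
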
2828
2829
2830
/**
2831
 * \internal
2832
 * Decompress a block which was stored with LZVN.
2833
 *
2834
 * @param rawBuf the compressed data
2835
 * @param len length of the compressed data
2836
 * @param uncBuf the decompressed data
2837
 * @param uncLen length of the decompressed data
2838
 * @return 1 on success, 0 on error
2839
 */
2840
static int hfs_decompress_lzvn_block(char* rawBuf, uint32_t len, char* uncBuf, uint64_t* uncLen)
2841
0
{
2842
0
    // see if this block is compressed
2843
0
    if (len > 0 && rawBuf[0] != 0x06) {
2844
0
        *uncLen = lzvn_decode_buffer(uncBuf, COMPRESSION_UNIT_SIZE, rawBuf, len);
2845
0
        return 1;  // apparently this can't fail
2846
0
    }
2847
0
    else {
2848
0
        // actually an uncompressed block of data; just copy
2849
0
        return hfs_decompress_noncompressed_block(rawBuf, len, uncBuf, uncLen);
2850
0
    }
2851
0
}
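
Both decompressors above rely on the same trick to spot chunks that were stored verbatim: a valid zlib stream never has 0x0F as the low nibble of its first byte, and a valid LZVN stream never starts with 0x06, so those values flag a chunk whose payload (after the 1-byte marker) is simply copied. The predicates, isolated into a small sketch with illustrative names:

    #include <stdint.h>

    /* A chunk is treated as compressed only if it is non-empty and its first
     * byte is not the "stored uncompressed" marker for the scheme in use. */
    int is_zlib_compressed_chunk(const uint8_t *buf, uint32_t len) {
        return len > 0 && (buf[0] & 0x0F) != 0x0F;
    }

    int is_lzvn_compressed_chunk(const uint8_t *buf, uint32_t len) {
        return len > 0 && buf[0] != 0x06;
    }
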
2852
2853
/**
2854
 * \internal
2855
 * Decompress a block.
2856
 *
2857
 * @param rAttr the attribute to read
2858
 * @param rawBuf the compressed data
2859
 * @param uncBuf the decompressed data
2860
 * @param offsetTable table of compressed block offsets
2861
 * @param offsetTableSize size of table of compressed block offsets
2862
 * @param offsetTableOffset offset of table of compressed block offsets
2863
 * @param indx index of block to read
2864
 * @param decompress_block pointer to decompression function
2865
 * @return decompressed size on success, -1 on error
2866
 */
2867
static ssize_t read_and_decompress_block(
2868
  const TSK_FS_ATTR* rAttr,
2869
  char* rawBuf,
2870
  char* uncBuf,
2871
  const CMP_OFFSET_ENTRY* offsetTable,
2872
  uint32_t offsetTableSize,
2873
  uint32_t offsetTableOffset,
2874
  size_t indx,
2875
  int (*decompress_block)(char* rawBuf,
2876
                          uint32_t len,
2877
                          char* uncBuf,
2878
                          uint64_t* uncLen)
2879
)
2880
0
{
2881
0
    // @@@ BC: Looks like we should have bounds checks that indx < offsetTableSize, but we should confirm
2882
0
    ssize_t attrReadResult;
2883
0
    uint32_t offset = offsetTableOffset + offsetTable[indx].offset;
2884
0
    uint32_t len = offsetTable[indx].length;
2885
0
    uint64_t uncLen;
2886
0
2887
0
    if (tsk_verbose)
2888
0
        tsk_fprintf(stderr,
2889
0
            "%s: Reading compression unit %d, length %d\n",
2890
0
            __func__, indx, len);
2891
0
2892
0
    /* Github #383 referenced that if len is 0, then the below code causes
2893
0
     * problems. Added this check, but I don't have data to verify this on.
2894
0
     * it looks like it should at least not crash, but it isn't clear if it
2895
0
     * will also do the right thing or if it should actually break here
2896
0
     * instead. */
2897
0
    if (len == 0) {
2898
0
      return 0;
2899
0
    }
2900
0
2901
0
    if (len > COMPRESSION_UNIT_SIZE + 1) {
2902
0
      error_detected(TSK_ERR_FS_READ,
2903
0
          "%s: block size is too large: %u", __func__, len);
2904
0
      return -1;
2905
0
    }
2906
0
2907
0
    // Read in the block of compressed data
2908
0
    attrReadResult = tsk_fs_attr_read(rAttr, offset,
2909
0
        rawBuf, len, TSK_FS_FILE_READ_FLAG_NONE);
2910
0
    if (attrReadResult != (ssize_t) len) {
2911
0
        char msg[] =
2912
0
            "%s%s: reading in the compression offset table, "
2913
0
            "return value %u should have been %u";
2914
0
2915
0
        if (attrReadResult < 0 ) {
2916
0
            error_returned(msg, " ", __func__, attrReadResult, len);
2917
0
        }
2918
0
        else {
2919
0
            error_detected(TSK_ERR_FS_READ, "", __func__, attrReadResult, len);
2920
0
        }
2921
0
        return -1;
2922
0
    }
2923
0
2924
0
    if (!decompress_block(rawBuf, len, uncBuf, &uncLen)) {
2925
0
        return -1;
2926
0
    }
2927
0
2928
0
    // If size is a multiple of COMPRESSION_UNIT_SIZE,
2929
0
    // expected uncompressed length is COMPRESSION_UNIT_SIZE
2930
0
    const uint32_t expUncLen = indx == offsetTableSize - 1 ?
2931
0
        ((rAttr->fs_file->meta->size - 1) % COMPRESSION_UNIT_SIZE) + 1 :
2932
0
        COMPRESSION_UNIT_SIZE;
2933
0
2934
0
    if (uncLen != expUncLen) {
2935
0
        error_detected(TSK_ERR_FS_READ,
2936
0
            "%s: compressed block decompressed to %u bytes, "
2937
0
            "should have been %u bytes", __func__, uncLen, expUncLen);
2938
0
        return -1;
2939
0
    }
2940
0
2941
0
    // There are now uncLen bytes of uncompressed data available from
2942
0
    // this comp unit.
2943
0
    return (ssize_t)uncLen;
2944
0
}
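
The expected-size check above hinges on the formula ((size - 1) % COMPRESSION_UNIT_SIZE) + 1, which gives the size of the final chunk and collapses to a full unit when the file size is an exact multiple. A standalone worked example; the 64 KiB unit size and the sample file size are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define COMPRESSION_UNIT_SIZE 65536   /* assumed decmpfs chunk size (64 KiB) */

    /* Expected decompressed size of chunk `indx` of a `size`-byte file split
     * into `nUnits` chunks: every chunk is a full unit except possibly the last. */
    uint32_t expected_unit_len(uint64_t size, size_t indx, size_t nUnits) {
        return (indx == nUnits - 1)
            ? (uint32_t)(((size - 1) % COMPRESSION_UNIT_SIZE) + 1)
            : COMPRESSION_UNIT_SIZE;
    }

    int main(void) {
        uint64_t size = 150000;   /* sample file: 65536 + 65536 + 18928 bytes */
        size_t nUnits = (size_t)((size + COMPRESSION_UNIT_SIZE - 1) / COMPRESSION_UNIT_SIZE);
        for (size_t i = 0; i < nUnits; ++i)
            printf("unit %zu: %u bytes\n", i, expected_unit_len(size, i, nUnits));
        return 0;
    }
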
2945
2946
/**
2947
 * \internal
2948
 * Attr walk callback function for compressed resources
2949
 *
2950
 * @param fs_attr the attribute to read
2951
 * @param flags walk flags
2952
 * @param a_action action callback
2953
 * @param ptr context for the action callback
2954
 * @param read_block_table pointer to block table read function
2955
 * @param decompress_block pointer to decompression function
2956
 * @return 0 on success, 1 on error
2957
 */
2958
static uint8_t
2959
hfs_attr_walk_compressed_rsrc(const TSK_FS_ATTR * fs_attr,
2960
    int flags, TSK_FS_FILE_WALK_CB a_action, void *ptr,
2961
    int (*read_block_table)(const TSK_FS_ATTR *rAttr,
2962
                            CMP_OFFSET_ENTRY** offsetTableOut,
2963
                            uint32_t* tableSizeOut,
2964
                            uint32_t* tableOffsetOut),
2965
    int (*decompress_block)(char* rawBuf,
2966
                            uint32_t len,
2967
                            char* uncBuf,
2968
                            uint64_t* uncLen))
2969
0
{
2970
0
    TSK_FS_INFO *fs;
2971
0
    TSK_FS_FILE *fs_file;
2972
0
    const TSK_FS_ATTR *rAttr;   // resource fork attribute
2973
0
    char *rawBuf = NULL;               // compressed data
2974
0
    char *uncBuf = NULL;               // uncompressed data
2975
0
    uint32_t offsetTableOffset;
2976
0
    uint32_t offsetTableSize;         // The number of table entries
2977
0
    CMP_OFFSET_ENTRY *offsetTable = NULL;
2978
0
    size_t indx;                // index for looping over the offset table
2979
0
    TSK_OFF_T off = 0;          // the offset in the uncompressed data stream consumed thus far
2980
0
2981
0
    if (tsk_verbose)
2982
0
        tsk_fprintf(stderr,
2983
0
            "%s:  Entered, because this is a compressed file with compressed data in the resource fork\n", __func__);
2984
0
2985
0
    // clean up any error messages that are lying around
2986
0
    tsk_error_reset();
2987
0
    if ((fs_attr == NULL) || (fs_attr->fs_file == NULL)
2988
0
        || (fs_attr->fs_file->meta == NULL)
2989
0
        || (fs_attr->fs_file->fs_info == NULL)) {
2990
0
        tsk_error_set_errno(TSK_ERR_FS_ARG);
2991
0
        tsk_error_set_errstr("%s: Null arguments given\n", __func__);
2992
0
        return 1;
2993
0
    }
2994
0
2995
0
    // Check that the ATTR being read is the main DATA resource, 4352-0,
2996
0
    // because this is the only one that can be compressed in HFS+
2997
0
    if ((fs_attr->id != HFS_FS_ATTR_ID_DATA) ||
2998
0
        (fs_attr->type != TSK_FS_ATTR_TYPE_HFS_DATA)) {
2999
0
        error_detected(TSK_ERR_FS_ARG,
3000
0
            "%s: arg specified an attribute %u-%u that is not the data fork, "
3001
0
            "Only the data fork can be compressed.", __func__, fs_attr->type,
3002
0
            fs_attr->id);
3003
0
        return 1;
3004
0
    }
3005
0
3006
0
    /* This MUST be a compressed attribute     */
3007
0
    if (!(fs_attr->flags & TSK_FS_ATTR_COMP)) {
3008
0
        error_detected(TSK_ERR_FS_FWALK,
3009
0
            "%s: called with non-special attribute: %x",
3010
0
            __func__, fs_attr->flags);
3011
0
        return 1;
3012
0
    }
3013
0
3014
0
    fs = fs_attr->fs_file->fs_info;
3015
0
    fs_file = fs_attr->fs_file;
3016
0
3017
0
    /********  Open the Resource Fork ***********/
3018
0
3019
0
    // find the attribute for the resource fork
3020
0
    rAttr =
3021
0
        tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_HFS_RSRC,
3022
0
        HFS_FS_ATTR_ID_RSRC, TRUE);
3023
0
    if (rAttr == NULL) {
3024
0
        error_returned
3025
0
            (" %s: could not get the attribute for the resource fork of the file", __func__);
3026
0
        return 1;
3027
0
    }
3028
0
3029
0
    // read the offset table from the fork header
3030
0
    if (!read_block_table(rAttr, &offsetTable, &offsetTableSize, &offsetTableOffset)) {
3031
0
      return 1;
3032
0
    }
3033
0
3034
0
    // Allocate two buffers for the raw and uncompressed data
3035
0
    /* Raw data can be COMPRESSION_UNIT_SIZE+1 if the data is not
3036
0
     * compressed and there is a 1-byte flag that indicates that
3037
0
     * the data is not compressed. */
3038
0
    rawBuf = (char *) tsk_malloc(COMPRESSION_UNIT_SIZE + 1);
3039
0
    if (rawBuf == NULL) {
3040
0
        error_returned
3041
0
            (" %s: buffers for reading and uncompressing", __func__);
3042
0
        goto on_error;
3043
0
    }
3044
0
3045
0
    uncBuf = (char *) tsk_malloc(COMPRESSION_UNIT_SIZE);
3046
0
    if (uncBuf == NULL) {
3047
0
        error_returned
3048
0
            (" %s: buffers for reading and uncompressing", __func__);
3049
0
        goto on_error;
3050
0
    }
3051
0
3052
0
    // FOR entry in the table DO
3053
0
    for (indx = 0; indx < offsetTableSize; ++indx) {
3054
0
        ssize_t uncLen;        // uncompressed length
3055
0
        unsigned int blockSize;
3056
0
        uint64_t lumpSize;
3057
0
        uint64_t remaining;
3058
0
        char *lumpStart;
3059
0
3060
0
        switch ((uncLen = read_and_decompress_block(
3061
0
                    rAttr, rawBuf, uncBuf,
3062
0
                    offsetTable, offsetTableSize, offsetTableOffset, indx,
3063
0
                    decompress_block)))
3064
0
        {
3065
0
        case -1:
3066
0
            goto on_error;
3067
0
        case  0:
3068
0
            continue;
3069
0
        default:
3070
0
            break;
3071
0
        }
3072
0
3073
0
        // Call the a_action callback with "Lumps"
3074
0
        // that are at most the block size.
3075
0
        blockSize = fs->block_size;
3076
0
        remaining = uncLen;
3077
0
        lumpStart = uncBuf;
3078
0
3079
0
        while (remaining > 0) {
3080
0
            int retval;         // action return value
3081
0
            lumpSize = remaining <= blockSize ? remaining : blockSize;
3082
0
3083
0
            // Apply the callback function
3084
0
            if (tsk_verbose)
3085
0
                tsk_fprintf(stderr,
3086
0
                    "%s: Calling action on lump of size %"
3087
0
                    PRIu64 " offset %" PRIu64 " in the compression unit\n",
3088
0
                    __func__, lumpSize, uncLen - remaining);
3089
0
            if (lumpSize > SIZE_MAX) {
3090
0
                error_detected(TSK_ERR_FS_FWALK,
3091
0
                    " %s: lumpSize is too large for the action", __func__);
3092
0
                goto on_error;
3093
0
            }
3094
0
3095
0
            retval = a_action(fs_attr->fs_file, off, 0, lumpStart,
3096
0
                (size_t) lumpSize,   // cast OK because of above test
3097
0
                TSK_FS_BLOCK_FLAG_COMP, ptr);
3098
0
3099
0
            if (retval == TSK_WALK_ERROR) {
3100
0
                error_detected(TSK_ERR_FS | 201,
3101
0
                    "%s: callback returned an error", __func__);
3102
0
                goto on_error;
3103
0
            }
3104
0
            else if (retval == TSK_WALK_STOP) {
3105
0
                break;
3106
0
            }
3107
0
3108
0
            // Find the next lump
3109
0
            off += lumpSize;
3110
0
            remaining -= lumpSize;
3111
0
            lumpStart += lumpSize;
3112
0
        }
3113
0
    }
3114
0
3115
0
    // Done, so free up the allocated resources.
3116
0
    free(offsetTable);
3117
0
    free(rawBuf);
3118
0
    free(uncBuf);
3119
0
    return 0;
3120
0
3121
0
on_error:
3122
0
    free(offsetTable);
3123
0
    free(rawBuf);
3124
0
    free(uncBuf);
3125
0
    return 1;
3126
0
}
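
The inner loop above hands each decompressed unit to the callback in "lumps" no larger than one file system block, advancing a running offset in the uncompressed stream. A standalone sketch of that splitting, with a generic callback type standing in for TSK_FS_FILE_WALK_CB (the callback type and names here are assumptions, not the TSK signature):

    #include <stddef.h>
    #include <stdint.h>

    /* Generic stand-in for the walk callback: returns nonzero to stop. */
    typedef int (*lump_cb)(uint64_t off, const char *buf, size_t len, void *ctx);

    /* Feed one decompressed compression unit to the callback in block-sized
     * pieces, advancing the running stream offset as the walk above does. */
    int walk_unit(const char *unc, size_t uncLen, size_t blockSize,
                  uint64_t *off, lump_cb cb, void *ctx)
    {
        size_t remaining = uncLen;
        const char *p = unc;
        while (remaining > 0) {
            size_t lump = remaining < blockSize ? remaining : blockSize;
            if (cb(*off, p, lump, ctx) != 0)
                return 1;            /* callback asked to stop (or failed) */
            *off      += lump;
            remaining -= lump;
            p         += lump;
        }
        return 0;
    }
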
3127
3128
3129
#ifdef HAVE_LIBZ
3130
/**
3131
 * \internal
3132
 * Attr walk callback function for ZLIB compressed resources
3133
 *
3134
 * @param fs_attr the attribute to read
3135
 * @param flags walk flags
3136
 * @param a_action action callback
3137
 * @param ptr context for the action callback
3138
 * @return 0 on success, 1 on error
3139
 */
3140
static uint8_t
3141
hfs_attr_walk_zlib_rsrc(const TSK_FS_ATTR * fs_attr,
3142
    int flags, TSK_FS_FILE_WALK_CB a_action, void *ptr)
3143
0
{
3144
0
    return hfs_attr_walk_compressed_rsrc(
3145
0
      fs_attr, flags, a_action, ptr,
3146
0
      hfs_read_zlib_block_table,
3147
0
      hfs_decompress_zlib_block
3148
0
    );
3149
0
}
3150
#endif
3151
3152
/**
3153
 * \internal
3154
 * Attr walk callback function for LZVN compressed resources
3155
 *
3156
 * @param fs_attr the attribute to read
3157
 * @param flags walk flags
3158
 * @param a_action action callback
3159
 * @param ptr context for the action callback
3160
 * @return 0 on success, 1 on error
3161
 */
3162
static uint8_t
3163
hfs_attr_walk_lzvn_rsrc(const TSK_FS_ATTR * fs_attr,
3164
    int flags, TSK_FS_FILE_WALK_CB a_action, void *ptr)
3165
0
{
3166
0
    return hfs_attr_walk_compressed_rsrc(
3167
0
      fs_attr, flags, a_action, ptr,
3168
0
      hfs_read_lzvn_block_table,
3169
0
      hfs_decompress_lzvn_block
3170
0
    );
3171
0
}
3172
3173
3174
/**
3175
 * \internal
3176
 * Read a compressed resource
3177
 *
3178
 * @param a_fs_attr the attribute to read
3179
 * @param a_offset the offset from which to read
3180
 * @param a_buf the buffer into which to read
3181
 * @param a_len the length of the buffer
3182
 * @param read_block_table pointer to block table read function
3183
 * @param decompress_block pointer to decompression function
3184
 * @return number of bytes read or -1 on error (incl if offset is past EOF)
3185
 */
3186
static ssize_t
3187
hfs_file_read_compressed_rsrc(const TSK_FS_ATTR * a_fs_attr,
3188
    TSK_OFF_T a_offset, char *a_buf, size_t a_len,
3189
    int (*read_block_table)(const TSK_FS_ATTR *rAttr,
3190
                            CMP_OFFSET_ENTRY** offsetTableOut,
3191
                            uint32_t* tableSizeOut,
3192
                            uint32_t* tableOffsetOut),
3193
    int (*decompress_block)(char* rawBuf,
3194
                            uint32_t len,
3195
                            char* uncBuf,
3196
                            uint64_t* uncLen))
3197
0
{
3198
0
    TSK_FS_FILE *fs_file;
3199
0
    const TSK_FS_ATTR *rAttr;
3200
0
    char *rawBuf = NULL;
3201
0
    char *uncBuf = NULL;
3202
0
    uint32_t offsetTableOffset;
3203
0
    uint32_t offsetTableSize;         // Size of the offset table
3204
0
    CMP_OFFSET_ENTRY *offsetTable = NULL;
3205
0
    TSK_OFF_T indx;                // index for looping over the offset table
3206
0
    TSK_OFF_T startUnit = 0;
3207
0
    uint32_t startUnitOffset = 0;
3208
0
    TSK_OFF_T endUnit = 0;
3209
0
    uint64_t bytesCopied;
3210
0
3211
0
    if (tsk_verbose)
3212
0
        tsk_fprintf(stderr,
3213
0
            "%s: called because this file is compressed, with data in the resource fork\n", __func__);
3214
0
3215
0
    // Reading zero bytes?  OK at any offset, I say!
3216
0
    if (a_len == 0)
3217
0
        return 0;
3218
0
3219
0
    if (a_offset < 0) {
3220
0
        error_detected(TSK_ERR_FS_ARG,
3221
0
            "%s: reading from file at a negative offset",
3222
0
             __func__);
3223
0
        return -1;
3224
0
    }
3225
0
3226
0
    if (a_len > SIZE_MAX / 2) {
3227
0
        error_detected(TSK_ERR_FS_ARG,
3228
0
            "%s: trying to read more than SIZE_MAX/2 is not supported.",
3229
0
            __func__);
3230
0
        return -1;
3231
0
    }
3232
0
3233
0
    if ((a_fs_attr == NULL) || (a_fs_attr->fs_file == NULL)
3234
0
        || (a_fs_attr->fs_file->meta == NULL)
3235
0
        || (a_fs_attr->fs_file->fs_info == NULL)) {
3236
0
        error_detected(TSK_ERR_FS_ARG,
3237
0
            "%s: NULL parameters passed", __func__);
3238
0
        return -1;
3239
0
    }
3240
0
3241
0
    // This should be a compressed file.  If not, that's an error!
3242
0
    if (!(a_fs_attr->flags & TSK_FS_ATTR_COMP)) {
3243
0
        error_detected(TSK_ERR_FS_ARG,
3244
0
            "%s: called with non-special attribute: %x",
3245
0
            __func__, a_fs_attr->flags);
3246
0
        return -1;
3247
0
    }
3248
0
3249
0
    // Check that the ATTR being read is the main DATA resource, 4352-0,
3250
0
    // because this is the only one that can be compressed in HFS+
3251
0
    if ((a_fs_attr->id != HFS_FS_ATTR_ID_DATA) ||
3252
0
        (a_fs_attr->type != TSK_FS_ATTR_TYPE_HFS_DATA)) {
3253
0
        error_detected(TSK_ERR_FS_ARG,
3254
0
            "%s: arg specified an attribute %u-%u that is not the data fork, "
3255
0
            "Only the data fork can be compressed.", __func__,
3256
0
            a_fs_attr->type, a_fs_attr->id);
3257
0
        return -1;
3258
0
    }
3259
0
3260
0
    /********  Open the Resource Fork ***********/
3261
0
    // The file
3262
0
    fs_file = a_fs_attr->fs_file;
3263
0
3264
0
    // find the attribute for the resource fork
3265
0
    rAttr =
3266
0
        tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_HFS_RSRC,
3267
0
        HFS_FS_ATTR_ID_RSRC, TRUE);
3268
0
    if (rAttr == NULL) {
3269
0
        error_returned
3270
0
            (" %s: could not get the attribute for the resource fork of the file", __func__);
3271
0
        return -1;
3272
0
    }
3273
0
3274
0
    // read the offset table from the fork header
3275
0
    if (!read_block_table(rAttr, &offsetTable, &offsetTableSize, &offsetTableOffset)) {
3276
0
      return -1;
3277
0
    }
3278
0
3279
0
    // Compute the range of compression units needed for the request
3280
0
    startUnit = a_offset / COMPRESSION_UNIT_SIZE;
3281
0
    startUnitOffset = a_offset % COMPRESSION_UNIT_SIZE;
3282
0
    endUnit = (a_offset + a_len - 1) / COMPRESSION_UNIT_SIZE;
3283
0
3284
0
    if (startUnit >= offsetTableSize || endUnit >= offsetTableSize) {
3285
0
        error_detected(TSK_ERR_FS_ARG,
3286
0
            "%s: range of bytes requested %lld - %lld falls past the "
3287
0
            "end of the uncompressed stream %llu\n",
3288
0
            __func__, a_offset, a_offset + a_len,
3289
0
            offsetTable[offsetTableSize-1].offset +
3290
0
            offsetTable[offsetTableSize-1].length);
3291
0
        goto on_error;
3292
0
    }
3293
0
3294
0
    if (tsk_verbose)
3295
0
        tsk_fprintf(stderr,
3296
0
            "%s: reading compression units: %" PRIdOFF
3297
0
            " to %" PRIdOFF "\n", __func__, startUnit, endUnit);
3298
0
    bytesCopied = 0;
3299
0
3300
0
    // Allocate buffers for the raw and uncompressed data
3301
0
    /* Raw data can be COMPRESSION_UNIT_SIZE+1 if the zlib data is not
3302
0
     * compressed and there is a 1-byte flag that indicates that
3303
0
     * the data is not compressed. */
3304
0
    rawBuf = (char *) tsk_malloc(COMPRESSION_UNIT_SIZE + 1);
3305
0
    if (rawBuf == NULL) {
3306
0
        error_returned
3307
0
            (" %s: buffers for reading and uncompressing", __func__);
3308
0
        goto on_error;
3309
0
    }
3310
0
3311
0
    uncBuf = (char *) tsk_malloc(COMPRESSION_UNIT_SIZE);
3312
0
    if (uncBuf == NULL) {
3313
0
        error_returned
3314
0
            (" %s: buffers for reading and uncompressing", __func__);
3315
0
        goto on_error;
3316
0
    }
3317
0
3318
0
    // Read from the indicated comp units
3319
0
    for (indx = startUnit; indx <= endUnit; ++indx) {
3320
0
        uint64_t uncLen;
3321
0
        char *uncBufPtr = uncBuf;
3322
0
        size_t bytesToCopy;
3323
0
3324
0
        switch ((uncLen = read_and_decompress_block(
3325
0
                    rAttr, rawBuf, uncBuf,
3326
0
                    offsetTable, offsetTableSize, offsetTableOffset, (size_t)indx,
3327
0
                    decompress_block)))
3328
0
        {
3329
0
        case -1:
3330
0
            goto on_error;
3331
0
        case  0:
3332
0
            continue;
3333
0
        default:
3334
0
            break;
3335
0
        }
3336
0
3337
0
        // If this is the first comp unit, then we must skip over the
3338
0
        // startUnitOffset bytes.
3339
0
        if (indx == startUnit) {
3340
0
            uncLen -= startUnitOffset;
3341
0
            uncBufPtr += startUnitOffset;
3342
0
        }
3343
0
3344
0
        // How many bytes to copy from this compression unit?
3345
0
3346
0
        if (bytesCopied + uncLen < (uint64_t) a_len)    // cast OK because a_len > 0
3347
0
            bytesToCopy = (size_t) uncLen;      // uncLen <= size of compression unit, which is small, so cast is OK
3348
0
        else
3349
0
            bytesToCopy = (size_t) (((uint64_t) a_len) - bytesCopied);  // diff <= compression unit size, so cast is OK
3350
0
3351
0
        // Copy into the output buffer, and update bookkeeping.
3352
0
        memcpy(a_buf + bytesCopied, uncBufPtr, bytesToCopy);
3353
0
        bytesCopied += bytesToCopy;
3354
0
    }
3355
0
3356
0
    // Well, we don't know (without a lot of work) what the
3357
0
    // true uncompressed size of the stream is.  All we know is the "upper bound" which
3358
0
    // assumes that all of the compression units expand to their full size.  If we did
3359
0
    // know the true size, then we could reject requests that go beyond the end of the
3360
0
    // stream.  Instead, we treat the stream as if it is padded out to the full size of
3361
0
    // the last compression unit with zeros.
3362
0
3363
0
    // Have we read and copied all of the bytes requested?
3364
0
    if (bytesCopied < a_len) {
3365
0
        // set the remaining bytes to zero
3366
0
        memset(a_buf + bytesCopied, 0, a_len - (size_t) bytesCopied);   // cast OK because diff must be < compression unit size
3367
0
    }
3368
0
3369
0
    free(offsetTable);
3370
0
    free(rawBuf);
3371
0
    free(uncBuf);
3372
0
3373
0
    return (ssize_t) bytesCopied;       // cast OK, cannot be greater than a_len which cannot be greater than SIZE_MAX/2 (rounded down).
3374
0
3375
0
on_error:
3376
0
    free(offsetTable);
3377
0
    free(rawBuf);
3378
0
    free(uncBuf);
3379
0
    return -1;
3380
0
}
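
The read path above first maps the requested byte range onto whole compression units: the first unit to decompress, the offset to skip within it, and the last unit. A short standalone example of that arithmetic; the 64 KiB unit size and the sample request are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define COMPRESSION_UNIT_SIZE 65536   /* assumed decmpfs chunk size (64 KiB) */

    int main(void) {
        /* Sample request: 200000 bytes starting at offset 70000. */
        uint64_t a_offset = 70000, a_len = 200000;

        uint64_t startUnit       = a_offset / COMPRESSION_UNIT_SIZE;               /* 1    */
        uint32_t startUnitOffset = (uint32_t)(a_offset % COMPRESSION_UNIT_SIZE);   /* 4464 */
        uint64_t endUnit         = (a_offset + a_len - 1) / COMPRESSION_UNIT_SIZE; /* 4    */

        printf("decompress units %llu..%llu, skip %u bytes of the first one\n",
            (unsigned long long)startUnit, (unsigned long long)endUnit, startUnitOffset);
        return 0;
    }
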
3381
3382
3383
#ifdef HAVE_LIBZ
3384
/**
3385
 * \internal
3386
 * Read a ZLIB compressed resource
3387
 *
3388
 * @param a_fs_attr the attribute to read
3389
 * @param a_offset the offset from which to read
3390
 * @param a_buf the buffer into which to read
3391
 * @param a_len the length of the buffer
3392
 * @return number of bytes read or -1 on error (incl if offset is past EOF)
3393
 */
3394
static ssize_t
3395
hfs_file_read_zlib_rsrc(const TSK_FS_ATTR * a_fs_attr,
3396
    TSK_OFF_T a_offset, char *a_buf, size_t a_len)
3397
0
{
3398
0
    return hfs_file_read_compressed_rsrc(
3399
0
        a_fs_attr, a_offset, a_buf, a_len,
3400
0
        hfs_read_zlib_block_table,
3401
0
        hfs_decompress_zlib_block
3402
0
    );
3403
0
}
3404
#endif
3405
3406
3407
/**
3408
 * \internal
3409
 * Read an LZVN compressed resource
3410
 *
3411
 * @param a_fs_attr the attribute to read
3412
 * @param a_offset the offset from which to read
3413
 * @param a_buf the buffer into which to read
3414
 * @param a_len the length of the buffer
3415
 * @return number of bytes read or -1 on error (incl if offset is past EOF)
3416
 */
3417
static ssize_t
3418
hfs_file_read_lzvn_rsrc(const TSK_FS_ATTR * a_fs_attr,
3419
    TSK_OFF_T a_offset, char *a_buf, size_t a_len)
3420
0
{
3421
0
    return hfs_file_read_compressed_rsrc(
3422
0
        a_fs_attr, a_offset, a_buf, a_len,
3423
0
        hfs_read_lzvn_block_table,
3424
0
        hfs_decompress_lzvn_block
3425
0
    );
3426
0
}
3427
3428
3429
/**
3430
 * \internal
3431
 * "Decompress" an uncompressed attr
3432
 *
3433
 * HFS+ compression schemes allow for some blocks to be stored uncompressed.
3434
 *
3435
 * @param rawBuf source buffer
3436
 * @param rawSize size of source buffer
3437
 * @param uncSize expected uncompressed size
3438
 * @param dstBuf destination buffer
3439
 * @param dstSize size of destination buffer
3440
 * @param dstBufFree true iff the caller must free the destination buffer
3441
 * @return 1
3442
 */
3443
0
static int hfs_decompress_noncompressed_attr(char* rawBuf, uint32_t rawSize, uint64_t uncSize, char** dstBuf, uint64_t* dstSize, int* dstBufFree) {
3444
0
    if (tsk_verbose)
3445
0
        tsk_fprintf(stderr,
3446
0
            "%s: Leading byte, 0x%02x, indicates that the data is not really compressed.\n"
3447
0
            "%s:  Loading the default DATA attribute.", __func__, rawBuf[0], __func__);
3448
0
3449
0
    *dstBuf = rawBuf + 1;  // + 1 indicator byte
3450
0
    *dstSize = uncSize;
3451
0
    *dstBufFree = FALSE;
3452
0
    return 1;
3453
0
}
3454
3455
3456
/**
3457
 * \internal
3458
 * Decompress a ZLIB compressed attr
3459
 *
3460
 * @param rawBuf source buffer
3461
 * @param rawSize size of source buffer
3462
 * @param uncSize expected uncompressed size
3463
 * @param dstBuf destination buffer
3464
 * @param dstSize size of destination buffer
3465
 * @param dstBufFree true iff the caller must free the destination buffer
3466
 * @return 1 on success, 0 on error
3467
 */
3468
static int hfs_decompress_zlib_attr(char* rawBuf, uint32_t rawSize, uint64_t uncSize, char** dstBuf, uint64_t* dstSize, int* dstBufFree)
3469
0
{
3470
0
    // ZLIB blocks cannot start with 0xF as the low nibble, so that's used
3471
0
    // as the flag for noncompressed blocks
3472
0
    if ((rawBuf[0] & 0x0F) == 0x0F) {
3473
0
        return hfs_decompress_noncompressed_attr(
3474
0
            rawBuf, rawSize, uncSize, dstBuf, dstSize, dstBufFree);
3475
0
    }
3476
0
    else {
3477
0
#ifdef HAVE_LIBZ
3478
0
        char* uncBuf = NULL;
3479
0
        uint64_t uLen;
3480
0
        unsigned long bytesConsumed;
3481
0
        int infResult;
3482
0
3483
0
        if (tsk_verbose)
3484
0
            tsk_fprintf(stderr,
3485
0
                        "%s: Uncompressing (inflating) data.", __func__);
3486
0
        // Uncompress the remainder of the attribute, and load as 128-0
3487
0
        // Note: cast is OK because uncSize will be quite modest, < 4000.
3488
0
3489
0
        uncBuf = (char *) tsk_malloc((size_t) uncSize + 100); // add some extra space
3490
0
        if (uncBuf == NULL) {
3491
0
            error_returned
3492
0
                (" - %s, space for the uncompressed attr", __func__);
3493
0
            return 0;
3494
0
        }
3495
0
3496
0
        infResult = zlib_inflate(rawBuf, (uint64_t) rawSize,
3497
0
                                 uncBuf, (uint64_t) (uncSize + 100),
3498
0
                                 &uLen, &bytesConsumed);
3499
0
        if (infResult != 0) {
3500
0
            error_returned
3501
0
                (" %s, zlib could not uncompress attr", __func__);
3502
0
            free(uncBuf);
3503
0
            return 0;
3504
0
        }
3505
0
3506
0
        if (bytesConsumed != rawSize) {
3507
0
            error_detected(TSK_ERR_FS_READ,
3508
0
                " %s, decompressor did not consume the whole compressed data",
3509
0
                __func__);
3510
0
            free(uncBuf);
3511
0
            return 0;
3512
0
        }
3513
0
3514
0
        *dstBuf = uncBuf;
3515
0
        *dstSize = uncSize;
3516
0
        *dstBufFree = TRUE;
3517
0
#else
3518
0
        // ZLIB compression library is not available, so we will load a
3519
0
        // zero-length default DATA attribute. Without this, icat may
3520
0
        // misbehave.
3521
0
3522
0
        if (tsk_verbose)
3523
0
            tsk_fprintf(stderr,
3524
0
                        "%s: ZLIB not available, so loading an empty default DATA attribute.\n", __func__);
3525
0
3526
0
        // Dummy is one byte long, so the ptr is not null, but we set the
3527
0
        // length to zero bytes, so it is never read.
3528
0
        static uint8_t dummy[1];
3529
0
3530
0
        *dstBuf = dummy;
3531
0
        *dstSize = 0;
3532
0
        *dstBufFree = FALSE;
3533
0
#endif
3534
0
    }
3535
0
3536
0
    return 1;
3537
0
}
3538
3539
3540
/**
3541
 * \internal
3542
 * Decompress an LZVN compressed attr
3543
 *
3544
 * @param rawBuf source buffer
3545
 * @param rawSize size of source buffer
3546
 * @param uncSize expected uncompressed size
3547
 * @param dstBuf destination buffer
3548
 * @param dstSize size of destination buffer
3549
 * @param dstBufFree true iff the caller must free the destination buffer
3550
 * @return 1 on success, 0 on error
3551
 */
3552
static int hfs_decompress_lzvn_attr(char* rawBuf, uint32_t rawSize, uint64_t uncSize, char** dstBuf, uint64_t* dstSize, int* dstBufFree)
3553
0
{
3554
0
    // LZVN blocks cannot start with 0x06, so that's used as the flag for
3555
0
    // noncompressed blocks
3556
0
    if (rawBuf[0] == 0x06) {
3557
0
        return hfs_decompress_noncompressed_attr(
3558
0
            rawBuf, rawSize, uncSize, dstBuf, dstSize, dstBufFree);
3559
0
    }
3560
0
3561
0
    char* uncBuf = (char *) tsk_malloc((size_t) uncSize);
3562
0
    *dstSize = lzvn_decode_buffer(uncBuf, uncSize, rawBuf, rawSize);
3563
0
    *dstBuf = uncBuf;
3564
0
    *dstBufFree = TRUE;
3565
0
3566
0
    return 1;
3567
0
}
3568
3569
3570
/**
3571
 * \internal
3572
 * Read a compressed attr
3573
 *
3574
 * @param fs_file the file
3575
 * @param cmpType compression type
3576
 * @param buffer the attribute data (compression record followed by the compressed payload)
3577
 * @param attributeLength length of the attribute
3578
 * @param uncSize uncompressed size
3579
 * @param decompress_attr pointer to the decompression function
3580
 * @return 1 on success, 0 on error
3581
 */
3582
static int
3583
hfs_file_read_compressed_attr(TSK_FS_FILE* fs_file,
3584
                              uint8_t cmpType,
3585
                              char* buffer,
3586
                              uint32_t attributeLength,
3587
                              uint64_t uncSize,
3588
                              int (*decompress_attr)(char* rawBuf,
3589
                                                     uint32_t rawSize,
3590
                                                     uint64_t uncSize,
3591
                                                     char** dstBuf,
3592
                                                     uint64_t* dstSize,
3593
                                                     int* dstBufFree))
3594
0
{
3595
0
    // Data is inline. We will load the uncompressed data as a
3596
0
    // resident attribute.
3597
0
    if (tsk_verbose)
3598
0
        tsk_fprintf(stderr,
3599
0
            "%s: Compressed data is inline in the attribute, will load this as the default DATA attribute.\n", __func__);
3600
0
3601
0
    if (attributeLength <= 16) {
3602
0
        if (tsk_verbose)
3603
0
            tsk_fprintf(stderr,
3604
0
                "%s: WARNING, Compression Record of type %u is not followed by"
3605
0
                " compressed data. No data will be loaded into the DATA"
3606
0
                " attribute.\n", __func__, cmpType);
3607
0
3608
0
        // oddly, this is not actually considered an error
3609
0
        return 1;
3610
0
    }
3611
0
3612
0
    TSK_FS_ATTR *fs_attr_unc;
3613
0
3614
0
    // There is data following the compression record, as there should be.
3615
0
    if ((fs_attr_unc = tsk_fs_attrlist_getnew(
3616
0
          fs_file->meta->attr, TSK_FS_ATTR_RES)) == NULL)
3617
0
    {
3618
0
        error_returned(" - %s, FS_ATTR for uncompressed data", __func__);
3619
0
        return 0;
3620
0
    }
3621
0
3622
0
    char* dstBuf;
3623
0
    uint64_t dstSize;
3624
0
    int dstBufFree = FALSE;
3625
0
3626
0
    if (!decompress_attr(buffer + 16, attributeLength - 16, uncSize,
3627
0
                         &dstBuf, &dstSize, &dstBufFree)) {
3628
0
        return 0;
3629
0
    }
3630
0
3631
0
    if (dstSize != uncSize) {
3632
0
        error_detected(TSK_ERR_FS_READ,
3633
0
            " %s, actual uncompressed size not equal to the size in the compression record", __func__);
3634
0
        goto on_error;
3635
0
    }
3636
0
3637
0
    if (tsk_verbose)
3638
0
       tsk_fprintf(stderr,
3639
0
                   "%s: Loading decompressed data as default DATA attribute.",
3640
0
                   __func__);
3641
0
3642
0
    // Load the remainder of the attribute as 128-0
3643
0
    // set the details in the fs_attr structure.
3644
0
    // Note, we are loading this as a RESIDENT attribute.
3645
0
    if (tsk_fs_attr_set_str(fs_file,
3646
0
                            fs_attr_unc, "DATA",
3647
0
                            TSK_FS_ATTR_TYPE_HFS_DATA,
3648
0
                            HFS_FS_ATTR_ID_DATA, dstBuf,
3649
0
                            dstSize))
3650
0
    {
3651
0
        error_returned(" - %s", __func__);
3652
0
        goto on_error;
3653
0
    }
3654
0
3655
0
    if (dstBufFree) {
3656
0
        free(dstBuf);
3657
0
    }
3658
0
    return 1;
3659
0
3660
0
on_error:
3661
0
    if (dstBufFree) {
3662
0
        free(dstBuf);
3663
0
    }
3664
0
    return 0;
3665
0
}
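
The "buffer + 16" / "attributeLength - 16" arithmetic above skips the fixed decmpfs record that opens the com.apple.decmpfs attribute; only the bytes after it are handed to the decompressor. A sketch of that 16-byte header, following the commonly documented decmpfs layout (the struct and field names here are illustrative, not TSK's definitions):

    #include <stdint.h>

    /* The inline payload begins immediately after this 16-byte record. */
    typedef struct {
        uint32_t magic;              /* 'fpmc' signature                      */
        uint32_t compression_type;   /* scheme + storage location of the data */
        uint64_t uncompressed_size;  /* size the payload expands to           */
    } decmpfs_header_sketch;
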
3666
3667
3668
/**
3669
 * \internal
3670
 * Read a ZLIB compressed attr
3671
 *
3672
 * @param fs_file the file
3673
 * @param buffer the attribute data (compression record followed by the compressed payload)
3674
 * @param attributeLength length of the attribute
3675
 * @param uncSize uncompressed size
3676
 * @return 1 on success, 0 on error
3677
 */
3678
static int hfs_file_read_zlib_attr(TSK_FS_FILE* fs_file,
3679
                            char* buffer,
3680
                            uint32_t attributeLength,
3681
                            uint64_t uncSize)
3682
0
{
3683
0
    return hfs_file_read_compressed_attr(
3684
0
        fs_file, DECMPFS_TYPE_ZLIB_ATTR,
3685
0
        buffer, attributeLength, uncSize,
3686
0
        hfs_decompress_zlib_attr
3687
0
    );
3688
0
}
3689
3690
3691
/**
3692
 * \internal
3693
 * Read an LZVN compressed attr
3694
 *
3695
 * @param fs_file the file
3696
 * @param buffer the attribute data (compression record followed by the compressed payload)
3697
 * @param attributeLength length of the attribute
3698
 * @param uncSize uncompressed size
3699
 * @return 1 on success, 0 on error
3700
 */
3701
static int hfs_file_read_lzvn_attr(TSK_FS_FILE* fs_file,
3702
                            char* buffer,
3703
                            uint32_t attributeLength,
3704
                            uint64_t uncSize)
3705
0
{
3706
0
    return hfs_file_read_compressed_attr(
3707
0
        fs_file, DECMPFS_TYPE_LZVN_ATTR,
3708
0
        buffer, attributeLength, uncSize,
3709
0
        hfs_decompress_lzvn_attr
3710
0
    );
3711
0
}
3712
3713
3714
typedef struct {
3715
    TSK_FS_INFO *fs;            // the HFS file system
3716
    TSK_FS_FILE *file;          // the Attributes file, if open
3717
    hfs_btree_header_record *header;    // the Attributes btree header record.
3718
    // For Convenience, unpacked values.
3719
    TSK_ENDIAN_ENUM endian;
3720
    uint32_t rootNode;
3721
    uint16_t nodeSize;
3722
    uint16_t maxKeyLen;
3723
} ATTR_FILE_T;
3724
3725
3726
/** \internal
3727
 * Open the Attributes file, and read the btree header record. Fill in the fields of the ATTR_FILE_T struct.
3728
 *
3729
 * @param fs -- the HFS file system
3730
 * @param attr_file -- the ATTR_FILE_T struct whose fields will be filled in
3731
 *
3732
 * @return 1 on error, 0 on success
3733
 */
3734
static uint8_t
3735
open_attr_file(TSK_FS_INFO * fs, ATTR_FILE_T * attr_file)
3736
0
{
3737
3738
0
    ssize_t cnt;                    // will hold bytes read
3739
3740
0
    hfs_btree_header_record *hrec;
3741
3742
    // clean up any error messages that are lying around
3743
0
    tsk_error_reset();
3744
3745
0
    if (fs == NULL) {
3746
0
        tsk_error_set_errno(TSK_ERR_FS_ARG);
3747
0
        tsk_error_set_errstr("open_attr_file: fs is NULL");
3748
0
        return 1;
3749
0
    }
3750
3751
0
    if (attr_file == NULL) {
3752
0
        tsk_error_set_errno(TSK_ERR_FS_ARG);
3753
0
        tsk_error_set_errstr("open_attr_file: attr_file is NULL");
3754
0
        return 1;
3755
0
    }
3756
3757
    // Open the Attributes File
3758
0
    attr_file->file =
3759
0
        tsk_fs_file_open_meta(fs, NULL, HFS_ATTRIBUTES_FILE_ID);
3760
3761
0
    if (attr_file->file == NULL) {
3762
0
        tsk_error_set_errno(TSK_ERR_FS_READ);
3763
0
        tsk_error_set_errstr
3764
0
            ("open_attr_file: could not open the Attributes file");
3765
0
        return 1;
3766
0
    }
3767
3768
    // Allocate some space for the Attributes btree header record (which
3769
    //       is passed back to the caller)
3770
0
    hrec = (hfs_btree_header_record *)
3771
0
        malloc(sizeof(hfs_btree_header_record));
3772
3773
0
    if (hrec == NULL) {
3774
0
        tsk_error_set_errno(TSK_ERR_FS);
3775
0
        tsk_error_set_errstr
3776
0
            ("open_attr_file: could not malloc space for Attributes header record");
3777
0
        return 1;
3778
0
    }
3779
3780
    // Read the btree header record
3781
0
    cnt = tsk_fs_file_read(attr_file->file,
3782
0
        14,
3783
0
        (char *) hrec,
3784
0
        sizeof(hfs_btree_header_record), (TSK_FS_FILE_READ_FLAG_ENUM) 0);
3785
0
    if (cnt != (ssize_t)sizeof(hfs_btree_header_record)) {
3786
0
        tsk_error_set_errno(TSK_ERR_FS_READ);
3787
0
        tsk_error_set_errstr
3788
0
            ("open_attr_file: could not open the Attributes file");
3789
0
        tsk_fs_file_close(attr_file->file);
3790
0
        free(hrec);
3791
0
        return 1;
3792
0
    }
3793
3794
    // Fill in the fields of the attr_file struct (which was passed in by the caller)
3795
0
    attr_file->fs = fs;
3796
0
    attr_file->header = hrec;
3797
0
    attr_file->endian = fs->endian;
3798
0
    attr_file->nodeSize = tsk_getu16(attr_file->endian, hrec->nodesize);
3799
0
    attr_file->rootNode = tsk_getu32(attr_file->endian, hrec->rootNode);
3800
0
    attr_file->maxKeyLen = tsk_getu16(attr_file->endian, hrec->maxKeyLen);
3801
3802
0
    return 0;
3803
0
}
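
The header record is read at offset 14 because an HFS+ B-tree file begins with node 0, whose 14-byte node descriptor is immediately followed by the header record. A sketch of the leading fields that open_attr_file unpacks, following Apple's published BTHeaderRec layout (big-endian on disk; the struct name and byte-array style below are illustrative, not TSK's hfs_btree_header_record definition):

    #include <stdint.h>

    /* Leading fields of the B-tree header record, as raw big-endian bytes. */
    typedef struct {
        uint8_t treeDepth[2];
        uint8_t rootNode[4];      /* -> attr_file->rootNode  */
        uint8_t leafRecords[4];
        uint8_t firstLeafNode[4];
        uint8_t lastLeafNode[4];
        uint8_t nodeSize[2];      /* -> attr_file->nodeSize  */
        uint8_t maxKeyLength[2];  /* -> attr_file->maxKeyLen */
        /* ... further fields (totalNodes, freeNodes, ...) follow */
    } btree_header_sketch;
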
3804
3805
3806
/** \internal
3807
 * Closes and frees the data structures associated with ATTR_FILE_T
3808
 */
3809
static uint8_t
3810
close_attr_file(ATTR_FILE_T * attr_file)
3811
0
{
3812
0
    if (attr_file == NULL) {
3813
0
        tsk_error_set_errno(TSK_ERR_FS_READ);
3814
0
        tsk_error_set_errstr("close_attr_file: NULL attr_file arg");
3815
0
        return 1;
3816
0
    }
3817
3818
0
    if (attr_file->file != NULL) {
3819
0
        tsk_fs_file_close(attr_file->file);
3820
0
        attr_file->file = NULL;
3821
0
    }
3822
3823
0
    free(attr_file->header);
3824
0
    attr_file->header = NULL;
3825
3826
0
    attr_file->rootNode = 0;
3827
0
    attr_file->nodeSize = 0;
3828
    // Note that we leave the fs component alone.
3829
0
    return 0;
3830
0
}
3831
3832
3833
static const char *
3834
hfs_attrTypeName(uint32_t typeNum)
3835
0
{
3836
0
    switch (typeNum) {
3837
0
    case TSK_FS_ATTR_TYPE_HFS_DEFAULT:
3838
0
        return "DFLT";
3839
0
    case TSK_FS_ATTR_TYPE_HFS_DATA:
3840
0
        return "DATA";
3841
0
    case TSK_FS_ATTR_TYPE_HFS_EXT_ATTR:
3842
0
        return "ExATTR";
3843
0
    case TSK_FS_ATTR_TYPE_HFS_COMP_REC:
3844
0
        return "CMPF";
3845
0
    case TSK_FS_ATTR_TYPE_HFS_RSRC:
3846
0
        return "RSRC";
3847
0
    default:
3848
0
        return "UNKN";
3849
0
    }
3850
0
}
3851
3852
3853
// TODO: Function description missing here; it is not clear what this is supposed to return
3854
// in which circumstances.
3855
static uint8_t
3856
hfs_load_extended_attrs(TSK_FS_FILE * fs_file,
3857
    unsigned char *isCompressed, unsigned char *cmpType,
3858
    uint64_t *uncompressedSize)
3859
0
{
3860
0
    TSK_FS_INFO *fs = fs_file->fs_info;
3861
0
    uint64_t fileID;
3862
0
    ATTR_FILE_T attrFile;
3863
0
    uint8_t *nodeData;
3864
0
    TSK_ENDIAN_ENUM endian;
3865
0
    hfs_btree_node *nodeDescriptor;     // The node descriptor
3866
0
    uint32_t nodeID;            // The number or ID of the Attributes file node to read.
3867
0
    hfs_btree_key_attr *keyB;   // ptr to the key of the Attr file record.
3868
0
    unsigned char done;         // Flag to indicate that we are done looping over leaf nodes
3869
0
    uint16_t attribute_counter = 2;     // The ID of the next attribute to be loaded.
3870
0
    HFS_INFO *hfs;
3871
0
    char *buffer = NULL;   // buffer to hold the attribute
3872
0
    TSK_LIST *nodeIDs_processed = NULL; // Keep track of node IDs to prevent an infinite loop
3873
0
    ssize_t cnt;                    // count of chars read from file.
3874
3875
0
    tsk_error_reset();
3876
3877
    // The CNID (or inode number) of the file
3878
    //  Note that in TSK such numbers are 64 bits, but in HFS+ they are only 32 bits.
3879
0
    fileID = fs_file->meta->addr;
3880
3881
0
    if (fs == NULL) {
3882
0
        error_detected(TSK_ERR_FS_ARG,
3883
0
            "hfs_load_extended_attrs: NULL fs arg");
3884
0
        return 1;
3885
0
    }
3886
3887
0
    hfs = (HFS_INFO *) fs;
3888
3889
0
    if (!hfs->has_attributes_file) {
3890
        // No attributes file, and so, no extended attributes
3891
0
        return 0;
3892
0
    }
3893
3894
0
    if (tsk_verbose) {
3895
0
        tsk_fprintf(stderr,
3896
0
            "hfs_load_extended_attrs:  Processing file %" PRIuINUM "\n",
3897
0
            fileID);
3898
0
    }
3899
3900
    // Open the Attributes File
3901
0
    if (open_attr_file(fs, &attrFile)) {
3902
0
        error_returned
3903
0
            ("hfs_load_extended_attrs: could not open Attributes file");
3904
0
        return 1;
3905
0
    }
3906
3907
    // Is the Attributes file empty?
3908
0
    if (attrFile.rootNode == 0) {
3909
0
        if (tsk_verbose)
3910
0
            tsk_fprintf(stderr,
3911
0
                "hfs_load_extended_attrs: Attributes file is empty\n");
3912
0
        close_attr_file(&attrFile);
3913
0
        *isCompressed = FALSE;
3914
0
        *cmpType = 0;
3915
0
        return 0;
3916
0
    }
3917
3918
0
    if (attrFile.nodeSize < sizeof(hfs_btree_node)) {
3919
0
        error_returned
3920
0
            ("hfs_load_extended_attrs: node size too small");
3921
0
        close_attr_file(&attrFile);
3922
0
        return 1;
3923
0
    }
3924
3925
    // A place to hold one node worth of data
3926
0
    nodeData = (uint8_t *) malloc(attrFile.nodeSize);
3927
0
    if (nodeData == NULL) {
3928
0
        error_detected(TSK_ERR_AUX_MALLOC,
3929
0
            "hfs_load_extended_attrs: Could not malloc space for an Attributes file node");
3930
0
        goto on_error;
3931
0
    }
3932
3933
    // Initialize these
3934
0
    *isCompressed = FALSE;
3935
0
    *cmpType = 0;
3936
3937
0
    endian = attrFile.fs->endian;
3938
3939
    // Start with the root node
3940
0
    nodeID = attrFile.rootNode;
3941
3942
    // While loop, over nodes in path from root node to the correct LEAF node.
3943
0
    while (1) {
3944
0
        uint16_t numRec;        // Number of records in the node
3945
0
        int recIndx;            // index for looping over records
3946
3947
0
        if (tsk_verbose) {
3948
0
            tsk_fprintf(stderr,
3949
0
                "hfs_load_extended_attrs: Reading Attributes File node with ID %"
3950
0
                PRIu32 "\n", nodeID);
3951
0
        }
3952
3953
        /* Make sure we do not get into an infinite loop */
3954
0
        if (tsk_list_find(nodeIDs_processed, nodeID)) {
3955
0
            error_detected(TSK_ERR_FS_READ,
3956
0
                "hfs_load_extended_attrs: Infinite loop detected - trying to read node %" PRIu32 " which has already been processed", nodeID);
3957
0
            goto on_error;
3958
0
        }
3959
3960
3961
        /* Read the node */
3962
0
        cnt = tsk_fs_file_read(attrFile.file,
3963
0
            (TSK_OFF_T)nodeID * attrFile.nodeSize,
3964
0
            (char *) nodeData,
3965
0
            attrFile.nodeSize, (TSK_FS_FILE_READ_FLAG_ENUM) 0);
3966
0
        if (cnt != (ssize_t)attrFile.nodeSize) {
3967
0
            error_returned
3968
0
                ("hfs_load_extended_attrs: Could not read in a node from the Attributes File");
3969
0
            goto on_error;
3970
0
        }
3971
3972
        /* Save this node ID to the list of processed nodes */
3973
0
        if (tsk_list_add(&nodeIDs_processed, nodeID)) {
3974
0
            error_detected(TSK_ERR_FS_READ,
3975
0
                "hfs_load_extended_attrs: Could not save nodeID to the list of processed nodes");
3976
0
            goto on_error;
3977
0
        }
3978
3979
        /** Node has a:
3980
         * Descriptor
3981
         * Set of records
3982
         * Table at the end with pointers to the records
3983
         */
3984
        // Parse the Node header
3985
0
        nodeDescriptor = (hfs_btree_node *) nodeData;
3986
3987
        // If we are at a leaf node, then we have found the right node
3988
0
        if (nodeDescriptor->type == HFS_ATTR_NODE_LEAF) {
3989
0
            break;
3990
0
        }
3991
3992
        // This had better be an INDEX node, if not its an error
3993
0
        else if (nodeDescriptor->type != HFS_ATTR_NODE_INDEX) {
3994
0
            error_detected(TSK_ERR_FS_READ,
3995
0
                "hfs_load_extended_attrs: Reached a non-INDEX and non-LEAF node in searching the Attributes File");
3996
0
            goto on_error;
3997
0
        }
3998
3999
        // OK, we are in an INDEX node.  Loop over the records to find the last one whose key is
4000
        // smaller than or equal to the desired key
4001
4002
0
        numRec = tsk_getu16(endian, nodeDescriptor->num_rec);
4003
0
        if (numRec == 0) {
4004
            // This is wrong, there must always be at least 1 record in an INDEX node.
4005
0
            error_detected(TSK_ERR_FS_READ,
4006
0
                "hfs_load_extended_attrs:Attributes File index node %"
4007
0
                PRIu32 " has zero records", nodeID);
4008
0
            goto on_error;
4009
0
        }
4010
4011
0
        for (recIndx = 0; recIndx < numRec; ++recIndx) {
4012
0
            uint16_t keyLength;
4013
0
            int comp;           // comparison result
4014
0
            char *compStr;      // comparison result, as a string
4015
0
            uint8_t *recData;   // pointer to the data part of the record
4016
0
            uint32_t keyFileID;
4017
4018
0
            if ((attrFile.nodeSize < 2) || (recIndx > ((attrFile.nodeSize - 2) / 2))) {
4019
0
                error_detected(TSK_ERR_FS_READ,
4020
0
                    "hfs_load_extended_attrs: Unable to process attribute (recIndx exceeds attrFile.nodeSize)");
4021
0
                goto on_error;
4022
0
            }
4023
4024
            // The offset to the record is stored in the table at the end of the node
4025
0
            uint8_t *recOffsetTblEntry = &nodeData[attrFile.nodeSize - (2 * (recIndx + 1))];  // data describing where this record is
4026
0
            uint16_t recOffset = tsk_getu16(endian, recOffsetTblEntry);
4027
            //uint8_t * nextRecOffsetData = &nodeData[attrFile.nodeSize - 2* (recIndx+2)];
4028
4029
            // make sure the record and first fields are in the buffer
4030
0
            if ((attrFile.nodeSize < 14) || (recOffset >= attrFile.nodeSize - 14)) {
4031
0
                error_detected(TSK_ERR_FS_READ,
4032
0
                    "hfs_load_extended_attrs: Unable to process attribute (offset too big)");
4033
0
                goto on_error;
4034
0
            }
4035
4036
            // Pointer to first byte of record
4037
0
            uint8_t *recordBytes = &nodeData[recOffset];
4038
4039
4040
            // Cast that to the Attributes file key (n.b., the key is the first thing in the record)
4041
0
            keyB = (hfs_btree_key_attr *) recordBytes;
4042
4043
            // Is this key less than what we are seeking?
4044
            //int comp = comp_attr_key(endian, keyB, fileID, attrName, startBlock);
4045
4046
0
            keyFileID = tsk_getu32(endian, keyB->file_id);
4047
0
            if (keyFileID < fileID) {
4048
0
                comp = -1;
4049
0
                compStr = "less than";
4050
0
            }
4051
0
            else if (keyFileID > fileID) {
4052
0
                comp = 1;
4053
0
                compStr = "greater than";
4054
0
            }
4055
0
            else {
4056
0
                comp = 0;
4057
0
                compStr = "equal to";
4058
0
            }
4059
0
            if (tsk_verbose)
4060
0
                tsk_fprintf(stderr,
4061
0
                    "hfs_load_extended_attrs: INDEX record %d, fileID %"
4062
0
                    PRIu32 " is %s the file ID we are seeking, %" PRIu32
4063
0
                    ".\n", recIndx, keyFileID, compStr, fileID);
4064
0
            if (comp > 0) {
4065
                // The key of this record is greater than what we are seeking
4066
0
                if (recIndx == 0) {
4067
                    // This is the first record, so no records are appropriate
4068
                    // Nothing in this btree will match.  We can stop right here.
4069
0
                    goto on_exit;
4070
0
                }
4071
4072
                // This is not the first record, so, the previous record's child is the one we want.
4073
0
                break;
4074
0
            }
4075
4076
            // CASE:  key in this record matches the key we are seeking.  The previous record's child
4077
            // is the one we want.  However, if this is the first record, then we want THIS record's child.
4078
0
            if (comp == 0 && recIndx != 0) {
4079
0
                break;
4080
0
            }
4081
4082
            // Extract the child node ID from the record data (stored after the key)
4083
0
            keyLength = tsk_getu16(endian, keyB->key_len);
4084
            // make sure the fields we care about are still in the buffer
4085
            // +2 is because key_len doesn't include its own length
4086
            // +4 is because of the amount of data we read from the data
4087
0
            if ((keyLength > attrFile.nodeSize - 2 - 4) || (recOffset >= attrFile.nodeSize - 2 - 4 - keyLength)) {
4088
0
                error_detected(TSK_ERR_FS_READ,
4089
0
                    "hfs_load_extended_attrs: Unable to process attribute");
4090
0
                goto on_error;
4091
0
            }
4092
4093
0
            recData = &recordBytes[keyLength + 2];
4094
4095
            // Data must start on an even offset from the beginning of the record.
4096
            // So, correct this if needed.
4097
0
            if ((recData - recordBytes) % 2) {
4098
0
                recData += 1;
4099
0
            }
4100
4101
            // The next four bytes should be the Node ID of the child of this node.
4102
0
            nodeID = tsk_getu32(endian, recData);
4103
4104
            // At this point, either comp<0 or comp=0 && recIndx=0.  In the latter case we want to
4105
            // descend to the child of this node, so we break.
4106
0
            if (recIndx == 0 && comp == 0) {
4107
0
                break;
4108
0
            }
4109
4110
            // CASE: key in this record is less than key we seek.  comp < 0
4111
            // So, continue looping over records in this node.
4112
0
        }                       // END loop over records
4113
4114
0
    }                           // END while loop over Nodes in path from root to LEAF node
4115
4116
    // At this point nodeData holds the contents of a LEAF node with the right range of keys
4117
    // and nodeDescriptor points to the descriptor of that node.
4118
4119
    // Loop over successive LEAF nodes, starting with this one
4120
0
    done = FALSE;
4121
0
    while (!done) {
4122
0
        uint16_t numRec;        // number of records
4123
0
        unsigned int recIndx;            // index for looping over records
4124
4125
0
        if (tsk_verbose)
4126
0
            tsk_fprintf(stderr,
4127
0
                "hfs_load_extended_attrs: Attributes File LEAF Node %"
4128
0
                PRIu32 ".\n", nodeID);
4129
0
        numRec = tsk_getu16(endian, nodeDescriptor->num_rec);
4130
        // Note, leaf node could have one (or maybe zero) records
4131
4132
        // Loop over the records in this node
4133
0
        for (recIndx = 0; recIndx < numRec; ++recIndx) {
4134
4135
0
            if ((attrFile.nodeSize < 2) || (recIndx > ((attrFile.nodeSize - 2) / 2))) {
4136
0
                error_detected(TSK_ERR_FS_READ,
4137
0
                    "hfs_load_extended_attrs: Unable to process attribute (recIndx exceeds attrFile.nodeSize)");
4138
0
                goto on_error;
4139
0
            }
4140
            // The offset to the record is stored in the table at the end of the node
4141
0
            uint8_t *recOffsetTblEntry = &nodeData[attrFile.nodeSize - (2 * (recIndx + 1))];  // data describing where this record is
4142
0
            uint16_t recOffset = tsk_getu16(endian, recOffsetTblEntry);
4143
4144
0
            int comp;           // comparison result
4145
0
            char *compStr;      // comparison result as a string
4146
0
            uint32_t keyFileID;
4147
4148
            // make sure the record and first fields are in the buffer
4149
0
            if (recOffset >= attrFile.nodeSize - 14) {
4150
0
                error_detected(TSK_ERR_FS_READ,
4151
0
                    "hfs_load_extended_attrs: Unable to process attribute (offset too big)");
4152
0
                goto on_error;
4153
0
            }
4154
4155
            // Pointer to first byte of record
4156
0
            uint8_t *recordBytes = &nodeData[recOffset];
4157
4158
            // Cast that to the Attributes file key
4159
0
            keyB = (hfs_btree_key_attr *) recordBytes;
4160
4161
            // Compare recordBytes key to the key that we are seeking
4162
0
            keyFileID = tsk_getu32(endian, keyB->file_id);
4163
4164
            //fprintf(stdout, " Key file ID = %lu\n", keyFileID);
4165
0
            if (keyFileID < fileID) {
4166
0
                comp = -1;
4167
0
                compStr = "less than";
4168
0
            }
4169
0
            else if (keyFileID > fileID) {
4170
0
                comp = 1;
4171
0
                compStr = "greater than";
4172
0
            }
4173
0
            else {
4174
0
                comp = 0;
4175
0
                compStr = "equal to";
4176
0
            }
4177
4178
0
            if (tsk_verbose)
4179
0
                tsk_fprintf(stderr,
4180
0
                    "hfs_load_extended_attrs: LEAF Record key file ID %"
4181
0
                    PRIu32 " is %s the desired file ID %" PRIu32 "\n",
4182
0
                    keyFileID, compStr, fileID);
4183
            // Are they the same?
4184
0
            if (comp == 0) {
4185
                // Yes, so load this attribute
4186
4187
0
                uint8_t *recData;       // pointer to the data part of the recordBytes
4188
0
                hfs_attr_data *attrData;
4189
0
                uint32_t attributeLength;
4190
0
                uint32_t nameLength;
4191
0
                uint32_t recordType;
4192
0
                uint16_t keyLength;
4193
0
                int conversionResult;
4194
0
                char nameBuff[HFS_MAX_ATTR_NAME_LEN_UTF8_B+1];
4195
0
                TSK_FS_ATTR_TYPE_ENUM attrType;
4196
0
                TSK_FS_ATTR *fs_attr;   // Points to the attribute to be loaded.
4197
4198
0
                keyLength = tsk_getu16(endian, keyB->key_len);
4199
                // make sure the fields we care about are still in the buffer
4200
                // +2 because key_len doesn't include its own length
4201
                // +16 for the amount of data we'll read from data
4202
0
                if ((attrFile.nodeSize < 2 + 16) || (keyLength > attrFile.nodeSize - 2 - 16) || (recOffset >= attrFile.nodeSize - 2 - 16 - keyLength)) {
4203
0
                    error_detected(TSK_ERR_FS_READ,
4204
0
                        "hfs_load_extended_attrs: Unable to process attribute");
4205
0
                    goto on_error;
4206
0
                }
4207
4208
0
                recData = &recordBytes[keyLength + 2];
4209
4210
                // Data must start on an even offset from the beginning of the record.
4211
                // So, correct this if needed.
4212
0
                if ((recData - recordBytes) % 2) {
4213
0
                    recData += 1;
4214
0
                }
4215
4216
0
                attrData = (hfs_attr_data *) recData;
4217
4218
                // Check we can process the record type before allocating memory
4219
0
                recordType = tsk_getu32(endian, attrData->record_type);
4220
0
                if (recordType != HFS_ATTR_RECORD_INLINE_DATA) {
4221
0
                  error_detected(TSK_ERR_FS_UNSUPTYPE,
4222
0
                      "hfs_load_extended_attrs: Unsupported record type: (%d)",
4223
0
                      recordType);
4224
0
                  goto on_error;
4225
0
                }
4226
4227
                // This is the length of the useful data, not including the record header
4228
0
                attributeLength = tsk_getu32(endian, attrData->attr_size);
4229
4230
                // Check the attribute fits in the node
4231
                //if (recordType != HFS_ATTR_RECORD_INLINE_DATA) {
4232
0
                if ((attributeLength > attrFile.nodeSize - 2 - 16 - keyLength) || (recOffset >= attrFile.nodeSize - 2 - 16 - keyLength - attributeLength)) {
4233
0
                    error_detected(TSK_ERR_FS_READ,
4234
0
                        "hfs_load_extended_attrs: Unable to process attribute");
4235
0
                    goto on_error;
4236
0
                }
4237
4238
                // attr_name_len is in UTF_16 chars
4239
0
                nameLength = tsk_getu16(endian, keyB->attr_name_len);
4240
0
                if (2 * nameLength > HFS_MAX_ATTR_NAME_LEN_UTF16_B) {
4241
0
                    error_detected(TSK_ERR_FS_CORRUPT,
4242
0
                        "hfs_load_extended_attrs: Name length in bytes (%d) > max name length in bytes (%d).",
4243
0
                        2*nameLength, HFS_MAX_ATTR_NAME_LEN_UTF16_B);
4244
0
                    goto on_error;
4245
0
                }
4246
4247
0
                if ((int32_t)(2*nameLength) > keyLength - 12) {
4248
0
                    error_detected(TSK_ERR_FS_CORRUPT,
4249
0
                        "hfs_load_extended_attrs: Name length in bytes (%d) > remaining struct length (%d).",
4250
0
                        2*nameLength, keyLength - 12);
4251
0
                    goto on_error;
4252
0
                }
4253
4254
0
                buffer = tsk_malloc(attributeLength);
4255
0
                if (buffer == NULL) {
4256
0
                    error_detected(TSK_ERR_AUX_MALLOC,
4257
0
                        "hfs_load_extended_attrs: Could not malloc space for the attribute.");
4258
0
                    goto on_error;
4259
0
                }
4260
4261
0
                memcpy(buffer, attrData->attr_data, attributeLength);
4262
4263
                // Use the "attr_name" part of the key as the attribute name
4264
                // but it must be converted to UTF8.  Unfortunately, there does not seem to
4265
                // be any easy way to determine how long the converted string will
4266
                // be because UTF8 is a variable length encoding. However, the longest
4267
                // it will be is 3 * the max number of UTF16 code units.  Add one for null
4268
                // termination.   (thanks Judson!)
4269
4270
4271
0
                conversionResult = hfs_UTF16toUTF8(fs, keyB->attr_name,
4272
0
                    nameLength, nameBuff, HFS_MAX_ATTR_NAME_LEN_UTF8_B+1, 0);
4273
0
                if (conversionResult != 0) {
4274
0
                    error_returned
4275
0
                        ("-- hfs_load_extended_attrs could not convert the attr_name in the btree key into a UTF8 attribute name");
4276
0
                    goto on_error;
4277
0
                }
4278
4279
                // What is the type of this attribute?  If it is a compression record, then
4280
                // use TSK_FS_ATTR_TYPE_HFS_COMP_REC.  Else, use TSK_FS_ATTR_TYPE_HFS_EXT_ATTR.
4281
                // Only the "inline data" kind of record is handled.
4282
0
                if (strcmp(nameBuff, "com.apple.decmpfs") == 0 &&
4283
0
                    tsk_getu32(endian, attrData->record_type) == HFS_ATTR_RECORD_INLINE_DATA) {
4284
                    // Now, look at the compression record
4285
0
                    DECMPFS_DISK_HEADER *cmph = (DECMPFS_DISK_HEADER *) buffer;
4286
0
                    *cmpType =
4287
0
                        tsk_getu32(TSK_LIT_ENDIAN, cmph->compression_type);
4288
0
                    uint64_t uncSize = tsk_getu64(TSK_LIT_ENDIAN,
4289
0
                        cmph->uncompressed_size);
4290
4291
0
                    if (tsk_verbose)
4292
0
                        tsk_fprintf(stderr,
4293
0
                            "hfs_load_extended_attrs: This attribute is a compression record.\n");
4294
4295
0
                    attrType = TSK_FS_ATTR_TYPE_HFS_COMP_REC;
4296
0
                    *isCompressed = TRUE;       // The data is governed by a compression record (but might not be compressed)
4297
0
                    *uncompressedSize = uncSize;
4298
4299
0
                    switch (*cmpType) {
4300
                    // Data is inline. We will load the uncompressed
4301
                    // data as a resident attribute.
4302
0
                    case DECMPFS_TYPE_ZLIB_ATTR:
4303
0
                        if (!decmpfs_file_read_zlib_attr(
4304
0
                                fs_file, buffer, attributeLength, uncSize))
4305
0
                        {
4306
0
                            goto on_error;
4307
0
                        }
4308
0
                        break;
4309
4310
0
                    case DECMPFS_TYPE_LZVN_ATTR:
4311
0
                        if (!decmpfs_file_read_lzvn_attr(
4312
0
                                fs_file, buffer, attributeLength, uncSize))
4313
0
                        {
4314
0
                            goto on_error;
4315
0
                        }
4316
0
                        break;
4317
4318
                    // Data is compressed in the resource fork
4319
0
                    case DECMPFS_TYPE_ZLIB_RSRC:
4320
0
                    case DECMPFS_TYPE_LZVN_RSRC:
4321
0
                        if (tsk_verbose)
4322
0
                            tsk_fprintf(stderr,
4323
0
                                "%s: Compressed data is in the file Resource Fork.\n", __func__);
4324
0
                        break;
4325
0
                    }
4326
0
                }
4327
0
                else {          // Attribute name is NOT com.apple.decmpfs
4328
0
                    attrType = TSK_FS_ATTR_TYPE_HFS_EXT_ATTR;
4329
0
                }               // END if attribute name is com.apple.decmpfs  ELSE clause
4330
4331
0
                if ((fs_attr =
4332
0
                        tsk_fs_attrlist_getnew(fs_file->meta->attr,
4333
0
                            TSK_FS_ATTR_RES)) == NULL) {
4334
0
                    error_returned(" - hfs_load_extended_attrs");
4335
0
                    goto on_error;
4336
0
                }
4337
4338
0
                if (tsk_verbose) {
4339
0
                    tsk_fprintf(stderr,
4340
0
                        "hfs_load_extended_attrs: loading attribute %s, type %u (%s)\n",
4341
0
                        nameBuff, (uint32_t) attrType,
4342
0
                        hfs_attrTypeName((uint32_t) attrType));
4343
0
                }
4344
4345
                // set the details in the fs_attr structure
4346
0
                if (tsk_fs_attr_set_str(fs_file, fs_attr, nameBuff,
4347
0
                        attrType, attribute_counter, buffer,
4348
0
                        attributeLength)) {
4349
0
                    error_returned(" - hfs_load_extended_attrs");
4350
0
                    goto on_error;
4351
0
                }
4352
4353
0
                free(buffer);
4354
0
                buffer = NULL;
4355
4356
0
                ++attribute_counter;
4357
0
            }                   // END if comp == 0
4358
0
            if (comp == 1) {
4359
                // since this record key is greater than our search key, all
4360
                // subsequent records will also be greater.
4361
0
                done = TRUE;
4362
0
                break;
4363
0
            }
4364
0
        }                       // END loop over records in one LEAF node
4365
4366
        /*
4367
         * We get to this point if either:
4368
         *
4369
         * 1. We finish the loop over records and we are still loading attributes
4370
         *    for the given file.  In this case we are NOT done, and must read in
4371
         *    the next leaf node, and process its records.  The following code
4372
         *    loads the next leaf node before we return to the top of the loop.
4373
         *
4374
         * 2. We "broke" out of the loop over records because we found a key that
4375
         *    whose file ID is greater than the one we are working on.  In that case
4376
         *    we are done.  The following code does not run, and we exit the
4377
         *    while loop over successive leaf nodes.
4378
         */
4379
4380
0
        if (!done) {
4381
            // We did not finish loading the attributes when we got to the end of that node,
4382
            // so we must get the next node, and continue.
4383
4384
            // First determine the nodeID of the next LEAF node
4385
0
            uint32_t newNodeID = tsk_getu32(endian, nodeDescriptor->flink);
4386
4387
            //fprintf(stdout, "Next Node ID = %u\n",  newNodeID);
4388
0
            if (tsk_verbose)
4389
0
                tsk_fprintf(stderr,
4390
0
                    "hfs_load_extended_attrs: Processed last record of THIS node, still gathering attributes.\n");
4391
4392
            // If we are at the very last leaf node in the btree, then
4393
            // this "flink" will be zero.  We break out of this loop over LEAF nodes.
4394
0
            if (newNodeID == 0) {
4395
0
                if (tsk_verbose)
4396
0
                    tsk_fprintf(stderr,
4397
0
                        "hfs_load_extended_attrs: But, there are no more leaf nodes, so we are done.\n");
4398
0
                break;
4399
0
            }
4400
4401
0
            if (tsk_verbose)
4402
0
                tsk_fprintf(stderr,
4403
0
                    "hfs_load_extended_attrs: Reading the next LEAF node %"
4404
0
                    PRIu32 ".\n", nodeID);
4405
4406
0
            nodeID = newNodeID;
4407
4408
0
            cnt = tsk_fs_file_read(attrFile.file,
4409
0
                nodeID * attrFile.nodeSize,
4410
0
                (char *) nodeData,
4411
0
                attrFile.nodeSize, (TSK_FS_FILE_READ_FLAG_ENUM) 0);
4412
0
            if (cnt != (ssize_t)attrFile.nodeSize) {
4413
0
                error_returned
4414
0
                    ("hfs_load_extended_attrs: Could not read in the next LEAF node from the Attributes File btree");
4415
0
                goto on_error;
4416
0
            }
4417
4418
            // Parse the Node header
4419
0
            nodeDescriptor = (hfs_btree_node *) nodeData;
4420
4421
            // If this is NOT a leaf node, then this is an error
4422
0
            if (nodeDescriptor->type != HFS_ATTR_NODE_LEAF) {
4423
0
                error_detected(TSK_ERR_FS_CORRUPT,
4424
0
                    "hfs_load_extended_attrs: found a non-LEAF node as a successor to a LEAF node");
4425
0
                goto on_error;
4426
0
            }
4427
0
        }                       // END if(! done)
4428
4429
4430
4431
0
    }                           // END while(! done)  loop over successive LEAF nodes
4432
4433
0
on_exit:
4434
0
    free(nodeData);
4435
0
    tsk_list_free(nodeIDs_processed);
4436
0
    close_attr_file(&attrFile);
4437
0
    return 0;
4438
4439
0
on_error:
4440
0
    free(buffer);
4441
0
    free(nodeData);
4442
0
    tsk_list_free(nodeIDs_processed);
4443
0
    close_attr_file(&attrFile);
4444
0
    return 1;
4445
0
}
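/* A minimal usage sketch (hypothetical, not part of hfs.c): once
 * hfs_load_extended_attrs() has populated fs_file->meta->attr, the loaded
 * attributes can be enumerated with TSK's generic attribute accessors.  The
 * helper name dump_hfs_xattrs() is invented for illustration; it assumes the
 * public tsk_fs_file_attr_getsize() / tsk_fs_file_attr_get_idx() API.
 */
static void
dump_hfs_xattrs(TSK_FS_FILE * fs_file)
{
    int cnt = tsk_fs_file_attr_getsize(fs_file);
    int i;

    for (i = 0; i < cnt; i++) {
        const TSK_FS_ATTR *a = tsk_fs_file_attr_get_idx(fs_file, i);
        if (a == NULL)
            continue;

        // Report only the attributes that came from the Attributes File btree
        if ((a->type == TSK_FS_ATTR_TYPE_HFS_EXT_ATTR)
            || (a->type == TSK_FS_ATTR_TYPE_HFS_COMP_REC)) {
            tsk_fprintf(stderr, "xattr %s: type %u, %" PRIu64 " bytes\n",
                a->name ? a->name : "<unnamed>",
                (uint32_t) a->type, (uint64_t) a->size);
        }
    }
}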
4446
4447
typedef struct RES_DESCRIPTOR {
4448
    char type[5];               // type is really 4 chars, but we will null-terminate
4449
    uint16_t id;
4450
    uint32_t offset;
4451
    uint32_t length;
4452
    char *name;                 // NULL if a name is not defined for this resource
4453
    struct RES_DESCRIPTOR *next;
4454
} RES_DESCRIPTOR;
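/* Illustration (hypothetical helper, not part of hfs.c): hfs_parse_resource_fork()
 * below returns a NULL-terminated linked list of these descriptors, so finding a
 * resource of a particular 4-character type is a simple list walk.  The resource-fork
 * variants of decmpfs compression, for instance, keep their payload in a resource
 * whose type is "cmpf".
 */
static RES_DESCRIPTOR *
find_resource_by_type(RES_DESCRIPTOR * list, const char *type)
{
    RES_DESCRIPTOR *curr;

    for (curr = list; curr != NULL; curr = curr->next) {
        // type[] is NUL-terminated by hfs_parse_resource_fork()
        if (strcmp(curr->type, type) == 0)
            return curr;
    }
    return NULL;
}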
4455
4456
void
4457
free_res_descriptor(RES_DESCRIPTOR * rd)
4458
0
{
4459
0
    RES_DESCRIPTOR *nxt;
4460
4461
0
    if (rd == NULL)
4462
0
        return;
4463
0
    nxt = rd->next;
4464
0
    free(rd->name);
4465
0
    free(rd);
4466
0
    free_res_descriptor(nxt);   // tail recursive
4467
0
}
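/* Design note: free_res_descriptor() above is tail-recursive, but C compilers
 * are not required to turn tail calls into jumps, so an extremely long resource
 * list could in principle deepen the call stack.  A hypothetical iterative
 * equivalent (a sketch, not the code TSK uses) would be:
 */
static void
free_res_descriptor_iterative(RES_DESCRIPTOR * rd)
{
    while (rd != NULL) {
        RES_DESCRIPTOR *nxt = rd->next;
        free(rd->name);         // name may be NULL; free(NULL) is a no-op
        free(rd);
        rd = nxt;
    }
}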
4468
4469
/**
4470
 * The purpose of this function is to parse the resource fork of a file, and to return
4471
 * a data structure that is, in effect, a table of contents for the resource fork.  The
4472
 * data structure is a null-terminated linked list of entries.  Each one describes one
4473
 * resource.  If the resource fork is empty, or if there is not a resource fork at all,
4474
 * or an error occurs, this function returns NULL.
4475
 *
4476
 * A non-NULL answer should be freed by the caller, using free_res_descriptor.
4477
 *
4478
 */
4479
4480
static RES_DESCRIPTOR *
4481
hfs_parse_resource_fork(TSK_FS_FILE * fs_file)
4482
0
{
4483
4484
0
    RES_DESCRIPTOR *result = NULL;
4485
0
    RES_DESCRIPTOR *last = NULL;
4486
0
    TSK_FS_INFO *fs_info;
4487
0
    hfs_fork *fork_info;
4488
0
    hfs_fork *resForkInfo;
4489
0
    uint64_t resSize;
4490
0
    const TSK_FS_ATTR *rAttr;
4491
0
    hfs_resource_fork_header rfHeader;
4492
0
    hfs_resource_fork_header *resHead;
4493
0
    uint32_t dataOffset;
4494
0
    uint32_t mapOffset;
4495
0
    uint32_t mapLength;
4496
0
    char *map;
4497
0
    ssize_t attrReadResult;
4498
0
    ssize_t attrReadResult1;
4499
0
    ssize_t attrReadResult2;
4500
0
    hfs_resource_fork_map_header *mapHdr;
4501
0
    uint16_t typeListOffset;
4502
0
    uint16_t nameListOffset;
4503
0
    unsigned char hasNameList;
4504
0
    char *nameListBegin = NULL;
4505
0
    hfs_resource_type_list *typeList;
4506
0
    uint16_t numTypes;
4507
0
    hfs_resource_type_list_item *tlItem;
4508
0
    int mindx;                  // index for looping over resource types
4509
4510
0
    if (fs_file == NULL) {
4511
0
        error_detected(TSK_ERR_FS_ARG,
4512
0
            "hfs_parse_resource_fork: null fs_file");
4513
0
        return NULL;
4514
0
    }
4515
4516
4517
0
    if (fs_file->meta == NULL) {
4518
0
        error_detected(TSK_ERR_FS_ARG,
4519
0
            "hfs_parse_resource_fork: fs_file has null metadata");
4520
0
        return NULL;
4521
0
    }
4522
4523
0
    if (fs_file->meta->content_ptr == NULL) {
4524
0
        if (tsk_verbose)
4525
0
            fprintf(stderr,
4526
0
                "hfs_parse_resource_fork: fs_file has null fork data structures, so no resources.\n");
4527
0
        return NULL;
4528
0
    }
4529
4530
    // Extract the fs
4531
0
    fs_info = fs_file->fs_info;
4532
0
    if (fs_info == NULL) {
4533
0
        error_detected(TSK_ERR_FS_ARG,
4534
0
            "hfs_parse_resource_fork: null fs within fs_info");
4535
0
        return NULL;
4536
0
    }
4537
4538
    // Try to look at the Resource Fork for an HFS+ file
4539
    // Should be able to cast this to hfs_fork *
4540
0
    fork_info = (hfs_fork *) fs_file->meta->content_ptr;        // The data fork
4541
    // The resource fork is the second one.
4542
0
    resForkInfo = &fork_info[1];
4543
0
    resSize = tsk_getu64(fs_info->endian, resForkInfo->logic_sz);
4544
    //uint32_t numBlocks = tsk_getu32(fs_info->endian, resForkInfo->total_blk);
4545
    //uint32_t clmpSize = tsk_getu32(fs_info->endian, resForkInfo->clmp_sz);
4546
4547
    // Hmm, certainly no resources here!
4548
0
    if (resSize == 0) {
4549
0
        return NULL;
4550
0
    }
4551
4552
    // OK, resource size must be > 0
4553
4554
    // find the attribute for the resource fork
4555
0
    rAttr =
4556
0
        tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_HFS_RSRC,
4557
0
        HFS_FS_ATTR_ID_RSRC, TRUE);
4558
4559
4560
0
    if (rAttr == NULL) {
4561
0
        error_returned
4562
0
            ("hfs_parse_resource_fork: could not get the resource fork attribute");
4563
0
        return NULL;
4564
0
    }
4565
4566
    // JUST read the resource fork header
4567
4568
4569
0
    attrReadResult1 =
4570
0
        tsk_fs_attr_read(rAttr, 0, (char *) &rfHeader,
4571
0
        sizeof(hfs_resource_fork_header), TSK_FS_FILE_READ_FLAG_NONE);
4572
4573
0
    if (attrReadResult1 < 0
4574
0
        || attrReadResult1 != sizeof(hfs_resource_fork_header)) {
4575
0
        error_returned
4576
0
            (" hfs_parse_resource_fork: trying to read the resource fork header");
4577
0
        return NULL;
4578
0
    }
4579
4580
    // Begin to parse the resource fork
4581
0
    resHead = &rfHeader;
4582
0
    dataOffset = tsk_getu32(fs_info->endian, resHead->dataOffset);
4583
0
    mapOffset = tsk_getu32(fs_info->endian, resHead->mapOffset);
4584
    //uint32_t dataLength = tsk_getu32(fs_info->endian, resHead->dataLength);
4585
0
    mapLength = tsk_getu32(fs_info->endian, resHead->mapLength);
4586
4587
    // Read in the WHOLE map
4588
0
    map = (char *) tsk_malloc(mapLength);
4589
0
    if (map == NULL) {
4590
0
        error_returned
4591
0
            ("- hfs_parse_resource_fork: could not allocate space for the resource fork map");
4592
0
        return NULL;
4593
0
    }
4594
4595
0
    attrReadResult =
4596
0
        tsk_fs_attr_read(rAttr, (uint64_t) mapOffset, map,
4597
0
        (size_t) mapLength, TSK_FS_FILE_READ_FLAG_NONE);
4598
4599
0
    if (attrReadResult < 0 || attrReadResult != (ssize_t) mapLength) {
4600
0
        error_returned
4601
0
            ("- hfs_parse_resource_fork: could not read the map");
4602
0
        free(map);
4603
0
        return NULL;
4604
0
    }
4605
4606
0
    mapHdr = (hfs_resource_fork_map_header *) map;
4607
4608
0
    typeListOffset = tsk_getu16(fs_info->endian, mapHdr->typeListOffset);
4609
4610
0
    nameListOffset = tsk_getu16(fs_info->endian, mapHdr->nameListOffset);
4611
4612
0
    if (nameListOffset >= mapLength || nameListOffset == 0) {
4613
0
        hasNameList = FALSE;
4614
0
    }
4615
0
    else {
4616
0
        hasNameList = TRUE;
4617
0
        nameListBegin = map + nameListOffset;
4618
0
    }
4619
4620
0
    typeList = (hfs_resource_type_list *) (map + typeListOffset);
4621
0
    numTypes = tsk_getu16(fs_info->endian, typeList->typeCount) + 1;
4622
4623
0
    for (mindx = 0; mindx < numTypes; ++mindx) {
4624
0
        uint16_t numRes;
4625
0
        uint16_t refOff;
4626
0
        int pindx;              // index for looping over resources
4627
0
        uint16_t rID;
4628
0
        uint32_t rOffset;
4629
4630
0
        tlItem = &(typeList->type[mindx]);
4631
0
        numRes = tsk_getu16(fs_info->endian, tlItem->count) + 1;
4632
0
        refOff = tsk_getu16(fs_info->endian, tlItem->offset);
4633
4634
4635
0
        for (pindx = 0; pindx < numRes; ++pindx) {
4636
0
            int16_t nameOffset;
4637
0
            char *nameBuffer;
4638
0
            RES_DESCRIPTOR *rsrc;
4639
0
            char lenBuff[4];    // first 4 bytes of a resource encodes its length
4640
0
            uint32_t rLen;      // Resource length
4641
4642
0
            hfs_resource_refListItem *item =
4643
0
                ((hfs_resource_refListItem *) (((uint8_t *) typeList) +
4644
0
                    refOff)) + pindx;
4645
0
            nameOffset = tsk_gets16(fs_info->endian, item->resNameOffset);
4646
0
            nameBuffer = NULL;
4647
4648
0
            if (hasNameList && nameOffset != -1) {
4649
0
                char *name = nameListBegin + nameOffset;
4650
0
                uint8_t nameLen = (uint8_t) name[0];
4651
0
                nameBuffer = tsk_malloc(nameLen + 1);
4652
0
                if (nameBuffer == NULL) {
4653
0
                    error_returned
4654
0
                        ("hfs_parse_resource_fork: allocating space for the name of a resource");
4655
0
                    free_res_descriptor(result);
4656
0
                    return NULL;
4657
0
                }
4658
0
                memcpy(nameBuffer, name + 1, nameLen);
4659
0
                nameBuffer[nameLen] = (char) 0;
4660
0
            }
4661
0
            else {
4662
0
                nameBuffer = tsk_malloc(7);
4663
0
                if (nameBuffer == NULL) {
4664
0
                    error_returned
4665
0
                        ("hfs_parse_resource_fork: allocating space for the (null) name of a resource");
4666
0
                    free_res_descriptor(result);
4667
0
                    return NULL;
4668
0
                }
4669
0
                memcpy(nameBuffer, "<none>", 6);
4670
0
                nameBuffer[6] = (char) 0;
4671
0
            }
4672
4673
0
            rsrc = (RES_DESCRIPTOR *) tsk_malloc(sizeof(RES_DESCRIPTOR));
4674
0
            if (rsrc == NULL) {
4675
0
                error_returned
4676
0
                    ("hfs_parse_resource_fork: space for a resource descriptor");
4677
0
                free_res_descriptor(result);
4678
0
                return NULL;
4679
0
            }
4680
4681
            // Build the linked list
4682
0
            if (result == NULL)
4683
0
                result = rsrc;
4684
0
            if (last != NULL)
4685
0
                last->next = rsrc;
4686
0
            last = rsrc;
4687
0
            rsrc->next = NULL;
4688
4689
0
            rID = tsk_getu16(fs_info->endian, item->resID);
4690
0
            rOffset =
4691
0
                tsk_getu24(fs_info->endian,
4692
0
                item->resDataOffset) + dataOffset;
4693
4694
            // Just read the first four bytes of the resource to get its length.  It MUST
4695
            // be at least 4 bytes long
4696
0
            attrReadResult2 = tsk_fs_attr_read(rAttr, (uint64_t) rOffset,
4697
0
                lenBuff, (size_t) 4, TSK_FS_FILE_READ_FLAG_NONE);
4698
4699
0
            if (attrReadResult2 != 4) {
4700
0
                error_returned
4701
0
                    ("- hfs_parse_resource_fork: could not read the 4-byte length at beginning of resource");
4702
0
                free_res_descriptor(result);
4703
0
                return NULL;
4704
0
            }
4705
0
            rLen = tsk_getu32(TSK_BIG_ENDIAN, lenBuff); //TODO
4706
4707
0
            rsrc->id = rID;
4708
0
            rsrc->offset = rOffset + 4;
4709
0
            memcpy(rsrc->type, tlItem->type, 4);
4710
0
            rsrc->type[4] = (char) 0;
4711
0
            rsrc->length = rLen;
4712
0
            rsrc->name = nameBuffer;
4713
4714
0
        }                       // END loop over resources of one type
4715
4716
0
    }                           // END loop over resource types
4717
4718
0
    return result;
4719
0
}
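/* A minimal usage sketch (hypothetical, for illustration only): build the
 * table of contents for a file's resource fork, list each resource, and then
 * release the list with free_res_descriptor().  Note that rsrc->offset and
 * rsrc->length describe the resource payload itself; the 4-byte length word
 * at the start of each resource has already been skipped above.
 */
static void
list_resources(TSK_FS_FILE * fs_file)
{
    RES_DESCRIPTOR *resources = hfs_parse_resource_fork(fs_file);
    RES_DESCRIPTOR *res;

    if (resources == NULL)
        return;                 // empty fork, no resource fork, or an error

    for (res = resources; res != NULL; res = res->next) {
        tsk_fprintf(stderr,
            "resource type %s, id %u, name %s, %" PRIu32
            " bytes at offset %" PRIu32 "\n",
            res->type, (unsigned int) res->id, res->name,
            res->length, res->offset);
    }

    free_res_descriptor(resources);
}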
4720
4721
4722
static uint8_t
4723
hfs_load_attrs(TSK_FS_FILE * fs_file)
4724
0
{
4725
0
    TSK_FS_INFO *fs;
4726
0
    HFS_INFO *hfs;
4727
0
    TSK_FS_ATTR *fs_attr;
4728
0
    TSK_FS_ATTR_RUN *attr_run;
4729
0
    hfs_fork *forkx;
4730
0
    unsigned char resource_fork_has_contents = FALSE;
4731
0
    unsigned char compression_flag = FALSE;
4732
0
    unsigned char isCompressed = FALSE;
4733
0
    unsigned char compDataInRSRCFork = FALSE;
4734
0
    unsigned char cmpType = 0;
4735
0
    uint64_t uncompressedSize;
4736
0
    uint64_t logicalSize;       // of a fork
4737
4738
    // clean up any error messages that are lying around
4739
0
    tsk_error_reset();
4740