/*
 * A wrapper around cbtree which stores oids
 * May be used to replace oid-array for prefix (abbreviation) matches
 */
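
/*
 * Illustrative usage sketch (not part of the original file): a caller
 * owns the oidtree, fills in object_id values, and inserts them; every
 * entry lives in the tree's mem_pool until oidtree_clear(). The helper
 * names below are hypothetical.
 *
 *	struct oidtree ot;
 *	struct object_id oid;
 *
 *	oidtree_init(&ot);
 *	read_some_oid(&oid);		// hypothetical source of an oid
 *	oidtree_insert(&ot, &oid);	// BUG()s unless oid.algo is set
 *	if (oidtree_contains(&ot, &oid))
 *		handle_match(&oid);	// hypothetical
 *	oidtree_clear(&ot);		// discards all inserted entries
 */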
#include "git-compat-util.h"
#include "oidtree.h"
#include "hash.h"

struct oidtree_iter_data {
	oidtree_iter fn;
	void *arg;
	size_t *last_nibble_at;
	int algo;
	uint8_t last_byte;
};

void oidtree_init(struct oidtree *ot)
{
	cb_init(&ot->tree);
	mem_pool_init(&ot->mem_pool, 0);
}

void oidtree_clear(struct oidtree *ot)
{
	if (ot) {
		mem_pool_discard(&ot->mem_pool, 0);
		oidtree_init(ot);
	}
}

void oidtree_insert(struct oidtree *ot, const struct object_id *oid)
{
	struct cb_node *on;
	struct object_id k;

	if (!oid->algo)
		BUG("oidtree_insert requires oid->algo");

	on = mem_pool_alloc(&ot->mem_pool, sizeof(*on) + sizeof(*oid));

	/*
	 * Clear the padding and copy the result in separate steps to
	 * respect the 4-byte alignment needed by struct object_id.
	 */
	oidcpy(&k, oid);
	memcpy(on->k, &k, sizeof(k));

	/*
	 * n.b. Current callers won't get us duplicates, here. If a
	 * future caller causes duplicates, there'll be a small leak
	 * that won't be freed until oidtree_clear. Currently it's not
	 * worth maintaining a free list.
	 */
	cb_insert(&ot->tree, on, sizeof(*oid));
}

int oidtree_contains(struct oidtree *ot, const struct object_id *oid)
{
	struct object_id k;
	size_t klen = sizeof(k);

	oidcpy(&k, oid);

	if (oid->algo == GIT_HASH_UNKNOWN)
		klen -= sizeof(oid->algo);

	/* cb_lookup relies on memcmp on the struct, so order matters: */
	klen += BUILD_ASSERT_OR_ZERO(offsetof(struct object_id, hash) <
			offsetof(struct object_id, algo));

	return cb_lookup(&ot->tree, (const uint8_t *)&k, klen) ? 1 : 0;
}
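
/*
 * Illustrative lookup sketch (not part of the original file): leaving
 * the probe's algo as GIT_HASH_UNKNOWN shortens the key so the algo
 * byte is excluded from the comparison and a hash match stored under
 * any algorithm counts. "candidate" is a hypothetical oid pointer.
 *
 *	struct object_id probe = *candidate;
 *
 *	probe.algo = GIT_HASH_UNKNOWN;
 *	if (oidtree_contains(&ot, &probe))
 *		;	// hash present, regardless of which algo stored it
 */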

static enum cb_next iter(struct cb_node *n, void *arg)
{
	struct oidtree_iter_data *x = arg;
	struct object_id k;

	/* Copy to provide 4-byte alignment needed by struct object_id. */
	memcpy(&k, n->k, sizeof(k));

	if (x->algo != GIT_HASH_UNKNOWN && x->algo != k.algo)
		return CB_CONTINUE;

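	/*
	 * last_nibble_at is set when the hex prefix had an odd number of
	 * digits: its final digit occupies only the high nibble of the
	 * byte at this index, so compare just that half byte.
	 */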
	if (x->last_nibble_at) {
		if ((k.hash[*x->last_nibble_at] ^ x->last_byte) & 0xf0)
			return CB_CONTINUE;
	}

	return x->fn(&k, x->arg);
}

void oidtree_each(struct oidtree *ot, const struct object_id *oid,
		  size_t oidhexsz, oidtree_iter fn, void *arg)
{
	size_t klen = oidhexsz / 2;
	struct oidtree_iter_data x = { 0 };
	assert(oidhexsz <= GIT_MAX_HEXSZ);

	x.fn = fn;
	x.arg = arg;
	x.algo = oid->algo;
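	/*
	 * For an odd number of hex digits, the trailing digit is the
	 * high nibble of hash[klen]; record it so iter() can filter on
	 * that half byte after the full-byte prefix walk.
	 */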
	if (oidhexsz & 1) {
		x.last_byte = oid->hash[klen];
		x.last_nibble_at = &klen;
	}
	cb_each(&ot->tree, (const uint8_t *)oid, klen, iter, &x);
}
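
/*
 * Illustrative prefix-iteration sketch (not part of the original file):
 * count every stored oid matching an abbreviated hex prefix. The
 * callback and the literal prefix bytes below are hypothetical; a real
 * caller would parse the abbreviation into prefix.hash first.
 *
 *	static enum cb_next count_match(const struct object_id *oid, void *data)
 *	{
 *		unsigned *hits = data;
 *		(*hits)++;
 *		return CB_CONTINUE;
 *	}
 *
 *	// "badc0" is 5 hex digits: 2 full bytes plus one high nibble
 *	struct object_id prefix = { 0 };
 *	unsigned hits = 0;
 *
 *	prefix.hash[0] = 0xba;
 *	prefix.hash[1] = 0xdc;
 *	prefix.hash[2] = 0x00;	// only the high nibble is compared
 *	prefix.algo = GIT_HASH_SHA1;
 *	oidtree_each(&ot, &prefix, 5, count_match, &hits);
 */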