/src/git/refs/ref-cache.c
Line | Count | Source (jump to first uncovered line) |
1 | | #include "../git-compat-util.h" |
2 | | #include "../hash.h" |
3 | | #include "../refs.h" |
4 | | #include "../repository.h" |
5 | | #include "refs-internal.h" |
6 | | #include "ref-cache.h" |
7 | | #include "../iterator.h" |
8 | | |
9 | | void add_entry_to_dir(struct ref_dir *dir, struct ref_entry *entry) |
10 | 0 | { |
11 | 0 | ALLOC_GROW(dir->entries, dir->nr + 1, dir->alloc); |
12 | 0 | dir->entries[dir->nr++] = entry; |
13 | | /* optimize for the case that entries are added in order */ |
14 | 0 | if (dir->nr == 1 || |
15 | 0 | (dir->nr == dir->sorted + 1 && |
16 | 0 | strcmp(dir->entries[dir->nr - 2]->name, |
17 | 0 | dir->entries[dir->nr - 1]->name) < 0)) |
18 | 0 | dir->sorted = dir->nr; |
19 | 0 | } |
20 | | |
21 | | struct ref_dir *get_ref_dir(struct ref_entry *entry) |
22 | 0 | { |
23 | 0 | struct ref_dir *dir; |
24 | 0 | assert(entry->flag & REF_DIR); |
25 | 0 | dir = &entry->u.subdir; |
26 | 0 | if (entry->flag & REF_INCOMPLETE) { |
27 | 0 | if (!dir->cache->fill_ref_dir) |
28 | 0 | BUG("incomplete ref_store without fill_ref_dir function"); |
29 | | |
30 | 0 | dir->cache->fill_ref_dir(dir->cache->ref_store, dir, entry->name); |
31 | 0 | entry->flag &= ~REF_INCOMPLETE; |
32 | 0 | } |
33 | 0 | return dir; |
34 | 0 | } |
35 | | |
36 | | struct ref_entry *create_ref_entry(const char *refname, |
37 | | const char *referent, |
38 | | const struct object_id *oid, int flag) |
39 | 0 | { |
40 | 0 | struct ref_entry *ref; |
41 | |
|
42 | 0 | FLEX_ALLOC_STR(ref, name, refname); |
43 | 0 | oidcpy(&ref->u.value.oid, oid); |
44 | 0 | ref->flag = flag; |
45 | 0 | ref->u.value.referent = xstrdup_or_null(referent); |
46 | |
|
47 | 0 | return ref; |
48 | 0 | } |
49 | | |
50 | | struct ref_cache *create_ref_cache(struct ref_store *refs, |
51 | | fill_ref_dir_fn *fill_ref_dir) |
52 | 0 | { |
53 | 0 | struct ref_cache *ret = xcalloc(1, sizeof(*ret)); |
54 | |
|
55 | 0 | ret->ref_store = refs; |
56 | 0 | ret->fill_ref_dir = fill_ref_dir; |
57 | 0 | ret->root = create_dir_entry(ret, "", 0); |
58 | 0 | return ret; |
59 | 0 | } |
60 | | |
61 | | static void clear_ref_dir(struct ref_dir *dir); |
62 | | |
/*
 * Free `entry` and, if it is a directory entry, everything beneath it.
 */
static void free_ref_entry(struct ref_entry *entry)
{
	if (entry->flag & REF_DIR) {
		/*
		 * Do not use get_ref_dir() here, as that might
		 * trigger the reading of loose refs.
		 */
		clear_ref_dir(&entry->u.subdir);
	}
	/*
	 * NOTE(review): for REF_DIR entries, u.value.referent aliases the
	 * u.subdir storage of the union; this unconditional free() presumably
	 * relies on the aliased word being NULL because the entry was
	 * zero-initialized at allocation — confirm against the FLEX_ALLOC_*
	 * semantics before touching this.
	 */
	free(entry->u.value.referent);
	free(entry);
}
75 | | |
76 | | void free_ref_cache(struct ref_cache *cache) |
77 | 0 | { |
78 | 0 | if (!cache) |
79 | 0 | return; |
80 | 0 | free_ref_entry(cache->root); |
81 | 0 | free(cache); |
82 | 0 | } |
83 | | |
84 | | /* |
85 | | * Clear and free all entries in dir, recursively. |
86 | | */ |
87 | | static void clear_ref_dir(struct ref_dir *dir) |
88 | 0 | { |
89 | 0 | int i; |
90 | 0 | for (i = 0; i < dir->nr; i++) |
91 | 0 | free_ref_entry(dir->entries[i]); |
92 | 0 | FREE_AND_NULL(dir->entries); |
93 | 0 | dir->sorted = dir->nr = dir->alloc = 0; |
94 | 0 | } |
95 | | |
96 | | struct ref_entry *create_dir_entry(struct ref_cache *cache, |
97 | | const char *dirname, size_t len) |
98 | 0 | { |
99 | 0 | struct ref_entry *direntry; |
100 | |
|
101 | 0 | FLEX_ALLOC_MEM(direntry, name, dirname, len); |
102 | 0 | direntry->u.subdir.cache = cache; |
103 | 0 | direntry->flag = REF_DIR | REF_INCOMPLETE; |
104 | 0 | return direntry; |
105 | 0 | } |
106 | | |
107 | | static int ref_entry_cmp(const void *a, const void *b) |
108 | 0 | { |
109 | 0 | struct ref_entry *one = *(struct ref_entry **)a; |
110 | 0 | struct ref_entry *two = *(struct ref_entry **)b; |
111 | 0 | return strcmp(one->name, two->name); |
112 | 0 | } |
113 | | |
114 | | static void sort_ref_dir(struct ref_dir *dir); |
115 | | |
/*
 * A length-delimited (not necessarily NUL-terminated) view into a
 * string; used as the bsearch() key when looking up a refname
 * fragment in search_ref_dir().
 */
struct string_slice {
	size_t len;
	const char *str;
};
120 | | |
121 | | static int ref_entry_cmp_sslice(const void *key_, const void *ent_) |
122 | 0 | { |
123 | 0 | const struct string_slice *key = key_; |
124 | 0 | const struct ref_entry *ent = *(const struct ref_entry * const *)ent_; |
125 | 0 | int cmp = strncmp(key->str, ent->name, key->len); |
126 | 0 | if (cmp) |
127 | 0 | return cmp; |
128 | 0 | return '\0' - (unsigned char)ent->name[key->len]; |
129 | 0 | } |
130 | | |
131 | | int search_ref_dir(struct ref_dir *dir, const char *refname, size_t len) |
132 | 0 | { |
133 | 0 | struct ref_entry **r; |
134 | 0 | struct string_slice key; |
135 | |
|
136 | 0 | if (refname == NULL || !dir->nr) |
137 | 0 | return -1; |
138 | | |
139 | 0 | sort_ref_dir(dir); |
140 | 0 | key.len = len; |
141 | 0 | key.str = refname; |
142 | 0 | r = bsearch(&key, dir->entries, dir->nr, sizeof(*dir->entries), |
143 | 0 | ref_entry_cmp_sslice); |
144 | |
|
145 | 0 | if (!r) |
146 | 0 | return -1; |
147 | | |
148 | 0 | return r - dir->entries; |
149 | 0 | } |
150 | | |
151 | | /* |
152 | | * Search for a directory entry directly within dir (without |
153 | | * recursing). Sort dir if necessary. subdirname must be a directory |
154 | | * name (i.e., end in '/'). Returns NULL if the desired |
155 | | * directory cannot be found. dir must already be complete. |
156 | | */ |
157 | | static struct ref_dir *search_for_subdir(struct ref_dir *dir, |
158 | | const char *subdirname, size_t len) |
159 | 0 | { |
160 | 0 | int entry_index = search_ref_dir(dir, subdirname, len); |
161 | 0 | struct ref_entry *entry; |
162 | |
|
163 | 0 | if (entry_index == -1) |
164 | 0 | return NULL; |
165 | | |
166 | 0 | entry = dir->entries[entry_index]; |
167 | 0 | return get_ref_dir(entry); |
168 | 0 | } |
169 | | |
/*
 * Walk down from `dir` (which must represent the complete top-level
 * directory) to the ref_dir that should hold `refname`. If `refname`
 * names a directory (ends in '/'), return that directory itself.
 * Directories are sorted and recursed into as needed. Return NULL if
 * the containing directory does not exist.
 */
static struct ref_dir *find_containing_dir(struct ref_dir *dir,
					   const char *refname)
{
	const char *slash = strchr(refname, '/');

	while (slash) {
		size_t dirnamelen = slash - refname + 1;
		struct ref_dir *subdir =
			search_for_subdir(dir, refname, dirnamelen);

		if (!subdir)
			return NULL;

		dir = subdir;
		slash = strchr(slash + 1, '/');
	}

	return dir;
}
195 | | |
196 | | struct ref_entry *find_ref_entry(struct ref_dir *dir, const char *refname) |
197 | 0 | { |
198 | 0 | int entry_index; |
199 | 0 | struct ref_entry *entry; |
200 | 0 | dir = find_containing_dir(dir, refname); |
201 | 0 | if (!dir) |
202 | 0 | return NULL; |
203 | 0 | entry_index = search_ref_dir(dir, refname, strlen(refname)); |
204 | 0 | if (entry_index == -1) |
205 | 0 | return NULL; |
206 | 0 | entry = dir->entries[entry_index]; |
207 | 0 | return (entry->flag & REF_DIR) ? NULL : entry; |
208 | 0 | } |
209 | | |
210 | | /* |
211 | | * Emit a warning and return true iff ref1 and ref2 have the same name |
212 | | * and the same oid. Die if they have the same name but different |
213 | | * oids. |
214 | | */ |
215 | | static int is_dup_ref(const struct ref_entry *ref1, const struct ref_entry *ref2) |
216 | 0 | { |
217 | 0 | if (strcmp(ref1->name, ref2->name)) |
218 | 0 | return 0; |
219 | | |
220 | | /* Duplicate name; make sure that they don't conflict: */ |
221 | | |
222 | 0 | if ((ref1->flag & REF_DIR) || (ref2->flag & REF_DIR)) |
223 | | /* This is impossible by construction */ |
224 | 0 | die("Reference directory conflict: %s", ref1->name); |
225 | | |
226 | 0 | if (!oideq(&ref1->u.value.oid, &ref2->u.value.oid)) |
227 | 0 | die("Duplicated ref, and SHA1s don't match: %s", ref1->name); |
228 | | |
229 | 0 | warning("Duplicated ref: %s", ref1->name); |
230 | 0 | return 1; |
231 | 0 | } |
232 | | |
/*
 * Sort the entries in dir non-recursively (if they are not already
 * sorted) and remove any duplicate entries.
 */
static void sort_ref_dir(struct ref_dir *dir)
{
	int i, j;
	struct ref_entry *last = NULL;

	/*
	 * This check also prevents passing a zero-length array to qsort(),
	 * which is a problem on some platforms.
	 */
	if (dir->sorted == dir->nr)
		return;

	QSORT(dir->entries, dir->nr, ref_entry_cmp);

	/* Remove any duplicates: */
	/*
	 * In-place compaction: j scans every entry, i is the write
	 * cursor for the entries being kept. Duplicates (same name and
	 * oid, per is_dup_ref) are freed; is_dup_ref dies on genuine
	 * conflicts.
	 */
	for (i = 0, j = 0; j < dir->nr; j++) {
		struct ref_entry *entry = dir->entries[j];
		if (last && is_dup_ref(last, entry))
			free_ref_entry(entry);
		else
			last = dir->entries[i++] = entry;
	}
	/* The first i entries are now both sorted and unique. */
	dir->sorted = dir->nr = i;
}
261 | | |
enum prefix_state {
	/* All refs within the directory would match prefix: */
	PREFIX_CONTAINS_DIR,

	/* Some, but not all, refs within the directory might match prefix: */
	PREFIX_WITHIN_DIR,

	/* No refs within the directory could possibly match prefix: */
	PREFIX_EXCLUDES_DIR
};

/*
 * Classify the relationship between the directory named `dirname` and
 * `prefix`: does the prefix cover the whole directory, possibly part
 * of it, or none of it?
 */
static enum prefix_state overlaps_prefix(const char *dirname,
					 const char *prefix)
{
	/* Skip the common leading characters. */
	for (; *prefix && *dirname == *prefix; dirname++, prefix++)
		;

	if (!*prefix)
		return PREFIX_CONTAINS_DIR;
	if (!*dirname)
		return PREFIX_WITHIN_DIR;
	return PREFIX_EXCLUDES_DIR;
}
291 | | |
292 | | /* |
293 | | * Load all of the refs from `dir` (recursively) that could possibly |
294 | | * contain references matching `prefix` into our in-memory cache. If |
295 | | * `prefix` is NULL, prime unconditionally. |
296 | | */ |
297 | | static void prime_ref_dir(struct ref_dir *dir, const char *prefix) |
298 | 0 | { |
299 | | /* |
300 | | * The hard work of loading loose refs is done by get_ref_dir(), so we |
301 | | * just need to recurse through all of the sub-directories. We do not |
302 | | * even need to care about sorting, as traversal order does not matter |
303 | | * to us. |
304 | | */ |
305 | 0 | int i; |
306 | 0 | for (i = 0; i < dir->nr; i++) { |
307 | 0 | struct ref_entry *entry = dir->entries[i]; |
308 | 0 | if (!(entry->flag & REF_DIR)) { |
309 | | /* Not a directory; no need to recurse. */ |
310 | 0 | } else if (!prefix) { |
311 | | /* Recurse in any case: */ |
312 | 0 | prime_ref_dir(get_ref_dir(entry), NULL); |
313 | 0 | } else { |
314 | 0 | switch (overlaps_prefix(entry->name, prefix)) { |
315 | 0 | case PREFIX_CONTAINS_DIR: |
316 | | /* |
317 | | * Recurse, and from here down we |
318 | | * don't have to check the prefix |
319 | | * anymore: |
320 | | */ |
321 | 0 | prime_ref_dir(get_ref_dir(entry), NULL); |
322 | 0 | break; |
323 | 0 | case PREFIX_WITHIN_DIR: |
324 | 0 | prime_ref_dir(get_ref_dir(entry), prefix); |
325 | 0 | break; |
326 | 0 | case PREFIX_EXCLUDES_DIR: |
327 | | /* No need to prime this directory. */ |
328 | 0 | break; |
329 | 0 | } |
330 | 0 | } |
331 | 0 | } |
332 | 0 | } |
333 | | |
/*
 * A level in the reference hierarchy that is currently being iterated
 * through.
 */
struct cache_ref_iterator_level {
	/*
	 * The ref_dir being iterated over at this level. The ref_dir
	 * is sorted before being stored here.
	 */
	struct ref_dir *dir;

	/* How the iterator's prefix relates to this directory's name. */
	enum prefix_state prefix_state;

	/*
	 * The index of the current entry within dir (which might
	 * itself be a directory). If index == -1, then the iteration
	 * hasn't yet begun. If index == dir->nr, then the iteration
	 * through this level is over.
	 */
	int index;
};
355 | | |
/*
 * Represent an iteration through a ref_dir in the memory cache. The
 * iteration recurses through subdirectories.
 */
struct cache_ref_iterator {
	struct ref_iterator base;

	/*
	 * The number of levels currently on the stack. This is always
	 * at least 1, because when it becomes zero the iteration is
	 * ended and this struct is freed.
	 */
	size_t levels_nr;

	/* The number of levels that have been allocated on the stack */
	size_t levels_alloc;

	/*
	 * Only include references with this prefix in the iteration.
	 * The prefix is matched textually, without regard for path
	 * component boundaries.
	 */
	const char *prefix;

	/*
	 * A stack of levels. levels[0] is the uppermost level that is
	 * being iterated over in this iteration. (This is not
	 * necessarily the top level in the references hierarchy. If we
	 * are iterating through a subtree, then levels[0] will hold
	 * the ref_dir for that subtree, and subsequent levels will go
	 * on from there.)
	 */
	struct cache_ref_iterator_level *levels;

	/* Repository passed to peel_object() when peeling refs. */
	struct repository *repo;
};
392 | | |
/*
 * Advance the iterator to the next matching reference, descending
 * into subdirectories and popping exhausted levels as needed. Returns
 * ITER_OK with base.refname/oid/flags filled in, or the result of
 * ref_iterator_abort() once the stack is exhausted.
 */
static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct cache_ref_iterator *iter =
		(struct cache_ref_iterator *)ref_iterator;

	while (1) {
		struct cache_ref_iterator_level *level =
			&iter->levels[iter->levels_nr - 1];
		struct ref_dir *dir = level->dir;
		struct ref_entry *entry;
		enum prefix_state entry_prefix_state;

		/* First visit to this level: fix the iteration order. */
		if (level->index == -1)
			sort_ref_dir(dir);

		if (++level->index == level->dir->nr) {
			/* This level is exhausted; pop up a level */
			if (--iter->levels_nr == 0)
				return ref_iterator_abort(ref_iterator);

			continue;
		}

		entry = dir->entries[level->index];

		if (level->prefix_state == PREFIX_WITHIN_DIR) {
			/*
			 * This directory only partially overlaps the
			 * prefix, so each entry must be re-checked.
			 * A partially-matching plain ref is skipped;
			 * only directories can still contain matches.
			 */
			entry_prefix_state = overlaps_prefix(entry->name, iter->prefix);
			if (entry_prefix_state == PREFIX_EXCLUDES_DIR ||
			    (entry_prefix_state == PREFIX_WITHIN_DIR && !(entry->flag & REF_DIR)))
				continue;
		} else {
			entry_prefix_state = level->prefix_state;
		}

		if (entry->flag & REF_DIR) {
			/* push down a level */
			ALLOC_GROW(iter->levels, iter->levels_nr + 1,
				   iter->levels_alloc);

			level = &iter->levels[iter->levels_nr++];
			level->dir = get_ref_dir(entry);
			level->prefix_state = entry_prefix_state;
			level->index = -1;
		} else {
			/* A plain reference: expose it and yield. */
			iter->base.refname = entry->name;
			iter->base.referent = entry->u.value.referent;
			iter->base.oid = &entry->u.value.oid;
			iter->base.flags = entry->flag;
			return ITER_OK;
		}
	}
}
445 | | |
446 | | static int cache_ref_iterator_peel(struct ref_iterator *ref_iterator, |
447 | | struct object_id *peeled) |
448 | 0 | { |
449 | 0 | struct cache_ref_iterator *iter = |
450 | 0 | (struct cache_ref_iterator *)ref_iterator; |
451 | 0 | return peel_object(iter->repo, ref_iterator->oid, peeled) ? -1 : 0; |
452 | 0 | } |
453 | | |
454 | | static int cache_ref_iterator_abort(struct ref_iterator *ref_iterator) |
455 | 0 | { |
456 | 0 | struct cache_ref_iterator *iter = |
457 | 0 | (struct cache_ref_iterator *)ref_iterator; |
458 | |
|
459 | 0 | free((char *)iter->prefix); |
460 | 0 | free(iter->levels); |
461 | 0 | base_ref_iterator_free(ref_iterator); |
462 | 0 | return ITER_DONE; |
463 | 0 | } |
464 | | |
/* Virtual method table for in-memory ref-cache iterators. */
static struct ref_iterator_vtable cache_ref_iterator_vtable = {
	.advance = cache_ref_iterator_advance,
	.peel = cache_ref_iterator_peel,
	.abort = cache_ref_iterator_abort
};
470 | | |
471 | | struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache, |
472 | | const char *prefix, |
473 | | struct repository *repo, |
474 | | int prime_dir) |
475 | 0 | { |
476 | 0 | struct ref_dir *dir; |
477 | 0 | struct cache_ref_iterator *iter; |
478 | 0 | struct ref_iterator *ref_iterator; |
479 | 0 | struct cache_ref_iterator_level *level; |
480 | |
|
481 | 0 | dir = get_ref_dir(cache->root); |
482 | 0 | if (prefix && *prefix) |
483 | 0 | dir = find_containing_dir(dir, prefix); |
484 | 0 | if (!dir) |
485 | | /* There's nothing to iterate over. */ |
486 | 0 | return empty_ref_iterator_begin(); |
487 | | |
488 | 0 | if (prime_dir) |
489 | 0 | prime_ref_dir(dir, prefix); |
490 | |
|
491 | 0 | CALLOC_ARRAY(iter, 1); |
492 | 0 | ref_iterator = &iter->base; |
493 | 0 | base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable); |
494 | 0 | ALLOC_GROW(iter->levels, 10, iter->levels_alloc); |
495 | |
|
496 | 0 | iter->levels_nr = 1; |
497 | 0 | level = &iter->levels[0]; |
498 | 0 | level->index = -1; |
499 | 0 | level->dir = dir; |
500 | |
|
501 | 0 | if (prefix && *prefix) { |
502 | 0 | iter->prefix = xstrdup(prefix); |
503 | 0 | level->prefix_state = PREFIX_WITHIN_DIR; |
504 | 0 | } else { |
505 | 0 | level->prefix_state = PREFIX_CONTAINS_DIR; |
506 | 0 | } |
507 | |
|
508 | 0 | iter->repo = repo; |
509 | |
|
510 | 0 | return ref_iterator; |
511 | 0 | } |