/src/git/refs/ref-cache.c
Line | Count | Source |
1 | | #include "../git-compat-util.h" |
2 | | #include "../hash.h" |
3 | | #include "../refs.h" |
4 | | #include "../repository.h" |
5 | | #include "refs-internal.h" |
6 | | #include "ref-cache.h" |
7 | | #include "../iterator.h" |
8 | | |
9 | | void add_entry_to_dir(struct ref_dir *dir, struct ref_entry *entry) |
10 | 0 | { |
11 | 0 | ALLOC_GROW(dir->entries, dir->nr + 1, dir->alloc); |
12 | 0 | dir->entries[dir->nr++] = entry; |
13 | | /* optimize for the case that entries are added in order */ |
14 | 0 | if (dir->nr == 1 || |
15 | 0 | (dir->nr == dir->sorted + 1 && |
16 | 0 | strcmp(dir->entries[dir->nr - 2]->name, |
17 | 0 | dir->entries[dir->nr - 1]->name) < 0)) |
18 | 0 | dir->sorted = dir->nr; |
19 | 0 | } |
20 | | |
21 | | struct ref_dir *get_ref_dir(struct ref_entry *entry) |
22 | 0 | { |
23 | 0 | struct ref_dir *dir; |
24 | 0 | assert(entry->flag & REF_DIR); |
25 | 0 | dir = &entry->u.subdir; |
26 | 0 | if (entry->flag & REF_INCOMPLETE) { |
27 | 0 | if (!dir->cache->fill_ref_dir) |
28 | 0 | BUG("incomplete ref_store without fill_ref_dir function"); |
29 | | |
30 | 0 | dir->cache->fill_ref_dir(dir->cache->ref_store, dir, entry->name); |
31 | 0 | entry->flag &= ~REF_INCOMPLETE; |
32 | 0 | } |
33 | 0 | return dir; |
34 | 0 | } |
35 | | |
36 | | struct ref_entry *create_ref_entry(const char *refname, |
37 | | const char *referent, |
38 | | const struct object_id *oid, int flag) |
39 | 0 | { |
40 | 0 | struct ref_entry *ref; |
41 | |
|
42 | 0 | FLEX_ALLOC_STR(ref, name, refname); |
43 | 0 | oidcpy(&ref->u.value.oid, oid); |
44 | 0 | ref->flag = flag; |
45 | 0 | ref->u.value.referent = xstrdup_or_null(referent); |
46 | |
|
47 | 0 | return ref; |
48 | 0 | } |
49 | | |
50 | | struct ref_cache *create_ref_cache(struct ref_store *refs, |
51 | | fill_ref_dir_fn *fill_ref_dir) |
52 | 0 | { |
53 | 0 | struct ref_cache *ret = xcalloc(1, sizeof(*ret)); |
54 | |
|
55 | 0 | ret->ref_store = refs; |
56 | 0 | ret->fill_ref_dir = fill_ref_dir; |
57 | 0 | ret->root = create_dir_entry(ret, "", 0); |
58 | 0 | return ret; |
59 | 0 | } |
60 | | |
61 | | static void clear_ref_dir(struct ref_dir *dir); |
62 | | |
63 | | static void free_ref_entry(struct ref_entry *entry) |
64 | 0 | { |
65 | 0 | if (entry->flag & REF_DIR) { |
66 | | /* |
67 | | * Do not use get_ref_dir() here, as that might |
68 | | * trigger the reading of loose refs. |
69 | | */ |
70 | 0 | clear_ref_dir(&entry->u.subdir); |
71 | 0 | } else { |
72 | 0 | free(entry->u.value.referent); |
73 | 0 | } |
74 | 0 | free(entry); |
75 | 0 | } |
76 | | |
77 | | void free_ref_cache(struct ref_cache *cache) |
78 | 0 | { |
79 | 0 | if (!cache) |
80 | 0 | return; |
81 | 0 | free_ref_entry(cache->root); |
82 | 0 | free(cache); |
83 | 0 | } |
84 | | |
85 | | /* |
86 | | * Clear and free all entries in dir, recursively. |
87 | | */ |
88 | | static void clear_ref_dir(struct ref_dir *dir) |
89 | 0 | { |
90 | 0 | int i; |
91 | 0 | for (i = 0; i < dir->nr; i++) |
92 | 0 | free_ref_entry(dir->entries[i]); |
93 | 0 | FREE_AND_NULL(dir->entries); |
94 | 0 | dir->sorted = dir->nr = dir->alloc = 0; |
95 | 0 | } |
96 | | |
97 | | struct ref_entry *create_dir_entry(struct ref_cache *cache, |
98 | | const char *dirname, size_t len) |
99 | 0 | { |
100 | 0 | struct ref_entry *direntry; |
101 | |
|
102 | 0 | FLEX_ALLOC_MEM(direntry, name, dirname, len); |
103 | 0 | direntry->u.subdir.cache = cache; |
104 | 0 | direntry->flag = REF_DIR | REF_INCOMPLETE; |
105 | 0 | return direntry; |
106 | 0 | } |
107 | | |
108 | | static int ref_entry_cmp(const void *a, const void *b) |
109 | 0 | { |
110 | 0 | struct ref_entry *one = *(struct ref_entry **)a; |
111 | 0 | struct ref_entry *two = *(struct ref_entry **)b; |
112 | 0 | return strcmp(one->name, two->name); |
113 | 0 | } |
114 | | |
115 | | static void sort_ref_dir(struct ref_dir *dir); |
116 | | |
/*
 * A length-delimited view into a string; `str` need not be
 * NUL-terminated at `len`. Used as the bsearch() key below.
 */
struct string_slice {
	size_t len;
	const char *str;
};
121 | | |
/*
 * bsearch() comparator between a string_slice key and a ref_entry.
 * Compares the first key->len bytes; on a tie, the entry matches only
 * if its name ends exactly at that length.
 */
static int ref_entry_cmp_sslice(const void *key_, const void *ent_)
{
	const struct string_slice *key = key_;
	const struct ref_entry *ent = *(const struct ref_entry * const *)ent_;
	int cmp = strncmp(key->str, ent->name, key->len);
	if (cmp)
		return cmp;
	/* key is a prefix of ent->name; equal iff ent->name stops here */
	return '\0' - (unsigned char)ent->name[key->len];
}
131 | | |
132 | | int search_ref_dir(struct ref_dir *dir, const char *refname, size_t len) |
133 | 0 | { |
134 | 0 | struct ref_entry **r; |
135 | 0 | struct string_slice key; |
136 | |
|
137 | 0 | if (refname == NULL || !dir->nr) |
138 | 0 | return -1; |
139 | | |
140 | 0 | sort_ref_dir(dir); |
141 | 0 | key.len = len; |
142 | 0 | key.str = refname; |
143 | 0 | r = bsearch(&key, dir->entries, dir->nr, sizeof(*dir->entries), |
144 | 0 | ref_entry_cmp_sslice); |
145 | |
|
146 | 0 | if (!r) |
147 | 0 | return -1; |
148 | | |
149 | 0 | return r - dir->entries; |
150 | 0 | } |
151 | | |
152 | | /* |
153 | | * Search for a directory entry directly within dir (without |
154 | | * recursing). Sort dir if necessary. subdirname must be a directory |
155 | | * name (i.e., end in '/'). Returns NULL if the desired |
156 | | * directory cannot be found. dir must already be complete. |
157 | | */ |
158 | | static struct ref_dir *search_for_subdir(struct ref_dir *dir, |
159 | | const char *subdirname, size_t len) |
160 | 0 | { |
161 | 0 | int entry_index = search_ref_dir(dir, subdirname, len); |
162 | 0 | struct ref_entry *entry; |
163 | |
|
164 | 0 | if (entry_index == -1) |
165 | 0 | return NULL; |
166 | | |
167 | 0 | entry = dir->entries[entry_index]; |
168 | 0 | return get_ref_dir(entry); |
169 | 0 | } |
170 | | |
/*
 * If refname is a reference name, find the ref_dir within the dir
 * tree that should hold refname. If refname is a directory name
 * (i.e., it ends in '/'), then return that ref_dir itself. dir must
 * represent the top-level directory and must already be complete.
 * Sort ref_dirs and recurse into subdirectories as necessary. Will
 * return NULL if the desired directory cannot be found.
 */
static struct ref_dir *find_containing_dir(struct ref_dir *dir,
					   const char *refname)
{
	const char *slash = strchr(refname, '/');

	while (slash) {
		/* Directory-name length includes the trailing '/'. */
		size_t dirnamelen = slash - refname + 1;

		dir = search_for_subdir(dir, refname, dirnamelen);
		if (!dir)
			return NULL;
		slash = strchr(slash + 1, '/');
	}

	return dir;
}
196 | | |
197 | | /* |
198 | | * Emit a warning and return true iff ref1 and ref2 have the same name |
199 | | * and the same oid. Die if they have the same name but different |
200 | | * oids. |
201 | | */ |
202 | | static int is_dup_ref(const struct ref_entry *ref1, const struct ref_entry *ref2) |
203 | 0 | { |
204 | 0 | if (strcmp(ref1->name, ref2->name)) |
205 | 0 | return 0; |
206 | | |
207 | | /* Duplicate name; make sure that they don't conflict: */ |
208 | | |
209 | 0 | if ((ref1->flag & REF_DIR) || (ref2->flag & REF_DIR)) |
210 | | /* This is impossible by construction */ |
211 | 0 | die("Reference directory conflict: %s", ref1->name); |
212 | | |
213 | 0 | if (!oideq(&ref1->u.value.oid, &ref2->u.value.oid)) |
214 | 0 | die("Duplicated ref, and SHA1s don't match: %s", ref1->name); |
215 | | |
216 | 0 | warning("Duplicated ref: %s", ref1->name); |
217 | 0 | return 1; |
218 | 0 | } |
219 | | |
/*
 * Sort the entries in dir non-recursively (if they are not already
 * sorted) and remove any duplicate entries.
 */
static void sort_ref_dir(struct ref_dir *dir)
{
	int i, j;
	struct ref_entry *last = NULL;

	/*
	 * This check also prevents passing a zero-length array to qsort(),
	 * which is a problem on some platforms.
	 */
	if (dir->sorted == dir->nr)
		return;

	QSORT(dir->entries, dir->nr, ref_entry_cmp);

	/* Remove any duplicates: */
	for (i = 0, j = 0; j < dir->nr; j++) {
		struct ref_entry *entry = dir->entries[j];
		if (last && is_dup_ref(last, entry))
			/* Benign duplicate (is_dup_ref dies on conflict): drop it. */
			free_ref_entry(entry);
		else
			/* Keep it, compacting the array in place. */
			last = dir->entries[i++] = entry;
	}
	/* i is the number of surviving entries; the whole array is now sorted. */
	dir->sorted = dir->nr = i;
}
248 | | |
enum prefix_state {
	/* All refs within the directory would match prefix: */
	PREFIX_CONTAINS_DIR,

	/* Some, but not all, refs within the directory might match prefix: */
	PREFIX_WITHIN_DIR,

	/* No refs within the directory could possibly match prefix: */
	PREFIX_EXCLUDES_DIR
};

/*
 * Return a `prefix_state` constant describing the relationship
 * between the directory with the specified `dirname` and `prefix`.
 */
static enum prefix_state overlaps_prefix(const char *dirname,
					 const char *prefix)
{
	/* Skip past the common leading run of the two strings. */
	for (; *prefix && *dirname == *prefix; dirname++, prefix++)
		;

	if (!*prefix)
		return PREFIX_CONTAINS_DIR;	/* prefix covers dirname */
	return *dirname ? PREFIX_EXCLUDES_DIR : PREFIX_WITHIN_DIR;
}
278 | | |
279 | | /* |
280 | | * Load all of the refs from `dir` (recursively) that could possibly |
281 | | * contain references matching `prefix` into our in-memory cache. If |
282 | | * `prefix` is NULL, prime unconditionally. |
283 | | */ |
284 | | static void prime_ref_dir(struct ref_dir *dir, const char *prefix) |
285 | 0 | { |
286 | | /* |
287 | | * The hard work of loading loose refs is done by get_ref_dir(), so we |
288 | | * just need to recurse through all of the sub-directories. We do not |
289 | | * even need to care about sorting, as traversal order does not matter |
290 | | * to us. |
291 | | */ |
292 | 0 | int i; |
293 | 0 | for (i = 0; i < dir->nr; i++) { |
294 | 0 | struct ref_entry *entry = dir->entries[i]; |
295 | 0 | if (!(entry->flag & REF_DIR)) { |
296 | | /* Not a directory; no need to recurse. */ |
297 | 0 | } else if (!prefix) { |
298 | | /* Recurse in any case: */ |
299 | 0 | prime_ref_dir(get_ref_dir(entry), NULL); |
300 | 0 | } else { |
301 | 0 | switch (overlaps_prefix(entry->name, prefix)) { |
302 | 0 | case PREFIX_CONTAINS_DIR: |
303 | | /* |
304 | | * Recurse, and from here down we |
305 | | * don't have to check the prefix |
306 | | * anymore: |
307 | | */ |
308 | 0 | prime_ref_dir(get_ref_dir(entry), NULL); |
309 | 0 | break; |
310 | 0 | case PREFIX_WITHIN_DIR: |
311 | 0 | prime_ref_dir(get_ref_dir(entry), prefix); |
312 | 0 | break; |
313 | 0 | case PREFIX_EXCLUDES_DIR: |
314 | | /* No need to prime this directory. */ |
315 | 0 | break; |
316 | 0 | } |
317 | 0 | } |
318 | 0 | } |
319 | 0 | } |
320 | | |
/*
 * A level in the reference hierarchy that is currently being iterated
 * through.
 */
struct cache_ref_iterator_level {
	/*
	 * The ref_dir being iterated over at this level. The ref_dir
	 * is sorted before being stored here.
	 */
	struct ref_dir *dir;

	/*
	 * How this directory relates to the iterator's prefix. When
	 * PREFIX_WITHIN_DIR, each entry must still be checked against
	 * the prefix; otherwise the parent's verdict applies to every
	 * entry at this level.
	 */
	enum prefix_state prefix_state;

	/*
	 * The index of the current entry within dir (which might
	 * itself be a directory). If index == -1, then the iteration
	 * hasn't yet begun. If index == dir->nr, then the iteration
	 * through this level is over.
	 */
	int index;
};
342 | | |
/*
 * Represent an iteration through a ref_dir in the memory cache. The
 * iteration recurses through subdirectories.
 */
struct cache_ref_iterator {
	struct ref_iterator base;

	/*
	 * The number of levels currently on the stack.
	 */
	size_t levels_nr;

	/* The number of levels that have been allocated on the stack */
	size_t levels_alloc;

	/*
	 * Only include references with this prefix in the iteration.
	 * The prefix is matched textually, without regard for path
	 * component boundaries.
	 */
	char *prefix;

	/*
	 * A stack of levels. levels[0] is the uppermost level that is
	 * being iterated over in this iteration. (This is not
	 * necessarily the top level in the references hierarchy. If we
	 * are iterating through a subtree, then levels[0] will hold
	 * the ref_dir for that subtree, and subsequent levels will go
	 * on from there.)
	 */
	struct cache_ref_iterator_level *levels;

	/* The repository passed to cache_ref_iterator_begin(). */
	struct repository *repo;

	/* The ref_cache whose tree this iterator walks. */
	struct ref_cache *cache;

	/*
	 * If non-zero, eagerly load (prime) matching directories via
	 * prime_ref_dir() whenever the iterator is (re)positioned.
	 */
	int prime_dir;
};
380 | | |
/*
 * Advance the iterator to the next reference via an in-order,
 * depth-first walk of the cached ref_dir tree. Directory entries push
 * a new level onto the stack; leaf entries are exposed through
 * iter->base.ref. Returns ITER_OK when positioned on a ref, or
 * ITER_DONE when the iteration is exhausted.
 */
static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct cache_ref_iterator *iter =
		(struct cache_ref_iterator *)ref_iterator;

	if (!iter->levels_nr)
		return ITER_DONE;

	while (1) {
		struct cache_ref_iterator_level *level =
			&iter->levels[iter->levels_nr - 1];
		struct ref_dir *dir = level->dir;
		struct ref_entry *entry;
		enum prefix_state entry_prefix_state;

		/* First visit to this level: sort so the walk is ordered. */
		if (level->index == -1)
			sort_ref_dir(dir);

		if (++level->index == level->dir->nr) {
			/* This level is exhausted; pop up a level */
			if (--iter->levels_nr == 0)
				return ITER_DONE;

			continue;
		}

		entry = dir->entries[level->index];

		/*
		 * Re-check the prefix only when this directory may contain
		 * both matching and non-matching refs. A leaf whose verdict
		 * is still PREFIX_WITHIN_DIR cannot match (its full name is
		 * shorter than the prefix), so it is skipped too.
		 */
		if (level->prefix_state == PREFIX_WITHIN_DIR) {
			entry_prefix_state = overlaps_prefix(entry->name, iter->prefix);
			if (entry_prefix_state == PREFIX_EXCLUDES_DIR ||
			    (entry_prefix_state == PREFIX_WITHIN_DIR && !(entry->flag & REF_DIR)))
				continue;
		} else {
			entry_prefix_state = level->prefix_state;
		}

		if (entry->flag & REF_DIR) {
			/* push down a level */
			ALLOC_GROW(iter->levels, iter->levels_nr + 1,
				   iter->levels_alloc);

			level = &iter->levels[iter->levels_nr++];
			level->dir = get_ref_dir(entry);
			level->prefix_state = entry_prefix_state;
			level->index = -1;
		} else {
			/* Leaf ref: publish it on the base iterator and stop. */
			memset(&iter->base.ref, 0, sizeof(iter->base.ref));
			iter->base.ref.name = entry->name;
			iter->base.ref.target = entry->u.value.referent;
			iter->base.ref.oid = &entry->u.value.oid;
			iter->base.ref.flags = entry->flag;
			return ITER_OK;
		}
	}
}
437 | | |
438 | | static int cache_ref_iterator_set_prefix(struct cache_ref_iterator *iter, |
439 | | const char *prefix) |
440 | 0 | { |
441 | 0 | struct cache_ref_iterator_level *level; |
442 | 0 | struct ref_dir *dir; |
443 | |
|
444 | 0 | dir = get_ref_dir(iter->cache->root); |
445 | 0 | if (prefix && *prefix) |
446 | 0 | dir = find_containing_dir(dir, prefix); |
447 | 0 | if (!dir) { |
448 | 0 | iter->levels_nr = 0; |
449 | 0 | return 0; |
450 | 0 | } |
451 | | |
452 | 0 | if (iter->prime_dir) |
453 | 0 | prime_ref_dir(dir, prefix); |
454 | 0 | iter->levels_nr = 1; |
455 | 0 | level = &iter->levels[0]; |
456 | 0 | level->index = -1; |
457 | 0 | level->dir = dir; |
458 | |
|
459 | 0 | if (prefix && *prefix) { |
460 | 0 | free(iter->prefix); |
461 | 0 | iter->prefix = xstrdup(prefix); |
462 | 0 | level->prefix_state = PREFIX_WITHIN_DIR; |
463 | 0 | } else { |
464 | 0 | FREE_AND_NULL(iter->prefix); |
465 | 0 | level->prefix_state = PREFIX_CONTAINS_DIR; |
466 | 0 | } |
467 | |
|
468 | 0 | return 0; |
469 | 0 | } |
470 | | |
/*
 * Position the iterator. With REF_ITERATOR_SEEK_SET_PREFIX, restrict
 * iteration to refs under `refname` treated as a prefix. Otherwise,
 * walk the cached tree one path component at a time so that the next
 * advance resumes at the first ref at or after `refname`. Returns 0
 * (or cache_ref_iterator_set_prefix's result on the prefix path).
 */
static int cache_ref_iterator_seek(struct ref_iterator *ref_iterator,
				   const char *refname, unsigned int flags)
{
	struct cache_ref_iterator *iter =
		(struct cache_ref_iterator *)ref_iterator;

	if (flags & REF_ITERATOR_SEEK_SET_PREFIX) {
		return cache_ref_iterator_set_prefix(iter, refname);
	} else if (refname && *refname) {
		struct cache_ref_iterator_level *level;
		const char *slash = refname;
		struct ref_dir *dir;

		dir = get_ref_dir(iter->cache->root);

		if (iter->prime_dir)
			prime_ref_dir(dir, refname);

		/* Restart the level stack from the root directory. */
		iter->levels_nr = 1;
		level = &iter->levels[0];
		level->index = -1;
		level->dir = dir;

		/* Unset any previously set prefix */
		FREE_AND_NULL(iter->prefix);

		/*
		 * Breakdown the provided seek path and assign the correct
		 * indexing to each level as needed.
		 */
		do {
			int idx;
			size_t len;
			int cmp = 0;

			sort_ref_dir(dir);

			/* Compare up to (but excluding) the next '/' of refname. */
			slash = strchr(slash, '/');
			len = slash ? (size_t)(slash - refname) : strlen(refname);

			/* Find the first entry not ordered before the seek path. */
			for (idx = 0; idx < dir->nr; idx++) {
				cmp = strncmp(refname, dir->entries[idx]->name, len);
				if (cmp <= 0)
					break;
			}
			/* don't overflow the index */
			idx = idx >= dir->nr ? dir->nr - 1 : idx;
			/*
			 * NOTE(review): if dir->nr == 0 on the first pass
			 * (empty root directory), idx clamps to -1 and the
			 * dir->entries[idx] access below would read out of
			 * bounds — the `dir->nr` guard in the loop condition
			 * is only checked after the first iteration. Verify
			 * callers never seek within an empty cache.
			 */

			if (slash)
				slash = slash + 1;

			level->index = idx;
			if (dir->entries[idx]->flag & REF_DIR) {
				/* push down a level */
				dir = get_ref_dir(dir->entries[idx]);

				ALLOC_GROW(iter->levels, iter->levels_nr + 1,
					   iter->levels_alloc);
				level = &iter->levels[iter->levels_nr++];
				level->dir = dir;
				level->index = -1;
				level->prefix_state = PREFIX_CONTAINS_DIR;
			} else {
				/* reduce the index so the leaf node is iterated over */
				if (cmp <= 0 && !slash)
					level->index = idx - 1;
				/*
				 * while the seek path may not be exhausted, our
				 * match is exhausted at a leaf node.
				 */
				break;
			}
		} while (slash && dir->nr);
	}

	return 0;
}
548 | | |
/*
 * Release memory owned by the iterator itself (prefix copy and level
 * stack); the underlying ref_cache is not touched.
 */
static void cache_ref_iterator_release(struct ref_iterator *ref_iterator)
{
	struct cache_ref_iterator *iter =
		(struct cache_ref_iterator *)ref_iterator;
	free(iter->prefix);
	free(iter->levels);
}
556 | | |
/* Virtual method table plugging the cache iterator into the generic ref_iterator API. */
static struct ref_iterator_vtable cache_ref_iterator_vtable = {
	.advance = cache_ref_iterator_advance,
	.seek = cache_ref_iterator_seek,
	.release = cache_ref_iterator_release,
};
562 | | |
563 | | struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache, |
564 | | const char *prefix, |
565 | | struct repository *repo, |
566 | | int prime_dir) |
567 | 0 | { |
568 | 0 | struct cache_ref_iterator *iter; |
569 | 0 | struct ref_iterator *ref_iterator; |
570 | |
|
571 | 0 | CALLOC_ARRAY(iter, 1); |
572 | 0 | ref_iterator = &iter->base; |
573 | 0 | base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable); |
574 | 0 | ALLOC_GROW(iter->levels, 10, iter->levels_alloc); |
575 | |
|
576 | 0 | iter->repo = repo; |
577 | 0 | iter->cache = cache; |
578 | 0 | iter->prime_dir = prime_dir; |
579 | |
|
580 | 0 | if (cache_ref_iterator_seek(&iter->base, prefix, |
581 | 0 | REF_ITERATOR_SEEK_SET_PREFIX) < 0) { |
582 | 0 | ref_iterator_free(&iter->base); |
583 | 0 | return NULL; |
584 | 0 | } |
585 | | |
586 | 0 | return ref_iterator; |
587 | 0 | } |