/*
 * Generic reference iterator infrastructure. See refs-internal.h for
 * documentation about the design and use of reference iterators.
 */

#define DISABLE_SIGN_COMPARE_WARNINGS

#include "git-compat-util.h"
#include "refs.h"
#include "refs/refs-internal.h"
#include "iterator.h"
int ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	return ref_iterator->vtable->advance(ref_iterator);
}

int ref_iterator_seek(struct ref_iterator *ref_iterator, const char *refname,
		      unsigned int flags)
{
	return ref_iterator->vtable->seek(ref_iterator, refname, flags);
}

void ref_iterator_free(struct ref_iterator *ref_iterator)
{
	if (ref_iterator) {
		ref_iterator->vtable->release(ref_iterator);
		/* Help make use-after-free bugs fail quickly: */
		ref_iterator->vtable = NULL;
		free(ref_iterator);
	}
}

void base_ref_iterator_init(struct ref_iterator *iter,
			    struct ref_iterator_vtable *vtable)
{
	iter->vtable = vtable;
	memset(&iter->ref, 0, sizeof(iter->ref));
}
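
/*
 * Illustrative sketch (hypothetical, not part of this file): a
 * concrete iterator embeds `struct ref_iterator` as its first member,
 * so the base pointer handed to the vtable callbacks can be cast back
 * to the derived type, and its begin function wires up the vtable via
 * base_ref_iterator_init(). The empty iterator below is the smallest
 * real instance of this pattern.
 *
 *     struct single_ref_iterator {
 *         struct ref_iterator base;
 *         int yielded;
 *     };
 *
 *     static int single_ref_iterator_advance(struct ref_iterator *ref_iterator)
 *     {
 *         struct single_ref_iterator *iter =
 *             (struct single_ref_iterator *)ref_iterator;
 *
 *         if (iter->yielded++)
 *             return ITER_DONE;
 *         // ... fill in iter->base.ref here, then:
 *         return ITER_OK;
 *     }
 */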

struct empty_ref_iterator {
	struct ref_iterator base;
};

static int empty_ref_iterator_advance(struct ref_iterator *ref_iterator UNUSED)
{
	return ITER_DONE;
}

static int empty_ref_iterator_seek(struct ref_iterator *ref_iterator UNUSED,
				   const char *refname UNUSED,
				   unsigned int flags UNUSED)
{
	return 0;
}

static void empty_ref_iterator_release(struct ref_iterator *ref_iterator UNUSED)
{
}

static struct ref_iterator_vtable empty_ref_iterator_vtable = {
	.advance = empty_ref_iterator_advance,
	.seek = empty_ref_iterator_seek,
	.release = empty_ref_iterator_release,
};

struct ref_iterator *empty_ref_iterator_begin(void)
{
	struct empty_ref_iterator *iter = xcalloc(1, sizeof(*iter));
	struct ref_iterator *ref_iterator = &iter->base;

	base_ref_iterator_init(ref_iterator, &empty_ref_iterator_vtable);
	return ref_iterator;
}

int is_empty_ref_iterator(struct ref_iterator *ref_iterator)
{
	return ref_iterator->vtable == &empty_ref_iterator_vtable;
}

struct merge_ref_iterator {
	struct ref_iterator base;

	struct ref_iterator *iter0, *iter0_owned;
	struct ref_iterator *iter1, *iter1_owned;

	ref_iterator_select_fn *select;
	void *cb_data;

	/*
	 * A pointer to iter0 or iter1 (whichever is supplying the
	 * current value), or NULL if advance has not yet been called.
	 */
	struct ref_iterator **current;
};

enum iterator_selection ref_iterator_select(struct ref_iterator *iter_worktree,
					    struct ref_iterator *iter_common,
					    void *cb_data UNUSED)
{
	if (iter_worktree && !iter_common) {
		/*
		 * Return the worktree ref if there are no more common refs.
		 */
		return ITER_SELECT_0;
	} else if (iter_common) {
		/*
		 * If we have both pending worktree and common refs, we
		 * need to yield them based on their lexicographical
		 * order. Worktree refs that have the same name as
		 * common refs shadow the latter.
		 */
		if (iter_worktree) {
			int cmp = strcmp(iter_worktree->ref.name,
					 iter_common->ref.name);
			if (cmp < 0)
				return ITER_SELECT_0;
			else if (!cmp)
				return ITER_SELECT_0_SKIP_1;
		}

		/*
		 * We now know that the lexicographically-next ref is a
		 * common ref. When the common ref is a shared one we
		 * return it.
		 */
		if (parse_worktree_ref(iter_common->ref.name, NULL, NULL,
				       NULL) == REF_WORKTREE_SHARED)
			return ITER_SELECT_1;

		/*
		 * Otherwise, if the common ref is a per-worktree ref we
		 * skip it because it would belong to the main worktree,
		 * not ours.
		 */
		return ITER_SKIP_1;
	} else {
		return ITER_DONE;
	}
}
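
/*
 * Illustrative sketch (hypothetical, not part of this file): a select
 * function implementing "intersection", yielding only refs present in
 * both iterators, as alluded to in merge_ref_iterator_begin() below.
 * The ITER_SKIP_0 value is assumed to exist alongside the ITER_SKIP_1
 * used above; see the selection enum in refs-internal.h for the
 * authoritative names.
 *
 *     static enum iterator_selection intersect_select(
 *             struct ref_iterator *iter0, struct ref_iterator *iter1,
 *             void *cb_data UNUSED)
 *     {
 *         int cmp;
 *
 *         if (!iter0 || !iter1)
 *             return ITER_SELECT_DONE;
 *
 *         cmp = strcmp(iter0->ref.name, iter1->ref.name);
 *         if (cmp < 0)
 *             return ITER_SKIP_0;        // only in iter0: discard it
 *         else if (cmp > 0)
 *             return ITER_SKIP_1;        // only in iter1: discard it
 *         else
 *             return ITER_SELECT_0_SKIP_1; // in both: yield from iter0
 *     }
 */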

static int merge_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct merge_ref_iterator *iter =
		(struct merge_ref_iterator *)ref_iterator;
	int ok;

	if (!iter->current) {
		/* Initialize: advance both iterators to their first entries */
		if ((ok = ref_iterator_advance(iter->iter0)) != ITER_OK) {
			iter->iter0 = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
		if ((ok = ref_iterator_advance(iter->iter1)) != ITER_OK) {
			iter->iter1 = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
	} else {
		/*
		 * Advance the current iterator past the just-used
		 * entry:
		 */
		if ((ok = ref_iterator_advance(*iter->current)) != ITER_OK) {
			*iter->current = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
	}

	/* Loop until we find an entry that we can yield. */
	while (1) {
		struct ref_iterator **secondary;
		enum iterator_selection selection =
			iter->select(iter->iter0, iter->iter1, iter->cb_data);

		if (selection == ITER_SELECT_DONE) {
			return ITER_DONE;
		} else if (selection == ITER_SELECT_ERROR) {
			return ITER_ERROR;
		}

		if ((selection & ITER_CURRENT_SELECTION_MASK) == 0) {
			iter->current = &iter->iter0;
			secondary = &iter->iter1;
		} else {
			iter->current = &iter->iter1;
			secondary = &iter->iter0;
		}

		if (selection & ITER_SKIP_SECONDARY) {
			if ((ok = ref_iterator_advance(*secondary)) != ITER_OK) {
				*secondary = NULL;
				if (ok == ITER_ERROR)
					goto error;
			}
		}

		if (selection & ITER_YIELD_CURRENT) {
			iter->base.ref = (*iter->current)->ref;
			return ITER_OK;
		}
	}

error:
	return ITER_ERROR;
}

static int merge_ref_iterator_seek(struct ref_iterator *ref_iterator,
				   const char *refname, unsigned int flags)
{
	struct merge_ref_iterator *iter =
		(struct merge_ref_iterator *)ref_iterator;
	int ret;

	iter->current = NULL;
	iter->iter0 = iter->iter0_owned;
	iter->iter1 = iter->iter1_owned;

	ret = ref_iterator_seek(iter->iter0, refname, flags);
	if (ret < 0)
		return ret;

	ret = ref_iterator_seek(iter->iter1, refname, flags);
	if (ret < 0)
		return ret;

	return 0;
}

static void merge_ref_iterator_release(struct ref_iterator *ref_iterator)
{
	struct merge_ref_iterator *iter =
		(struct merge_ref_iterator *)ref_iterator;
	ref_iterator_free(iter->iter0_owned);
	ref_iterator_free(iter->iter1_owned);
}

static struct ref_iterator_vtable merge_ref_iterator_vtable = {
	.advance = merge_ref_iterator_advance,
	.seek = merge_ref_iterator_seek,
	.release = merge_ref_iterator_release,
};

struct ref_iterator *merge_ref_iterator_begin(
		struct ref_iterator *iter0, struct ref_iterator *iter1,
		ref_iterator_select_fn *select, void *cb_data)
{
	struct merge_ref_iterator *iter = xcalloc(1, sizeof(*iter));
	struct ref_iterator *ref_iterator = &iter->base;

	/*
	 * We can't do the same kind of is_empty_ref_iterator()-style
	 * optimization here as overlay_ref_iterator_begin() does,
	 * because we don't know the semantics of the select function.
	 * It might, for example, implement "intersect" by passing
	 * references through only if they exist in both iterators.
	 */

	base_ref_iterator_init(ref_iterator, &merge_ref_iterator_vtable);
	iter->iter0 = iter->iter0_owned = iter0;
	iter->iter1 = iter->iter1_owned = iter1;
	iter->select = select;
	iter->cb_data = cb_data;
	iter->current = NULL;
	return ref_iterator;
}
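
/*
 * Usage sketch (illustrative; `iter_a`, `iter_b` and `intersect_select`
 * are hypothetical names from the sketch above):
 *
 *     struct ref_iterator *both =
 *         merge_ref_iterator_begin(iter_a, iter_b,
 *                                  intersect_select, NULL);
 *
 * Note that the merge iterator takes ownership of both inputs (via
 * iter0_owned/iter1_owned) and frees them in
 * merge_ref_iterator_release() when it is itself freed.
 */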

/*
 * A ref_iterator_select_fn that overlays the items from front on top
 * of those from back (like loose refs over packed refs). See
 * overlay_ref_iterator_begin().
 */
static enum iterator_selection overlay_iterator_select(
		struct ref_iterator *front, struct ref_iterator *back,
		void *cb_data UNUSED)
{
	int cmp;

	if (!back)
		return front ? ITER_SELECT_0 : ITER_SELECT_DONE;
	else if (!front)
		return ITER_SELECT_1;

	cmp = strcmp(front->ref.name, back->ref.name);

	if (cmp < 0)
		return ITER_SELECT_0;
	else if (cmp > 0)
		return ITER_SELECT_1;
	else
		return ITER_SELECT_0_SKIP_1;
}

struct ref_iterator *overlay_ref_iterator_begin(
		struct ref_iterator *front, struct ref_iterator *back)
{
	/*
	 * Optimization: if one of the iterators is empty, return the
	 * other one rather than incurring the overhead of wrapping
	 * them.
	 */
	if (is_empty_ref_iterator(front)) {
		ref_iterator_free(front);
		return back;
	} else if (is_empty_ref_iterator(back)) {
		ref_iterator_free(back);
		return front;
	}

	return merge_ref_iterator_begin(front, back, overlay_iterator_select, NULL);
}
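
/*
 * Usage sketch (illustrative; `loose_iter` and `packed_iter` are
 * hypothetical stand-ins for iterators obtained from a ref backend).
 * When the same refname appears in both inputs, the entry from `front`
 * wins, matching how loose refs shadow packed refs:
 *
 *     struct ref_iterator *refs =
 *         overlay_ref_iterator_begin(loose_iter, packed_iter);
 */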

struct prefix_ref_iterator {
	struct ref_iterator base;

	struct ref_iterator *iter0;
	char *prefix;
	int trim;
};

/* Return -1, 0, 1 if refname is before, inside, or after the prefix. */
static int compare_prefix(const char *refname, const char *prefix)
{
	while (*prefix) {
		if (*refname != *prefix)
			return ((unsigned char)*refname < (unsigned char)*prefix) ? -1 : +1;

		refname++;
		prefix++;
	}

	return 0;
}
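
/*
 * For example (these values follow directly from the definition above):
 *
 *     compare_prefix("refs/heads/main", "refs/heads/") ==  0
 *     compare_prefix("refs/bisect/bad", "refs/heads/") == -1
 *     compare_prefix("refs/tags/v1.0",  "refs/heads/") == +1
 *
 * so an iteration over an ordered source can stop at the first +1
 * result, as prefix_ref_iterator_advance() does below.
 */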

static int prefix_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct prefix_ref_iterator *iter =
		(struct prefix_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		int cmp = compare_prefix(iter->iter0->ref.name, iter->prefix);
		if (cmp < 0)
			continue;
		/*
		 * As the source iterator is ordered, we
		 * can stop the iteration as soon as we see a
		 * refname that comes after the prefix:
		 */
		if (cmp > 0)
			return ITER_DONE;

		iter->base.ref = iter->iter0->ref;

		if (iter->trim) {
			/*
			 * It is nonsense to trim off characters that
			 * you haven't already checked for via a
			 * prefix check, whether via this
			 * `prefix_ref_iterator` or upstream in
			 * `iter0`. So if there wouldn't be at least
			 * one character left in the refname after
			 * trimming, report it as a bug:
			 */
			if (strlen(iter->base.ref.name) <= iter->trim)
				BUG("attempt to trim too many characters");
			iter->base.ref.name += iter->trim;
		}

		return ITER_OK;
	}

	return ok;
}

static int prefix_ref_iterator_seek(struct ref_iterator *ref_iterator,
				    const char *refname, unsigned int flags)
{
	struct prefix_ref_iterator *iter =
		(struct prefix_ref_iterator *)ref_iterator;

	if (flags & REF_ITERATOR_SEEK_SET_PREFIX) {
		free(iter->prefix);
		iter->prefix = xstrdup_or_null(refname);
	}
	return ref_iterator_seek(iter->iter0, refname, flags);
}

static void prefix_ref_iterator_release(struct ref_iterator *ref_iterator)
{
	struct prefix_ref_iterator *iter =
		(struct prefix_ref_iterator *)ref_iterator;
	ref_iterator_free(iter->iter0);
	free(iter->prefix);
}

static struct ref_iterator_vtable prefix_ref_iterator_vtable = {
	.advance = prefix_ref_iterator_advance,
	.seek = prefix_ref_iterator_seek,
	.release = prefix_ref_iterator_release,
};

struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
					       const char *prefix,
					       int trim)
{
	struct prefix_ref_iterator *iter;
	struct ref_iterator *ref_iterator;

	if (!*prefix && !trim)
		return iter0; /* optimization: no need to wrap iterator */

	CALLOC_ARRAY(iter, 1);
	ref_iterator = &iter->base;

	base_ref_iterator_init(ref_iterator, &prefix_ref_iterator_vtable);

	iter->iter0 = iter0;
	iter->prefix = xstrdup(prefix);
	iter->trim = trim;

	return ref_iterator;
}
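
/*
 * Usage sketch (illustrative; `all_refs` is a hypothetical iterator
 * over every ref): yield only tags, trimming the yielded names down to
 * "v1.0"-style short names:
 *
 *     struct ref_iterator *tags =
 *         prefix_ref_iterator_begin(all_refs, "refs/tags/",
 *                                   strlen("refs/tags/"));
 */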

int do_for_each_ref_iterator(struct ref_iterator *iter,
			     each_ref_fn fn, void *cb_data)
{
	int retval = 0, ok;

	while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
		retval = fn(&iter->ref, cb_data);
		if (retval)
			goto out;
	}

out:
	if (ok == ITER_ERROR)
		retval = -1;
	ref_iterator_free(iter);
	return retval;
}
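
/*
 * Usage sketch (illustrative): an each_ref_fn callback that prints
 * refnames. A nonzero return aborts the iteration early, mirroring the
 * early-exit handling above. The parameter type of `ref` is inferred
 * from the `fn(&iter->ref, cb_data)` call; see refs.h for the
 * authoritative signature.
 *
 *     static int print_ref(const struct reference *ref, void *cb_data UNUSED)
 *     {
 *         printf("%s\n", ref->name);
 *         return 0;   // nonzero would stop the iteration
 *     }
 *
 *     // ... int ret = do_for_each_ref_iterator(iter, print_ref, NULL);
 */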