Coverage Report

Created: 2024-09-08 06:24

/src/git/refs/iterator.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Generic reference iterator infrastructure. See refs-internal.h for
3
 * documentation about the design and use of reference iterators.
4
 */
5
6
#include "git-compat-util.h"
7
#include "refs.h"
8
#include "refs/refs-internal.h"
9
#include "iterator.h"
10
11
int ref_iterator_advance(struct ref_iterator *ref_iterator)
12
18.2k
{
13
18.2k
  return ref_iterator->vtable->advance(ref_iterator);
14
18.2k
}
15
16
int ref_iterator_peel(struct ref_iterator *ref_iterator,
17
          struct object_id *peeled)
18
0
{
19
0
  return ref_iterator->vtable->peel(ref_iterator, peeled);
20
0
}
21
22
int ref_iterator_abort(struct ref_iterator *ref_iterator)
23
19.8k
{
24
19.8k
  return ref_iterator->vtable->abort(ref_iterator);
25
19.8k
}
26
27
void base_ref_iterator_init(struct ref_iterator *iter,
28
          struct ref_iterator_vtable *vtable)
29
19.8k
{
30
19.8k
  iter->vtable = vtable;
31
19.8k
  iter->refname = NULL;
32
19.8k
  iter->referent = NULL;
33
19.8k
  iter->oid = NULL;
34
19.8k
  iter->flags = 0;
35
19.8k
}
36
37
/*
 * Free an iterator object. Frees only the iterator itself; the
 * *_abort() implementations in this file shut down any sub-iterators
 * before calling this.
 */
void base_ref_iterator_free(struct ref_iterator *iter)
{
	/* Help make use-after-free bugs fail quickly: */
	iter->vtable = NULL;
	free(iter);
}
43
44
/*
 * An iterator over no references: its advance() terminates
 * immediately. Carries no state beyond the base iterator.
 */
struct empty_ref_iterator {
	struct ref_iterator base;
};
47
48
static int empty_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	/* There is never anything to yield; end the iteration at once. */
	int status = ref_iterator_abort(ref_iterator);

	return status;
}
52
53
/*
 * Peeling is impossible here: an empty iterator never yields an entry,
 * so a peel call means the caller is misusing the API.
 */
static int empty_ref_iterator_peel(struct ref_iterator *ref_iterator UNUSED,
				   struct object_id *peeled UNUSED)
{
	BUG("peel called for empty iterator");
}
58
59
static int empty_ref_iterator_abort(struct ref_iterator *ref_iterator)
60
16.6k
{
61
16.6k
  base_ref_iterator_free(ref_iterator);
62
16.6k
  return ITER_DONE;
63
16.6k
}
64
65
/* Dispatch table for the empty iterator. */
static struct ref_iterator_vtable empty_ref_iterator_vtable = {
	.advance = empty_ref_iterator_advance,
	.peel = empty_ref_iterator_peel,
	.abort = empty_ref_iterator_abort,
};
70
71
struct ref_iterator *empty_ref_iterator_begin(void)
72
16.6k
{
73
16.6k
  struct empty_ref_iterator *iter = xcalloc(1, sizeof(*iter));
74
16.6k
  struct ref_iterator *ref_iterator = &iter->base;
75
76
16.6k
  base_ref_iterator_init(ref_iterator, &empty_ref_iterator_vtable);
77
16.6k
  return ref_iterator;
78
16.6k
}
79
80
int is_empty_ref_iterator(struct ref_iterator *ref_iterator)
81
1.61k
{
82
1.61k
  return ref_iterator->vtable == &empty_ref_iterator_vtable;
83
1.61k
}
84
85
/*
 * An iterator that interleaves the output of two sub-iterators, using
 * a caller-supplied select callback to decide at each step which
 * entry to yield, skip, or stop on.
 */
struct merge_ref_iterator {
	struct ref_iterator base;

	/* The two sub-iterators; each is set to NULL once exhausted. */
	struct ref_iterator *iter0, *iter1;

	/* Chooses which sub-iterator supplies the next entry. */
	ref_iterator_select_fn *select;
	void *cb_data;

	/*
	 * A pointer to iter0 or iter1 (whichever is supplying the
	 * current value), or NULL if advance has not yet been called.
	 */
	struct ref_iterator **current;
};
99
100
enum iterator_selection ref_iterator_select(struct ref_iterator *iter_worktree,
101
              struct ref_iterator *iter_common,
102
              void *cb_data UNUSED)
103
0
{
104
0
  if (iter_worktree && !iter_common) {
105
    /*
106
     * Return the worktree ref if there are no more common refs.
107
     */
108
0
    return ITER_SELECT_0;
109
0
  } else if (iter_common) {
110
    /*
111
     * In case we have pending worktree and common refs we need to
112
     * yield them based on their lexicographical order. Worktree
113
     * refs that have the same name as common refs shadow the
114
     * latter.
115
     */
116
0
    if (iter_worktree) {
117
0
      int cmp = strcmp(iter_worktree->refname,
118
0
           iter_common->refname);
119
0
      if (cmp < 0)
120
0
        return ITER_SELECT_0;
121
0
      else if (!cmp)
122
0
        return ITER_SELECT_0_SKIP_1;
123
0
    }
124
125
     /*
126
      * We now know that the lexicographically-next ref is a common
127
      * ref. When the common ref is a shared one we return it.
128
      */
129
0
    if (parse_worktree_ref(iter_common->refname, NULL, NULL,
130
0
               NULL) == REF_WORKTREE_SHARED)
131
0
      return ITER_SELECT_1;
132
133
    /*
134
     * Otherwise, if the common ref is a per-worktree ref we skip
135
     * it because it would belong to the main worktree, not ours.
136
     */
137
0
    return ITER_SKIP_1;
138
0
  } else {
139
0
    return ITER_DONE;
140
0
  }
141
0
}
142
143
/*
 * Advance a merge iterator: step the sub-iterator that supplied the
 * previous entry (or both, on the first call), then repeatedly ask the
 * select callback which entry to yield, skip, or stop on. An exhausted
 * sub-iterator is recorded as NULL so select can see it is gone.
 */
static int merge_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct merge_ref_iterator *iter =
		(struct merge_ref_iterator *)ref_iterator;
	int ok;

	if (!iter->current) {
		/* Initialize: advance both iterators to their first entries */
		if ((ok = ref_iterator_advance(iter->iter0)) != ITER_OK) {
			iter->iter0 = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
		if ((ok = ref_iterator_advance(iter->iter1)) != ITER_OK) {
			iter->iter1 = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
	} else {
		/*
		 * Advance the current iterator past the just-used
		 * entry:
		 */
		if ((ok = ref_iterator_advance(*iter->current)) != ITER_OK) {
			*iter->current = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
	}

	/* Loop until we find an entry that we can yield. */
	while (1) {
		struct ref_iterator **secondary;
		enum iterator_selection selection =
			iter->select(iter->iter0, iter->iter1, iter->cb_data);

		if (selection == ITER_SELECT_DONE) {
			return ref_iterator_abort(ref_iterator);
		} else if (selection == ITER_SELECT_ERROR) {
			ref_iterator_abort(ref_iterator);
			return ITER_ERROR;
		}

		/* Low bit of the selection picks which side is "current". */
		if ((selection & ITER_CURRENT_SELECTION_MASK) == 0) {
			iter->current = &iter->iter0;
			secondary = &iter->iter1;
		} else {
			iter->current = &iter->iter1;
			secondary = &iter->iter0;
		}

		/* The select function may also ask us to discard the other side's entry. */
		if (selection & ITER_SKIP_SECONDARY) {
			if ((ok = ref_iterator_advance(*secondary)) != ITER_OK) {
				*secondary = NULL;
				if (ok == ITER_ERROR)
					goto error;
			}
		}

		/* Expose the chosen sub-iterator's entry as our own. */
		if (selection & ITER_YIELD_CURRENT) {
			iter->base.referent = (*iter->current)->referent;
			iter->base.refname = (*iter->current)->refname;
			iter->base.oid = (*iter->current)->oid;
			iter->base.flags = (*iter->current)->flags;
			return ITER_OK;
		}
	}

error:
	/* A sub-iterator failed; tear everything down and report the error. */
	ref_iterator_abort(ref_iterator);
	return ITER_ERROR;
}
215
216
static int merge_ref_iterator_peel(struct ref_iterator *ref_iterator,
217
           struct object_id *peeled)
218
0
{
219
0
  struct merge_ref_iterator *iter =
220
0
    (struct merge_ref_iterator *)ref_iterator;
221
222
0
  if (!iter->current) {
223
0
    BUG("peel called before advance for merge iterator");
224
0
  }
225
0
  return ref_iterator_peel(*iter->current, peeled);
226
0
}
227
228
static int merge_ref_iterator_abort(struct ref_iterator *ref_iterator)
229
0
{
230
0
  struct merge_ref_iterator *iter =
231
0
    (struct merge_ref_iterator *)ref_iterator;
232
0
  int ok = ITER_DONE;
233
234
0
  if (iter->iter0) {
235
0
    if (ref_iterator_abort(iter->iter0) != ITER_DONE)
236
0
      ok = ITER_ERROR;
237
0
  }
238
0
  if (iter->iter1) {
239
0
    if (ref_iterator_abort(iter->iter1) != ITER_DONE)
240
0
      ok = ITER_ERROR;
241
0
  }
242
0
  base_ref_iterator_free(ref_iterator);
243
0
  return ok;
244
0
}
245
246
/* Dispatch table for the merge iterator. */
static struct ref_iterator_vtable merge_ref_iterator_vtable = {
	.advance = merge_ref_iterator_advance,
	.peel = merge_ref_iterator_peel,
	.abort = merge_ref_iterator_abort,
};
251
252
struct ref_iterator *merge_ref_iterator_begin(
253
    struct ref_iterator *iter0, struct ref_iterator *iter1,
254
    ref_iterator_select_fn *select, void *cb_data)
255
0
{
256
0
  struct merge_ref_iterator *iter = xcalloc(1, sizeof(*iter));
257
0
  struct ref_iterator *ref_iterator = &iter->base;
258
259
  /*
260
   * We can't do the same kind of is_empty_ref_iterator()-style
261
   * optimization here as overlay_ref_iterator_begin() does,
262
   * because we don't know the semantics of the select function.
263
   * It might, for example, implement "intersect" by passing
264
   * references through only if they exist in both iterators.
265
   */
266
267
0
  base_ref_iterator_init(ref_iterator, &merge_ref_iterator_vtable);
268
0
  iter->iter0 = iter0;
269
0
  iter->iter1 = iter1;
270
0
  iter->select = select;
271
0
  iter->cb_data = cb_data;
272
0
  iter->current = NULL;
273
0
  return ref_iterator;
274
0
}
275
276
/*
277
 * A ref_iterator_select_fn that overlays the items from front on top
278
 * of those from back (like loose refs over packed refs). See
279
 * overlay_ref_iterator_begin().
280
 */
281
static enum iterator_selection overlay_iterator_select(
282
    struct ref_iterator *front, struct ref_iterator *back,
283
    void *cb_data UNUSED)
284
0
{
285
0
  int cmp;
286
287
0
  if (!back)
288
0
    return front ? ITER_SELECT_0 : ITER_SELECT_DONE;
289
0
  else if (!front)
290
0
    return ITER_SELECT_1;
291
292
0
  cmp = strcmp(front->refname, back->refname);
293
294
0
  if (cmp < 0)
295
0
    return ITER_SELECT_0;
296
0
  else if (cmp > 0)
297
0
    return ITER_SELECT_1;
298
0
  else
299
0
    return ITER_SELECT_0_SKIP_1;
300
0
}
301
302
struct ref_iterator *overlay_ref_iterator_begin(
303
    struct ref_iterator *front, struct ref_iterator *back)
304
1.61k
{
305
  /*
306
   * Optimization: if one of the iterators is empty, return the
307
   * other one rather than incurring the overhead of wrapping
308
   * them.
309
   */
310
1.61k
  if (is_empty_ref_iterator(front)) {
311
1.61k
    ref_iterator_abort(front);
312
1.61k
    return back;
313
1.61k
  } else if (is_empty_ref_iterator(back)) {
314
0
    ref_iterator_abort(back);
315
0
    return front;
316
0
  }
317
318
0
  return merge_ref_iterator_begin(front, back, overlay_iterator_select, NULL);
319
1.61k
}
320
321
/*
 * An iterator that filters a wrapped iterator down to refnames lying
 * within a given prefix, optionally trimming leading characters off
 * each yielded refname.
 */
struct prefix_ref_iterator {
	struct ref_iterator base;

	/* The wrapped iterator; set to NULL once the iteration ends. */
	struct ref_iterator *iter0;
	/* Owned copy of the prefix; freed in prefix_ref_iterator_abort(). */
	char *prefix;
	/* Number of leading characters to trim from yielded refnames. */
	int trim;
};
328
329
/* Return -1, 0, 1 if refname is before, inside, or after the prefix. */
static int compare_prefix(const char *refname, const char *prefix)
{
	const unsigned char *r = (const unsigned char *)refname;
	const unsigned char *p = (const unsigned char *)prefix;

	for (; *p; r++, p++) {
		if (*r != *p)
			return *r < *p ? -1 : +1;
	}

	/* Every prefix byte matched, so refname lies inside the prefix. */
	return 0;
}
342
343
static int prefix_ref_iterator_advance(struct ref_iterator *ref_iterator)
344
1.61k
{
345
1.61k
  struct prefix_ref_iterator *iter =
346
1.61k
    (struct prefix_ref_iterator *)ref_iterator;
347
1.61k
  int ok;
348
349
1.61k
  while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
350
0
    int cmp = compare_prefix(iter->iter0->refname, iter->prefix);
351
352
0
    if (cmp < 0)
353
0
      continue;
354
355
0
    if (cmp > 0) {
356
      /*
357
       * As the source iterator is ordered, we
358
       * can stop the iteration as soon as we see a
359
       * refname that comes after the prefix:
360
       */
361
0
      ok = ref_iterator_abort(iter->iter0);
362
0
      break;
363
0
    }
364
365
0
    if (iter->trim) {
366
      /*
367
       * It is nonsense to trim off characters that
368
       * you haven't already checked for via a
369
       * prefix check, whether via this
370
       * `prefix_ref_iterator` or upstream in
371
       * `iter0`). So if there wouldn't be at least
372
       * one character left in the refname after
373
       * trimming, report it as a bug:
374
       */
375
0
      if (strlen(iter->iter0->refname) <= iter->trim)
376
0
        BUG("attempt to trim too many characters");
377
0
      iter->base.refname = iter->iter0->refname + iter->trim;
378
0
    } else {
379
0
      iter->base.refname = iter->iter0->refname;
380
0
    }
381
382
0
    iter->base.oid = iter->iter0->oid;
383
0
    iter->base.flags = iter->iter0->flags;
384
0
    return ITER_OK;
385
0
  }
386
387
1.61k
  iter->iter0 = NULL;
388
1.61k
  if (ref_iterator_abort(ref_iterator) != ITER_DONE)
389
0
    return ITER_ERROR;
390
1.61k
  return ok;
391
1.61k
}
392
393
static int prefix_ref_iterator_peel(struct ref_iterator *ref_iterator,
394
            struct object_id *peeled)
395
0
{
396
0
  struct prefix_ref_iterator *iter =
397
0
    (struct prefix_ref_iterator *)ref_iterator;
398
399
0
  return ref_iterator_peel(iter->iter0, peeled);
400
0
}
401
402
static int prefix_ref_iterator_abort(struct ref_iterator *ref_iterator)
403
1.61k
{
404
1.61k
  struct prefix_ref_iterator *iter =
405
1.61k
    (struct prefix_ref_iterator *)ref_iterator;
406
1.61k
  int ok = ITER_DONE;
407
408
1.61k
  if (iter->iter0)
409
0
    ok = ref_iterator_abort(iter->iter0);
410
1.61k
  free(iter->prefix);
411
1.61k
  base_ref_iterator_free(ref_iterator);
412
1.61k
  return ok;
413
1.61k
}
414
415
/* Dispatch table for the prefix iterator. */
static struct ref_iterator_vtable prefix_ref_iterator_vtable = {
	.advance = prefix_ref_iterator_advance,
	.peel = prefix_ref_iterator_peel,
	.abort = prefix_ref_iterator_abort,
};
420
421
struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
422
                 const char *prefix,
423
                 int trim)
424
1.61k
{
425
1.61k
  struct prefix_ref_iterator *iter;
426
1.61k
  struct ref_iterator *ref_iterator;
427
428
1.61k
  if (!*prefix && !trim)
429
0
    return iter0; /* optimization: no need to wrap iterator */
430
431
1.61k
  CALLOC_ARRAY(iter, 1);
432
1.61k
  ref_iterator = &iter->base;
433
434
1.61k
  base_ref_iterator_init(ref_iterator, &prefix_ref_iterator_vtable);
435
436
1.61k
  iter->iter0 = iter0;
437
1.61k
  iter->prefix = xstrdup(prefix);
438
1.61k
  iter->trim = trim;
439
440
1.61k
  return ref_iterator;
441
1.61k
}
442
443
/*
 * The iterator passed to the currently-active
 * do_for_each_ref_iterator() call, or NULL when none is active.
 * Saved and restored there, so nested iterations are handled.
 */
struct ref_iterator *current_ref_iter = NULL;
444
445
int do_for_each_ref_iterator(struct ref_iterator *iter,
446
           each_ref_fn fn, void *cb_data)
447
1.61k
{
448
1.61k
  int retval = 0, ok;
449
1.61k
  struct ref_iterator *old_ref_iter = current_ref_iter;
450
451
1.61k
  current_ref_iter = iter;
452
1.61k
  while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
453
0
    retval = fn(iter->refname, iter->referent, iter->oid, iter->flags, cb_data);
454
0
    if (retval) {
455
      /*
456
       * If ref_iterator_abort() returns ITER_ERROR,
457
       * we ignore that error in deference to the
458
       * callback function's return value.
459
       */
460
0
      ref_iterator_abort(iter);
461
0
      goto out;
462
0
    }
463
0
  }
464
465
1.61k
out:
466
1.61k
  current_ref_iter = old_ref_iter;
467
1.61k
  if (ok == ITER_ERROR)
468
0
    return -1;
469
1.61k
  return retval;
470
1.61k
}