Coverage Report

Created: 2026-03-31 06:24

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/git/refs/reftable-backend.c
Line
Count
Source
1
#define USE_THE_REPOSITORY_VARIABLE
2
3
#include "../git-compat-util.h"
4
#include "../abspath.h"
5
#include "../chdir-notify.h"
6
#include "../config.h"
7
#include "../dir.h"
8
#include "../environment.h"
9
#include "../fsck.h"
10
#include "../gettext.h"
11
#include "../hash.h"
12
#include "../hex.h"
13
#include "../ident.h"
14
#include "../iterator.h"
15
#include "../object.h"
16
#include "../parse.h"
17
#include "../path.h"
18
#include "../refs.h"
19
#include "../reftable/reftable-basics.h"
20
#include "../reftable/reftable-error.h"
21
#include "../reftable/reftable-fsck.h"
22
#include "../reftable/reftable-iterator.h"
23
#include "../reftable/reftable-record.h"
24
#include "../reftable/reftable-stack.h"
25
#include "../repo-settings.h"
26
#include "../setup.h"
27
#include "../strmap.h"
28
#include "../trace2.h"
29
#include "../worktree.h"
30
#include "../write-or-die.h"
31
#include "refs-internal.h"
32
33
/*
34
 * Used as a flag in ref_update::flags when the ref_update was via an
35
 * update to HEAD.
36
 */
37
0
#define REF_UPDATE_VIA_HEAD (1 << 8)
38
39
/*
 * A single reftable stack together with a cached reference iterator. The
 * iterator is created lazily on first lookup and reused across lookups; it
 * is torn down whenever the stack reloads (see reftable_backend_on_reload).
 */
struct reftable_backend {
	/* The underlying reftable stack holding the tables. */
	struct reftable_stack *stack;
	/* Cached ref iterator; `it.ops` is NULL until first use. */
	struct reftable_iterator it;
};
43
44
static void reftable_backend_on_reload(void *payload)
45
0
{
46
0
  struct reftable_backend *be = payload;
47
0
  reftable_iterator_destroy(&be->it);
48
0
}
49
50
static int reftable_backend_init(struct reftable_backend *be,
51
         const char *path,
52
         const struct reftable_write_options *_opts)
53
0
{
54
0
  struct reftable_write_options opts = *_opts;
55
0
  opts.on_reload = reftable_backend_on_reload;
56
0
  opts.on_reload_payload = be;
57
0
  return reftable_new_stack(&be->stack, path, &opts);
58
0
}
59
60
/*
 * Release the backend's stack and cached iterator. The iterator may never
 * have been initialized if no reference was read through this backend; the
 * teardown order mirrors the original code and is kept as-is since the
 * iterator may reference state owned by the stack.
 */
static void reftable_backend_release(struct reftable_backend *be)
{
	reftable_stack_destroy(be->stack);
	be->stack = NULL;
	reftable_iterator_destroy(&be->it);
}
66
67
/*
 * Look up a single reference in the backend's stack.
 *
 * On success (return 0) either `referent` is filled and REF_ISSYMREF is set
 * in `*type` (symbolic ref), or `oid` is filled (direct ref). Returns 1 if
 * the reference does not exist and a negative reftable error code on
 * failure.
 */
static int reftable_backend_read_ref(struct reftable_backend *be,
				     const char *refname,
				     struct object_id *oid,
				     struct strbuf *referent,
				     unsigned int *type)
{
	struct reftable_ref_record ref = {0};
	int ret;

	/*
	 * Lazily create the cached iterator; it is reused across lookups and
	 * invalidated via the stack's on_reload callback.
	 */
	if (!be->it.ops) {
		ret = reftable_stack_init_ref_iterator(be->stack, &be->it);
		if (ret)
			goto done;
	}

	ret = reftable_iterator_seek_ref(&be->it, refname);
	if (ret)
		goto done;

	ret = reftable_iterator_next_ref(&be->it, &ref);
	if (ret)
		goto done;

	/*
	 * The seek positions us at the first record at or after `refname`,
	 * so anything but an exact match means the ref does not exist.
	 */
	if (strcmp(ref.refname, refname)) {
		ret = 1;
		goto done;
	}

	if (ref.value_type == REFTABLE_REF_SYMREF) {
		strbuf_reset(referent);
		strbuf_addstr(referent, ref.value.symref);
		*type |= REF_ISSYMREF;
	} else if (reftable_ref_record_val1(&ref)) {
		unsigned int hash_id;

		/* Translate the reftable hash ID into Git's hash algorithm. */
		switch (reftable_stack_hash_id(be->stack)) {
		case REFTABLE_HASH_SHA1:
			hash_id = GIT_HASH_SHA1;
			break;
		case REFTABLE_HASH_SHA256:
			hash_id = GIT_HASH_SHA256;
			break;
		default:
			BUG("unhandled hash ID %d", reftable_stack_hash_id(be->stack));
		}

		oidread(oid, reftable_ref_record_val1(&ref),
			&hash_algos[hash_id]);
	} else {
		/* We got a tombstone, which should not happen. */
		BUG("unhandled reference value type %d", ref.value_type);
	}

done:
	assert(ret != REFTABLE_API_ERROR);
	reftable_ref_record_release(&ref);
	return ret;
}
125
126
struct reftable_ref_store {
	struct ref_store base;

	/*
	 * The main backend refers to the common dir and thus contains common
	 * refs as well as refs of the main repository.
	 */
	struct reftable_backend main_backend;
	/*
	 * The worktree backend refers to the gitdir in case the refdb is opened
	 * via a worktree. It thus contains the per-worktree refs.
	 */
	struct reftable_backend worktree_backend;
	/*
	 * Map of worktree backends by their respective worktree names. The map
	 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
	 */
	struct strmap worktree_backends;
	/* Write options applied to every stack opened by this store. */
	struct reftable_write_options write_options;

	/* REF_STORE_* capability bits, verified in reftable_be_downcast(). */
	unsigned int store_flags;
	/* Cached core.logAllRefUpdates configuration. */
	enum log_refs_config log_all_ref_updates;
	/* Deferred error from setting up the stacks; checked before reads. */
	int err;
};
150
151
/*
152
 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
153
 * reftable_ref_store. required_flags is compared with ref_store's store_flags
154
 * to ensure the ref_store has all required capabilities. "caller" is used in
155
 * any necessary error messages.
156
 */
157
static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
158
                   unsigned int required_flags,
159
                   const char *caller)
160
0
{
161
0
  struct reftable_ref_store *refs;
162
163
0
  if (ref_store->be != &refs_be_reftable)
164
0
    BUG("ref_store is type \"%s\" not \"reftables\" in %s",
165
0
        ref_store->be->name, caller);
166
167
0
  refs = (struct reftable_ref_store *)ref_store;
168
169
0
  if ((refs->store_flags & required_flags) != required_flags)
170
0
    BUG("operation %s requires abilities 0x%x, but only have 0x%x",
171
0
        caller, required_flags, refs->store_flags);
172
173
0
  return refs;
174
0
}
175
176
static int backend_for_worktree(struct reftable_backend **out,
177
        struct reftable_ref_store *store,
178
        const char *worktree_name)
179
0
{
180
0
  struct strbuf worktree_dir = STRBUF_INIT;
181
0
  int ret;
182
183
0
  *out = strmap_get(&store->worktree_backends, worktree_name);
184
0
  if (*out) {
185
0
    ret = 0;
186
0
    goto out;
187
0
  }
188
189
0
  strbuf_addf(&worktree_dir, "%s/worktrees/%s/reftable",
190
0
        store->base.repo->commondir, worktree_name);
191
192
0
  CALLOC_ARRAY(*out, 1);
193
0
  store->err = ret = reftable_backend_init(*out, worktree_dir.buf,
194
0
             &store->write_options);
195
0
  if (ret < 0) {
196
0
    free(*out);
197
0
    goto out;
198
0
  }
199
200
0
  strmap_put(&store->worktree_backends, worktree_name, *out);
201
202
0
out:
203
0
  strbuf_release(&worktree_dir);
204
0
  return ret;
205
0
}
206
207
/*
 * Some refs are global to the repository (refs/heads/{*}), while others are
 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
 * multiple separate databases (ie. multiple reftable/ directories), one for
 * the shared refs, one for the current worktree refs, and one for each
 * additional worktree. For reading, we merge the view of both the shared and
 * the current worktree's refs, when necessary.
 *
 * This function also optionally assigns the rewritten reference name that is
 * local to the stack. This translation is required when using worktree refs
 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
 * those references in their normalized form.
 *
 * When `reload` is nonzero, the chosen stack is reloaded before returning so
 * that the caller sees up-to-date data.
 */
static int backend_for(struct reftable_backend **out,
		       struct reftable_ref_store *store,
		       const char *refname,
		       const char **rewritten_ref,
		       int reload)
{
	const char *wtname;
	int wtname_len;
	int ret;

	/* Without a refname, default to the shared/main stack. */
	if (!refname) {
		*out = &store->main_backend;
		ret = 0;
		goto out;
	}

	switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
	case REF_WORKTREE_OTHER: {
		static struct strbuf wtname_buf = STRBUF_INIT;

		/*
		 * We're using a static buffer here so that we don't need to
		 * allocate the worktree name whenever we look up a reference.
		 * This could be avoided if the strmap interface knew how to
		 * handle keys with a length.
		 */
		strbuf_reset(&wtname_buf);
		strbuf_add(&wtname_buf, wtname, wtname_len);

		/*
		 * There is an edge case here: when the worktree references the
		 * current worktree, then we set up the stack once via
		 * `worktree_backends` and once via `worktree_backend`. This is
		 * wasteful, but in the reading case it shouldn't matter. And
		 * in the writing case we would notice that the stack is locked
		 * already and error out when trying to write a reference via
		 * both stacks.
		 */
		ret = backend_for_worktree(out, store, wtname_buf.buf);

		goto out;
	}
	case REF_WORKTREE_CURRENT:
		/*
		 * If there is no worktree stack then we're currently in the
		 * main worktree. We thus return the main stack in that case.
		 */
		if (!store->worktree_backend.stack)
			*out = &store->main_backend;
		else
			*out = &store->worktree_backend;
		ret = 0;
		goto out;
	case REF_WORKTREE_MAIN:
	case REF_WORKTREE_SHARED:
		*out = &store->main_backend;
		ret = 0;
		goto out;
	default:
		BUG("unhandled worktree reference type");
	}

out:
	/* Optionally refresh the chosen stack so callers see fresh data. */
	if (reload && !ret)
		ret = reftable_stack_reload((*out)->stack);
	return ret;
}
287
288
static int should_write_log(struct reftable_ref_store *refs, const char *refname)
289
0
{
290
0
  enum log_refs_config log_refs_cfg = refs->log_all_ref_updates;
291
0
  if (log_refs_cfg == LOG_REFS_UNSET)
292
0
    log_refs_cfg = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
293
294
0
  switch (log_refs_cfg) {
295
0
  case LOG_REFS_NONE:
296
0
    return refs_reflog_exists(&refs->base, refname);
297
0
  case LOG_REFS_ALWAYS:
298
0
    return 1;
299
0
  case LOG_REFS_NORMAL:
300
0
    if (should_autocreate_reflog(log_refs_cfg, refname))
301
0
      return 1;
302
0
    return refs_reflog_exists(&refs->base, refname);
303
0
  default:
304
0
    BUG("unhandled core.logAllRefUpdates value %d", log_refs_cfg);
305
0
  }
306
0
}
307
308
static void fill_reftable_log_record(struct reftable_log_record *log, const struct ident_split *split)
309
0
{
310
0
  const char *tz_begin;
311
0
  int sign = 1;
312
313
0
  reftable_log_record_release(log);
314
0
  log->value_type = REFTABLE_LOG_UPDATE;
315
0
  log->value.update.name =
316
0
    xstrndup(split->name_begin, split->name_end - split->name_begin);
317
0
  log->value.update.email =
318
0
    xstrndup(split->mail_begin, split->mail_end - split->mail_begin);
319
0
  log->value.update.time = atol(split->date_begin);
320
321
0
  tz_begin = split->tz_begin;
322
0
  if (*tz_begin == '-') {
323
0
    sign = -1;
324
0
    tz_begin++;
325
0
  }
326
0
  if (*tz_begin == '+') {
327
0
    sign = 1;
328
0
    tz_begin++;
329
0
  }
330
331
0
  log->value.update.tz_offset = sign * atoi(tz_begin);
332
0
}
333
334
static int reftable_be_config(const char *var, const char *value,
335
            const struct config_context *ctx,
336
            void *_opts)
337
0
{
338
0
  struct reftable_write_options *opts = _opts;
339
340
0
  if (!strcmp(var, "reftable.blocksize")) {
341
0
    unsigned long block_size = git_config_ulong(var, value, ctx->kvi);
342
0
    if (block_size > 16777215)
343
0
      die("reftable block size cannot exceed 16MB");
344
0
    opts->block_size = block_size;
345
0
  } else if (!strcmp(var, "reftable.restartinterval")) {
346
0
    unsigned long restart_interval = git_config_ulong(var, value, ctx->kvi);
347
0
    if (restart_interval > UINT16_MAX)
348
0
      die("reftable block size cannot exceed %u", (unsigned)UINT16_MAX);
349
0
    opts->restart_interval = restart_interval;
350
0
  } else if (!strcmp(var, "reftable.indexobjects")) {
351
0
    opts->skip_index_objects = !git_config_bool(var, value);
352
0
  } else if (!strcmp(var, "reftable.geometricfactor")) {
353
0
    unsigned long factor = git_config_ulong(var, value, ctx->kvi);
354
0
    if (factor > UINT8_MAX)
355
0
      die("reftable geometric factor cannot exceed %u", (unsigned)UINT8_MAX);
356
0
    opts->auto_compaction_factor = factor;
357
0
  } else if (!strcmp(var, "reftable.locktimeout")) {
358
0
    int64_t lock_timeout = git_config_int64(var, value, ctx->kvi);
359
0
    if (lock_timeout > LONG_MAX)
360
0
      die("reftable lock timeout cannot exceed %"PRIdMAX, (intmax_t)LONG_MAX);
361
0
    if (lock_timeout < 0 && lock_timeout != -1)
362
0
      die("reftable lock timeout does not support negative values other than -1");
363
0
    opts->lock_timeout_ms = lock_timeout;
364
0
  }
365
366
0
  return 0;
367
0
}
368
369
/*
 * fsync callback handed to the reftable library; routes syncing through
 * Git's fsync_component() wrapper for the reference component.
 */
static int reftable_be_fsync(int fd)
{
	return fsync_component(FSYNC_COMPONENT_REFERENCE, fd);
}
373
374
/*
 * Open a reftable ref store for the given repository. This sets up write
 * options (hash algorithm, permissions, fsync, config overrides), the main
 * stack in the common dir and — when opened via a worktree — the
 * per-worktree stack. Setup errors are recorded in `refs->err` rather than
 * failing the call; a ref_store is always returned.
 */
static struct ref_store *reftable_be_init(struct repository *repo,
					  const char *payload,
					  const char *gitdir,
					  unsigned int store_flags)
{
	struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
	struct strbuf ref_common_dir = STRBUF_INIT;
	struct strbuf refdir = STRBUF_INIT;
	struct strbuf path = STRBUF_INIT;
	bool is_worktree;
	mode_t mask;

	/* Read the current umask without changing it. */
	mask = umask(0);
	umask(mask);

	refs_compute_filesystem_location(gitdir, payload, &is_worktree, &refdir,
					 &ref_common_dir);

	base_ref_store_init(&refs->base, repo, refdir.buf, &refs_be_reftable);
	strmap_init(&refs->worktree_backends);
	refs->store_flags = store_flags;
	refs->log_all_ref_updates = repo_settings_get_log_all_ref_updates(repo);

	/* Map the repository's hash algorithm onto the reftable hash ID. */
	switch (repo->hash_algo->format_id) {
	case GIT_SHA1_FORMAT_ID:
		refs->write_options.hash_id = REFTABLE_HASH_SHA1;
		break;
	case GIT_SHA256_FORMAT_ID:
		refs->write_options.hash_id = REFTABLE_HASH_SHA256;
		break;
	default:
		BUG("unknown hash algorithm %d", repo->hash_algo->format_id);
	}
	refs->write_options.default_permissions = calc_shared_perm(the_repository, 0666 & ~mask);
	refs->write_options.disable_auto_compact =
		!git_env_bool("GIT_TEST_REFTABLE_AUTOCOMPACTION", 1);
	refs->write_options.lock_timeout_ms = 100;
	refs->write_options.fsync = reftable_be_fsync;

	/* Let reftable.* config keys override the defaults set above. */
	repo_config(the_repository, reftable_be_config, &refs->write_options);

	/*
	 * It is somewhat unfortunate that we have to mirror the default block
	 * size of the reftable library here. But given that the write options
	 * wouldn't be updated by the library here, and given that we require
	 * the proper block size to trim reflog message so that they fit, we
	 * must set up a proper value here.
	 */
	if (!refs->write_options.block_size)
		refs->write_options.block_size = 4096;

	/*
	 * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
	 * This stack contains both the shared and the main worktree refs.
	 */
	strbuf_addbuf(&path, &ref_common_dir);
	if (!is_worktree) {
		/*
		 * NOTE(review): the path is only canonicalized via realpath()
		 * in the non-worktree case — confirm whether worktree common
		 * dirs are guaranteed to already be in canonical form.
		 */
		strbuf_reset(&path);
		strbuf_realpath(&path, ref_common_dir.buf, 0);
	}
	strbuf_addstr(&path, "/reftable");
	refs->err = reftable_backend_init(&refs->main_backend, path.buf,
					  &refs->write_options);
	if (refs->err)
		goto done;

	/*
	 * If we're in a worktree we also need to set up the worktree reftable
	 * stack that is contained in the per-worktree GIT_DIR.
	 *
	 * Ideally, we would also add the stack to our worktree stack map. But
	 * we have no way to figure out the worktree name here and thus can't
	 * do it efficiently.
	 */
	if (is_worktree) {
		strbuf_addstr(&refdir, "/reftable");

		refs->err = reftable_backend_init(&refs->worktree_backend, refdir.buf,
						  &refs->write_options);
		if (refs->err)
			goto done;
	}

	chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);

done:
	assert(refs->err != REFTABLE_API_ERROR);
	strbuf_release(&ref_common_dir);
	strbuf_release(&refdir);
	strbuf_release(&path);
	return &refs->base;
}
466
467
static void reftable_be_release(struct ref_store *ref_store)
468
0
{
469
0
  struct reftable_ref_store *refs = reftable_be_downcast(ref_store, 0, "release");
470
0
  struct strmap_entry *entry;
471
0
  struct hashmap_iter iter;
472
473
0
  if (refs->main_backend.stack)
474
0
    reftable_backend_release(&refs->main_backend);
475
0
  if (refs->worktree_backend.stack)
476
0
    reftable_backend_release(&refs->worktree_backend);
477
478
0
  strmap_for_each_entry(&refs->worktree_backends, &iter, entry) {
479
0
    struct reftable_backend *be = entry->value;
480
0
    reftable_backend_release(be);
481
0
    free(be);
482
0
  }
483
0
  strmap_clear(&refs->worktree_backends, 0);
484
0
}
485
486
static int reftable_be_create_on_disk(struct ref_store *ref_store,
487
              int flags UNUSED,
488
              struct strbuf *err UNUSED)
489
0
{
490
0
  struct reftable_ref_store *refs =
491
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "create");
492
0
  struct strbuf sb = STRBUF_INIT;
493
494
0
  strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
495
0
  safe_create_dir(the_repository, sb.buf, 1);
496
0
  strbuf_reset(&sb);
497
498
0
  strbuf_release(&sb);
499
0
  return 0;
500
0
}
501
502
/*
 * Remove the ref store's on-disk data by deleting the "reftable/" directory
 * recursively. On failure an error message is appended to `err` and -1 is
 * returned. Note that `refs->base.gitdir` remains valid after the release
 * below, since reftable_be_release() only closes the stacks.
 */
static int reftable_be_remove_on_disk(struct ref_store *ref_store,
				      struct strbuf *err)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "remove");
	struct strbuf sb = STRBUF_INIT;
	int ret = 0;

	/*
	 * Release the ref store such that all stacks are closed. This is
	 * required so that the "tables.list" file is not open anymore, which
	 * would otherwise make it impossible to remove the file on Windows.
	 */
	reftable_be_release(ref_store);

	strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
	if (remove_dir_recursively(&sb, 0) < 0) {
		strbuf_addf(err, "could not delete reftables: %s",
			    strerror(errno));
		ret = -1;
	}

	strbuf_release(&sb);
	return ret;
}
527
528
/*
 * Iterator over the references of a single reftable stack, implementing the
 * generic ref_iterator interface.
 */
struct reftable_ref_iterator {
	struct ref_iterator base;
	struct reftable_ref_store *refs;
	struct reftable_iterator iter;
	/* The record currently pointed at; released with the iterator. */
	struct reftable_ref_record ref;
	struct object_id oid;
	struct object_id peeled_oid;

	/* Optional prefix; iteration stops at the first non-matching ref. */
	char *prefix;
	size_t prefix_len;
	/* Sorted, NULL-terminated list of glob-free patterns to skip. */
	char **exclude_patterns;
	size_t exclude_patterns_index;
	/* Length of the current pattern, cached lazily (0 = not cached). */
	size_t exclude_patterns_strlen;
	/* REFS_FOR_EACH_* flags controlling filtering. */
	unsigned int flags;
	/* Zero while iterating, >0 when exhausted, <0 on error. */
	int err;
};
544
545
/*
 * Handle exclude patterns. Returns either `1`, which tells the caller that the
 * current reference shall not be shown. Or `0`, which indicates that it should
 * be shown.
 */
static int should_exclude_current_ref(struct reftable_ref_iterator *iter)
{
	while (iter->exclude_patterns[iter->exclude_patterns_index]) {
		const char *pattern = iter->exclude_patterns[iter->exclude_patterns_index];
		char *ref_after_pattern;
		int cmp;

		/*
		 * Lazily cache the pattern length so that we don't have to
		 * recompute it every time this function is called.
		 */
		if (!iter->exclude_patterns_strlen)
			iter->exclude_patterns_strlen = strlen(pattern);

		/*
		 * When the reference name is lexicographically bigger than the
		 * current exclude pattern we know that it won't ever match any
		 * of the following references, either. We thus advance to the
		 * next pattern and re-check whether it matches.
		 *
		 * Otherwise, if it's smaller, then we do not have a match and
		 * thus want to show the current reference.
		 */
		cmp = strncmp(iter->ref.refname, pattern,
			      iter->exclude_patterns_strlen);
		if (cmp > 0) {
			iter->exclude_patterns_index++;
			iter->exclude_patterns_strlen = 0;
			continue;
		}
		if (cmp < 0)
			return 0;

		/*
		 * The reference shares a prefix with the exclude pattern and
		 * shall thus be omitted. We skip all references that match the
		 * pattern by seeking to the first reference after the block of
		 * matches.
		 *
		 * This is done by appending the highest possible character to
		 * the pattern. Consequently, all references that have the
		 * pattern as prefix and whose suffix starts with anything in
		 * the range [0x00, 0xfe] are skipped. And given that 0xff is a
		 * non-printable character that shouldn't ever be in a ref name,
		 * we'd not yield any such record, either.
		 *
		 * Note that the seeked-to reference may also be excluded. This
		 * is not handled here though, but the caller is expected to
		 * loop and re-verify the next reference for us.
		 */
		ref_after_pattern = xstrfmt("%s%c", pattern, 0xff);
		/*
		 * A failed re-seek is recorded in iter->err, which terminates
		 * the caller's advance loop.
		 */
		iter->err = reftable_iterator_seek_ref(&iter->iter, ref_after_pattern);
		iter->exclude_patterns_index++;
		iter->exclude_patterns_strlen = 0;
		trace2_counter_add(TRACE2_COUNTER_ID_REFTABLE_RESEEKS, 1);

		free(ref_after_pattern);
		return 1;
	}

	return 0;
}
612
613
/*
 * Advance to the next reference that passes all configured filters (prefix,
 * exclude patterns, per-worktree/root-ref flags, brokenness checks) and
 * expose it via `iter->base.ref`. Returns ITER_OK, ITER_DONE when the
 * iterator is exhausted, or ITER_ERROR.
 */
static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct reftable_ref_iterator *iter =
		(struct reftable_ref_iterator *)ref_iterator;
	struct reftable_ref_store *refs = iter->refs;
	const char *referent = NULL;

	while (!iter->err) {
		int flags = 0;

		iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
		if (iter->err)
			break;

		/*
		 * The files backend only lists references contained in "refs/" unless
		 * the root refs are to be included. We emulate the same behaviour here.
		 */
		if (!starts_with(iter->ref.refname, "refs/") &&
		    !(iter->flags & REFS_FOR_EACH_INCLUDE_ROOT_REFS &&
		      is_root_ref(iter->ref.refname))) {
			continue;
		}

		/*
		 * Records are sorted, so the first ref that fails the prefix
		 * check means the iteration is complete.
		 */
		if (iter->prefix_len &&
		    strncmp(iter->prefix, iter->ref.refname, iter->prefix_len)) {
			iter->err = 1;
			break;
		}

		if (iter->exclude_patterns && should_exclude_current_ref(iter))
			continue;

		if (iter->flags & REFS_FOR_EACH_PER_WORKTREE_ONLY &&
		    parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
			    REF_WORKTREE_CURRENT)
			continue;

		/* Extract object ID(s) or resolve the symref target. */
		switch (iter->ref.value_type) {
		case REFTABLE_REF_VAL1:
			oidread(&iter->oid, iter->ref.value.val1,
				refs->base.repo->hash_algo);
			break;
		case REFTABLE_REF_VAL2:
			oidread(&iter->oid, iter->ref.value.val2.value,
				refs->base.repo->hash_algo);
			oidread(&iter->peeled_oid, iter->ref.value.val2.target_value,
				refs->base.repo->hash_algo);
			break;
		case REFTABLE_REF_SYMREF:
			referent = refs_resolve_ref_unsafe(&iter->refs->base,
							   iter->ref.refname,
							   RESOLVE_REF_READING,
							   &iter->oid, &flags);
			if (!referent)
				oidclr(&iter->oid, refs->base.repo->hash_algo);
			break;
		default:
			BUG("unhandled reference value type %d", iter->ref.value_type);
		}

		if (is_null_oid(&iter->oid))
			flags |= REF_ISBROKEN;

		if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
			if (!refname_is_safe(iter->ref.refname))
				die(_("refname is dangerous: %s"), iter->ref.refname);
			oidclr(&iter->oid, refs->base.repo->hash_algo);
			flags |= REF_BAD_NAME | REF_ISBROKEN;
		}

		if (iter->flags & REFS_FOR_EACH_OMIT_DANGLING_SYMREFS &&
		    flags & REF_ISSYMREF &&
		    flags & REF_ISBROKEN)
			continue;

		if (!(iter->flags & REFS_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
					    &iter->oid, flags))
			continue;

		/* Found an acceptable ref: publish it through the base iterator. */
		memset(&iter->base.ref, 0, sizeof(iter->base.ref));
		iter->base.ref.name = iter->ref.refname;
		iter->base.ref.target = referent;
		iter->base.ref.oid = &iter->oid;
		if (iter->ref.value_type == REFTABLE_REF_VAL2)
			iter->base.ref.peeled_oid = &iter->peeled_oid;
		iter->base.ref.flags = flags;

		break;
	}

	/* Map reftable's convention (>0 done, <0 error) to ITER_* codes. */
	if (iter->err > 0)
		return ITER_DONE;
	if (iter->err < 0)
		return ITER_ERROR;
	return ITER_OK;
}
711
712
static int reftable_ref_iterator_seek(struct ref_iterator *ref_iterator,
713
              const char *refname, unsigned int flags)
714
0
{
715
0
  struct reftable_ref_iterator *iter =
716
0
    (struct reftable_ref_iterator *)ref_iterator;
717
718
  /* Unset any previously set prefix */
719
0
  FREE_AND_NULL(iter->prefix);
720
0
  iter->prefix_len = 0;
721
722
0
  if (flags & REF_ITERATOR_SEEK_SET_PREFIX) {
723
0
    iter->prefix = xstrdup_or_null(refname);
724
0
    iter->prefix_len = refname ? strlen(refname) : 0;
725
0
  }
726
0
  iter->err = reftable_iterator_seek_ref(&iter->iter, refname);
727
728
0
  return iter->err;
729
0
}
730
731
static void reftable_ref_iterator_release(struct ref_iterator *ref_iterator)
732
0
{
733
0
  struct reftable_ref_iterator *iter =
734
0
    (struct reftable_ref_iterator *)ref_iterator;
735
0
  reftable_ref_record_release(&iter->ref);
736
0
  reftable_iterator_destroy(&iter->iter);
737
0
  if (iter->exclude_patterns) {
738
0
    for (size_t i = 0; iter->exclude_patterns[i]; i++)
739
0
      free(iter->exclude_patterns[i]);
740
0
    free(iter->exclude_patterns);
741
0
  }
742
0
  free(iter->prefix);
743
0
}
744
745
/* ref_iterator operations backing struct reftable_ref_iterator. */
static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
	.advance = reftable_ref_iterator_advance,
	.seek = reftable_ref_iterator_seek,
	.release = reftable_ref_iterator_release,
};
750
751
/* Comparison callback for QSORT over an array of C strings. */
static int qsort_strcmp(const void *va, const void *vb)
{
	return strcmp(*(const char **)va, *(const char **)vb);
}
757
758
static char **filter_exclude_patterns(const char **exclude_patterns)
759
0
{
760
0
  size_t filtered_size = 0, filtered_alloc = 0;
761
0
  char **filtered = NULL;
762
763
0
  if (!exclude_patterns)
764
0
    return NULL;
765
766
0
  for (size_t i = 0; ; i++) {
767
0
    const char *exclude_pattern = exclude_patterns[i];
768
0
    int has_glob = 0;
769
770
0
    if (!exclude_pattern)
771
0
      break;
772
773
0
    for (const char *p = exclude_pattern; *p; p++) {
774
0
      has_glob = is_glob_special(*p);
775
0
      if (has_glob)
776
0
        break;
777
0
    }
778
0
    if (has_glob)
779
0
      continue;
780
781
0
    ALLOC_GROW(filtered, filtered_size + 1, filtered_alloc);
782
0
    filtered[filtered_size++] = xstrdup(exclude_pattern);
783
0
  }
784
785
0
  if (filtered_size) {
786
0
    QSORT(filtered, filtered_size, qsort_strcmp);
787
0
    ALLOC_GROW(filtered, filtered_size + 1, filtered_alloc);
788
0
    filtered[filtered_size++] = NULL;
789
0
  }
790
791
0
  return filtered;
792
0
}
793
794
/*
 * Allocate an iterator over the given stack, positioned at `prefix`.
 *
 * Note that this function never returns NULL: any setup failure (including
 * a pre-existing store error) is recorded in the returned iterator's `err`
 * field and surfaces from the first advance() call instead.
 */
static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
							    struct reftable_stack *stack,
							    const char *prefix,
							    const char **exclude_patterns,
							    int flags)
{
	struct reftable_ref_iterator *iter;
	int ret;

	iter = xcalloc(1, sizeof(*iter));
	base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable);
	iter->base.ref.oid = &iter->oid;
	iter->flags = flags;
	iter->refs = refs;
	/* Only glob-free patterns survive; see filter_exclude_patterns(). */
	iter->exclude_patterns = filter_exclude_patterns(exclude_patterns);

	/* Propagate an error from opening the ref store, if any. */
	ret = refs->err;
	if (ret)
		goto done;

	/* Reload so the iteration sees the stack's current state. */
	ret = reftable_stack_reload(stack);
	if (ret)
		goto done;

	ret = reftable_stack_init_ref_iterator(stack, &iter->iter);
	if (ret)
		goto done;

	ret = reftable_ref_iterator_seek(&iter->base, prefix,
					 REF_ITERATOR_SEEK_SET_PREFIX);
	if (ret)
		goto done;

done:
	iter->err = ret;
	return iter;
}
831
832
/*
 * Begin iteration over the store's references. When the store was opened
 * via a worktree, the per-worktree and the shared refs are merged into a
 * single iterator; otherwise only the main stack is iterated.
 */
static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
						       const char *prefix,
						       const char **exclude_patterns,
						       unsigned int flags)
{
	struct reftable_ref_iterator *main_iter, *worktree_iter;
	struct reftable_ref_store *refs;
	unsigned int required_flags = REF_STORE_READ;

	/* Resolving refs to objects additionally needs the object database. */
	if (!(flags & REFS_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");

	main_iter = ref_iterator_for_stack(refs, refs->main_backend.stack, prefix,
					   exclude_patterns, flags);

	/*
	 * The worktree stack is only set when we're in an actual worktree
	 * right now. If we aren't, then we return the common reftable
	 * iterator, only.
	 */
	if (!refs->worktree_backend.stack)
		return &main_iter->base;

	/*
	 * Otherwise we merge both the common and the per-worktree refs into a
	 * single iterator.
	 */
	worktree_iter = ref_iterator_for_stack(refs, refs->worktree_backend.stack, prefix,
					       exclude_patterns, flags);
	return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
					ref_iterator_select, NULL);
}
865
866
static int reftable_be_read_raw_ref(struct ref_store *ref_store,
867
            const char *refname,
868
            struct object_id *oid,
869
            struct strbuf *referent,
870
            unsigned int *type,
871
            int *failure_errno)
872
0
{
873
0
  struct reftable_ref_store *refs =
874
0
    reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
875
0
  struct reftable_backend *be;
876
0
  int ret;
877
878
0
  if (refs->err < 0)
879
0
    return refs->err;
880
881
0
  ret = backend_for(&be, refs, refname, &refname, 1);
882
0
  if (ret)
883
0
    return ret;
884
885
0
  ret = reftable_backend_read_ref(be, refname, oid, referent, type);
886
0
  if (ret < 0)
887
0
    return ret;
888
0
  if (ret > 0) {
889
0
    *failure_errno = ENOENT;
890
0
    return -1;
891
0
  }
892
893
0
  return 0;
894
0
}
895
896
static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
897
           const char *refname,
898
           struct strbuf *referent)
899
0
{
900
0
  struct reftable_ref_store *refs =
901
0
    reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
902
0
  struct reftable_backend *be;
903
0
  struct object_id oid;
904
0
  unsigned int type = 0;
905
0
  int ret;
906
907
0
  ret = backend_for(&be, refs, refname, &refname, 1);
908
0
  if (ret)
909
0
    return ret;
910
911
0
  ret = reftable_backend_read_ref(be, refname, &oid, referent, &type);
912
0
  if (ret)
913
0
    ret = -1;
914
0
  else if (type == REF_ISSYMREF)
915
0
    ; /* happy */
916
0
  else
917
0
    ret = NOT_A_SYMREF;
918
0
  return ret;
919
0
}
920
921
/*
 * A single queued ref update together with the object ID the reference
 * pointed at when the transaction was prepared (copied in by
 * queue_transaction_update()); used later for the reflog's old value.
 */
struct reftable_transaction_update {
	struct ref_update *update;
	struct object_id current_oid;
};
925
926
/*
 * Per-stack state of a reference transaction: all queued updates that target
 * the same reftable stack, together with the locked addition they will be
 * written into.
 */
struct write_transaction_table_arg {
	struct reftable_ref_store *refs;
	/* Backend whose stack the updates apply to. */
	struct reftable_backend *be;
	/* Locked addition obtained via reftable_stack_new_addition(). */
	struct reftable_addition *addition;
	struct reftable_transaction_update *updates;
	size_t updates_nr;
	size_t updates_alloc;
	/* Counted up front so that `updates` can be preallocated. */
	size_t updates_expected;
	/* Highest per-update index; widens the writer's update-index range. */
	uint64_t max_index;
};
936
937
/*
 * Backend data attached to a ref_transaction: one write_transaction_table_arg
 * per reftable stack touched by the transaction.
 */
struct reftable_transaction_data {
	struct write_transaction_table_arg *args;
	size_t args_nr, args_alloc;
};
941
942
static void free_transaction_data(struct reftable_transaction_data *tx_data)
943
0
{
944
0
  if (!tx_data)
945
0
    return;
946
0
  for (size_t i = 0; i < tx_data->args_nr; i++) {
947
0
    reftable_addition_destroy(tx_data->args[i].addition);
948
0
    free(tx_data->args[i].updates);
949
0
  }
950
0
  free(tx_data->args);
951
0
  free(tx_data);
952
0
}
953
954
/*
 * Prepare transaction update for the given reference update. This will cause
 * us to lock the corresponding reftable stack for concurrent modification.
 *
 * When `out` is non-NULL it receives the per-stack argument the update was
 * accounted to. Returns 0 on success or a (reftable) error code; on lock
 * contention an error message is appended to `err`.
 */
static int prepare_transaction_update(struct write_transaction_table_arg **out,
				      struct reftable_ref_store *refs,
				      struct reftable_transaction_data *tx_data,
				      struct ref_update *update,
				      struct strbuf *err)
{
	struct write_transaction_table_arg *arg = NULL;
	struct reftable_backend *be;
	size_t i;
	int ret;

	/*
	 * This function gets called in a loop, and we don't want to repeatedly
	 * reload the stack for every single ref update. Instead, we manually
	 * reload further down in the case where we haven't yet prepared the
	 * specific `reftable_backend`.
	 */
	ret = backend_for(&be, refs, update->refname, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Search for a preexisting stack update. If there is one then we add
	 * the update to it, otherwise we set up a new stack update.
	 */
	for (i = 0; !arg && i < tx_data->args_nr; i++)
		if (tx_data->args[i].be == be)
			arg = &tx_data->args[i];

	if (!arg) {
		struct reftable_addition *addition;

		/* First update for this stack: lock it (with a reload). */
		ret = reftable_stack_new_addition(&addition, be->stack,
						  REFTABLE_STACK_NEW_ADDITION_RELOAD);
		if (ret) {
			if (ret == REFTABLE_LOCK_ERROR)
				strbuf_addstr(err, "cannot lock references");
			return ret;
		}

		ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
			   tx_data->args_alloc);
		arg = &tx_data->args[tx_data->args_nr++];
		arg->refs = refs;
		arg->be = be;
		arg->addition = addition;
		arg->updates = NULL;
		arg->updates_nr = 0;
		arg->updates_alloc = 0;
		arg->updates_expected = 0;
		arg->max_index = 0;
	}

	/* Count expected updates so the array can be preallocated later. */
	arg->updates_expected++;

	if (out)
		*out = arg;

	return 0;
}
1018
1019
/*
 * Queue a reference update for the correct stack. We potentially need to
 * handle multiple stack updates in a single transaction when it spans across
 * multiple worktrees.
 *
 * `current_oid` is the value the reference currently has; it is copied so
 * that it can later serve as the reflog's old value. A pointer to the queued
 * entry is stashed in `update->backend_data`, which also guards against
 * queueing the same update twice.
 */
static int queue_transaction_update(struct reftable_ref_store *refs,
				    struct reftable_transaction_data *tx_data,
				    struct ref_update *update,
				    struct object_id *current_oid,
				    struct strbuf *err)
{
	struct write_transaction_table_arg *arg = NULL;
	int ret;

	if (update->backend_data)
		BUG("reference update queued more than once");

	ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
	if (ret < 0)
		return ret;

	ALLOC_GROW(arg->updates, arg->updates_nr + 1,
		   arg->updates_alloc);
	arg->updates[arg->updates_nr].update = update;
	oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
	update->backend_data = &arg->updates[arg->updates_nr++];

	return 0;
}
1048
1049
/*
 * Validate and queue a single update of the transaction. This reads the
 * current value of the reference, enqueues an additional log-only update for
 * "HEAD" when its referent is being rewritten, splits a symref update into an
 * update of the pointed-to reference, and verifies the expected old value.
 * Returns 0 on success or a ref_transaction_error code, with a message
 * appended to `err`.
 */
static enum ref_transaction_error prepare_single_update(struct reftable_ref_store *refs,
							struct reftable_transaction_data *tx_data,
							struct ref_transaction *transaction,
							struct reftable_backend *be,
							struct ref_update *u,
							size_t update_idx,
							struct string_list *refnames_to_check,
							unsigned int head_type,
							struct strbuf *head_referent,
							struct strbuf *referent,
							struct strbuf *err)
{
	enum ref_transaction_error ret = 0;
	struct object_id current_oid = {0};
	const char *rewritten_ref;

	/*
	 * There is no need to reload the respective backends here as
	 * we have already reloaded them when preparing the transaction
	 * update. And given that the stacks have been locked there
	 * shouldn't have been any concurrent modifications of the
	 * stack.
	 */
	ret = backend_for(&be, refs, u->refname, &rewritten_ref, 0);
	if (ret)
		return REF_TRANSACTION_ERROR_GENERIC;

	if (u->flags & REF_LOG_USE_PROVIDED_OIDS) {
		if (!(u->flags & REF_HAVE_OLD) ||
		    !(u->flags & REF_HAVE_NEW) ||
		    !(u->flags & REF_LOG_ONLY)) {
			strbuf_addf(err, _("trying to write reflog for '%s' "
					   "with incomplete values"), u->refname);
			return REF_TRANSACTION_ERROR_GENERIC;
		}

		/* Caller-provided old/new values go straight into the log. */
		if (queue_transaction_update(refs, tx_data, u, &u->old_oid, err))
			return REF_TRANSACTION_ERROR_GENERIC;
		return 0;
	}

	/* Verify that the new object ID is valid. */
	if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
	    !(u->flags & REF_SKIP_OID_VERIFICATION) &&
	    !(u->flags & REF_LOG_ONLY)) {
		struct object *o = parse_object(refs->base.repo, &u->new_oid);
		if (!o) {
			strbuf_addf(err,
				    _("trying to write ref '%s' with nonexistent object %s"),
				    u->refname, oid_to_hex(&u->new_oid));
			return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE;
		}

		if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
			strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
				    oid_to_hex(&u->new_oid), u->refname);
			return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE;
		}
	}

	/*
	 * When we update the reference that HEAD points to we enqueue
	 * a second log-only update for HEAD so that its reflog is
	 * updated accordingly.
	 */
	if (head_type == REF_ISSYMREF &&
	    !(u->flags & REF_LOG_ONLY) &&
	    !(u->flags & REF_UPDATE_VIA_HEAD) &&
	    !strcmp(rewritten_ref, head_referent->buf)) {
		/*
		 * First make sure that HEAD is not already in the
		 * transaction. This check is O(lg N) in the transaction
		 * size, but it happens at most once per transaction.
		 */
		if (string_list_has_string(&transaction->refnames, "HEAD")) {
			/* An entry already existed */
			strbuf_addf(err,
				    _("multiple updates for 'HEAD' (including one "
				      "via its referent '%s') are not allowed"),
				    u->refname);
			return REF_TRANSACTION_ERROR_NAME_CONFLICT;
		}

		ref_transaction_add_update(
				transaction, "HEAD",
				u->flags | REF_LOG_ONLY | REF_NO_DEREF,
				&u->new_oid, &u->old_oid, NULL, NULL, NULL,
				u->msg);
	}

	ret = reftable_backend_read_ref(be, rewritten_ref,
					&current_oid, referent, &u->type);
	if (ret < 0)
		return REF_TRANSACTION_ERROR_GENERIC;
	if (ret > 0 && !ref_update_expects_existing_old_ref(u)) {
		struct string_list_item *item;
		/*
		 * The reference does not exist, and we either have no
		 * old object ID or expect the reference to not exist.
		 * We can thus skip below safety checks as well as the
		 * symref splitting. But we do want to verify that
		 * there is no conflicting reference here so that we
		 * can output a proper error message instead of failing
		 * at a later point.
		 */
		item = string_list_append(refnames_to_check, u->refname);
		item->util = xmalloc(sizeof(update_idx));
		memcpy(item->util, &update_idx, sizeof(update_idx));

		/*
		 * There is no need to write the reference deletion
		 * when the reference in question doesn't exist.
		 */
		if ((u->flags & REF_HAVE_NEW) && !ref_update_has_null_new_value(u)) {
			ret = queue_transaction_update(refs, tx_data, u,
						       &current_oid, err);
			if (ret)
				return REF_TRANSACTION_ERROR_GENERIC;
		}

		return 0;
	}
	if (ret > 0) {
		/* The reference does not exist, but we expected it to. */
		strbuf_addf(err, _("cannot lock ref '%s': "
			    "unable to resolve reference '%s'"),
			    ref_update_original_update_refname(u), u->refname);
		return REF_TRANSACTION_ERROR_NONEXISTENT_REF;
	}

	if (u->type & REF_ISSYMREF) {
		/*
		 * The reftable stack is locked at this point already,
		 * so it is safe to call `refs_resolve_ref_unsafe()`
		 * here without causing races.
		 */
		const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
							       &current_oid, NULL);

		if (u->flags & REF_NO_DEREF) {
			if (u->flags & REF_HAVE_OLD && !resolved) {
				strbuf_addf(err, _("cannot lock ref '%s': "
					    "error reading reference"), u->refname);
				return REF_TRANSACTION_ERROR_GENERIC;
			}
		} else {
			struct ref_update *new_update;
			int new_flags;

			new_flags = u->flags;
			if (!strcmp(rewritten_ref, "HEAD"))
				new_flags |= REF_UPDATE_VIA_HEAD;

			if (string_list_has_string(&transaction->refnames, referent->buf)) {
				strbuf_addf(err,
					    _("multiple updates for '%s' (including one "
					      "via symref '%s') are not allowed"),
					    referent->buf, u->refname);
				return REF_TRANSACTION_ERROR_NAME_CONFLICT;
			}

			/*
			 * If we are updating a symref (eg. HEAD), we should also
			 * update the branch that the symref points to.
			 *
			 * This is generic functionality, and would be better
			 * done in refs.c, but the current implementation is
			 * intertwined with the locking in files-backend.c.
			 */
			new_update = ref_transaction_add_update(
					transaction, referent->buf, new_flags,
					u->new_target ? NULL : &u->new_oid,
					u->old_target ? NULL : &u->old_oid,
					u->new_target, u->old_target,
					u->committer_info, u->msg);

			new_update->parent_update = u;

			/* Change the symbolic ref update to log only. */
			u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
		}
	}

	/*
	 * Verify that the old object matches our expectations. Note
	 * that the error messages here do not make a lot of sense in
	 * the context of the reftable backend as we never lock
	 * individual refs. But the error messages match what the files
	 * backend returns, which keeps our tests happy.
	 */
	if (u->old_target) {
		if (!(u->type & REF_ISSYMREF)) {
			strbuf_addf(err, _("cannot lock ref '%s': "
					   "expected symref with target '%s': "
					   "but is a regular ref"),
				    ref_update_original_update_refname(u),
				    u->old_target);
			return REF_TRANSACTION_ERROR_EXPECTED_SYMREF;
		}

		ret = ref_update_check_old_target(referent->buf, u, err);
		if (ret)
			return ret;
	} else if ((u->flags & (REF_LOG_ONLY | REF_HAVE_OLD)) == REF_HAVE_OLD) {
		if (oideq(&current_oid, &u->old_oid)) {
			/*
			 * Normally matching the expected old oid is enough. Either we
			 * found the ref at the expected state, or we are creating and
			 * expect the null oid (and likewise found nothing).
			 *
			 * But there is one exception for the null oid: if we found a
			 * symref pointing to nothing we'll also get the null oid. In
			 * regular recursive mode, that's good (we'll write to what the
			 * symref points to, which doesn't exist). But in no-deref
			 * mode, it means we'll clobber the symref, even though the
			 * caller asked for this to be a creation event. So flag
			 * that case to preserve the dangling symref.
			 *
			 * Everything else is OK and we can fall through to the
			 * end of the conditional chain.
			 */
			if ((u->flags & REF_NO_DEREF) &&
			    referent->len &&
			    is_null_oid(&u->old_oid)) {
				strbuf_addf(err, _("cannot lock ref '%s': "
					    "dangling symref already exists"),
					    ref_update_original_update_refname(u));
				return REF_TRANSACTION_ERROR_CREATE_EXISTS;
			}
		} else if (is_null_oid(&u->old_oid)) {
			strbuf_addf(err, _("cannot lock ref '%s': "
					   "reference already exists"),
				    ref_update_original_update_refname(u));
			return REF_TRANSACTION_ERROR_CREATE_EXISTS;
		} else if (is_null_oid(&current_oid)) {
			strbuf_addf(err, _("cannot lock ref '%s': "
					   "reference is missing but expected %s"),
				    ref_update_original_update_refname(u),
				    oid_to_hex(&u->old_oid));
			return REF_TRANSACTION_ERROR_NONEXISTENT_REF;
		} else {
			strbuf_addf(err, _("cannot lock ref '%s': "
					   "is at %s but expected %s"),
				    ref_update_original_update_refname(u),
				    oid_to_hex(&current_oid),
				    oid_to_hex(&u->old_oid));
			return REF_TRANSACTION_ERROR_INCORRECT_OLD_VALUE;
		}
	}

	/*
	 * If all of the following conditions are true:
	 *
	 *   - We're not about to write a symref.
	 *   - We're not about to write a log-only entry.
	 *   - Old and new object ID are the same (or there is no new
	 *     value at all).
	 *
	 * Then we're essentially doing a no-op update that can be
	 * skipped. This is not only for the sake of efficiency, but
	 * also skips writing unneeded reflog entries.
	 */
	if ((u->type & REF_ISSYMREF) ||
	    (u->flags & REF_LOG_ONLY) ||
	    (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid)))
		if (queue_transaction_update(refs, tx_data, u, &current_oid, err))
			return REF_TRANSACTION_ERROR_GENERIC;

	return 0;
}
1318
1319
/*
 * Prepare the transaction: lock every reftable stack it touches,
 * preallocate the per-stack update arrays, validate each queued update via
 * prepare_single_update(), and verify that new refnames are available. On
 * failure the transaction is closed and an error message left in `err`.
 */
static int reftable_be_transaction_prepare(struct ref_store *ref_store,
					   struct ref_transaction *transaction,
					   struct strbuf *err)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
	struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
	struct string_list refnames_to_check = STRING_LIST_INIT_NODUP;
	struct reftable_transaction_data *tx_data = NULL;
	struct reftable_backend *be;
	struct object_id head_oid;
	unsigned int head_type = 0;
	size_t i;
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	tx_data = xcalloc(1, sizeof(*tx_data));

	/*
	 * Preprocess all updates. For one we check that there are no duplicate
	 * reference updates in this transaction. Second, we lock all stacks
	 * that will be modified during the transaction.
	 */
	for (i = 0; i < transaction->nr; i++) {
		ret = prepare_transaction_update(NULL, refs, tx_data,
						 transaction->updates[i], err);
		if (ret)
			goto done;
	}

	/*
	 * Now that we have counted updates per stack we can preallocate their
	 * arrays. This avoids having to reallocate many times.
	 */
	for (i = 0; i < tx_data->args_nr; i++) {
		CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
		tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
	}

	/*
	 * TODO: it's dubious whether we should reload the stack that "HEAD"
	 * belongs to or not. In theory, it may happen that we only modify
	 * stacks which are _not_ part of the "HEAD" stack. In that case we
	 * wouldn't have prepared any transaction for its stack and would not
	 * have reloaded it, which may mean that it is stale.
	 *
	 * On the other hand, reloading that stack without locking it feels
	 * wrong, too, as the value of "HEAD" could be modified concurrently at
	 * any point in time.
	 */
	ret = backend_for(&be, refs, "HEAD", NULL, 0);
	if (ret)
		goto done;

	/* Read HEAD once so each update can detect "via HEAD" reflogging. */
	ret = reftable_backend_read_ref(be, "HEAD", &head_oid,
					&head_referent, &head_type);
	if (ret < 0)
		goto done;
	ret = 0;

	for (i = 0; i < transaction->nr; i++) {
		ret = prepare_single_update(refs, tx_data, transaction, be,
					    transaction->updates[i], i,
					    &refnames_to_check, head_type,
					    &head_referent, &referent, err);
		if (ret) {
			/* Partial transactions may mark the update rejected. */
			if (ref_transaction_maybe_set_rejected(transaction, i,
							       ret, err)) {
				ret = 0;
				continue;
			}
			goto done;
		}
	}

	ret = refs_verify_refnames_available(ref_store, &refnames_to_check,
					     &transaction->refnames, NULL,
					     transaction,
					     transaction->flags & REF_TRANSACTION_FLAG_INITIAL,
					     err);
	if (ret < 0)
		goto done;

	transaction->backend_data = tx_data;
	transaction->state = REF_TRANSACTION_PREPARED;

done:
	if (ret < 0) {
		free_transaction_data(tx_data);
		transaction->state = REF_TRANSACTION_CLOSED;
		if (!err->len)
			strbuf_addf(err, _("reftable: transaction prepare: %s"),
				    reftable_error_str(ret));
	}
	strbuf_release(&referent);
	strbuf_release(&head_referent);
	/* Free the index payloads allocated in prepare_single_update(). */
	string_list_clear(&refnames_to_check, 1);

	return ret;
}
1422
1423
/* Drop all prepared per-stack state and mark the transaction closed. */
static int reftable_be_transaction_abort(struct ref_store *ref_store UNUSED,
					 struct ref_transaction *transaction,
					 struct strbuf *err UNUSED)
{
	free_transaction_data(transaction->backend_data);
	transaction->state = REF_TRANSACTION_CLOSED;
	return 0;
}
1432
1433
static int transaction_update_cmp(const void *a, const void *b)
1434
0
{
1435
0
  struct reftable_transaction_update *update_a = (struct reftable_transaction_update *)a;
1436
0
  struct reftable_transaction_update *update_b = (struct reftable_transaction_update *)b;
1437
1438
  /*
1439
   * If there is an index set, it should take preference (default is 0).
1440
   * This ensures that updates with indexes are sorted amongst themselves.
1441
   */
1442
0
  if (update_a->update->index || update_b->update->index)
1443
0
    return update_a->update->index - update_b->update->index;
1444
1445
0
  return strcmp(update_a->update->refname, update_b->update->refname);
1446
0
}
1447
1448
/*
 * Callback passed to reftable_addition_add() by transaction finish: writes
 * all queued ref and log records of a single stack into the new table.
 * Ref deletions also tombstone every existing reflog entry of the ref; all
 * log records are collected first and appended after the ref records.
 */
static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
{
	struct write_transaction_table_arg *arg = cb_data;
	uint64_t ts = reftable_stack_next_update_index(arg->be->stack);
	struct reftable_log_record *logs = NULL;
	struct ident_split committer_ident = {0};
	size_t logs_nr = 0, logs_alloc = 0, i;
	const char *committer_info;
	int ret = 0;

	committer_info = git_committer_info(0);
	if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
		BUG("failed splitting committer info");

	QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);

	/*
	 * During reflog migration, we add indexes for a single reflog with
	 * multiple entries. Each entry will contain a different update_index,
	 * so set the limits accordingly.
	 */
	ret = reftable_writer_set_limits(writer, ts, ts + arg->max_index);
	if (ret < 0)
		goto done;

	for (i = 0; i < arg->updates_nr; i++) {
		struct reftable_transaction_update *tx_update = &arg->updates[i];
		struct ref_update *u = tx_update->update;

		/* Rejected updates (partial transactions) are not written. */
		if (u->rejection_err)
			continue;

		/*
		 * Write a reflog entry when updating a ref to point to
		 * something new in either of the following cases:
		 *
		 * - The reference is about to be deleted. We always want to
		 *   delete the reflog in that case.
		 * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
		 *   the reflog entry.
		 * - `core.logAllRefUpdates` tells us to create the reflog for
		 *   the given ref.
		 */
		if ((u->flags & REF_HAVE_NEW) &&
		    !(u->type & REF_ISSYMREF) &&
		    ref_update_has_null_new_value(u)) {
			struct reftable_log_record log = {0};
			struct reftable_iterator it = {0};

			ret = reftable_stack_init_log_iterator(arg->be->stack, &it);
			if (ret < 0)
				goto done;

			/*
			 * When deleting refs we also delete all reflog entries
			 * with them. While it is not strictly required to
			 * delete reflogs together with their refs, this
			 * matches the behaviour of the files backend.
			 *
			 * Unfortunately, we have no better way than to delete
			 * all reflog entries one by one.
			 */
			ret = reftable_iterator_seek_log(&it, u->refname);
			while (ret == 0) {
				struct reftable_log_record *tombstone;

				ret = reftable_iterator_next_log(&it, &log);
				if (ret < 0)
					break;
				if (ret > 0 || strcmp(log.refname, u->refname)) {
					ret = 0;
					break;
				}

				ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
				tombstone = &logs[logs_nr++];
				tombstone->refname = xstrdup(u->refname);
				tombstone->value_type = REFTABLE_LOG_DELETION;
				tombstone->update_index = log.update_index;
			}

			reftable_log_record_release(&log);
			reftable_iterator_destroy(&it);

			if (ret)
				goto done;
		} else if (!(u->flags & REF_SKIP_CREATE_REFLOG) &&
			   (u->flags & REF_HAVE_NEW) &&
			   (u->flags & REF_FORCE_CREATE_REFLOG ||
			    should_write_log(arg->refs, u->refname))) {
			struct reftable_log_record *log;
			int create_reflog = 1;

			if (u->new_target) {
				if (!refs_resolve_ref_unsafe(&arg->refs->base, u->new_target,
							     RESOLVE_REF_READING, &u->new_oid, NULL)) {
					/*
					 * TODO: currently we skip creating reflogs for dangling
					 * symref updates. It would be nice to capture this as
					 * zero oid updates however.
					 */
					create_reflog = 0;
				}
			}

			if (create_reflog) {
				struct ident_split c;

				ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
				log = &logs[logs_nr++];
				memset(log, 0, sizeof(*log));

				/* Per-update committer overrides the default. */
				if (u->committer_info) {
					if (split_ident_line(&c, u->committer_info,
							     strlen(u->committer_info)))
						BUG("failed splitting committer info");
				} else {
					c = committer_ident;
				}

				fill_reftable_log_record(log, &c);

				/*
				 * Updates are sorted by the writer. So updates for the same
				 * refname need to contain different update indices.
				 */
				log->update_index = ts + u->index;

				log->refname = xstrdup(u->refname);
				memcpy(log->value.update.new_hash,
				       u->new_oid.hash, GIT_MAX_RAWSZ);
				memcpy(log->value.update.old_hash,
				       tx_update->current_oid.hash, GIT_MAX_RAWSZ);
				/* Cap message length so a log block can hold it. */
				log->value.update.message =
					xstrndup(u->msg, arg->refs->write_options.block_size / 2);
			}
		}

		if (u->flags & REF_LOG_ONLY)
			continue;

		if (u->new_target) {
			struct reftable_ref_record ref = {
				.refname = (char *)u->refname,
				.value_type = REFTABLE_REF_SYMREF,
				.value.symref = (char *)u->new_target,
				.update_index = ts,
			};

			ret = reftable_writer_add_ref(writer, &ref);
			if (ret < 0)
				goto done;
		} else if ((u->flags & REF_HAVE_NEW) && ref_update_has_null_new_value(u)) {
			struct reftable_ref_record ref = {
				.refname = (char *)u->refname,
				.update_index = ts,
				.value_type = REFTABLE_REF_DELETION,
			};

			ret = reftable_writer_add_ref(writer, &ref);
			if (ret < 0)
				goto done;
		} else if (u->flags & REF_HAVE_NEW) {
			struct reftable_ref_record ref = {0};
			struct object_id peeled;
			int peel_error;

			ref.refname = (char *)u->refname;
			ref.update_index = ts;

			/* Store the peeled value alongside annotated tags. */
			peel_error = peel_object(arg->refs->base.repo, &u->new_oid, &peeled,
						 PEEL_OBJECT_VERIFY_TAGGED_OBJECT_TYPE);
			if (!peel_error) {
				ref.value_type = REFTABLE_REF_VAL2;
				memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
				memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
			} else if (!is_null_oid(&u->new_oid)) {
				ref.value_type = REFTABLE_REF_VAL1;
				memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
			}

			ret = reftable_writer_add_ref(writer, &ref);
			if (ret < 0)
				goto done;
		}
	}

	/*
	 * Logs are written at the end so that we do not have intermixed ref
	 * and log blocks.
	 */
	if (logs) {
		ret = reftable_writer_add_logs(writer, logs, logs_nr);
		if (ret < 0)
			goto done;
	}

done:
	assert(ret != REFTABLE_API_ERROR);
	for (i = 0; i < logs_nr; i++)
		reftable_log_record_release(&logs[i]);
	free(logs);
	return ret;
}
1652
1653
/*
 * Commit the prepared transaction: for every touched stack, write the queued
 * records via write_transaction_table() and commit the addition. The
 * transaction is closed and its state freed regardless of the outcome.
 */
static int reftable_be_transaction_finish(struct ref_store *ref_store UNUSED,
					  struct ref_transaction *transaction,
					  struct strbuf *err)
{
	struct reftable_transaction_data *tx_data = transaction->backend_data;
	int ret = 0;

	for (size_t i = 0; i < tx_data->args_nr; i++) {
		struct write_transaction_table_arg *table_arg = &tx_data->args[i];

		table_arg->max_index = transaction->max_index;

		ret = reftable_addition_add(table_arg->addition,
					    write_transaction_table, table_arg);
		if (ret < 0)
			break;

		ret = reftable_addition_commit(table_arg->addition);
		if (ret < 0)
			break;
	}

	assert(ret != REFTABLE_API_ERROR);
	free_transaction_data(tx_data);
	transaction->state = REF_TRANSACTION_CLOSED;

	if (ret) {
		strbuf_addf(err, _("reftable: transaction failure: %s"),
			    reftable_error_str(ret));
		return -1;
	}
	return ret;
}
1685
1686
static int reftable_be_optimize(struct ref_store *ref_store,
1687
        struct refs_optimize_opts *opts)
1688
0
{
1689
0
  struct reftable_ref_store *refs =
1690
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "optimize_refs");
1691
0
  struct reftable_stack *stack;
1692
0
  int ret;
1693
1694
0
  if (refs->err)
1695
0
    return refs->err;
1696
1697
0
  stack = refs->worktree_backend.stack;
1698
0
  if (!stack)
1699
0
    stack = refs->main_backend.stack;
1700
1701
0
  if (opts->flags & REFS_OPTIMIZE_AUTO)
1702
0
    ret = reftable_stack_auto_compact(stack);
1703
0
  else
1704
0
    ret = reftable_stack_compact_all(stack, NULL);
1705
0
  if (ret < 0) {
1706
0
    ret = error(_("unable to compact stack: %s"),
1707
0
          reftable_error_str(ret));
1708
0
    goto out;
1709
0
  }
1710
1711
0
  ret = reftable_stack_clean(stack);
1712
0
  if (ret)
1713
0
    goto out;
1714
1715
0
out:
1716
0
  return ret;
1717
0
}
1718
1719
static int reftable_be_optimize_required(struct ref_store *ref_store,
1720
           struct refs_optimize_opts *opts,
1721
           bool *required)
1722
0
{
1723
0
  struct reftable_ref_store *refs = reftable_be_downcast(ref_store, REF_STORE_READ,
1724
0
                     "optimize_refs_required");
1725
0
  struct reftable_stack *stack;
1726
0
  bool use_heuristics = false;
1727
1728
0
  if (refs->err)
1729
0
    return refs->err;
1730
1731
0
  stack = refs->worktree_backend.stack;
1732
0
  if (!stack)
1733
0
    stack = refs->main_backend.stack;
1734
1735
0
  if (opts->flags & REFS_OPTIMIZE_AUTO)
1736
0
    use_heuristics = true;
1737
1738
0
  return reftable_stack_compaction_required(stack, use_heuristics,
1739
0
              required);
1740
0
}
1741
1742
/* Parameters for writing the creation of a symbolic ref into a stack. */
struct write_create_symref_arg {
	struct reftable_ref_store *refs;
	struct reftable_stack *stack;
	struct strbuf *err;
	const char *refname;	/* name of the symref to create */
	const char *target;	/* ref the symref shall point at */
	const char *logmsg;	/* reflog message, if any */
};
1750
1751
/*
 * Parameters for copying a reference. `delete_old` selects rename semantics:
 * the old name is removed and excluded from the name-availability check.
 */
struct write_copy_arg {
	struct reftable_ref_store *refs;
	struct reftable_backend *be;
	const char *oldname;
	const char *newname;
	const char *logmsg;
	int delete_old;
};
1759
1760
/*
 * Table-writing callback that implements copying (and, with
 * arg->delete_old, renaming) a reference inside a single new reftable.
 *
 * It writes, in one table: the new ref record, optionally a deletion
 * record for the old ref, a fresh reflog entry for the new name, copies
 * of all old reflog entries under the new name, and — when renaming —
 * tombstones for every old reflog entry plus, if HEAD pointed at the
 * old name, a HEAD reflog entry mirroring the files backend.
 *
 * Returns 0 on success, a negative reftable error or error() result on
 * failure.
 */
static int write_copy_table(struct reftable_writer *writer, void *cb_data)
{
	struct write_copy_arg *arg = cb_data;
	uint64_t deletion_ts, creation_ts;
	struct reftable_ref_record old_ref = {0}, refs[2] = {0};
	struct reftable_log_record old_log = {0}, *logs = NULL;
	struct reftable_iterator it = {0};
	struct string_list skip = STRING_LIST_INIT_NODUP;
	struct ident_split committer_ident = {0};
	struct strbuf errbuf = STRBUF_INIT;
	size_t logs_nr = 0, logs_alloc = 0, i;
	const char *committer_info;
	int ret;

	committer_info = git_committer_info(0);
	if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
		BUG("failed splitting committer info");

	/* The source ref must exist and must not be a symref. */
	if (reftable_stack_read_ref(arg->be->stack, arg->oldname, &old_ref)) {
		ret = error(_("refname %s not found"), arg->oldname);
		goto done;
	}
	if (old_ref.value_type == REFTABLE_REF_SYMREF) {
		ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
			    arg->oldname);
		goto done;
	}

	/*
	 * There's nothing to do in case the old and new name are the same, so
	 * we exit early in that case.
	 */
	if (!strcmp(arg->oldname, arg->newname)) {
		ret = 0;
		goto done;
	}

	/*
	 * Verify that the new refname is available.
	 */
	if (arg->delete_old)
		string_list_insert(&skip, arg->oldname);
	ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
					    NULL, &skip, 0, &errbuf);
	if (ret < 0) {
		error("%s", errbuf.buf);
		goto done;
	}

	/*
	 * When deleting the old reference we have to use two update indices:
	 * once to delete the old ref and its reflog, and once to create the
	 * new ref and its reflog. They need to be staged with two separate
	 * indices because the new reflog needs to encode both the deletion of
	 * the old branch and the creation of the new branch, and we cannot do
	 * two changes to a reflog in a single update.
	 */
	deletion_ts = creation_ts = reftable_stack_next_update_index(arg->be->stack);
	if (arg->delete_old)
		creation_ts++;
	ret = reftable_writer_set_limits(writer, deletion_ts, creation_ts);
	if (ret < 0)
		goto done;

	/*
	 * Add the new reference. If this is a rename then we also delete the
	 * old reference.
	 */
	refs[0] = old_ref;
	refs[0].refname = xstrdup(arg->newname);
	refs[0].update_index = creation_ts;
	if (arg->delete_old) {
		refs[1].refname = xstrdup(arg->oldname);
		refs[1].value_type = REFTABLE_REF_DELETION;
		refs[1].update_index = deletion_ts;
	}
	ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
	if (ret < 0)
		goto done;

	/*
	 * When deleting the old branch we need to create a reflog entry on the
	 * new branch name that indicates that the old branch has been deleted
	 * and then recreated. This is a tad weird, but matches what the files
	 * backend does.
	 */
	if (arg->delete_old) {
		struct strbuf head_referent = STRBUF_INIT;
		struct object_id head_oid;
		int append_head_reflog;
		unsigned head_type = 0;

		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
		fill_reftable_log_record(&logs[logs_nr], &committer_ident);
		logs[logs_nr].refname = xstrdup(arg->newname);
		logs[logs_nr].update_index = deletion_ts;
		logs[logs_nr].value.update.message =
			xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
		memcpy(logs[logs_nr].value.update.old_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
		logs_nr++;

		ret = reftable_backend_read_ref(arg->be, "HEAD", &head_oid,
						&head_referent, &head_type);
		if (ret < 0)
			goto done;
		append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
		strbuf_release(&head_referent);

		/*
		 * The files backend uses `refs_delete_ref()` to delete the old
		 * branch name, which will append a reflog entry for HEAD in
		 * case it points to the old branch.
		 */
		if (append_head_reflog) {
			/*
			 * Duplicate the previous entry under "HEAD"; the
			 * string members must be deep-copied since each log
			 * record owns (and later frees) its own strings.
			 */
			ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
			logs[logs_nr] = logs[logs_nr - 1];
			logs[logs_nr].refname = xstrdup("HEAD");
			logs[logs_nr].value.update.name =
				xstrdup(logs[logs_nr].value.update.name);
			logs[logs_nr].value.update.email =
				xstrdup(logs[logs_nr].value.update.email);
			logs[logs_nr].value.update.message =
				xstrdup(logs[logs_nr].value.update.message);
			logs_nr++;
		}
	}

	/*
	 * Create the reflog entry for the newly created branch.
	 */
	ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
	memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
	fill_reftable_log_record(&logs[logs_nr], &committer_ident);
	logs[logs_nr].refname = xstrdup(arg->newname);
	logs[logs_nr].update_index = creation_ts;
	logs[logs_nr].value.update.message =
		xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
	memcpy(logs[logs_nr].value.update.new_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
	logs_nr++;

	/*
	 * In addition to writing the reflog entry for the new branch, we also
	 * copy over all log entries from the old reflog. Last but not least,
	 * when renaming we also have to delete all the old reflog entries.
	 */
	ret = reftable_stack_init_log_iterator(arg->be->stack, &it);
	if (ret < 0)
		goto done;

	ret = reftable_iterator_seek_log(&it, arg->oldname);
	if (ret < 0)
		goto done;

	while (1) {
		ret = reftable_iterator_next_log(&it, &old_log);
		if (ret < 0)
			goto done;
		/* ret > 0 means end of iteration; a different refname means
		 * we walked past the old ref's log entries. */
		if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
			ret = 0;
			break;
		}

		free(old_log.refname);

		/*
		 * Copy over the old reflog entry with the new refname.
		 */
		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		logs[logs_nr] = old_log;
		logs[logs_nr].refname = xstrdup(arg->newname);
		logs_nr++;

		/*
		 * Delete the old reflog entry in case we are renaming.
		 */
		if (arg->delete_old) {
			ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
			memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
			logs[logs_nr].refname = xstrdup(arg->oldname);
			logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
			logs[logs_nr].update_index = old_log.update_index;
			logs_nr++;
		}

		/*
		 * Transfer ownership of the log record we're iterating over to
		 * the array of log records. Otherwise, the pointers would get
		 * free'd or reallocated by the iterator.
		 */
		memset(&old_log, 0, sizeof(old_log));
	}

	ret = reftable_writer_add_logs(writer, logs, logs_nr);
	if (ret < 0)
		goto done;

done:
	assert(ret != REFTABLE_API_ERROR);
	reftable_iterator_destroy(&it);
	string_list_clear(&skip, 0);
	strbuf_release(&errbuf);
	for (i = 0; i < logs_nr; i++)
		reftable_log_record_release(&logs[i]);
	free(logs);
	for (i = 0; i < ARRAY_SIZE(refs); i++)
		reftable_ref_record_release(&refs[i]);
	reftable_ref_record_release(&old_ref);
	reftable_log_record_release(&old_log);
	return ret;
}
1971
1972
static int reftable_be_rename_ref(struct ref_store *ref_store,
1973
          const char *oldrefname,
1974
          const char *newrefname,
1975
          const char *logmsg)
1976
0
{
1977
0
  struct reftable_ref_store *refs =
1978
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1979
0
  struct write_copy_arg arg = {
1980
0
    .refs = refs,
1981
0
    .oldname = oldrefname,
1982
0
    .newname = newrefname,
1983
0
    .logmsg = logmsg,
1984
0
    .delete_old = 1,
1985
0
  };
1986
0
  int ret;
1987
1988
0
  ret = refs->err;
1989
0
  if (ret < 0)
1990
0
    goto done;
1991
1992
0
  ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
1993
0
  if (ret)
1994
0
    goto done;
1995
0
  ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg,
1996
0
         REFTABLE_STACK_NEW_ADDITION_RELOAD);
1997
1998
0
done:
1999
0
  assert(ret != REFTABLE_API_ERROR);
2000
0
  return ret;
2001
0
}
2002
2003
static int reftable_be_copy_ref(struct ref_store *ref_store,
2004
        const char *oldrefname,
2005
        const char *newrefname,
2006
        const char *logmsg)
2007
0
{
2008
0
  struct reftable_ref_store *refs =
2009
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
2010
0
  struct write_copy_arg arg = {
2011
0
    .refs = refs,
2012
0
    .oldname = oldrefname,
2013
0
    .newname = newrefname,
2014
0
    .logmsg = logmsg,
2015
0
  };
2016
0
  int ret;
2017
2018
0
  ret = refs->err;
2019
0
  if (ret < 0)
2020
0
    goto done;
2021
2022
0
  ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
2023
0
  if (ret)
2024
0
    goto done;
2025
0
  ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg,
2026
0
         REFTABLE_STACK_NEW_ADDITION_RELOAD);
2027
2028
0
done:
2029
0
  assert(ret != REFTABLE_API_ERROR);
2030
0
  return ret;
2031
0
}
2032
2033
/*
 * Iterator over the set of refnames that have a reflog. Wraps a raw
 * reftable log iterator and deduplicates consecutive records for the
 * same refname so that each name is yielded exactly once.
 */
struct reftable_reflog_iterator {
	struct ref_iterator base;		/* generic iterator interface; must be first */
	struct reftable_ref_store *refs;	/* owning ref store */
	struct reftable_iterator iter;		/* underlying reftable log iterator */
	struct reftable_log_record log;		/* current log record; owns base.ref.name */
	struct strbuf last_name;		/* last refname yielded, for dedup */
	int err;				/* <0 error, >0 exhausted, 0 OK */
};
2041
2042
/*
 * Advance to the next refname that has reflog entries. Skips duplicate
 * records for a name we already yielded and names that fail refname
 * format checks. Returns ITER_OK, ITER_DONE (iterator exhausted) or
 * ITER_ERROR.
 */
static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct reftable_reflog_iterator *iter =
		(struct reftable_reflog_iterator *)ref_iterator;

	while (!iter->err) {
		/* err > 0 signals end of iteration, err < 0 a real error. */
		iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
		if (iter->err)
			break;

		/*
		 * We want the refnames that we have reflogs for, so we skip if
		 * we've already produced this name. This could be faster by
		 * seeking directly to reflog@update_index==0.
		 */
		if (!strcmp(iter->log.refname, iter->last_name.buf))
			continue;

		if (check_refname_format(iter->log.refname,
					 REFNAME_ALLOW_ONELEVEL))
			continue;

		/* Remember the name so later records for it get skipped. */
		strbuf_reset(&iter->last_name);
		strbuf_addstr(&iter->last_name, iter->log.refname);
		iter->base.ref.name = iter->log.refname;

		break;
	}

	if (iter->err > 0)
		return ITER_DONE;
	if (iter->err < 0)
		return ITER_ERROR;
	return ITER_OK;
}
2077
2078
/*
 * Seeking is not supported for reflog iteration; this stub only exists
 * to satisfy the iterator vtable and must never be called.
 */
static int reftable_reflog_iterator_seek(struct ref_iterator *ref_iterator UNUSED,
					 const char *refname UNUSED,
					 unsigned int flags UNUSED)
{
	BUG("reftable reflog iterator cannot be seeked");
	return -1; /* unreachable: BUG() does not return */
}
2085
2086
static void reftable_reflog_iterator_release(struct ref_iterator *ref_iterator)
2087
0
{
2088
0
  struct reftable_reflog_iterator *iter =
2089
0
    (struct reftable_reflog_iterator *)ref_iterator;
2090
0
  reftable_log_record_release(&iter->log);
2091
0
  reftable_iterator_destroy(&iter->iter);
2092
0
  strbuf_release(&iter->last_name);
2093
0
}
2094
2095
/* Vtable plugging the reflog iterator into the generic ref-iterator API. */
static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
	.advance = reftable_reflog_iterator_advance,
	.seek = reftable_reflog_iterator_seek,
	.release = reftable_reflog_iterator_release,
};
2100
2101
/*
 * Allocate and initialize a reflog iterator over the given stack. Never
 * returns NULL: on failure the error is stashed in iter->err and the
 * first advance() call reports it. The caller owns the iterator.
 */
static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
								  struct reftable_stack *stack)
{
	struct reftable_reflog_iterator *iter;
	int ret;

	iter = xcalloc(1, sizeof(*iter));
	base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable);
	strbuf_init(&iter->last_name, 0);
	iter->refs = refs;

	/* A broken ref store poisons the iterator right away. */
	ret = refs->err;
	if (ret)
		goto done;

	/* Pick up tables added since the stack was last read. */
	ret = reftable_stack_reload(stack);
	if (ret < 0)
		goto done;

	ret = reftable_stack_init_log_iterator(stack, &iter->iter);
	if (ret < 0)
		goto done;

	/* Seek to the very first log record in the stack. */
	ret = reftable_iterator_seek_log(&iter->iter, "");
	if (ret < 0)
		goto done;

done:
	iter->err = ret;
	return iter;
}
2132
2133
static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
2134
0
{
2135
0
  struct reftable_ref_store *refs =
2136
0
    reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
2137
0
  struct reftable_reflog_iterator *main_iter, *worktree_iter;
2138
2139
0
  main_iter = reflog_iterator_for_stack(refs, refs->main_backend.stack);
2140
0
  if (!refs->worktree_backend.stack)
2141
0
    return &main_iter->base;
2142
2143
0
  worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_backend.stack);
2144
2145
0
  return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
2146
0
          ref_iterator_select, NULL);
2147
0
}
2148
2149
static int yield_log_record(struct reftable_ref_store *refs,
2150
          struct reftable_log_record *log,
2151
          each_reflog_ent_fn fn,
2152
          void *cb_data)
2153
0
{
2154
0
  struct object_id old_oid, new_oid;
2155
0
  const char *full_committer;
2156
2157
0
  oidread(&old_oid, log->value.update.old_hash, refs->base.repo->hash_algo);
2158
0
  oidread(&new_oid, log->value.update.new_hash, refs->base.repo->hash_algo);
2159
2160
  /*
2161
   * When both the old object ID and the new object ID are null
2162
   * then this is the reflog existence marker. The caller must
2163
   * not be aware of it.
2164
   */
2165
0
  if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
2166
0
    return 0;
2167
2168
0
  full_committer = fmt_ident(log->value.update.name, log->value.update.email,
2169
0
           WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
2170
0
  return fn(log->refname, &old_oid, &new_oid, full_committer,
2171
0
      log->value.update.time, log->value.update.tz_offset,
2172
0
      log->value.update.message, cb_data);
2173
0
}
2174
2175
/*
 * Invoke `fn` for every reflog entry of `refname` in the order the
 * reftable log iterator yields them (this is the "reverse" direction of
 * the refs API). Stops early when the callback returns non-zero and
 * propagates that value; returns a negative error code on failure.
 */
static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
						   const char *refname,
						   each_reflog_ent_fn fn,
						   void *cb_data)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
	struct reftable_log_record log = {0};
	struct reftable_iterator it = {0};
	struct reftable_backend *be;
	int ret;

	if (refs->err < 0)
		return refs->err;

	/*
	 * TODO: we should adapt this callsite to reload the stack. There is no
	 * obvious reason why we shouldn't.
	 */
	ret = backend_for(&be, refs, refname, &refname, 0);
	if (ret)
		goto done;

	ret = reftable_stack_init_log_iterator(be->stack, &it);
	if (ret < 0)
		goto done;

	ret = reftable_iterator_seek_log(&it, refname);
	while (!ret) {
		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			break;
		/* End of iteration, or we ran past this ref's records. */
		if (ret > 0 || strcmp(log.refname, refname)) {
			ret = 0;
			break;
		}

		/* Non-zero callback result aborts iteration and is returned. */
		ret = yield_log_record(refs, &log, fn, cb_data);
		if (ret)
			break;
	}

done:
	reftable_log_record_release(&log);
	reftable_iterator_destroy(&it);
	return ret;
}
2222
2223
/*
 * Invoke `fn` for every reflog entry of `refname` in the opposite order
 * of what the iterator yields. Because the reftable iterator only walks
 * one way, all records are first buffered in memory and then replayed
 * backwards. Stops early when the callback returns non-zero and
 * propagates that value; returns a negative error code on failure.
 */
static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
					   const char *refname,
					   each_reflog_ent_fn fn,
					   void *cb_data)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
	struct reftable_log_record *logs = NULL;
	struct reftable_iterator it = {0};
	struct reftable_backend *be;
	size_t logs_alloc = 0, logs_nr = 0, i;
	int ret;

	if (refs->err < 0)
		return refs->err;

	/*
	 * TODO: we should adapt this callsite to reload the stack. There is no
	 * obvious reason why we shouldn't.
	 */
	ret = backend_for(&be, refs, refname, &refname, 0);
	if (ret)
		goto done;

	ret = reftable_stack_init_log_iterator(be->stack, &it);
	if (ret < 0)
		goto done;

	/* First pass: collect all records for this refname. */
	ret = reftable_iterator_seek_log(&it, refname);
	while (!ret) {
		struct reftable_log_record log = {0};

		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			goto done;
		/* End of iteration, or we ran past this ref's records. */
		if (ret > 0 || strcmp(log.refname, refname)) {
			reftable_log_record_release(&log);
			ret = 0;
			break;
		}

		/* The array takes ownership of the record's allocations. */
		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		logs[logs_nr++] = log;
	}

	/* Second pass: yield the buffered records in reverse order. */
	for (i = logs_nr; i--;) {
		ret = yield_log_record(refs, &logs[i], fn, cb_data);
		if (ret)
			goto done;
	}

done:
	reftable_iterator_destroy(&it);
	for (i = 0; i < logs_nr; i++)
		reftable_log_record_release(&logs[i]);
	free(logs);
	return ret;
}
2281
2282
/*
 * Return 1 if `refname` has at least one reflog record, 0 otherwise.
 * Errors are deliberately reported as "does not exist" (0) rather than
 * propagated, so callers only ever see a boolean.
 */
static int reftable_be_reflog_exists(struct ref_store *ref_store,
				     const char *refname)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
	struct reftable_log_record log = {0};
	struct reftable_iterator it = {0};
	struct reftable_backend *be;
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	ret = backend_for(&be, refs, refname, &refname, 1);
	if (ret < 0)
		goto done;

	ret = reftable_stack_init_log_iterator(be->stack, &it);
	if (ret < 0)
		goto done;

	ret = reftable_iterator_seek_log(&it, refname);
	if (ret < 0)
		goto done;

	/*
	 * Check whether we get at least one log record for the given ref name.
	 * If so, the reflog exists, otherwise it doesn't.
	 */
	ret = reftable_iterator_next_log(&it, &log);
	if (ret < 0)
		goto done;
	if (ret > 0) {
		/* Iterator exhausted before finding any record. */
		ret = 0;
		goto done;
	}

	/* The seek may land on a later refname; compare to be sure. */
	ret = strcmp(log.refname, refname) == 0;

done:
	reftable_iterator_destroy(&it);
	reftable_log_record_release(&log);
	/* Collapse errors into "no reflog" -- this function is boolean. */
	if (ret < 0)
		ret = 0;
	return ret;
}
2329
2330
/*
 * Callback payload for write_reflog_existence_table(), which writes the
 * placeholder record marking that a reflog exists for `refname`.
 */
struct write_reflog_existence_arg {
	struct reftable_ref_store *refs;	/* owning ref store */
	const char *refname;			/* ref whose reflog is being created */
	struct reftable_stack *stack;		/* stack receiving the new table */
};
2335
2336
/*
 * Table-writing callback that records the existence of a reflog by
 * writing a placeholder log record (null old/new OIDs). A no-op when
 * the ref already has log records; returns a negative reftable error
 * on failure.
 */
static int write_reflog_existence_table(struct reftable_writer *writer,
					void *cb_data)
{
	struct write_reflog_existence_arg *arg = cb_data;
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	struct reftable_log_record log = {0};
	int ret;

	/*
	 * ret == 0 means a log record already exists (nothing to do),
	 * ret < 0 is an error; only ret > 0 ("not found") proceeds.
	 */
	ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
	if (ret <= 0)
		goto done;

	ret = reftable_writer_set_limits(writer, ts, ts);
	if (ret < 0)
		goto done;

	/*
	 * The existence entry has both old and new object ID set to the
	 * null object ID. Our iterators are aware of this and will not present
	 * them to their callers.
	 */
	log.refname = xstrdup(arg->refname);
	log.update_index = ts;
	log.value_type = REFTABLE_LOG_UPDATE;
	ret = reftable_writer_add_log(writer, &log);

done:
	assert(ret != REFTABLE_API_ERROR);
	reftable_log_record_release(&log);
	return ret;
}
2367
2368
static int reftable_be_create_reflog(struct ref_store *ref_store,
2369
             const char *refname,
2370
             struct strbuf *errmsg UNUSED)
2371
0
{
2372
0
  struct reftable_ref_store *refs =
2373
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
2374
0
  struct reftable_backend *be;
2375
0
  struct write_reflog_existence_arg arg = {
2376
0
    .refs = refs,
2377
0
    .refname = refname,
2378
0
  };
2379
0
  int ret;
2380
2381
0
  ret = refs->err;
2382
0
  if (ret < 0)
2383
0
    goto done;
2384
2385
0
  ret = backend_for(&be, refs, refname, &refname, 1);
2386
0
  if (ret)
2387
0
    goto done;
2388
0
  arg.stack = be->stack;
2389
2390
0
  ret = reftable_stack_add(be->stack, &write_reflog_existence_table, &arg,
2391
0
         REFTABLE_STACK_NEW_ADDITION_RELOAD);
2392
2393
0
done:
2394
0
  return ret;
2395
0
}
2396
2397
/* Callback payload for write_reflog_delete_table(). */
struct write_reflog_delete_arg {
	struct reftable_stack *stack;	/* stack receiving the tombstone table */
	const char *refname;		/* ref whose reflog is being deleted */
};
2401
2402
/*
 * Table-writing callback that deletes an entire reflog by writing one
 * tombstone per existing log record. Returns 0 on success or a
 * negative reftable error.
 */
static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
{
	struct write_reflog_delete_arg *arg = cb_data;
	struct reftable_log_record log = {0}, tombstone = {0};
	struct reftable_iterator it = {0};
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	int ret;

	ret = reftable_writer_set_limits(writer, ts, ts);
	if (ret < 0)
		goto out;

	ret = reftable_stack_init_log_iterator(arg->stack, &it);
	if (ret < 0)
		goto out;

	/*
	 * In order to delete a table we need to delete all reflog entries one
	 * by one. This is inefficient, but the reftable format does not have a
	 * better marker right now.
	 */
	ret = reftable_iterator_seek_log(&it, arg->refname);
	while (ret == 0) {
		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			break;
		/* End of iteration, or past this ref's log records. */
		if (ret > 0 || strcmp(log.refname, arg->refname)) {
			ret = 0;
			break;
		}

		/*
		 * The tombstone borrows arg->refname and must match the
		 * deleted record's update_index to shadow it.
		 */
		tombstone.refname = (char *)arg->refname;
		tombstone.value_type = REFTABLE_LOG_DELETION;
		tombstone.update_index = log.update_index;

		ret = reftable_writer_add_log(writer, &tombstone);
	}

out:
	reftable_log_record_release(&log);
	reftable_iterator_destroy(&it);
	return ret;
}
2445
2446
static int reftable_be_delete_reflog(struct ref_store *ref_store,
2447
             const char *refname)
2448
0
{
2449
0
  struct reftable_ref_store *refs =
2450
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
2451
0
  struct reftable_backend *be;
2452
0
  struct write_reflog_delete_arg arg = {
2453
0
    .refname = refname,
2454
0
  };
2455
0
  int ret;
2456
2457
0
  ret = backend_for(&be, refs, refname, &refname, 1);
2458
0
  if (ret)
2459
0
    return ret;
2460
0
  arg.stack = be->stack;
2461
2462
0
  ret = reftable_stack_add(be->stack, &write_reflog_delete_table, &arg,
2463
0
         REFTABLE_STACK_NEW_ADDITION_RELOAD);
2464
2465
0
  assert(ret != REFTABLE_API_ERROR);
2466
0
  return ret;
2467
0
}
2468
2469
/*
 * Callback payload for write_reflog_expiry_table(): the rewritten set
 * of reflog records to commit, plus an optional ref-value update.
 */
struct reflog_expiry_arg {
	struct reftable_ref_store *refs;	/* owning ref store */
	struct reftable_stack *stack;		/* stack receiving the new table */
	struct reftable_log_record *records;	/* rewritten log records (updates + tombstones) */
	struct object_id update_oid;		/* new ref value, or null OID for "don't touch" */
	const char *refname;			/* ref whose reflog is expired */
	size_t len;				/* number of entries in `records` */
};
2477
2478
/*
 * Table-writing callback for reflog expiry. Writes, in one table: an
 * optional ref-value update (when EXPIRE_REFLOGS_UPDATE_REF rewrote the
 * tip), the reflog-existence placeholder when no live records remain,
 * and every rewritten log record (live updates and tombstones alike).
 * Returns 0 on success or a negative reftable error.
 */
static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
{
	struct reflog_expiry_arg *arg = cb_data;
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	uint64_t live_records = 0;
	size_t i;
	int ret;

	/* Count surviving entries; tombstones carry REFTABLE_LOG_DELETION. */
	for (i = 0; i < arg->len; i++)
		if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
			live_records++;

	ret = reftable_writer_set_limits(writer, ts, ts);
	if (ret < 0)
		return ret;

	/* A non-null update_oid means the ref's tip itself must be rewritten. */
	if (!is_null_oid(&arg->update_oid)) {
		struct reftable_ref_record ref = {0};
		struct object_id peeled;

		ref.refname = (char *)arg->refname;
		ref.update_index = ts;

		/* Store the peeled value alongside the OID when peelable. */
		if (!peel_object(arg->refs->base.repo, &arg->update_oid, &peeled, 0)) {
			ref.value_type = REFTABLE_REF_VAL2;
			memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
			memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
		} else {
			ref.value_type = REFTABLE_REF_VAL1;
			memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
		}

		ret = reftable_writer_add_ref(writer, &ref);
		if (ret < 0)
			return ret;
	}

	/*
	 * When there are no more entries left in the reflog we empty it
	 * completely, but write a placeholder reflog entry that indicates that
	 * the reflog still exists.
	 */
	if (!live_records) {
		struct reftable_log_record log = {
			.refname = (char *)arg->refname,
			.value_type = REFTABLE_LOG_UPDATE,
			.update_index = ts,
		};

		ret = reftable_writer_add_log(writer, &log);
		if (ret)
			return ret;
	}

	for (i = 0; i < arg->len; i++) {
		ret = reftable_writer_add_log(writer, &arg->records[i]);
		if (ret)
			return ret;
	}

	return 0;
}
2540
2541
/*
 * Expire reflog entries of `refname` according to the supplied policy
 * callbacks. Collects all live records, asks `should_prune_fn` about
 * each (newest-last, since the loop walks the buffered array backwards),
 * turns pruned records into tombstones, optionally backfills old_hash
 * chains (EXPIRE_REFLOGS_REWRITE) and rewrites the ref tip
 * (EXPIRE_REFLOGS_UPDATE_REF), then commits everything in one addition
 * unless EXPIRE_REFLOGS_DRY_RUN is set.
 */
static int reftable_be_reflog_expire(struct ref_store *ref_store,
				     const char *refname,
				     unsigned int flags,
				     reflog_expiry_prepare_fn prepare_fn,
				     reflog_expiry_should_prune_fn should_prune_fn,
				     reflog_expiry_cleanup_fn cleanup_fn,
				     void *policy_cb_data)
{
	/*
	 * For log expiry, we write tombstones for every single reflog entry
	 * that is to be expired. This means that the entries are still
	 * retrievable by delving into the stack, and expiring entries
	 * paradoxically takes extra memory. This memory is only reclaimed when
	 * compacting the reftable stack.
	 *
	 * It would be better if the refs backend supported an API that sets a
	 * criterion for all refs, passing the criterion to pack_refs().
	 *
	 * On the plus side, because we do the expiration per ref, we can easily
	 * insert the reflog existence dummies.
	 */
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
	struct reftable_log_record *logs = NULL;
	struct reftable_log_record *rewritten = NULL;
	struct reftable_iterator it = {0};
	struct reftable_addition *add = NULL;
	struct reflog_expiry_arg arg = {0};
	struct reftable_backend *be;
	struct object_id oid = {0};
	struct strbuf referent = STRBUF_INIT;
	uint8_t *last_hash = NULL;
	size_t logs_nr = 0, logs_alloc = 0, i;
	unsigned int type = 0;
	int ret;

	if (refs->err < 0)
		return refs->err;

	ret = backend_for(&be, refs, refname, &refname, 1);
	if (ret < 0)
		goto done;

	/* Holding the addition locks the stack against concurrent writers. */
	ret = reftable_stack_new_addition(&add, be->stack,
					  REFTABLE_STACK_NEW_ADDITION_RELOAD);
	if (ret < 0)
		goto done;

	ret = reftable_stack_init_log_iterator(be->stack, &it);
	if (ret < 0)
		goto done;

	ret = reftable_iterator_seek_log(&it, refname);
	if (ret < 0)
		goto done;

	/* Current ref value, handed to the policy's prepare callback. */
	ret = reftable_backend_read_ref(be, refname, &oid, &referent, &type);
	if (ret < 0)
		goto done;
	prepare_fn(refname, &oid, policy_cb_data);

	/* Buffer all live log records for this ref. */
	while (1) {
		struct reftable_log_record log = {0};
		struct object_id old_oid, new_oid;

		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			goto done;
		if (ret > 0 || strcmp(log.refname, refname)) {
			reftable_log_record_release(&log);
			break;
		}

		oidread(&old_oid, log.value.update.old_hash,
			ref_store->repo->hash_algo);
		oidread(&new_oid, log.value.update.new_hash,
			ref_store->repo->hash_algo);

		/*
		 * Skip over the reflog existence marker. We will add it back
		 * in when there are no live reflog records.
		 */
		if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
			reftable_log_record_release(&log);
			continue;
		}

		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		logs[logs_nr++] = log;
	}

	/*
	 * We need to rewrite all reflog entries according to the pruning
	 * callback function:
	 *
	 *   - If a reflog entry shall be pruned we mark the record for
	 *     deletion.
	 *
	 *   - Otherwise we may have to rewrite the chain of reflog entries so
	 *     that gaps created by just-deleted records get backfilled.
	 */
	CALLOC_ARRAY(rewritten, logs_nr);
	for (i = logs_nr; i--;) {
		struct reftable_log_record *dest = &rewritten[i];
		struct object_id old_oid, new_oid;

		*dest = logs[i];
		oidread(&old_oid, logs[i].value.update.old_hash,
			ref_store->repo->hash_algo);
		oidread(&new_oid, logs[i].value.update.new_hash,
			ref_store->repo->hash_algo);

		if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
				    (timestamp_t)logs[i].value.update.time,
				    logs[i].value.update.tz_offset,
				    logs[i].value.update.message,
				    policy_cb_data)) {
			dest->value_type = REFTABLE_LOG_DELETION;
		} else {
			/* Chain surviving entries: old_hash = previous new_hash. */
			if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
				memcpy(dest->value.update.old_hash, last_hash, GIT_MAX_RAWSZ);
			last_hash = logs[i].value.update.new_hash;
		}
	}

	/* last_hash ends up as the newest surviving entry's new_hash. */
	if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash && !is_null_oid(&oid))
		oidread(&arg.update_oid, last_hash, ref_store->repo->hash_algo);

	arg.refs = refs;
	arg.records = rewritten;
	arg.len = logs_nr;
	arg.stack = be->stack;
	arg.refname = refname;

	ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
	if (ret < 0)
		goto done;

	/*
	 * Future improvement: we could skip writing records that were
	 * not changed.
	 */
	if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
		ret = reftable_addition_commit(add);

done:
	/* The cleanup callback only runs once prepare_fn could have run. */
	if (add)
		cleanup_fn(policy_cb_data);
	assert(ret != REFTABLE_API_ERROR);

	reftable_iterator_destroy(&it);
	reftable_addition_destroy(add);
	for (i = 0; i < logs_nr; i++)
		reftable_log_record_release(&logs[i]);
	strbuf_release(&referent);
	free(logs);
	free(rewritten);
	return ret;
}
2700
2701
static void reftable_fsck_verbose_handler(const char *msg, void *cb_data)
2702
0
{
2703
0
  struct fsck_options *o = cb_data;
2704
2705
0
  if (o->verbose)
2706
0
    fprintf_ln(stderr, "%s", msg);
2707
0
}
2708
2709
/*
 * Maps reftable-library fsck error codes (indices) to the generic
 * git-fsck message IDs used when reporting. Codes without an explicit
 * entry are zero-initialized; reftable_fsck_error_handler() treats a
 * zero lookup result as a missing mapping and raises a BUG().
 */
static const enum fsck_msg_id fsck_msg_id_map[] = {
	[REFTABLE_FSCK_ERROR_TABLE_NAME] = FSCK_MSG_BAD_REFTABLE_TABLE_NAME,
};
2712
2713
static int reftable_fsck_error_handler(struct reftable_fsck_info *info,
2714
               void *cb_data)
2715
0
{
2716
0
  struct fsck_ref_report report = { .path = info->path };
2717
0
  struct fsck_options *o = cb_data;
2718
0
  enum fsck_msg_id msg_id;
2719
2720
0
  if (info->error < 0 || info->error >= REFTABLE_FSCK_MAX_VALUE)
2721
0
    BUG("unknown fsck error: %d", (int)info->error);
2722
2723
0
  msg_id = fsck_msg_id_map[info->error];
2724
2725
0
  if (!msg_id)
2726
0
    BUG("fsck_msg_id value missing for reftable error: %d", (int)info->error);
2727
2728
0
  return fsck_report_ref(o, &report, msg_id, "%s", info->msg);
2729
0
}
2730
2731
/*
 * ref_storage_be::fsck callback for the reftable backend: run
 * consistency checks over the reftable stack belonging to worktree
 * "wt", then iterate every ref record in that stack and run the
 * generic per-ref checks on it.
 *
 * Returns 0 when no problems were found, -1 when any check reported an
 * error or when the stack/iterator could not be set up at all.
 */
static int reftable_be_fsck(struct ref_store *ref_store, struct fsck_options *o,
			    struct worktree *wt)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "fsck");
	struct reftable_ref_iterator *iter = NULL;
	struct reftable_ref_record ref = { 0 };
	struct fsck_ref_report report = { 0 };
	struct strbuf refname = STRBUF_INIT;
	struct reftable_backend *backend;
	int ret, errors = 0;

	/*
	 * The main worktree's stack is always available on the ref store;
	 * per-worktree stacks are looked up (and may be broken) lazily.
	 */
	if (is_main_worktree(wt)) {
		backend = &refs->main_backend;
	} else {
		ret = backend_for_worktree(&backend, refs, wt->id);
		if (ret < 0) {
			ret = error(_("reftable stack for worktree '%s' is broken"),
				    wt->id);
			goto out;
		}
	}

	/*
	 * Structural checks on the stack itself (e.g. table file names);
	 * findings are reported through the two handlers and accumulated
	 * into "errors" so we keep going and check the refs, too.
	 */
	errors |= reftable_fsck_check(backend->stack, reftable_fsck_error_handler,
				      reftable_fsck_verbose_handler, o);

	iter = ref_iterator_for_stack(refs, backend->stack, "", NULL, 0);
	if (!iter) {
		ret = error(_("could not create iterator for worktree '%s'"), wt->id);
		goto out;
	}

	while (1) {
		ret = reftable_iterator_next_ref(&iter->iter, &ref);
		if (ret > 0)
			break;
		if (ret < 0) {
			ret = error(_("could not read record for worktree '%s'"), wt->id);
			goto out;
		}

		/*
		 * Report paths for per-worktree refs are qualified with
		 * "worktrees/<id>/" so the user can tell the stacks apart.
		 */
		strbuf_reset(&refname);
		if (!is_main_worktree(wt))
			strbuf_addf(&refname, "worktrees/%s/", wt->id);
		strbuf_addstr(&refname, ref.refname);
		report.path = refname.buf;

		switch (ref.value_type) {
		case REFTABLE_REF_VAL1:
		case REFTABLE_REF_VAL2: {
			struct object_id oid;
			unsigned hash_id;

			/* Translate the library's hash ID into git's. */
			switch (reftable_stack_hash_id(backend->stack)) {
			case REFTABLE_HASH_SHA1:
				hash_id = GIT_HASH_SHA1;
				break;
			case REFTABLE_HASH_SHA256:
				hash_id = GIT_HASH_SHA256;
				break;
			default:
				BUG("unhandled hash ID %d",
				    reftable_stack_hash_id(backend->stack));
			}

			/*
			 * For a peeled (VAL2) record this checks the value
			 * hash only; val1 is the ref's own object ID in both
			 * cases.
			 */
			oidread(&oid, reftable_ref_record_val1(&ref),
				&hash_algos[hash_id]);

			errors |= refs_fsck_ref(ref_store, o, &report, ref.refname, &oid);
			break;
		}
		case REFTABLE_REF_SYMREF:
			errors |= refs_fsck_symref(ref_store, o, &report, ref.refname,
						   ref.value.symref);
			break;
		default:
			BUG("unhandled reference value type %d", ref.value_type);
		}
	}

	ret = errors ? -1 : 0;

out:
	if (iter)
		ref_iterator_free(&iter->base);
	reftable_ref_record_release(&ref);
	strbuf_release(&refname);
	return ret;
}
2820
2821
/*
 * Virtual method table wiring the reftable backend into git's generic
 * ref-store machinery; see "refs-internal.h" for the contract of each
 * callback. Selected at runtime when a repository's ref storage format
 * is "reftable".
 */
struct ref_storage_be refs_be_reftable = {
	.name = "reftable",
	.init = reftable_be_init,
	.release = reftable_be_release,
	.create_on_disk = reftable_be_create_on_disk,
	.remove_on_disk = reftable_be_remove_on_disk,

	.transaction_prepare = reftable_be_transaction_prepare,
	.transaction_finish = reftable_be_transaction_finish,
	.transaction_abort = reftable_be_transaction_abort,

	.optimize = reftable_be_optimize,
	.optimize_required = reftable_be_optimize_required,

	.rename_ref = reftable_be_rename_ref,
	.copy_ref = reftable_be_copy_ref,

	.iterator_begin = reftable_be_iterator_begin,
	.read_raw_ref = reftable_be_read_raw_ref,
	.read_symbolic_ref = reftable_be_read_symbolic_ref,

	.reflog_iterator_begin = reftable_be_reflog_iterator_begin,
	.for_each_reflog_ent = reftable_be_for_each_reflog_ent,
	.for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
	.reflog_exists = reftable_be_reflog_exists,
	.create_reflog = reftable_be_create_reflog,
	.delete_reflog = reftable_be_delete_reflog,
	.reflog_expire = reftable_be_reflog_expire,

	.fsck = reftable_be_fsck,
};