Coverage Report

Created: 2026-01-09 07:10

/src/git/refs/reftable-backend.c
Line | Count | Source
1
#define USE_THE_REPOSITORY_VARIABLE
2
3
#include "../git-compat-util.h"
4
#include "../abspath.h"
5
#include "../chdir-notify.h"
6
#include "../config.h"
7
#include "../dir.h"
8
#include "../environment.h"
9
#include "../fsck.h"
10
#include "../gettext.h"
11
#include "../hash.h"
12
#include "../hex.h"
13
#include "../iterator.h"
14
#include "../ident.h"
15
#include "../object.h"
16
#include "../path.h"
17
#include "../refs.h"
18
#include "../reftable/reftable-basics.h"
19
#include "../reftable/reftable-error.h"
20
#include "../reftable/reftable-fsck.h"
21
#include "../reftable/reftable-iterator.h"
22
#include "../reftable/reftable-record.h"
23
#include "../reftable/reftable-stack.h"
24
#include "../repo-settings.h"
25
#include "../setup.h"
26
#include "../strmap.h"
27
#include "../trace2.h"
28
#include "../write-or-die.h"
29
#include "parse.h"
30
#include "refs-internal.h"
31
32
/*
33
 * Used as a flag in ref_update::flags when the ref_update was via an
34
 * update to HEAD.
35
 */
36
0
#define REF_UPDATE_VIA_HEAD (1 << 8)
37
38
struct reftable_backend {
39
  struct reftable_stack *stack;
40
  struct reftable_iterator it;
41
};
42
43
static void reftable_backend_on_reload(void *payload)
44
0
{
45
0
  struct reftable_backend *be = payload;
46
0
  reftable_iterator_destroy(&be->it);
47
0
}
48
49
static int reftable_backend_init(struct reftable_backend *be,
50
         const char *path,
51
         const struct reftable_write_options *_opts)
52
0
{
53
0
  struct reftable_write_options opts = *_opts;
54
0
  opts.on_reload = reftable_backend_on_reload;
55
0
  opts.on_reload_payload = be;
56
0
  return reftable_new_stack(&be->stack, path, &opts);
57
0
}
58
59
static void reftable_backend_release(struct reftable_backend *be)
60
0
{
61
0
  reftable_stack_destroy(be->stack);
62
0
  be->stack = NULL;
63
0
  reftable_iterator_destroy(&be->it);
64
0
}
65
66
static int reftable_backend_read_ref(struct reftable_backend *be,
67
             const char *refname,
68
             struct object_id *oid,
69
             struct strbuf *referent,
70
             unsigned int *type)
71
0
{
72
0
  struct reftable_ref_record ref = {0};
73
0
  int ret;
74
75
0
  if (!be->it.ops) {
76
0
    ret = reftable_stack_init_ref_iterator(be->stack, &be->it);
77
0
    if (ret)
78
0
      goto done;
79
0
  }
80
81
0
  ret = reftable_iterator_seek_ref(&be->it, refname);
82
0
  if (ret)
83
0
    goto done;
84
85
0
  ret = reftable_iterator_next_ref(&be->it, &ref);
86
0
  if (ret)
87
0
    goto done;
88
89
0
  if (strcmp(ref.refname, refname)) {
90
0
    ret = 1;
91
0
    goto done;
92
0
  }
93
94
0
  if (ref.value_type == REFTABLE_REF_SYMREF) {
95
0
    strbuf_reset(referent);
96
0
    strbuf_addstr(referent, ref.value.symref);
97
0
    *type |= REF_ISSYMREF;
98
0
  } else if (reftable_ref_record_val1(&ref)) {
99
0
    unsigned int hash_id;
100
101
0
    switch (reftable_stack_hash_id(be->stack)) {
102
0
    case REFTABLE_HASH_SHA1:
103
0
      hash_id = GIT_HASH_SHA1;
104
0
      break;
105
0
    case REFTABLE_HASH_SHA256:
106
0
      hash_id = GIT_HASH_SHA256;
107
0
      break;
108
0
    default:
109
0
      BUG("unhandled hash ID %d", reftable_stack_hash_id(be->stack));
110
0
    }
111
112
0
    oidread(oid, reftable_ref_record_val1(&ref),
113
0
      &hash_algos[hash_id]);
114
0
  } else {
115
    /* We got a tombstone, which should not happen. */
116
0
    BUG("unhandled reference value type %d", ref.value_type);
117
0
  }
118
119
0
done:
120
0
  assert(ret != REFTABLE_API_ERROR);
121
0
  reftable_ref_record_release(&ref);
122
0
  return ret;
123
0
}
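
Illustrative only (not part of the measured file): a minimal sketch of how a caller consumes the contract above, where 0 means the record was found, a positive value means the reference does not exist, and negative values are reftable errors; this is the same convention reftable_be_read_raw_ref() relies on further down.

static int example_read_ref(struct reftable_backend *be, const char *refname,
          struct object_id *oid)
{
  struct strbuf referent = STRBUF_INIT;
  unsigned int type = 0;
  int ret;

  ret = reftable_backend_read_ref(be, refname, oid, &referent, &type);
  if (ret > 0)
    ret = -1; /* the reference does not exist */
  else if (!ret && (type & REF_ISSYMREF))
    ret = 0; /* symbolic ref; its target is now in `referent` */

  strbuf_release(&referent);
  return ret;
}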
124
125
struct reftable_ref_store {
126
  struct ref_store base;
127
128
  /*
129
   * The main backend refers to the common dir and thus contains common
130
   * refs as well as refs of the main repository.
131
   */
132
  struct reftable_backend main_backend;
133
  /*
134
   * The worktree backend refers to the gitdir in case the refdb is opened
135
   * via a worktree. It thus contains the per-worktree refs.
136
   */
137
  struct reftable_backend worktree_backend;
138
  /*
139
   * Map of worktree backends by their respective worktree names. The map
140
   * is populated lazily when we try to resolve `worktrees/$worktree` refs.
141
   */
142
  struct strmap worktree_backends;
143
  struct reftable_write_options write_options;
144
145
  unsigned int store_flags;
146
  enum log_refs_config log_all_ref_updates;
147
  int err;
148
};
149
150
/*
151
 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
152
 * reftable_ref_store. required_flags is compared with ref_store's store_flags
153
 * to ensure the ref_store has all required capabilities. "caller" is used in
154
 * any necessary error messages.
155
 */
156
static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
157
                   unsigned int required_flags,
158
                   const char *caller)
159
0
{
160
0
  struct reftable_ref_store *refs;
161
162
0
  if (ref_store->be != &refs_be_reftable)
163
0
    BUG("ref_store is type \"%s\" not \"reftables\" in %s",
164
0
        ref_store->be->name, caller);
165
166
0
  refs = (struct reftable_ref_store *)ref_store;
167
168
0
  if ((refs->store_flags & required_flags) != required_flags)
169
0
    BUG("operation %s requires abilities 0x%x, but only have 0x%x",
170
0
        caller, required_flags, refs->store_flags);
171
172
0
  return refs;
173
0
}
174
175
/*
176
 * Some refs are global to the repository (refs/heads/{*}), while others are
177
 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
178
 * multiple separate databases (ie. multiple reftable/ directories), one for
179
 * the shared refs, one for the current worktree refs, and one for each
180
 * additional worktree. For reading, we merge the view of both the shared and
181
 * the current worktree's refs, when necessary.
182
 *
183
 * This function also optionally assigns the rewritten reference name that is
184
 * local to the stack. This translation is required when using worktree refs
185
 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
186
 * those references in their normalized form.
187
 */
188
static int backend_for(struct reftable_backend **out,
189
           struct reftable_ref_store *store,
190
           const char *refname,
191
           const char **rewritten_ref,
192
           int reload)
193
0
{
194
0
  struct reftable_backend *be;
195
0
  const char *wtname;
196
0
  int wtname_len;
197
198
0
  if (!refname) {
199
0
    be = &store->main_backend;
200
0
    goto out;
201
0
  }
202
203
0
  switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
204
0
  case REF_WORKTREE_OTHER: {
205
0
    static struct strbuf wtname_buf = STRBUF_INIT;
206
0
    struct strbuf wt_dir = STRBUF_INIT;
207
208
    /*
209
     * We're using a static buffer here so that we don't need to
210
     * allocate the worktree name whenever we look up a reference.
211
     * This could be avoided if the strmap interface knew how to
212
     * handle keys with a length.
213
     */
214
0
    strbuf_reset(&wtname_buf);
215
0
    strbuf_add(&wtname_buf, wtname, wtname_len);
216
217
    /*
218
     * There is an edge case here: when the worktree references the
219
     * current worktree, then we set up the stack once via
220
     * `worktree_backends` and once via `worktree_backend`. This is
221
     * wasteful, but in the reading case it shouldn't matter. And
222
     * in the writing case we would notice that the stack is locked
223
     * already and error out when trying to write a reference via
224
     * both stacks.
225
     */
226
0
    be = strmap_get(&store->worktree_backends, wtname_buf.buf);
227
0
    if (!be) {
228
0
      strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
229
0
            store->base.repo->commondir, wtname_buf.buf);
230
231
0
      CALLOC_ARRAY(be, 1);
232
0
      store->err = reftable_backend_init(be, wt_dir.buf,
233
0
                 &store->write_options);
234
0
      assert(store->err != REFTABLE_API_ERROR);
235
236
0
      strmap_put(&store->worktree_backends, wtname_buf.buf, be);
237
0
    }
238
239
0
    strbuf_release(&wt_dir);
240
0
    goto out;
241
0
  }
242
0
  case REF_WORKTREE_CURRENT:
243
    /*
244
     * If there is no worktree stack then we're currently in the
245
     * main worktree. We thus return the main stack in that case.
246
     */
247
0
    if (!store->worktree_backend.stack)
248
0
      be = &store->main_backend;
249
0
    else
250
0
      be = &store->worktree_backend;
251
0
    goto out;
252
0
  case REF_WORKTREE_MAIN:
253
0
  case REF_WORKTREE_SHARED:
254
0
    be = &store->main_backend;
255
0
    goto out;
256
0
  default:
257
0
    BUG("unhandled worktree reference type");
258
0
  }
259
260
0
out:
261
0
  if (reload) {
262
0
    int ret = reftable_stack_reload(be->stack);
263
0
    if (ret)
264
0
      return ret;
265
0
  }
266
0
  *out = be;
267
268
0
  return 0;
269
0
}
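
A hedged sketch of the mapping described above (the worktree name "wt1" and the helper function are invented for illustration): a `worktrees/$worktree/...` ref selects the per-worktree stack and is rewritten to its stack-local name before being read.

static int example_worktree_lookup(struct reftable_ref_store *store,
           struct object_id *oid, struct strbuf *referent,
           unsigned int *type)
{
  struct reftable_backend *be;
  const char *rewritten = NULL;
  int ret;

  /* "wt1" is a made-up worktree name, used only for illustration. */
  ret = backend_for(&be, store, "worktrees/wt1/refs/heads/topic",
        &rewritten, 1);
  if (ret)
    return ret;

  /*
   * `be` now refers to the stack under "worktrees/wt1/reftable" in the
   * common dir, and `rewritten` points at the stack-local name
   * "refs/heads/topic".
   */
  return reftable_backend_read_ref(be, rewritten, oid, referent, type);
}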
270
271
static int should_write_log(struct reftable_ref_store *refs, const char *refname)
272
0
{
273
0
  enum log_refs_config log_refs_cfg = refs->log_all_ref_updates;
274
0
  if (log_refs_cfg == LOG_REFS_UNSET)
275
0
    log_refs_cfg = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
276
277
0
  switch (log_refs_cfg) {
278
0
  case LOG_REFS_NONE:
279
0
    return refs_reflog_exists(&refs->base, refname);
280
0
  case LOG_REFS_ALWAYS:
281
0
    return 1;
282
0
  case LOG_REFS_NORMAL:
283
0
    if (should_autocreate_reflog(log_refs_cfg, refname))
284
0
      return 1;
285
0
    return refs_reflog_exists(&refs->base, refname);
286
0
  default:
287
0
    BUG("unhandled core.logAllRefUpdates value %d", log_refs_cfg);
288
0
  }
289
0
}
290
291
static void fill_reftable_log_record(struct reftable_log_record *log, const struct ident_split *split)
292
0
{
293
0
  const char *tz_begin;
294
0
  int sign = 1;
295
296
0
  reftable_log_record_release(log);
297
0
  log->value_type = REFTABLE_LOG_UPDATE;
298
0
  log->value.update.name =
299
0
    xstrndup(split->name_begin, split->name_end - split->name_begin);
300
0
  log->value.update.email =
301
0
    xstrndup(split->mail_begin, split->mail_end - split->mail_begin);
302
0
  log->value.update.time = atol(split->date_begin);
303
304
0
  tz_begin = split->tz_begin;
305
0
  if (*tz_begin == '-') {
306
0
    sign = -1;
307
0
    tz_begin++;
308
0
  }
309
0
  if (*tz_begin == '+') {
310
0
    sign = 1;
311
0
    tz_begin++;
312
0
  }
313
314
0
  log->value.update.tz_offset = sign * atoi(tz_begin);
315
0
}
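
A short worked example of the timezone handling above; the sample ident value is hypothetical:

/*
 * Example: for a committer ident whose timezone reads "-0230", tz_begin
 * is advanced past the '-', atoi("0230") yields 230 and the record
 * stores tz_offset = -230. The offset is thus kept in +/-HHMM form
 * rather than being converted to minutes.
 */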
316
317
static int reftable_be_config(const char *var, const char *value,
318
            const struct config_context *ctx,
319
            void *_opts)
320
0
{
321
0
  struct reftable_write_options *opts = _opts;
322
323
0
  if (!strcmp(var, "reftable.blocksize")) {
324
0
    unsigned long block_size = git_config_ulong(var, value, ctx->kvi);
325
0
    if (block_size > 16777215)
326
0
      die("reftable block size cannot exceed 16MB");
327
0
    opts->block_size = block_size;
328
0
  } else if (!strcmp(var, "reftable.restartinterval")) {
329
0
    unsigned long restart_interval = git_config_ulong(var, value, ctx->kvi);
330
0
    if (restart_interval > UINT16_MAX)
331
0
      die("reftable block size cannot exceed %u", (unsigned)UINT16_MAX);
332
0
    opts->restart_interval = restart_interval;
333
0
  } else if (!strcmp(var, "reftable.indexobjects")) {
334
0
    opts->skip_index_objects = !git_config_bool(var, value);
335
0
  } else if (!strcmp(var, "reftable.geometricfactor")) {
336
0
    unsigned long factor = git_config_ulong(var, value, ctx->kvi);
337
0
    if (factor > UINT8_MAX)
338
0
      die("reftable geometric factor cannot exceed %u", (unsigned)UINT8_MAX);
339
0
    opts->auto_compaction_factor = factor;
340
0
  } else if (!strcmp(var, "reftable.locktimeout")) {
341
0
    int64_t lock_timeout = git_config_int64(var, value, ctx->kvi);
342
0
    if (lock_timeout > LONG_MAX)
343
0
      die("reftable lock timeout cannot exceed %"PRIdMAX, (intmax_t)LONG_MAX);
344
0
    if (lock_timeout < 0 && lock_timeout != -1)
345
0
      die("reftable lock timeout does not support negative values other than -1");
346
0
    opts->lock_timeout_ms = lock_timeout;
347
0
  }
348
349
0
  return 0;
350
0
}
351
352
static int reftable_be_fsync(int fd)
353
0
{
354
0
  return fsync_component(FSYNC_COMPONENT_REFERENCE, fd);
355
0
}
356
357
static struct ref_store *reftable_be_init(struct repository *repo,
358
            const char *gitdir,
359
            unsigned int store_flags)
360
0
{
361
0
  struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
362
0
  struct strbuf path = STRBUF_INIT;
363
0
  int is_worktree;
364
0
  mode_t mask;
365
366
0
  mask = umask(0);
367
0
  umask(mask);
368
369
0
  base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
370
0
  strmap_init(&refs->worktree_backends);
371
0
  refs->store_flags = store_flags;
372
0
  refs->log_all_ref_updates = repo_settings_get_log_all_ref_updates(repo);
373
374
0
  switch (repo->hash_algo->format_id) {
375
0
  case GIT_SHA1_FORMAT_ID:
376
0
    refs->write_options.hash_id = REFTABLE_HASH_SHA1;
377
0
    break;
378
0
  case GIT_SHA256_FORMAT_ID:
379
0
    refs->write_options.hash_id = REFTABLE_HASH_SHA256;
380
0
    break;
381
0
  default:
382
0
    BUG("unknown hash algorithm %d", repo->hash_algo->format_id);
383
0
  }
384
0
  refs->write_options.default_permissions = calc_shared_perm(the_repository, 0666 & ~mask);
385
0
  refs->write_options.disable_auto_compact =
386
0
    !git_env_bool("GIT_TEST_REFTABLE_AUTOCOMPACTION", 1);
387
0
  refs->write_options.lock_timeout_ms = 100;
388
0
  refs->write_options.fsync = reftable_be_fsync;
389
390
0
  repo_config(the_repository, reftable_be_config, &refs->write_options);
391
392
  /*
393
   * It is somewhat unfortunate that we have to mirror the default block
394
   * size of the reftable library here. But given that the write options
395
   * wouldn't be updated by the library here, and given that we require
396
   * the proper block size to trim reflog messages so that they fit, we
397
   * must set up a proper value here.
398
   */
399
0
  if (!refs->write_options.block_size)
400
0
    refs->write_options.block_size = 4096;
401
402
  /*
403
   * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
404
   * This stack contains both the shared and the main worktree refs.
405
   *
406
   * Note that we don't try to resolve the path in case we have a
407
   * worktree because `get_common_dir_noenv()` already does it for us.
408
   */
409
0
  is_worktree = get_common_dir_noenv(&path, gitdir);
410
0
  if (!is_worktree) {
411
0
    strbuf_reset(&path);
412
0
    strbuf_realpath(&path, gitdir, 0);
413
0
  }
414
0
  strbuf_addstr(&path, "/reftable");
415
0
  refs->err = reftable_backend_init(&refs->main_backend, path.buf,
416
0
            &refs->write_options);
417
0
  if (refs->err)
418
0
    goto done;
419
420
  /*
421
   * If we're in a worktree we also need to set up the worktree reftable
422
   * stack that is contained in the per-worktree GIT_DIR.
423
   *
424
   * Ideally, we would also add the stack to our worktree stack map. But
425
   * we have no way to figure out the worktree name here and thus can't
426
   * do it efficiently.
427
   */
428
0
  if (is_worktree) {
429
0
    strbuf_reset(&path);
430
0
    strbuf_addf(&path, "%s/reftable", gitdir);
431
432
0
    refs->err = reftable_backend_init(&refs->worktree_backend, path.buf,
433
0
              &refs->write_options);
434
0
    if (refs->err)
435
0
      goto done;
436
0
  }
437
438
0
  chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);
439
440
0
done:
441
0
  assert(refs->err != REFTABLE_API_ERROR);
442
0
  strbuf_release(&path);
443
0
  return &refs->base;
444
0
}
445
446
static void reftable_be_release(struct ref_store *ref_store)
447
0
{
448
0
  struct reftable_ref_store *refs = reftable_be_downcast(ref_store, 0, "release");
449
0
  struct strmap_entry *entry;
450
0
  struct hashmap_iter iter;
451
452
0
  if (refs->main_backend.stack)
453
0
    reftable_backend_release(&refs->main_backend);
454
0
  if (refs->worktree_backend.stack)
455
0
    reftable_backend_release(&refs->worktree_backend);
456
457
0
  strmap_for_each_entry(&refs->worktree_backends, &iter, entry) {
458
0
    struct reftable_backend *be = entry->value;
459
0
    reftable_backend_release(be);
460
0
    free(be);
461
0
  }
462
0
  strmap_clear(&refs->worktree_backends, 0);
463
0
}
464
465
static int reftable_be_create_on_disk(struct ref_store *ref_store,
466
              int flags UNUSED,
467
              struct strbuf *err UNUSED)
468
0
{
469
0
  struct reftable_ref_store *refs =
470
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "create");
471
0
  struct strbuf sb = STRBUF_INIT;
472
473
0
  strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
474
0
  safe_create_dir(the_repository, sb.buf, 1);
475
0
  strbuf_reset(&sb);
476
477
0
  strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
478
0
  write_file(sb.buf, "ref: refs/heads/.invalid");
479
0
  adjust_shared_perm(the_repository, sb.buf);
480
0
  strbuf_reset(&sb);
481
482
0
  strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
483
0
  safe_create_dir(the_repository, sb.buf, 1);
484
0
  strbuf_reset(&sb);
485
486
0
  strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
487
0
  write_file(sb.buf, "this repository uses the reftable format");
488
0
  adjust_shared_perm(the_repository, sb.buf);
489
490
0
  strbuf_release(&sb);
491
0
  return 0;
492
0
}
493
494
static int reftable_be_remove_on_disk(struct ref_store *ref_store,
495
              struct strbuf *err)
496
0
{
497
0
  struct reftable_ref_store *refs =
498
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "remove");
499
0
  struct strbuf sb = STRBUF_INIT;
500
0
  int ret = 0;
501
502
  /*
503
   * Release the ref store such that all stacks are closed. This is
504
   * required so that the "tables.list" file is not open anymore, which
505
   * would otherwise make it impossible to remove the file on Windows.
506
   */
507
0
  reftable_be_release(ref_store);
508
509
0
  strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
510
0
  if (remove_dir_recursively(&sb, 0) < 0) {
511
0
    strbuf_addf(err, "could not delete reftables: %s",
512
0
          strerror(errno));
513
0
    ret = -1;
514
0
  }
515
0
  strbuf_reset(&sb);
516
517
0
  strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
518
0
  if (unlink(sb.buf) < 0) {
519
0
    strbuf_addf(err, "could not delete stub HEAD: %s",
520
0
          strerror(errno));
521
0
    ret = -1;
522
0
  }
523
0
  strbuf_reset(&sb);
524
525
0
  strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
526
0
  if (unlink(sb.buf) < 0) {
527
0
    strbuf_addf(err, "could not delete stub heads: %s",
528
0
          strerror(errno));
529
0
    ret = -1;
530
0
  }
531
0
  strbuf_reset(&sb);
532
533
0
  strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
534
0
  if (rmdir(sb.buf) < 0) {
535
0
    strbuf_addf(err, "could not delete refs directory: %s",
536
0
          strerror(errno));
537
0
    ret = -1;
538
0
  }
539
540
0
  strbuf_release(&sb);
541
0
  return ret;
542
0
}
543
544
struct reftable_ref_iterator {
545
  struct ref_iterator base;
546
  struct reftable_ref_store *refs;
547
  struct reftable_iterator iter;
548
  struct reftable_ref_record ref;
549
  struct object_id oid;
550
  struct object_id peeled_oid;
551
552
  char *prefix;
553
  size_t prefix_len;
554
  char **exclude_patterns;
555
  size_t exclude_patterns_index;
556
  size_t exclude_patterns_strlen;
557
  unsigned int flags;
558
  int err;
559
};
560
561
/*
562
 * Handle exclude patterns. Returns `1` to tell the caller that the current
563
 * reference shall not be shown, or `0` to indicate that it should be
564
 * shown.
565
 */
566
static int should_exclude_current_ref(struct reftable_ref_iterator *iter)
567
0
{
568
0
  while (iter->exclude_patterns[iter->exclude_patterns_index]) {
569
0
    const char *pattern = iter->exclude_patterns[iter->exclude_patterns_index];
570
0
    char *ref_after_pattern;
571
0
    int cmp;
572
573
    /*
574
     * Lazily cache the pattern length so that we don't have to
575
     * recompute it every time this function is called.
576
     */
577
0
    if (!iter->exclude_patterns_strlen)
578
0
      iter->exclude_patterns_strlen = strlen(pattern);
579
580
    /*
581
     * When the reference name is lexicographically bigger than the
582
     * current exclude pattern we know that it won't ever match any
583
     * of the following references, either. We thus advance to the
584
     * next pattern and re-check whether it matches.
585
     *
586
     * Otherwise, if it's smaller, then we do not have a match and
587
     * thus want to show the current reference.
588
     */
589
0
    cmp = strncmp(iter->ref.refname, pattern,
590
0
            iter->exclude_patterns_strlen);
591
0
    if (cmp > 0) {
592
0
      iter->exclude_patterns_index++;
593
0
      iter->exclude_patterns_strlen = 0;
594
0
      continue;
595
0
    }
596
0
    if (cmp < 0)
597
0
      return 0;
598
599
    /*
600
     * The reference shares a prefix with the exclude pattern and
601
     * shall thus be omitted. We skip all references that match the
602
     * pattern by seeking to the first reference after the block of
603
     * matches.
604
     *
605
     * This is done by appending the highest possible character to
606
     * the pattern. Consequently, all references that have the
607
     * pattern as prefix and whose suffix starts with anything in
608
     * the range [0x00, 0xfe] are skipped. And given that 0xff is a
609
     * non-printable character that shouldn't ever be in a ref name,
610
     * we'd not yield any such record, either.
611
     *
612
     * Note that the seeked-to reference may also be excluded. This
613
     * is not handled here though, but the caller is expected to
614
     * loop and re-verify the next reference for us.
615
     */
616
0
    ref_after_pattern = xstrfmt("%s%c", pattern, 0xff);
617
0
    iter->err = reftable_iterator_seek_ref(&iter->iter, ref_after_pattern);
618
0
    iter->exclude_patterns_index++;
619
0
    iter->exclude_patterns_strlen = 0;
620
0
    trace2_counter_add(TRACE2_COUNTER_ID_REFTABLE_RESEEKS, 1);
621
622
0
    free(ref_after_pattern);
623
0
    return 1;
624
0
  }
625
626
0
  return 0;
627
0
}
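
A concrete illustration of the re-seek trick described above; the ref names are made up for the example:

/*
 * With the exclude pattern "refs/heads/foo", the iterator is re-seeked
 * to "refs/heads/foo\xff". Since refs are iterated in sorted order this
 * skips "refs/heads/foo", "refs/heads/foo/bar" and "refs/heads/foobar",
 * and iteration resumes at the first name that no longer has the
 * pattern as a prefix, e.g. "refs/heads/fop".
 */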
628
629
static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
630
0
{
631
0
  struct reftable_ref_iterator *iter =
632
0
    (struct reftable_ref_iterator *)ref_iterator;
633
0
  struct reftable_ref_store *refs = iter->refs;
634
0
  const char *referent = NULL;
635
636
0
  while (!iter->err) {
637
0
    int flags = 0;
638
639
0
    iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
640
0
    if (iter->err)
641
0
      break;
642
643
    /*
644
     * The files backend only lists references contained in "refs/" unless
645
     * the root refs are to be included. We emulate the same behaviour here.
646
     */
647
0
    if (!starts_with(iter->ref.refname, "refs/") &&
648
0
        !(iter->flags & DO_FOR_EACH_INCLUDE_ROOT_REFS &&
649
0
          is_root_ref(iter->ref.refname))) {
650
0
      continue;
651
0
    }
652
653
0
    if (iter->prefix_len &&
654
0
        strncmp(iter->prefix, iter->ref.refname, iter->prefix_len)) {
655
0
      iter->err = 1;
656
0
      break;
657
0
    }
658
659
0
    if (iter->exclude_patterns && should_exclude_current_ref(iter))
660
0
      continue;
661
662
0
    if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
663
0
        parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
664
0
          REF_WORKTREE_CURRENT)
665
0
      continue;
666
667
0
    switch (iter->ref.value_type) {
668
0
    case REFTABLE_REF_VAL1:
669
0
      oidread(&iter->oid, iter->ref.value.val1,
670
0
        refs->base.repo->hash_algo);
671
0
      break;
672
0
    case REFTABLE_REF_VAL2:
673
0
      oidread(&iter->oid, iter->ref.value.val2.value,
674
0
        refs->base.repo->hash_algo);
675
0
      oidread(&iter->peeled_oid, iter->ref.value.val2.target_value,
676
0
        refs->base.repo->hash_algo);
677
0
      break;
678
0
    case REFTABLE_REF_SYMREF:
679
0
      referent = refs_resolve_ref_unsafe(&iter->refs->base,
680
0
                 iter->ref.refname,
681
0
                 RESOLVE_REF_READING,
682
0
                 &iter->oid, &flags);
683
0
      if (!referent)
684
0
        oidclr(&iter->oid, refs->base.repo->hash_algo);
685
0
      break;
686
0
    default:
687
0
      BUG("unhandled reference value type %d", iter->ref.value_type);
688
0
    }
689
690
0
    if (is_null_oid(&iter->oid))
691
0
      flags |= REF_ISBROKEN;
692
693
0
    if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
694
0
      if (!refname_is_safe(iter->ref.refname))
695
0
        die(_("refname is dangerous: %s"), iter->ref.refname);
696
0
      oidclr(&iter->oid, refs->base.repo->hash_algo);
697
0
      flags |= REF_BAD_NAME | REF_ISBROKEN;
698
0
    }
699
700
0
    if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS &&
701
0
        flags & REF_ISSYMREF &&
702
0
        flags & REF_ISBROKEN)
703
0
      continue;
704
705
0
    if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
706
0
        !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
707
0
              &iter->oid, flags))
708
0
        continue;
709
710
0
    memset(&iter->base.ref, 0, sizeof(iter->base.ref));
711
0
    iter->base.ref.name = iter->ref.refname;
712
0
    iter->base.ref.target = referent;
713
0
    iter->base.ref.oid = &iter->oid;
714
0
    if (iter->ref.value_type == REFTABLE_REF_VAL2)
715
0
      iter->base.ref.peeled_oid = &iter->peeled_oid;
716
0
    iter->base.ref.flags = flags;
717
718
0
    break;
719
0
  }
720
721
0
  if (iter->err > 0)
722
0
    return ITER_DONE;
723
0
  if (iter->err < 0)
724
0
    return ITER_ERROR;
725
0
  return ITER_OK;
726
0
}
727
728
static int reftable_ref_iterator_seek(struct ref_iterator *ref_iterator,
729
              const char *refname, unsigned int flags)
730
0
{
731
0
  struct reftable_ref_iterator *iter =
732
0
    (struct reftable_ref_iterator *)ref_iterator;
733
734
  /* Unset any previously set prefix */
735
0
  FREE_AND_NULL(iter->prefix);
736
0
  iter->prefix_len = 0;
737
738
0
  if (flags & REF_ITERATOR_SEEK_SET_PREFIX) {
739
0
    iter->prefix = xstrdup_or_null(refname);
740
0
    iter->prefix_len = refname ? strlen(refname) : 0;
741
0
  }
742
0
  iter->err = reftable_iterator_seek_ref(&iter->iter, refname);
743
744
0
  return iter->err;
745
0
}
746
747
static void reftable_ref_iterator_release(struct ref_iterator *ref_iterator)
748
0
{
749
0
  struct reftable_ref_iterator *iter =
750
0
    (struct reftable_ref_iterator *)ref_iterator;
751
0
  reftable_ref_record_release(&iter->ref);
752
0
  reftable_iterator_destroy(&iter->iter);
753
0
  if (iter->exclude_patterns) {
754
0
    for (size_t i = 0; iter->exclude_patterns[i]; i++)
755
0
      free(iter->exclude_patterns[i]);
756
0
    free(iter->exclude_patterns);
757
0
  }
758
0
  free(iter->prefix);
759
0
}
760
761
static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
762
  .advance = reftable_ref_iterator_advance,
763
  .seek = reftable_ref_iterator_seek,
764
  .release = reftable_ref_iterator_release,
765
};
766
767
static int qsort_strcmp(const void *va, const void *vb)
768
0
{
769
0
  const char *a = *(const char **)va;
770
0
  const char *b = *(const char **)vb;
771
0
  return strcmp(a, b);
772
0
}
773
774
static char **filter_exclude_patterns(const char **exclude_patterns)
775
0
{
776
0
  size_t filtered_size = 0, filtered_alloc = 0;
777
0
  char **filtered = NULL;
778
779
0
  if (!exclude_patterns)
780
0
    return NULL;
781
782
0
  for (size_t i = 0; ; i++) {
783
0
    const char *exclude_pattern = exclude_patterns[i];
784
0
    int has_glob = 0;
785
786
0
    if (!exclude_pattern)
787
0
      break;
788
789
0
    for (const char *p = exclude_pattern; *p; p++) {
790
0
      has_glob = is_glob_special(*p);
791
0
      if (has_glob)
792
0
        break;
793
0
    }
794
0
    if (has_glob)
795
0
      continue;
796
797
0
    ALLOC_GROW(filtered, filtered_size + 1, filtered_alloc);
798
0
    filtered[filtered_size++] = xstrdup(exclude_pattern);
799
0
  }
800
801
0
  if (filtered_size) {
802
0
    QSORT(filtered, filtered_size, qsort_strcmp);
803
0
    ALLOC_GROW(filtered, filtered_size + 1, filtered_alloc);
804
0
    filtered[filtered_size++] = NULL;
805
0
  }
806
807
0
  return filtered;
808
0
}
809
810
static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
811
                  struct reftable_stack *stack,
812
                  const char *prefix,
813
                  const char **exclude_patterns,
814
                  int flags)
815
0
{
816
0
  struct reftable_ref_iterator *iter;
817
0
  int ret;
818
819
0
  iter = xcalloc(1, sizeof(*iter));
820
0
  base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable);
821
0
  iter->base.ref.oid = &iter->oid;
822
0
  iter->flags = flags;
823
0
  iter->refs = refs;
824
0
  iter->exclude_patterns = filter_exclude_patterns(exclude_patterns);
825
826
0
  ret = refs->err;
827
0
  if (ret)
828
0
    goto done;
829
830
0
  ret = reftable_stack_reload(stack);
831
0
  if (ret)
832
0
    goto done;
833
834
0
  ret = reftable_stack_init_ref_iterator(stack, &iter->iter);
835
0
  if (ret)
836
0
    goto done;
837
838
0
  ret = reftable_ref_iterator_seek(&iter->base, prefix,
839
0
           REF_ITERATOR_SEEK_SET_PREFIX);
840
0
  if (ret)
841
0
    goto done;
842
843
0
done:
844
0
  iter->err = ret;
845
0
  return iter;
846
0
}
847
848
static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
849
                   const char *prefix,
850
                   const char **exclude_patterns,
851
                   unsigned int flags)
852
0
{
853
0
  struct reftable_ref_iterator *main_iter, *worktree_iter;
854
0
  struct reftable_ref_store *refs;
855
0
  unsigned int required_flags = REF_STORE_READ;
856
857
0
  if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
858
0
    required_flags |= REF_STORE_ODB;
859
0
  refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");
860
861
0
  main_iter = ref_iterator_for_stack(refs, refs->main_backend.stack, prefix,
862
0
             exclude_patterns, flags);
863
864
  /*
865
   * The worktree stack is only set when we're in an actual worktree
866
   * right now. If we aren't, then we return the common reftable
867
   * iterator, only.
868
   */
869
0
  if (!refs->worktree_backend.stack)
870
0
    return &main_iter->base;
871
872
  /*
873
   * Otherwise we merge both the common and the per-worktree refs into a
874
   * single iterator.
875
   */
876
0
  worktree_iter = ref_iterator_for_stack(refs, refs->worktree_backend.stack, prefix,
877
0
                 exclude_patterns, flags);
878
0
  return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
879
0
          ref_iterator_select, NULL);
880
0
}
881
882
static int reftable_be_read_raw_ref(struct ref_store *ref_store,
883
            const char *refname,
884
            struct object_id *oid,
885
            struct strbuf *referent,
886
            unsigned int *type,
887
            int *failure_errno)
888
0
{
889
0
  struct reftable_ref_store *refs =
890
0
    reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
891
0
  struct reftable_backend *be;
892
0
  int ret;
893
894
0
  if (refs->err < 0)
895
0
    return refs->err;
896
897
0
  ret = backend_for(&be, refs, refname, &refname, 1);
898
0
  if (ret)
899
0
    return ret;
900
901
0
  ret = reftable_backend_read_ref(be, refname, oid, referent, type);
902
0
  if (ret < 0)
903
0
    return ret;
904
0
  if (ret > 0) {
905
0
    *failure_errno = ENOENT;
906
0
    return -1;
907
0
  }
908
909
0
  return 0;
910
0
}
911
912
static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
913
           const char *refname,
914
           struct strbuf *referent)
915
0
{
916
0
  struct reftable_ref_store *refs =
917
0
    reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
918
0
  struct reftable_backend *be;
919
0
  struct object_id oid;
920
0
  unsigned int type = 0;
921
0
  int ret;
922
923
0
  ret = backend_for(&be, refs, refname, &refname, 1);
924
0
  if (ret)
925
0
    return ret;
926
927
0
  ret = reftable_backend_read_ref(be, refname, &oid, referent, &type);
928
0
  if (ret)
929
0
    ret = -1;
930
0
  else if (type == REF_ISSYMREF)
931
0
    ; /* happy */
932
0
  else
933
0
    ret = NOT_A_SYMREF;
934
0
  return ret;
935
0
}
936
937
struct reftable_transaction_update {
938
  struct ref_update *update;
939
  struct object_id current_oid;
940
};
941
942
struct write_transaction_table_arg {
943
  struct reftable_ref_store *refs;
944
  struct reftable_backend *be;
945
  struct reftable_addition *addition;
946
  struct reftable_transaction_update *updates;
947
  size_t updates_nr;
948
  size_t updates_alloc;
949
  size_t updates_expected;
950
  uint64_t max_index;
951
};
952
953
struct reftable_transaction_data {
954
  struct write_transaction_table_arg *args;
955
  size_t args_nr, args_alloc;
956
};
957
958
static void free_transaction_data(struct reftable_transaction_data *tx_data)
959
0
{
960
0
  if (!tx_data)
961
0
    return;
962
0
  for (size_t i = 0; i < tx_data->args_nr; i++) {
963
0
    reftable_addition_destroy(tx_data->args[i].addition);
964
0
    free(tx_data->args[i].updates);
965
0
  }
966
0
  free(tx_data->args);
967
0
  free(tx_data);
968
0
}
969
970
/*
971
 * Prepare transaction update for the given reference update. This will cause
972
 * us to lock the corresponding reftable stack for concurrent modification.
973
 */
974
static int prepare_transaction_update(struct write_transaction_table_arg **out,
975
              struct reftable_ref_store *refs,
976
              struct reftable_transaction_data *tx_data,
977
              struct ref_update *update,
978
              struct strbuf *err)
979
0
{
980
0
  struct write_transaction_table_arg *arg = NULL;
981
0
  struct reftable_backend *be;
982
0
  size_t i;
983
0
  int ret;
984
985
  /*
986
   * This function gets called in a loop, and we don't want to repeatedly
987
   * reload the stack for every single ref update. Instead, we manually
988
   * reload further down in the case where we haven't yet prepared the
989
   * specific `reftable_backend`.
990
   */
991
0
  ret = backend_for(&be, refs, update->refname, NULL, 0);
992
0
  if (ret)
993
0
    return ret;
994
995
  /*
996
   * Search for a preexisting stack update. If there is one then we add
997
   * the update to it, otherwise we set up a new stack update.
998
   */
999
0
  for (i = 0; !arg && i < tx_data->args_nr; i++)
1000
0
    if (tx_data->args[i].be == be)
1001
0
      arg = &tx_data->args[i];
1002
1003
0
  if (!arg) {
1004
0
    struct reftable_addition *addition;
1005
1006
0
    ret = reftable_stack_new_addition(&addition, be->stack,
1007
0
              REFTABLE_STACK_NEW_ADDITION_RELOAD);
1008
0
    if (ret) {
1009
0
      if (ret == REFTABLE_LOCK_ERROR)
1010
0
        strbuf_addstr(err, "cannot lock references");
1011
0
      return ret;
1012
0
    }
1013
1014
0
    ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
1015
0
         tx_data->args_alloc);
1016
0
    arg = &tx_data->args[tx_data->args_nr++];
1017
0
    arg->refs = refs;
1018
0
    arg->be = be;
1019
0
    arg->addition = addition;
1020
0
    arg->updates = NULL;
1021
0
    arg->updates_nr = 0;
1022
0
    arg->updates_alloc = 0;
1023
0
    arg->updates_expected = 0;
1024
0
    arg->max_index = 0;
1025
0
  }
1026
1027
0
  arg->updates_expected++;
1028
1029
0
  if (out)
1030
0
    *out = arg;
1031
1032
0
  return 0;
1033
0
}
1034
1035
/*
1036
 * Queue a reference update for the correct stack. We potentially need to
1037
 * handle multiple stack updates in a single transaction when it spans across
1038
 * multiple worktrees.
1039
 */
1040
static int queue_transaction_update(struct reftable_ref_store *refs,
1041
            struct reftable_transaction_data *tx_data,
1042
            struct ref_update *update,
1043
            struct object_id *current_oid,
1044
            struct strbuf *err)
1045
0
{
1046
0
  struct write_transaction_table_arg *arg = NULL;
1047
0
  int ret;
1048
1049
0
  if (update->backend_data)
1050
0
    BUG("reference update queued more than once");
1051
1052
0
  ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
1053
0
  if (ret < 0)
1054
0
    return ret;
1055
1056
0
  ALLOC_GROW(arg->updates, arg->updates_nr + 1,
1057
0
       arg->updates_alloc);
1058
0
  arg->updates[arg->updates_nr].update = update;
1059
0
  oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
1060
0
  update->backend_data = &arg->updates[arg->updates_nr++];
1061
1062
0
  return 0;
1063
0
}
1064
1065
static enum ref_transaction_error prepare_single_update(struct reftable_ref_store *refs,
1066
              struct reftable_transaction_data *tx_data,
1067
              struct ref_transaction *transaction,
1068
              struct reftable_backend *be,
1069
              struct ref_update *u,
1070
              size_t update_idx,
1071
              struct string_list *refnames_to_check,
1072
              unsigned int head_type,
1073
              struct strbuf *head_referent,
1074
              struct strbuf *referent,
1075
              struct strbuf *err)
1076
0
{
1077
0
  enum ref_transaction_error ret = 0;
1078
0
  struct object_id current_oid = {0};
1079
0
  const char *rewritten_ref;
1080
1081
  /*
1082
   * There is no need to reload the respective backends here as
1083
   * we have already reloaded them when preparing the transaction
1084
   * update. And given that the stacks have been locked there
1085
   * shouldn't have been any concurrent modifications of the
1086
   * stack.
1087
   */
1088
0
  ret = backend_for(&be, refs, u->refname, &rewritten_ref, 0);
1089
0
  if (ret)
1090
0
    return REF_TRANSACTION_ERROR_GENERIC;
1091
1092
0
  if (u->flags & REF_LOG_USE_PROVIDED_OIDS) {
1093
0
    if (!(u->flags & REF_HAVE_OLD) ||
1094
0
        !(u->flags & REF_HAVE_NEW) ||
1095
0
        !(u->flags & REF_LOG_ONLY)) {
1096
0
      strbuf_addf(err, _("trying to write reflog for '%s' "
1097
0
             "with incomplete values"), u->refname);
1098
0
      return REF_TRANSACTION_ERROR_GENERIC;
1099
0
    }
1100
1101
0
    if (queue_transaction_update(refs, tx_data, u, &u->old_oid, err))
1102
0
      return REF_TRANSACTION_ERROR_GENERIC;
1103
0
    return 0;
1104
0
  }
1105
1106
  /* Verify that the new object ID is valid. */
1107
0
  if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
1108
0
      !(u->flags & REF_SKIP_OID_VERIFICATION) &&
1109
0
      !(u->flags & REF_LOG_ONLY)) {
1110
0
    struct object *o = parse_object(refs->base.repo, &u->new_oid);
1111
0
    if (!o) {
1112
0
      strbuf_addf(err,
1113
0
            _("trying to write ref '%s' with nonexistent object %s"),
1114
0
            u->refname, oid_to_hex(&u->new_oid));
1115
0
      return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE;
1116
0
    }
1117
1118
0
    if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
1119
0
      strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
1120
0
            oid_to_hex(&u->new_oid), u->refname);
1121
0
      return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE;
1122
0
    }
1123
0
  }
1124
1125
  /*
1126
   * When we update the reference that HEAD points to we enqueue
1127
   * a second log-only update for HEAD so that its reflog is
1128
   * updated accordingly.
1129
   */
1130
0
  if (head_type == REF_ISSYMREF &&
1131
0
      !(u->flags & REF_LOG_ONLY) &&
1132
0
      !(u->flags & REF_UPDATE_VIA_HEAD) &&
1133
0
      !strcmp(rewritten_ref, head_referent->buf)) {
1134
    /*
1135
     * First make sure that HEAD is not already in the
1136
     * transaction. This check is O(lg N) in the transaction
1137
     * size, but it happens at most once per transaction.
1138
     */
1139
0
    if (string_list_has_string(&transaction->refnames, "HEAD")) {
1140
      /* An entry already existed */
1141
0
      strbuf_addf(err,
1142
0
            _("multiple updates for 'HEAD' (including one "
1143
0
              "via its referent '%s') are not allowed"),
1144
0
            u->refname);
1145
0
      return REF_TRANSACTION_ERROR_NAME_CONFLICT;
1146
0
    }
1147
1148
0
    ref_transaction_add_update(
1149
0
      transaction, "HEAD",
1150
0
      u->flags | REF_LOG_ONLY | REF_NO_DEREF,
1151
0
      &u->new_oid, &u->old_oid, NULL, NULL, NULL,
1152
0
      u->msg);
1153
0
  }
1154
1155
0
  ret = reftable_backend_read_ref(be, rewritten_ref,
1156
0
          &current_oid, referent, &u->type);
1157
0
  if (ret < 0)
1158
0
    return REF_TRANSACTION_ERROR_GENERIC;
1159
0
  if (ret > 0 && !ref_update_expects_existing_old_ref(u)) {
1160
0
    struct string_list_item *item;
1161
    /*
1162
     * The reference does not exist, and we either have no
1163
     * old object ID or expect the reference to not exist.
1164
     * We can thus skip the safety checks below as well as the
1165
     * symref splitting. But we do want to verify that
1166
     * there is no conflicting reference here so that we
1167
     * can output a proper error message instead of failing
1168
     * at a later point.
1169
     */
1170
0
    item = string_list_append(refnames_to_check, u->refname);
1171
0
    item->util = xmalloc(sizeof(update_idx));
1172
0
    memcpy(item->util, &update_idx, sizeof(update_idx));
1173
1174
    /*
1175
     * There is no need to write the reference deletion
1176
     * when the reference in question doesn't exist.
1177
     */
1178
0
    if ((u->flags & REF_HAVE_NEW) && !ref_update_has_null_new_value(u)) {
1179
0
      ret = queue_transaction_update(refs, tx_data, u,
1180
0
                   &current_oid, err);
1181
0
      if (ret)
1182
0
        return REF_TRANSACTION_ERROR_GENERIC;
1183
0
    }
1184
1185
0
    return 0;
1186
0
  }
1187
0
  if (ret > 0) {
1188
    /* The reference does not exist, but we expected it to. */
1189
0
    strbuf_addf(err, _("cannot lock ref '%s': "
1190
0
           "unable to resolve reference '%s'"),
1191
0
          ref_update_original_update_refname(u), u->refname);
1192
0
    return REF_TRANSACTION_ERROR_NONEXISTENT_REF;
1193
0
  }
1194
1195
0
  if (u->type & REF_ISSYMREF) {
1196
    /*
1197
     * The reftable stack is locked at this point already,
1198
     * so it is safe to call `refs_resolve_ref_unsafe()`
1199
     * here without causing races.
1200
     */
1201
0
    const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
1202
0
                     &current_oid, NULL);
1203
1204
0
    if (u->flags & REF_NO_DEREF) {
1205
0
      if (u->flags & REF_HAVE_OLD && !resolved) {
1206
0
        strbuf_addf(err, _("cannot lock ref '%s': "
1207
0
               "error reading reference"), u->refname);
1208
0
        return REF_TRANSACTION_ERROR_GENERIC;
1209
0
      }
1210
0
    } else {
1211
0
      struct ref_update *new_update;
1212
0
      int new_flags;
1213
1214
0
      new_flags = u->flags;
1215
0
      if (!strcmp(rewritten_ref, "HEAD"))
1216
0
        new_flags |= REF_UPDATE_VIA_HEAD;
1217
1218
0
      if (string_list_has_string(&transaction->refnames, referent->buf)) {
1219
0
        strbuf_addf(err,
1220
0
              _("multiple updates for '%s' (including one "
1221
0
                "via symref '%s') are not allowed"),
1222
0
              referent->buf, u->refname);
1223
0
        return REF_TRANSACTION_ERROR_NAME_CONFLICT;
1224
0
      }
1225
1226
      /*
1227
       * If we are updating a symref (eg. HEAD), we should also
1228
       * update the branch that the symref points to.
1229
       *
1230
       * This is generic functionality, and would be better
1231
       * done in refs.c, but the current implementation is
1232
       * intertwined with the locking in files-backend.c.
1233
       */
1234
0
      new_update = ref_transaction_add_update(
1235
0
        transaction, referent->buf, new_flags,
1236
0
        u->new_target ? NULL : &u->new_oid,
1237
0
        u->old_target ? NULL : &u->old_oid,
1238
0
        u->new_target, u->old_target,
1239
0
        u->committer_info, u->msg);
1240
1241
0
      new_update->parent_update = u;
1242
1243
      /* Change the symbolic ref update to log only. */
1244
0
      u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
1245
0
    }
1246
0
  }
1247
1248
  /*
1249
   * Verify that the old object matches our expectations. Note
1250
   * that the error messages here do not make a lot of sense in
1251
   * the context of the reftable backend as we never lock
1252
   * individual refs. But the error messages match what the files
1253
   * backend returns, which keeps our tests happy.
1254
   */
1255
0
  if (u->old_target) {
1256
0
    if (!(u->type & REF_ISSYMREF)) {
1257
0
      strbuf_addf(err, _("cannot lock ref '%s': "
1258
0
             "expected symref with target '%s': "
1259
0
             "but is a regular ref"),
1260
0
            ref_update_original_update_refname(u),
1261
0
            u->old_target);
1262
0
      return REF_TRANSACTION_ERROR_EXPECTED_SYMREF;
1263
0
    }
1264
1265
0
    ret = ref_update_check_old_target(referent->buf, u, err);
1266
0
    if (ret)
1267
0
      return ret;
1268
0
  } else if ((u->flags & (REF_LOG_ONLY | REF_HAVE_OLD)) == REF_HAVE_OLD) {
1269
0
    if (oideq(&current_oid, &u->old_oid)) {
1270
      /*
1271
       * Normally matching the expected old oid is enough. Either we
1272
       * found the ref at the expected state, or we are creating and
1273
       * expect the null oid (and likewise found nothing).
1274
       *
1275
       * But there is one exception for the null oid: if we found a
1276
       * symref pointing to nothing we'll also get the null oid. In
1277
       * regular recursive mode, that's good (we'll write to what the
1278
       * symref points to, which doesn't exist). But in no-deref
1279
       * mode, it means we'll clobber the symref, even though the
1280
       * caller asked for this to be a creation event. So flag
1281
       * that case to preserve the dangling symref.
1282
       *
1283
       * Everything else is OK and we can fall through to the
1284
       * end of the conditional chain.
1285
       */
1286
0
      if ((u->flags & REF_NO_DEREF) &&
1287
0
          referent->len &&
1288
0
          is_null_oid(&u->old_oid)) {
1289
0
        strbuf_addf(err, _("cannot lock ref '%s': "
1290
0
              "dangling symref already exists"),
1291
0
              ref_update_original_update_refname(u));
1292
0
        return REF_TRANSACTION_ERROR_CREATE_EXISTS;
1293
0
      }
1294
0
    } else if (is_null_oid(&u->old_oid)) {
1295
0
      strbuf_addf(err, _("cannot lock ref '%s': "
1296
0
             "reference already exists"),
1297
0
            ref_update_original_update_refname(u));
1298
0
      return REF_TRANSACTION_ERROR_CREATE_EXISTS;
1299
0
    } else if (is_null_oid(&current_oid)) {
1300
0
      strbuf_addf(err, _("cannot lock ref '%s': "
1301
0
             "reference is missing but expected %s"),
1302
0
            ref_update_original_update_refname(u),
1303
0
            oid_to_hex(&u->old_oid));
1304
0
      return REF_TRANSACTION_ERROR_NONEXISTENT_REF;
1305
0
    } else {
1306
0
      strbuf_addf(err, _("cannot lock ref '%s': "
1307
0
             "is at %s but expected %s"),
1308
0
            ref_update_original_update_refname(u),
1309
0
            oid_to_hex(&current_oid),
1310
0
            oid_to_hex(&u->old_oid));
1311
0
      return REF_TRANSACTION_ERROR_INCORRECT_OLD_VALUE;
1312
0
    }
1313
0
  }
1314
1315
  /*
1316
   * If all of the following conditions are true:
1317
   *
1318
   *   - We're not about to write a symref.
1319
   *   - We're not about to write a log-only entry.
1320
   *   - Old and new object ID are the same.
1321
   *
1322
   * Then we're essentially doing a no-op update that can be
1323
   * skipped. This is not only for the sake of efficiency, but
1324
   * also skips writing unneeded reflog entries.
1325
   */
1326
0
  if ((u->type & REF_ISSYMREF) ||
1327
0
      (u->flags & REF_LOG_ONLY) ||
1328
0
      (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid)))
1329
0
    if (queue_transaction_update(refs, tx_data, u, &current_oid, err))
1330
0
      return REF_TRANSACTION_ERROR_GENERIC;
1331
1332
0
  return 0;
1333
0
}
1334
1335
static int reftable_be_transaction_prepare(struct ref_store *ref_store,
1336
             struct ref_transaction *transaction,
1337
             struct strbuf *err)
1338
0
{
1339
0
  struct reftable_ref_store *refs =
1340
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
1341
0
  struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
1342
0
  struct string_list refnames_to_check = STRING_LIST_INIT_NODUP;
1343
0
  struct reftable_transaction_data *tx_data = NULL;
1344
0
  struct reftable_backend *be;
1345
0
  struct object_id head_oid;
1346
0
  unsigned int head_type = 0;
1347
0
  size_t i;
1348
0
  int ret;
1349
1350
0
  ret = refs->err;
1351
0
  if (ret < 0)
1352
0
    goto done;
1353
1354
0
  tx_data = xcalloc(1, sizeof(*tx_data));
1355
1356
  /*
1357
   * Preprocess all updates. For one we check that there are no duplicate
1358
   * reference updates in this transaction. Second, we lock all stacks
1359
   * that will be modified during the transaction.
1360
   */
1361
0
  for (i = 0; i < transaction->nr; i++) {
1362
0
    ret = prepare_transaction_update(NULL, refs, tx_data,
1363
0
             transaction->updates[i], err);
1364
0
    if (ret)
1365
0
      goto done;
1366
0
  }
1367
1368
  /*
1369
   * Now that we have counted updates per stack we can preallocate their
1370
   * arrays. This avoids having to reallocate many times.
1371
   */
1372
0
  for (i = 0; i < tx_data->args_nr; i++) {
1373
0
    CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
1374
0
    tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
1375
0
  }
1376
1377
  /*
1378
   * TODO: it's dubious whether we should reload the stack that "HEAD"
1379
   * belongs to or not. In theory, it may happen that we only modify
1380
   * stacks which are _not_ part of the "HEAD" stack. In that case we
1381
   * wouldn't have prepared any transaction for its stack and would not
1382
   * have reloaded it, which may mean that it is stale.
1383
   *
1384
   * On the other hand, reloading that stack without locking it feels
1385
   * wrong, too, as the value of "HEAD" could be modified concurrently at
1386
   * any point in time.
1387
   */
1388
0
  ret = backend_for(&be, refs, "HEAD", NULL, 0);
1389
0
  if (ret)
1390
0
    goto done;
1391
1392
0
  ret = reftable_backend_read_ref(be, "HEAD", &head_oid,
1393
0
          &head_referent, &head_type);
1394
0
  if (ret < 0)
1395
0
    goto done;
1396
0
  ret = 0;
1397
1398
0
  for (i = 0; i < transaction->nr; i++) {
1399
0
    ret = prepare_single_update(refs, tx_data, transaction, be,
1400
0
              transaction->updates[i], i,
1401
0
              &refnames_to_check, head_type,
1402
0
              &head_referent, &referent, err);
1403
0
    if (ret) {
1404
0
      if (ref_transaction_maybe_set_rejected(transaction, i, ret)) {
1405
0
        strbuf_reset(err);
1406
0
        ret = 0;
1407
1408
0
        continue;
1409
0
      }
1410
0
      goto done;
1411
0
    }
1412
0
  }
1413
1414
0
  ret = refs_verify_refnames_available(ref_store, &refnames_to_check,
1415
0
               &transaction->refnames, NULL,
1416
0
               transaction,
1417
0
               transaction->flags & REF_TRANSACTION_FLAG_INITIAL,
1418
0
               err);
1419
0
  if (ret < 0)
1420
0
    goto done;
1421
1422
0
  transaction->backend_data = tx_data;
1423
0
  transaction->state = REF_TRANSACTION_PREPARED;
1424
1425
0
done:
1426
0
  if (ret < 0) {
1427
0
    free_transaction_data(tx_data);
1428
0
    transaction->state = REF_TRANSACTION_CLOSED;
1429
0
    if (!err->len)
1430
0
      strbuf_addf(err, _("reftable: transaction prepare: %s"),
1431
0
            reftable_error_str(ret));
1432
0
  }
1433
0
  strbuf_release(&referent);
1434
0
  strbuf_release(&head_referent);
1435
0
  string_list_clear(&refnames_to_check, 1);
1436
1437
0
  return ret;
1438
0
}
1439
1440
static int reftable_be_transaction_abort(struct ref_store *ref_store UNUSED,
1441
           struct ref_transaction *transaction,
1442
           struct strbuf *err UNUSED)
1443
0
{
1444
0
  struct reftable_transaction_data *tx_data = transaction->backend_data;
1445
0
  free_transaction_data(tx_data);
1446
0
  transaction->state = REF_TRANSACTION_CLOSED;
1447
0
  return 0;
1448
0
}
1449
1450
static int transaction_update_cmp(const void *a, const void *b)
1451
0
{
1452
0
  struct reftable_transaction_update *update_a = (struct reftable_transaction_update *)a;
1453
0
  struct reftable_transaction_update *update_b = (struct reftable_transaction_update *)b;
1454
1455
  /*
1456
   * If there is an index set, it should take preference (default is 0).
1457
   * This ensures that updates with indexes are sorted amongst themselves.
1458
   */
1459
0
  if (update_a->update->index || update_b->update->index)
1460
0
    return update_a->update->index - update_b->update->index;
1461
1462
0
  return strcmp(update_a->update->refname, update_b->update->refname);
1463
0
}
1464
1465
static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
1466
0
{
1467
0
  struct write_transaction_table_arg *arg = cb_data;
1468
0
  uint64_t ts = reftable_stack_next_update_index(arg->be->stack);
1469
0
  struct reftable_log_record *logs = NULL;
1470
0
  struct ident_split committer_ident = {0};
1471
0
  size_t logs_nr = 0, logs_alloc = 0, i;
1472
0
  const char *committer_info;
1473
0
  int ret = 0;
1474
1475
0
  committer_info = git_committer_info(0);
1476
0
  if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
1477
0
    BUG("failed splitting committer info");
1478
1479
0
  QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);
1480
1481
  /*
1482
   * During reflog migration, we add indexes for a single reflog with
1483
   * multiple entries. Each entry will contain a different update_index,
1484
   * so set the limits accordingly.
1485
   */
1486
0
  ret = reftable_writer_set_limits(writer, ts, ts + arg->max_index);
1487
0
  if (ret < 0)
1488
0
    goto done;
1489
1490
0
  for (i = 0; i < arg->updates_nr; i++) {
1491
0
    struct reftable_transaction_update *tx_update = &arg->updates[i];
1492
0
    struct ref_update *u = tx_update->update;
1493
1494
0
    if (u->rejection_err)
1495
0
      continue;
1496
1497
    /*
1498
     * Write a reflog entry when updating a ref to point to
1499
     * something new in either of the following cases:
1500
     *
1501
     * - The reference is about to be deleted. We always want to
1502
     *   delete the reflog in that case.
1503
     * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
1504
     *   the reflog entry.
1505
     * - `core.logAllRefUpdates` tells us to create the reflog for
1506
     *   the given ref.
1507
     */
1508
0
    if ((u->flags & REF_HAVE_NEW) &&
1509
0
        !(u->type & REF_ISSYMREF) &&
1510
0
        ref_update_has_null_new_value(u)) {
1511
0
      struct reftable_log_record log = {0};
1512
0
      struct reftable_iterator it = {0};
1513
1514
0
      ret = reftable_stack_init_log_iterator(arg->be->stack, &it);
1515
0
      if (ret < 0)
1516
0
        goto done;
1517
1518
      /*
1519
       * When deleting refs we also delete all reflog entries
1520
       * with them. While it is not strictly required to
1521
       * delete reflogs together with their refs, this
1522
       * matches the behaviour of the files backend.
1523
       *
1524
       * Unfortunately, we have no better way than to delete
1525
       * all reflog entries one by one.
1526
       */
1527
0
      ret = reftable_iterator_seek_log(&it, u->refname);
1528
0
      while (ret == 0) {
1529
0
        struct reftable_log_record *tombstone;
1530
1531
0
        ret = reftable_iterator_next_log(&it, &log);
1532
0
        if (ret < 0)
1533
0
          break;
1534
0
        if (ret > 0 || strcmp(log.refname, u->refname)) {
1535
0
          ret = 0;
1536
0
          break;
1537
0
        }
1538
1539
0
        ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1540
0
        tombstone = &logs[logs_nr++];
1541
0
        tombstone->refname = xstrdup(u->refname);
1542
0
        tombstone->value_type = REFTABLE_LOG_DELETION;
1543
0
        tombstone->update_index = log.update_index;
1544
0
      }
1545
1546
0
      reftable_log_record_release(&log);
1547
0
      reftable_iterator_destroy(&it);
1548
1549
0
      if (ret)
1550
0
        goto done;
1551
0
    } else if (!(u->flags & REF_SKIP_CREATE_REFLOG) &&
1552
0
         (u->flags & REF_HAVE_NEW) &&
1553
0
         (u->flags & REF_FORCE_CREATE_REFLOG ||
1554
0
          should_write_log(arg->refs, u->refname))) {
1555
0
      struct reftable_log_record *log;
1556
0
      int create_reflog = 1;
1557
1558
0
      if (u->new_target) {
1559
0
        if (!refs_resolve_ref_unsafe(&arg->refs->base, u->new_target,
1560
0
                   RESOLVE_REF_READING, &u->new_oid, NULL)) {
1561
          /*
1562
           * TODO: currently we skip creating reflogs for dangling
1563
           * symref updates. It would be nice to capture these as
1564
           * zero-OID updates, however.
1565
           */
1566
0
          create_reflog = 0;
1567
0
        }
1568
0
      }
1569
1570
0
      if (create_reflog) {
1571
0
        struct ident_split c;
1572
1573
0
        ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1574
0
        log = &logs[logs_nr++];
1575
0
        memset(log, 0, sizeof(*log));
1576
1577
0
        if (u->committer_info) {
1578
0
          if (split_ident_line(&c, u->committer_info,
1579
0
                   strlen(u->committer_info)))
1580
0
            BUG("failed splitting committer info");
1581
0
        } else {
1582
0
          c = committer_ident;
1583
0
        }
1584
1585
0
        fill_reftable_log_record(log, &c);
1586
1587
        /*
1588
         * Updates are sorted by the writer. So updates for the same
1589
         * refname need to contain different update indices.
1590
         */
1591
0
        log->update_index = ts + u->index;
1592
1593
0
        log->refname = xstrdup(u->refname);
1594
0
        memcpy(log->value.update.new_hash,
1595
0
               u->new_oid.hash, GIT_MAX_RAWSZ);
1596
0
        memcpy(log->value.update.old_hash,
1597
0
               tx_update->current_oid.hash, GIT_MAX_RAWSZ);
1598
0
        log->value.update.message =
1599
0
          xstrndup(u->msg, arg->refs->write_options.block_size / 2);
1600
0
      }
1601
0
    }
1602
1603
0
    if (u->flags & REF_LOG_ONLY)
1604
0
      continue;
1605
1606
0
    if (u->new_target) {
1607
0
      struct reftable_ref_record ref = {
1608
0
        .refname = (char *)u->refname,
1609
0
        .value_type = REFTABLE_REF_SYMREF,
1610
0
        .value.symref = (char *)u->new_target,
1611
0
        .update_index = ts,
1612
0
      };
1613
1614
0
      ret = reftable_writer_add_ref(writer, &ref);
1615
0
      if (ret < 0)
1616
0
        goto done;
1617
0
    } else if ((u->flags & REF_HAVE_NEW) && ref_update_has_null_new_value(u)) {
1618
0
      struct reftable_ref_record ref = {
1619
0
        .refname = (char *)u->refname,
1620
0
        .update_index = ts,
1621
0
        .value_type = REFTABLE_REF_DELETION,
1622
0
      };
1623
1624
0
      ret = reftable_writer_add_ref(writer, &ref);
1625
0
      if (ret < 0)
1626
0
        goto done;
1627
0
    } else if (u->flags & REF_HAVE_NEW) {
1628
0
      struct reftable_ref_record ref = {0};
1629
0
      struct object_id peeled;
1630
0
      int peel_error;
1631
1632
0
      ref.refname = (char *)u->refname;
1633
0
      ref.update_index = ts;
1634
1635
0
      peel_error = peel_object(arg->refs->base.repo, &u->new_oid, &peeled,
1636
0
             PEEL_OBJECT_VERIFY_TAGGED_OBJECT_TYPE);
1637
0
      if (!peel_error) {
1638
0
        ref.value_type = REFTABLE_REF_VAL2;
1639
0
        memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
1640
0
        memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
1641
0
      } else if (!is_null_oid(&u->new_oid)) {
1642
0
        ref.value_type = REFTABLE_REF_VAL1;
1643
0
        memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
1644
0
      }
1645
1646
0
      ret = reftable_writer_add_ref(writer, &ref);
1647
0
      if (ret < 0)
1648
0
        goto done;
1649
0
    }
1650
0
  }
1651
1652
  /*
1653
   * Logs are written at the end so that we do not have intermixed ref
1654
   * and log blocks.
1655
   */
1656
0
  if (logs) {
1657
0
    ret = reftable_writer_add_logs(writer, logs, logs_nr);
1658
0
    if (ret < 0)
1659
0
      goto done;
1660
0
  }
1661
1662
0
done:
1663
0
  assert(ret != REFTABLE_API_ERROR);
1664
0
  for (i = 0; i < logs_nr; i++)
1665
0
    reftable_log_record_release(&logs[i]);
1666
0
  free(logs);
1667
0
  return ret;
1668
0
}
1669
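/*
 * The tombstone loop above uses an idiom that recurs throughout this
 * backend: seek the log iterator to a refname, then keep consuming records
 * until the iterator is exhausted (ret > 0) or yields a record belonging to
 * a different ref. A minimal sketch of that idiom follows; it assumes the
 * reftable headers included at the top of this file, and the helper name
 * and callback are illustrative only, not part of the reftable API.
 */
static int for_each_log_of_ref(struct reftable_stack *stack,
             const char *refname,
             int (*cb)(struct reftable_log_record *, void *),
             void *cb_data)
{
  struct reftable_iterator it = {0};
  struct reftable_log_record log = {0};
  int ret;

  ret = reftable_stack_init_log_iterator(stack, &it);
  if (ret < 0)
    goto done;

  ret = reftable_iterator_seek_log(&it, refname);
  while (!ret) {
    ret = reftable_iterator_next_log(&it, &log);
    if (ret < 0)
      break;
    if (ret > 0 || strcmp(log.refname, refname)) {
      /* Running off the end of this ref's records is not an error. */
      ret = 0;
      break;
    }
    ret = cb(&log, cb_data);
  }

done:
  reftable_log_record_release(&log);
  reftable_iterator_destroy(&it);
  return ret;
}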
1670
static int reftable_be_transaction_finish(struct ref_store *ref_store UNUSED,
1671
            struct ref_transaction *transaction,
1672
            struct strbuf *err)
1673
0
{
1674
0
  struct reftable_transaction_data *tx_data = transaction->backend_data;
1675
0
  int ret = 0;
1676
1677
0
  for (size_t i = 0; i < tx_data->args_nr; i++) {
1678
0
    tx_data->args[i].max_index = transaction->max_index;
1679
1680
0
    ret = reftable_addition_add(tx_data->args[i].addition,
1681
0
              write_transaction_table, &tx_data->args[i]);
1682
0
    if (ret < 0)
1683
0
      goto done;
1684
1685
0
    ret = reftable_addition_commit(tx_data->args[i].addition);
1686
0
    if (ret < 0)
1687
0
      goto done;
1688
0
  }
1689
1690
0
done:
1691
0
  assert(ret != REFTABLE_API_ERROR);
1692
0
  free_transaction_data(tx_data);
1693
0
  transaction->state = REF_TRANSACTION_CLOSED;
1694
1695
0
  if (ret) {
1696
0
    strbuf_addf(err, _("reftable: transaction failure: %s"),
1697
0
          reftable_error_str(ret));
1698
0
    return -1;
1699
0
  }
1700
0
  return ret;
1701
0
}
1702
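/*
 * reftable_be_transaction_finish() above drives one staged addition per
 * stack: records are produced by a writer callback while the addition is
 * pending, and nothing becomes visible to readers until
 * reftable_addition_commit() succeeds. A condensed sketch of that flow
 * (here the addition is also created locally), assuming the reftable
 * headers included at the top of this file; the helper name is
 * illustrative only.
 */
static int commit_one_table(struct reftable_stack *stack,
          int (*write_table)(struct reftable_writer *, void *),
          void *arg)
{
  struct reftable_addition *add = NULL;
  int ret;

  ret = reftable_stack_new_addition(&add, stack,
            REFTABLE_STACK_NEW_ADDITION_RELOAD);
  if (ret < 0)
    goto done;

  /* Stage a new table whose records are emitted by the callback. */
  ret = reftable_addition_add(add, write_table, arg);
  if (ret < 0)
    goto done;

  /* Atomically append the staged table to the stack. */
  ret = reftable_addition_commit(add);

done:
  reftable_addition_destroy(add);
  return ret;
}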
1703
static int reftable_be_optimize(struct ref_store *ref_store,
1704
        struct refs_optimize_opts *opts)
1705
0
{
1706
0
  struct reftable_ref_store *refs =
1707
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "optimize_refs");
1708
0
  struct reftable_stack *stack;
1709
0
  int ret;
1710
1711
0
  if (refs->err)
1712
0
    return refs->err;
1713
1714
0
  stack = refs->worktree_backend.stack;
1715
0
  if (!stack)
1716
0
    stack = refs->main_backend.stack;
1717
1718
0
  if (opts->flags & REFS_OPTIMIZE_AUTO)
1719
0
    ret = reftable_stack_auto_compact(stack);
1720
0
  else
1721
0
    ret = reftable_stack_compact_all(stack, NULL);
1722
0
  if (ret < 0) {
1723
0
    ret = error(_("unable to compact stack: %s"),
1724
0
          reftable_error_str(ret));
1725
0
    goto out;
1726
0
  }
1727
1728
0
  ret = reftable_stack_clean(stack);
1729
0
  if (ret)
1730
0
    goto out;
1731
1732
0
out:
1733
0
  return ret;
1734
0
}
1735
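/*
 * A compact sketch of the two optimization modes dispatched by
 * reftable_be_optimize() above, assuming the reftable headers included at
 * the top of this file: REFS_OPTIMIZE_AUTO maps to heuristic
 * auto-compaction, everything else to a full compaction, and both are
 * followed by removing stale table files. The helper name is illustrative.
 */
static int compact_and_clean(struct reftable_stack *stack, int auto_mode)
{
  int ret = auto_mode ? reftable_stack_auto_compact(stack)
          : reftable_stack_compact_all(stack, NULL);
  if (ret < 0)
    return ret;
  return reftable_stack_clean(stack);
}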
1736
static int reftable_be_optimize_required(struct ref_store *ref_store,
1737
           struct refs_optimize_opts *opts,
1738
           bool *required)
1739
0
{
1740
0
  struct reftable_ref_store *refs = reftable_be_downcast(ref_store, REF_STORE_READ,
1741
0
                     "optimize_refs_required");
1742
0
  struct reftable_stack *stack;
1743
0
  bool use_heuristics = false;
1744
1745
0
  if (refs->err)
1746
0
    return refs->err;
1747
1748
0
  stack = refs->worktree_backend.stack;
1749
0
  if (!stack)
1750
0
    stack = refs->main_backend.stack;
1751
1752
0
  if (opts->flags & REFS_OPTIMIZE_AUTO)
1753
0
    use_heuristics = true;
1754
1755
0
  return reftable_stack_compaction_required(stack, use_heuristics,
1756
0
              required);
1757
0
}
1758
1759
struct write_create_symref_arg {
1760
  struct reftable_ref_store *refs;
1761
  struct reftable_stack *stack;
1762
  struct strbuf *err;
1763
  const char *refname;
1764
  const char *target;
1765
  const char *logmsg;
1766
};
1767
1768
struct write_copy_arg {
1769
  struct reftable_ref_store *refs;
1770
  struct reftable_backend *be;
1771
  const char *oldname;
1772
  const char *newname;
1773
  const char *logmsg;
1774
  int delete_old;
1775
};
1776
1777
static int write_copy_table(struct reftable_writer *writer, void *cb_data)
1778
0
{
1779
0
  struct write_copy_arg *arg = cb_data;
1780
0
  uint64_t deletion_ts, creation_ts;
1781
0
  struct reftable_ref_record old_ref = {0}, refs[2] = {0};
1782
0
  struct reftable_log_record old_log = {0}, *logs = NULL;
1783
0
  struct reftable_iterator it = {0};
1784
0
  struct string_list skip = STRING_LIST_INIT_NODUP;
1785
0
  struct ident_split committer_ident = {0};
1786
0
  struct strbuf errbuf = STRBUF_INIT;
1787
0
  size_t logs_nr = 0, logs_alloc = 0, i;
1788
0
  const char *committer_info;
1789
0
  int ret;
1790
1791
0
  committer_info = git_committer_info(0);
1792
0
  if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
1793
0
    BUG("failed splitting committer info");
1794
1795
0
  if (reftable_stack_read_ref(arg->be->stack, arg->oldname, &old_ref)) {
1796
0
    ret = error(_("refname %s not found"), arg->oldname);
1797
0
    goto done;
1798
0
  }
1799
0
  if (old_ref.value_type == REFTABLE_REF_SYMREF) {
1800
0
    ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
1801
0
          arg->oldname);
1802
0
    goto done;
1803
0
  }
1804
1805
  /*
1806
   * There's nothing to do in case the old and new name are the same, so
1807
   * we exit early in that case.
1808
   */
1809
0
  if (!strcmp(arg->oldname, arg->newname)) {
1810
0
    ret = 0;
1811
0
    goto done;
1812
0
  }
1813
1814
  /*
1815
   * Verify that the new refname is available.
1816
   */
1817
0
  if (arg->delete_old)
1818
0
    string_list_insert(&skip, arg->oldname);
1819
0
  ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
1820
0
              NULL, &skip, 0, &errbuf);
1821
0
  if (ret < 0) {
1822
0
    error("%s", errbuf.buf);
1823
0
    goto done;
1824
0
  }
1825
1826
  /*
1827
   * When deleting the old reference we have to use two update indices:
1828
   * once to delete the old ref and its reflog, and once to create the
1829
   * new ref and its reflog. They need to be staged with two separate
1830
   * indices because the new reflog needs to encode both the deletion of
1831
   * the old branch and the creation of the new branch, and we cannot do
1832
   * two changes to a reflog in a single update.
1833
   */
1834
0
  deletion_ts = creation_ts = reftable_stack_next_update_index(arg->be->stack);
1835
0
  if (arg->delete_old)
1836
0
    creation_ts++;
1837
0
  ret = reftable_writer_set_limits(writer, deletion_ts, creation_ts);
1838
0
  if (ret < 0)
1839
0
    goto done;
1840
1841
  /*
1842
   * Add the new reference. If this is a rename then we also delete the
1843
   * old reference.
1844
   */
1845
0
  refs[0] = old_ref;
1846
0
  refs[0].refname = xstrdup(arg->newname);
1847
0
  refs[0].update_index = creation_ts;
1848
0
  if (arg->delete_old) {
1849
0
    refs[1].refname = xstrdup(arg->oldname);
1850
0
    refs[1].value_type = REFTABLE_REF_DELETION;
1851
0
    refs[1].update_index = deletion_ts;
1852
0
  }
1853
0
  ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
1854
0
  if (ret < 0)
1855
0
    goto done;
1856
1857
  /*
1858
   * When deleting the old branch we need to create a reflog entry on the
1859
   * new branch name that indicates that the old branch has been deleted
1860
   * and then recreated. This is a tad weird, but matches what the files
1861
   * backend does.
1862
   */
1863
0
  if (arg->delete_old) {
1864
0
    struct strbuf head_referent = STRBUF_INIT;
1865
0
    struct object_id head_oid;
1866
0
    int append_head_reflog;
1867
0
    unsigned head_type = 0;
1868
1869
0
    ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1870
0
    memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1871
0
    fill_reftable_log_record(&logs[logs_nr], &committer_ident);
1872
0
    logs[logs_nr].refname = xstrdup(arg->newname);
1873
0
    logs[logs_nr].update_index = deletion_ts;
1874
0
    logs[logs_nr].value.update.message =
1875
0
      xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1876
0
    memcpy(logs[logs_nr].value.update.old_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
1877
0
    logs_nr++;
1878
1879
0
    ret = reftable_backend_read_ref(arg->be, "HEAD", &head_oid,
1880
0
            &head_referent, &head_type);
1881
0
    if (ret < 0)
1882
0
      goto done;
1883
0
    append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
1884
0
    strbuf_release(&head_referent);
1885
1886
    /*
1887
     * The files backend uses `refs_delete_ref()` to delete the old
1888
     * branch name, which will append a reflog entry for HEAD in
1889
     * case it points to the old branch.
1890
     */
1891
0
    if (append_head_reflog) {
1892
0
      ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1893
0
      logs[logs_nr] = logs[logs_nr - 1];
1894
0
      logs[logs_nr].refname = xstrdup("HEAD");
1895
0
      logs[logs_nr].value.update.name =
1896
0
        xstrdup(logs[logs_nr].value.update.name);
1897
0
      logs[logs_nr].value.update.email =
1898
0
        xstrdup(logs[logs_nr].value.update.email);
1899
0
      logs[logs_nr].value.update.message =
1900
0
        xstrdup(logs[logs_nr].value.update.message);
1901
0
      logs_nr++;
1902
0
    }
1903
0
  }
1904
1905
  /*
1906
   * Create the reflog entry for the newly created branch.
1907
   */
1908
0
  ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1909
0
  memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1910
0
  fill_reftable_log_record(&logs[logs_nr], &committer_ident);
1911
0
  logs[logs_nr].refname = xstrdup(arg->newname);
1912
0
  logs[logs_nr].update_index = creation_ts;
1913
0
  logs[logs_nr].value.update.message =
1914
0
    xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1915
0
  memcpy(logs[logs_nr].value.update.new_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
1916
0
  logs_nr++;
1917
1918
  /*
1919
   * In addition to writing the reflog entry for the new branch, we also
1920
   * copy over all log entries from the old reflog. Last but not least,
1921
   * when renaming we also have to delete all the old reflog entries.
1922
   */
1923
0
  ret = reftable_stack_init_log_iterator(arg->be->stack, &it);
1924
0
  if (ret < 0)
1925
0
    goto done;
1926
1927
0
  ret = reftable_iterator_seek_log(&it, arg->oldname);
1928
0
  if (ret < 0)
1929
0
    goto done;
1930
1931
0
  while (1) {
1932
0
    ret = reftable_iterator_next_log(&it, &old_log);
1933
0
    if (ret < 0)
1934
0
      goto done;
1935
0
    if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
1936
0
      ret = 0;
1937
0
      break;
1938
0
    }
1939
1940
0
    free(old_log.refname);
1941
1942
    /*
1943
     * Copy over the old reflog entry with the new refname.
1944
     */
1945
0
    ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1946
0
    logs[logs_nr] = old_log;
1947
0
    logs[logs_nr].refname = xstrdup(arg->newname);
1948
0
    logs_nr++;
1949
1950
    /*
1951
     * Delete the old reflog entry in case we are renaming.
1952
     */
1953
0
    if (arg->delete_old) {
1954
0
      ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1955
0
      memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1956
0
      logs[logs_nr].refname = xstrdup(arg->oldname);
1957
0
      logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
1958
0
      logs[logs_nr].update_index = old_log.update_index;
1959
0
      logs_nr++;
1960
0
    }
1961
1962
    /*
1963
     * Transfer ownership of the log record we're iterating over to
1964
     * the array of log records. Otherwise, the pointers would get
1965
     * free'd or reallocated by the iterator.
1966
     */
1967
0
    memset(&old_log, 0, sizeof(old_log));
1968
0
  }
1969
1970
0
  ret = reftable_writer_add_logs(writer, logs, logs_nr);
1971
0
  if (ret < 0)
1972
0
    goto done;
1973
1974
0
done:
1975
0
  assert(ret != REFTABLE_API_ERROR);
1976
0
  reftable_iterator_destroy(&it);
1977
0
  string_list_clear(&skip, 0);
1978
0
  strbuf_release(&errbuf);
1979
0
  for (i = 0; i < logs_nr; i++)
1980
0
    reftable_log_record_release(&logs[i]);
1981
0
  free(logs);
1982
0
  for (i = 0; i < ARRAY_SIZE(refs); i++)
1983
0
    reftable_ref_record_release(&refs[i]);
1984
0
  reftable_ref_record_release(&old_ref);
1985
0
  reftable_log_record_release(&old_log);
1986
0
  return ret;
1987
0
}
1988
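/*
 * write_copy_table() above reserves two consecutive update indices when
 * renaming: the lower one deletes the old ref and its reflog, the higher
 * one creates the new ref and its reflog. The writer's limits must span
 * both so that records carrying either index are accepted. A minimal
 * sketch of that bookkeeping, assuming the reftable headers included at
 * the top of this file; the helper name is illustrative.
 */
static int set_copy_limits(struct reftable_writer *writer,
         struct reftable_stack *stack, int delete_old,
         uint64_t *deletion_ts, uint64_t *creation_ts)
{
  *deletion_ts = *creation_ts = reftable_stack_next_update_index(stack);
  if (delete_old)
    (*creation_ts)++;
  return reftable_writer_set_limits(writer, *deletion_ts, *creation_ts);
}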
1989
static int reftable_be_rename_ref(struct ref_store *ref_store,
1990
          const char *oldrefname,
1991
          const char *newrefname,
1992
          const char *logmsg)
1993
0
{
1994
0
  struct reftable_ref_store *refs =
1995
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1996
0
  struct write_copy_arg arg = {
1997
0
    .refs = refs,
1998
0
    .oldname = oldrefname,
1999
0
    .newname = newrefname,
2000
0
    .logmsg = logmsg,
2001
0
    .delete_old = 1,
2002
0
  };
2003
0
  int ret;
2004
2005
0
  ret = refs->err;
2006
0
  if (ret < 0)
2007
0
    goto done;
2008
2009
0
  ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
2010
0
  if (ret)
2011
0
    goto done;
2012
0
  ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg,
2013
0
         REFTABLE_STACK_NEW_ADDITION_RELOAD);
2014
2015
0
done:
2016
0
  assert(ret != REFTABLE_API_ERROR);
2017
0
  return ret;
2018
0
}
2019
2020
static int reftable_be_copy_ref(struct ref_store *ref_store,
2021
        const char *oldrefname,
2022
        const char *newrefname,
2023
        const char *logmsg)
2024
0
{
2025
0
  struct reftable_ref_store *refs =
2026
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
2027
0
  struct write_copy_arg arg = {
2028
0
    .refs = refs,
2029
0
    .oldname = oldrefname,
2030
0
    .newname = newrefname,
2031
0
    .logmsg = logmsg,
2032
0
  };
2033
0
  int ret;
2034
2035
0
  ret = refs->err;
2036
0
  if (ret < 0)
2037
0
    goto done;
2038
2039
0
  ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
2040
0
  if (ret)
2041
0
    goto done;
2042
0
  ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg,
2043
0
         REFTABLE_STACK_NEW_ADDITION_RELOAD);
2044
2045
0
done:
2046
0
  assert(ret != REFTABLE_API_ERROR);
2047
0
  return ret;
2048
0
}
2049
2050
struct reftable_reflog_iterator {
2051
  struct ref_iterator base;
2052
  struct reftable_ref_store *refs;
2053
  struct reftable_iterator iter;
2054
  struct reftable_log_record log;
2055
  struct strbuf last_name;
2056
  int err;
2057
};
2058
2059
static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
2060
0
{
2061
0
  struct reftable_reflog_iterator *iter =
2062
0
    (struct reftable_reflog_iterator *)ref_iterator;
2063
2064
0
  while (!iter->err) {
2065
0
    iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
2066
0
    if (iter->err)
2067
0
      break;
2068
2069
    /*
2070
     * We want the refnames that we have reflogs for, so we skip if
2071
     * we've already produced this name. This could be faster by
2072
     * seeking directly to reflog@update_index==0.
2073
     */
2074
0
    if (!strcmp(iter->log.refname, iter->last_name.buf))
2075
0
      continue;
2076
2077
0
    if (check_refname_format(iter->log.refname,
2078
0
           REFNAME_ALLOW_ONELEVEL))
2079
0
      continue;
2080
2081
0
    strbuf_reset(&iter->last_name);
2082
0
    strbuf_addstr(&iter->last_name, iter->log.refname);
2083
0
    iter->base.ref.name = iter->log.refname;
2084
2085
0
    break;
2086
0
  }
2087
2088
0
  if (iter->err > 0)
2089
0
    return ITER_DONE;
2090
0
  if (iter->err < 0)
2091
0
    return ITER_ERROR;
2092
0
  return ITER_OK;
2093
0
}
2094
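/*
 * reftable_reflog_iterator_advance() above deduplicates refnames by
 * remembering the last name it produced: all log records of a single ref
 * are adjacent in iteration order, so comparing against the previous name
 * is sufficient. A minimal sketch of that check, assuming the strbuf API
 * used elsewhere in this file; the helper name is illustrative.
 */
static int reflog_name_is_new(struct strbuf *last_name, const char *refname)
{
  if (!strcmp(refname, last_name->buf))
    return 0; /* same ref as before, skip it */
  strbuf_reset(last_name);
  strbuf_addstr(last_name, refname);
  return 1; /* a ref that has not been yielded yet */
}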
2095
static int reftable_reflog_iterator_seek(struct ref_iterator *ref_iterator UNUSED,
2096
           const char *refname UNUSED,
2097
           unsigned int flags UNUSED)
2098
0
{
2099
0
  BUG("reftable reflog iterator cannot be seeked");
2100
0
  return -1;
2101
0
}
2102
2103
static void reftable_reflog_iterator_release(struct ref_iterator *ref_iterator)
2104
0
{
2105
0
  struct reftable_reflog_iterator *iter =
2106
0
    (struct reftable_reflog_iterator *)ref_iterator;
2107
0
  reftable_log_record_release(&iter->log);
2108
0
  reftable_iterator_destroy(&iter->iter);
2109
0
  strbuf_release(&iter->last_name);
2110
0
}
2111
2112
static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
2113
  .advance = reftable_reflog_iterator_advance,
2114
  .seek = reftable_reflog_iterator_seek,
2115
  .release = reftable_reflog_iterator_release,
2116
};
2117
2118
static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
2119
                  struct reftable_stack *stack)
2120
0
{
2121
0
  struct reftable_reflog_iterator *iter;
2122
0
  int ret;
2123
2124
0
  iter = xcalloc(1, sizeof(*iter));
2125
0
  base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable);
2126
0
  strbuf_init(&iter->last_name, 0);
2127
0
  iter->refs = refs;
2128
2129
0
  ret = refs->err;
2130
0
  if (ret)
2131
0
    goto done;
2132
2133
0
  ret = reftable_stack_reload(stack);
2134
0
  if (ret < 0)
2135
0
    goto done;
2136
2137
0
  ret = reftable_stack_init_log_iterator(stack, &iter->iter);
2138
0
  if (ret < 0)
2139
0
    goto done;
2140
2141
0
  ret = reftable_iterator_seek_log(&iter->iter, "");
2142
0
  if (ret < 0)
2143
0
    goto done;
2144
2145
0
done:
2146
0
  iter->err = ret;
2147
0
  return iter;
2148
0
}
2149
2150
static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
2151
0
{
2152
0
  struct reftable_ref_store *refs =
2153
0
    reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
2154
0
  struct reftable_reflog_iterator *main_iter, *worktree_iter;
2155
2156
0
  main_iter = reflog_iterator_for_stack(refs, refs->main_backend.stack);
2157
0
  if (!refs->worktree_backend.stack)
2158
0
    return &main_iter->base;
2159
2160
0
  worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_backend.stack);
2161
2162
0
  return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
2163
0
          ref_iterator_select, NULL);
2164
0
}
2165
2166
static int yield_log_record(struct reftable_ref_store *refs,
2167
          struct reftable_log_record *log,
2168
          each_reflog_ent_fn fn,
2169
          void *cb_data)
2170
0
{
2171
0
  struct object_id old_oid, new_oid;
2172
0
  const char *full_committer;
2173
2174
0
  oidread(&old_oid, log->value.update.old_hash, refs->base.repo->hash_algo);
2175
0
  oidread(&new_oid, log->value.update.new_hash, refs->base.repo->hash_algo);
2176
2177
  /*
2178
   * When both the old object ID and the new object ID are null,
2179
   * the record is the reflog existence marker, which must never be
2180
   * exposed to the caller.
2181
   */
2182
0
  if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
2183
0
    return 0;
2184
2185
0
  full_committer = fmt_ident(log->value.update.name, log->value.update.email,
2186
0
           WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
2187
0
  return fn(log->refname, &old_oid, &new_oid, full_committer,
2188
0
      log->value.update.time, log->value.update.tz_offset,
2189
0
      log->value.update.message, cb_data);
2190
0
}
2191
2192
static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
2193
               const char *refname,
2194
               each_reflog_ent_fn fn,
2195
               void *cb_data)
2196
0
{
2197
0
  struct reftable_ref_store *refs =
2198
0
    reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
2199
0
  struct reftable_log_record log = {0};
2200
0
  struct reftable_iterator it = {0};
2201
0
  struct reftable_backend *be;
2202
0
  int ret;
2203
2204
0
  if (refs->err < 0)
2205
0
    return refs->err;
2206
2207
  /*
2208
   * TODO: we should adapt this callsite to reload the stack. There is no
2209
   * obvious reason why we shouldn't.
2210
   */
2211
0
  ret = backend_for(&be, refs, refname, &refname, 0);
2212
0
  if (ret)
2213
0
    goto done;
2214
2215
0
  ret = reftable_stack_init_log_iterator(be->stack, &it);
2216
0
  if (ret < 0)
2217
0
    goto done;
2218
2219
0
  ret = reftable_iterator_seek_log(&it, refname);
2220
0
  while (!ret) {
2221
0
    ret = reftable_iterator_next_log(&it, &log);
2222
0
    if (ret < 0)
2223
0
      break;
2224
0
    if (ret > 0 || strcmp(log.refname, refname)) {
2225
0
      ret = 0;
2226
0
      break;
2227
0
    }
2228
2229
0
    ret = yield_log_record(refs, &log, fn, cb_data);
2230
0
    if (ret)
2231
0
      break;
2232
0
  }
2233
2234
0
done:
2235
0
  reftable_log_record_release(&log);
2236
0
  reftable_iterator_destroy(&it);
2237
0
  return ret;
2238
0
}
2239
2240
static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
2241
             const char *refname,
2242
             each_reflog_ent_fn fn,
2243
             void *cb_data)
2244
0
{
2245
0
  struct reftable_ref_store *refs =
2246
0
    reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
2247
0
  struct reftable_log_record *logs = NULL;
2248
0
  struct reftable_iterator it = {0};
2249
0
  struct reftable_backend *be;
2250
0
  size_t logs_alloc = 0, logs_nr = 0, i;
2251
0
  int ret;
2252
2253
0
  if (refs->err < 0)
2254
0
    return refs->err;
2255
2256
  /*
2257
   * TODO: we should adapt this callsite to reload the stack. There is no
2258
   * obvious reason why we shouldn't.
2259
   */
2260
0
  ret = backend_for(&be, refs, refname, &refname, 0);
2261
0
  if (ret)
2262
0
    goto done;
2263
2264
0
  ret = reftable_stack_init_log_iterator(be->stack, &it);
2265
0
  if (ret < 0)
2266
0
    goto done;
2267
2268
0
  ret = reftable_iterator_seek_log(&it, refname);
2269
0
  while (!ret) {
2270
0
    struct reftable_log_record log = {0};
2271
2272
0
    ret = reftable_iterator_next_log(&it, &log);
2273
0
    if (ret < 0)
2274
0
      goto done;
2275
0
    if (ret > 0 || strcmp(log.refname, refname)) {
2276
0
      reftable_log_record_release(&log);
2277
0
      ret = 0;
2278
0
      break;
2279
0
    }
2280
2281
0
    ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
2282
0
    logs[logs_nr++] = log;
2283
0
  }
2284
2285
0
  for (i = logs_nr; i--;) {
2286
0
    ret = yield_log_record(refs, &logs[i], fn, cb_data);
2287
0
    if (ret)
2288
0
      goto done;
2289
0
  }
2290
2291
0
done:
2292
0
  reftable_iterator_destroy(&it);
2293
0
  for (i = 0; i < logs_nr; i++)
2294
0
    reftable_log_record_release(&logs[i]);
2295
0
  free(logs);
2296
0
  return ret;
2297
0
}
2298
2299
static int reftable_be_reflog_exists(struct ref_store *ref_store,
2300
             const char *refname)
2301
0
{
2302
0
  struct reftable_ref_store *refs =
2303
0
    reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
2304
0
  struct reftable_log_record log = {0};
2305
0
  struct reftable_iterator it = {0};
2306
0
  struct reftable_backend *be;
2307
0
  int ret;
2308
2309
0
  ret = refs->err;
2310
0
  if (ret < 0)
2311
0
    goto done;
2312
2313
0
  ret = backend_for(&be, refs, refname, &refname, 1);
2314
0
  if (ret < 0)
2315
0
    goto done;
2316
2317
0
  ret = reftable_stack_init_log_iterator(be->stack, &it);
2318
0
  if (ret < 0)
2319
0
    goto done;
2320
2321
0
  ret = reftable_iterator_seek_log(&it, refname);
2322
0
  if (ret < 0)
2323
0
    goto done;
2324
2325
  /*
2326
   * Check whether we get at least one log record for the given ref name.
2327
   * If so, the reflog exists, otherwise it doesn't.
2328
   */
2329
0
  ret = reftable_iterator_next_log(&it, &log);
2330
0
  if (ret < 0)
2331
0
    goto done;
2332
0
  if (ret > 0) {
2333
0
    ret = 0;
2334
0
    goto done;
2335
0
  }
2336
2337
0
  ret = strcmp(log.refname, refname) == 0;
2338
2339
0
done:
2340
0
  reftable_iterator_destroy(&it);
2341
0
  reftable_log_record_release(&log);
2342
0
  if (ret < 0)
2343
0
    ret = 0;
2344
0
  return ret;
2345
0
}
2346
2347
struct write_reflog_existence_arg {
2348
  struct reftable_ref_store *refs;
2349
  const char *refname;
2350
  struct reftable_stack *stack;
2351
};
2352
2353
static int write_reflog_existence_table(struct reftable_writer *writer,
2354
          void *cb_data)
2355
0
{
2356
0
  struct write_reflog_existence_arg *arg = cb_data;
2357
0
  uint64_t ts = reftable_stack_next_update_index(arg->stack);
2358
0
  struct reftable_log_record log = {0};
2359
0
  int ret;
2360
2361
0
  ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
2362
0
  if (ret <= 0)
2363
0
    goto done;
2364
2365
0
  ret = reftable_writer_set_limits(writer, ts, ts);
2366
0
  if (ret < 0)
2367
0
    goto done;
2368
2369
  /*
2370
   * The existence entry has both old and new object ID set to the
2371
   * null object ID. Our iterators are aware of this and will not present
2372
   * them to their callers.
2373
   */
2374
0
  log.refname = xstrdup(arg->refname);
2375
0
  log.update_index = ts;
2376
0
  log.value_type = REFTABLE_LOG_UPDATE;
2377
0
  ret = reftable_writer_add_log(writer, &log);
2378
2379
0
done:
2380
0
  assert(ret != REFTABLE_API_ERROR);
2381
0
  reftable_log_record_release(&log);
2382
0
  return ret;
2383
0
}
2384
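/*
 * write_reflog_existence_table() above encodes "this reflog exists but may
 * be empty" as a REFTABLE_LOG_UPDATE record whose old and new hashes are
 * both all-zero. Readers such as yield_log_record() and the expiry loop
 * filter such records out. A minimal sketch of that check, assuming the
 * object ID helpers used elsewhere in this file; the helper name is
 * illustrative.
 */
static int is_reflog_existence_marker(const struct reftable_log_record *log,
              const struct git_hash_algo *algop)
{
  struct object_id old_oid, new_oid;

  oidread(&old_oid, log->value.update.old_hash, algop);
  oidread(&new_oid, log->value.update.new_hash, algop);

  return is_null_oid(&old_oid) && is_null_oid(&new_oid);
}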
2385
static int reftable_be_create_reflog(struct ref_store *ref_store,
2386
             const char *refname,
2387
             struct strbuf *errmsg UNUSED)
2388
0
{
2389
0
  struct reftable_ref_store *refs =
2390
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
2391
0
  struct reftable_backend *be;
2392
0
  struct write_reflog_existence_arg arg = {
2393
0
    .refs = refs,
2394
0
    .refname = refname,
2395
0
  };
2396
0
  int ret;
2397
2398
0
  ret = refs->err;
2399
0
  if (ret < 0)
2400
0
    goto done;
2401
2402
0
  ret = backend_for(&be, refs, refname, &refname, 1);
2403
0
  if (ret)
2404
0
    goto done;
2405
0
  arg.stack = be->stack;
2406
2407
0
  ret = reftable_stack_add(be->stack, &write_reflog_existence_table, &arg,
2408
0
         REFTABLE_STACK_NEW_ADDITION_RELOAD);
2409
2410
0
done:
2411
0
  return ret;
2412
0
}
2413
2414
struct write_reflog_delete_arg {
2415
  struct reftable_stack *stack;
2416
  const char *refname;
2417
};
2418
2419
static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
2420
0
{
2421
0
  struct write_reflog_delete_arg *arg = cb_data;
2422
0
  struct reftable_log_record log = {0}, tombstone = {0};
2423
0
  struct reftable_iterator it = {0};
2424
0
  uint64_t ts = reftable_stack_next_update_index(arg->stack);
2425
0
  int ret;
2426
2427
0
  ret = reftable_writer_set_limits(writer, ts, ts);
2428
0
  if (ret < 0)
2429
0
    goto out;
2430
2431
0
  ret = reftable_stack_init_log_iterator(arg->stack, &it);
2432
0
  if (ret < 0)
2433
0
    goto out;
2434
2435
  /*
2436
   * In order to delete a reflog we need to delete all of its entries one
2437
   * by one. This is inefficient, but the reftable format does not have a
2438
   * better marker right now.
2439
   */
2440
0
  ret = reftable_iterator_seek_log(&it, arg->refname);
2441
0
  while (ret == 0) {
2442
0
    ret = reftable_iterator_next_log(&it, &log);
2443
0
    if (ret < 0)
2444
0
      break;
2445
0
    if (ret > 0 || strcmp(log.refname, arg->refname)) {
2446
0
      ret = 0;
2447
0
      break;
2448
0
    }
2449
2450
0
    tombstone.refname = (char *)arg->refname;
2451
0
    tombstone.value_type = REFTABLE_LOG_DELETION;
2452
0
    tombstone.update_index = log.update_index;
2453
2454
0
    ret = reftable_writer_add_log(writer, &tombstone);
2455
0
  }
2456
2457
0
out:
2458
0
  reftable_log_record_release(&log);
2459
0
  reftable_iterator_destroy(&it);
2460
0
  return ret;
2461
0
}
2462
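/*
 * write_reflog_delete_table() above shadows each existing reflog entry with
 * a deletion record that carries the same refname and the same update_index
 * as the entry it replaces. A minimal sketch of constructing one such
 * tombstone, assuming the reftable headers included at the top of this
 * file; the helper name is illustrative.
 */
static int add_log_tombstone(struct reftable_writer *writer,
           const char *refname, uint64_t update_index)
{
  struct reftable_log_record tombstone = {
    .refname = (char *)refname,
    .value_type = REFTABLE_LOG_DELETION,
    .update_index = update_index,
  };
  return reftable_writer_add_log(writer, &tombstone);
}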
2463
static int reftable_be_delete_reflog(struct ref_store *ref_store,
2464
             const char *refname)
2465
0
{
2466
0
  struct reftable_ref_store *refs =
2467
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
2468
0
  struct reftable_backend *be;
2469
0
  struct write_reflog_delete_arg arg = {
2470
0
    .refname = refname,
2471
0
  };
2472
0
  int ret;
2473
2474
0
  ret = backend_for(&be, refs, refname, &refname, 1);
2475
0
  if (ret)
2476
0
    return ret;
2477
0
  arg.stack = be->stack;
2478
2479
0
  ret = reftable_stack_add(be->stack, &write_reflog_delete_table, &arg,
2480
0
         REFTABLE_STACK_NEW_ADDITION_RELOAD);
2481
2482
0
  assert(ret != REFTABLE_API_ERROR);
2483
0
  return ret;
2484
0
}
2485
2486
struct reflog_expiry_arg {
2487
  struct reftable_ref_store *refs;
2488
  struct reftable_stack *stack;
2489
  struct reftable_log_record *records;
2490
  struct object_id update_oid;
2491
  const char *refname;
2492
  size_t len;
2493
};
2494
2495
static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
2496
0
{
2497
0
  struct reflog_expiry_arg *arg = cb_data;
2498
0
  uint64_t ts = reftable_stack_next_update_index(arg->stack);
2499
0
  uint64_t live_records = 0;
2500
0
  size_t i;
2501
0
  int ret;
2502
2503
0
  for (i = 0; i < arg->len; i++)
2504
0
    if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
2505
0
      live_records++;
2506
2507
0
  ret = reftable_writer_set_limits(writer, ts, ts);
2508
0
  if (ret < 0)
2509
0
    return ret;
2510
2511
0
  if (!is_null_oid(&arg->update_oid)) {
2512
0
    struct reftable_ref_record ref = {0};
2513
0
    struct object_id peeled;
2514
2515
0
    ref.refname = (char *)arg->refname;
2516
0
    ref.update_index = ts;
2517
2518
0
    if (!peel_object(arg->refs->base.repo, &arg->update_oid, &peeled, 0)) {
2519
0
      ref.value_type = REFTABLE_REF_VAL2;
2520
0
      memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
2521
0
      memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
2522
0
    } else {
2523
0
      ref.value_type = REFTABLE_REF_VAL1;
2524
0
      memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
2525
0
    }
2526
2527
0
    ret = reftable_writer_add_ref(writer, &ref);
2528
0
    if (ret < 0)
2529
0
      return ret;
2530
0
  }
2531
2532
  /*
2533
   * When there are no more entries left in the reflog we empty it
2534
   * completely, but write a placeholder reflog entry that indicates that
2535
   * the reflog still exists.
2536
   */
2537
0
  if (!live_records) {
2538
0
    struct reftable_log_record log = {
2539
0
      .refname = (char *)arg->refname,
2540
0
      .value_type = REFTABLE_LOG_UPDATE,
2541
0
      .update_index = ts,
2542
0
    };
2543
2544
0
    ret = reftable_writer_add_log(writer, &log);
2545
0
    if (ret)
2546
0
      return ret;
2547
0
  }
2548
2549
0
  for (i = 0; i < arg->len; i++) {
2550
0
    ret = reftable_writer_add_log(writer, &arg->records[i]);
2551
0
    if (ret)
2552
0
      return ret;
2553
0
  }
2554
2555
0
  return 0;
2556
0
}
2557
2558
static int reftable_be_reflog_expire(struct ref_store *ref_store,
2559
             const char *refname,
2560
             unsigned int flags,
2561
             reflog_expiry_prepare_fn prepare_fn,
2562
             reflog_expiry_should_prune_fn should_prune_fn,
2563
             reflog_expiry_cleanup_fn cleanup_fn,
2564
             void *policy_cb_data)
2565
0
{
2566
  /*
2567
   * For log expiry, we write tombstones for every single reflog entry
2568
   * that is to be expired. This means that the entries are still
2569
   * retrievable by delving into the stack, and expiring entries
2570
   * paradoxically takes extra memory. This memory is only reclaimed when
2571
   * compacting the reftable stack.
2572
   *
2573
   * It would be better if the refs backend supported an API that sets a
2574
   * criterion for all refs, passing the criterion to pack_refs().
2575
   *
2576
   * On the plus side, because we do the expiration per ref, we can easily
2577
   * insert the reflog existence dummies.
2578
   */
2579
0
  struct reftable_ref_store *refs =
2580
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
2581
0
  struct reftable_log_record *logs = NULL;
2582
0
  struct reftable_log_record *rewritten = NULL;
2583
0
  struct reftable_iterator it = {0};
2584
0
  struct reftable_addition *add = NULL;
2585
0
  struct reflog_expiry_arg arg = {0};
2586
0
  struct reftable_backend *be;
2587
0
  struct object_id oid = {0};
2588
0
  struct strbuf referent = STRBUF_INIT;
2589
0
  uint8_t *last_hash = NULL;
2590
0
  size_t logs_nr = 0, logs_alloc = 0, i;
2591
0
  unsigned int type = 0;
2592
0
  int ret;
2593
2594
0
  if (refs->err < 0)
2595
0
    return refs->err;
2596
2597
0
  ret = backend_for(&be, refs, refname, &refname, 1);
2598
0
  if (ret < 0)
2599
0
    goto done;
2600
2601
0
  ret = reftable_stack_new_addition(&add, be->stack,
2602
0
            REFTABLE_STACK_NEW_ADDITION_RELOAD);
2603
0
  if (ret < 0)
2604
0
    goto done;
2605
2606
0
  ret = reftable_stack_init_log_iterator(be->stack, &it);
2607
0
  if (ret < 0)
2608
0
    goto done;
2609
2610
0
  ret = reftable_iterator_seek_log(&it, refname);
2611
0
  if (ret < 0)
2612
0
    goto done;
2613
2614
0
  ret = reftable_backend_read_ref(be, refname, &oid, &referent, &type);
2615
0
  if (ret < 0)
2616
0
    goto done;
2617
0
  prepare_fn(refname, &oid, policy_cb_data);
2618
2619
0
  while (1) {
2620
0
    struct reftable_log_record log = {0};
2621
0
    struct object_id old_oid, new_oid;
2622
2623
0
    ret = reftable_iterator_next_log(&it, &log);
2624
0
    if (ret < 0)
2625
0
      goto done;
2626
0
    if (ret > 0 || strcmp(log.refname, refname)) {
2627
0
      reftable_log_record_release(&log);
2628
0
      break;
2629
0
    }
2630
2631
0
    oidread(&old_oid, log.value.update.old_hash,
2632
0
      ref_store->repo->hash_algo);
2633
0
    oidread(&new_oid, log.value.update.new_hash,
2634
0
      ref_store->repo->hash_algo);
2635
2636
    /*
2637
     * Skip over the reflog existence marker. We will add it back
2638
     * in when there are no live reflog records.
2639
     */
2640
0
    if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
2641
0
      reftable_log_record_release(&log);
2642
0
      continue;
2643
0
    }
2644
2645
0
    ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
2646
0
    logs[logs_nr++] = log;
2647
0
  }
2648
2649
  /*
2650
   * We need to rewrite all reflog entries according to the pruning
2651
   * callback function:
2652
   *
2653
   *   - If a reflog entry shall be pruned we mark the record for
2654
   *     deletion.
2655
   *
2656
   *   - Otherwise we may have to rewrite the chain of reflog entries so
2657
   *     that gaps created by just-deleted records get backfilled.
2658
   */
2659
0
  CALLOC_ARRAY(rewritten, logs_nr);
2660
0
  for (i = logs_nr; i--;) {
2661
0
    struct reftable_log_record *dest = &rewritten[i];
2662
0
    struct object_id old_oid, new_oid;
2663
2664
0
    *dest = logs[i];
2665
0
    oidread(&old_oid, logs[i].value.update.old_hash,
2666
0
      ref_store->repo->hash_algo);
2667
0
    oidread(&new_oid, logs[i].value.update.new_hash,
2668
0
      ref_store->repo->hash_algo);
2669
2670
0
    if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
2671
0
            (timestamp_t)logs[i].value.update.time,
2672
0
            logs[i].value.update.tz_offset,
2673
0
            logs[i].value.update.message,
2674
0
            policy_cb_data)) {
2675
0
      dest->value_type = REFTABLE_LOG_DELETION;
2676
0
    } else {
2677
0
      if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
2678
0
        memcpy(dest->value.update.old_hash, last_hash, GIT_MAX_RAWSZ);
2679
0
      last_hash = logs[i].value.update.new_hash;
2680
0
    }
2681
0
  }
2682
2683
0
  if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash && !is_null_oid(&oid))
2684
0
    oidread(&arg.update_oid, last_hash, ref_store->repo->hash_algo);
2685
2686
0
  arg.refs = refs;
2687
0
  arg.records = rewritten;
2688
0
  arg.len = logs_nr;
2689
0
  arg.stack = be->stack;
2690
0
  arg.refname = refname;
2691
2692
0
  ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
2693
0
  if (ret < 0)
2694
0
    goto done;
2695
2696
  /*
2697
   * Future improvement: we could skip writing records that were
2698
   * not changed.
2699
   */
2700
0
  if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
2701
0
    ret = reftable_addition_commit(add);
2702
2703
0
done:
2704
0
  if (add)
2705
0
    cleanup_fn(policy_cb_data);
2706
0
  assert(ret != REFTABLE_API_ERROR);
2707
2708
0
  reftable_iterator_destroy(&it);
2709
0
  reftable_addition_destroy(add);
2710
0
  for (i = 0; i < logs_nr; i++)
2711
0
    reftable_log_record_release(&logs[i]);
2712
0
  strbuf_release(&referent);
2713
0
  free(logs);
2714
0
  free(rewritten);
2715
0
  return ret;
2716
0
}
2717
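/*
 * The EXPIRE_REFLOGS_REWRITE branch above backfills the reflog chain:
 * walking from the oldest record to the newest, each surviving entry's old
 * value is rewritten to the new value of the nearest older survivor, so
 * that pruned entries leave no gap. A self-contained illustration with
 * plain integers standing in for object hashes; all names are illustrative.
 */
struct chain_entry {
  int old_value, new_value;
  int pruned;
};

static void backfill_chain(struct chain_entry *entries, size_t nr)
{
  const int *last_new = NULL;
  size_t i;

  /* entries[0] is the newest record, entries[nr - 1] the oldest. */
  for (i = nr; i--;) {
    if (entries[i].pruned)
      continue;
    if (last_new)
      entries[i].old_value = *last_new;
    last_new = &entries[i].new_value;
  }
}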
2718
static void reftable_fsck_verbose_handler(const char *msg, void *cb_data)
2719
0
{
2720
0
  struct fsck_options *o = cb_data;
2721
2722
0
  if (o->verbose)
2723
0
    fprintf_ln(stderr, "%s", msg);
2724
0
}
2725
2726
static const enum fsck_msg_id fsck_msg_id_map[] = {
2727
  [REFTABLE_FSCK_ERROR_TABLE_NAME] = FSCK_MSG_BAD_REFTABLE_TABLE_NAME,
2728
};
2729
2730
static int reftable_fsck_error_handler(struct reftable_fsck_info *info,
2731
               void *cb_data)
2732
0
{
2733
0
  struct fsck_ref_report report = { .path = info->path };
2734
0
  struct fsck_options *o = cb_data;
2735
0
  enum fsck_msg_id msg_id;
2736
2737
0
  if (info->error < 0 || info->error >= REFTABLE_FSCK_MAX_VALUE)
2738
0
    BUG("unknown fsck error: %d", (int)info->error);
2739
2740
0
  msg_id = fsck_msg_id_map[info->error];
2741
2742
0
  if (!msg_id)
2743
0
    BUG("fsck_msg_id value missing for reftable error: %d", (int)info->error);
2744
2745
0
  return fsck_report_ref(o, &report, msg_id, "%s", info->msg);
2746
0
}
2747
2748
static int reftable_be_fsck(struct ref_store *ref_store, struct fsck_options *o,
2749
          struct worktree *wt UNUSED)
2750
0
{
2751
0
  struct reftable_ref_store *refs;
2752
0
  struct strmap_entry *entry;
2753
0
  struct hashmap_iter iter;
2754
0
  int ret = 0;
2755
2756
0
  refs = reftable_be_downcast(ref_store, REF_STORE_READ, "fsck");
2757
2758
0
  ret |= reftable_fsck_check(refs->main_backend.stack, reftable_fsck_error_handler,
2759
0
           reftable_fsck_verbose_handler, o);
2760
2761
0
  strmap_for_each_entry(&refs->worktree_backends, &iter, entry) {
2762
0
    struct reftable_backend *b = (struct reftable_backend *)entry->value;
2763
0
    ret |= reftable_fsck_check(b->stack, reftable_fsck_error_handler,
2764
0
             reftable_fsck_verbose_handler, o);
2765
0
  }
2766
2767
0
  return ret;
2768
0
}
2769
2770
struct ref_storage_be refs_be_reftable = {
2771
  .name = "reftable",
2772
  .init = reftable_be_init,
2773
  .release = reftable_be_release,
2774
  .create_on_disk = reftable_be_create_on_disk,
2775
  .remove_on_disk = reftable_be_remove_on_disk,
2776
2777
  .transaction_prepare = reftable_be_transaction_prepare,
2778
  .transaction_finish = reftable_be_transaction_finish,
2779
  .transaction_abort = reftable_be_transaction_abort,
2780
2781
  .optimize = reftable_be_optimize,
2782
  .optimize_required = reftable_be_optimize_required,
2783
2784
  .rename_ref = reftable_be_rename_ref,
2785
  .copy_ref = reftable_be_copy_ref,
2786
2787
  .iterator_begin = reftable_be_iterator_begin,
2788
  .read_raw_ref = reftable_be_read_raw_ref,
2789
  .read_symbolic_ref = reftable_be_read_symbolic_ref,
2790
2791
  .reflog_iterator_begin = reftable_be_reflog_iterator_begin,
2792
  .for_each_reflog_ent = reftable_be_for_each_reflog_ent,
2793
  .for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
2794
  .reflog_exists = reftable_be_reflog_exists,
2795
  .create_reflog = reftable_be_create_reflog,
2796
  .delete_reflog = reftable_be_delete_reflog,
2797
  .reflog_expire = reftable_be_reflog_expire,
2798
2799
  .fsck = reftable_be_fsck,
2800
};