Coverage Report

Created: 2024-09-16 06:12

/src/git/refs/reftable-backend.c
Line
Count
Source
1
#define USE_THE_REPOSITORY_VARIABLE
2
3
#include "../git-compat-util.h"
4
#include "../abspath.h"
5
#include "../chdir-notify.h"
6
#include "../config.h"
7
#include "../dir.h"
8
#include "../environment.h"
9
#include "../gettext.h"
10
#include "../hash.h"
11
#include "../hex.h"
12
#include "../iterator.h"
13
#include "../ident.h"
14
#include "../lockfile.h"
15
#include "../object.h"
16
#include "../path.h"
17
#include "../refs.h"
18
#include "../reftable/reftable-stack.h"
19
#include "../reftable/reftable-record.h"
20
#include "../reftable/reftable-error.h"
21
#include "../reftable/reftable-iterator.h"
22
#include "../setup.h"
23
#include "../strmap.h"
24
#include "parse.h"
25
#include "refs-internal.h"
26
27
/*
28
 * Used as a flag in ref_update::flags when the ref_update was via an
29
 * update to HEAD.
30
 */
31
0
#define REF_UPDATE_VIA_HEAD (1 << 8)
32
33
struct reftable_ref_store {
34
  struct ref_store base;
35
36
  /*
37
   * The main stack refers to the common dir and thus contains common
38
   * refs as well as refs of the main repository.
39
   */
40
  struct reftable_stack *main_stack;
41
  /*
42
   * The worktree stack refers to the gitdir in case the refdb is opened
43
   * via a worktree. It thus contains the per-worktree refs.
44
   */
45
  struct reftable_stack *worktree_stack;
46
  /*
47
   * Map of worktree stacks by their respective worktree names. The map
48
   * is populated lazily when we try to resolve `worktrees/$worktree` refs.
49
   */
50
  struct strmap worktree_stacks;
51
  struct reftable_write_options write_options;
52
53
  unsigned int store_flags;
54
  int err;
55
};
56
57
/*
58
 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
59
 * reftable_ref_store. required_flags is compared with ref_store's store_flags
60
 * to ensure the ref_store has all required capabilities. "caller" is used in
61
 * any necessary error messages.
62
 */
63
static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
64
                   unsigned int required_flags,
65
                   const char *caller)
66
0
{
67
0
  struct reftable_ref_store *refs;
68
69
0
  if (ref_store->be != &refs_be_reftable)
70
0
    BUG("ref_store is type \"%s\" not \"reftables\" in %s",
71
0
        ref_store->be->name, caller);
72
73
0
  refs = (struct reftable_ref_store *)ref_store;
74
75
0
  if ((refs->store_flags & required_flags) != required_flags)
76
0
    BUG("operation %s requires abilities 0x%x, but only have 0x%x",
77
0
        caller, required_flags, refs->store_flags);
78
79
0
  return refs;
80
0
}
81
82
/*
83
 * Some refs are global to the repository (refs/heads/{*}), while others are
84
 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
85
 * multiple separate databases (ie. multiple reftable/ directories), one for
86
 * the shared refs, one for the current worktree refs, and one for each
87
 * additional worktree. For reading, we merge the view of both the shared and
88
 * the current worktree's refs, when necessary.
89
 *
90
 * This function also optionally assigns the rewritten reference name that is
91
 * local to the stack. This translation is required when using worktree refs
92
 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
93
 * those references in their normalized form.
94
 */
95
static struct reftable_stack *stack_for(struct reftable_ref_store *store,
96
          const char *refname,
97
          const char **rewritten_ref)
98
0
{
99
0
  const char *wtname;
100
0
  int wtname_len;
101
102
0
  if (!refname)
103
0
    return store->main_stack;
104
105
0
  switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
106
0
  case REF_WORKTREE_OTHER: {
107
0
    static struct strbuf wtname_buf = STRBUF_INIT;
108
0
    struct strbuf wt_dir = STRBUF_INIT;
109
0
    struct reftable_stack *stack;
110
111
    /*
112
     * We're using a static buffer here so that we don't need to
113
     * allocate the worktree name whenever we look up a reference.
114
     * This could be avoided if the strmap interface knew how to
115
     * handle keys with a length.
116
     */
117
0
    strbuf_reset(&wtname_buf);
118
0
    strbuf_add(&wtname_buf, wtname, wtname_len);
119
120
    /*
121
     * There is an edge case here: when the worktree references the
122
     * current worktree, then we set up the stack once via
123
     * `worktree_stacks` and once via `worktree_stack`. This is
124
     * wasteful, but in the reading case it shouldn't matter. And
125
     * in the writing case we would notice that the stack is locked
126
     * already and error out when trying to write a reference via
127
     * both stacks.
128
     */
129
0
    stack = strmap_get(&store->worktree_stacks, wtname_buf.buf);
130
0
    if (!stack) {
131
0
      strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
132
0
            store->base.repo->commondir, wtname_buf.buf);
133
134
0
      store->err = reftable_new_stack(&stack, wt_dir.buf,
135
0
              &store->write_options);
136
0
      assert(store->err != REFTABLE_API_ERROR);
137
0
      strmap_put(&store->worktree_stacks, wtname_buf.buf, stack);
138
0
    }
139
140
0
    strbuf_release(&wt_dir);
141
0
    return stack;
142
0
  }
143
0
  case REF_WORKTREE_CURRENT:
144
    /*
145
     * If there is no worktree stack then we're currently in the
146
     * main worktree. We thus return the main stack in that case.
147
     */
148
0
    if (!store->worktree_stack)
149
0
      return store->main_stack;
150
0
    return store->worktree_stack;
151
0
  case REF_WORKTREE_MAIN:
152
0
  case REF_WORKTREE_SHARED:
153
0
    return store->main_stack;
154
0
  default:
155
0
    BUG("unhandled worktree reference type");
156
0
  }
157
0
}
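
/*
 * Illustration (not part of the original file): how stack_for() would
 * dispatch a few reference names, assuming a hypothetical linked
 * worktree named "wt":
 *
 *   "refs/heads/main"              -> main_stack     (REF_WORKTREE_SHARED)
 *   "HEAD"                         -> worktree_stack (REF_WORKTREE_CURRENT)
 *   "main-worktree/HEAD"           -> main_stack     (REF_WORKTREE_MAIN)
 *   "worktrees/wt/refs/bisect/bad" -> worktree_stacks["wt"], with
 *                                     *rewritten_ref = "refs/bisect/bad"
 */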
158
159
static int should_write_log(struct ref_store *refs, const char *refname)
160
0
{
161
0
  if (log_all_ref_updates == LOG_REFS_UNSET)
162
0
    log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
163
164
0
  switch (log_all_ref_updates) {
165
0
  case LOG_REFS_NONE:
166
0
    return refs_reflog_exists(refs, refname);
167
0
  case LOG_REFS_ALWAYS:
168
0
    return 1;
169
0
  case LOG_REFS_NORMAL:
170
0
    if (should_autocreate_reflog(refname))
171
0
      return 1;
172
0
    return refs_reflog_exists(refs, refname);
173
0
  default:
174
0
    BUG("unhandled core.logAllRefUpdates value %d", log_all_ref_updates);
175
0
  }
176
0
}
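
/*
 * Illustration (values hypothetical): with the LOG_REFS_NORMAL default
 * in a non-bare repository, should_autocreate_reflog() accepts branches,
 * remote-tracking refs, notes and HEAD. So "refs/heads/topic" gets a
 * reflog entry unconditionally, whereas "refs/tags/v1.0" only gets one
 * if a reflog for it already exists.
 */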
177
178
static void fill_reftable_log_record(struct reftable_log_record *log, const struct ident_split *split)
179
0
{
180
0
  const char *tz_begin;
181
0
  int sign = 1;
182
183
0
  reftable_log_record_release(log);
184
0
  log->value_type = REFTABLE_LOG_UPDATE;
185
0
  log->value.update.name =
186
0
    xstrndup(split->name_begin, split->name_end - split->name_begin);
187
0
  log->value.update.email =
188
0
    xstrndup(split->mail_begin, split->mail_end - split->mail_begin);
189
0
  log->value.update.time = atol(split->date_begin);
190
191
0
  tz_begin = split->tz_begin;
192
0
  if (*tz_begin == '-') {
193
0
    sign = -1;
194
0
    tz_begin++;
195
0
  }
196
0
  if (*tz_begin == '+') {
197
0
    sign = 1;
198
0
    tz_begin++;
199
0
  }
200
201
0
  log->value.update.tz_offset = sign * atoi(tz_begin);
202
0
}
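
/*
 * Illustration (hypothetical ident line): given the committer line
 *
 *   "A U Thor <author@example.com> 1257894000 -0700"
 *
 * the function above stores name "A U Thor", email "author@example.com",
 * time 1257894000, and tz_offset -700 (the sign is folded into the
 * decimal value, so -0700 becomes -700).
 */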
203
204
static int read_ref_without_reload(struct reftable_ref_store *refs,
205
           struct reftable_stack *stack,
206
           const char *refname,
207
           struct object_id *oid,
208
           struct strbuf *referent,
209
           unsigned int *type)
210
0
{
211
0
  struct reftable_ref_record ref = {0};
212
0
  int ret;
213
214
0
  ret = reftable_stack_read_ref(stack, refname, &ref);
215
0
  if (ret)
216
0
    goto done;
217
218
0
  if (ref.value_type == REFTABLE_REF_SYMREF) {
219
0
    strbuf_reset(referent);
220
0
    strbuf_addstr(referent, ref.value.symref);
221
0
    *type |= REF_ISSYMREF;
222
0
  } else if (reftable_ref_record_val1(&ref)) {
223
0
    oidread(oid, reftable_ref_record_val1(&ref),
224
0
      refs->base.repo->hash_algo);
225
0
  } else {
226
    /* We got a tombstone, which should not happen. */
227
0
    BUG("unhandled reference value type %d", ref.value_type);
228
0
  }
229
230
0
done:
231
0
  assert(ret != REFTABLE_API_ERROR);
232
0
  reftable_ref_record_release(&ref);
233
0
  return ret;
234
0
}
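
/*
 * Minimal caller sketch (hypothetical; the return convention follows
 * reftable_stack_read_ref(): 0 = found, > 0 = absent, < 0 = error):
 *
 *   struct object_id oid;
 *   struct strbuf referent = STRBUF_INIT;
 *   unsigned int type = 0;
 *
 *   if (!read_ref_without_reload(refs, stack, "refs/heads/main",
 *                                &oid, &referent, &type) &&
 *       (type & REF_ISSYMREF))
 *           printf("symref -> %s\n", referent.buf);
 *   strbuf_release(&referent);
 */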
235
236
static int reftable_be_config(const char *var, const char *value,
237
            const struct config_context *ctx,
238
            void *_opts)
239
0
{
240
0
  struct reftable_write_options *opts = _opts;
241
242
0
  if (!strcmp(var, "reftable.blocksize")) {
243
0
    unsigned long block_size = git_config_ulong(var, value, ctx->kvi);
244
0
    if (block_size > 16777215)
245
0
      die("reftable block size cannot exceed 16MB");
246
0
    opts->block_size = block_size;
247
0
  } else if (!strcmp(var, "reftable.restartinterval")) {
248
0
    unsigned long restart_interval = git_config_ulong(var, value, ctx->kvi);
249
0
    if (restart_interval > UINT16_MAX)
250
0
      die("reftable block size cannot exceed %u", (unsigned)UINT16_MAX);
251
0
    opts->restart_interval = restart_interval;
252
0
  } else if (!strcmp(var, "reftable.indexobjects")) {
253
0
    opts->skip_index_objects = !git_config_bool(var, value);
254
0
  } else if (!strcmp(var, "reftable.geometricfactor")) {
255
0
    unsigned long factor = git_config_ulong(var, value, ctx->kvi);
256
0
    if (factor > UINT8_MAX)
257
0
      die("reftable geometric factor cannot exceed %u", (unsigned)UINT8_MAX);
258
0
    opts->auto_compaction_factor = factor;
259
0
  }
260
261
0
  return 0;
262
0
}
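
/*
 * Illustration (values hypothetical): the configuration keys handled by
 * the callback above, and where they land:
 *
 *   [reftable]
 *       blockSize = 8192       -> opts->block_size
 *       restartInterval = 16   -> opts->restart_interval
 *       indexObjects = false   -> opts->skip_index_objects = 1
 *       geometricFactor = 2    -> opts->auto_compaction_factor
 */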
263
264
static struct ref_store *reftable_be_init(struct repository *repo,
265
            const char *gitdir,
266
            unsigned int store_flags)
267
0
{
268
0
  struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
269
0
  struct strbuf path = STRBUF_INIT;
270
0
  int is_worktree;
271
0
  mode_t mask;
272
273
0
  mask = umask(0);
274
0
  umask(mask);
275
276
0
  base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
277
0
  strmap_init(&refs->worktree_stacks);
278
0
  refs->store_flags = store_flags;
279
280
0
  refs->write_options.hash_id = repo->hash_algo->format_id;
281
0
  refs->write_options.default_permissions = calc_shared_perm(0666 & ~mask);
282
0
  refs->write_options.disable_auto_compact =
283
0
    !git_env_bool("GIT_TEST_REFTABLE_AUTOCOMPACTION", 1);
284
285
0
  git_config(reftable_be_config, &refs->write_options);
286
287
  /*
288
   * It is somewhat unfortunate that we have to mirror the default block
289
   * size of the reftable library here. But given that the write options
290
   * wouldn't be updated by the library here, and given that we require
291
   * the proper block size to trim reflog messages so that they fit, we
292
   * must set up a proper value here.
293
   */
294
0
  if (!refs->write_options.block_size)
295
0
    refs->write_options.block_size = 4096;
296
297
  /*
298
   * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
299
   * This stack contains both the shared and the main worktree refs.
300
   *
301
   * Note that we don't try to resolve the path in case we have a
302
   * worktree because `get_common_dir_noenv()` already does it for us.
303
   */
304
0
  is_worktree = get_common_dir_noenv(&path, gitdir);
305
0
  if (!is_worktree) {
306
0
    strbuf_reset(&path);
307
0
    strbuf_realpath(&path, gitdir, 0);
308
0
  }
309
0
  strbuf_addstr(&path, "/reftable");
310
0
  refs->err = reftable_new_stack(&refs->main_stack, path.buf,
311
0
               &refs->write_options);
312
0
  if (refs->err)
313
0
    goto done;
314
315
  /*
316
   * If we're in a worktree we also need to set up the worktree reftable
317
   * stack that is contained in the per-worktree GIT_DIR.
318
   *
319
   * Ideally, we would also add the stack to our worktree stack map. But
320
   * we have no way to figure out the worktree name here and thus can't
321
   * do it efficiently.
322
   */
323
0
  if (is_worktree) {
324
0
    strbuf_reset(&path);
325
0
    strbuf_addf(&path, "%s/reftable", gitdir);
326
327
0
    refs->err = reftable_new_stack(&refs->worktree_stack, path.buf,
328
0
                 &refs->write_options);
329
0
    if (refs->err)
330
0
      goto done;
331
0
  }
332
333
0
  chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);
334
335
0
done:
336
0
  assert(refs->err != REFTABLE_API_ERROR);
337
0
  strbuf_release(&path);
338
0
  return &refs->base;
339
0
}
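
/*
 * Worked example (assumed umask of 022): 0666 & ~022 = 0644, so
 * calc_shared_perm(0644) determines default_permissions, and the block
 * size falls back to the library default of 4096 bytes unless
 * reftable.blockSize overrides it.
 */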
340
341
static void reftable_be_release(struct ref_store *ref_store)
342
0
{
343
0
  struct reftable_ref_store *refs = reftable_be_downcast(ref_store, 0, "release");
344
0
  struct strmap_entry *entry;
345
0
  struct hashmap_iter iter;
346
347
0
  if (refs->main_stack) {
348
0
    reftable_stack_destroy(refs->main_stack);
349
0
    refs->main_stack = NULL;
350
0
  }
351
352
0
  if (refs->worktree_stack) {
353
0
    reftable_stack_destroy(refs->worktree_stack);
354
0
    refs->worktree_stack = NULL;
355
0
  }
356
357
0
  strmap_for_each_entry(&refs->worktree_stacks, &iter, entry)
358
0
    reftable_stack_destroy(entry->value);
359
0
  strmap_clear(&refs->worktree_stacks, 0);
360
0
}
361
362
static int reftable_be_create_on_disk(struct ref_store *ref_store,
363
              int flags UNUSED,
364
              struct strbuf *err UNUSED)
365
0
{
366
0
  struct reftable_ref_store *refs =
367
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "create");
368
0
  struct strbuf sb = STRBUF_INIT;
369
370
0
  strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
371
0
  safe_create_dir(sb.buf, 1);
372
0
  strbuf_reset(&sb);
373
374
0
  strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
375
0
  write_file(sb.buf, "ref: refs/heads/.invalid");
376
0
  adjust_shared_perm(sb.buf);
377
0
  strbuf_reset(&sb);
378
379
0
  strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
380
0
  safe_create_dir(sb.buf, 1);
381
0
  strbuf_reset(&sb);
382
383
0
  strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
384
0
  write_file(sb.buf, "this repository uses the reftable format");
385
0
  adjust_shared_perm(sb.buf);
386
387
0
  strbuf_release(&sb);
388
0
  return 0;
389
0
}
390
391
static int reftable_be_remove_on_disk(struct ref_store *ref_store,
392
              struct strbuf *err)
393
0
{
394
0
  struct reftable_ref_store *refs =
395
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "remove");
396
0
  struct strbuf sb = STRBUF_INIT;
397
0
  int ret = 0;
398
399
  /*
400
   * Release the ref store such that all stacks are closed. This is
401
   * required so that the "tables.list" file is not open anymore, which
402
   * would otherwise make it impossible to remove the file on Windows.
403
   */
404
0
  reftable_be_release(ref_store);
405
406
0
  strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
407
0
  if (remove_dir_recursively(&sb, 0) < 0) {
408
0
    strbuf_addf(err, "could not delete reftables: %s",
409
0
          strerror(errno));
410
0
    ret = -1;
411
0
  }
412
0
  strbuf_reset(&sb);
413
414
0
  strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
415
0
  if (unlink(sb.buf) < 0) {
416
0
    strbuf_addf(err, "could not delete stub HEAD: %s",
417
0
          strerror(errno));
418
0
    ret = -1;
419
0
  }
420
0
  strbuf_reset(&sb);
421
422
0
  strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
423
0
  if (unlink(sb.buf) < 0) {
424
0
    strbuf_addf(err, "could not delete stub heads: %s",
425
0
          strerror(errno));
426
0
    ret = -1;
427
0
  }
428
0
  strbuf_reset(&sb);
429
430
0
  strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
431
0
  if (rmdir(sb.buf) < 0) {
432
0
    strbuf_addf(err, "could not delete refs directory: %s",
433
0
          strerror(errno));
434
0
    ret = -1;
435
0
  }
436
437
0
  strbuf_release(&sb);
438
0
  return ret;
439
0
}
440
441
struct reftable_ref_iterator {
442
  struct ref_iterator base;
443
  struct reftable_ref_store *refs;
444
  struct reftable_iterator iter;
445
  struct reftable_ref_record ref;
446
  struct object_id oid;
447
448
  const char *prefix;
449
  size_t prefix_len;
450
  unsigned int flags;
451
  int err;
452
};
453
454
static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
455
0
{
456
0
  struct reftable_ref_iterator *iter =
457
0
    (struct reftable_ref_iterator *)ref_iterator;
458
0
  struct reftable_ref_store *refs = iter->refs;
459
0
  const char *referent = NULL;
460
461
0
  while (!iter->err) {
462
0
    int flags = 0;
463
464
0
    iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
465
0
    if (iter->err)
466
0
      break;
467
468
    /*
469
     * The files backend only lists references contained in "refs/" unless
470
     * the root refs are to be included. We emulate the same behaviour here.
471
     */
472
0
    if (!starts_with(iter->ref.refname, "refs/") &&
473
0
        !(iter->flags & DO_FOR_EACH_INCLUDE_ROOT_REFS &&
474
0
          is_root_ref(iter->ref.refname))) {
475
0
      continue;
476
0
    }
477
478
0
    if (iter->prefix_len &&
479
0
        strncmp(iter->prefix, iter->ref.refname, iter->prefix_len)) {
480
0
      iter->err = 1;
481
0
      break;
482
0
    }
483
484
0
    if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
485
0
        parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
486
0
          REF_WORKTREE_CURRENT)
487
0
      continue;
488
489
0
    switch (iter->ref.value_type) {
490
0
    case REFTABLE_REF_VAL1:
491
0
      oidread(&iter->oid, iter->ref.value.val1,
492
0
        refs->base.repo->hash_algo);
493
0
      break;
494
0
    case REFTABLE_REF_VAL2:
495
0
      oidread(&iter->oid, iter->ref.value.val2.value,
496
0
        refs->base.repo->hash_algo);
497
0
      break;
498
0
    case REFTABLE_REF_SYMREF:
499
0
      referent = refs_resolve_ref_unsafe(&iter->refs->base,
500
0
                 iter->ref.refname,
501
0
                 RESOLVE_REF_READING,
502
0
                 &iter->oid, &flags);
503
0
      if (!referent)
504
0
        oidclr(&iter->oid, refs->base.repo->hash_algo);
505
0
      break;
506
0
    default:
507
0
      BUG("unhandled reference value type %d", iter->ref.value_type);
508
0
    }
509
510
0
    if (is_null_oid(&iter->oid))
511
0
      flags |= REF_ISBROKEN;
512
513
0
    if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
514
0
      if (!refname_is_safe(iter->ref.refname))
515
0
        die(_("refname is dangerous: %s"), iter->ref.refname);
516
0
      oidclr(&iter->oid, refs->base.repo->hash_algo);
517
0
      flags |= REF_BAD_NAME | REF_ISBROKEN;
518
0
    }
519
520
0
    if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS &&
521
0
        flags & REF_ISSYMREF &&
522
0
        flags & REF_ISBROKEN)
523
0
      continue;
524
525
0
    if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
526
0
        !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
527
0
              &iter->oid, flags))
528
0
        continue;
529
530
0
    iter->base.refname = iter->ref.refname;
531
0
    iter->base.referent = referent;
532
0
    iter->base.oid = &iter->oid;
533
0
    iter->base.flags = flags;
534
535
0
    break;
536
0
  }
537
538
0
  if (iter->err > 0) {
539
0
    if (ref_iterator_abort(ref_iterator) != ITER_DONE)
540
0
      return ITER_ERROR;
541
0
    return ITER_DONE;
542
0
  }
543
544
0
  if (iter->err < 0) {
545
0
    ref_iterator_abort(ref_iterator);
546
0
    return ITER_ERROR;
547
0
  }
548
549
0
  return ITER_OK;
550
0
}
551
552
static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
553
              struct object_id *peeled)
554
0
{
555
0
  struct reftable_ref_iterator *iter =
556
0
    (struct reftable_ref_iterator *)ref_iterator;
557
558
0
  if (iter->ref.value_type == REFTABLE_REF_VAL2) {
559
0
    oidread(peeled, iter->ref.value.val2.target_value,
560
0
      iter->refs->base.repo->hash_algo);
561
0
    return 0;
562
0
  }
563
564
0
  return -1;
565
0
}
566
567
static int reftable_ref_iterator_abort(struct ref_iterator *ref_iterator)
568
0
{
569
0
  struct reftable_ref_iterator *iter =
570
0
    (struct reftable_ref_iterator *)ref_iterator;
571
0
  reftable_ref_record_release(&iter->ref);
572
0
  reftable_iterator_destroy(&iter->iter);
573
0
  free(iter);
574
0
  return ITER_DONE;
575
0
}
576
577
static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
578
  .advance = reftable_ref_iterator_advance,
579
  .peel = reftable_ref_iterator_peel,
580
  .abort = reftable_ref_iterator_abort
581
};
582
583
static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
584
                  struct reftable_stack *stack,
585
                  const char *prefix,
586
                  int flags)
587
0
{
588
0
  struct reftable_ref_iterator *iter;
589
0
  int ret;
590
591
0
  iter = xcalloc(1, sizeof(*iter));
592
0
  base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable);
593
0
  iter->prefix = prefix;
594
0
  iter->prefix_len = prefix ? strlen(prefix) : 0;
595
0
  iter->base.oid = &iter->oid;
596
0
  iter->flags = flags;
597
0
  iter->refs = refs;
598
599
0
  ret = refs->err;
600
0
  if (ret)
601
0
    goto done;
602
603
0
  ret = reftable_stack_reload(stack);
604
0
  if (ret)
605
0
    goto done;
606
607
0
  reftable_stack_init_ref_iterator(stack, &iter->iter);
608
0
  ret = reftable_iterator_seek_ref(&iter->iter, prefix);
609
0
  if (ret)
610
0
    goto done;
611
612
0
done:
613
0
  iter->err = ret;
614
0
  return iter;
615
0
}
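
/*
 * Hypothetical usage sketch: iterate all branches of the main stack.
 * ref_iterator_advance() dispatches to the vtable set up above.
 *
 *   struct reftable_ref_iterator *iter =
 *       ref_iterator_for_stack(refs, refs->main_stack, "refs/heads/", 0);
 *
 *   while (ref_iterator_advance(&iter->base) == ITER_OK)
 *       printf("%s\n", iter->base.refname);
 */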
616
617
static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
618
                   const char *prefix,
619
                   const char **exclude_patterns UNUSED,
620
                   unsigned int flags)
621
0
{
622
0
  struct reftable_ref_iterator *main_iter, *worktree_iter;
623
0
  struct reftable_ref_store *refs;
624
0
  unsigned int required_flags = REF_STORE_READ;
625
626
0
  if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
627
0
    required_flags |= REF_STORE_ODB;
628
0
  refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");
629
630
0
  main_iter = ref_iterator_for_stack(refs, refs->main_stack, prefix, flags);
631
632
  /*
633
   * The worktree stack is only set when we're in an actual worktree
634
   * right now. If we aren't, then we only return the common reftable
635
   * iterator.
636
   */
637
0
   if (!refs->worktree_stack)
638
0
    return &main_iter->base;
639
640
  /*
641
   * Otherwise we merge both the common and the per-worktree refs into a
642
   * single iterator.
643
   */
644
0
  worktree_iter = ref_iterator_for_stack(refs, refs->worktree_stack, prefix, flags);
645
0
  return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
646
0
          ref_iterator_select, NULL);
647
0
}
648
649
static int reftable_be_read_raw_ref(struct ref_store *ref_store,
650
            const char *refname,
651
            struct object_id *oid,
652
            struct strbuf *referent,
653
            unsigned int *type,
654
            int *failure_errno)
655
0
{
656
0
  struct reftable_ref_store *refs =
657
0
    reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
658
0
  struct reftable_stack *stack = stack_for(refs, refname, &refname);
659
0
  int ret;
660
661
0
  if (refs->err < 0)
662
0
    return refs->err;
663
664
0
  ret = reftable_stack_reload(stack);
665
0
  if (ret)
666
0
    return ret;
667
668
0
  ret = read_ref_without_reload(refs, stack, refname, oid, referent, type);
669
0
  if (ret < 0)
670
0
    return ret;
671
0
  if (ret > 0) {
672
0
    *failure_errno = ENOENT;
673
0
    return -1;
674
0
  }
675
676
0
  return 0;
677
0
}
678
679
static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
680
           const char *refname,
681
           struct strbuf *referent)
682
0
{
683
0
  struct reftable_ref_store *refs =
684
0
    reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
685
0
  struct reftable_stack *stack = stack_for(refs, refname, &refname);
686
0
  struct reftable_ref_record ref = {0};
687
0
  int ret;
688
689
0
  ret = reftable_stack_reload(stack);
690
0
  if (ret)
691
0
    return ret;
692
693
0
  ret = reftable_stack_read_ref(stack, refname, &ref);
694
0
  if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
695
0
    strbuf_addstr(referent, ref.value.symref);
696
0
  else
697
0
    ret = -1;
698
699
0
  reftable_ref_record_release(&ref);
700
0
  return ret;
701
0
}
702
703
struct reftable_transaction_update {
704
  struct ref_update *update;
705
  struct object_id current_oid;
706
};
707
708
struct write_transaction_table_arg {
709
  struct reftable_ref_store *refs;
710
  struct reftable_stack *stack;
711
  struct reftable_addition *addition;
712
  struct reftable_transaction_update *updates;
713
  size_t updates_nr;
714
  size_t updates_alloc;
715
  size_t updates_expected;
716
};
717
718
struct reftable_transaction_data {
719
  struct write_transaction_table_arg *args;
720
  size_t args_nr, args_alloc;
721
};
722
723
static void free_transaction_data(struct reftable_transaction_data *tx_data)
724
0
{
725
0
  if (!tx_data)
726
0
    return;
727
0
  for (size_t i = 0; i < tx_data->args_nr; i++) {
728
0
    reftable_addition_destroy(tx_data->args[i].addition);
729
0
    free(tx_data->args[i].updates);
730
0
  }
731
0
  free(tx_data->args);
732
0
  free(tx_data);
733
0
}
734
735
/*
736
 * Prepare transaction update for the given reference update. This will cause
737
 * us to lock the corresponding reftable stack against concurrent modification.
738
 */
739
static int prepare_transaction_update(struct write_transaction_table_arg **out,
740
              struct reftable_ref_store *refs,
741
              struct reftable_transaction_data *tx_data,
742
              struct ref_update *update,
743
              struct strbuf *err)
744
0
{
745
0
  struct reftable_stack *stack = stack_for(refs, update->refname, NULL);
746
0
  struct write_transaction_table_arg *arg = NULL;
747
0
  size_t i;
748
0
  int ret;
749
750
  /*
751
   * Search for a preexisting stack update. If there is one then we add
752
   * the update to it, otherwise we set up a new stack update.
753
   */
754
0
  for (i = 0; !arg && i < tx_data->args_nr; i++)
755
0
    if (tx_data->args[i].stack == stack)
756
0
      arg = &tx_data->args[i];
757
758
0
  if (!arg) {
759
0
    struct reftable_addition *addition;
760
761
0
    ret = reftable_stack_reload(stack);
762
0
    if (ret)
763
0
      return ret;
764
765
0
    ret = reftable_stack_new_addition(&addition, stack);
766
0
    if (ret) {
767
0
      if (ret == REFTABLE_LOCK_ERROR)
768
0
        strbuf_addstr(err, "cannot lock references");
769
0
      return ret;
770
0
    }
771
772
0
    ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
773
0
         tx_data->args_alloc);
774
0
    arg = &tx_data->args[tx_data->args_nr++];
775
0
    arg->refs = refs;
776
0
    arg->stack = stack;
777
0
    arg->addition = addition;
778
0
    arg->updates = NULL;
779
0
    arg->updates_nr = 0;
780
0
    arg->updates_alloc = 0;
781
0
    arg->updates_expected = 0;
782
0
  }
783
784
0
  arg->updates_expected++;
785
786
0
  if (out)
787
0
    *out = arg;
788
789
0
  return 0;
790
0
}
791
792
/*
793
 * Queue a reference update for the correct stack. We potentially need to
794
 * handle multiple stack updates in a single transaction when it spans across
795
 * multiple worktrees.
796
 */
797
static int queue_transaction_update(struct reftable_ref_store *refs,
798
            struct reftable_transaction_data *tx_data,
799
            struct ref_update *update,
800
            struct object_id *current_oid,
801
            struct strbuf *err)
802
0
{
803
0
  struct write_transaction_table_arg *arg = NULL;
804
0
  int ret;
805
806
0
  if (update->backend_data)
807
0
    BUG("reference update queued more than once");
808
809
0
  ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
810
0
  if (ret < 0)
811
0
    return ret;
812
813
0
  ALLOC_GROW(arg->updates, arg->updates_nr + 1,
814
0
       arg->updates_alloc);
815
0
  arg->updates[arg->updates_nr].update = update;
816
0
  oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
817
0
  update->backend_data = &arg->updates[arg->updates_nr++];
818
819
0
  return 0;
820
0
}
821
822
static int reftable_be_transaction_prepare(struct ref_store *ref_store,
823
             struct ref_transaction *transaction,
824
             struct strbuf *err)
825
0
{
826
0
  struct reftable_ref_store *refs =
827
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
828
0
  struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
829
0
  struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
830
0
  struct reftable_transaction_data *tx_data = NULL;
831
0
  struct object_id head_oid;
832
0
  unsigned int head_type = 0;
833
0
  size_t i;
834
0
  int ret;
835
836
0
  ret = refs->err;
837
0
  if (ret < 0)
838
0
    goto done;
839
840
0
  tx_data = xcalloc(1, sizeof(*tx_data));
841
842
  /*
843
   * Preprocess all updates. First, we check that there are no duplicate
844
   * reference updates in this transaction. Second, we lock all stacks
845
   * that will be modified during the transaction.
846
   */
847
0
  for (i = 0; i < transaction->nr; i++) {
848
0
    ret = prepare_transaction_update(NULL, refs, tx_data,
849
0
             transaction->updates[i], err);
850
0
    if (ret)
851
0
      goto done;
852
853
0
    string_list_append(&affected_refnames,
854
0
           transaction->updates[i]->refname);
855
0
  }
856
857
  /*
858
   * Now that we have counted updates per stack we can preallocate their
859
   * arrays. This avoids having to reallocate many times.
860
   */
861
0
  for (i = 0; i < tx_data->args_nr; i++) {
862
0
    CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
863
0
    tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
864
0
  }
865
866
  /*
867
   * Fail if a refname appears more than once in the transaction.
868
   * This code is taken from the files backend and is a good candidate to
869
   * be moved into the generic layer.
870
   */
871
0
  string_list_sort(&affected_refnames);
872
0
  if (ref_update_reject_duplicates(&affected_refnames, err)) {
873
0
    ret = TRANSACTION_GENERIC_ERROR;
874
0
    goto done;
875
0
  }
876
877
0
  ret = read_ref_without_reload(refs, stack_for(refs, "HEAD", NULL), "HEAD",
878
0
              &head_oid, &head_referent, &head_type);
879
0
  if (ret < 0)
880
0
    goto done;
881
0
  ret = 0;
882
883
0
  for (i = 0; i < transaction->nr; i++) {
884
0
    struct ref_update *u = transaction->updates[i];
885
0
    struct object_id current_oid = {0};
886
0
    struct reftable_stack *stack;
887
0
    const char *rewritten_ref;
888
889
0
    stack = stack_for(refs, u->refname, &rewritten_ref);
890
891
    /* Verify that the new object ID is valid. */
892
0
    if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
893
0
        !(u->flags & REF_SKIP_OID_VERIFICATION) &&
894
0
        !(u->flags & REF_LOG_ONLY)) {
895
0
      struct object *o = parse_object(refs->base.repo, &u->new_oid);
896
0
      if (!o) {
897
0
        strbuf_addf(err,
898
0
              _("trying to write ref '%s' with nonexistent object %s"),
899
0
              u->refname, oid_to_hex(&u->new_oid));
900
0
        ret = -1;
901
0
        goto done;
902
0
      }
903
904
0
      if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
905
0
        strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
906
0
              oid_to_hex(&u->new_oid), u->refname);
907
0
        ret = -1;
908
0
        goto done;
909
0
      }
910
0
    }
911
912
    /*
913
     * When we update the reference that HEAD points to we enqueue
914
     * a second log-only update for HEAD so that its reflog is
915
     * updated accordingly.
916
     */
917
0
    if (head_type == REF_ISSYMREF &&
918
0
        !(u->flags & REF_LOG_ONLY) &&
919
0
        !(u->flags & REF_UPDATE_VIA_HEAD) &&
920
0
        !strcmp(rewritten_ref, head_referent.buf)) {
921
0
      struct ref_update *new_update;
922
923
      /*
924
       * First make sure that HEAD is not already in the
925
       * transaction. This check is O(lg N) in the transaction
926
       * size, but it happens at most once per transaction.
927
       */
928
0
      if (string_list_has_string(&affected_refnames, "HEAD")) {
929
        /* An entry already existed */
930
0
        strbuf_addf(err,
931
0
              _("multiple updates for 'HEAD' (including one "
932
0
              "via its referent '%s') are not allowed"),
933
0
              u->refname);
934
0
        ret = TRANSACTION_NAME_CONFLICT;
935
0
        goto done;
936
0
      }
937
938
0
      new_update = ref_transaction_add_update(
939
0
          transaction, "HEAD",
940
0
          u->flags | REF_LOG_ONLY | REF_NO_DEREF,
941
0
          &u->new_oid, &u->old_oid, NULL, NULL, u->msg);
942
0
      string_list_insert(&affected_refnames, new_update->refname);
943
0
    }
944
945
0
    ret = read_ref_without_reload(refs, stack, rewritten_ref,
946
0
                &current_oid, &referent, &u->type);
947
0
    if (ret < 0)
948
0
      goto done;
949
0
    if (ret > 0 && !ref_update_expects_existing_old_ref(u)) {
950
      /*
951
       * The reference does not exist, and we either have no
952
       * old object ID or expect the reference to not exist.
953
       * We can thus skip below safety checks as well as the
954
       * symref splitting. But we do want to verify that
955
       * there is no conflicting reference here so that we
956
       * can output a proper error message instead of failing
957
       * at a later point.
958
       */
959
0
      ret = refs_verify_refname_available(ref_store, u->refname,
960
0
                  &affected_refnames, NULL, err);
961
0
      if (ret < 0)
962
0
        goto done;
963
964
      /*
965
       * There is no need to write the reference deletion
966
       * when the reference in question doesn't exist.
967
       */
968
0
       if ((u->flags & REF_HAVE_NEW) && !ref_update_has_null_new_value(u)) {
969
0
         ret = queue_transaction_update(refs, tx_data, u,
970
0
                &current_oid, err);
971
0
         if (ret)
972
0
           goto done;
973
0
       }
974
975
0
      continue;
976
0
    }
977
0
    if (ret > 0) {
978
      /* The reference does not exist, but we expected it to. */
979
0
      strbuf_addf(err, _("cannot lock ref '%s': "
980
0
            "unable to resolve reference '%s'"),
981
0
            ref_update_original_update_refname(u), u->refname);
982
0
      ret = -1;
983
0
      goto done;
984
0
    }
985
986
0
    if (u->type & REF_ISSYMREF) {
987
      /*
988
       * The reftable stack is locked at this point already,
989
       * so it is safe to call `refs_resolve_ref_unsafe()`
990
       * here without causing races.
991
       */
992
0
      const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
993
0
                       &current_oid, NULL);
994
995
0
      if (u->flags & REF_NO_DEREF) {
996
0
        if (u->flags & REF_HAVE_OLD && !resolved) {
997
0
          strbuf_addf(err, _("cannot lock ref '%s': "
998
0
                "error reading reference"), u->refname);
999
0
          ret = -1;
1000
0
          goto done;
1001
0
        }
1002
0
      } else {
1003
0
        struct ref_update *new_update;
1004
0
        int new_flags;
1005
1006
0
        new_flags = u->flags;
1007
0
        if (!strcmp(rewritten_ref, "HEAD"))
1008
0
          new_flags |= REF_UPDATE_VIA_HEAD;
1009
1010
        /*
1011
         * If we are updating a symref (eg. HEAD), we should also
1012
         * update the branch that the symref points to.
1013
         *
1014
         * This is generic functionality, and would be better
1015
         * done in refs.c, but the current implementation is
1016
         * intertwined with the locking in files-backend.c.
1017
         */
1018
0
        new_update = ref_transaction_add_update(
1019
0
          transaction, referent.buf, new_flags,
1020
0
          u->new_target ? NULL : &u->new_oid,
1021
0
          u->old_target ? NULL : &u->old_oid,
1022
0
          u->new_target, u->old_target, u->msg);
1023
1024
0
        new_update->parent_update = u;
1025
1026
        /*
1027
         * Change the symbolic ref update to log only. Also, it
1028
         * doesn't need to check its old OID value, as that will be
1029
         * done when new_update is processed.
1030
         */
1031
0
        u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
1032
0
        u->flags &= ~REF_HAVE_OLD;
1033
1034
0
        if (string_list_has_string(&affected_refnames, new_update->refname)) {
1035
0
          strbuf_addf(err,
1036
0
                _("multiple updates for '%s' (including one "
1037
0
                "via symref '%s') are not allowed"),
1038
0
                referent.buf, u->refname);
1039
0
          ret = TRANSACTION_NAME_CONFLICT;
1040
0
          goto done;
1041
0
        }
1042
0
        string_list_insert(&affected_refnames, new_update->refname);
1043
0
      }
1044
0
    }
1045
1046
    /*
1047
     * Verify that the old object matches our expectations. Note
1048
     * that the error messages here do not make a lot of sense in
1049
     * the context of the reftable backend as we never lock
1050
     * individual refs. But the error messages match what the files
1051
     * backend returns, which keeps our tests happy.
1052
     */
1053
0
    if (u->old_target) {
1054
0
      if (!(u->type & REF_ISSYMREF)) {
1055
0
        strbuf_addf(err, _("cannot lock ref '%s': "
1056
0
             "expected symref with target '%s': "
1057
0
             "but is a regular ref"),
1058
0
              ref_update_original_update_refname(u),
1059
0
              u->old_target);
1060
0
        ret = -1;
1061
0
        goto done;
1062
0
      }
1063
1064
0
      if (ref_update_check_old_target(referent.buf, u, err)) {
1065
0
        ret = -1;
1066
0
        goto done;
1067
0
      }
1068
0
    } else if ((u->flags & REF_HAVE_OLD) && !oideq(&current_oid, &u->old_oid)) {
1069
0
      if (is_null_oid(&u->old_oid))
1070
0
        strbuf_addf(err, _("cannot lock ref '%s': "
1071
0
               "reference already exists"),
1072
0
              ref_update_original_update_refname(u));
1073
0
      else if (is_null_oid(&current_oid))
1074
0
        strbuf_addf(err, _("cannot lock ref '%s': "
1075
0
               "reference is missing but expected %s"),
1076
0
              ref_update_original_update_refname(u),
1077
0
              oid_to_hex(&u->old_oid));
1078
0
      else
1079
0
        strbuf_addf(err, _("cannot lock ref '%s': "
1080
0
               "is at %s but expected %s"),
1081
0
              ref_update_original_update_refname(u),
1082
0
              oid_to_hex(&current_oid),
1083
0
              oid_to_hex(&u->old_oid));
1084
0
      ret = -1;
1085
0
      goto done;
1086
0
    }
1087
1088
    /*
1089
     * If all of the following conditions are true:
1090
     *
1091
     *   - We're not about to write a symref.
1092
     *   - We're not about to write a log-only entry.
1093
     *   - Old and new object ID are the same.
1094
     *
1095
     * Then we're essentially doing a no-op update that can be
1096
     * skipped. This is not only for the sake of efficiency, but
1097
     * also skips writing unneeded reflog entries.
1098
     */
1099
0
    if ((u->type & REF_ISSYMREF) ||
1100
0
        (u->flags & REF_LOG_ONLY) ||
1101
0
        (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid))) {
1102
0
      ret = queue_transaction_update(refs, tx_data, u,
1103
0
                   &current_oid, err);
1104
0
      if (ret)
1105
0
        goto done;
1106
0
    }
1107
0
  }
1108
1109
0
  transaction->backend_data = tx_data;
1110
0
  transaction->state = REF_TRANSACTION_PREPARED;
1111
1112
0
done:
1113
0
  assert(ret != REFTABLE_API_ERROR);
1114
0
  if (ret < 0) {
1115
0
    free_transaction_data(tx_data);
1116
0
    transaction->state = REF_TRANSACTION_CLOSED;
1117
0
    if (!err->len)
1118
0
      strbuf_addf(err, _("reftable: transaction prepare: %s"),
1119
0
            reftable_error_str(ret));
1120
0
  }
1121
0
  string_list_clear(&affected_refnames, 0);
1122
0
  strbuf_release(&referent);
1123
0
  strbuf_release(&head_referent);
1124
1125
0
  return ret;
1126
0
}
1127
1128
static int reftable_be_transaction_abort(struct ref_store *ref_store UNUSED,
1129
           struct ref_transaction *transaction,
1130
           struct strbuf *err UNUSED)
1131
0
{
1132
0
  struct reftable_transaction_data *tx_data = transaction->backend_data;
1133
0
  free_transaction_data(tx_data);
1134
0
  transaction->state = REF_TRANSACTION_CLOSED;
1135
0
  return 0;
1136
0
}
1137
1138
static int transaction_update_cmp(const void *a, const void *b)
1139
0
{
1140
0
  return strcmp(((struct reftable_transaction_update *)a)->update->refname,
1141
0
          ((struct reftable_transaction_update *)b)->update->refname);
1142
0
}
1143
1144
static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
1145
0
{
1146
0
  struct write_transaction_table_arg *arg = cb_data;
1147
0
  uint64_t ts = reftable_stack_next_update_index(arg->stack);
1148
0
  struct reftable_log_record *logs = NULL;
1149
0
  struct ident_split committer_ident = {0};
1150
0
  size_t logs_nr = 0, logs_alloc = 0, i;
1151
0
  const char *committer_info;
1152
0
  int ret = 0;
1153
1154
0
  committer_info = git_committer_info(0);
1155
0
  if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
1156
0
    BUG("failed splitting committer info");
1157
1158
0
  QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);
1159
1160
0
  reftable_writer_set_limits(writer, ts, ts);
1161
1162
0
  for (i = 0; i < arg->updates_nr; i++) {
1163
0
    struct reftable_transaction_update *tx_update = &arg->updates[i];
1164
0
    struct ref_update *u = tx_update->update;
1165
1166
    /*
1167
     * Write a reflog entry when updating a ref to point to
1168
     * something new in any of the following cases:
1169
     *
1170
     * - The reference is about to be deleted. We always want to
1171
     *   delete the reflog in that case.
1172
     * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
1173
     *   the reflog entry.
1174
     * - `core.logAllRefUpdates` tells us to create the reflog for
1175
     *   the given ref.
1176
     */
1177
0
    if ((u->flags & REF_HAVE_NEW) &&
1178
0
        !(u->type & REF_ISSYMREF) &&
1179
0
        ref_update_has_null_new_value(u)) {
1180
0
      struct reftable_log_record log = {0};
1181
0
      struct reftable_iterator it = {0};
1182
1183
0
      reftable_stack_init_log_iterator(arg->stack, &it);
1184
1185
      /*
1186
       * When deleting refs we also delete all reflog entries
1187
       * with them. While it is not strictly required to
1188
       * delete reflogs together with their refs, this
1189
       * matches the behaviour of the files backend.
1190
       *
1191
       * Unfortunately, we have no better way than to delete
1192
       * all reflog entries one by one.
1193
       */
1194
0
      ret = reftable_iterator_seek_log(&it, u->refname);
1195
0
      while (ret == 0) {
1196
0
        struct reftable_log_record *tombstone;
1197
1198
0
        ret = reftable_iterator_next_log(&it, &log);
1199
0
        if (ret < 0)
1200
0
          break;
1201
0
        if (ret > 0 || strcmp(log.refname, u->refname)) {
1202
0
          ret = 0;
1203
0
          break;
1204
0
        }
1205
1206
0
        ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1207
0
        tombstone = &logs[logs_nr++];
1208
0
        tombstone->refname = xstrdup(u->refname);
1209
0
        tombstone->value_type = REFTABLE_LOG_DELETION;
1210
0
        tombstone->update_index = log.update_index;
1211
0
      }
1212
1213
0
      reftable_log_record_release(&log);
1214
0
      reftable_iterator_destroy(&it);
1215
1216
0
      if (ret)
1217
0
        goto done;
1218
0
    } else if (!(u->flags & REF_SKIP_CREATE_REFLOG) &&
1219
0
         (u->flags & REF_HAVE_NEW) &&
1220
0
         (u->flags & REF_FORCE_CREATE_REFLOG ||
1221
0
          should_write_log(&arg->refs->base, u->refname))) {
1222
0
      struct reftable_log_record *log;
1223
0
      int create_reflog = 1;
1224
1225
0
      if (u->new_target) {
1226
0
        if (!refs_resolve_ref_unsafe(&arg->refs->base, u->new_target,
1227
0
                   RESOLVE_REF_READING, &u->new_oid, NULL)) {
1228
          /*
1229
           * TODO: currently we skip creating reflogs for dangling
1230
           * symref updates. It would be nice to capture these as
1231
           * zero-OID updates, however.
1232
           */
1233
0
          create_reflog = 0;
1234
0
        }
1235
0
      }
1236
1237
0
      if (create_reflog) {
1238
0
        ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1239
0
        log = &logs[logs_nr++];
1240
0
        memset(log, 0, sizeof(*log));
1241
1242
0
        fill_reftable_log_record(log, &committer_ident);
1243
0
        log->update_index = ts;
1244
0
        log->refname = xstrdup(u->refname);
1245
0
        memcpy(log->value.update.new_hash,
1246
0
               u->new_oid.hash, GIT_MAX_RAWSZ);
1247
0
        memcpy(log->value.update.old_hash,
1248
0
               tx_update->current_oid.hash, GIT_MAX_RAWSZ);
1249
0
        log->value.update.message =
1250
0
          xstrndup(u->msg, arg->refs->write_options.block_size / 2);
1251
0
      }
1252
0
    }
1253
1254
0
    if (u->flags & REF_LOG_ONLY)
1255
0
      continue;
1256
1257
0
    if (u->new_target) {
1258
0
      struct reftable_ref_record ref = {
1259
0
        .refname = (char *)u->refname,
1260
0
        .value_type = REFTABLE_REF_SYMREF,
1261
0
        .value.symref = (char *)u->new_target,
1262
0
        .update_index = ts,
1263
0
      };
1264
1265
0
      ret = reftable_writer_add_ref(writer, &ref);
1266
0
      if (ret < 0)
1267
0
        goto done;
1268
0
    } else if ((u->flags & REF_HAVE_NEW) && ref_update_has_null_new_value(u)) {
1269
0
      struct reftable_ref_record ref = {
1270
0
        .refname = (char *)u->refname,
1271
0
        .update_index = ts,
1272
0
        .value_type = REFTABLE_REF_DELETION,
1273
0
      };
1274
1275
0
      ret = reftable_writer_add_ref(writer, &ref);
1276
0
      if (ret < 0)
1277
0
        goto done;
1278
0
    } else if (u->flags & REF_HAVE_NEW) {
1279
0
      struct reftable_ref_record ref = {0};
1280
0
      struct object_id peeled;
1281
0
      int peel_error;
1282
1283
0
      ref.refname = (char *)u->refname;
1284
0
      ref.update_index = ts;
1285
1286
0
      peel_error = peel_object(arg->refs->base.repo, &u->new_oid, &peeled);
1287
0
      if (!peel_error) {
1288
0
        ref.value_type = REFTABLE_REF_VAL2;
1289
0
        memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
1290
0
        memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
1291
0
      } else if (!is_null_oid(&u->new_oid)) {
1292
0
        ref.value_type = REFTABLE_REF_VAL1;
1293
0
        memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
1294
0
      }
1295
1296
0
      ret = reftable_writer_add_ref(writer, &ref);
1297
0
      if (ret < 0)
1298
0
        goto done;
1299
0
    }
1300
0
  }
1301
1302
  /*
1303
   * Logs are written at the end so that we do not have intermixed ref
1304
   * and log blocks.
1305
   */
1306
0
  if (logs) {
1307
0
    ret = reftable_writer_add_logs(writer, logs, logs_nr);
1308
0
    if (ret < 0)
1309
0
      goto done;
1310
0
  }
1311
1312
0
done:
1313
0
  assert(ret != REFTABLE_API_ERROR);
1314
0
  for (i = 0; i < logs_nr; i++)
1315
0
    reftable_log_record_release(&logs[i]);
1316
0
  free(logs);
1317
0
  return ret;
1318
0
}
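
/*
 * Note on the xstrndup(u->msg, block_size / 2) call above: with the
 * default 4096-byte block size this truncates reflog messages at 2048
 * bytes, so that log records keep fitting into a block.
 */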
1319
1320
static int reftable_be_transaction_finish(struct ref_store *ref_store UNUSED,
1321
            struct ref_transaction *transaction,
1322
            struct strbuf *err)
1323
0
{
1324
0
  struct reftable_transaction_data *tx_data = transaction->backend_data;
1325
0
  int ret = 0;
1326
1327
0
  for (size_t i = 0; i < tx_data->args_nr; i++) {
1328
0
    ret = reftable_addition_add(tx_data->args[i].addition,
1329
0
              write_transaction_table, &tx_data->args[i]);
1330
0
    if (ret < 0)
1331
0
      goto done;
1332
1333
0
    ret = reftable_addition_commit(tx_data->args[i].addition);
1334
0
    if (ret < 0)
1335
0
      goto done;
1336
0
  }
1337
1338
0
done:
1339
0
  assert(ret != REFTABLE_API_ERROR);
1340
0
  free_transaction_data(tx_data);
1341
0
  transaction->state = REF_TRANSACTION_CLOSED;
1342
1343
0
  if (ret) {
1344
0
    strbuf_addf(err, _("reftable: transaction failure: %s"),
1345
0
          reftable_error_str(ret));
1346
0
    return -1;
1347
0
  }
1348
0
  return ret;
1349
0
}
1350
1351
static int reftable_be_initial_transaction_commit(struct ref_store *ref_store UNUSED,
1352
              struct ref_transaction *transaction,
1353
              struct strbuf *err)
1354
0
{
1355
0
  return ref_transaction_commit(transaction, err);
1356
0
}
1357
1358
static int reftable_be_pack_refs(struct ref_store *ref_store,
1359
         struct pack_refs_opts *opts)
1360
0
{
1361
0
  struct reftable_ref_store *refs =
1362
0
    reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "pack_refs");
1363
0
  struct reftable_stack *stack;
1364
0
  int ret;
1365
1366
0
  if (refs->err)
1367
0
    return refs->err;
1368
1369
0
  stack = refs->worktree_stack;
1370
0
  if (!stack)
1371
0
    stack = refs->main_stack;
1372
1373
0
  if (opts->flags & PACK_REFS_AUTO)
1374
0
    ret = reftable_stack_auto_compact(stack);
1375
0
  else
1376
0
    ret = reftable_stack_compact_all(stack, NULL);
1377
0
  if (ret < 0) {
1378
0
    ret = error(_("unable to compact stack: %s"),
1379
0
          reftable_error_str(ret));
1380
0
    goto out;
1381
0
  }
1382
1383
0
  ret = reftable_stack_clean(stack);
1384
0
  if (ret)
1385
0
    goto out;
1386
1387
0
out:
1388
0
  return ret;
1389
0
}
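
/*
 * For example, "git pack-refs --auto" sets PACK_REFS_AUTO and thus maps
 * to reftable_stack_auto_compact(), while a plain "git pack-refs"
 * compacts all tables via reftable_stack_compact_all().
 */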
1390
1391
struct write_create_symref_arg {
1392
  struct reftable_ref_store *refs;
1393
  struct reftable_stack *stack;
1394
  struct strbuf *err;
1395
  const char *refname;
1396
  const char *target;
1397
  const char *logmsg;
1398
};
1399
1400
struct write_copy_arg {
1401
  struct reftable_ref_store *refs;
1402
  struct reftable_stack *stack;
1403
  const char *oldname;
1404
  const char *newname;
1405
  const char *logmsg;
1406
  int delete_old;
1407
};
1408
1409
static int write_copy_table(struct reftable_writer *writer, void *cb_data)
1410
0
{
1411
0
  struct write_copy_arg *arg = cb_data;
1412
0
  uint64_t deletion_ts, creation_ts;
1413
0
  struct reftable_ref_record old_ref = {0}, refs[2] = {0};
1414
0
  struct reftable_log_record old_log = {0}, *logs = NULL;
1415
0
  struct reftable_iterator it = {0};
1416
0
  struct string_list skip = STRING_LIST_INIT_NODUP;
1417
0
  struct ident_split committer_ident = {0};
1418
0
  struct strbuf errbuf = STRBUF_INIT;
1419
0
  size_t logs_nr = 0, logs_alloc = 0, i;
1420
0
  const char *committer_info;
1421
0
  int ret;
1422
1423
0
  committer_info = git_committer_info(0);
1424
0
  if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
1425
0
    BUG("failed splitting committer info");
1426
1427
0
  if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
1428
0
    ret = error(_("refname %s not found"), arg->oldname);
1429
0
    goto done;
1430
0
  }
1431
0
  if (old_ref.value_type == REFTABLE_REF_SYMREF) {
1432
0
    ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
1433
0
          arg->oldname);
1434
0
    goto done;
1435
0
  }
1436
1437
  /*
1438
   * There's nothing to do when the old and new name are the same, so
1439
   * we exit early.
1440
   */
1441
0
  if (!strcmp(arg->oldname, arg->newname)) {
1442
0
    ret = 0;
1443
0
    goto done;
1444
0
  }
1445
1446
  /*
1447
   * Verify that the new refname is available.
1448
   */
1449
0
  if (arg->delete_old)
1450
0
    string_list_insert(&skip, arg->oldname);
1451
0
  ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
1452
0
              NULL, &skip, &errbuf);
1453
0
  if (ret < 0) {
1454
0
    error("%s", errbuf.buf);
1455
0
    goto done;
1456
0
  }
1457
1458
  /*
1459
   * When deleting the old reference we have to use two update indices:
1460
   * once to delete the old ref and its reflog, and once to create the
1461
   * new ref and its reflog. They need to be staged with two separate
1462
   * indices because the new reflog needs to encode both the deletion of
1463
   * the old branch and the creation of the new branch, and we cannot do
1464
   * two changes to a reflog in a single update.
1465
   */
1466
0
  deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
1467
0
  if (arg->delete_old)
1468
0
    creation_ts++;
1469
0
  reftable_writer_set_limits(writer, deletion_ts, creation_ts);
1470
1471
  /*
1472
   * Add the new reference. If this is a rename then we also delete the
1473
   * old reference.
1474
   */
1475
0
  refs[0] = old_ref;
1476
0
  refs[0].refname = xstrdup(arg->newname);
1477
0
  refs[0].update_index = creation_ts;
1478
0
  if (arg->delete_old) {
    refs[1].refname = xstrdup(arg->oldname);
    refs[1].value_type = REFTABLE_REF_DELETION;
    refs[1].update_index = deletion_ts;
  }

  ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
  if (ret < 0)
    goto done;

  /*
   * When deleting the old branch we need to create a reflog entry on the
   * new branch name that indicates that the old branch has been deleted
   * and then recreated. This is a tad weird, but matches what the files
   * backend does.
   */
  if (arg->delete_old) {
    struct strbuf head_referent = STRBUF_INIT;
    struct object_id head_oid;
    int append_head_reflog;
    unsigned head_type = 0;

    ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
    memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
    fill_reftable_log_record(&logs[logs_nr], &committer_ident);
    logs[logs_nr].refname = xstrdup(arg->newname);
    logs[logs_nr].update_index = deletion_ts;
    logs[logs_nr].value.update.message =
      xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
    memcpy(logs[logs_nr].value.update.old_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
    logs_nr++;

    ret = read_ref_without_reload(arg->refs, arg->stack, "HEAD", &head_oid,
                &head_referent, &head_type);
    if (ret < 0)
      goto done;
    append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
    strbuf_release(&head_referent);

    /*
     * The files backend uses `refs_delete_ref()` to delete the old
     * branch name, which will append a reflog entry for HEAD in
     * case it points to the old branch.
     */
    if (append_head_reflog) {
      ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
      logs[logs_nr] = logs[logs_nr - 1];
      logs[logs_nr].refname = xstrdup("HEAD");
      logs[logs_nr].value.update.name =
        xstrdup(logs[logs_nr].value.update.name);
      logs[logs_nr].value.update.email =
        xstrdup(logs[logs_nr].value.update.email);
      logs[logs_nr].value.update.message =
        xstrdup(logs[logs_nr].value.update.message);
      logs_nr++;
    }
  }

  /*
   * Create the reflog entry for the newly created branch.
   */
  ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
  memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
  fill_reftable_log_record(&logs[logs_nr], &committer_ident);
  logs[logs_nr].refname = xstrdup(arg->newname);
  logs[logs_nr].update_index = creation_ts;
  logs[logs_nr].value.update.message =
    xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
  memcpy(logs[logs_nr].value.update.new_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
  logs_nr++;

  /*
   * In addition to writing the reflog entry for the new branch, we also
   * copy over all log entries from the old reflog. Last but not least,
   * when renaming we also have to delete all the old reflog entries.
   */
  reftable_stack_init_log_iterator(arg->stack, &it);
  ret = reftable_iterator_seek_log(&it, arg->oldname);
  if (ret < 0)
    goto done;

  while (1) {
    ret = reftable_iterator_next_log(&it, &old_log);
    if (ret < 0)
      goto done;
    if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
      ret = 0;
      break;
    }

    free(old_log.refname);

    /*
     * Copy over the old reflog entry with the new refname.
     */
    ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
    logs[logs_nr] = old_log;
    logs[logs_nr].refname = xstrdup(arg->newname);
    logs_nr++;

    /*
     * Delete the old reflog entry in case we are renaming.
     */
    if (arg->delete_old) {
      ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
      memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
      logs[logs_nr].refname = xstrdup(arg->oldname);
      logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
      logs[logs_nr].update_index = old_log.update_index;
      logs_nr++;
    }

    /*
     * Transfer ownership of the log record we're iterating over to
     * the array of log records. Otherwise, the pointers would get
     * free'd or reallocated by the iterator.
     */
    memset(&old_log, 0, sizeof(old_log));
  }

  ret = reftable_writer_add_logs(writer, logs, logs_nr);
  if (ret < 0)
    goto done;

done:
  assert(ret != REFTABLE_API_ERROR);
  reftable_iterator_destroy(&it);
  string_list_clear(&skip, 0);
  strbuf_release(&errbuf);
  for (i = 0; i < logs_nr; i++)
    reftable_log_record_release(&logs[i]);
  free(logs);
  for (i = 0; i < ARRAY_SIZE(refs); i++)
    reftable_ref_record_release(&refs[i]);
  reftable_ref_record_release(&old_ref);
  reftable_log_record_release(&old_log);
  return ret;
}

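/*
 * Illustrative trace (assumed names and object IDs, not part of this file):
 * renaming refs/heads/a to refs/heads/b makes write_copy_table() emit, in a
 * single new table, roughly the following records:
 *
 *   ref  refs/heads/b = <oid>                       @creation_ts
 *   ref  refs/heads/a   (REFTABLE_REF_DELETION)     @deletion_ts
 *   log  refs/heads/b   deletion/recreation entry   @deletion_ts
 *   log  HEAD           same entry, if HEAD pointed at refs/heads/a
 *   log  refs/heads/b   creation entry              @creation_ts
 *   log  refs/heads/b   copies of all old entries
 *   log  refs/heads/a   tombstones for all old entries
 */
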
static int reftable_be_rename_ref(struct ref_store *ref_store,
          const char *oldrefname,
          const char *newrefname,
          const char *logmsg)
{
  struct reftable_ref_store *refs =
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
  struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
  struct write_copy_arg arg = {
    .refs = refs,
    .stack = stack,
    .oldname = oldrefname,
    .newname = newrefname,
    .logmsg = logmsg,
    .delete_old = 1,
  };
  int ret;

  ret = refs->err;
  if (ret < 0)
    goto done;

  ret = reftable_stack_reload(stack);
  if (ret)
    goto done;
  ret = reftable_stack_add(stack, &write_copy_table, &arg);

done:
  assert(ret != REFTABLE_API_ERROR);
  return ret;
}

static int reftable_be_copy_ref(struct ref_store *ref_store,
        const char *oldrefname,
        const char *newrefname,
        const char *logmsg)
{
  struct reftable_ref_store *refs =
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
  struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
  struct write_copy_arg arg = {
    .refs = refs,
    .stack = stack,
    .oldname = oldrefname,
    .newname = newrefname,
    .logmsg = logmsg,
  };
  int ret;

  ret = refs->err;
  if (ret < 0)
    goto done;

  ret = reftable_stack_reload(stack);
  if (ret)
    goto done;
  ret = reftable_stack_add(stack, &write_copy_table, &arg);

done:
  assert(ret != REFTABLE_API_ERROR);
  return ret;
}

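/*
 * Illustrative sketch (not part of this file): renaming and copying share
 * write_copy_table() and differ only in whether `delete_old` is set. Going
 * through the generic refs API, both paths are reached roughly like so,
 * assuming a repository whose ref storage uses the reftable backend:
 *
 *   struct ref_store *store = get_main_ref_store(the_repository);
 *
 *   // copies ref and reflog to the new name, then deletes the old name
 *   refs_rename_ref(store, "refs/heads/topic", "refs/heads/renamed",
 *                   "Branch: renamed topic to renamed");
 *
 *   // same table writer, but the old name stays in place
 *   refs_copy_existing_ref(store, "refs/heads/topic", "refs/heads/copy",
 *                          "Branch: copied topic to copy");
 */
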
struct reftable_reflog_iterator {
  struct ref_iterator base;
  struct reftable_ref_store *refs;
  struct reftable_iterator iter;
  struct reftable_log_record log;
  struct strbuf last_name;
  int err;
};

static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
{
  struct reftable_reflog_iterator *iter =
    (struct reftable_reflog_iterator *)ref_iterator;

  while (!iter->err) {
    iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
    if (iter->err)
      break;

    /*
     * We want the refnames that we have reflogs for, so we skip if
     * we've already produced this name. This could be faster by
     * seeking directly to reflog@update_index==0.
     */
    if (!strcmp(iter->log.refname, iter->last_name.buf))
      continue;

    if (check_refname_format(iter->log.refname,
           REFNAME_ALLOW_ONELEVEL))
      continue;

    strbuf_reset(&iter->last_name);
    strbuf_addstr(&iter->last_name, iter->log.refname);
    iter->base.refname = iter->log.refname;

    break;
  }

  if (iter->err > 0) {
    if (ref_iterator_abort(ref_iterator) != ITER_DONE)
      return ITER_ERROR;
    return ITER_DONE;
  }

  if (iter->err < 0) {
    ref_iterator_abort(ref_iterator);
    return ITER_ERROR;
  }

  return ITER_OK;
}

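/*
 * Illustrative trace (assumed data, not part of this file): log records are
 * keyed by refname and inverted update index, so all entries of one ref are
 * adjacent and newest-first, and the comparison against last_name above
 * yields every refname exactly once:
 *
 *   refs/heads/main  @3   -> yielded (differs from last_name)
 *   refs/heads/main  @2   -> skipped
 *   refs/heads/main  @1   -> skipped
 *   refs/heads/topic @1   -> yielded
 */
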
static int reftable_reflog_iterator_peel(struct ref_iterator *ref_iterator UNUSED,
           struct object_id *peeled UNUSED)
{
  BUG("reftable reflog iterator cannot be peeled");
  return -1;
}

static int reftable_reflog_iterator_abort(struct ref_iterator *ref_iterator)
{
  struct reftable_reflog_iterator *iter =
    (struct reftable_reflog_iterator *)ref_iterator;
  reftable_log_record_release(&iter->log);
  reftable_iterator_destroy(&iter->iter);
  strbuf_release(&iter->last_name);
  free(iter);
  return ITER_DONE;
}

static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
  .advance = reftable_reflog_iterator_advance,
  .peel = reftable_reflog_iterator_peel,
  .abort = reftable_reflog_iterator_abort
};

static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
                  struct reftable_stack *stack)
{
  struct reftable_reflog_iterator *iter;
  int ret;

  iter = xcalloc(1, sizeof(*iter));
  base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable);
  strbuf_init(&iter->last_name, 0);
  iter->refs = refs;

  ret = refs->err;
  if (ret)
    goto done;

  ret = reftable_stack_reload(stack);
  if (ret < 0)
    goto done;

  reftable_stack_init_log_iterator(stack, &iter->iter);
  ret = reftable_iterator_seek_log(&iter->iter, "");
  if (ret < 0)
    goto done;

done:
  iter->err = ret;
  return iter;
}

static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
{
  struct reftable_ref_store *refs =
    reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
  struct reftable_reflog_iterator *main_iter, *worktree_iter;

  main_iter = reflog_iterator_for_stack(refs, refs->main_stack);
  if (!refs->worktree_stack)
    return &main_iter->base;

  worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_stack);

  return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
          ref_iterator_select, NULL);
}

static int yield_log_record(struct reftable_ref_store *refs,
          struct reftable_log_record *log,
          each_reflog_ent_fn fn,
          void *cb_data)
{
  struct object_id old_oid, new_oid;
  const char *full_committer;

  oidread(&old_oid, log->value.update.old_hash, refs->base.repo->hash_algo);
  oidread(&new_oid, log->value.update.new_hash, refs->base.repo->hash_algo);

  /*
   * When both the old object ID and the new object ID are null
   * then this is the reflog existence marker. The caller must
   * not be aware of it.
   */
  if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
    return 0;

  full_committer = fmt_ident(log->value.update.name, log->value.update.email,
           WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
  return fn(&old_oid, &new_oid, full_committer,
      log->value.update.time, log->value.update.tz_offset,
      log->value.update.message, cb_data);
}

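/*
 * Illustrative sketch (hypothetical callback, not part of this file): a
 * minimal each_reflog_ent_fn, as invoked by yield_log_record() above, could
 * look like this:
 *
 *   static int show_ent(struct object_id *old_oid, struct object_id *new_oid,
 *                       const char *committer, timestamp_t timestamp,
 *                       int tz, const char *msg, void *cb_data)
 *   {
 *     printf("%s -> %s by %s: %s", oid_to_hex(old_oid),
 *            oid_to_hex(new_oid), committer, msg);
 *     return 0; // non-zero stops the iteration
 *   }
 */
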
static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
               const char *refname,
               each_reflog_ent_fn fn,
               void *cb_data)
{
  struct reftable_ref_store *refs =
    reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
  struct reftable_stack *stack = stack_for(refs, refname, &refname);
  struct reftable_log_record log = {0};
  struct reftable_iterator it = {0};
  int ret;

  if (refs->err < 0)
    return refs->err;

  reftable_stack_init_log_iterator(stack, &it);
  ret = reftable_iterator_seek_log(&it, refname);
  while (!ret) {
    ret = reftable_iterator_next_log(&it, &log);
    if (ret < 0)
      break;
    if (ret > 0 || strcmp(log.refname, refname)) {
      ret = 0;
      break;
    }

    ret = yield_log_record(refs, &log, fn, cb_data);
    if (ret)
      break;
  }

  reftable_log_record_release(&log);
  reftable_iterator_destroy(&it);
  return ret;
}

static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
             const char *refname,
             each_reflog_ent_fn fn,
             void *cb_data)
{
  struct reftable_ref_store *refs =
    reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
  struct reftable_stack *stack = stack_for(refs, refname, &refname);
  struct reftable_log_record *logs = NULL;
  struct reftable_iterator it = {0};
  size_t logs_alloc = 0, logs_nr = 0, i;
  int ret;

  if (refs->err < 0)
    return refs->err;

  reftable_stack_init_log_iterator(stack, &it);
  ret = reftable_iterator_seek_log(&it, refname);
  while (!ret) {
    struct reftable_log_record log = {0};

    ret = reftable_iterator_next_log(&it, &log);
    if (ret < 0)
      goto done;
    if (ret > 0 || strcmp(log.refname, refname)) {
      reftable_log_record_release(&log);
      ret = 0;
      break;
    }

    ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
    logs[logs_nr++] = log;
  }

  for (i = logs_nr; i--;) {
    ret = yield_log_record(refs, &logs[i], fn, cb_data);
    if (ret)
      goto done;
  }

done:
  reftable_iterator_destroy(&it);
  for (i = 0; i < logs_nr; i++)
    reftable_log_record_release(&logs[i]);
  free(logs);
  return ret;
}

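/*
 * Note: reftable yields log records newest-first, so the "reverse"
 * (newest-to-oldest) variant above streams records directly, while the
 * chronological variant has to buffer every record and replay the array
 * backwards. Illustrative usage through the generic API (reusing the
 * hypothetical show_ent callback from above, not part of this file):
 *
 *   refs_for_each_reflog_ent(store, "HEAD", show_ent, NULL);         // oldest first
 *   refs_for_each_reflog_ent_reverse(store, "HEAD", show_ent, NULL); // newest first
 */
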
static int reftable_be_reflog_exists(struct ref_store *ref_store,
             const char *refname)
{
  struct reftable_ref_store *refs =
    reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
  struct reftable_stack *stack = stack_for(refs, refname, &refname);
  struct reftable_log_record log = {0};
  struct reftable_iterator it = {0};
  int ret;

  ret = refs->err;
  if (ret < 0)
    goto done;

  ret = reftable_stack_reload(stack);
  if (ret < 0)
    goto done;

  reftable_stack_init_log_iterator(stack, &it);
  ret = reftable_iterator_seek_log(&it, refname);
  if (ret < 0)
    goto done;

  /*
   * Check whether we get at least one log record for the given ref name.
   * If so, the reflog exists, otherwise it doesn't.
   */
  ret = reftable_iterator_next_log(&it, &log);
  if (ret < 0)
    goto done;
  if (ret > 0) {
    ret = 0;
    goto done;
  }

  ret = strcmp(log.refname, refname) == 0;

done:
  reftable_iterator_destroy(&it);
  reftable_log_record_release(&log);
  if (ret < 0)
    ret = 0;
  return ret;
}

struct write_reflog_existence_arg {
  struct reftable_ref_store *refs;
  const char *refname;
  struct reftable_stack *stack;
};

static int write_reflog_existence_table(struct reftable_writer *writer,
          void *cb_data)
{
  struct write_reflog_existence_arg *arg = cb_data;
  uint64_t ts = reftable_stack_next_update_index(arg->stack);
  struct reftable_log_record log = {0};
  int ret;

  ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
  if (ret <= 0)
    goto done;

  reftable_writer_set_limits(writer, ts, ts);

  /*
   * The existence entry has both old and new object ID set to the
   * null object ID. Our iterators are aware of this and will not
   * present it to their callers.
   */
  log.refname = xstrdup(arg->refname);
  log.update_index = ts;
  log.value_type = REFTABLE_LOG_UPDATE;
  ret = reftable_writer_add_log(writer, &log);

done:
  assert(ret != REFTABLE_API_ERROR);
  reftable_log_record_release(&log);
  return ret;
}

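/*
 * Illustrative sketch (assumed refname and timestamp, not part of this
 * file): the existence marker written above is an otherwise-empty update
 * whose old and new hashes stay zeroed, i.e. the null object ID on both
 * sides:
 *
 *   struct reftable_log_record marker = {
 *     .refname = (char *)"refs/heads/topic",
 *     .update_index = ts,
 *     .value_type = REFTABLE_LOG_UPDATE,
 *     // old_hash and new_hash left zeroed
 *   };
 *
 * yield_log_record() recognizes this shape and filters it out, so readers
 * only ever see real reflog entries.
 */
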
static int reftable_be_create_reflog(struct ref_store *ref_store,
             const char *refname,
             struct strbuf *errmsg UNUSED)
{
  struct reftable_ref_store *refs =
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
  struct reftable_stack *stack = stack_for(refs, refname, &refname);
  struct write_reflog_existence_arg arg = {
    .refs = refs,
    .stack = stack,
    .refname = refname,
  };
  int ret;

  ret = refs->err;
  if (ret < 0)
    goto done;

  ret = reftable_stack_reload(stack);
  if (ret)
    goto done;

  ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);

done:
  return ret;
}

struct write_reflog_delete_arg {
  struct reftable_stack *stack;
  const char *refname;
};

static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
{
  struct write_reflog_delete_arg *arg = cb_data;
  struct reftable_log_record log = {0}, tombstone = {0};
  struct reftable_iterator it = {0};
  uint64_t ts = reftable_stack_next_update_index(arg->stack);
  int ret;

  reftable_writer_set_limits(writer, ts, ts);

  reftable_stack_init_log_iterator(arg->stack, &it);

  /*
   * In order to delete a reflog we need to delete all of its entries one
   * by one. This is inefficient, but the reftable format does not have a
   * better marker right now.
   */
  ret = reftable_iterator_seek_log(&it, arg->refname);
  while (ret == 0) {
    ret = reftable_iterator_next_log(&it, &log);
    if (ret < 0)
      break;
    if (ret > 0 || strcmp(log.refname, arg->refname)) {
      ret = 0;
      break;
    }

    tombstone.refname = (char *)arg->refname;
    tombstone.value_type = REFTABLE_LOG_DELETION;
    tombstone.update_index = log.update_index;

    ret = reftable_writer_add_log(writer, &tombstone);
  }

  reftable_log_record_release(&log);
  reftable_iterator_destroy(&it);
  return ret;
}

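/*
 * Illustrative trace (assumed data, not part of this file): deleting the
 * reflog of refs/heads/topic with entries at update indices 1..3 appends
 * one tombstone per entry:
 *
 *   log  refs/heads/topic @3  (REFTABLE_LOG_DELETION)
 *   log  refs/heads/topic @2  (REFTABLE_LOG_DELETION)
 *   log  refs/heads/topic @1  (REFTABLE_LOG_DELETION)
 *
 * Each tombstone shadows the record with the same refname and update index
 * in older tables of the stack; the space is only reclaimed once the stack
 * is compacted.
 */
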
static int reftable_be_delete_reflog(struct ref_store *ref_store,
             const char *refname)
{
  struct reftable_ref_store *refs =
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
  struct reftable_stack *stack = stack_for(refs, refname, &refname);
  struct write_reflog_delete_arg arg = {
    .stack = stack,
    .refname = refname,
  };
  int ret;

  ret = reftable_stack_reload(stack);
  if (ret)
    return ret;
  ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);

  assert(ret != REFTABLE_API_ERROR);
  return ret;
}

struct reflog_expiry_arg {
  struct reftable_ref_store *refs;
  struct reftable_stack *stack;
  struct reftable_log_record *records;
  struct object_id update_oid;
  const char *refname;
  size_t len;
};

static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
{
  struct reflog_expiry_arg *arg = cb_data;
  uint64_t ts = reftable_stack_next_update_index(arg->stack);
  uint64_t live_records = 0;
  size_t i;
  int ret;

  for (i = 0; i < arg->len; i++)
    if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
      live_records++;

  reftable_writer_set_limits(writer, ts, ts);

  if (!is_null_oid(&arg->update_oid)) {
    struct reftable_ref_record ref = {0};
    struct object_id peeled;

    ref.refname = (char *)arg->refname;
    ref.update_index = ts;

    if (!peel_object(arg->refs->base.repo, &arg->update_oid, &peeled)) {
      ref.value_type = REFTABLE_REF_VAL2;
      memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
      memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
    } else {
      ref.value_type = REFTABLE_REF_VAL1;
      memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
    }

    ret = reftable_writer_add_ref(writer, &ref);
    if (ret < 0)
      return ret;
  }

  /*
   * When there are no more entries left in the reflog we empty it
   * completely, but write a placeholder reflog entry that indicates that
   * the reflog still exists.
   */
  if (!live_records) {
    struct reftable_log_record log = {
      .refname = (char *)arg->refname,
      .value_type = REFTABLE_LOG_UPDATE,
      .update_index = ts,
    };

    ret = reftable_writer_add_log(writer, &log);
    if (ret)
      return ret;
  }

  for (i = 0; i < arg->len; i++) {
    ret = reftable_writer_add_log(writer, &arg->records[i]);
    if (ret)
      return ret;
  }

  return 0;
}

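/*
 * Worked example (assumed object IDs, not part of this file) for the
 * rewrite pass in reftable_be_reflog_expire() below: given a reflog chain
 * A->B, B->C, C->D where the middle entry is pruned and
 * EXPIRE_REFLOGS_REWRITE is set, the survivors are stitched back together
 * so that each entry's old hash matches its predecessor's new hash:
 *
 *   before:  A->B    B->C    C->D
 *   after:   A->B   (tombstone)   B->D
 */
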
static int reftable_be_reflog_expire(struct ref_store *ref_store,
             const char *refname,
             unsigned int flags,
             reflog_expiry_prepare_fn prepare_fn,
             reflog_expiry_should_prune_fn should_prune_fn,
             reflog_expiry_cleanup_fn cleanup_fn,
             void *policy_cb_data)
{
  /*
   * For log expiry, we write tombstones for every single reflog entry
   * that is to be expired. This means that the entries are still
   * retrievable by delving into the stack, and expiring entries
   * paradoxically takes extra memory. This memory is only reclaimed when
   * compacting the reftable stack.
   *
   * It would be better if the refs backend supported an API that sets a
   * criterion for all refs, passing the criterion to pack_refs().
   *
   * On the plus side, because we do the expiration per ref, we can easily
   * insert the reflog existence dummies.
   */
  struct reftable_ref_store *refs =
    reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
  struct reftable_stack *stack = stack_for(refs, refname, &refname);
  struct reftable_log_record *logs = NULL;
  struct reftable_log_record *rewritten = NULL;
  struct reftable_ref_record ref_record = {0};
  struct reftable_iterator it = {0};
  struct reftable_addition *add = NULL;
  struct reflog_expiry_arg arg = {0};
  struct object_id oid = {0};
  uint8_t *last_hash = NULL;
  size_t logs_nr = 0, logs_alloc = 0, i;
  int ret;

  if (refs->err < 0)
    return refs->err;

  ret = reftable_stack_reload(stack);
  if (ret < 0)
    goto done;

  reftable_stack_init_log_iterator(stack, &it);

  ret = reftable_iterator_seek_log(&it, refname);
  if (ret < 0)
    goto done;

  ret = reftable_stack_new_addition(&add, stack);
  if (ret < 0)
    goto done;

  ret = reftable_stack_read_ref(stack, refname, &ref_record);
  if (ret < 0)
    goto done;
  if (reftable_ref_record_val1(&ref_record))
    oidread(&oid, reftable_ref_record_val1(&ref_record),
      ref_store->repo->hash_algo);
  prepare_fn(refname, &oid, policy_cb_data);

  while (1) {
    struct reftable_log_record log = {0};
    struct object_id old_oid, new_oid;

    ret = reftable_iterator_next_log(&it, &log);
    if (ret < 0)
      goto done;
    if (ret > 0 || strcmp(log.refname, refname)) {
      reftable_log_record_release(&log);
      break;
    }

    oidread(&old_oid, log.value.update.old_hash,
      ref_store->repo->hash_algo);
    oidread(&new_oid, log.value.update.new_hash,
      ref_store->repo->hash_algo);

    /*
     * Skip over the reflog existence marker. We will add it back
     * in when there are no live reflog records.
     */
    if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
      reftable_log_record_release(&log);
      continue;
    }

    ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
    logs[logs_nr++] = log;
  }

  /*
   * We need to rewrite all reflog entries according to the pruning
   * callback function:
   *
   *   - If a reflog entry shall be pruned we mark the record for
   *     deletion.
   *
   *   - Otherwise we may have to rewrite the chain of reflog entries so
   *     that gaps created by just-deleted records get backfilled.
   */
  CALLOC_ARRAY(rewritten, logs_nr);
  for (i = logs_nr; i--;) {
    struct reftable_log_record *dest = &rewritten[i];
    struct object_id old_oid, new_oid;

    *dest = logs[i];
    oidread(&old_oid, logs[i].value.update.old_hash,
      ref_store->repo->hash_algo);
    oidread(&new_oid, logs[i].value.update.new_hash,
      ref_store->repo->hash_algo);

    if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
            (timestamp_t)logs[i].value.update.time,
            logs[i].value.update.tz_offset,
            logs[i].value.update.message,
            policy_cb_data)) {
      dest->value_type = REFTABLE_LOG_DELETION;
    } else {
      if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
        memcpy(dest->value.update.old_hash, last_hash, GIT_MAX_RAWSZ);
      last_hash = logs[i].value.update.new_hash;
    }
  }

  if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash &&
      reftable_ref_record_val1(&ref_record))
    oidread(&arg.update_oid, last_hash, ref_store->repo->hash_algo);

  arg.refs = refs;
  arg.records = rewritten;
  arg.len = logs_nr;
  arg.stack = stack;
  arg.refname = refname;

  ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
  if (ret < 0)
    goto done;

  /*
   * Future improvement: we could skip writing records that were
   * not changed.
   */
  if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
    ret = reftable_addition_commit(add);

done:
  if (add)
    cleanup_fn(policy_cb_data);
  assert(ret != REFTABLE_API_ERROR);

  reftable_ref_record_release(&ref_record);
  reftable_iterator_destroy(&it);
  reftable_addition_destroy(add);
  for (i = 0; i < logs_nr; i++)
    reftable_log_record_release(&logs[i]);
  free(logs);
  free(rewritten);
  return ret;
}

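/*
 * Illustrative sketch (hypothetical policy, not part of this file): a
 * reflog_expiry_should_prune_fn that expires every entry older than a
 * cutoff passed through policy_cb_data:
 *
 *   static int prune_older_than(struct object_id *old_oid,
 *                               struct object_id *new_oid,
 *                               const char *email, timestamp_t timestamp,
 *                               int tz, const char *message, void *cb_data)
 *   {
 *     timestamp_t cutoff = *(timestamp_t *)cb_data;
 *     return timestamp < cutoff; // non-zero means "prune this entry"
 *   }
 */
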
static int reftable_be_fsck(struct ref_store *ref_store UNUSED,
          struct fsck_options *o UNUSED)
{
  return 0;
}

struct ref_storage_be refs_be_reftable = {
  .name = "reftable",
  .init = reftable_be_init,
  .release = reftable_be_release,
  .create_on_disk = reftable_be_create_on_disk,
  .remove_on_disk = reftable_be_remove_on_disk,

  .transaction_prepare = reftable_be_transaction_prepare,
  .transaction_finish = reftable_be_transaction_finish,
  .transaction_abort = reftable_be_transaction_abort,
  .initial_transaction_commit = reftable_be_initial_transaction_commit,

  .pack_refs = reftable_be_pack_refs,
  .rename_ref = reftable_be_rename_ref,
  .copy_ref = reftable_be_copy_ref,

  .iterator_begin = reftable_be_iterator_begin,
  .read_raw_ref = reftable_be_read_raw_ref,
  .read_symbolic_ref = reftable_be_read_symbolic_ref,

  .reflog_iterator_begin = reftable_be_reflog_iterator_begin,
  .for_each_reflog_ent = reftable_be_for_each_reflog_ent,
  .for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
  .reflog_exists = reftable_be_reflog_exists,
  .create_reflog = reftable_be_create_reflog,
  .delete_reflog = reftable_be_delete_reflog,
  .reflog_expire = reftable_be_reflog_expire,

  .fsck = reftable_be_fsck,
};