/src/git/refs/reftable-backend.c
Line | Count | Source |
1 | | #define USE_THE_REPOSITORY_VARIABLE |
2 | | |
3 | | #include "../git-compat-util.h" |
4 | | #include "../abspath.h" |
5 | | #include "../chdir-notify.h" |
6 | | #include "../config.h" |
7 | | #include "../dir.h" |
8 | | #include "../environment.h" |
9 | | #include "../fsck.h" |
10 | | #include "../gettext.h" |
11 | | #include "../hash.h" |
12 | | #include "../hex.h" |
13 | | #include "../ident.h" |
14 | | #include "../iterator.h" |
15 | | #include "../object.h" |
16 | | #include "../parse.h" |
17 | | #include "../path.h" |
18 | | #include "../refs.h" |
19 | | #include "../reftable/reftable-basics.h" |
20 | | #include "../reftable/reftable-error.h" |
21 | | #include "../reftable/reftable-fsck.h" |
22 | | #include "../reftable/reftable-iterator.h" |
23 | | #include "../reftable/reftable-record.h" |
24 | | #include "../reftable/reftable-stack.h" |
25 | | #include "../repo-settings.h" |
26 | | #include "../setup.h" |
27 | | #include "../strmap.h" |
28 | | #include "../trace2.h" |
29 | | #include "../worktree.h" |
30 | | #include "../write-or-die.h" |
31 | | #include "refs-internal.h" |
32 | | |
33 | | /* |
34 | | * Used as a flag in ref_update::flags when the ref_update was via an |
35 | | * update to HEAD. |
36 | | */ |
37 | 0 | #define REF_UPDATE_VIA_HEAD (1 << 8) |
38 | | |
39 | | struct reftable_backend { |
40 | | struct reftable_stack *stack; |
41 | | struct reftable_iterator it; |
42 | | }; |
43 | | |
44 | | static void reftable_backend_on_reload(void *payload) |
45 | 0 | { |
46 | 0 | struct reftable_backend *be = payload; |
47 | 0 | reftable_iterator_destroy(&be->it); |
48 | 0 | } |
49 | | |
50 | | static int reftable_backend_init(struct reftable_backend *be, |
51 | | const char *path, |
52 | | const struct reftable_write_options *_opts) |
53 | 0 | { |
54 | 0 | struct reftable_write_options opts = *_opts; |
55 | 0 | opts.on_reload = reftable_backend_on_reload; |
56 | 0 | opts.on_reload_payload = be; |
57 | 0 | return reftable_new_stack(&be->stack, path, &opts); |
58 | 0 | } |
59 | | |
60 | | static void reftable_backend_release(struct reftable_backend *be) |
61 | 0 | { |
62 | 0 | reftable_stack_destroy(be->stack); |
63 | 0 | be->stack = NULL; |
64 | 0 | reftable_iterator_destroy(&be->it); |
65 | 0 | } |
66 | | |
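/*
 * Read a single reference from the given backend. As used by the callers in
 * this file: returns 0 when the reference was found, a value > 0 when no
 * record with the exact name exists, and a negative reftable error code on
 * failure.
 */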
67 | | static int reftable_backend_read_ref(struct reftable_backend *be, |
68 | | const char *refname, |
69 | | struct object_id *oid, |
70 | | struct strbuf *referent, |
71 | | unsigned int *type) |
72 | 0 | { |
73 | 0 | struct reftable_ref_record ref = {0}; |
74 | 0 | int ret; |
75 | |
76 | 0 | if (!be->it.ops) { |
77 | 0 | ret = reftable_stack_init_ref_iterator(be->stack, &be->it); |
78 | 0 | if (ret) |
79 | 0 | goto done; |
80 | 0 | } |
81 | | |
82 | 0 | ret = reftable_iterator_seek_ref(&be->it, refname); |
83 | 0 | if (ret) |
84 | 0 | goto done; |
85 | | |
86 | 0 | ret = reftable_iterator_next_ref(&be->it, &ref); |
87 | 0 | if (ret) |
88 | 0 | goto done; |
89 | | |
90 | 0 | if (strcmp(ref.refname, refname)) { |
91 | 0 | ret = 1; |
92 | 0 | goto done; |
93 | 0 | } |
94 | | |
95 | 0 | if (ref.value_type == REFTABLE_REF_SYMREF) { |
96 | 0 | strbuf_reset(referent); |
97 | 0 | strbuf_addstr(referent, ref.value.symref); |
98 | 0 | *type |= REF_ISSYMREF; |
99 | 0 | } else if (reftable_ref_record_val1(&ref)) { |
100 | 0 | unsigned int hash_id; |
101 | |
102 | 0 | switch (reftable_stack_hash_id(be->stack)) { |
103 | 0 | case REFTABLE_HASH_SHA1: |
104 | 0 | hash_id = GIT_HASH_SHA1; |
105 | 0 | break; |
106 | 0 | case REFTABLE_HASH_SHA256: |
107 | 0 | hash_id = GIT_HASH_SHA256; |
108 | 0 | break; |
109 | 0 | default: |
110 | 0 | BUG("unhandled hash ID %d", reftable_stack_hash_id(be->stack)); |
111 | 0 | } |
112 | | |
113 | 0 | oidread(oid, reftable_ref_record_val1(&ref), |
114 | 0 | &hash_algos[hash_id]); |
115 | 0 | } else { |
116 | | /* We got a tombstone, which should not happen. */ |
117 | 0 | BUG("unhandled reference value type %d", ref.value_type); |
118 | 0 | } |
119 | | |
120 | 0 | done: |
121 | 0 | assert(ret != REFTABLE_API_ERROR); |
122 | 0 | reftable_ref_record_release(&ref); |
123 | 0 | return ret; |
124 | 0 | } |
125 | | |
126 | | struct reftable_ref_store { |
127 | | struct ref_store base; |
128 | | |
129 | | /* |
130 | | * The main backend refers to the common dir and thus contains common |
131 | | * refs as well as refs of the main repository. |
132 | | */ |
133 | | struct reftable_backend main_backend; |
134 | | /* |
135 | | * The worktree backend refers to the gitdir in case the refdb is opened |
136 | | * via a worktree. It thus contains the per-worktree refs. |
137 | | */ |
138 | | struct reftable_backend worktree_backend; |
139 | | /* |
140 | | * Map of worktree backends by their respective worktree names. The map |
141 | | * is populated lazily when we try to resolve `worktrees/$worktree` refs. |
142 | | */ |
143 | | struct strmap worktree_backends; |
144 | | struct reftable_write_options write_options; |
145 | | |
146 | | unsigned int store_flags; |
147 | | enum log_refs_config log_all_ref_updates; |
148 | | int err; |
149 | | }; |
150 | | |
151 | | /* |
152 | | * Downcast ref_store to reftable_ref_store. Die if ref_store is not a |
153 | | * reftable_ref_store. required_flags is compared with ref_store's store_flags |
154 | | * to ensure the ref_store has all required capabilities. "caller" is used in |
155 | | * any necessary error messages. |
156 | | */ |
157 | | static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store, |
158 | | unsigned int required_flags, |
159 | | const char *caller) |
160 | 0 | { |
161 | 0 | struct reftable_ref_store *refs; |
162 | |
163 | 0 | if (ref_store->be != &refs_be_reftable) |
164 | 0 | BUG("ref_store is type \"%s\" not \"reftables\" in %s", |
165 | 0 | ref_store->be->name, caller); |
166 | | |
167 | 0 | refs = (struct reftable_ref_store *)ref_store; |
168 | |
169 | 0 | if ((refs->store_flags & required_flags) != required_flags) |
170 | 0 | BUG("operation %s requires abilities 0x%x, but only have 0x%x", |
171 | 0 | caller, required_flags, refs->store_flags); |
172 | | |
173 | 0 | return refs; |
174 | 0 | } |
175 | | |
176 | | static int backend_for_worktree(struct reftable_backend **out, |
177 | | struct reftable_ref_store *store, |
178 | | const char *worktree_name) |
179 | 0 | { |
180 | 0 | struct strbuf worktree_dir = STRBUF_INIT; |
181 | 0 | int ret; |
182 | |
183 | 0 | *out = strmap_get(&store->worktree_backends, worktree_name); |
184 | 0 | if (*out) { |
185 | 0 | ret = 0; |
186 | 0 | goto out; |
187 | 0 | } |
188 | | |
189 | 0 | strbuf_addf(&worktree_dir, "%s/worktrees/%s/reftable", |
190 | 0 | store->base.repo->commondir, worktree_name); |
191 | |
192 | 0 | CALLOC_ARRAY(*out, 1); |
193 | 0 | store->err = ret = reftable_backend_init(*out, worktree_dir.buf, |
194 | 0 | &store->write_options); |
195 | 0 | if (ret < 0) { |
196 | 0 | free(*out); |
197 | 0 | goto out; |
198 | 0 | } |
199 | | |
200 | 0 | strmap_put(&store->worktree_backends, worktree_name, *out); |
201 | |
202 | 0 | out: |
203 | 0 | strbuf_release(&worktree_dir); |
204 | 0 | return ret; |
205 | 0 | } |
206 | | |
207 | | /* |
208 | | * Some refs are global to the repository (refs/heads/{*}), while others are |
209 | | * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having |
210 | | * multiple separate databases (ie. multiple reftable/ directories), one for |
211 | | * the shared refs, one for the current worktree refs, and one for each |
212 | | * additional worktree. For reading, we merge the view of both the shared and |
213 | | * the current worktree's refs, when necessary. |
214 | | * |
215 | | * This function also optionally assigns the rewritten reference name that is |
216 | | * local to the stack. This translation is required when using worktree refs |
217 | | * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store |
218 | | * those references in their normalized form. |
219 | | */ |
220 | | static int backend_for(struct reftable_backend **out, |
221 | | struct reftable_ref_store *store, |
222 | | const char *refname, |
223 | | const char **rewritten_ref, |
224 | | int reload) |
225 | 0 | { |
226 | 0 | const char *wtname; |
227 | 0 | int wtname_len; |
228 | 0 | int ret; |
229 | |
230 | 0 | if (!refname) { |
231 | 0 | *out = &store->main_backend; |
232 | 0 | ret = 0; |
233 | 0 | goto out; |
234 | 0 | } |
235 | | |
236 | 0 | switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) { |
237 | 0 | case REF_WORKTREE_OTHER: { |
238 | 0 | static struct strbuf wtname_buf = STRBUF_INIT; |
239 | | |
240 | | /* |
241 | | * We're using a static buffer here so that we don't need to |
242 | | * allocate the worktree name whenever we look up a reference. |
243 | | * This could be avoided if the strmap interface knew how to |
244 | | * handle keys with a length. |
245 | | */ |
246 | 0 | strbuf_reset(&wtname_buf); |
247 | 0 | strbuf_add(&wtname_buf, wtname, wtname_len); |
248 | | |
249 | | /* |
250 | | * There is an edge case here: when the worktree references the |
251 | | * current worktree, then we set up the stack once via |
252 | | * `worktree_backends` and once via `worktree_backend`. This is |
253 | | * wasteful, but in the reading case it shouldn't matter. And |
254 | | * in the writing case we would notice that the stack is locked |
255 | | * already and error out when trying to write a reference via |
256 | | * both stacks. |
257 | | */ |
258 | 0 | ret = backend_for_worktree(out, store, wtname_buf.buf); |
259 | |
260 | 0 | goto out; |
261 | 0 | } |
262 | 0 | case REF_WORKTREE_CURRENT: |
263 | | /* |
264 | | * If there is no worktree stack then we're currently in the |
265 | | * main worktree. We thus return the main stack in that case. |
266 | | */ |
267 | 0 | if (!store->worktree_backend.stack) |
268 | 0 | *out = &store->main_backend; |
269 | 0 | else |
270 | 0 | *out = &store->worktree_backend; |
271 | 0 | ret = 0; |
272 | 0 | goto out; |
273 | 0 | case REF_WORKTREE_MAIN: |
274 | 0 | case REF_WORKTREE_SHARED: |
275 | 0 | *out = &store->main_backend; |
276 | 0 | ret = 0; |
277 | 0 | goto out; |
278 | 0 | default: |
279 | 0 | BUG("unhandled worktree reference type"); |
280 | 0 | } |
281 | | |
282 | 0 | out: |
283 | 0 | if (reload && !ret) |
284 | 0 | ret = reftable_stack_reload((*out)->stack); |
285 | 0 | return ret; |
286 | 0 | } |
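/*
 * Worked example for backend_for(), added for illustration only (the
 * worktree name "wt" and the ref names are made up):
 *
 *   "refs/heads/main"               -> main_backend (shared ref)
 *   "HEAD"                          -> worktree_backend, or main_backend
 *                                      when no worktree stack exists
 *   "worktrees/wt/refs/bisect/bad"  -> lazily-created backend for worktree
 *                                      "wt"; *rewritten_ref is set to the
 *                                      normalized name "refs/bisect/bad"
 */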
287 | | |
288 | | static int should_write_log(struct reftable_ref_store *refs, const char *refname) |
289 | 0 | { |
290 | 0 | enum log_refs_config log_refs_cfg = refs->log_all_ref_updates; |
291 | 0 | if (log_refs_cfg == LOG_REFS_UNSET) |
292 | 0 | log_refs_cfg = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL; |
293 | |
294 | 0 | switch (log_refs_cfg) { |
295 | 0 | case LOG_REFS_NONE: |
296 | 0 | return refs_reflog_exists(&refs->base, refname); |
297 | 0 | case LOG_REFS_ALWAYS: |
298 | 0 | return 1; |
299 | 0 | case LOG_REFS_NORMAL: |
300 | 0 | if (should_autocreate_reflog(log_refs_cfg, refname)) |
301 | 0 | return 1; |
302 | 0 | return refs_reflog_exists(&refs->base, refname); |
303 | 0 | default: |
304 | 0 | BUG("unhandled core.logAllRefUpdates value %d", log_refs_cfg); |
305 | 0 | } |
306 | 0 | } |
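/*
 * Illustration of the fallback above, added for clarity (ref names are
 * examples): with core.logAllRefUpdates unset, a non-bare repository acts
 * like LOG_REFS_NORMAL, so a branch like "refs/heads/topic" gets its reflog
 * created automatically while "refs/tags/v1.0" is only logged if a reflog
 * for it already exists; a bare repository falls back to LOG_REFS_NONE and
 * only writes to pre-existing reflogs.
 */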
307 | | |
308 | | static void fill_reftable_log_record(struct reftable_log_record *log, const struct ident_split *split) |
309 | 0 | { |
310 | 0 | const char *tz_begin; |
311 | 0 | int sign = 1; |
312 | |
313 | 0 | reftable_log_record_release(log); |
314 | 0 | log->value_type = REFTABLE_LOG_UPDATE; |
315 | 0 | log->value.update.name = |
316 | 0 | xstrndup(split->name_begin, split->name_end - split->name_begin); |
317 | 0 | log->value.update.email = |
318 | 0 | xstrndup(split->mail_begin, split->mail_end - split->mail_begin); |
319 | 0 | log->value.update.time = atol(split->date_begin); |
320 | |
321 | 0 | tz_begin = split->tz_begin; |
322 | 0 | if (*tz_begin == '-') { |
323 | 0 | sign = -1; |
324 | 0 | tz_begin++; |
325 | 0 | } |
326 | 0 | if (*tz_begin == '+') { |
327 | 0 | sign = 1; |
328 | 0 | tz_begin++; |
329 | 0 | } |
330 | |
331 | 0 | log->value.update.tz_offset = sign * atoi(tz_begin); |
332 | 0 | } |
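/*
 * Example of the timezone parsing above, added for illustration: an ident
 * ending in "+0200" yields tz_offset == 200 and "-0730" yields
 * tz_offset == -730, i.e. the offset is kept in the reflog's HHMM form
 * rather than being converted to minutes.
 */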
333 | | |
334 | | static int reftable_be_config(const char *var, const char *value, |
335 | | const struct config_context *ctx, |
336 | | void *_opts) |
337 | 0 | { |
338 | 0 | struct reftable_write_options *opts = _opts; |
339 | |
340 | 0 | if (!strcmp(var, "reftable.blocksize")) { |
341 | 0 | unsigned long block_size = git_config_ulong(var, value, ctx->kvi); |
342 | 0 | if (block_size > 16777215) |
343 | 0 | die("reftable block size cannot exceed 16MB"); |
344 | 0 | opts->block_size = block_size; |
345 | 0 | } else if (!strcmp(var, "reftable.restartinterval")) { |
346 | 0 | unsigned long restart_interval = git_config_ulong(var, value, ctx->kvi); |
347 | 0 | if (restart_interval > UINT16_MAX) |
348 | 0 | die("reftable block size cannot exceed %u", (unsigned)UINT16_MAX); |
349 | 0 | opts->restart_interval = restart_interval; |
350 | 0 | } else if (!strcmp(var, "reftable.indexobjects")) { |
351 | 0 | opts->skip_index_objects = !git_config_bool(var, value); |
352 | 0 | } else if (!strcmp(var, "reftable.geometricfactor")) { |
353 | 0 | unsigned long factor = git_config_ulong(var, value, ctx->kvi); |
354 | 0 | if (factor > UINT8_MAX) |
355 | 0 | die("reftable geometric factor cannot exceed %u", (unsigned)UINT8_MAX); |
356 | 0 | opts->auto_compaction_factor = factor; |
357 | 0 | } else if (!strcmp(var, "reftable.locktimeout")) { |
358 | 0 | int64_t lock_timeout = git_config_int64(var, value, ctx->kvi); |
359 | 0 | if (lock_timeout > LONG_MAX) |
360 | 0 | die("reftable lock timeout cannot exceed %"PRIdMAX, (intmax_t)LONG_MAX); |
361 | 0 | if (lock_timeout < 0 && lock_timeout != -1) |
362 | 0 | die("reftable lock timeout does not support negative values other than -1"); |
363 | 0 | opts->lock_timeout_ms = lock_timeout; |
364 | 0 | } |
365 | | |
366 | 0 | return 0; |
367 | 0 | } |
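/*
 * For illustration only (values are arbitrary examples, not defaults), the
 * keys parsed above correspond to repository configuration such as:
 *
 *   [reftable]
 *       blockSize = 8192
 *       restartInterval = 16
 *       indexObjects = true
 *       geometricFactor = 2
 *       lockTimeout = 100
 */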
368 | | |
369 | | static int reftable_be_fsync(int fd) |
370 | 0 | { |
371 | 0 | return fsync_component(FSYNC_COMPONENT_REFERENCE, fd); |
372 | 0 | } |
373 | | |
374 | | static struct ref_store *reftable_be_init(struct repository *repo, |
375 | | const char *gitdir, |
376 | | unsigned int store_flags) |
377 | 0 | { |
378 | 0 | struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs)); |
379 | 0 | struct strbuf path = STRBUF_INIT; |
380 | 0 | int is_worktree; |
381 | 0 | mode_t mask; |
382 | |
383 | 0 | mask = umask(0); |
384 | 0 | umask(mask); |
385 | |
386 | 0 | base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable); |
387 | 0 | strmap_init(&refs->worktree_backends); |
388 | 0 | refs->store_flags = store_flags; |
389 | 0 | refs->log_all_ref_updates = repo_settings_get_log_all_ref_updates(repo); |
390 | |
391 | 0 | switch (repo->hash_algo->format_id) { |
392 | 0 | case GIT_SHA1_FORMAT_ID: |
393 | 0 | refs->write_options.hash_id = REFTABLE_HASH_SHA1; |
394 | 0 | break; |
395 | 0 | case GIT_SHA256_FORMAT_ID: |
396 | 0 | refs->write_options.hash_id = REFTABLE_HASH_SHA256; |
397 | 0 | break; |
398 | 0 | default: |
399 | 0 | BUG("unknown hash algorithm %d", repo->hash_algo->format_id); |
400 | 0 | } |
401 | 0 | refs->write_options.default_permissions = calc_shared_perm(the_repository, 0666 & ~mask); |
402 | 0 | refs->write_options.disable_auto_compact = |
403 | 0 | !git_env_bool("GIT_TEST_REFTABLE_AUTOCOMPACTION", 1); |
404 | 0 | refs->write_options.lock_timeout_ms = 100; |
405 | 0 | refs->write_options.fsync = reftable_be_fsync; |
406 | |
407 | 0 | repo_config(the_repository, reftable_be_config, &refs->write_options); |
408 | | |
409 | | /* |
410 | | * It is somewhat unfortunate that we have to mirror the default block |
411 | | * size of the reftable library here. But given that the write options |
412 | | * wouldn't be updated by the library here, and given that we require |
413 | | * the proper block size to trim reflog messages so that they fit, we |
414 | | * must set up a proper value here. |
415 | | */ |
416 | 0 | if (!refs->write_options.block_size) |
417 | 0 | refs->write_options.block_size = 4096; |
418 | | |
419 | | /* |
420 | | * Set up the main reftable stack that is hosted in GIT_COMMON_DIR. |
421 | | * This stack contains both the shared and the main worktree refs. |
422 | | * |
423 | | * Note that we don't try to resolve the path in case we have a |
424 | | * worktree because `get_common_dir_noenv()` already does it for us. |
425 | | */ |
426 | 0 | is_worktree = get_common_dir_noenv(&path, gitdir); |
427 | 0 | if (!is_worktree) { |
428 | 0 | strbuf_reset(&path); |
429 | 0 | strbuf_realpath(&path, gitdir, 0); |
430 | 0 | } |
431 | 0 | strbuf_addstr(&path, "/reftable"); |
432 | 0 | refs->err = reftable_backend_init(&refs->main_backend, path.buf, |
433 | 0 | &refs->write_options); |
434 | 0 | if (refs->err) |
435 | 0 | goto done; |
436 | | |
437 | | /* |
438 | | * If we're in a worktree we also need to set up the worktree reftable |
439 | | * stack that is contained in the per-worktree GIT_DIR. |
440 | | * |
441 | | * Ideally, we would also add the stack to our worktree stack map. But |
442 | | * we have no way to figure out the worktree name here and thus can't |
443 | | * do it efficiently. |
444 | | */ |
445 | 0 | if (is_worktree) { |
446 | 0 | strbuf_reset(&path); |
447 | 0 | strbuf_addf(&path, "%s/reftable", gitdir); |
448 | |
449 | 0 | refs->err = reftable_backend_init(&refs->worktree_backend, path.buf, |
450 | 0 | &refs->write_options); |
451 | 0 | if (refs->err) |
452 | 0 | goto done; |
453 | 0 | } |
454 | | |
455 | 0 | chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir); |
456 | |
457 | 0 | done: |
458 | 0 | assert(refs->err != REFTABLE_API_ERROR); |
459 | 0 | strbuf_release(&path); |
460 | 0 | return &refs->base; |
461 | 0 | } |
462 | | |
463 | | static void reftable_be_release(struct ref_store *ref_store) |
464 | 0 | { |
465 | 0 | struct reftable_ref_store *refs = reftable_be_downcast(ref_store, 0, "release"); |
466 | 0 | struct strmap_entry *entry; |
467 | 0 | struct hashmap_iter iter; |
468 | |
469 | 0 | if (refs->main_backend.stack) |
470 | 0 | reftable_backend_release(&refs->main_backend); |
471 | 0 | if (refs->worktree_backend.stack) |
472 | 0 | reftable_backend_release(&refs->worktree_backend); |
473 | |
474 | 0 | strmap_for_each_entry(&refs->worktree_backends, &iter, entry) { |
475 | 0 | struct reftable_backend *be = entry->value; |
476 | 0 | reftable_backend_release(be); |
477 | 0 | free(be); |
478 | 0 | } |
479 | 0 | strmap_clear(&refs->worktree_backends, 0); |
480 | 0 | } |
481 | | |
482 | | static int reftable_be_create_on_disk(struct ref_store *ref_store, |
483 | | int flags UNUSED, |
484 | | struct strbuf *err UNUSED) |
485 | 0 | { |
486 | 0 | struct reftable_ref_store *refs = |
487 | 0 | reftable_be_downcast(ref_store, REF_STORE_WRITE, "create"); |
488 | 0 | struct strbuf sb = STRBUF_INIT; |
489 | |
|
490 | 0 | strbuf_addf(&sb, "%s/reftable", refs->base.gitdir); |
491 | 0 | safe_create_dir(the_repository, sb.buf, 1); |
492 | 0 | strbuf_reset(&sb); |
493 | |
|
494 | 0 | strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir); |
495 | 0 | write_file(sb.buf, "ref: refs/heads/.invalid"); |
496 | 0 | adjust_shared_perm(the_repository, sb.buf); |
497 | 0 | strbuf_reset(&sb); |
498 | |
|
499 | 0 | strbuf_addf(&sb, "%s/refs", refs->base.gitdir); |
500 | 0 | safe_create_dir(the_repository, sb.buf, 1); |
501 | 0 | strbuf_reset(&sb); |
502 | |
|
503 | 0 | strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir); |
504 | 0 | write_file(sb.buf, "this repository uses the reftable format"); |
505 | 0 | adjust_shared_perm(the_repository, sb.buf); |
506 | |
|
507 | 0 | strbuf_release(&sb); |
508 | 0 | return 0; |
509 | 0 | } |
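/*
 * Sketch of the resulting layout, added for clarity: for a gitdir $GIT_DIR
 * the function above leaves behind
 *
 *   $GIT_DIR/reftable/    - directory that will hold the reftable stack
 *   $GIT_DIR/HEAD         - stub containing "ref: refs/heads/.invalid"
 *   $GIT_DIR/refs/        - directory
 *   $GIT_DIR/refs/heads   - stub regular file (not a directory) noting that
 *                           the repository uses the reftable format
 *
 * The stubs presumably keep tools that are unaware of reftable from
 * mistaking the directory for a broken repository; the layout itself is
 * taken directly from the code above.
 */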
510 | | |
511 | | static int reftable_be_remove_on_disk(struct ref_store *ref_store, |
512 | | struct strbuf *err) |
513 | 0 | { |
514 | 0 | struct reftable_ref_store *refs = |
515 | 0 | reftable_be_downcast(ref_store, REF_STORE_WRITE, "remove"); |
516 | 0 | struct strbuf sb = STRBUF_INIT; |
517 | 0 | int ret = 0; |
518 | | |
519 | | /* |
520 | | * Release the ref store such that all stacks are closed. This is |
521 | | * required so that the "tables.list" file is not open anymore, which |
522 | | * would otherwise make it impossible to remove the file on Windows. |
523 | | */ |
524 | 0 | reftable_be_release(ref_store); |
525 | |
526 | 0 | strbuf_addf(&sb, "%s/reftable", refs->base.gitdir); |
527 | 0 | if (remove_dir_recursively(&sb, 0) < 0) { |
528 | 0 | strbuf_addf(err, "could not delete reftables: %s", |
529 | 0 | strerror(errno)); |
530 | 0 | ret = -1; |
531 | 0 | } |
532 | 0 | strbuf_reset(&sb); |
533 | |
534 | 0 | strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir); |
535 | 0 | if (unlink(sb.buf) < 0) { |
536 | 0 | strbuf_addf(err, "could not delete stub HEAD: %s", |
537 | 0 | strerror(errno)); |
538 | 0 | ret = -1; |
539 | 0 | } |
540 | 0 | strbuf_reset(&sb); |
541 | |
542 | 0 | strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir); |
543 | 0 | if (unlink(sb.buf) < 0) { |
544 | 0 | strbuf_addf(err, "could not delete stub heads: %s", |
545 | 0 | strerror(errno)); |
546 | 0 | ret = -1; |
547 | 0 | } |
548 | 0 | strbuf_reset(&sb); |
549 | |
550 | 0 | strbuf_addf(&sb, "%s/refs", refs->base.gitdir); |
551 | 0 | if (rmdir(sb.buf) < 0) { |
552 | 0 | strbuf_addf(err, "could not delete refs directory: %s", |
553 | 0 | strerror(errno)); |
554 | 0 | ret = -1; |
555 | 0 | } |
556 | |
557 | 0 | strbuf_release(&sb); |
558 | 0 | return ret; |
559 | 0 | } |
560 | | |
561 | | struct reftable_ref_iterator { |
562 | | struct ref_iterator base; |
563 | | struct reftable_ref_store *refs; |
564 | | struct reftable_iterator iter; |
565 | | struct reftable_ref_record ref; |
566 | | struct object_id oid; |
567 | | struct object_id peeled_oid; |
568 | | |
569 | | char *prefix; |
570 | | size_t prefix_len; |
571 | | char **exclude_patterns; |
572 | | size_t exclude_patterns_index; |
573 | | size_t exclude_patterns_strlen; |
574 | | unsigned int flags; |
575 | | int err; |
576 | | }; |
577 | | |
578 | | /* |
579 | | * Handle exclude patterns. Returns `1` to tell the caller that the current |
580 | | * reference shall not be shown, or `0` to indicate that it should be |
581 | | * shown. |
582 | | */ |
583 | | static int should_exclude_current_ref(struct reftable_ref_iterator *iter) |
584 | 0 | { |
585 | 0 | while (iter->exclude_patterns[iter->exclude_patterns_index]) { |
586 | 0 | const char *pattern = iter->exclude_patterns[iter->exclude_patterns_index]; |
587 | 0 | char *ref_after_pattern; |
588 | 0 | int cmp; |
589 | | |
590 | | /* |
591 | | * Lazily cache the pattern length so that we don't have to |
592 | | * recompute it every time this function is called. |
593 | | */ |
594 | 0 | if (!iter->exclude_patterns_strlen) |
595 | 0 | iter->exclude_patterns_strlen = strlen(pattern); |
596 | | |
597 | | /* |
598 | | * When the reference name is lexicographically bigger than the |
599 | | * current exclude pattern we know that it won't ever match any |
600 | | * of the following references, either. We thus advance to the |
601 | | * next pattern and re-check whether it matches. |
602 | | * |
603 | | * Otherwise, if it's smaller, then we do not have a match and |
604 | | * thus want to show the current reference. |
605 | | */ |
606 | 0 | cmp = strncmp(iter->ref.refname, pattern, |
607 | 0 | iter->exclude_patterns_strlen); |
608 | 0 | if (cmp > 0) { |
609 | 0 | iter->exclude_patterns_index++; |
610 | 0 | iter->exclude_patterns_strlen = 0; |
611 | 0 | continue; |
612 | 0 | } |
613 | 0 | if (cmp < 0) |
614 | 0 | return 0; |
615 | | |
616 | | /* |
617 | | * The reference shares a prefix with the exclude pattern and |
618 | | * shall thus be omitted. We skip all references that match the |
619 | | * pattern by seeking to the first reference after the block of |
620 | | * matches. |
621 | | * |
622 | | * This is done by appending the highest possible character to |
623 | | * the pattern. Consequently, all references that have the |
624 | | * pattern as prefix and whose suffix starts with anything in |
625 | | * the range [0x00, 0xfe] are skipped. And given that 0xff is a |
626 | | * non-printable character that shouldn't ever be in a ref name, |
627 | | * we'd not yield any such record, either. |
628 | | * |
629 | | * Note that the seeked-to reference may also be excluded. This |
630 | | * is not handled here though, but the caller is expected to |
631 | | * loop and re-verify the next reference for us. |
632 | | */ |
633 | 0 | ref_after_pattern = xstrfmt("%s%c", pattern, 0xff); |
634 | 0 | iter->err = reftable_iterator_seek_ref(&iter->iter, ref_after_pattern); |
635 | 0 | iter->exclude_patterns_index++; |
636 | 0 | iter->exclude_patterns_strlen = 0; |
637 | 0 | trace2_counter_add(TRACE2_COUNTER_ID_REFTABLE_RESEEKS, 1); |
638 | |
639 | 0 | free(ref_after_pattern); |
640 | 0 | return 1; |
641 | 0 | } |
642 | | |
643 | 0 | return 0; |
644 | 0 | } |
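/*
 * Worked example of the re-seek above, added for illustration (pattern and
 * ref names invented): with the exclude pattern "refs/heads/foo" the
 * iterator is re-seeked to "refs/heads/foo\xff", which skips
 * "refs/heads/foo" itself as well as "refs/heads/foo/bar" and
 * "refs/heads/foobar", and resumes at the first reference that no longer
 * shares the "refs/heads/foo" prefix.
 */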
645 | | |
646 | | static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator) |
647 | 0 | { |
648 | 0 | struct reftable_ref_iterator *iter = |
649 | 0 | (struct reftable_ref_iterator *)ref_iterator; |
650 | 0 | struct reftable_ref_store *refs = iter->refs; |
651 | 0 | const char *referent = NULL; |
652 | |
|
653 | 0 | while (!iter->err) { |
654 | 0 | int flags = 0; |
655 | |
656 | 0 | iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref); |
657 | 0 | if (iter->err) |
658 | 0 | break; |
659 | | |
660 | | /* |
661 | | * The files backend only lists references contained in "refs/" unless |
662 | | * the root refs are to be included. We emulate the same behaviour here. |
663 | | */ |
664 | 0 | if (!starts_with(iter->ref.refname, "refs/") && |
665 | 0 | !(iter->flags & DO_FOR_EACH_INCLUDE_ROOT_REFS && |
666 | 0 | is_root_ref(iter->ref.refname))) { |
667 | 0 | continue; |
668 | 0 | } |
669 | | |
670 | 0 | if (iter->prefix_len && |
671 | 0 | strncmp(iter->prefix, iter->ref.refname, iter->prefix_len)) { |
672 | 0 | iter->err = 1; |
673 | 0 | break; |
674 | 0 | } |
675 | | |
676 | 0 | if (iter->exclude_patterns && should_exclude_current_ref(iter)) |
677 | 0 | continue; |
678 | | |
679 | 0 | if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY && |
680 | 0 | parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) != |
681 | 0 | REF_WORKTREE_CURRENT) |
682 | 0 | continue; |
683 | | |
684 | 0 | switch (iter->ref.value_type) { |
685 | 0 | case REFTABLE_REF_VAL1: |
686 | 0 | oidread(&iter->oid, iter->ref.value.val1, |
687 | 0 | refs->base.repo->hash_algo); |
688 | 0 | break; |
689 | 0 | case REFTABLE_REF_VAL2: |
690 | 0 | oidread(&iter->oid, iter->ref.value.val2.value, |
691 | 0 | refs->base.repo->hash_algo); |
692 | 0 | oidread(&iter->peeled_oid, iter->ref.value.val2.target_value, |
693 | 0 | refs->base.repo->hash_algo); |
694 | 0 | break; |
695 | 0 | case REFTABLE_REF_SYMREF: |
696 | 0 | referent = refs_resolve_ref_unsafe(&iter->refs->base, |
697 | 0 | iter->ref.refname, |
698 | 0 | RESOLVE_REF_READING, |
699 | 0 | &iter->oid, &flags); |
700 | 0 | if (!referent) |
701 | 0 | oidclr(&iter->oid, refs->base.repo->hash_algo); |
702 | 0 | break; |
703 | 0 | default: |
704 | 0 | BUG("unhandled reference value type %d", iter->ref.value_type); |
705 | 0 | } |
706 | | |
707 | 0 | if (is_null_oid(&iter->oid)) |
708 | 0 | flags |= REF_ISBROKEN; |
709 | |
710 | 0 | if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) { |
711 | 0 | if (!refname_is_safe(iter->ref.refname)) |
712 | 0 | die(_("refname is dangerous: %s"), iter->ref.refname); |
713 | 0 | oidclr(&iter->oid, refs->base.repo->hash_algo); |
714 | 0 | flags |= REF_BAD_NAME | REF_ISBROKEN; |
715 | 0 | } |
716 | | |
717 | 0 | if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS && |
718 | 0 | flags & REF_ISSYMREF && |
719 | 0 | flags & REF_ISBROKEN) |
720 | 0 | continue; |
721 | | |
722 | 0 | if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) && |
723 | 0 | !ref_resolves_to_object(iter->ref.refname, refs->base.repo, |
724 | 0 | &iter->oid, flags)) |
725 | 0 | continue; |
726 | | |
727 | 0 | memset(&iter->base.ref, 0, sizeof(iter->base.ref)); |
728 | 0 | iter->base.ref.name = iter->ref.refname; |
729 | 0 | iter->base.ref.target = referent; |
730 | 0 | iter->base.ref.oid = &iter->oid; |
731 | 0 | if (iter->ref.value_type == REFTABLE_REF_VAL2) |
732 | 0 | iter->base.ref.peeled_oid = &iter->peeled_oid; |
733 | 0 | iter->base.ref.flags = flags; |
734 | |
735 | 0 | break; |
736 | 0 | } |
737 | | |
738 | 0 | if (iter->err > 0) |
739 | 0 | return ITER_DONE; |
740 | 0 | if (iter->err < 0) |
741 | 0 | return ITER_ERROR; |
742 | 0 | return ITER_OK; |
743 | 0 | } |
744 | | |
745 | | static int reftable_ref_iterator_seek(struct ref_iterator *ref_iterator, |
746 | | const char *refname, unsigned int flags) |
747 | 0 | { |
748 | 0 | struct reftable_ref_iterator *iter = |
749 | 0 | (struct reftable_ref_iterator *)ref_iterator; |
750 | | |
751 | | /* Unset any previously set prefix */ |
752 | 0 | FREE_AND_NULL(iter->prefix); |
753 | 0 | iter->prefix_len = 0; |
754 | |
755 | 0 | if (flags & REF_ITERATOR_SEEK_SET_PREFIX) { |
756 | 0 | iter->prefix = xstrdup_or_null(refname); |
757 | 0 | iter->prefix_len = refname ? strlen(refname) : 0; |
758 | 0 | } |
759 | 0 | iter->err = reftable_iterator_seek_ref(&iter->iter, refname); |
760 | |
761 | 0 | return iter->err; |
762 | 0 | } |
763 | | |
764 | | static void reftable_ref_iterator_release(struct ref_iterator *ref_iterator) |
765 | 0 | { |
766 | 0 | struct reftable_ref_iterator *iter = |
767 | 0 | (struct reftable_ref_iterator *)ref_iterator; |
768 | 0 | reftable_ref_record_release(&iter->ref); |
769 | 0 | reftable_iterator_destroy(&iter->iter); |
770 | 0 | if (iter->exclude_patterns) { |
771 | 0 | for (size_t i = 0; iter->exclude_patterns[i]; i++) |
772 | 0 | free(iter->exclude_patterns[i]); |
773 | 0 | free(iter->exclude_patterns); |
774 | 0 | } |
775 | 0 | free(iter->prefix); |
776 | 0 | } |
777 | | |
778 | | static struct ref_iterator_vtable reftable_ref_iterator_vtable = { |
779 | | .advance = reftable_ref_iterator_advance, |
780 | | .seek = reftable_ref_iterator_seek, |
781 | | .release = reftable_ref_iterator_release, |
782 | | }; |
783 | | |
784 | | static int qsort_strcmp(const void *va, const void *vb) |
785 | 0 | { |
786 | 0 | const char *a = *(const char **)va; |
787 | 0 | const char *b = *(const char **)vb; |
788 | 0 | return strcmp(a, b); |
789 | 0 | } |
790 | | |
791 | | static char **filter_exclude_patterns(const char **exclude_patterns) |
792 | 0 | { |
793 | 0 | size_t filtered_size = 0, filtered_alloc = 0; |
794 | 0 | char **filtered = NULL; |
795 | |
796 | 0 | if (!exclude_patterns) |
797 | 0 | return NULL; |
798 | | |
799 | 0 | for (size_t i = 0; ; i++) { |
800 | 0 | const char *exclude_pattern = exclude_patterns[i]; |
801 | 0 | int has_glob = 0; |
802 | |
803 | 0 | if (!exclude_pattern) |
804 | 0 | break; |
805 | | |
806 | 0 | for (const char *p = exclude_pattern; *p; p++) { |
807 | 0 | has_glob = is_glob_special(*p); |
808 | 0 | if (has_glob) |
809 | 0 | break; |
810 | 0 | } |
811 | 0 | if (has_glob) |
812 | 0 | continue; |
813 | | |
814 | 0 | ALLOC_GROW(filtered, filtered_size + 1, filtered_alloc); |
815 | 0 | filtered[filtered_size++] = xstrdup(exclude_pattern); |
816 | 0 | } |
817 | |
818 | 0 | if (filtered_size) { |
819 | 0 | QSORT(filtered, filtered_size, qsort_strcmp); |
820 | 0 | ALLOC_GROW(filtered, filtered_size + 1, filtered_alloc); |
821 | 0 | filtered[filtered_size++] = NULL; |
822 | 0 | } |
823 | |
|
824 | 0 | return filtered; |
825 | 0 | } |
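/*
 * Small example of the filtering above, added for clarity (patterns
 * invented): given { "refs/heads/release-*", "refs/hidden/", NULL }, the
 * glob pattern is dropped since the seek-based exclusion cannot handle it,
 * and the result is the sorted, NULL-terminated array
 * { "refs/hidden/", NULL }.
 */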
826 | | |
827 | | static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs, |
828 | | struct reftable_stack *stack, |
829 | | const char *prefix, |
830 | | const char **exclude_patterns, |
831 | | int flags) |
832 | 0 | { |
833 | 0 | struct reftable_ref_iterator *iter; |
834 | 0 | int ret; |
835 | |
|
836 | 0 | iter = xcalloc(1, sizeof(*iter)); |
837 | 0 | base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable); |
838 | 0 | iter->base.ref.oid = &iter->oid; |
839 | 0 | iter->flags = flags; |
840 | 0 | iter->refs = refs; |
841 | 0 | iter->exclude_patterns = filter_exclude_patterns(exclude_patterns); |
842 | |
843 | 0 | ret = refs->err; |
844 | 0 | if (ret) |
845 | 0 | goto done; |
846 | | |
847 | 0 | ret = reftable_stack_reload(stack); |
848 | 0 | if (ret) |
849 | 0 | goto done; |
850 | | |
851 | 0 | ret = reftable_stack_init_ref_iterator(stack, &iter->iter); |
852 | 0 | if (ret) |
853 | 0 | goto done; |
854 | | |
855 | 0 | ret = reftable_ref_iterator_seek(&iter->base, prefix, |
856 | 0 | REF_ITERATOR_SEEK_SET_PREFIX); |
857 | 0 | if (ret) |
858 | 0 | goto done; |
859 | | |
860 | 0 | done: |
861 | 0 | iter->err = ret; |
862 | 0 | return iter; |
863 | 0 | } |
864 | | |
865 | | static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store, |
866 | | const char *prefix, |
867 | | const char **exclude_patterns, |
868 | | unsigned int flags) |
869 | 0 | { |
870 | 0 | struct reftable_ref_iterator *main_iter, *worktree_iter; |
871 | 0 | struct reftable_ref_store *refs; |
872 | 0 | unsigned int required_flags = REF_STORE_READ; |
873 | |
874 | 0 | if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN)) |
875 | 0 | required_flags |= REF_STORE_ODB; |
876 | 0 | refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin"); |
877 | |
878 | 0 | main_iter = ref_iterator_for_stack(refs, refs->main_backend.stack, prefix, |
879 | 0 | exclude_patterns, flags); |
880 | | |
881 | | /* |
882 | | * The worktree stack is only set when we're in an actual worktree |
883 | | * right now. If we aren't, then we return the common reftable |
884 | | * iterator, only. |
885 | | */ |
886 | 0 | if (!refs->worktree_backend.stack) |
887 | 0 | return &main_iter->base; |
888 | | |
889 | | /* |
890 | | * Otherwise we merge both the common and the per-worktree refs into a |
891 | | * single iterator. |
892 | | */ |
893 | 0 | worktree_iter = ref_iterator_for_stack(refs, refs->worktree_backend.stack, prefix, |
894 | 0 | exclude_patterns, flags); |
895 | 0 | return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base, |
896 | 0 | ref_iterator_select, NULL); |
897 | 0 | } |
898 | | |
899 | | static int reftable_be_read_raw_ref(struct ref_store *ref_store, |
900 | | const char *refname, |
901 | | struct object_id *oid, |
902 | | struct strbuf *referent, |
903 | | unsigned int *type, |
904 | | int *failure_errno) |
905 | 0 | { |
906 | 0 | struct reftable_ref_store *refs = |
907 | 0 | reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref"); |
908 | 0 | struct reftable_backend *be; |
909 | 0 | int ret; |
910 | |
911 | 0 | if (refs->err < 0) |
912 | 0 | return refs->err; |
913 | | |
914 | 0 | ret = backend_for(&be, refs, refname, &refname, 1); |
915 | 0 | if (ret) |
916 | 0 | return ret; |
917 | | |
918 | 0 | ret = reftable_backend_read_ref(be, refname, oid, referent, type); |
919 | 0 | if (ret < 0) |
920 | 0 | return ret; |
921 | 0 | if (ret > 0) { |
922 | 0 | *failure_errno = ENOENT; |
923 | 0 | return -1; |
924 | 0 | } |
925 | | |
926 | 0 | return 0; |
927 | 0 | } |
928 | | |
929 | | static int reftable_be_read_symbolic_ref(struct ref_store *ref_store, |
930 | | const char *refname, |
931 | | struct strbuf *referent) |
932 | 0 | { |
933 | 0 | struct reftable_ref_store *refs = |
934 | 0 | reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref"); |
935 | 0 | struct reftable_backend *be; |
936 | 0 | struct object_id oid; |
937 | 0 | unsigned int type = 0; |
938 | 0 | int ret; |
939 | |
940 | 0 | ret = backend_for(&be, refs, refname, &refname, 1); |
941 | 0 | if (ret) |
942 | 0 | return ret; |
943 | | |
944 | 0 | ret = reftable_backend_read_ref(be, refname, &oid, referent, &type); |
945 | 0 | if (ret) |
946 | 0 | ret = -1; |
947 | 0 | else if (type == REF_ISSYMREF) |
948 | 0 | ; /* happy */ |
949 | 0 | else |
950 | 0 | ret = NOT_A_SYMREF; |
951 | 0 | return ret; |
952 | 0 | } |
953 | | |
954 | | struct reftable_transaction_update { |
955 | | struct ref_update *update; |
956 | | struct object_id current_oid; |
957 | | }; |
958 | | |
959 | | struct write_transaction_table_arg { |
960 | | struct reftable_ref_store *refs; |
961 | | struct reftable_backend *be; |
962 | | struct reftable_addition *addition; |
963 | | struct reftable_transaction_update *updates; |
964 | | size_t updates_nr; |
965 | | size_t updates_alloc; |
966 | | size_t updates_expected; |
967 | | uint64_t max_index; |
968 | | }; |
969 | | |
970 | | struct reftable_transaction_data { |
971 | | struct write_transaction_table_arg *args; |
972 | | size_t args_nr, args_alloc; |
973 | | }; |
974 | | |
975 | | static void free_transaction_data(struct reftable_transaction_data *tx_data) |
976 | 0 | { |
977 | 0 | if (!tx_data) |
978 | 0 | return; |
979 | 0 | for (size_t i = 0; i < tx_data->args_nr; i++) { |
980 | 0 | reftable_addition_destroy(tx_data->args[i].addition); |
981 | 0 | free(tx_data->args[i].updates); |
982 | 0 | } |
983 | 0 | free(tx_data->args); |
984 | 0 | free(tx_data); |
985 | 0 | } |
986 | | |
987 | | /* |
988 | | * Prepare transaction update for the given reference update. This will cause |
989 | | * us to lock the corresponding reftable stack against concurrent modification. |
990 | | */ |
991 | | static int prepare_transaction_update(struct write_transaction_table_arg **out, |
992 | | struct reftable_ref_store *refs, |
993 | | struct reftable_transaction_data *tx_data, |
994 | | struct ref_update *update, |
995 | | struct strbuf *err) |
996 | 0 | { |
997 | 0 | struct write_transaction_table_arg *arg = NULL; |
998 | 0 | struct reftable_backend *be; |
999 | 0 | size_t i; |
1000 | 0 | int ret; |
1001 | | |
1002 | | /* |
1003 | | * This function gets called in a loop, and we don't want to repeatedly |
1004 | | * reload the stack for every single ref update. Instead, we manually |
1005 | | * reload further down in the case where we haven't yet prepared the |
1006 | | * specific `reftable_backend`. |
1007 | | */ |
1008 | 0 | ret = backend_for(&be, refs, update->refname, NULL, 0); |
1009 | 0 | if (ret) |
1010 | 0 | return ret; |
1011 | | |
1012 | | /* |
1013 | | * Search for a preexisting stack update. If there is one then we add |
1014 | | * the update to it, otherwise we set up a new stack update. |
1015 | | */ |
1016 | 0 | for (i = 0; !arg && i < tx_data->args_nr; i++) |
1017 | 0 | if (tx_data->args[i].be == be) |
1018 | 0 | arg = &tx_data->args[i]; |
1019 | |
1020 | 0 | if (!arg) { |
1021 | 0 | struct reftable_addition *addition; |
1022 | |
1023 | 0 | ret = reftable_stack_new_addition(&addition, be->stack, |
1024 | 0 | REFTABLE_STACK_NEW_ADDITION_RELOAD); |
1025 | 0 | if (ret) { |
1026 | 0 | if (ret == REFTABLE_LOCK_ERROR) |
1027 | 0 | strbuf_addstr(err, "cannot lock references"); |
1028 | 0 | return ret; |
1029 | 0 | } |
1030 | | |
1031 | 0 | ALLOC_GROW(tx_data->args, tx_data->args_nr + 1, |
1032 | 0 | tx_data->args_alloc); |
1033 | 0 | arg = &tx_data->args[tx_data->args_nr++]; |
1034 | 0 | arg->refs = refs; |
1035 | 0 | arg->be = be; |
1036 | 0 | arg->addition = addition; |
1037 | 0 | arg->updates = NULL; |
1038 | 0 | arg->updates_nr = 0; |
1039 | 0 | arg->updates_alloc = 0; |
1040 | 0 | arg->updates_expected = 0; |
1041 | 0 | arg->max_index = 0; |
1042 | 0 | } |
1043 | | |
1044 | 0 | arg->updates_expected++; |
1045 | |
1046 | 0 | if (out) |
1047 | 0 | *out = arg; |
1048 | |
1049 | 0 | return 0; |
1050 | 0 | } |
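/*
 * Note added for clarity: there is one write_transaction_table_arg per
 * reftable stack touched by the transaction. A transaction updating, say,
 * "refs/heads/main" together with a per-worktree ref of worktree "wt"
 * (names invented) thus ends up with two entries in tx_data->args, each
 * holding its own reftable_addition lock, while further updates to an
 * already-seen stack reuse the entry found by the linear search above.
 */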
1051 | | |
1052 | | /* |
1053 | | * Queue a reference update for the correct stack. We potentially need to |
1054 | | * handle multiple stack updates in a single transaction when it spans across |
1055 | | * multiple worktrees. |
1056 | | */ |
1057 | | static int queue_transaction_update(struct reftable_ref_store *refs, |
1058 | | struct reftable_transaction_data *tx_data, |
1059 | | struct ref_update *update, |
1060 | | struct object_id *current_oid, |
1061 | | struct strbuf *err) |
1062 | 0 | { |
1063 | 0 | struct write_transaction_table_arg *arg = NULL; |
1064 | 0 | int ret; |
1065 | |
1066 | 0 | if (update->backend_data) |
1067 | 0 | BUG("reference update queued more than once"); |
1068 | | |
1069 | 0 | ret = prepare_transaction_update(&arg, refs, tx_data, update, err); |
1070 | 0 | if (ret < 0) |
1071 | 0 | return ret; |
1072 | | |
1073 | 0 | ALLOC_GROW(arg->updates, arg->updates_nr + 1, |
1074 | 0 | arg->updates_alloc); |
1075 | 0 | arg->updates[arg->updates_nr].update = update; |
1076 | 0 | oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid); |
1077 | 0 | update->backend_data = &arg->updates[arg->updates_nr++]; |
1078 | |
1079 | 0 | return 0; |
1080 | 0 | } |
1081 | | |
1082 | | static enum ref_transaction_error prepare_single_update(struct reftable_ref_store *refs, |
1083 | | struct reftable_transaction_data *tx_data, |
1084 | | struct ref_transaction *transaction, |
1085 | | struct reftable_backend *be, |
1086 | | struct ref_update *u, |
1087 | | size_t update_idx, |
1088 | | struct string_list *refnames_to_check, |
1089 | | unsigned int head_type, |
1090 | | struct strbuf *head_referent, |
1091 | | struct strbuf *referent, |
1092 | | struct strbuf *err) |
1093 | 0 | { |
1094 | 0 | enum ref_transaction_error ret = 0; |
1095 | 0 | struct object_id current_oid = {0}; |
1096 | 0 | const char *rewritten_ref; |
1097 | | |
1098 | | /* |
1099 | | * There is no need to reload the respective backends here as |
1100 | | * we have already reloaded them when preparing the transaction |
1101 | | * update. And given that the stacks have been locked there |
1102 | | * shouldn't have been any concurrent modifications of the |
1103 | | * stack. |
1104 | | */ |
1105 | 0 | ret = backend_for(&be, refs, u->refname, &rewritten_ref, 0); |
1106 | 0 | if (ret) |
1107 | 0 | return REF_TRANSACTION_ERROR_GENERIC; |
1108 | | |
1109 | 0 | if (u->flags & REF_LOG_USE_PROVIDED_OIDS) { |
1110 | 0 | if (!(u->flags & REF_HAVE_OLD) || |
1111 | 0 | !(u->flags & REF_HAVE_NEW) || |
1112 | 0 | !(u->flags & REF_LOG_ONLY)) { |
1113 | 0 | strbuf_addf(err, _("trying to write reflog for '%s' " |
1114 | 0 | "with incomplete values"), u->refname); |
1115 | 0 | return REF_TRANSACTION_ERROR_GENERIC; |
1116 | 0 | } |
1117 | | |
1118 | 0 | if (queue_transaction_update(refs, tx_data, u, &u->old_oid, err)) |
1119 | 0 | return REF_TRANSACTION_ERROR_GENERIC; |
1120 | 0 | return 0; |
1121 | 0 | } |
1122 | | |
1123 | | /* Verify that the new object ID is valid. */ |
1124 | 0 | if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) && |
1125 | 0 | !(u->flags & REF_SKIP_OID_VERIFICATION) && |
1126 | 0 | !(u->flags & REF_LOG_ONLY)) { |
1127 | 0 | struct object *o = parse_object(refs->base.repo, &u->new_oid); |
1128 | 0 | if (!o) { |
1129 | 0 | strbuf_addf(err, |
1130 | 0 | _("trying to write ref '%s' with nonexistent object %s"), |
1131 | 0 | u->refname, oid_to_hex(&u->new_oid)); |
1132 | 0 | return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE; |
1133 | 0 | } |
1134 | | |
1135 | 0 | if (o->type != OBJ_COMMIT && is_branch(u->refname)) { |
1136 | 0 | strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"), |
1137 | 0 | oid_to_hex(&u->new_oid), u->refname); |
1138 | 0 | return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE; |
1139 | 0 | } |
1140 | 0 | } |
1141 | | |
1142 | | /* |
1143 | | * When we update the reference that HEAD points to we enqueue |
1144 | | * a second log-only update for HEAD so that its reflog is |
1145 | | * updated accordingly. |
1146 | | */ |
1147 | 0 | if (head_type == REF_ISSYMREF && |
1148 | 0 | !(u->flags & REF_LOG_ONLY) && |
1149 | 0 | !(u->flags & REF_UPDATE_VIA_HEAD) && |
1150 | 0 | !strcmp(rewritten_ref, head_referent->buf)) { |
1151 | | /* |
1152 | | * First make sure that HEAD is not already in the |
1153 | | * transaction. This check is O(lg N) in the transaction |
1154 | | * size, but it happens at most once per transaction. |
1155 | | */ |
1156 | 0 | if (string_list_has_string(&transaction->refnames, "HEAD")) { |
1157 | | /* An entry already existed */ |
1158 | 0 | strbuf_addf(err, |
1159 | 0 | _("multiple updates for 'HEAD' (including one " |
1160 | 0 | "via its referent '%s') are not allowed"), |
1161 | 0 | u->refname); |
1162 | 0 | return REF_TRANSACTION_ERROR_NAME_CONFLICT; |
1163 | 0 | } |
1164 | | |
1165 | 0 | ref_transaction_add_update( |
1166 | 0 | transaction, "HEAD", |
1167 | 0 | u->flags | REF_LOG_ONLY | REF_NO_DEREF, |
1168 | 0 | &u->new_oid, &u->old_oid, NULL, NULL, NULL, |
1169 | 0 | u->msg); |
1170 | 0 | } |
1171 | | |
1172 | 0 | ret = reftable_backend_read_ref(be, rewritten_ref, |
1173 | 0 | ¤t_oid, referent, &u->type); |
1174 | 0 | if (ret < 0) |
1175 | 0 | return REF_TRANSACTION_ERROR_GENERIC; |
1176 | 0 | if (ret > 0 && !ref_update_expects_existing_old_ref(u)) { |
1177 | 0 | struct string_list_item *item; |
1178 | | /* |
1179 | | * The reference does not exist, and we either have no |
1180 | | * old object ID or expect the reference to not exist. |
1181 | | * We can thus skip the safety checks below as well as the |
1182 | | * symref splitting. But we do want to verify that |
1183 | | * there is no conflicting reference here so that we |
1184 | | * can output a proper error message instead of failing |
1185 | | * at a later point. |
1186 | | */ |
1187 | 0 | item = string_list_append(refnames_to_check, u->refname); |
1188 | 0 | item->util = xmalloc(sizeof(update_idx)); |
1189 | 0 | memcpy(item->util, &update_idx, sizeof(update_idx)); |
1190 | | |
1191 | | /* |
1192 | | * There is no need to write the reference deletion |
1193 | | * when the reference in question doesn't exist. |
1194 | | */ |
1195 | 0 | if ((u->flags & REF_HAVE_NEW) && !ref_update_has_null_new_value(u)) { |
1196 | 0 | ret = queue_transaction_update(refs, tx_data, u, |
1197 | 0 | ¤t_oid, err); |
1198 | 0 | if (ret) |
1199 | 0 | return REF_TRANSACTION_ERROR_GENERIC; |
1200 | 0 | } |
1201 | | |
1202 | 0 | return 0; |
1203 | 0 | } |
1204 | 0 | if (ret > 0) { |
1205 | | /* The reference does not exist, but we expected it to. */ |
1206 | 0 | strbuf_addf(err, _("cannot lock ref '%s': " |
1207 | 0 | "unable to resolve reference '%s'"), |
1208 | 0 | ref_update_original_update_refname(u), u->refname); |
1209 | 0 | return REF_TRANSACTION_ERROR_NONEXISTENT_REF; |
1210 | 0 | } |
1211 | | |
1212 | 0 | if (u->type & REF_ISSYMREF) { |
1213 | | /* |
1214 | | * The reftable stack is locked at this point already, |
1215 | | * so it is safe to call `refs_resolve_ref_unsafe()` |
1216 | | * here without causing races. |
1217 | | */ |
1218 | 0 | const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0, |
1219 | 0 | ¤t_oid, NULL); |
1220 | |
1221 | 0 | if (u->flags & REF_NO_DEREF) { |
1222 | 0 | if (u->flags & REF_HAVE_OLD && !resolved) { |
1223 | 0 | strbuf_addf(err, _("cannot lock ref '%s': " |
1224 | 0 | "error reading reference"), u->refname); |
1225 | 0 | return REF_TRANSACTION_ERROR_GENERIC; |
1226 | 0 | } |
1227 | 0 | } else { |
1228 | 0 | struct ref_update *new_update; |
1229 | 0 | int new_flags; |
1230 | |
1231 | 0 | new_flags = u->flags; |
1232 | 0 | if (!strcmp(rewritten_ref, "HEAD")) |
1233 | 0 | new_flags |= REF_UPDATE_VIA_HEAD; |
1234 | |
1235 | 0 | if (string_list_has_string(&transaction->refnames, referent->buf)) { |
1236 | 0 | strbuf_addf(err, |
1237 | 0 | _("multiple updates for '%s' (including one " |
1238 | 0 | "via symref '%s') are not allowed"), |
1239 | 0 | referent->buf, u->refname); |
1240 | 0 | return REF_TRANSACTION_ERROR_NAME_CONFLICT; |
1241 | 0 | } |
1242 | | |
1243 | | /* |
1244 | | * If we are updating a symref (eg. HEAD), we should also |
1245 | | * update the branch that the symref points to. |
1246 | | * |
1247 | | * This is generic functionality, and would be better |
1248 | | * done in refs.c, but the current implementation is |
1249 | | * intertwined with the locking in files-backend.c. |
1250 | | */ |
1251 | 0 | new_update = ref_transaction_add_update( |
1252 | 0 | transaction, referent->buf, new_flags, |
1253 | 0 | u->new_target ? NULL : &u->new_oid, |
1254 | 0 | u->old_target ? NULL : &u->old_oid, |
1255 | 0 | u->new_target, u->old_target, |
1256 | 0 | u->committer_info, u->msg); |
1257 | |
1258 | 0 | new_update->parent_update = u; |
1259 | | |
1260 | | /* Change the symbolic ref update to log only. */ |
1261 | 0 | u->flags |= REF_LOG_ONLY | REF_NO_DEREF; |
1262 | 0 | } |
1263 | 0 | } |
1264 | | |
1265 | | /* |
1266 | | * Verify that the old object matches our expectations. Note |
1267 | | * that the error messages here do not make a lot of sense in |
1268 | | * the context of the reftable backend as we never lock |
1269 | | * individual refs. But the error messages match what the files |
1270 | | * backend returns, which keeps our tests happy. |
1271 | | */ |
1272 | 0 | if (u->old_target) { |
1273 | 0 | if (!(u->type & REF_ISSYMREF)) { |
1274 | 0 | strbuf_addf(err, _("cannot lock ref '%s': " |
1275 | 0 | "expected symref with target '%s': " |
1276 | 0 | "but is a regular ref"), |
1277 | 0 | ref_update_original_update_refname(u), |
1278 | 0 | u->old_target); |
1279 | 0 | return REF_TRANSACTION_ERROR_EXPECTED_SYMREF; |
1280 | 0 | } |
1281 | | |
1282 | 0 | ret = ref_update_check_old_target(referent->buf, u, err); |
1283 | 0 | if (ret) |
1284 | 0 | return ret; |
1285 | 0 | } else if ((u->flags & (REF_LOG_ONLY | REF_HAVE_OLD)) == REF_HAVE_OLD) { |
1286 | 0 | if (oideq(¤t_oid, &u->old_oid)) { |
1287 | | /* |
1288 | | * Normally matching the expected old oid is enough. Either we |
1289 | | * found the ref at the expected state, or we are creating and |
1290 | | * expect the null oid (and likewise found nothing). |
1291 | | * |
1292 | | * But there is one exception for the null oid: if we found a |
1293 | | * symref pointing to nothing we'll also get the null oid. In |
1294 | | * regular recursive mode, that's good (we'll write to what the |
1295 | | * symref points to, which doesn't exist). But in no-deref |
1296 | | * mode, it means we'll clobber the symref, even though the |
1297 | | * caller asked for this to be a creation event. So flag |
1298 | | * that case to preserve the dangling symref. |
1299 | | * |
1300 | | * Everything else is OK and we can fall through to the |
1301 | | * end of the conditional chain. |
1302 | | */ |
1303 | 0 | if ((u->flags & REF_NO_DEREF) && |
1304 | 0 | referent->len && |
1305 | 0 | is_null_oid(&u->old_oid)) { |
1306 | 0 | strbuf_addf(err, _("cannot lock ref '%s': " |
1307 | 0 | "dangling symref already exists"), |
1308 | 0 | ref_update_original_update_refname(u)); |
1309 | 0 | return REF_TRANSACTION_ERROR_CREATE_EXISTS; |
1310 | 0 | } |
1311 | 0 | } else if (is_null_oid(&u->old_oid)) { |
1312 | 0 | strbuf_addf(err, _("cannot lock ref '%s': " |
1313 | 0 | "reference already exists"), |
1314 | 0 | ref_update_original_update_refname(u)); |
1315 | 0 | return REF_TRANSACTION_ERROR_CREATE_EXISTS; |
1316 | 0 | } else if (is_null_oid(¤t_oid)) { |
1317 | 0 | strbuf_addf(err, _("cannot lock ref '%s': " |
1318 | 0 | "reference is missing but expected %s"), |
1319 | 0 | ref_update_original_update_refname(u), |
1320 | 0 | oid_to_hex(&u->old_oid)); |
1321 | 0 | return REF_TRANSACTION_ERROR_NONEXISTENT_REF; |
1322 | 0 | } else { |
1323 | 0 | strbuf_addf(err, _("cannot lock ref '%s': " |
1324 | 0 | "is at %s but expected %s"), |
1325 | 0 | ref_update_original_update_refname(u), |
1326 | 0 | oid_to_hex(¤t_oid), |
1327 | 0 | oid_to_hex(&u->old_oid)); |
1328 | 0 | return REF_TRANSACTION_ERROR_INCORRECT_OLD_VALUE; |
1329 | 0 | } |
1330 | 0 | } |
1331 | | |
1332 | | /* |
1333 | | * If all of the following conditions are true: |
1334 | | * |
1335 | | * - We're not about to write a symref. |
1336 | | * - We're not about to write a log-only entry. |
1337 | | * - Old and new object ID are the same. |
1338 | | * |
1339 | | * Then we're essentially doing a no-op update that can be |
1340 | | * skipped. This is not only for the sake of efficiency, but |
1341 | | * also skips writing unneeded reflog entries. |
1342 | | */ |
1343 | 0 | if ((u->type & REF_ISSYMREF) || |
1344 | 0 | (u->flags & REF_LOG_ONLY) || |
1345 | 0 | (u->flags & REF_HAVE_NEW && !oideq(¤t_oid, &u->new_oid))) |
1346 | 0 | if (queue_transaction_update(refs, tx_data, u, ¤t_oid, err)) |
1347 | 0 | return REF_TRANSACTION_ERROR_GENERIC; |
1348 | | |
1349 | 0 | return 0; |
1350 | 0 | } |
1351 | | |
1352 | | static int reftable_be_transaction_prepare(struct ref_store *ref_store, |
1353 | | struct ref_transaction *transaction, |
1354 | | struct strbuf *err) |
1355 | 0 | { |
1356 | 0 | struct reftable_ref_store *refs = |
1357 | 0 | reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare"); |
1358 | 0 | struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT; |
1359 | 0 | struct string_list refnames_to_check = STRING_LIST_INIT_NODUP; |
1360 | 0 | struct reftable_transaction_data *tx_data = NULL; |
1361 | 0 | struct reftable_backend *be; |
1362 | 0 | struct object_id head_oid; |
1363 | 0 | unsigned int head_type = 0; |
1364 | 0 | size_t i; |
1365 | 0 | int ret; |
1366 | |
1367 | 0 | ret = refs->err; |
1368 | 0 | if (ret < 0) |
1369 | 0 | goto done; |
1370 | | |
1371 | 0 | tx_data = xcalloc(1, sizeof(*tx_data)); |
1372 | | |
1373 | | /* |
1374 | | * Preprocess all updates. First, we check that there are no duplicate |
1375 | | * reference updates in this transaction. Second, we lock all stacks |
1376 | | * that will be modified during the transaction. |
1377 | | */ |
1378 | 0 | for (i = 0; i < transaction->nr; i++) { |
1379 | 0 | ret = prepare_transaction_update(NULL, refs, tx_data, |
1380 | 0 | transaction->updates[i], err); |
1381 | 0 | if (ret) |
1382 | 0 | goto done; |
1383 | 0 | } |
1384 | | |
1385 | | /* |
1386 | | * Now that we have counted updates per stack we can preallocate their |
1387 | | * arrays. This avoids having to reallocate many times. |
1388 | | */ |
1389 | 0 | for (i = 0; i < tx_data->args_nr; i++) { |
1390 | 0 | CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected); |
1391 | 0 | tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected; |
1392 | 0 | } |
1393 | | |
1394 | | /* |
1395 | | * TODO: it's dubious whether we should reload the stack that "HEAD" |
1396 | | * belongs to or not. In theory, it may happen that we only modify |
1397 | | * stacks which are _not_ part of the "HEAD" stack. In that case we |
1398 | | * wouldn't have prepared any transaction for its stack and would not |
1399 | | * have reloaded it, which may mean that it is stale. |
1400 | | * |
1401 | | * On the other hand, reloading that stack without locking it feels |
1402 | | * wrong, too, as the value of "HEAD" could be modified concurrently at |
1403 | | * any point in time. |
1404 | | */ |
1405 | 0 | ret = backend_for(&be, refs, "HEAD", NULL, 0); |
1406 | 0 | if (ret) |
1407 | 0 | goto done; |
1408 | | |
1409 | 0 | ret = reftable_backend_read_ref(be, "HEAD", &head_oid, |
1410 | 0 | &head_referent, &head_type); |
1411 | 0 | if (ret < 0) |
1412 | 0 | goto done; |
1413 | 0 | ret = 0; |
1414 | |
1415 | 0 | for (i = 0; i < transaction->nr; i++) { |
1416 | 0 | ret = prepare_single_update(refs, tx_data, transaction, be, |
1417 | 0 | transaction->updates[i], i, |
1418 | 0 | &refnames_to_check, head_type, |
1419 | 0 | &head_referent, &referent, err); |
1420 | 0 | if (ret) { |
1421 | 0 | if (ref_transaction_maybe_set_rejected(transaction, i, |
1422 | 0 | ret, err)) { |
1423 | 0 | ret = 0; |
1424 | 0 | continue; |
1425 | 0 | } |
1426 | 0 | goto done; |
1427 | 0 | } |
1428 | 0 | } |
1429 | | |
1430 | 0 | ret = refs_verify_refnames_available(ref_store, &refnames_to_check, |
1431 | 0 | &transaction->refnames, NULL, |
1432 | 0 | transaction, |
1433 | 0 | transaction->flags & REF_TRANSACTION_FLAG_INITIAL, |
1434 | 0 | err); |
1435 | 0 | if (ret < 0) |
1436 | 0 | goto done; |
1437 | | |
1438 | 0 | transaction->backend_data = tx_data; |
1439 | 0 | transaction->state = REF_TRANSACTION_PREPARED; |
1440 | |
1441 | 0 | done: |
1442 | 0 | if (ret < 0) { |
1443 | 0 | free_transaction_data(tx_data); |
1444 | 0 | transaction->state = REF_TRANSACTION_CLOSED; |
1445 | 0 | if (!err->len) |
1446 | 0 | strbuf_addf(err, _("reftable: transaction prepare: %s"), |
1447 | 0 | reftable_error_str(ret)); |
1448 | 0 | } |
1449 | 0 | strbuf_release(&referent); |
1450 | 0 | strbuf_release(&head_referent); |
1451 | 0 | string_list_clear(&refnames_to_check, 1); |
1452 | |
1453 | 0 | return ret; |
1454 | 0 | } |
1455 | | |
1456 | | static int reftable_be_transaction_abort(struct ref_store *ref_store UNUSED, |
1457 | | struct ref_transaction *transaction, |
1458 | | struct strbuf *err UNUSED) |
1459 | 0 | { |
1460 | 0 | struct reftable_transaction_data *tx_data = transaction->backend_data; |
1461 | 0 | free_transaction_data(tx_data); |
1462 | 0 | transaction->state = REF_TRANSACTION_CLOSED; |
1463 | 0 | return 0; |
1464 | 0 | } |
1465 | | |
1466 | | static int transaction_update_cmp(const void *a, const void *b) |
1467 | 0 | { |
1468 | 0 | struct reftable_transaction_update *update_a = (struct reftable_transaction_update *)a; |
1469 | 0 | struct reftable_transaction_update *update_b = (struct reftable_transaction_update *)b; |
1470 | | |
1471 | | /* |
1472 | | * If there is an index set, it should take precedence (the default is 0). |
1473 | | * This ensures that updates with indexes are sorted amongst themselves. |
1474 | | */ |
1475 | 0 | if (update_a->update->index || update_b->update->index) |
1476 | 0 | return update_a->update->index - update_b->update->index; |
1477 | | |
1478 | 0 | return strcmp(update_a->update->refname, update_b->update->refname); |
1479 | 0 | } |
1480 | | |
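The comparator above lets an explicit per-update index take precedence over refname order, which keeps multi-entry reflog updates for one ref in the order their caller assigned. A standalone sketch of that ordering rule, using an invented demo struct rather than the real ref_update type:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_update {
	const char *refname;
	unsigned int index;	/* 0 means "no explicit index" */
};

static int demo_update_cmp(const void *a, const void *b)
{
	const struct demo_update *ua = a, *ub = b;

	/* An explicit index wins over name order. */
	if (ua->index || ub->index)
		return (int)ua->index - (int)ub->index;
	return strcmp(ua->refname, ub->refname);
}

int main(void)
{
	struct demo_update updates[] = {
		{ "refs/heads/b", 0 },
		{ "refs/heads/a", 2 },
		{ "refs/heads/a", 1 },
	};
	size_t n = sizeof(updates) / sizeof(updates[0]);

	qsort(updates, n, sizeof(updates[0]), demo_update_cmp);
	for (size_t i = 0; i < n; i++)
		printf("%s (index %u)\n", updates[i].refname, updates[i].index);
	return 0;
}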
1481 | | static int write_transaction_table(struct reftable_writer *writer, void *cb_data) |
1482 | 0 | { |
1483 | 0 | struct write_transaction_table_arg *arg = cb_data; |
1484 | 0 | uint64_t ts = reftable_stack_next_update_index(arg->be->stack); |
1485 | 0 | struct reftable_log_record *logs = NULL; |
1486 | 0 | struct ident_split committer_ident = {0}; |
1487 | 0 | size_t logs_nr = 0, logs_alloc = 0, i; |
1488 | 0 | const char *committer_info; |
1489 | 0 | int ret = 0; |
1490 | |
1491 | 0 | committer_info = git_committer_info(0); |
1492 | 0 | if (split_ident_line(&committer_ident, committer_info, strlen(committer_info))) |
1493 | 0 | BUG("failed splitting committer info"); |
1494 | | |
1495 | 0 | QSORT(arg->updates, arg->updates_nr, transaction_update_cmp); |
1496 | | |
1497 | | /* |
1498 | | * During reflog migration, we add indexes for a single reflog with |
1499 | | * multiple entries. Each entry will contain a different update_index, |
1500 | | * so set the limits accordingly. |
1501 | | */ |
1502 | 0 | ret = reftable_writer_set_limits(writer, ts, ts + arg->max_index); |
1503 | 0 | if (ret < 0) |
1504 | 0 | goto done; |
1505 | | |
1506 | 0 | for (i = 0; i < arg->updates_nr; i++) { |
1507 | 0 | struct reftable_transaction_update *tx_update = &arg->updates[i]; |
1508 | 0 | struct ref_update *u = tx_update->update; |
1509 | |
1510 | 0 | if (u->rejection_err) |
1511 | 0 | continue; |
1512 | | |
1513 | | /* |
1514 | | * Write a reflog entry when updating a ref to point to |
1515 | | * something new in any of the following cases: |
1516 | | * |
1517 | | * - The reference is about to be deleted. We always want to |
1518 | | * delete the reflog in that case. |
1519 | | * - REF_FORCE_CREATE_REFLOG is set, asking us to always create |
1520 | | * the reflog entry. |
1521 | | * - `core.logAllRefUpdates` tells us to create the reflog for |
1522 | | * the given ref. |
1523 | | */ |
1524 | 0 | if ((u->flags & REF_HAVE_NEW) && |
1525 | 0 | !(u->type & REF_ISSYMREF) && |
1526 | 0 | ref_update_has_null_new_value(u)) { |
1527 | 0 | struct reftable_log_record log = {0}; |
1528 | 0 | struct reftable_iterator it = {0}; |
1529 | |
1530 | 0 | ret = reftable_stack_init_log_iterator(arg->be->stack, &it); |
1531 | 0 | if (ret < 0) |
1532 | 0 | goto done; |
1533 | | |
1534 | | /* |
1535 | | * When deleting refs we also delete all reflog entries |
1536 | | * with them. While it is not strictly required to |
1537 | | * delete reflogs together with their refs, this |
1538 | | * matches the behaviour of the files backend. |
1539 | | * |
1540 | | * Unfortunately, we have no better way than to delete |
1541 | | * all reflog entries one by one. |
1542 | | */ |
1543 | 0 | ret = reftable_iterator_seek_log(&it, u->refname); |
1544 | 0 | while (ret == 0) { |
1545 | 0 | struct reftable_log_record *tombstone; |
1546 | |
1547 | 0 | ret = reftable_iterator_next_log(&it, &log); |
1548 | 0 | if (ret < 0) |
1549 | 0 | break; |
1550 | 0 | if (ret > 0 || strcmp(log.refname, u->refname)) { |
1551 | 0 | ret = 0; |
1552 | 0 | break; |
1553 | 0 | } |
1554 | | |
1555 | 0 | ALLOC_GROW(logs, logs_nr + 1, logs_alloc); |
1556 | 0 | tombstone = &logs[logs_nr++]; |
1557 | 0 | tombstone->refname = xstrdup(u->refname); |
1558 | 0 | tombstone->value_type = REFTABLE_LOG_DELETION; |
1559 | 0 | tombstone->update_index = log.update_index; |
1560 | 0 | } |
1561 | |
1562 | 0 | reftable_log_record_release(&log); |
1563 | 0 | reftable_iterator_destroy(&it); |
1564 | |
1565 | 0 | if (ret) |
1566 | 0 | goto done; |
1567 | 0 | } else if (!(u->flags & REF_SKIP_CREATE_REFLOG) && |
1568 | 0 | (u->flags & REF_HAVE_NEW) && |
1569 | 0 | (u->flags & REF_FORCE_CREATE_REFLOG || |
1570 | 0 | should_write_log(arg->refs, u->refname))) { |
1571 | 0 | struct reftable_log_record *log; |
1572 | 0 | int create_reflog = 1; |
1573 | |
1574 | 0 | if (u->new_target) { |
1575 | 0 | if (!refs_resolve_ref_unsafe(&arg->refs->base, u->new_target, |
1576 | 0 | RESOLVE_REF_READING, &u->new_oid, NULL)) { |
1577 | | /* |
1578 | | * TODO: currently we skip creating reflogs for dangling |
1579 | | * symref updates. It would, however, be nice to capture |
1580 | | * these as zero-OID updates. |
1581 | | */ |
1582 | 0 | create_reflog = 0; |
1583 | 0 | } |
1584 | 0 | } |
1585 | |
1586 | 0 | if (create_reflog) { |
1587 | 0 | struct ident_split c; |
1588 | |
1589 | 0 | ALLOC_GROW(logs, logs_nr + 1, logs_alloc); |
1590 | 0 | log = &logs[logs_nr++]; |
1591 | 0 | memset(log, 0, sizeof(*log)); |
1592 | |
1593 | 0 | if (u->committer_info) { |
1594 | 0 | if (split_ident_line(&c, u->committer_info, |
1595 | 0 | strlen(u->committer_info))) |
1596 | 0 | BUG("failed splitting committer info"); |
1597 | 0 | } else { |
1598 | 0 | c = committer_ident; |
1599 | 0 | } |
1600 | | |
1601 | 0 | fill_reftable_log_record(log, &c); |
1602 | | |
1603 | | /* |
1604 | | * Updates are sorted by the writer. So updates for the same |
1605 | | * refname need to contain different update indices. |
1606 | | */ |
1607 | 0 | log->update_index = ts + u->index; |
1608 | |
1609 | 0 | log->refname = xstrdup(u->refname); |
1610 | 0 | memcpy(log->value.update.new_hash, |
1611 | 0 | u->new_oid.hash, GIT_MAX_RAWSZ); |
1612 | 0 | memcpy(log->value.update.old_hash, |
1613 | 0 | tx_update->current_oid.hash, GIT_MAX_RAWSZ); |
1614 | 0 | log->value.update.message = |
1615 | 0 | xstrndup(u->msg, arg->refs->write_options.block_size / 2); |
1616 | 0 | } |
1617 | 0 | } |
1618 | | |
1619 | 0 | if (u->flags & REF_LOG_ONLY) |
1620 | 0 | continue; |
1621 | | |
1622 | 0 | if (u->new_target) { |
1623 | 0 | struct reftable_ref_record ref = { |
1624 | 0 | .refname = (char *)u->refname, |
1625 | 0 | .value_type = REFTABLE_REF_SYMREF, |
1626 | 0 | .value.symref = (char *)u->new_target, |
1627 | 0 | .update_index = ts, |
1628 | 0 | }; |
1629 | |
1630 | 0 | ret = reftable_writer_add_ref(writer, &ref); |
1631 | 0 | if (ret < 0) |
1632 | 0 | goto done; |
1633 | 0 | } else if ((u->flags & REF_HAVE_NEW) && ref_update_has_null_new_value(u)) { |
1634 | 0 | struct reftable_ref_record ref = { |
1635 | 0 | .refname = (char *)u->refname, |
1636 | 0 | .update_index = ts, |
1637 | 0 | .value_type = REFTABLE_REF_DELETION, |
1638 | 0 | }; |
1639 | |
1640 | 0 | ret = reftable_writer_add_ref(writer, &ref); |
1641 | 0 | if (ret < 0) |
1642 | 0 | goto done; |
1643 | 0 | } else if (u->flags & REF_HAVE_NEW) { |
1644 | 0 | struct reftable_ref_record ref = {0}; |
1645 | 0 | struct object_id peeled; |
1646 | 0 | int peel_error; |
1647 | |
1648 | 0 | ref.refname = (char *)u->refname; |
1649 | 0 | ref.update_index = ts; |
1650 | |
1651 | 0 | peel_error = peel_object(arg->refs->base.repo, &u->new_oid, &peeled, |
1652 | 0 | PEEL_OBJECT_VERIFY_TAGGED_OBJECT_TYPE); |
1653 | 0 | if (!peel_error) { |
1654 | 0 | ref.value_type = REFTABLE_REF_VAL2; |
1655 | 0 | memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ); |
1656 | 0 | memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ); |
1657 | 0 | } else if (!is_null_oid(&u->new_oid)) { |
1658 | 0 | ref.value_type = REFTABLE_REF_VAL1; |
1659 | 0 | memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ); |
1660 | 0 | } |
1661 | |
1662 | 0 | ret = reftable_writer_add_ref(writer, &ref); |
1663 | 0 | if (ret < 0) |
1664 | 0 | goto done; |
1665 | 0 | } |
1666 | 0 | } |
1667 | | |
1668 | | /* |
1669 | | * Logs are written at the end so that we do not have intermixed ref |
1670 | | * and log blocks. |
1671 | | */ |
1672 | 0 | if (logs) { |
1673 | 0 | ret = reftable_writer_add_logs(writer, logs, logs_nr); |
1674 | 0 | if (ret < 0) |
1675 | 0 | goto done; |
1676 | 0 | } |
1677 | | |
1678 | 0 | done: |
1679 | 0 | assert(ret != REFTABLE_API_ERROR); |
1680 | 0 | for (i = 0; i < logs_nr; i++) |
1681 | 0 | reftable_log_record_release(&logs[i]); |
1682 | 0 | free(logs); |
1683 | 0 | return ret; |
1684 | 0 | } |
1685 | | |
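The writer limits set in write_transaction_table span ts through ts + max_index because each queued update may carry its own index; every log record is then stamped ts + u->index, so several reflog entries for the same refname (as happens during reflog migration) stay distinct and ordered. A tiny standalone illustration of that numbering, with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Pretend the stack's next update index is 42. */
	uint64_t ts = 42;
	/* Three queued entries for one ref, with caller-assigned indices. */
	uint64_t indices[] = { 0, 1, 2 };
	size_t n = sizeof(indices) / sizeof(indices[0]);

	/* The writer's limits must span every index we are about to use. */
	uint64_t min_index = ts, max_index = ts + indices[n - 1];

	printf("writer limits: [%llu, %llu]\n",
	       (unsigned long long)min_index, (unsigned long long)max_index);
	for (size_t i = 0; i < n; i++)
		printf("entry %zu gets update_index %llu\n",
		       i, (unsigned long long)(ts + indices[i]));
	return 0;
}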
1686 | | static int reftable_be_transaction_finish(struct ref_store *ref_store UNUSED, |
1687 | | struct ref_transaction *transaction, |
1688 | | struct strbuf *err) |
1689 | 0 | { |
1690 | 0 | struct reftable_transaction_data *tx_data = transaction->backend_data; |
1691 | 0 | int ret = 0; |
1692 | |
1693 | 0 | for (size_t i = 0; i < tx_data->args_nr; i++) { |
1694 | 0 | tx_data->args[i].max_index = transaction->max_index; |
1695 | |
1696 | 0 | ret = reftable_addition_add(tx_data->args[i].addition, |
1697 | 0 | write_transaction_table, &tx_data->args[i]); |
1698 | 0 | if (ret < 0) |
1699 | 0 | goto done; |
1700 | | |
1701 | 0 | ret = reftable_addition_commit(tx_data->args[i].addition); |
1702 | 0 | if (ret < 0) |
1703 | 0 | goto done; |
1704 | 0 | } |
1705 | | |
1706 | 0 | done: |
1707 | 0 | assert(ret != REFTABLE_API_ERROR); |
1708 | 0 | free_transaction_data(tx_data); |
1709 | 0 | transaction->state = REF_TRANSACTION_CLOSED; |
1710 | |
1711 | 0 | if (ret) { |
1712 | 0 | strbuf_addf(err, _("reftable: transaction failure: %s"), |
1713 | 0 | reftable_error_str(ret)); |
1714 | 0 | return -1; |
1715 | 0 | } |
1716 | 0 | return ret; |
1717 | 0 | } |
1718 | | |
1719 | | static int reftable_be_optimize(struct ref_store *ref_store, |
1720 | | struct refs_optimize_opts *opts) |
1721 | 0 | { |
1722 | 0 | struct reftable_ref_store *refs = |
1723 | 0 | reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "optimize_refs"); |
1724 | 0 | struct reftable_stack *stack; |
1725 | 0 | int ret; |
1726 | |
1727 | 0 | if (refs->err) |
1728 | 0 | return refs->err; |
1729 | | |
1730 | 0 | stack = refs->worktree_backend.stack; |
1731 | 0 | if (!stack) |
1732 | 0 | stack = refs->main_backend.stack; |
1733 | |
1734 | 0 | if (opts->flags & REFS_OPTIMIZE_AUTO) |
1735 | 0 | ret = reftable_stack_auto_compact(stack); |
1736 | 0 | else |
1737 | 0 | ret = reftable_stack_compact_all(stack, NULL); |
1738 | 0 | if (ret < 0) { |
1739 | 0 | ret = error(_("unable to compact stack: %s"), |
1740 | 0 | reftable_error_str(ret)); |
1741 | 0 | goto out; |
1742 | 0 | } |
1743 | | |
1744 | 0 | ret = reftable_stack_clean(stack); |
1745 | 0 | if (ret) |
1746 | 0 | goto out; |
1747 | | |
1748 | 0 | out: |
1749 | 0 | return ret; |
1750 | 0 | } |
1751 | | |
1752 | | static int reftable_be_optimize_required(struct ref_store *ref_store, |
1753 | | struct refs_optimize_opts *opts, |
1754 | | bool *required) |
1755 | 0 | { |
1756 | 0 | struct reftable_ref_store *refs = reftable_be_downcast(ref_store, REF_STORE_READ, |
1757 | 0 | "optimize_refs_required"); |
1758 | 0 | struct reftable_stack *stack; |
1759 | 0 | bool use_heuristics = false; |
1760 | |
1761 | 0 | if (refs->err) |
1762 | 0 | return refs->err; |
1763 | | |
1764 | 0 | stack = refs->worktree_backend.stack; |
1765 | 0 | if (!stack) |
1766 | 0 | stack = refs->main_backend.stack; |
1767 | |
1768 | 0 | if (opts->flags & REFS_OPTIMIZE_AUTO) |
1769 | 0 | use_heuristics = true; |
1770 | |
1771 | 0 | return reftable_stack_compaction_required(stack, use_heuristics, |
1772 | 0 | required); |
1773 | 0 | } |
1774 | | |
1775 | | struct write_create_symref_arg { |
1776 | | struct reftable_ref_store *refs; |
1777 | | struct reftable_stack *stack; |
1778 | | struct strbuf *err; |
1779 | | const char *refname; |
1780 | | const char *target; |
1781 | | const char *logmsg; |
1782 | | }; |
1783 | | |
1784 | | struct write_copy_arg { |
1785 | | struct reftable_ref_store *refs; |
1786 | | struct reftable_backend *be; |
1787 | | const char *oldname; |
1788 | | const char *newname; |
1789 | | const char *logmsg; |
1790 | | int delete_old; |
1791 | | }; |
1792 | | |
1793 | | static int write_copy_table(struct reftable_writer *writer, void *cb_data) |
1794 | 0 | { |
1795 | 0 | struct write_copy_arg *arg = cb_data; |
1796 | 0 | uint64_t deletion_ts, creation_ts; |
1797 | 0 | struct reftable_ref_record old_ref = {0}, refs[2] = {0}; |
1798 | 0 | struct reftable_log_record old_log = {0}, *logs = NULL; |
1799 | 0 | struct reftable_iterator it = {0}; |
1800 | 0 | struct string_list skip = STRING_LIST_INIT_NODUP; |
1801 | 0 | struct ident_split committer_ident = {0}; |
1802 | 0 | struct strbuf errbuf = STRBUF_INIT; |
1803 | 0 | size_t logs_nr = 0, logs_alloc = 0, i; |
1804 | 0 | const char *committer_info; |
1805 | 0 | int ret; |
1806 | |
1807 | 0 | committer_info = git_committer_info(0); |
1808 | 0 | if (split_ident_line(&committer_ident, committer_info, strlen(committer_info))) |
1809 | 0 | BUG("failed splitting committer info"); |
1810 | | |
1811 | 0 | if (reftable_stack_read_ref(arg->be->stack, arg->oldname, &old_ref)) { |
1812 | 0 | ret = error(_("refname %s not found"), arg->oldname); |
1813 | 0 | goto done; |
1814 | 0 | } |
1815 | 0 | if (old_ref.value_type == REFTABLE_REF_SYMREF) { |
1816 | 0 | ret = error(_("refname %s is a symbolic ref, copying it is not supported"), |
1817 | 0 | arg->oldname); |
1818 | 0 | goto done; |
1819 | 0 | } |
1820 | | |
1821 | | /* |
1822 | | * There's nothing to do in case the old and new name are the same, so |
1823 | | * we exit early in that case. |
1824 | | */ |
1825 | 0 | if (!strcmp(arg->oldname, arg->newname)) { |
1826 | 0 | ret = 0; |
1827 | 0 | goto done; |
1828 | 0 | } |
1829 | | |
1830 | | /* |
1831 | | * Verify that the new refname is available. |
1832 | | */ |
1833 | 0 | if (arg->delete_old) |
1834 | 0 | string_list_insert(&skip, arg->oldname); |
1835 | 0 | ret = refs_verify_refname_available(&arg->refs->base, arg->newname, |
1836 | 0 | NULL, &skip, 0, &errbuf); |
1837 | 0 | if (ret < 0) { |
1838 | 0 | error("%s", errbuf.buf); |
1839 | 0 | goto done; |
1840 | 0 | } |
1841 | | |
1842 | | /* |
1843 | | * When deleting the old reference we have to use two update indices: |
1844 | | * once to delete the old ref and its reflog, and once to create the |
1845 | | * new ref and its reflog. They need to be staged with two separate |
1846 | | * indices because the new reflog needs to encode both the deletion of |
1847 | | * the old branch and the creation of the new branch, and we cannot do |
1848 | | * two changes to a reflog in a single update. |
1849 | | */ |
1850 | 0 | deletion_ts = creation_ts = reftable_stack_next_update_index(arg->be->stack); |
1851 | 0 | if (arg->delete_old) |
1852 | 0 | creation_ts++; |
1853 | 0 | ret = reftable_writer_set_limits(writer, deletion_ts, creation_ts); |
1854 | 0 | if (ret < 0) |
1855 | 0 | goto done; |
1856 | | |
1857 | | /* |
1858 | | * Add the new reference. If this is a rename then we also delete the |
1859 | | * old reference. |
1860 | | */ |
1861 | 0 | refs[0] = old_ref; |
1862 | 0 | refs[0].refname = xstrdup(arg->newname); |
1863 | 0 | refs[0].update_index = creation_ts; |
1864 | 0 | if (arg->delete_old) { |
1865 | 0 | refs[1].refname = xstrdup(arg->oldname); |
1866 | 0 | refs[1].value_type = REFTABLE_REF_DELETION; |
1867 | 0 | refs[1].update_index = deletion_ts; |
1868 | 0 | } |
1869 | 0 | ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1); |
1870 | 0 | if (ret < 0) |
1871 | 0 | goto done; |
1872 | | |
1873 | | /* |
1874 | | * When deleting the old branch we need to create a reflog entry on the |
1875 | | * new branch name that indicates that the old branch has been deleted |
1876 | | * and then recreated. This is a tad weird, but matches what the files |
1877 | | * backend does. |
1878 | | */ |
1879 | 0 | if (arg->delete_old) { |
1880 | 0 | struct strbuf head_referent = STRBUF_INIT; |
1881 | 0 | struct object_id head_oid; |
1882 | 0 | int append_head_reflog; |
1883 | 0 | unsigned head_type = 0; |
1884 | |
1885 | 0 | ALLOC_GROW(logs, logs_nr + 1, logs_alloc); |
1886 | 0 | memset(&logs[logs_nr], 0, sizeof(logs[logs_nr])); |
1887 | 0 | fill_reftable_log_record(&logs[logs_nr], &committer_ident); |
1888 | 0 | logs[logs_nr].refname = xstrdup(arg->newname); |
1889 | 0 | logs[logs_nr].update_index = deletion_ts; |
1890 | 0 | logs[logs_nr].value.update.message = |
1891 | 0 | xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2); |
1892 | 0 | memcpy(logs[logs_nr].value.update.old_hash, old_ref.value.val1, GIT_MAX_RAWSZ); |
1893 | 0 | logs_nr++; |
1894 | |
1895 | 0 | ret = reftable_backend_read_ref(arg->be, "HEAD", &head_oid, |
1896 | 0 | &head_referent, &head_type); |
1897 | 0 | if (ret < 0) |
1898 | 0 | goto done; |
1899 | 0 | append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname); |
1900 | 0 | strbuf_release(&head_referent); |
1901 | | |
1902 | | /* |
1903 | | * The files backend uses `refs_delete_ref()` to delete the old |
1904 | | * branch name, which will append a reflog entry for HEAD in |
1905 | | * case it points to the old branch. |
1906 | | */ |
1907 | 0 | if (append_head_reflog) { |
1908 | 0 | ALLOC_GROW(logs, logs_nr + 1, logs_alloc); |
1909 | 0 | logs[logs_nr] = logs[logs_nr - 1]; |
1910 | 0 | logs[logs_nr].refname = xstrdup("HEAD"); |
1911 | 0 | logs[logs_nr].value.update.name = |
1912 | 0 | xstrdup(logs[logs_nr].value.update.name); |
1913 | 0 | logs[logs_nr].value.update.email = |
1914 | 0 | xstrdup(logs[logs_nr].value.update.email); |
1915 | 0 | logs[logs_nr].value.update.message = |
1916 | 0 | xstrdup(logs[logs_nr].value.update.message); |
1917 | 0 | logs_nr++; |
1918 | 0 | } |
1919 | 0 | } |
1920 | | |
1921 | | /* |
1922 | | * Create the reflog entry for the newly created branch. |
1923 | | */ |
1924 | 0 | ALLOC_GROW(logs, logs_nr + 1, logs_alloc); |
1925 | 0 | memset(&logs[logs_nr], 0, sizeof(logs[logs_nr])); |
1926 | 0 | fill_reftable_log_record(&logs[logs_nr], &committer_ident); |
1927 | 0 | logs[logs_nr].refname = xstrdup(arg->newname); |
1928 | 0 | logs[logs_nr].update_index = creation_ts; |
1929 | 0 | logs[logs_nr].value.update.message = |
1930 | 0 | xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2); |
1931 | 0 | memcpy(logs[logs_nr].value.update.new_hash, old_ref.value.val1, GIT_MAX_RAWSZ); |
1932 | 0 | logs_nr++; |
1933 | | |
1934 | | /* |
1935 | | * In addition to writing the reflog entry for the new branch, we also |
1936 | | * copy over all log entries from the old reflog. Last but not least, |
1937 | | * when renaming we also have to delete all the old reflog entries. |
1938 | | */ |
1939 | 0 | ret = reftable_stack_init_log_iterator(arg->be->stack, &it); |
1940 | 0 | if (ret < 0) |
1941 | 0 | goto done; |
1942 | | |
1943 | 0 | ret = reftable_iterator_seek_log(&it, arg->oldname); |
1944 | 0 | if (ret < 0) |
1945 | 0 | goto done; |
1946 | | |
1947 | 0 | while (1) { |
1948 | 0 | ret = reftable_iterator_next_log(&it, &old_log); |
1949 | 0 | if (ret < 0) |
1950 | 0 | goto done; |
1951 | 0 | if (ret > 0 || strcmp(old_log.refname, arg->oldname)) { |
1952 | 0 | ret = 0; |
1953 | 0 | break; |
1954 | 0 | } |
1955 | | |
1956 | 0 | free(old_log.refname); |
1957 | | |
1958 | | /* |
1959 | | * Copy over the old reflog entry with the new refname. |
1960 | | */ |
1961 | 0 | ALLOC_GROW(logs, logs_nr + 1, logs_alloc); |
1962 | 0 | logs[logs_nr] = old_log; |
1963 | 0 | logs[logs_nr].refname = xstrdup(arg->newname); |
1964 | 0 | logs_nr++; |
1965 | | |
1966 | | /* |
1967 | | * Delete the old reflog entry in case we are renaming. |
1968 | | */ |
1969 | 0 | if (arg->delete_old) { |
1970 | 0 | ALLOC_GROW(logs, logs_nr + 1, logs_alloc); |
1971 | 0 | memset(&logs[logs_nr], 0, sizeof(logs[logs_nr])); |
1972 | 0 | logs[logs_nr].refname = xstrdup(arg->oldname); |
1973 | 0 | logs[logs_nr].value_type = REFTABLE_LOG_DELETION; |
1974 | 0 | logs[logs_nr].update_index = old_log.update_index; |
1975 | 0 | logs_nr++; |
1976 | 0 | } |
1977 | | |
1978 | | /* |
1979 | | * Transfer ownership of the log record we're iterating over to |
1980 | | * the array of log records. Otherwise, the pointers would get |
1981 | | * free'd or reallocated by the iterator. |
1982 | | */ |
1983 | 0 | memset(&old_log, 0, sizeof(old_log)); |
1984 | 0 | } |
1985 | | |
1986 | 0 | ret = reftable_writer_add_logs(writer, logs, logs_nr); |
1987 | 0 | if (ret < 0) |
1988 | 0 | goto done; |
1989 | | |
1990 | 0 | done: |
1991 | 0 | assert(ret != REFTABLE_API_ERROR); |
1992 | 0 | reftable_iterator_destroy(&it); |
1993 | 0 | string_list_clear(&skip, 0); |
1994 | 0 | strbuf_release(&errbuf); |
1995 | 0 | for (i = 0; i < logs_nr; i++) |
1996 | 0 | reftable_log_record_release(&logs[i]); |
1997 | 0 | free(logs); |
1998 | 0 | for (i = 0; i < ARRAY_SIZE(refs); i++) |
1999 | 0 | reftable_ref_record_release(&refs[i]); |
2000 | 0 | reftable_ref_record_release(&old_ref); |
2001 | 0 | reftable_log_record_release(&old_log); |
2002 | 0 | return ret; |
2003 | 0 | } |
2004 | | |
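write_copy_table allocates one update index for a plain copy and two for a rename: the old name's deletion is staged at deletion_ts and the new name's creation at creation_ts = deletion_ts + 1, because a single index cannot carry both reflog changes. The sketch below is an invented summary of roughly what a rename stages, not output from the backend:

#include <stdint.h>
#include <stdio.h>

struct demo_record {
	const char *refname;
	const char *kind;	/* "ref", "ref-deletion" or "log" */
	uint64_t update_index;
};

int main(void)
{
	uint64_t deletion_ts = 7;		/* pretend next update index */
	uint64_t creation_ts = deletion_ts + 1;	/* a rename needs a second index */

	struct demo_record records[] = {
		{ "refs/heads/new", "ref",          creation_ts },
		{ "refs/heads/old", "ref-deletion", deletion_ts },
		{ "refs/heads/new", "log",          deletion_ts }, /* "old branch deleted" entry */
		{ "refs/heads/new", "log",          creation_ts }, /* "new branch created" entry */
	};

	for (size_t i = 0; i < sizeof(records) / sizeof(records[0]); i++)
		printf("%-16s %-13s @%llu\n", records[i].refname, records[i].kind,
		       (unsigned long long)records[i].update_index);
	return 0;
}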
2005 | | static int reftable_be_rename_ref(struct ref_store *ref_store, |
2006 | | const char *oldrefname, |
2007 | | const char *newrefname, |
2008 | | const char *logmsg) |
2009 | 0 | { |
2010 | 0 | struct reftable_ref_store *refs = |
2011 | 0 | reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref"); |
2012 | 0 | struct write_copy_arg arg = { |
2013 | 0 | .refs = refs, |
2014 | 0 | .oldname = oldrefname, |
2015 | 0 | .newname = newrefname, |
2016 | 0 | .logmsg = logmsg, |
2017 | 0 | .delete_old = 1, |
2018 | 0 | }; |
2019 | 0 | int ret; |
2020 | |
2021 | 0 | ret = refs->err; |
2022 | 0 | if (ret < 0) |
2023 | 0 | goto done; |
2024 | | |
2025 | 0 | ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1); |
2026 | 0 | if (ret) |
2027 | 0 | goto done; |
2028 | 0 | ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg, |
2029 | 0 | REFTABLE_STACK_NEW_ADDITION_RELOAD); |
2030 | |
2031 | 0 | done: |
2032 | 0 | assert(ret != REFTABLE_API_ERROR); |
2033 | 0 | return ret; |
2034 | 0 | } |
2035 | | |
2036 | | static int reftable_be_copy_ref(struct ref_store *ref_store, |
2037 | | const char *oldrefname, |
2038 | | const char *newrefname, |
2039 | | const char *logmsg) |
2040 | 0 | { |
2041 | 0 | struct reftable_ref_store *refs = |
2042 | 0 | reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref"); |
2043 | 0 | struct write_copy_arg arg = { |
2044 | 0 | .refs = refs, |
2045 | 0 | .oldname = oldrefname, |
2046 | 0 | .newname = newrefname, |
2047 | 0 | .logmsg = logmsg, |
2048 | 0 | }; |
2049 | 0 | int ret; |
2050 | |
2051 | 0 | ret = refs->err; |
2052 | 0 | if (ret < 0) |
2053 | 0 | goto done; |
2054 | | |
2055 | 0 | ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1); |
2056 | 0 | if (ret) |
2057 | 0 | goto done; |
2058 | 0 | ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg, |
2059 | 0 | REFTABLE_STACK_NEW_ADDITION_RELOAD); |
2060 | |
2061 | 0 | done: |
2062 | 0 | assert(ret != REFTABLE_API_ERROR); |
2063 | 0 | return ret; |
2064 | 0 | } |
2065 | | |
2066 | | struct reftable_reflog_iterator { |
2067 | | struct ref_iterator base; |
2068 | | struct reftable_ref_store *refs; |
2069 | | struct reftable_iterator iter; |
2070 | | struct reftable_log_record log; |
2071 | | struct strbuf last_name; |
2072 | | int err; |
2073 | | }; |
2074 | | |
2075 | | static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator) |
2076 | 0 | { |
2077 | 0 | struct reftable_reflog_iterator *iter = |
2078 | 0 | (struct reftable_reflog_iterator *)ref_iterator; |
2079 | |
2080 | 0 | while (!iter->err) { |
2081 | 0 | iter->err = reftable_iterator_next_log(&iter->iter, &iter->log); |
2082 | 0 | if (iter->err) |
2083 | 0 | break; |
2084 | | |
2085 | | /* |
2086 | | * We want the refnames that we have reflogs for, so we skip if |
2087 | | * we've already produced this name. This could be faster by |
2088 | | * seeking directly to reflog@update_index==0. |
2089 | | */ |
2090 | 0 | if (!strcmp(iter->log.refname, iter->last_name.buf)) |
2091 | 0 | continue; |
2092 | | |
2093 | 0 | if (check_refname_format(iter->log.refname, |
2094 | 0 | REFNAME_ALLOW_ONELEVEL)) |
2095 | 0 | continue; |
2096 | | |
2097 | 0 | strbuf_reset(&iter->last_name); |
2098 | 0 | strbuf_addstr(&iter->last_name, iter->log.refname); |
2099 | 0 | iter->base.ref.name = iter->log.refname; |
2100 | |
2101 | 0 | break; |
2102 | 0 | } |
2103 | |
2104 | 0 | if (iter->err > 0) |
2105 | 0 | return ITER_DONE; |
2106 | 0 | if (iter->err < 0) |
2107 | 0 | return ITER_ERROR; |
2108 | 0 | return ITER_OK; |
2109 | 0 | } |
2110 | | |
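Because the log table stores one record per update of a refname, the advance function above collapses consecutive records with the same name via last_name so that each reflog shows up exactly once. The same dedup idea in isolation, over a made-up list of names:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Log records come back grouped by refname (one per update_index). */
	const char *log_refnames[] = {
		"refs/heads/main", "refs/heads/main", "refs/heads/main",
		"refs/heads/topic", "refs/heads/topic",
	};
	const char *last = "";

	for (size_t i = 0; i < sizeof(log_refnames) / sizeof(log_refnames[0]); i++) {
		if (!strcmp(log_refnames[i], last))
			continue; /* already produced this reflog name */
		last = log_refnames[i];
		printf("%s\n", last);
	}
	return 0;
}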
2111 | | static int reftable_reflog_iterator_seek(struct ref_iterator *ref_iterator UNUSED, |
2112 | | const char *refname UNUSED, |
2113 | | unsigned int flags UNUSED) |
2114 | 0 | { |
2115 | 0 | BUG("reftable reflog iterator cannot be seeked"); |
2116 | 0 | return -1; |
2117 | 0 | } |
2118 | | |
2119 | | static void reftable_reflog_iterator_release(struct ref_iterator *ref_iterator) |
2120 | 0 | { |
2121 | 0 | struct reftable_reflog_iterator *iter = |
2122 | 0 | (struct reftable_reflog_iterator *)ref_iterator; |
2123 | 0 | reftable_log_record_release(&iter->log); |
2124 | 0 | reftable_iterator_destroy(&iter->iter); |
2125 | 0 | strbuf_release(&iter->last_name); |
2126 | 0 | } |
2127 | | |
2128 | | static struct ref_iterator_vtable reftable_reflog_iterator_vtable = { |
2129 | | .advance = reftable_reflog_iterator_advance, |
2130 | | .seek = reftable_reflog_iterator_seek, |
2131 | | .release = reftable_reflog_iterator_release, |
2132 | | }; |
2133 | | |
2134 | | static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs, |
2135 | | struct reftable_stack *stack) |
2136 | 0 | { |
2137 | 0 | struct reftable_reflog_iterator *iter; |
2138 | 0 | int ret; |
2139 | |
2140 | 0 | iter = xcalloc(1, sizeof(*iter)); |
2141 | 0 | base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable); |
2142 | 0 | strbuf_init(&iter->last_name, 0); |
2143 | 0 | iter->refs = refs; |
2144 | |
2145 | 0 | ret = refs->err; |
2146 | 0 | if (ret) |
2147 | 0 | goto done; |
2148 | | |
2149 | 0 | ret = reftable_stack_reload(stack); |
2150 | 0 | if (ret < 0) |
2151 | 0 | goto done; |
2152 | | |
2153 | 0 | ret = reftable_stack_init_log_iterator(stack, &iter->iter); |
2154 | 0 | if (ret < 0) |
2155 | 0 | goto done; |
2156 | | |
2157 | 0 | ret = reftable_iterator_seek_log(&iter->iter, ""); |
2158 | 0 | if (ret < 0) |
2159 | 0 | goto done; |
2160 | | |
2161 | 0 | done: |
2162 | 0 | iter->err = ret; |
2163 | 0 | return iter; |
2164 | 0 | } |
2165 | | |
2166 | | static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store) |
2167 | 0 | { |
2168 | 0 | struct reftable_ref_store *refs = |
2169 | 0 | reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin"); |
2170 | 0 | struct reftable_reflog_iterator *main_iter, *worktree_iter; |
2171 | |
2172 | 0 | main_iter = reflog_iterator_for_stack(refs, refs->main_backend.stack); |
2173 | 0 | if (!refs->worktree_backend.stack) |
2174 | 0 | return &main_iter->base; |
2175 | | |
2176 | 0 | worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_backend.stack); |
2177 | |
2178 | 0 | return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base, |
2179 | 0 | ref_iterator_select, NULL); |
2180 | 0 | } |
2181 | | |
2182 | | static int yield_log_record(struct reftable_ref_store *refs, |
2183 | | struct reftable_log_record *log, |
2184 | | each_reflog_ent_fn fn, |
2185 | | void *cb_data) |
2186 | 0 | { |
2187 | 0 | struct object_id old_oid, new_oid; |
2188 | 0 | const char *full_committer; |
2189 | |
2190 | 0 | oidread(&old_oid, log->value.update.old_hash, refs->base.repo->hash_algo); |
2191 | 0 | oidread(&new_oid, log->value.update.new_hash, refs->base.repo->hash_algo); |
2192 | | |
2193 | | /* |
2194 | | * When both the old object ID and the new object ID are null |
2195 | | * then this is the reflog existence marker. The caller must |
2196 | | * not be aware of it. |
2197 | | */ |
2198 | 0 | if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) |
2199 | 0 | return 0; |
2200 | | |
2201 | 0 | full_committer = fmt_ident(log->value.update.name, log->value.update.email, |
2202 | 0 | WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE); |
2203 | 0 | return fn(log->refname, &old_oid, &new_oid, full_committer, |
2204 | 0 | log->value.update.time, log->value.update.tz_offset, |
2205 | 0 | log->value.update.message, cb_data); |
2206 | 0 | } |
2207 | | |
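A record whose old and new object IDs are both null is the reflog existence marker described in the comment above, and yield_log_record filters it out before invoking the callback. A self-contained sketch of that check, using a fixed 20-byte hash size purely for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DEMO_RAWSZ 20 /* SHA-1 sized, for the sketch only */

static bool demo_is_null_hash(const unsigned char *hash)
{
	static const unsigned char zero[DEMO_RAWSZ];
	return !memcmp(hash, zero, DEMO_RAWSZ);
}

int main(void)
{
	unsigned char old_hash[DEMO_RAWSZ] = { 0 };
	unsigned char new_hash[DEMO_RAWSZ] = { 0 };

	/* Both sides null: this is the existence marker, skip it. */
	if (demo_is_null_hash(old_hash) && demo_is_null_hash(new_hash))
		printf("existence marker, not shown to the callback\n");
	else
		printf("real reflog entry\n");
	return 0;
}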
2208 | | static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store, |
2209 | | const char *refname, |
2210 | | each_reflog_ent_fn fn, |
2211 | | void *cb_data) |
2212 | 0 | { |
2213 | 0 | struct reftable_ref_store *refs = |
2214 | 0 | reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse"); |
2215 | 0 | struct reftable_log_record log = {0}; |
2216 | 0 | struct reftable_iterator it = {0}; |
2217 | 0 | struct reftable_backend *be; |
2218 | 0 | int ret; |
2219 | |
2220 | 0 | if (refs->err < 0) |
2221 | 0 | return refs->err; |
2222 | | |
2223 | | /* |
2224 | | * TODO: we should adapt this callsite to reload the stack. There is no |
2225 | | * obvious reason why we shouldn't. |
2226 | | */ |
2227 | 0 | ret = backend_for(&be, refs, refname, &refname, 0); |
2228 | 0 | if (ret) |
2229 | 0 | goto done; |
2230 | | |
2231 | 0 | ret = reftable_stack_init_log_iterator(be->stack, &it); |
2232 | 0 | if (ret < 0) |
2233 | 0 | goto done; |
2234 | | |
2235 | 0 | ret = reftable_iterator_seek_log(&it, refname); |
2236 | 0 | while (!ret) { |
2237 | 0 | ret = reftable_iterator_next_log(&it, &log); |
2238 | 0 | if (ret < 0) |
2239 | 0 | break; |
2240 | 0 | if (ret > 0 || strcmp(log.refname, refname)) { |
2241 | 0 | ret = 0; |
2242 | 0 | break; |
2243 | 0 | } |
2244 | | |
2245 | 0 | ret = yield_log_record(refs, &log, fn, cb_data); |
2246 | 0 | if (ret) |
2247 | 0 | break; |
2248 | 0 | } |
2249 | |
2250 | 0 | done: |
2251 | 0 | reftable_log_record_release(&log); |
2252 | 0 | reftable_iterator_destroy(&it); |
2253 | 0 | return ret; |
2254 | 0 | } |
2255 | | |
2256 | | static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store, |
2257 | | const char *refname, |
2258 | | each_reflog_ent_fn fn, |
2259 | | void *cb_data) |
2260 | 0 | { |
2261 | 0 | struct reftable_ref_store *refs = |
2262 | 0 | reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent"); |
2263 | 0 | struct reftable_log_record *logs = NULL; |
2264 | 0 | struct reftable_iterator it = {0}; |
2265 | 0 | struct reftable_backend *be; |
2266 | 0 | size_t logs_alloc = 0, logs_nr = 0, i; |
2267 | 0 | int ret; |
2268 | |
2269 | 0 | if (refs->err < 0) |
2270 | 0 | return refs->err; |
2271 | | |
2272 | | /* |
2273 | | * TODO: we should adapt this callsite to reload the stack. There is no |
2274 | | * obvious reason why we shouldn't. |
2275 | | */ |
2276 | 0 | ret = backend_for(&be, refs, refname, &refname, 0); |
2277 | 0 | if (ret) |
2278 | 0 | goto done; |
2279 | | |
2280 | 0 | ret = reftable_stack_init_log_iterator(be->stack, &it); |
2281 | 0 | if (ret < 0) |
2282 | 0 | goto done; |
2283 | | |
2284 | 0 | ret = reftable_iterator_seek_log(&it, refname); |
2285 | 0 | while (!ret) { |
2286 | 0 | struct reftable_log_record log = {0}; |
2287 | |
2288 | 0 | ret = reftable_iterator_next_log(&it, &log); |
2289 | 0 | if (ret < 0) |
2290 | 0 | goto done; |
2291 | 0 | if (ret > 0 || strcmp(log.refname, refname)) { |
2292 | 0 | reftable_log_record_release(&log); |
2293 | 0 | ret = 0; |
2294 | 0 | break; |
2295 | 0 | } |
2296 | | |
2297 | 0 | ALLOC_GROW(logs, logs_nr + 1, logs_alloc); |
2298 | 0 | logs[logs_nr++] = log; |
2299 | 0 | } |
2300 | | |
2301 | 0 | for (i = logs_nr; i--;) { |
2302 | 0 | ret = yield_log_record(refs, &logs[i], fn, cb_data); |
2303 | 0 | if (ret) |
2304 | 0 | goto done; |
2305 | 0 | } |
2306 | | |
2307 | 0 | done: |
2308 | 0 | reftable_iterator_destroy(&it); |
2309 | 0 | for (i = 0; i < logs_nr; i++) |
2310 | 0 | reftable_log_record_release(&logs[i]); |
2311 | 0 | free(logs); |
2312 | 0 | return ret; |
2313 | 0 | } |
2314 | | |
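Reftable hands back reflog entries newest first, which is why the reverse variant can stream records straight to the callback while the forward variant above buffers everything and then walks the array backwards. A minimal illustration of the two traversal orders over made-up entries:

#include <stdio.h>

int main(void)
{
	/* Entries as the reftable yields them: newest first. */
	const char *newest_first[] = { "third commit", "second commit", "first commit" };
	size_t n = sizeof(newest_first) / sizeof(newest_first[0]);

	/* Reverse (newest-first) traversal can stream directly... */
	for (size_t i = 0; i < n; i++)
		printf("reverse: %s\n", newest_first[i]);

	/* ...while forward (oldest-first) order buffers and walks backwards. */
	for (size_t i = n; i--;)
		printf("forward: %s\n", newest_first[i]);
	return 0;
}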
2315 | | static int reftable_be_reflog_exists(struct ref_store *ref_store, |
2316 | | const char *refname) |
2317 | 0 | { |
2318 | 0 | struct reftable_ref_store *refs = |
2319 | 0 | reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists"); |
2320 | 0 | struct reftable_log_record log = {0}; |
2321 | 0 | struct reftable_iterator it = {0}; |
2322 | 0 | struct reftable_backend *be; |
2323 | 0 | int ret; |
2324 | |
2325 | 0 | ret = refs->err; |
2326 | 0 | if (ret < 0) |
2327 | 0 | goto done; |
2328 | | |
2329 | 0 | ret = backend_for(&be, refs, refname, &refname, 1); |
2330 | 0 | if (ret < 0) |
2331 | 0 | goto done; |
2332 | | |
2333 | 0 | ret = reftable_stack_init_log_iterator(be->stack, &it); |
2334 | 0 | if (ret < 0) |
2335 | 0 | goto done; |
2336 | | |
2337 | 0 | ret = reftable_iterator_seek_log(&it, refname); |
2338 | 0 | if (ret < 0) |
2339 | 0 | goto done; |
2340 | | |
2341 | | /* |
2342 | | * Check whether we get at least one log record for the given ref name. |
2343 | | * If so, the reflog exists, otherwise it doesn't. |
2344 | | */ |
2345 | 0 | ret = reftable_iterator_next_log(&it, &log); |
2346 | 0 | if (ret < 0) |
2347 | 0 | goto done; |
2348 | 0 | if (ret > 0) { |
2349 | 0 | ret = 0; |
2350 | 0 | goto done; |
2351 | 0 | } |
2352 | | |
2353 | 0 | ret = strcmp(log.refname, refname) == 0; |
2354 | |
2355 | 0 | done: |
2356 | 0 | reftable_iterator_destroy(&it); |
2357 | 0 | reftable_log_record_release(&log); |
2358 | 0 | if (ret < 0) |
2359 | 0 | ret = 0; |
2360 | 0 | return ret; |
2361 | 0 | } |
2362 | | |
2363 | | struct write_reflog_existence_arg { |
2364 | | struct reftable_ref_store *refs; |
2365 | | const char *refname; |
2366 | | struct reftable_stack *stack; |
2367 | | }; |
2368 | | |
2369 | | static int write_reflog_existence_table(struct reftable_writer *writer, |
2370 | | void *cb_data) |
2371 | 0 | { |
2372 | 0 | struct write_reflog_existence_arg *arg = cb_data; |
2373 | 0 | uint64_t ts = reftable_stack_next_update_index(arg->stack); |
2374 | 0 | struct reftable_log_record log = {0}; |
2375 | 0 | int ret; |
2376 | |
2377 | 0 | ret = reftable_stack_read_log(arg->stack, arg->refname, &log); |
2378 | 0 | if (ret <= 0) |
2379 | 0 | goto done; |
2380 | | |
2381 | 0 | ret = reftable_writer_set_limits(writer, ts, ts); |
2382 | 0 | if (ret < 0) |
2383 | 0 | goto done; |
2384 | | |
2385 | | /* |
2386 | | * The existence entry has both old and new object ID set to the |
2387 | | * null object ID. Our iterators are aware of this and will not present |
2388 | | * them to their callers. |
2389 | | */ |
2390 | 0 | log.refname = xstrdup(arg->refname); |
2391 | 0 | log.update_index = ts; |
2392 | 0 | log.value_type = REFTABLE_LOG_UPDATE; |
2393 | 0 | ret = reftable_writer_add_log(writer, &log); |
2394 | |
2395 | 0 | done: |
2396 | 0 | assert(ret != REFTABLE_API_ERROR); |
2397 | 0 | reftable_log_record_release(&log); |
2398 | 0 | return ret; |
2399 | 0 | } |
2400 | | |
2401 | | static int reftable_be_create_reflog(struct ref_store *ref_store, |
2402 | | const char *refname, |
2403 | | struct strbuf *errmsg UNUSED) |
2404 | 0 | { |
2405 | 0 | struct reftable_ref_store *refs = |
2406 | 0 | reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog"); |
2407 | 0 | struct reftable_backend *be; |
2408 | 0 | struct write_reflog_existence_arg arg = { |
2409 | 0 | .refs = refs, |
2410 | 0 | .refname = refname, |
2411 | 0 | }; |
2412 | 0 | int ret; |
2413 | |
2414 | 0 | ret = refs->err; |
2415 | 0 | if (ret < 0) |
2416 | 0 | goto done; |
2417 | | |
2418 | 0 | ret = backend_for(&be, refs, refname, &refname, 1); |
2419 | 0 | if (ret) |
2420 | 0 | goto done; |
2421 | 0 | arg.stack = be->stack; |
2422 | |
2423 | 0 | ret = reftable_stack_add(be->stack, &write_reflog_existence_table, &arg, |
2424 | 0 | REFTABLE_STACK_NEW_ADDITION_RELOAD); |
2425 | |
2426 | 0 | done: |
2427 | 0 | return ret; |
2428 | 0 | } |
2429 | | |
2430 | | struct write_reflog_delete_arg { |
2431 | | struct reftable_stack *stack; |
2432 | | const char *refname; |
2433 | | }; |
2434 | | |
2435 | | static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data) |
2436 | 0 | { |
2437 | 0 | struct write_reflog_delete_arg *arg = cb_data; |
2438 | 0 | struct reftable_log_record log = {0}, tombstone = {0}; |
2439 | 0 | struct reftable_iterator it = {0}; |
2440 | 0 | uint64_t ts = reftable_stack_next_update_index(arg->stack); |
2441 | 0 | int ret; |
2442 | |
2443 | 0 | ret = reftable_writer_set_limits(writer, ts, ts); |
2444 | 0 | if (ret < 0) |
2445 | 0 | goto out; |
2446 | | |
2447 | 0 | ret = reftable_stack_init_log_iterator(arg->stack, &it); |
2448 | 0 | if (ret < 0) |
2449 | 0 | goto out; |
2450 | | |
2451 | | /* |
2452 | | * In order to delete a reflog we need to delete all of its entries one |
2453 | | * by one. This is inefficient, but the reftable format does not have a |
2454 | | * better marker right now. |
2455 | | */ |
2456 | 0 | ret = reftable_iterator_seek_log(&it, arg->refname); |
2457 | 0 | while (ret == 0) { |
2458 | 0 | ret = reftable_iterator_next_log(&it, &log); |
2459 | 0 | if (ret < 0) |
2460 | 0 | break; |
2461 | 0 | if (ret > 0 || strcmp(log.refname, arg->refname)) { |
2462 | 0 | ret = 0; |
2463 | 0 | break; |
2464 | 0 | } |
2465 | | |
2466 | 0 | tombstone.refname = (char *)arg->refname; |
2467 | 0 | tombstone.value_type = REFTABLE_LOG_DELETION; |
2468 | 0 | tombstone.update_index = log.update_index; |
2469 | |
2470 | 0 | ret = reftable_writer_add_log(writer, &tombstone); |
2471 | 0 | } |
2472 | |
2473 | 0 | out: |
2474 | 0 | reftable_log_record_release(&log); |
2475 | 0 | reftable_iterator_destroy(&it); |
2476 | 0 | return ret; |
2477 | 0 | } |
2478 | | |
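As the comment in the function notes, the format has no single "drop this whole reflog" marker, so deletion writes one tombstone per existing entry, each reusing that entry's update_index. A trivial standalone rendering of that idea with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Update indices of the existing entries for one refname. */
	uint64_t existing[] = { 12, 9, 5 };

	/* One deletion record per entry, keyed by the same update_index. */
	for (size_t i = 0; i < sizeof(existing) / sizeof(existing[0]); i++)
		printf("tombstone refs/heads/topic @%llu\n",
		       (unsigned long long)existing[i]);
	return 0;
}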
2479 | | static int reftable_be_delete_reflog(struct ref_store *ref_store, |
2480 | | const char *refname) |
2481 | 0 | { |
2482 | 0 | struct reftable_ref_store *refs = |
2483 | 0 | reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog"); |
2484 | 0 | struct reftable_backend *be; |
2485 | 0 | struct write_reflog_delete_arg arg = { |
2486 | 0 | .refname = refname, |
2487 | 0 | }; |
2488 | 0 | int ret; |
2489 | |
2490 | 0 | ret = backend_for(&be, refs, refname, &refname, 1); |
2491 | 0 | if (ret) |
2492 | 0 | return ret; |
2493 | 0 | arg.stack = be->stack; |
2494 | |
2495 | 0 | ret = reftable_stack_add(be->stack, &write_reflog_delete_table, &arg, |
2496 | 0 | REFTABLE_STACK_NEW_ADDITION_RELOAD); |
2497 | |
2498 | 0 | assert(ret != REFTABLE_API_ERROR); |
2499 | 0 | return ret; |
2500 | 0 | } |
2501 | | |
2502 | | struct reflog_expiry_arg { |
2503 | | struct reftable_ref_store *refs; |
2504 | | struct reftable_stack *stack; |
2505 | | struct reftable_log_record *records; |
2506 | | struct object_id update_oid; |
2507 | | const char *refname; |
2508 | | size_t len; |
2509 | | }; |
2510 | | |
2511 | | static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data) |
2512 | 0 | { |
2513 | 0 | struct reflog_expiry_arg *arg = cb_data; |
2514 | 0 | uint64_t ts = reftable_stack_next_update_index(arg->stack); |
2515 | 0 | uint64_t live_records = 0; |
2516 | 0 | size_t i; |
2517 | 0 | int ret; |
2518 | |
2519 | 0 | for (i = 0; i < arg->len; i++) |
2520 | 0 | if (arg->records[i].value_type == REFTABLE_LOG_UPDATE) |
2521 | 0 | live_records++; |
2522 | |
2523 | 0 | ret = reftable_writer_set_limits(writer, ts, ts); |
2524 | 0 | if (ret < 0) |
2525 | 0 | return ret; |
2526 | | |
2527 | 0 | if (!is_null_oid(&arg->update_oid)) { |
2528 | 0 | struct reftable_ref_record ref = {0}; |
2529 | 0 | struct object_id peeled; |
2530 | |
2531 | 0 | ref.refname = (char *)arg->refname; |
2532 | 0 | ref.update_index = ts; |
2533 | |
2534 | 0 | if (!peel_object(arg->refs->base.repo, &arg->update_oid, &peeled, 0)) { |
2535 | 0 | ref.value_type = REFTABLE_REF_VAL2; |
2536 | 0 | memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ); |
2537 | 0 | memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ); |
2538 | 0 | } else { |
2539 | 0 | ref.value_type = REFTABLE_REF_VAL1; |
2540 | 0 | memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ); |
2541 | 0 | } |
2542 | |
2543 | 0 | ret = reftable_writer_add_ref(writer, &ref); |
2544 | 0 | if (ret < 0) |
2545 | 0 | return ret; |
2546 | 0 | } |
2547 | | |
2548 | | /* |
2549 | | * When there are no more entries left in the reflog we empty it |
2550 | | * completely, but write a placeholder reflog entry that indicates that |
2551 | | * the reflog still exists. |
2552 | | */ |
2553 | 0 | if (!live_records) { |
2554 | 0 | struct reftable_log_record log = { |
2555 | 0 | .refname = (char *)arg->refname, |
2556 | 0 | .value_type = REFTABLE_LOG_UPDATE, |
2557 | 0 | .update_index = ts, |
2558 | 0 | }; |
2559 | |
2560 | 0 | ret = reftable_writer_add_log(writer, &log); |
2561 | 0 | if (ret) |
2562 | 0 | return ret; |
2563 | 0 | } |
2564 | | |
2565 | 0 | for (i = 0; i < arg->len; i++) { |
2566 | 0 | ret = reftable_writer_add_log(writer, &arg->records[i]); |
2567 | 0 | if (ret) |
2568 | 0 | return ret; |
2569 | 0 | } |
2570 | | |
2571 | 0 | return 0; |
2572 | 0 | } |
2573 | | |
2574 | | static int reftable_be_reflog_expire(struct ref_store *ref_store, |
2575 | | const char *refname, |
2576 | | unsigned int flags, |
2577 | | reflog_expiry_prepare_fn prepare_fn, |
2578 | | reflog_expiry_should_prune_fn should_prune_fn, |
2579 | | reflog_expiry_cleanup_fn cleanup_fn, |
2580 | | void *policy_cb_data) |
2581 | 0 | { |
2582 | | /* |
2583 | | * For log expiry, we write tombstones for every single reflog entry |
2584 | | * that is to be expired. This means that the entries are still |
2585 | | * retrievable by delving into the stack, and expiring entries |
2586 | | * paradoxically takes extra memory. This memory is only reclaimed when |
2587 | | * compacting the reftable stack. |
2588 | | * |
2589 | | * It would be better if the refs backend supported an API that sets a |
2590 | | * criterion for all refs, passing the criterion to pack_refs(). |
2591 | | * |
2592 | | * On the plus side, because we do the expiration per ref, we can easily |
2593 | | * insert the reflog existence dummies. |
2594 | | */ |
2595 | 0 | struct reftable_ref_store *refs = |
2596 | 0 | reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire"); |
2597 | 0 | struct reftable_log_record *logs = NULL; |
2598 | 0 | struct reftable_log_record *rewritten = NULL; |
2599 | 0 | struct reftable_iterator it = {0}; |
2600 | 0 | struct reftable_addition *add = NULL; |
2601 | 0 | struct reflog_expiry_arg arg = {0}; |
2602 | 0 | struct reftable_backend *be; |
2603 | 0 | struct object_id oid = {0}; |
2604 | 0 | struct strbuf referent = STRBUF_INIT; |
2605 | 0 | uint8_t *last_hash = NULL; |
2606 | 0 | size_t logs_nr = 0, logs_alloc = 0, i; |
2607 | 0 | unsigned int type = 0; |
2608 | 0 | int ret; |
2609 | |
|
2610 | 0 | if (refs->err < 0) |
2611 | 0 | return refs->err; |
2612 | | |
2613 | 0 | ret = backend_for(&be, refs, refname, &refname, 1); |
2614 | 0 | if (ret < 0) |
2615 | 0 | goto done; |
2616 | | |
2617 | 0 | ret = reftable_stack_new_addition(&add, be->stack, |
2618 | 0 | REFTABLE_STACK_NEW_ADDITION_RELOAD); |
2619 | 0 | if (ret < 0) |
2620 | 0 | goto done; |
2621 | | |
2622 | 0 | ret = reftable_stack_init_log_iterator(be->stack, &it); |
2623 | 0 | if (ret < 0) |
2624 | 0 | goto done; |
2625 | | |
2626 | 0 | ret = reftable_iterator_seek_log(&it, refname); |
2627 | 0 | if (ret < 0) |
2628 | 0 | goto done; |
2629 | | |
2630 | 0 | ret = reftable_backend_read_ref(be, refname, &oid, &referent, &type); |
2631 | 0 | if (ret < 0) |
2632 | 0 | goto done; |
2633 | 0 | prepare_fn(refname, &oid, policy_cb_data); |
2634 | |
2635 | 0 | while (1) { |
2636 | 0 | struct reftable_log_record log = {0}; |
2637 | 0 | struct object_id old_oid, new_oid; |
2638 | |
2639 | 0 | ret = reftable_iterator_next_log(&it, &log); |
2640 | 0 | if (ret < 0) |
2641 | 0 | goto done; |
2642 | 0 | if (ret > 0 || strcmp(log.refname, refname)) { |
2643 | 0 | reftable_log_record_release(&log); |
2644 | 0 | break; |
2645 | 0 | } |
2646 | | |
2647 | 0 | oidread(&old_oid, log.value.update.old_hash, |
2648 | 0 | ref_store->repo->hash_algo); |
2649 | 0 | oidread(&new_oid, log.value.update.new_hash, |
2650 | 0 | ref_store->repo->hash_algo); |
2651 | | |
2652 | | /* |
2653 | | * Skip over the reflog existence marker. We will add it back |
2654 | | * in when there are no live reflog records. |
2655 | | */ |
2656 | 0 | if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) { |
2657 | 0 | reftable_log_record_release(&log); |
2658 | 0 | continue; |
2659 | 0 | } |
2660 | | |
2661 | 0 | ALLOC_GROW(logs, logs_nr + 1, logs_alloc); |
2662 | 0 | logs[logs_nr++] = log; |
2663 | 0 | } |
2664 | | |
2665 | | /* |
2666 | | * We need to rewrite all reflog entries according to the pruning |
2667 | | * callback function: |
2668 | | * |
2669 | | * - If a reflog entry shall be pruned we mark the record for |
2670 | | * deletion. |
2671 | | * |
2672 | | * - Otherwise we may have to rewrite the chain of reflog entries so |
2673 | | * that gaps created by just-deleted records get backfilled. |
2674 | | */ |
2675 | 0 | CALLOC_ARRAY(rewritten, logs_nr); |
2676 | 0 | for (i = logs_nr; i--;) { |
2677 | 0 | struct reftable_log_record *dest = &rewritten[i]; |
2678 | 0 | struct object_id old_oid, new_oid; |
2679 | |
2680 | 0 | *dest = logs[i]; |
2681 | 0 | oidread(&old_oid, logs[i].value.update.old_hash, |
2682 | 0 | ref_store->repo->hash_algo); |
2683 | 0 | oidread(&new_oid, logs[i].value.update.new_hash, |
2684 | 0 | ref_store->repo->hash_algo); |
2685 | |
|
2686 | 0 | if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email, |
2687 | 0 | (timestamp_t)logs[i].value.update.time, |
2688 | 0 | logs[i].value.update.tz_offset, |
2689 | 0 | logs[i].value.update.message, |
2690 | 0 | policy_cb_data)) { |
2691 | 0 | dest->value_type = REFTABLE_LOG_DELETION; |
2692 | 0 | } else { |
2693 | 0 | if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash) |
2694 | 0 | memcpy(dest->value.update.old_hash, last_hash, GIT_MAX_RAWSZ); |
2695 | 0 | last_hash = logs[i].value.update.new_hash; |
2696 | 0 | } |
2697 | 0 | } |
2698 | |
2699 | 0 | if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash && !is_null_oid(&oid)) |
2700 | 0 | oidread(&arg.update_oid, last_hash, ref_store->repo->hash_algo); |
2701 | |
2702 | 0 | arg.refs = refs; |
2703 | 0 | arg.records = rewritten; |
2704 | 0 | arg.len = logs_nr; |
2705 | 0 | arg.stack = be->stack; |
2706 | 0 | arg.refname = refname; |
2707 | |
2708 | 0 | ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg); |
2709 | 0 | if (ret < 0) |
2710 | 0 | goto done; |
2711 | | |
2712 | | /* |
2713 | | * Future improvement: we could skip writing records that were |
2714 | | * not changed. |
2715 | | */ |
2716 | 0 | if (!(flags & EXPIRE_REFLOGS_DRY_RUN)) |
2717 | 0 | ret = reftable_addition_commit(add); |
2718 | |
2719 | 0 | done: |
2720 | 0 | if (add) |
2721 | 0 | cleanup_fn(policy_cb_data); |
2722 | 0 | assert(ret != REFTABLE_API_ERROR); |
2723 | |
2724 | 0 | reftable_iterator_destroy(&it); |
2725 | 0 | reftable_addition_destroy(add); |
2726 | 0 | for (i = 0; i < logs_nr; i++) |
2727 | 0 | reftable_log_record_release(&logs[i]); |
2728 | 0 | strbuf_release(&referent); |
2729 | 0 | free(logs); |
2730 | 0 | free(rewritten); |
2731 | 0 | return ret; |
2732 | 0 | } |
2733 | | |
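When EXPIRE_REFLOGS_REWRITE is in effect, the expiry loop above carries last_hash so that each surviving entry's old value is backfilled with the previous surviving entry's new value, keeping the chain contiguous across pruned records. A standalone sketch of that chain repair, with single characters standing in for object IDs:

#include <stdbool.h>
#include <stdio.h>

struct demo_entry {
	char old_val, new_val;
	bool prune;
};

int main(void)
{
	/* Oldest first: a -> b -> c -> d, with the middle entry pruned. */
	struct demo_entry chain[] = {
		{ 'a', 'b', false },
		{ 'b', 'c', true  },	/* will be dropped */
		{ 'c', 'd', false },
	};
	char last = 0; /* new value of the previous surviving entry */

	for (size_t i = 0; i < sizeof(chain) / sizeof(chain[0]); i++) {
		if (chain[i].prune)
			continue;
		if (last)
			chain[i].old_val = last; /* backfill over the gap */
		last = chain[i].new_val;
		printf("%c -> %c\n", chain[i].old_val, chain[i].new_val);
	}
	return 0;
}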
2734 | | static void reftable_fsck_verbose_handler(const char *msg, void *cb_data) |
2735 | 0 | { |
2736 | 0 | struct fsck_options *o = cb_data; |
2737 | |
2738 | 0 | if (o->verbose) |
2739 | 0 | fprintf_ln(stderr, "%s", msg); |
2740 | 0 | } |
2741 | | |
2742 | | static const enum fsck_msg_id fsck_msg_id_map[] = { |
2743 | | [REFTABLE_FSCK_ERROR_TABLE_NAME] = FSCK_MSG_BAD_REFTABLE_TABLE_NAME, |
2744 | | }; |
2745 | | |
2746 | | static int reftable_fsck_error_handler(struct reftable_fsck_info *info, |
2747 | | void *cb_data) |
2748 | 0 | { |
2749 | 0 | struct fsck_ref_report report = { .path = info->path }; |
2750 | 0 | struct fsck_options *o = cb_data; |
2751 | 0 | enum fsck_msg_id msg_id; |
2752 | |
2753 | 0 | if (info->error < 0 || info->error >= REFTABLE_FSCK_MAX_VALUE) |
2754 | 0 | BUG("unknown fsck error: %d", (int)info->error); |
2755 | | |
2756 | 0 | msg_id = fsck_msg_id_map[info->error]; |
2757 | |
2758 | 0 | if (!msg_id) |
2759 | 0 | BUG("fsck_msg_id value missing for reftable error: %d", (int)info->error); |
2760 | | |
2761 | 0 | return fsck_report_ref(o, &report, msg_id, "%s", info->msg); |
2762 | 0 | } |
2763 | | |
2764 | | static int reftable_be_fsck(struct ref_store *ref_store, struct fsck_options *o, |
2765 | | struct worktree *wt) |
2766 | 0 | { |
2767 | 0 | struct reftable_ref_store *refs = |
2768 | 0 | reftable_be_downcast(ref_store, REF_STORE_READ, "fsck"); |
2769 | 0 | struct reftable_ref_iterator *iter = NULL; |
2770 | 0 | struct reftable_ref_record ref = { 0 }; |
2771 | 0 | struct fsck_ref_report report = { 0 }; |
2772 | 0 | struct strbuf refname = STRBUF_INIT; |
2773 | 0 | struct reftable_backend *backend; |
2774 | 0 | int ret, errors = 0; |
2775 | |
2776 | 0 | if (is_main_worktree(wt)) { |
2777 | 0 | backend = &refs->main_backend; |
2778 | 0 | } else { |
2779 | 0 | ret = backend_for_worktree(&backend, refs, wt->id); |
2780 | 0 | if (ret < 0) { |
2781 | 0 | ret = error(_("reftable stack for worktree '%s' is broken"), |
2782 | 0 | wt->id); |
2783 | 0 | goto out; |
2784 | 0 | } |
2785 | 0 | } |
2786 | | |
2787 | 0 | errors |= reftable_fsck_check(backend->stack, reftable_fsck_error_handler, |
2788 | 0 | reftable_fsck_verbose_handler, o); |
2789 | |
2790 | 0 | iter = ref_iterator_for_stack(refs, backend->stack, "", NULL, 0); |
2791 | 0 | if (!iter) { |
2792 | 0 | ret = error(_("could not create iterator for worktree '%s'"), wt->id); |
2793 | 0 | goto out; |
2794 | 0 | } |
2795 | | |
2796 | 0 | while (1) { |
2797 | 0 | ret = reftable_iterator_next_ref(&iter->iter, &ref); |
2798 | 0 | if (ret > 0) |
2799 | 0 | break; |
2800 | 0 | if (ret < 0) { |
2801 | 0 | ret = error(_("could not read record for worktree '%s'"), wt->id); |
2802 | 0 | goto out; |
2803 | 0 | } |
2804 | | |
2805 | 0 | strbuf_reset(&refname); |
2806 | 0 | if (!is_main_worktree(wt)) |
2807 | 0 | strbuf_addf(&refname, "worktrees/%s/", wt->id); |
2808 | 0 | strbuf_addstr(&refname, ref.refname); |
2809 | 0 | report.path = refname.buf; |
2810 | |
2811 | 0 | switch (ref.value_type) { |
2812 | 0 | case REFTABLE_REF_VAL1: |
2813 | 0 | case REFTABLE_REF_VAL2: { |
2814 | 0 | struct object_id oid; |
2815 | 0 | unsigned hash_id; |
2816 | |
2817 | 0 | switch (reftable_stack_hash_id(backend->stack)) { |
2818 | 0 | case REFTABLE_HASH_SHA1: |
2819 | 0 | hash_id = GIT_HASH_SHA1; |
2820 | 0 | break; |
2821 | 0 | case REFTABLE_HASH_SHA256: |
2822 | 0 | hash_id = GIT_HASH_SHA256; |
2823 | 0 | break; |
2824 | 0 | default: |
2825 | 0 | BUG("unhandled hash ID %d", |
2826 | 0 | reftable_stack_hash_id(backend->stack)); |
2827 | 0 | } |
2828 | | |
2829 | 0 | oidread(&oid, reftable_ref_record_val1(&ref), |
2830 | 0 | &hash_algos[hash_id]); |
2831 | |
2832 | 0 | errors |= refs_fsck_ref(ref_store, o, &report, ref.refname, &oid); |
2833 | 0 | break; |
2834 | 0 | } |
2835 | 0 | case REFTABLE_REF_SYMREF: |
2836 | 0 | errors |= refs_fsck_symref(ref_store, o, &report, ref.refname, |
2837 | 0 | ref.value.symref); |
2838 | 0 | break; |
2839 | 0 | default: |
2840 | 0 | BUG("unhandled reference value type %d", ref.value_type); |
2841 | 0 | } |
2842 | 0 | } |
2843 | | |
2844 | 0 | ret = errors ? -1 : 0; |
2845 | |
2846 | 0 | out: |
2847 | 0 | if (iter) |
2848 | 0 | ref_iterator_free(&iter->base); |
2849 | 0 | reftable_ref_record_release(&ref); |
2850 | 0 | strbuf_release(&refname); |
2851 | 0 | return ret; |
2852 | 0 | } |
2853 | | |
2854 | | struct ref_storage_be refs_be_reftable = { |
2855 | | .name = "reftable", |
2856 | | .init = reftable_be_init, |
2857 | | .release = reftable_be_release, |
2858 | | .create_on_disk = reftable_be_create_on_disk, |
2859 | | .remove_on_disk = reftable_be_remove_on_disk, |
2860 | | |
2861 | | .transaction_prepare = reftable_be_transaction_prepare, |
2862 | | .transaction_finish = reftable_be_transaction_finish, |
2863 | | .transaction_abort = reftable_be_transaction_abort, |
2864 | | |
2865 | | .optimize = reftable_be_optimize, |
2866 | | .optimize_required = reftable_be_optimize_required, |
2867 | | |
2868 | | .rename_ref = reftable_be_rename_ref, |
2869 | | .copy_ref = reftable_be_copy_ref, |
2870 | | |
2871 | | .iterator_begin = reftable_be_iterator_begin, |
2872 | | .read_raw_ref = reftable_be_read_raw_ref, |
2873 | | .read_symbolic_ref = reftable_be_read_symbolic_ref, |
2874 | | |
2875 | | .reflog_iterator_begin = reftable_be_reflog_iterator_begin, |
2876 | | .for_each_reflog_ent = reftable_be_for_each_reflog_ent, |
2877 | | .for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse, |
2878 | | .reflog_exists = reftable_be_reflog_exists, |
2879 | | .create_reflog = reftable_be_create_reflog, |
2880 | | .delete_reflog = reftable_be_delete_reflog, |
2881 | | .reflog_expire = reftable_be_reflog_expire, |
2882 | | |
2883 | | .fsck = reftable_be_fsck, |
2884 | | }; |