/src/git/refs/files-backend.c
Line | Count | Source |
1 | | #define USE_THE_REPOSITORY_VARIABLE |
2 | | #define DISABLE_SIGN_COMPARE_WARNINGS |
3 | | |
4 | | #include "../git-compat-util.h" |
5 | | #include "../abspath.h" |
6 | | #include "../config.h" |
7 | | #include "../copy.h" |
8 | | #include "../environment.h" |
9 | | #include "../gettext.h" |
10 | | #include "../hash.h" |
11 | | #include "../hex.h" |
12 | | #include "../fsck.h" |
13 | | #include "../refs.h" |
14 | | #include "../repo-settings.h" |
15 | | #include "refs-internal.h" |
16 | | #include "ref-cache.h" |
17 | | #include "packed-backend.h" |
18 | | #include "../ident.h" |
19 | | #include "../iterator.h" |
20 | | #include "../dir-iterator.h" |
21 | | #include "../lockfile.h" |
22 | | #include "../object.h" |
23 | | #include "../path.h" |
24 | | #include "../dir.h" |
25 | | #include "../chdir-notify.h" |
26 | | #include "../setup.h" |
27 | | #include "../worktree.h" |
28 | | #include "../wrapper.h" |
29 | | #include "../write-or-die.h" |
30 | | #include "../revision.h" |
31 | | #include <wildmatch.h> |
32 | | |
33 | | /* |
34 | | * This backend uses the following flags in `ref_update::flags` for |
35 | | * internal bookkeeping purposes. Their numerical values must not |
36 | | * conflict with REF_NO_DEREF, REF_FORCE_CREATE_REFLOG, REF_HAVE_NEW, |
37 | | * or REF_HAVE_OLD, which are also stored in `ref_update::flags`. |
38 | | */ |
39 | | |
40 | | /* |
41 | | * Used as a flag in ref_update::flags when a loose ref is being |
42 | | * pruned. This flag must only be used when REF_NO_DEREF is set. |
43 | | */ |
44 | 0 | #define REF_IS_PRUNING (1 << 4) |
45 | | |
46 | | /* |
47 | | * Flag used when locking a ref, telling the backend to tolerate broken |
48 | | * refs (i.e., because the reference is about to be deleted anyway). |
49 | | */ |
50 | 0 | #define REF_DELETING (1 << 5) |
51 | | |
52 | | /* |
53 | | * Used as a flag in ref_update::flags when the lockfile needs to be |
54 | | * committed. |
55 | | */ |
56 | 0 | #define REF_NEEDS_COMMIT (1 << 6) |
57 | | |
58 | | /* |
59 | | * Used as a flag in ref_update::flags when the ref_update was via an |
60 | | * update to HEAD. |
61 | | */ |
62 | 0 | #define REF_UPDATE_VIA_HEAD (1 << 8) |
63 | | |
64 | | /* |
65 | | * Used as a flag in ref_update::flags when a reference has been |
66 | | * deleted and the ref's parent directories may need cleanup. |
67 | | */ |
68 | 0 | #define REF_DELETED_RMDIR (1 << 9) |
69 | | |
70 | | /* |
71 | | * Used to indicate that the reflog-only update has been created via |
72 | | * `split_head_update()`. |
73 | | */ |
74 | 0 | #define REF_LOG_VIA_SPLIT (1 << 14) |
75 | | |
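A minimal sketch of how these internal bits combine with the public REF_*
flags in a single `ref_update::flags` word (mirroring the combination used by
prune_ref() further down; the update itself is hypothetical):

    unsigned int flags = REF_NO_DEREF | REF_HAVE_NEW | REF_HAVE_OLD |
                         REF_IS_PRUNING;

    if (flags & REF_IS_PRUNING) {
        /* Pruning a loose ref; only valid together with REF_NO_DEREF. */
    }
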
76 | | struct ref_lock { |
77 | | char *ref_name; |
78 | | struct lock_file lk; |
79 | | struct object_id old_oid; |
80 | | unsigned int count; /* track users of the lock (ref update + reflog updates) */ |
81 | | }; |
82 | | |
83 | | struct files_ref_store { |
84 | | struct ref_store base; |
85 | | unsigned int store_flags; |
86 | | |
87 | | char *gitcommondir; |
88 | | enum log_refs_config log_all_ref_updates; |
89 | | int prefer_symlink_refs; |
90 | | |
91 | | struct ref_cache *loose; |
92 | | |
93 | | struct ref_store *packed_ref_store; |
94 | | }; |
95 | | |
96 | | static void clear_loose_ref_cache(struct files_ref_store *refs) |
97 | 0 | { |
98 | 0 | if (refs->loose) { |
99 | 0 | free_ref_cache(refs->loose); |
100 | 0 | refs->loose = NULL; |
101 | 0 | } |
102 | 0 | } |
103 | | |
104 | | /* |
105 | | * Create a new files ref store for the given repository and gitdir, |
106 | | * including its associated packed-refs store. |
107 | | */ |
108 | | static struct ref_store *files_ref_store_init(struct repository *repo, |
109 | | const char *gitdir, |
110 | | unsigned int flags) |
111 | 0 | { |
112 | 0 | struct files_ref_store *refs = xcalloc(1, sizeof(*refs)); |
113 | 0 | struct ref_store *ref_store = (struct ref_store *)refs; |
114 | 0 | struct strbuf sb = STRBUF_INIT; |
115 | |
|
116 | 0 | base_ref_store_init(ref_store, repo, gitdir, &refs_be_files); |
117 | 0 | refs->store_flags = flags; |
118 | 0 | get_common_dir_noenv(&sb, gitdir); |
119 | 0 | refs->gitcommondir = strbuf_detach(&sb, NULL); |
120 | 0 | refs->packed_ref_store = |
121 | 0 | packed_ref_store_init(repo, refs->gitcommondir, flags); |
122 | 0 | refs->log_all_ref_updates = repo_settings_get_log_all_ref_updates(repo); |
123 | 0 | repo_config_get_bool(repo, "core.prefersymlinkrefs", &refs->prefer_symlink_refs); |
124 | |
|
125 | 0 | chdir_notify_reparent("files-backend $GIT_DIR", &refs->base.gitdir); |
126 | 0 | chdir_notify_reparent("files-backend $GIT_COMMONDIR", |
127 | 0 | &refs->gitcommondir); |
128 | |
|
129 | 0 | return ref_store; |
130 | 0 | } |
131 | | |
132 | | /* |
133 | | * Die if refs is not the main ref store. caller is used in any |
134 | | * necessary error messages. |
135 | | */ |
136 | | static void files_assert_main_repository(struct files_ref_store *refs, |
137 | | const char *caller) |
138 | 0 | { |
139 | 0 | if (refs->store_flags & REF_STORE_MAIN) |
140 | 0 | return; |
141 | | |
142 | 0 | BUG("operation %s only allowed for main ref store", caller); |
143 | 0 | } |
144 | | |
145 | | /* |
146 | | * Downcast ref_store to files_ref_store. Die if ref_store is not a |
147 | | * files_ref_store. required_flags is compared with ref_store's |
148 | | * store_flags to ensure the ref_store has all required capabilities. |
149 | | * "caller" is used in any necessary error messages. |
150 | | */ |
151 | | static struct files_ref_store *files_downcast(struct ref_store *ref_store, |
152 | | unsigned int required_flags, |
153 | | const char *caller) |
154 | 0 | { |
155 | 0 | struct files_ref_store *refs; |
156 | |
|
157 | 0 | if (ref_store->be != &refs_be_files) |
158 | 0 | BUG("ref_store is type \"%s\" not \"files\" in %s", |
159 | 0 | ref_store->be->name, caller); |
160 | | |
161 | 0 | refs = (struct files_ref_store *)ref_store; |
162 | |
|
163 | 0 | if ((refs->store_flags & required_flags) != required_flags) |
164 | 0 | BUG("operation %s requires abilities 0x%x, but only have 0x%x", |
165 | 0 | caller, required_flags, refs->store_flags); |
166 | | |
167 | 0 | return refs; |
168 | 0 | } |
169 | | |
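As a usage sketch (the caller name and flag combination here are illustrative,
not taken from this file): a backend entry point that needs both read and
write access would downcast its ref_store argument like so, and
files_downcast() calls BUG() if the store lacks those capabilities:

    struct files_ref_store *refs =
        files_downcast(ref_store, REF_STORE_READ | REF_STORE_WRITE,
                       "my_operation");   /* hypothetical caller name */

Most of the vtable functions below start with exactly this pattern before
touching the loose or packed stores.
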
170 | | static void files_ref_store_release(struct ref_store *ref_store) |
171 | 0 | { |
172 | 0 | struct files_ref_store *refs = files_downcast(ref_store, 0, "release"); |
173 | 0 | free_ref_cache(refs->loose); |
174 | 0 | free(refs->gitcommondir); |
175 | 0 | ref_store_release(refs->packed_ref_store); |
176 | 0 | free(refs->packed_ref_store); |
177 | 0 | } |
178 | | |
179 | | static void files_reflog_path(struct files_ref_store *refs, |
180 | | struct strbuf *sb, |
181 | | const char *refname) |
182 | 0 | { |
183 | 0 | const char *bare_refname; |
184 | 0 | const char *wtname; |
185 | 0 | int wtname_len; |
186 | 0 | enum ref_worktree_type wt_type = parse_worktree_ref( |
187 | 0 | refname, &wtname, &wtname_len, &bare_refname); |
188 | |
|
189 | 0 | switch (wt_type) { |
190 | 0 | case REF_WORKTREE_CURRENT: |
191 | 0 | strbuf_addf(sb, "%s/logs/%s", refs->base.gitdir, refname); |
192 | 0 | break; |
193 | 0 | case REF_WORKTREE_SHARED: |
194 | 0 | case REF_WORKTREE_MAIN: |
195 | 0 | strbuf_addf(sb, "%s/logs/%s", refs->gitcommondir, bare_refname); |
196 | 0 | break; |
197 | 0 | case REF_WORKTREE_OTHER: |
198 | 0 | strbuf_addf(sb, "%s/worktrees/%.*s/logs/%s", refs->gitcommondir, |
199 | 0 | wtname_len, wtname, bare_refname); |
200 | 0 | break; |
201 | 0 | default: |
202 | 0 | BUG("unknown ref type %d of ref %s", wt_type, refname); |
203 | 0 | } |
204 | 0 | } |
205 | | |
206 | | static void files_ref_path(struct files_ref_store *refs, |
207 | | struct strbuf *sb, |
208 | | const char *refname) |
209 | 0 | { |
210 | 0 | const char *bare_refname; |
211 | 0 | const char *wtname; |
212 | 0 | int wtname_len; |
213 | 0 | enum ref_worktree_type wt_type = parse_worktree_ref( |
214 | 0 | refname, &wtname, &wtname_len, &bare_refname); |
215 | 0 | switch (wt_type) { |
216 | 0 | case REF_WORKTREE_CURRENT: |
217 | 0 | strbuf_addf(sb, "%s/%s", refs->base.gitdir, refname); |
218 | 0 | break; |
219 | 0 | case REF_WORKTREE_OTHER: |
220 | 0 | strbuf_addf(sb, "%s/worktrees/%.*s/%s", refs->gitcommondir, |
221 | 0 | wtname_len, wtname, bare_refname); |
222 | 0 | break; |
223 | 0 | case REF_WORKTREE_SHARED: |
224 | 0 | case REF_WORKTREE_MAIN: |
225 | 0 | strbuf_addf(sb, "%s/%s", refs->gitcommondir, bare_refname); |
226 | 0 | break; |
227 | 0 | default: |
228 | 0 | BUG("unknown ref type %d of ref %s", wt_type, refname); |
229 | 0 | } |
230 | 0 | } |
231 | | |
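A worked example may help here (the path prefixes are illustrative; the actual
values come from refs->base.gitdir and refs->gitcommondir as set up in
files_ref_store_init()):

    struct strbuf path = STRBUF_INIT;

    /* "refs/heads/main" is a shared ref: it lives under the common dir. */
    files_ref_path(refs, &path, "refs/heads/main");
    /* -> "<gitcommondir>/refs/heads/main" */

    strbuf_reset(&path);

    /* "HEAD" is per-worktree: it lives under this worktree's gitdir. */
    files_ref_path(refs, &path, "HEAD");
    /* -> "<gitdir>/HEAD" */

    strbuf_release(&path);

files_reflog_path() makes the same distinction, but under the "logs/"
hierarchy of the respective directory.
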
232 | | /* |
233 | | * Manually add refs/bisect, refs/rewritten and refs/worktree, which, being |
234 | | * per-worktree, might not appear in the directory listing for |
235 | | * refs/ in the main repo. |
236 | | */ |
237 | | static void add_per_worktree_entries_to_dir(struct ref_dir *dir, const char *dirname) |
238 | 0 | { |
239 | 0 | const char *prefixes[] = { "refs/bisect/", "refs/worktree/", "refs/rewritten/" }; |
240 | 0 | int ip; |
241 | |
|
242 | 0 | if (strcmp(dirname, "refs/")) |
243 | 0 | return; |
244 | | |
245 | 0 | for (ip = 0; ip < ARRAY_SIZE(prefixes); ip++) { |
246 | 0 | const char *prefix = prefixes[ip]; |
247 | 0 | int prefix_len = strlen(prefix); |
248 | 0 | struct ref_entry *child_entry; |
249 | 0 | int pos; |
250 | |
|
251 | 0 | pos = search_ref_dir(dir, prefix, prefix_len); |
252 | 0 | if (pos >= 0) |
253 | 0 | continue; |
254 | 0 | child_entry = create_dir_entry(dir->cache, prefix, prefix_len); |
255 | 0 | add_entry_to_dir(dir, child_entry); |
256 | 0 | } |
257 | 0 | } |
258 | | |
259 | | static void loose_fill_ref_dir_regular_file(struct files_ref_store *refs, |
260 | | const char *refname, |
261 | | struct ref_dir *dir) |
262 | 0 | { |
263 | 0 | struct object_id oid; |
264 | 0 | int flag; |
265 | 0 | const char *referent = refs_resolve_ref_unsafe(&refs->base, |
266 | 0 | refname, |
267 | 0 | RESOLVE_REF_READING, |
268 | 0 | &oid, &flag); |
269 | |
|
270 | 0 | if (!referent) { |
271 | 0 | oidclr(&oid, refs->base.repo->hash_algo); |
272 | 0 | flag |= REF_ISBROKEN; |
273 | 0 | } else if (is_null_oid(&oid)) { |
274 | | /* |
275 | | * It is so astronomically unlikely |
276 | | * that null_oid is the OID of an |
277 | | * actual object that we consider its |
278 | | * appearance in a loose reference |
279 | | * file to be repo corruption |
280 | | * (probably due to a software bug). |
281 | | */ |
282 | 0 | flag |= REF_ISBROKEN; |
283 | 0 | } |
284 | |
|
285 | 0 | if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) { |
286 | 0 | if (!refname_is_safe(refname)) |
287 | 0 | die("loose refname is dangerous: %s", refname); |
288 | 0 | oidclr(&oid, refs->base.repo->hash_algo); |
289 | 0 | flag |= REF_BAD_NAME | REF_ISBROKEN; |
290 | 0 | } |
291 | | |
292 | 0 | if (!(flag & REF_ISSYMREF)) |
293 | 0 | referent = NULL; |
294 | |
|
295 | 0 | add_entry_to_dir(dir, create_ref_entry(refname, referent, &oid, flag)); |
296 | 0 | } |
297 | | |
298 | | /* |
299 | | * Read the loose references from the namespace dirname into dir |
300 | | * (without recursing). dirname must end with '/'. dir must be the |
301 | | * directory entry corresponding to dirname. |
302 | | */ |
303 | | static void loose_fill_ref_dir(struct ref_store *ref_store, |
304 | | struct ref_dir *dir, const char *dirname) |
305 | 0 | { |
306 | 0 | struct files_ref_store *refs = |
307 | 0 | files_downcast(ref_store, REF_STORE_READ, "fill_ref_dir"); |
308 | 0 | DIR *d; |
309 | 0 | struct dirent *de; |
310 | 0 | int dirnamelen = strlen(dirname); |
311 | 0 | struct strbuf refname; |
312 | 0 | struct strbuf path = STRBUF_INIT; |
313 | |
|
314 | 0 | files_ref_path(refs, &path, dirname); |
315 | |
|
316 | 0 | d = opendir(path.buf); |
317 | 0 | if (!d) { |
318 | 0 | strbuf_release(&path); |
319 | 0 | return; |
320 | 0 | } |
321 | | |
322 | 0 | strbuf_init(&refname, dirnamelen + 257); |
323 | 0 | strbuf_add(&refname, dirname, dirnamelen); |
324 | |
|
325 | 0 | while ((de = readdir(d)) != NULL) { |
326 | 0 | unsigned char dtype; |
327 | |
|
328 | 0 | if (de->d_name[0] == '.') |
329 | 0 | continue; |
330 | 0 | if (ends_with(de->d_name, ".lock")) |
331 | 0 | continue; |
332 | 0 | strbuf_addstr(&refname, de->d_name); |
333 | |
|
334 | 0 | dtype = get_dtype(de, &path, 1); |
335 | 0 | if (dtype == DT_DIR) { |
336 | 0 | strbuf_addch(&refname, '/'); |
337 | 0 | add_entry_to_dir(dir, |
338 | 0 | create_dir_entry(dir->cache, refname.buf, |
339 | 0 | refname.len)); |
340 | 0 | } else if (dtype == DT_REG) { |
341 | 0 | loose_fill_ref_dir_regular_file(refs, refname.buf, dir); |
342 | 0 | } |
343 | 0 | strbuf_setlen(&refname, dirnamelen); |
344 | 0 | } |
345 | 0 | strbuf_release(&refname); |
346 | 0 | strbuf_release(&path); |
347 | 0 | closedir(d); |
348 | |
|
349 | 0 | add_per_worktree_entries_to_dir(dir, dirname); |
350 | 0 | } |
351 | | |
352 | | static int for_each_root_ref(struct files_ref_store *refs, |
353 | | int (*cb)(const char *refname, void *cb_data), |
354 | | void *cb_data) |
355 | 0 | { |
356 | 0 | struct strbuf path = STRBUF_INIT, refname = STRBUF_INIT; |
357 | 0 | struct dirent *de; |
358 | 0 | int ret; |
359 | 0 | DIR *d; |
360 | |
|
361 | 0 | files_ref_path(refs, &path, ""); |
362 | |
|
363 | 0 | d = opendir(path.buf); |
364 | 0 | if (!d) { |
365 | 0 | strbuf_release(&path); |
366 | 0 | return -1; |
367 | 0 | } |
368 | | |
369 | 0 | while ((de = readdir(d)) != NULL) { |
370 | 0 | unsigned char dtype; |
371 | |
|
372 | 0 | if (de->d_name[0] == '.') |
373 | 0 | continue; |
374 | 0 | if (ends_with(de->d_name, ".lock")) |
375 | 0 | continue; |
376 | | |
377 | 0 | strbuf_reset(&refname); |
378 | 0 | strbuf_addstr(&refname, de->d_name); |
379 | |
|
380 | 0 | dtype = get_dtype(de, &path, 1); |
381 | 0 | if (dtype == DT_REG && is_root_ref(de->d_name)) { |
382 | 0 | ret = cb(refname.buf, cb_data); |
383 | 0 | if (ret) |
384 | 0 | goto done; |
385 | 0 | } |
386 | 0 | } |
387 | | |
388 | 0 | ret = 0; |
389 | |
|
390 | 0 | done: |
391 | 0 | strbuf_release(&refname); |
392 | 0 | strbuf_release(&path); |
393 | 0 | closedir(d); |
394 | 0 | return ret; |
395 | 0 | } |
396 | | |
397 | | struct fill_root_ref_data { |
398 | | struct files_ref_store *refs; |
399 | | struct ref_dir *dir; |
400 | | }; |
401 | | |
402 | | static int fill_root_ref(const char *refname, void *cb_data) |
403 | 0 | { |
404 | 0 | struct fill_root_ref_data *data = cb_data; |
405 | 0 | loose_fill_ref_dir_regular_file(data->refs, refname, data->dir); |
406 | 0 | return 0; |
407 | 0 | } |
408 | | |
409 | | /* |
410 | | * Add root refs to the ref dir by scanning the directory for any files |
411 | | * whose names follow the root ref syntax. |
412 | | */ |
413 | | static void add_root_refs(struct files_ref_store *refs, |
414 | | struct ref_dir *dir) |
415 | 0 | { |
416 | 0 | struct fill_root_ref_data data = { |
417 | 0 | .refs = refs, |
418 | 0 | .dir = dir, |
419 | 0 | }; |
420 | |
|
421 | 0 | for_each_root_ref(refs, fill_root_ref, &data); |
422 | 0 | } |
423 | | |
424 | | static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs, |
425 | | unsigned int flags) |
426 | 0 | { |
427 | 0 | if (!refs->loose) { |
428 | 0 | struct ref_dir *dir; |
429 | | |
430 | | /* |
431 | | * Mark the top-level directory complete because we |
432 | | * are about to read the only subdirectory that can |
433 | | * hold references: |
434 | | */ |
435 | 0 | refs->loose = create_ref_cache(&refs->base, loose_fill_ref_dir); |
436 | | |
437 | | /* We're going to fill the top level ourselves: */ |
438 | 0 | refs->loose->root->flag &= ~REF_INCOMPLETE; |
439 | |
|
440 | 0 | dir = get_ref_dir(refs->loose->root); |
441 | |
|
442 | 0 | if (flags & DO_FOR_EACH_INCLUDE_ROOT_REFS) |
443 | 0 | add_root_refs(refs, dir); |
444 | | |
445 | | /* |
446 | | * Add an incomplete entry for "refs/" (to be filled |
447 | | * lazily): |
448 | | */ |
449 | 0 | add_entry_to_dir(dir, create_dir_entry(refs->loose, "refs/", 5)); |
450 | 0 | } |
451 | 0 | return refs->loose; |
452 | 0 | } |
453 | | |
454 | | static int read_ref_internal(struct ref_store *ref_store, const char *refname, |
455 | | struct object_id *oid, struct strbuf *referent, |
456 | | unsigned int *type, int *failure_errno, int skip_packed_refs) |
457 | 0 | { |
458 | 0 | struct files_ref_store *refs = |
459 | 0 | files_downcast(ref_store, REF_STORE_READ, "read_raw_ref"); |
460 | 0 | struct strbuf sb_contents = STRBUF_INIT; |
461 | 0 | struct strbuf sb_path = STRBUF_INIT; |
462 | 0 | const char *path; |
463 | 0 | const char *buf; |
464 | 0 | struct stat st; |
465 | 0 | int fd; |
466 | 0 | int ret = -1; |
467 | 0 | int remaining_retries = 3; |
468 | 0 | int myerr = 0; |
469 | |
|
470 | 0 | *type = 0; |
471 | 0 | strbuf_reset(&sb_path); |
472 | |
|
473 | 0 | files_ref_path(refs, &sb_path, refname); |
474 | |
|
475 | 0 | path = sb_path.buf; |
476 | |
|
477 | 0 | stat_ref: |
478 | | /* |
479 | | * We might have to loop back here to avoid a race |
480 | | * condition: first we lstat() the file, then we try |
481 | | * to read it as a link or as a file. But if somebody |
482 | | * changes the type of the file (file <-> directory |
483 | | * <-> symlink) between the lstat() and reading, then |
484 | | * we don't want to report that as an error but rather |
485 | | * try again starting with the lstat(). |
486 | | * |
487 | | * We'll keep a count of the retries, though, just to avoid |
488 | | * any confusing situation sending us into an infinite loop. |
489 | | */ |
490 | |
|
491 | 0 | if (remaining_retries-- <= 0) |
492 | 0 | goto out; |
493 | | |
494 | 0 | if (lstat(path, &st) < 0) { |
495 | 0 | int ignore_errno; |
496 | 0 | myerr = errno; |
497 | 0 | if (myerr != ENOENT || skip_packed_refs) |
498 | 0 | goto out; |
499 | 0 | if (refs_read_raw_ref(refs->packed_ref_store, refname, oid, |
500 | 0 | referent, type, &ignore_errno)) { |
501 | 0 | myerr = ENOENT; |
502 | 0 | goto out; |
503 | 0 | } |
504 | 0 | ret = 0; |
505 | 0 | goto out; |
506 | 0 | } |
507 | | |
508 | | /* Follow "normalized" - i.e. "refs/.." - symlinks by hand */ |
509 | 0 | if (S_ISLNK(st.st_mode)) { |
510 | 0 | strbuf_reset(&sb_contents); |
511 | 0 | if (strbuf_readlink(&sb_contents, path, st.st_size) < 0) { |
512 | 0 | myerr = errno; |
513 | 0 | if (myerr == ENOENT || myerr == EINVAL) |
514 | | /* inconsistent with lstat; retry */ |
515 | 0 | goto stat_ref; |
516 | 0 | else |
517 | 0 | goto out; |
518 | 0 | } |
519 | 0 | if (starts_with(sb_contents.buf, "refs/") && |
520 | 0 | !check_refname_format(sb_contents.buf, 0)) { |
521 | 0 | strbuf_swap(&sb_contents, referent); |
522 | 0 | *type |= REF_ISSYMREF; |
523 | 0 | ret = 0; |
524 | 0 | goto out; |
525 | 0 | } |
526 | | /* |
527 | | * It doesn't look like a refname; fall through to just |
528 | | * treating it like a non-symlink, and reading whatever it |
529 | | * points to. |
530 | | */ |
531 | 0 | } |
532 | | |
533 | | /* Is it a directory? */ |
534 | 0 | if (S_ISDIR(st.st_mode)) { |
535 | 0 | int ignore_errno; |
536 | | /* |
537 | | * Even though there is a directory where the loose |
538 | | * ref is supposed to be, there could still be a |
539 | | * packed ref: |
540 | | */ |
541 | 0 | if (skip_packed_refs || |
542 | 0 | refs_read_raw_ref(refs->packed_ref_store, refname, oid, |
543 | 0 | referent, type, &ignore_errno)) { |
544 | 0 | myerr = EISDIR; |
545 | 0 | goto out; |
546 | 0 | } |
547 | 0 | ret = 0; |
548 | 0 | goto out; |
549 | 0 | } |
550 | | |
551 | | /* |
552 | | * Anything else, just open it and try to use it as |
553 | | * a ref |
554 | | */ |
555 | 0 | fd = open(path, O_RDONLY); |
556 | 0 | if (fd < 0) { |
557 | 0 | myerr = errno; |
558 | 0 | if (myerr == ENOENT && !S_ISLNK(st.st_mode)) |
559 | | /* inconsistent with lstat; retry */ |
560 | 0 | goto stat_ref; |
561 | 0 | else |
562 | 0 | goto out; |
563 | 0 | } |
564 | 0 | strbuf_reset(&sb_contents); |
565 | 0 | if (strbuf_read(&sb_contents, fd, 256) < 0) { |
566 | 0 | myerr = errno; |
567 | 0 | close(fd); |
568 | 0 | goto out; |
569 | 0 | } |
570 | 0 | close(fd); |
571 | 0 | strbuf_rtrim(&sb_contents); |
572 | 0 | buf = sb_contents.buf; |
573 | |
|
574 | 0 | ret = parse_loose_ref_contents(ref_store->repo->hash_algo, buf, |
575 | 0 | oid, referent, type, NULL, &myerr); |
576 | |
|
577 | 0 | out: |
578 | 0 | if (ret && !myerr) |
579 | 0 | BUG("returning non-zero %d, should have set myerr!", ret); |
580 | 0 | *failure_errno = myerr; |
581 | |
|
582 | 0 | strbuf_release(&sb_path); |
583 | 0 | strbuf_release(&sb_contents); |
584 | 0 | errno = 0; |
585 | 0 | return ret; |
586 | 0 | } |
587 | | |
588 | | static int files_read_raw_ref(struct ref_store *ref_store, const char *refname, |
589 | | struct object_id *oid, struct strbuf *referent, |
590 | | unsigned int *type, int *failure_errno) |
591 | 0 | { |
592 | 0 | return read_ref_internal(ref_store, refname, oid, referent, type, failure_errno, 0); |
593 | 0 | } |
594 | | |
595 | | static int files_read_symbolic_ref(struct ref_store *ref_store, const char *refname, |
596 | | struct strbuf *referent) |
597 | 0 | { |
598 | 0 | struct object_id oid; |
599 | 0 | int failure_errno, ret; |
600 | 0 | unsigned int type; |
601 | |
|
602 | 0 | ret = read_ref_internal(ref_store, refname, &oid, referent, &type, &failure_errno, 1); |
603 | 0 | if (!ret && !(type & REF_ISSYMREF)) |
604 | 0 | return NOT_A_SYMREF; |
605 | 0 | return ret; |
606 | 0 | } |
607 | | |
608 | | int parse_loose_ref_contents(const struct git_hash_algo *algop, |
609 | | const char *buf, struct object_id *oid, |
610 | | struct strbuf *referent, unsigned int *type, |
611 | | const char **trailing, int *failure_errno) |
612 | 0 | { |
613 | 0 | const char *p; |
614 | 0 | if (skip_prefix(buf, "ref:", &buf)) { |
615 | 0 | while (isspace(*buf)) |
616 | 0 | buf++; |
617 | |
|
618 | 0 | strbuf_reset(referent); |
619 | 0 | strbuf_addstr(referent, buf); |
620 | 0 | *type |= REF_ISSYMREF; |
621 | 0 | return 0; |
622 | 0 | } |
623 | | |
624 | | /* |
625 | | * FETCH_HEAD has additional data after the object ID. |
626 | | */ |
627 | 0 | if (parse_oid_hex_algop(buf, oid, &p, algop) || |
628 | 0 | (*p != '\0' && !isspace(*p))) { |
629 | 0 | *type |= REF_ISBROKEN; |
630 | 0 | *failure_errno = EINVAL; |
631 | 0 | return -1; |
632 | 0 | } |
633 | | |
634 | 0 | if (trailing) |
635 | 0 | *trailing = p; |
636 | |
|
637 | 0 | return 0; |
638 | 0 | } |
639 | | |
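For illustration, the two well-formed shapes of loose ref content this parser
accepts (the buffers below are hypothetical; the_hash_algo is whatever hash
the repository uses):

    struct object_id oid;
    struct strbuf referent = STRBUF_INIT;
    unsigned int type = 0;
    int err_no;

    /* A symbolic ref: REF_ISSYMREF is set and referent is filled in. */
    parse_loose_ref_contents(the_hash_algo, "ref: refs/heads/main",
                             &oid, &referent, &type, NULL, &err_no);

    /* A direct ref is a full hex object ID (optionally followed by
     * whitespace and extra data, as in FETCH_HEAD); anything else sets
     * REF_ISBROKEN and fails with EINVAL. */
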
640 | | static void unlock_ref(struct ref_lock *lock) |
641 | 0 | { |
642 | 0 | lock->count--; |
643 | 0 | if (!lock->count) { |
644 | 0 | rollback_lock_file(&lock->lk); |
645 | 0 | free(lock->ref_name); |
646 | 0 | free(lock); |
647 | 0 | } |
648 | 0 | } |
649 | | |
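The lock is reference-counted so that a ref update and its reflog update(s)
can share a single lockfile. A minimal sketch of the intended ownership
pattern (the callers and their ordering are hypothetical):

    lock->count++;      /* a second user, e.g. a reflog write, joins */
    unlock_ref(lock);   /* first user done: count drops to 1, lock survives */
    unlock_ref(lock);   /* last user done: lockfile rolled back, memory freed */
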
650 | | /* |
651 | | * Check if the transaction has another update with a case-insensitive refname |
652 | | * match. |
653 | | * |
654 | | * Only the updates that precede `update` in the transaction are checked; |
655 | | * later updates are expected to call this function themselves against earlier ones. |
656 | | */ |
657 | | static bool transaction_has_case_conflicting_update(struct ref_transaction *transaction, |
658 | | struct ref_update *update) |
659 | 0 | { |
660 | 0 | for (size_t i = 0; i < transaction->nr; i++) { |
661 | 0 | if (transaction->updates[i] == update) |
662 | 0 | break; |
663 | | |
664 | 0 | if (!strcasecmp(transaction->updates[i]->refname, update->refname)) |
665 | 0 | return true; |
666 | 0 | } |
667 | 0 | return false; |
668 | 0 | } |
669 | | |
670 | | /* |
671 | | * Lock refname, without following symrefs, and set *lock_p to point |
672 | | * at a newly-allocated lock object. Fill in lock->old_oid, referent, |
673 | | * and type similarly to read_raw_ref(). |
674 | | * |
675 | | * The caller must verify that refname is a "safe" reference name (in |
676 | | * the sense of refname_is_safe()) before calling this function. |
677 | | * |
678 | | * If the reference doesn't already exist, verify that refname doesn't |
679 | | * have a D/F conflict with any existing references. extras and skip |
680 | | * are passed to refs_verify_refname_available() for this check. |
681 | | * |
682 | | * If mustexist is not set and the reference is not found or is |
683 | | * broken, lock the reference anyway but clear old_oid. |
684 | | * |
685 | | * Return 0 on success. On failure, write an error message to err and |
686 | | * return one of the REF_TRANSACTION_ERROR_* codes describing the failure. |
687 | | * |
688 | | * Implementation note: This function is basically |
689 | | * |
690 | | * lock reference |
691 | | * read_raw_ref() |
692 | | * |
693 | | * but it includes a lot more code to |
694 | | * - Deal with possible races with other processes |
695 | | * - Avoid calling refs_verify_refname_available() when it can be |
696 | | * avoided, namely if we were successfully able to read the ref |
697 | | * - Generate informative error messages in the case of failure |
698 | | */ |
699 | | static enum ref_transaction_error lock_raw_ref(struct files_ref_store *refs, |
700 | | struct ref_transaction *transaction, |
701 | | size_t update_idx, |
702 | | int mustexist, |
703 | | struct string_list *refnames_to_check, |
704 | | struct ref_lock **lock_p, |
705 | | struct strbuf *referent, |
706 | | struct strbuf *err) |
707 | 0 | { |
708 | 0 | enum ref_transaction_error ret = REF_TRANSACTION_ERROR_GENERIC; |
709 | 0 | struct ref_update *update = transaction->updates[update_idx]; |
710 | 0 | const struct string_list *extras = &transaction->refnames; |
711 | 0 | const char *refname = update->refname; |
712 | 0 | unsigned int *type = &update->type; |
713 | 0 | struct ref_lock *lock; |
714 | 0 | struct strbuf ref_file = STRBUF_INIT; |
715 | 0 | int attempts_remaining = 3; |
716 | 0 | int failure_errno; |
717 | |
|
718 | 0 | assert(err); |
719 | 0 | files_assert_main_repository(refs, "lock_raw_ref"); |
720 | |
|
721 | 0 | *type = 0; |
722 | | |
723 | | /* First lock the file so it can't change out from under us. */ |
724 | |
|
725 | 0 | *lock_p = CALLOC_ARRAY(lock, 1); |
726 | |
|
727 | 0 | lock->ref_name = xstrdup(refname); |
728 | 0 | lock->count = 1; |
729 | 0 | files_ref_path(refs, &ref_file, refname); |
730 | |
|
731 | 0 | retry: |
732 | 0 | switch (safe_create_leading_directories(the_repository, ref_file.buf)) { |
733 | 0 | case SCLD_OK: |
734 | 0 | break; /* success */ |
735 | 0 | case SCLD_EXISTS: |
736 | | /* |
737 | | * Suppose refname is "refs/foo/bar". We just failed |
738 | | * to create the containing directory, "refs/foo", |
739 | | * because there was a non-directory in the way. This |
740 | | * indicates a D/F conflict, probably because of |
741 | | * another reference such as "refs/foo". There is no |
742 | | * reason to expect this error to be transitory. |
743 | | */ |
744 | 0 | if (refs_verify_refname_available(&refs->base, refname, |
745 | 0 | extras, NULL, 0, err)) { |
746 | 0 | if (mustexist) { |
747 | | /* |
748 | | * To the user the relevant error is |
749 | | * that the "mustexist" reference is |
750 | | * missing: |
751 | | */ |
752 | 0 | strbuf_reset(err); |
753 | 0 | strbuf_addf(err, "unable to resolve reference '%s'", |
754 | 0 | refname); |
755 | 0 | ret = REF_TRANSACTION_ERROR_NONEXISTENT_REF; |
756 | 0 | } else { |
757 | | /* |
758 | | * The error message set by |
759 | | * refs_verify_refname_available() is |
760 | | * OK. |
761 | | */ |
762 | 0 | ret = REF_TRANSACTION_ERROR_NAME_CONFLICT; |
763 | 0 | } |
764 | 0 | } else { |
765 | | /* |
766 | | * The file that is in the way isn't a loose |
767 | | * reference. Report it as a low-level |
768 | | * failure. |
769 | | */ |
770 | 0 | strbuf_addf(err, "unable to create lock file %s.lock; " |
771 | 0 | "non-directory in the way", |
772 | 0 | ref_file.buf); |
773 | 0 | } |
774 | 0 | goto error_return; |
775 | 0 | case SCLD_VANISHED: |
776 | | /* Maybe another process was tidying up. Try again. */ |
777 | 0 | if (--attempts_remaining > 0) |
778 | 0 | goto retry; |
779 | | /* fall through */ |
780 | 0 | default: |
781 | 0 | strbuf_addf(err, "unable to create directory for %s", |
782 | 0 | ref_file.buf); |
783 | 0 | goto error_return; |
784 | 0 | } |
785 | | |
786 | 0 | if (hold_lock_file_for_update_timeout( |
787 | 0 | &lock->lk, ref_file.buf, LOCK_NO_DEREF, |
788 | 0 | get_files_ref_lock_timeout_ms()) < 0) { |
789 | 0 | int myerr = errno; |
790 | 0 | errno = 0; |
791 | 0 | if (myerr == ENOENT && --attempts_remaining > 0) { |
792 | | /* |
793 | | * Maybe somebody just deleted one of the |
794 | | * directories leading to ref_file. Try |
795 | | * again: |
796 | | */ |
797 | 0 | goto retry; |
798 | 0 | } else { |
799 | 0 | unable_to_lock_message(ref_file.buf, myerr, err); |
800 | 0 | if (myerr == EEXIST) { |
801 | 0 | if (ignore_case && |
802 | 0 | transaction_has_case_conflicting_update(transaction, update)) { |
803 | | /* |
804 | | * In case-insensitive filesystems, ensure that conflicts within a |
805 | | * given transaction are handled. Pre-existing refs on a |
806 | | * case-insensitive system will be overridden without any issue. |
807 | | */ |
808 | 0 | ret = REF_TRANSACTION_ERROR_CASE_CONFLICT; |
809 | 0 | } else { |
810 | | /* |
811 | | * Pre-existing case-conflicting reference locks should also be |
812 | | * specially categorized to avoid failing all batched updates. |
813 | | */ |
814 | 0 | ret = REF_TRANSACTION_ERROR_CREATE_EXISTS; |
815 | 0 | } |
816 | 0 | } |
817 | |
|
818 | 0 | goto error_return; |
819 | 0 | } |
820 | 0 | } |
821 | | |
822 | | /* |
823 | | * Now we hold the lock and can read the reference without |
824 | | * fear that its value will change. |
825 | | */ |
826 | | |
827 | 0 | if (files_read_raw_ref(&refs->base, refname, &lock->old_oid, referent, |
828 | 0 | type, &failure_errno)) { |
829 | 0 | struct string_list_item *item; |
830 | |
|
831 | 0 | if (failure_errno == ENOENT) { |
832 | 0 | if (mustexist) { |
833 | | /* Garden variety missing reference. */ |
834 | 0 | strbuf_addf(err, "unable to resolve reference '%s'", |
835 | 0 | refname); |
836 | 0 | ret = REF_TRANSACTION_ERROR_NONEXISTENT_REF; |
837 | 0 | goto error_return; |
838 | 0 | } else { |
839 | | /* |
840 | | * Reference is missing, but that's OK. We |
841 | | * know that there is not a conflict with |
842 | | * another loose reference because |
843 | | * (supposing that we are trying to lock |
844 | | * reference "refs/foo/bar"): |
845 | | * |
846 | | * - We were successfully able to create |
847 | | * the lockfile refs/foo/bar.lock, so we |
848 | | * know there cannot be a loose reference |
849 | | * named "refs/foo". |
850 | | * |
851 | | * - We got ENOENT and not EISDIR, so we |
852 | | * know that there cannot be a loose |
853 | | * reference named "refs/foo/bar/baz". |
854 | | */ |
855 | 0 | } |
856 | 0 | } else if (failure_errno == EISDIR) { |
857 | | /* |
858 | | * There is a directory in the way. It might have |
859 | | * contained references that have been deleted. If |
860 | | * we don't require that the reference already |
861 | | * exists, try to remove the directory so that it |
862 | | * doesn't cause trouble when we want to rename the |
863 | | * lockfile into place later. |
864 | | */ |
865 | 0 | if (mustexist) { |
866 | | /* Garden variety missing reference. */ |
867 | 0 | strbuf_addf(err, "unable to resolve reference '%s'", |
868 | 0 | refname); |
869 | 0 | ret = REF_TRANSACTION_ERROR_NONEXISTENT_REF; |
870 | 0 | goto error_return; |
871 | 0 | } else if (remove_dir_recursively(&ref_file, |
872 | 0 | REMOVE_DIR_EMPTY_ONLY)) { |
873 | 0 | ret = REF_TRANSACTION_ERROR_NAME_CONFLICT; |
874 | 0 | if (refs_verify_refname_available( |
875 | 0 | &refs->base, refname, |
876 | 0 | extras, NULL, 0, err)) { |
877 | | /* |
878 | | * The error message set by |
879 | | * verify_refname_available() is OK. |
880 | | */ |
881 | 0 | goto error_return; |
882 | 0 | } else { |
883 | | /* |
884 | | * Directory conflicts can occur if there |
885 | | * is an existing lock file in the directory |
886 | | * or if the filesystem is case-insensitive |
887 | | * and the directory contains a valid reference |
888 | | * but conflicts with the update. |
889 | | */ |
890 | 0 | strbuf_addf(err, "there is a non-empty directory '%s' " |
891 | 0 | "blocking reference '%s'", |
892 | 0 | ref_file.buf, refname); |
893 | 0 | goto error_return; |
894 | 0 | } |
895 | 0 | } |
896 | 0 | } else if (failure_errno == EINVAL && (*type & REF_ISBROKEN)) { |
897 | 0 | strbuf_addf(err, "unable to resolve reference '%s': " |
898 | 0 | "reference broken", refname); |
899 | 0 | goto error_return; |
900 | 0 | } else { |
901 | 0 | strbuf_addf(err, "unable to resolve reference '%s': %s", |
902 | 0 | refname, strerror(failure_errno)); |
903 | 0 | goto error_return; |
904 | 0 | } |
905 | | |
906 | | /* |
907 | | * If the ref did not exist and we are creating it, we have to |
908 | | * make sure there is no existing packed ref that conflicts |
909 | | * with refname. This check is deferred so that we can batch it. |
910 | | * |
911 | | * For case-insensitive filesystems, we should also check for F/D |
912 | | * conflicts between 'foo' and 'Foo/bar'. So let's lowercase |
913 | | * the refname. |
914 | | */ |
915 | 0 | if (ignore_case) { |
916 | 0 | struct strbuf lower = STRBUF_INIT; |
917 | |
|
918 | 0 | strbuf_addstr(&lower, refname); |
919 | 0 | strbuf_tolower(&lower); |
920 | |
|
921 | 0 | item = string_list_append_nodup(refnames_to_check, |
922 | 0 | strbuf_detach(&lower, NULL)); |
923 | 0 | } else { |
924 | 0 | item = string_list_append(refnames_to_check, refname); |
925 | 0 | } |
926 | |
|
927 | 0 | item->util = xmalloc(sizeof(update_idx)); |
928 | 0 | memcpy(item->util, &update_idx, sizeof(update_idx)); |
929 | 0 | } |
930 | | |
931 | 0 | ret = 0; |
932 | 0 | goto out; |
933 | | |
934 | 0 | error_return: |
935 | 0 | unlock_ref(lock); |
936 | 0 | *lock_p = NULL; |
937 | |
|
938 | 0 | out: |
939 | 0 | strbuf_release(&ref_file); |
940 | 0 | return ret; |
941 | 0 | } |
942 | | |
943 | | struct files_ref_iterator { |
944 | | struct ref_iterator base; |
945 | | |
946 | | struct ref_iterator *iter0; |
947 | | struct repository *repo; |
948 | | unsigned int flags; |
949 | | }; |
950 | | |
951 | | static int files_ref_iterator_advance(struct ref_iterator *ref_iterator) |
952 | 0 | { |
953 | 0 | struct files_ref_iterator *iter = |
954 | 0 | (struct files_ref_iterator *)ref_iterator; |
955 | 0 | int ok; |
956 | |
|
957 | 0 | while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) { |
958 | 0 | if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY && |
959 | 0 | parse_worktree_ref(iter->iter0->ref.name, NULL, NULL, |
960 | 0 | NULL) != REF_WORKTREE_CURRENT) |
961 | 0 | continue; |
962 | | |
963 | 0 | if ((iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS) && |
964 | 0 | (iter->iter0->ref.flags & REF_ISSYMREF) && |
965 | 0 | (iter->iter0->ref.flags & REF_ISBROKEN)) |
966 | 0 | continue; |
967 | | |
968 | 0 | if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) && |
969 | 0 | !ref_resolves_to_object(iter->iter0->ref.name, |
970 | 0 | iter->repo, |
971 | 0 | iter->iter0->ref.oid, |
972 | 0 | iter->iter0->ref.flags)) |
973 | 0 | continue; |
974 | | |
975 | 0 | iter->base.ref = iter->iter0->ref; |
976 | |
|
977 | 0 | return ITER_OK; |
978 | 0 | } |
979 | | |
980 | 0 | return ok; |
981 | 0 | } |
982 | | |
983 | | static int files_ref_iterator_seek(struct ref_iterator *ref_iterator, |
984 | | const char *refname, unsigned int flags) |
985 | 0 | { |
986 | 0 | struct files_ref_iterator *iter = |
987 | 0 | (struct files_ref_iterator *)ref_iterator; |
988 | 0 | return ref_iterator_seek(iter->iter0, refname, flags); |
989 | 0 | } |
990 | | |
991 | | static void files_ref_iterator_release(struct ref_iterator *ref_iterator) |
992 | 0 | { |
993 | 0 | struct files_ref_iterator *iter = |
994 | 0 | (struct files_ref_iterator *)ref_iterator; |
995 | 0 | ref_iterator_free(iter->iter0); |
996 | 0 | } |
997 | | |
998 | | static struct ref_iterator_vtable files_ref_iterator_vtable = { |
999 | | .advance = files_ref_iterator_advance, |
1000 | | .seek = files_ref_iterator_seek, |
1001 | | .release = files_ref_iterator_release, |
1002 | | }; |
1003 | | |
1004 | | static struct ref_iterator *files_ref_iterator_begin( |
1005 | | struct ref_store *ref_store, |
1006 | | const char *prefix, const char **exclude_patterns, |
1007 | | unsigned int flags) |
1008 | 0 | { |
1009 | 0 | struct files_ref_store *refs; |
1010 | 0 | struct ref_iterator *loose_iter, *packed_iter, *overlay_iter; |
1011 | 0 | struct files_ref_iterator *iter; |
1012 | 0 | struct ref_iterator *ref_iterator; |
1013 | 0 | unsigned int required_flags = REF_STORE_READ; |
1014 | |
|
1015 | 0 | if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN)) |
1016 | 0 | required_flags |= REF_STORE_ODB; |
1017 | |
|
1018 | 0 | refs = files_downcast(ref_store, required_flags, "ref_iterator_begin"); |
1019 | | |
1020 | | /* |
1021 | | * We must make sure that all loose refs are read before |
1022 | | * accessing the packed-refs file; this avoids a race |
1023 | | * condition if loose refs are migrated to the packed-refs |
1024 | | * file by a simultaneous process, but our in-memory view is |
1025 | | * from before the migration. We ensure this as follows: |
1026 | | * First, we start the loose refs iteration with its |
1027 | | * `prime_ref` argument set to true. This causes the loose |
1028 | | * references in the subtree to be pre-read into the cache. |
1029 | | * (If they've already been read, that's OK; we only need to |
1030 | | * guarantee that they're read before the packed refs, not |
1031 | | * *how much* before.) After that, we call |
1032 | | * packed_ref_iterator_begin(), which internally checks |
1033 | | * whether the packed-ref cache is up to date with what is on |
1034 | | * disk, and re-reads it if not. |
1035 | | */ |
1036 | |
|
1037 | 0 | loose_iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, flags), |
1038 | 0 | prefix, ref_store->repo, 1); |
1039 | | |
1040 | | /* |
1041 | | * The packed-refs file might contain broken references, for |
1042 | | * example an old version of a reference that points at an |
1043 | | * object that has since been garbage-collected. This is OK as |
1044 | | * long as there is a corresponding loose reference that |
1045 | | * overrides it, and we don't want to emit an error message in |
1046 | | * this case. So ask the packed_ref_store for all of its |
1047 | | * references, and (if needed) do our own check for broken |
1048 | | * ones in files_ref_iterator_advance(), after we have merged |
1049 | | * the packed and loose references. |
1050 | | */ |
1051 | 0 | packed_iter = refs_ref_iterator_begin( |
1052 | 0 | refs->packed_ref_store, prefix, exclude_patterns, 0, |
1053 | 0 | DO_FOR_EACH_INCLUDE_BROKEN); |
1054 | |
|
1055 | 0 | overlay_iter = overlay_ref_iterator_begin(loose_iter, packed_iter); |
1056 | |
|
1057 | 0 | CALLOC_ARRAY(iter, 1); |
1058 | 0 | ref_iterator = &iter->base; |
1059 | 0 | base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable); |
1060 | 0 | iter->iter0 = overlay_iter; |
1061 | 0 | iter->repo = ref_store->repo; |
1062 | 0 | iter->flags = flags; |
1063 | |
|
1064 | 0 | return ref_iterator; |
1065 | 0 | } |
1066 | | |
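A consumer would drive the returned iterator the same way files_optimize()
does further down; a sketch (the prefix and flags are illustrative):

    struct ref_iterator *iter =
        files_ref_iterator_begin(ref_store, "refs/heads/", NULL, 0);
    int ok;

    while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
        /* iter->ref.name and iter->ref.oid describe the current reference,
         * with loose refs already overlaid on top of packed ones. */
    }
    if (ok != ITER_DONE)
        die("error while iterating over references");
    ref_iterator_free(iter);
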
1067 | | /* |
1068 | | * Callback function for raceproof_create_file(). This function is |
1069 | | * expected to do something that makes dirname(path) permanent despite |
1070 | | * the fact that other processes might be cleaning up empty |
1071 | | * directories at the same time. Usually it will create a file named |
1072 | | * path, but alternatively it could create another file in that |
1073 | | * directory, or even chdir() into that directory. The function should |
1074 | | * return 0 if the action was completed successfully. On error, it |
1075 | | * should return a nonzero result and set errno. |
1076 | | * raceproof_create_file() treats two errno values specially: |
1077 | | * |
1078 | | * - ENOENT -- dirname(path) does not exist. In this case, |
1079 | | * raceproof_create_file() tries creating dirname(path) |
1080 | | * (and any parent directories, if necessary) and calls |
1081 | | * the function again. |
1082 | | * |
1083 | | * - EISDIR -- the file already exists and is a directory. In this |
1084 | | * case, raceproof_create_file() removes the directory if |
1085 | | * it is empty (and recursively any empty directories that |
1086 | | * it contains) and calls the function again. |
1087 | | * |
1088 | | * Any other errno causes raceproof_create_file() to fail with the |
1089 | | * callback's return value and errno. |
1090 | | * |
1091 | | * Obviously, this function should be OK with being called again if it |
1092 | | * fails with ENOENT or EISDIR. In other scenarios it will not be |
1093 | | * called again. |
1094 | | */ |
1095 | | typedef int create_file_fn(const char *path, void *cb); |
1096 | | |
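create_reflock() below is the callback this backend actually uses; as an
additional illustration, a hypothetical callback that simply creates an empty
file under this contract could look like:

    /* Hypothetical example callback, not part of this backend. */
    static int create_empty_file(const char *path, void *cb_data UNUSED)
    {
        int fd = open(path, O_WRONLY | O_CREAT | O_EXCL, 0666);
        if (fd < 0)
            return -1;      /* errno is left set for raceproof_create_file() */
        close(fd);
        return 0;
    }
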
1097 | | /* |
1098 | | * Create a file in dirname(path) by calling fn, creating leading |
1099 | | * directories if necessary. Retry a few times in case we are racing |
1100 | | * with another process that is trying to clean up the directory that |
1101 | | * contains path. See the documentation for create_file_fn for more |
1102 | | * details. |
1103 | | * |
1104 | | * Return the value and set the errno that resulted from the most |
1105 | | * recent call of fn. fn is always called at least once, and will be |
1106 | | * called more than once if it fails with ENOENT or EISDIR. |
1107 | | */ |
1108 | | static int raceproof_create_file(const char *path, create_file_fn fn, void *cb) |
1109 | 0 | { |
1110 | | /* |
1111 | | * The number of times we will try to remove empty directories |
1112 | | * in the way of path. This is only 1 because if another |
1113 | | * process is racily creating directories that conflict with |
1114 | | * us, we don't want to fight against them. |
1115 | | */ |
1116 | 0 | int remove_directories_remaining = 1; |
1117 | | |
1118 | | /* |
1119 | | * The number of times that we will try to create the |
1120 | | * directories containing path. We are willing to attempt this |
1121 | | * more than once, because another process could be trying to |
1122 | | * clean up empty directories at the same time as we are |
1123 | | * trying to create them. |
1124 | | */ |
1125 | 0 | int create_directories_remaining = 3; |
1126 | | |
1127 | | /* A scratch copy of path, filled lazily if we need it: */ |
1128 | 0 | struct strbuf path_copy = STRBUF_INIT; |
1129 | |
|
1130 | 0 | int ret, save_errno; |
1131 | | |
1132 | | /* Sanity check: */ |
1133 | 0 | assert(*path); |
1134 | |
|
1135 | 0 | retry_fn: |
1136 | 0 | ret = fn(path, cb); |
1137 | 0 | save_errno = errno; |
1138 | 0 | if (!ret) |
1139 | 0 | goto out; |
1140 | | |
1141 | 0 | if (errno == EISDIR && remove_directories_remaining-- > 0) { |
1142 | | /* |
1143 | | * A directory is in the way. Maybe it is empty; try |
1144 | | * to remove it: |
1145 | | */ |
1146 | 0 | if (!path_copy.len) |
1147 | 0 | strbuf_addstr(&path_copy, path); |
1148 | |
|
1149 | 0 | if (!remove_dir_recursively(&path_copy, REMOVE_DIR_EMPTY_ONLY)) |
1150 | 0 | goto retry_fn; |
1151 | 0 | } else if (errno == ENOENT && create_directories_remaining-- > 0) { |
1152 | | /* |
1153 | | * Maybe the containing directory didn't exist, or |
1154 | | * maybe it was just deleted by a process that is |
1155 | | * racing with us to clean up empty directories. Try |
1156 | | * to create it: |
1157 | | */ |
1158 | 0 | enum scld_error scld_result; |
1159 | |
|
1160 | 0 | if (!path_copy.len) |
1161 | 0 | strbuf_addstr(&path_copy, path); |
1162 | |
|
1163 | 0 | do { |
1164 | 0 | scld_result = safe_create_leading_directories(the_repository, path_copy.buf); |
1165 | 0 | if (scld_result == SCLD_OK) |
1166 | 0 | goto retry_fn; |
1167 | 0 | } while (scld_result == SCLD_VANISHED && create_directories_remaining-- > 0); |
1168 | 0 | } |
1169 | | |
1170 | 0 | out: |
1171 | 0 | strbuf_release(&path_copy); |
1172 | 0 | errno = save_errno; |
1173 | 0 | return ret; |
1174 | 0 | } |
1175 | | |
1176 | | static int remove_empty_directories(struct strbuf *path) |
1177 | 0 | { |
1178 | | /* |
1179 | | * we want to create a file but there is a directory there; |
1180 | | * if that is an empty directory (or a directory that contains |
1181 | | * only empty directories), remove them. |
1182 | | */ |
1183 | 0 | return remove_dir_recursively(path, REMOVE_DIR_EMPTY_ONLY); |
1184 | 0 | } |
1185 | | |
1186 | | static int create_reflock(const char *path, void *cb) |
1187 | 0 | { |
1188 | 0 | struct lock_file *lk = cb; |
1189 | |
|
1190 | 0 | return hold_lock_file_for_update_timeout( |
1191 | 0 | lk, path, LOCK_NO_DEREF, |
1192 | 0 | get_files_ref_lock_timeout_ms()) < 0 ? -1 : 0; |
1193 | 0 | } |
1194 | | |
1195 | | /* |
1196 | | * Locks a ref returning the lock on success and NULL on failure. |
1197 | | */ |
1198 | | static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs, |
1199 | | const char *refname, |
1200 | | struct strbuf *err) |
1201 | 0 | { |
1202 | 0 | struct strbuf ref_file = STRBUF_INIT; |
1203 | 0 | struct ref_lock *lock; |
1204 | |
|
1205 | 0 | files_assert_main_repository(refs, "lock_ref_oid_basic"); |
1206 | 0 | assert(err); |
1207 | |
|
1208 | 0 | CALLOC_ARRAY(lock, 1); |
1209 | |
|
1210 | 0 | files_ref_path(refs, &ref_file, refname); |
1211 | | |
1212 | | /* |
1213 | | * If the ref did not exist and we are creating it, make sure |
1214 | | * there is no existing packed ref whose name begins with our |
1215 | | * refname, nor a packed ref whose name is a proper prefix of |
1216 | | * our refname. |
1217 | | */ |
1218 | 0 | if (is_null_oid(&lock->old_oid) && |
1219 | 0 | refs_verify_refname_available(refs->packed_ref_store, refname, |
1220 | 0 | NULL, NULL, 0, err)) |
1221 | 0 | goto error_return; |
1222 | | |
1223 | 0 | lock->ref_name = xstrdup(refname); |
1224 | 0 | lock->count = 1; |
1225 | |
|
1226 | 0 | if (raceproof_create_file(ref_file.buf, create_reflock, &lock->lk)) { |
1227 | 0 | unable_to_lock_message(ref_file.buf, errno, err); |
1228 | 0 | goto error_return; |
1229 | 0 | } |
1230 | | |
1231 | 0 | if (!refs_resolve_ref_unsafe(&refs->base, lock->ref_name, 0, |
1232 | 0 | &lock->old_oid, NULL)) |
1233 | 0 | oidclr(&lock->old_oid, refs->base.repo->hash_algo); |
1234 | 0 | goto out; |
1235 | | |
1236 | 0 | error_return: |
1237 | 0 | unlock_ref(lock); |
1238 | 0 | lock = NULL; |
1239 | |
|
1240 | 0 | out: |
1241 | 0 | strbuf_release(&ref_file); |
1242 | 0 | return lock; |
1243 | 0 | } |
1244 | | |
1245 | | struct ref_to_prune { |
1246 | | struct ref_to_prune *next; |
1247 | | struct object_id oid; |
1248 | | char name[FLEX_ARRAY]; |
1249 | | }; |
1250 | | |
1251 | | enum { |
1252 | | REMOVE_EMPTY_PARENTS_REF = 0x01, |
1253 | | REMOVE_EMPTY_PARENTS_REFLOG = 0x02 |
1254 | | }; |
1255 | | |
1256 | | /* |
1257 | | * Remove empty parent directories associated with the specified |
1258 | | * reference and/or its reflog, but spare [logs/]refs/ and immediate |
1259 | | * subdirs. flags is a combination of REMOVE_EMPTY_PARENTS_REF and/or |
1260 | | * REMOVE_EMPTY_PARENTS_REFLOG. |
1261 | | */ |
1262 | | static void try_remove_empty_parents(struct files_ref_store *refs, |
1263 | | const char *refname, |
1264 | | unsigned int flags) |
1265 | 0 | { |
1266 | 0 | struct strbuf buf = STRBUF_INIT; |
1267 | 0 | struct strbuf sb = STRBUF_INIT; |
1268 | 0 | char *p, *q; |
1269 | 0 | int i; |
1270 | |
|
1271 | 0 | strbuf_addstr(&buf, refname); |
1272 | 0 | p = buf.buf; |
1273 | 0 | for (i = 0; i < 2; i++) { /* refs/{heads,tags,...}/ */ |
1274 | 0 | while (*p && *p != '/') |
1275 | 0 | p++; |
1276 | | /* tolerate duplicate slashes; see check_refname_format() */ |
1277 | 0 | while (*p == '/') |
1278 | 0 | p++; |
1279 | 0 | } |
1280 | 0 | q = buf.buf + buf.len; |
1281 | 0 | while (flags & (REMOVE_EMPTY_PARENTS_REF | REMOVE_EMPTY_PARENTS_REFLOG)) { |
1282 | 0 | while (q > p && *q != '/') |
1283 | 0 | q--; |
1284 | 0 | while (q > p && *(q-1) == '/') |
1285 | 0 | q--; |
1286 | 0 | if (q == p) |
1287 | 0 | break; |
1288 | 0 | strbuf_setlen(&buf, q - buf.buf); |
1289 | |
|
1290 | 0 | strbuf_reset(&sb); |
1291 | 0 | files_ref_path(refs, &sb, buf.buf); |
1292 | 0 | if ((flags & REMOVE_EMPTY_PARENTS_REF) && rmdir(sb.buf)) |
1293 | 0 | flags &= ~REMOVE_EMPTY_PARENTS_REF; |
1294 | |
|
1295 | 0 | strbuf_reset(&sb); |
1296 | 0 | files_reflog_path(refs, &sb, buf.buf); |
1297 | 0 | if ((flags & REMOVE_EMPTY_PARENTS_REFLOG) && rmdir(sb.buf)) |
1298 | 0 | flags &= ~REMOVE_EMPTY_PARENTS_REFLOG; |
1299 | 0 | } |
1300 | 0 | strbuf_release(&buf); |
1301 | 0 | strbuf_release(&sb); |
1302 | 0 | } |
1303 | | |
1304 | | /* make sure nobody touched the ref, and unlink */ |
1305 | | static void prune_ref(struct files_ref_store *refs, struct ref_to_prune *r) |
1306 | 0 | { |
1307 | 0 | struct ref_transaction *transaction; |
1308 | 0 | struct strbuf err = STRBUF_INIT; |
1309 | 0 | int ret = -1; |
1310 | |
|
1311 | 0 | if (check_refname_format(r->name, 0)) |
1312 | 0 | return; |
1313 | | |
1314 | 0 | transaction = ref_store_transaction_begin(&refs->base, 0, &err); |
1315 | 0 | if (!transaction) |
1316 | 0 | goto cleanup; |
1317 | 0 | ref_transaction_add_update( |
1318 | 0 | transaction, r->name, |
1319 | 0 | REF_NO_DEREF | REF_HAVE_NEW | REF_HAVE_OLD | REF_IS_PRUNING, |
1320 | 0 | null_oid(the_hash_algo), &r->oid, NULL, NULL, NULL, NULL); |
1321 | 0 | if (ref_transaction_commit(transaction, &err)) |
1322 | 0 | goto cleanup; |
1323 | | |
1324 | 0 | ret = 0; |
1325 | |
|
1326 | 0 | cleanup: |
1327 | 0 | if (ret) |
1328 | 0 | error("%s", err.buf); |
1329 | 0 | strbuf_release(&err); |
1330 | 0 | ref_transaction_free(transaction); |
1331 | 0 | return; |
1332 | 0 | } |
1333 | | |
1334 | | /* |
1335 | | * Prune the loose versions of the references in the linked list |
1336 | | * `*refs_to_prune`, freeing the entries in the list as we go. |
1337 | | */ |
1338 | | static void prune_refs(struct files_ref_store *refs, struct ref_to_prune **refs_to_prune) |
1339 | 0 | { |
1340 | 0 | while (*refs_to_prune) { |
1341 | 0 | struct ref_to_prune *r = *refs_to_prune; |
1342 | 0 | *refs_to_prune = r->next; |
1343 | 0 | prune_ref(refs, r); |
1344 | 0 | free(r); |
1345 | 0 | } |
1346 | 0 | } |
1347 | | |
1348 | | /* |
1349 | | * Return true if the specified reference should be packed. |
1350 | | */ |
1351 | | static int should_pack_ref(struct files_ref_store *refs, |
1352 | | const struct reference *ref, |
1353 | | struct refs_optimize_opts *opts) |
1354 | 0 | { |
1355 | 0 | struct string_list_item *item; |
1356 | | |
1357 | | /* Do not pack per-worktree refs: */ |
1358 | 0 | if (parse_worktree_ref(ref->name, NULL, NULL, NULL) != |
1359 | 0 | REF_WORKTREE_SHARED) |
1360 | 0 | return 0; |
1361 | | |
1362 | | /* Do not pack symbolic refs: */ |
1363 | 0 | if (ref->flags & REF_ISSYMREF) |
1364 | 0 | return 0; |
1365 | | |
1366 | | /* Do not pack broken refs: */ |
1367 | 0 | if (!ref_resolves_to_object(ref->name, refs->base.repo, ref->oid, ref->flags)) |
1368 | 0 | return 0; |
1369 | | |
1370 | 0 | if (ref_excluded(opts->exclusions, ref->name)) |
1371 | 0 | return 0; |
1372 | | |
1373 | 0 | for_each_string_list_item(item, opts->includes) |
1374 | 0 | if (!wildmatch(item->string, ref->name, 0)) |
1375 | 0 | return 1; |
1376 | | |
1377 | 0 | return 0; |
1378 | 0 | } |
1379 | | |
1380 | | static int should_pack_refs(struct files_ref_store *refs, |
1381 | | struct refs_optimize_opts *opts) |
1382 | 0 | { |
1383 | 0 | struct ref_iterator *iter; |
1384 | 0 | size_t packed_size; |
1385 | 0 | size_t refcount = 0; |
1386 | 0 | size_t limit; |
1387 | 0 | int ret; |
1388 | |
|
1389 | 0 | if (!(opts->flags & REFS_OPTIMIZE_AUTO)) |
1390 | 0 | return 1; |
1391 | | |
1392 | 0 | ret = packed_refs_size(refs->packed_ref_store, &packed_size); |
1393 | 0 | if (ret < 0) |
1394 | 0 | die("cannot determine packed-refs size"); |
1395 | | |
1396 | | /* |
1397 | | * Packing loose references into the packed-refs file scales with the |
1398 | | * number of references we're about to write. We thus decide whether we |
1399 | | * repack refs by weighing the current size of the packed-refs file |
1400 | | * against the number of loose references. This is done such that we do |
1401 | | * not repack too often on repositories with a huge number of |
1402 | | * references, where we can expect a lot of churn in the number of |
1403 | | * references. |
1404 | | * |
1405 | | * As a heuristic, we repack if the number of loose references in the |
1406 | | * repository exceeds `log2(nr_packed_refs) * 5`, where we estimate |
1407 | | * `nr_packed_refs = packed_size / 100`, which scales as following: |
1408 | | * |
1409 | | * - 1kB ~ 10 packed refs: 16 refs |
1410 | | * - 10kB ~ 100 packed refs: 33 refs |
1411 | | * - 100kB ~ 1k packed refs: 49 refs |
1412 | | * - 1MB ~ 10k packed refs: 66 refs |
1413 | | * - 10MB ~ 100k packed refs: 82 refs |
1414 | | * - 100MB ~ 1m packed refs: 99 refs |
1415 | | * |
1416 | | * We thus allow roughly 16 additional loose refs per factor of ten of |
1417 | | * packed refs. This heuristic may be tweaked in the future, but should |
1418 | | * serve as a sufficiently good first iteration. |
1419 | | */ |
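	/*
	 * Worked example (numbers illustrative): a packed-refs file of about
	 * 1 MB is estimated to hold ~10k refs, so the threshold computed
	 * below comes out at roughly log2(10000) * 5 ~= 66 loose refs before
	 * an automatic repack, and it is never allowed to drop below 16.
	 */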
1420 | 0 | limit = log2u(packed_size / 100) * 5; |
1421 | 0 | if (limit < 16) |
1422 | 0 | limit = 16; |
1423 | |
|
1424 | 0 | iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, 0), NULL, |
1425 | 0 | refs->base.repo, 0); |
1426 | 0 | while ((ret = ref_iterator_advance(iter)) == ITER_OK) { |
1427 | 0 | if (should_pack_ref(refs, &iter->ref, opts)) |
1428 | 0 | refcount++; |
1429 | 0 | if (refcount >= limit) { |
1430 | 0 | ref_iterator_free(iter); |
1431 | 0 | return 1; |
1432 | 0 | } |
1433 | 0 | } |
1434 | | |
1435 | 0 | if (ret != ITER_DONE) |
1436 | 0 | die("error while iterating over references"); |
1437 | | |
1438 | 0 | ref_iterator_free(iter); |
1439 | 0 | return 0; |
1440 | 0 | } |
1441 | | |
1442 | | static int files_optimize(struct ref_store *ref_store, |
1443 | | struct refs_optimize_opts *opts) |
1444 | 0 | { |
1445 | 0 | struct files_ref_store *refs = |
1446 | 0 | files_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, |
1447 | 0 | "pack_refs"); |
1448 | 0 | struct ref_iterator *iter; |
1449 | 0 | int ok; |
1450 | 0 | struct ref_to_prune *refs_to_prune = NULL; |
1451 | 0 | struct strbuf err = STRBUF_INIT; |
1452 | 0 | struct ref_transaction *transaction; |
1453 | |
|
1454 | 0 | if (!should_pack_refs(refs, opts)) |
1455 | 0 | return 0; |
1456 | | |
1457 | 0 | transaction = ref_store_transaction_begin(refs->packed_ref_store, |
1458 | 0 | 0, &err); |
1459 | 0 | if (!transaction) |
1460 | 0 | return -1; |
1461 | | |
1462 | 0 | packed_refs_lock(refs->packed_ref_store, LOCK_DIE_ON_ERROR, &err); |
1463 | |
|
1464 | 0 | iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, 0), NULL, |
1465 | 0 | refs->base.repo, 0); |
1466 | 0 | while ((ok = ref_iterator_advance(iter)) == ITER_OK) { |
1467 | | /* |
1468 | | * If the loose reference can be packed, add an entry |
1469 | | * in the packed ref cache. If the reference should be |
1470 | | * pruned, also add it to refs_to_prune. |
1471 | | */ |
1472 | 0 | if (!should_pack_ref(refs, &iter->ref, opts)) |
1473 | 0 | continue; |
1474 | | |
1475 | | /* |
1476 | | * Add a reference creation for this reference to the |
1477 | | * packed-refs transaction: |
1478 | | */ |
1479 | 0 | if (ref_transaction_update(transaction, iter->ref.name, |
1480 | 0 | iter->ref.oid, NULL, NULL, NULL, |
1481 | 0 | REF_NO_DEREF, NULL, &err)) |
1482 | 0 | die("failure preparing to create packed reference %s: %s", |
1483 | 0 | iter->ref.name, err.buf); |
1484 | | |
1485 | | /* Schedule the loose reference for pruning if requested. */ |
1486 | 0 | if ((opts->flags & REFS_OPTIMIZE_PRUNE)) { |
1487 | 0 | struct ref_to_prune *n; |
1488 | 0 | FLEX_ALLOC_STR(n, name, iter->ref.name); |
1489 | 0 | oidcpy(&n->oid, iter->ref.oid); |
1490 | 0 | n->next = refs_to_prune; |
1491 | 0 | refs_to_prune = n; |
1492 | 0 | } |
1493 | 0 | } |
1494 | 0 | if (ok != ITER_DONE) |
1495 | 0 | die("error while iterating over references"); |
1496 | | |
1497 | 0 | if (ref_transaction_commit(transaction, &err)) |
1498 | 0 | die("unable to write new packed-refs: %s", err.buf); |
1499 | | |
1500 | 0 | ref_transaction_free(transaction); |
1501 | |
|
1502 | 0 | packed_refs_unlock(refs->packed_ref_store); |
1503 | |
|
1504 | 0 | prune_refs(refs, &refs_to_prune); |
1505 | 0 | ref_iterator_free(iter); |
1506 | 0 | strbuf_release(&err); |
1507 | 0 | return 0; |
1508 | 0 | } |
1509 | | |
1510 | | static int files_optimize_required(struct ref_store *ref_store, |
1511 | | struct refs_optimize_opts *opts, |
1512 | | bool *required) |
1513 | 0 | { |
1514 | 0 | struct files_ref_store *refs = files_downcast(ref_store, REF_STORE_READ, |
1515 | 0 | "optimize_required"); |
1516 | 0 | *required = should_pack_refs(refs, opts); |
1517 | 0 | return 0; |
1518 | 0 | } |
1519 | | |
1520 | | /* |
1521 | | * People using contrib's git-new-workdir have .git/logs/refs -> |
1522 | | * /some/other/path/.git/logs/refs, and that may live on another device. |
1523 | | * |
1524 | | * IOW, to avoid cross-device rename errors, the temporary renamed log must |
1525 | | * live in logs/refs. |
1526 | | */ |
1527 | 0 | #define TMP_RENAMED_LOG "refs/.tmp-renamed-log" |
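The constraint described above exists because rename(2) cannot cross filesystem boundaries: moving the temporary log into a directory on another device fails with EXDEV, so the temporary name is kept under logs/refs where the final destination also lives. A small sketch of the failure mode this layout avoids; both paths are hypothetical.

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* /tmp is frequently a different filesystem than the repository. */
	const char *src = "/tmp/.tmp-renamed-log";      /* hypothetical */
	const char *dst = ".git/logs/refs/heads/main";  /* hypothetical */

	if (rename(src, dst) < 0) {
		if (errno == EXDEV)
			fprintf(stderr, "cannot rename across devices: %s\n", strerror(errno));
		else
			fprintf(stderr, "rename failed: %s\n", strerror(errno));
		return 1;
	}
	return 0;
}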
1528 | | |
1529 | | struct rename_cb { |
1530 | | const char *tmp_renamed_log; |
1531 | | int true_errno; |
1532 | | }; |
1533 | | |
1534 | | static int rename_tmp_log_callback(const char *path, void *cb_data) |
1535 | 0 | { |
1536 | 0 | struct rename_cb *cb = cb_data; |
1537 | |
1538 | 0 | if (rename(cb->tmp_renamed_log, path)) { |
1539 | | /* |
1540 | | * rename(a, b) when b is an existing directory ought |
1541 | | * to result in ISDIR, but Solaris 5.8 gives ENOTDIR. |
1542 | | * Sheesh. Record the true errno for error reporting, |
1543 | | * but report EISDIR to raceproof_create_file() so |
1544 | | * that it knows to retry. |
1545 | | */ |
1546 | 0 | cb->true_errno = errno; |
1547 | 0 | if (errno == ENOTDIR) |
1548 | 0 | errno = EISDIR; |
1549 | 0 | return -1; |
1550 | 0 | } else { |
1551 | 0 | return 0; |
1552 | 0 | } |
1553 | 0 | } |
1554 | | |
1555 | | static int rename_tmp_log(struct files_ref_store *refs, const char *newrefname) |
1556 | 0 | { |
1557 | 0 | struct strbuf path = STRBUF_INIT; |
1558 | 0 | struct strbuf tmp = STRBUF_INIT; |
1559 | 0 | struct rename_cb cb; |
1560 | 0 | int ret; |
1561 | |
1562 | 0 | files_reflog_path(refs, &path, newrefname); |
1563 | 0 | files_reflog_path(refs, &tmp, TMP_RENAMED_LOG); |
1564 | 0 | cb.tmp_renamed_log = tmp.buf; |
1565 | 0 | ret = raceproof_create_file(path.buf, rename_tmp_log_callback, &cb); |
1566 | 0 | if (ret) { |
1567 | 0 | if (errno == EISDIR) |
1568 | 0 | error("directory not empty: %s", path.buf); |
1569 | 0 | else |
1570 | 0 | error("unable to move logfile %s to %s: %s", |
1571 | 0 | tmp.buf, path.buf, |
1572 | 0 | strerror(cb.true_errno)); |
1573 | 0 | } |
1574 | |
1575 | 0 | strbuf_release(&path); |
1576 | 0 | strbuf_release(&tmp); |
1577 | 0 | return ret; |
1578 | 0 | } |
1579 | | |
1580 | | static enum ref_transaction_error write_ref_to_lockfile(struct files_ref_store *refs, |
1581 | | struct ref_lock *lock, |
1582 | | const struct object_id *oid, |
1583 | | int skip_oid_verification, |
1584 | | struct strbuf *err); |
1585 | | static int commit_ref_update(struct files_ref_store *refs, |
1586 | | struct ref_lock *lock, |
1587 | | const struct object_id *oid, const char *logmsg, |
1588 | | int flags, |
1589 | | struct strbuf *err); |
1590 | | |
1591 | | /* |
1592 | | * Emit a better error message than lockfile.c's |
1593 | | * unable_to_lock_message() would in case there is a D/F conflict with |
1594 | | * another existing reference. If there would be a conflict, emit an error |
1595 | | * message and return false; otherwise, return true. |
1596 | | * |
1597 | | * Note that this function is not safe against all races with other |
1598 | | * processes, and that's not its job. We'll emit a more verbose error on D/F
1599 | | * conflicts if we get past it into lock_ref_oid_basic(). |
1600 | | */ |
1601 | | static int refs_rename_ref_available(struct ref_store *refs, |
1602 | | const char *old_refname, |
1603 | | const char *new_refname) |
1604 | 0 | { |
1605 | 0 | struct string_list skip = STRING_LIST_INIT_NODUP; |
1606 | 0 | struct strbuf err = STRBUF_INIT; |
1607 | 0 | int ok; |
1608 | |
1609 | 0 | string_list_insert(&skip, old_refname); |
1610 | 0 | ok = !refs_verify_refname_available(refs, new_refname, |
1611 | 0 | NULL, &skip, 0, &err); |
1612 | 0 | if (!ok) |
1613 | 0 | error("%s", err.buf); |
1614 | |
1615 | 0 | string_list_clear(&skip, 0); |
1616 | 0 | strbuf_release(&err); |
1617 | 0 | return ok; |
1618 | 0 | } |
1619 | | |
1620 | | static int files_copy_or_rename_ref(struct ref_store *ref_store, |
1621 | | const char *oldrefname, const char *newrefname, |
1622 | | const char *logmsg, int copy) |
1623 | 0 | { |
1624 | 0 | struct files_ref_store *refs = |
1625 | 0 | files_downcast(ref_store, REF_STORE_WRITE, "rename_ref"); |
1626 | 0 | struct object_id orig_oid; |
1627 | 0 | int flag = 0, logmoved = 0; |
1628 | 0 | struct ref_lock *lock; |
1629 | 0 | struct stat loginfo; |
1630 | 0 | struct strbuf sb_oldref = STRBUF_INIT; |
1631 | 0 | struct strbuf sb_newref = STRBUF_INIT; |
1632 | 0 | struct strbuf tmp_renamed_log = STRBUF_INIT; |
1633 | 0 | int log, ret; |
1634 | 0 | struct strbuf err = STRBUF_INIT; |
1635 | |
1636 | 0 | files_reflog_path(refs, &sb_oldref, oldrefname); |
1637 | 0 | files_reflog_path(refs, &sb_newref, newrefname); |
1638 | 0 | files_reflog_path(refs, &tmp_renamed_log, TMP_RENAMED_LOG); |
1639 | |
1640 | 0 | log = !lstat(sb_oldref.buf, &loginfo); |
1641 | 0 | if (log && S_ISLNK(loginfo.st_mode)) { |
1642 | 0 | ret = error("reflog for %s is a symlink", oldrefname); |
1643 | 0 | goto out; |
1644 | 0 | } |
1645 | | |
1646 | 0 | if (!refs_resolve_ref_unsafe(&refs->base, oldrefname, |
1647 | 0 | RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE, |
1648 | 0 | &orig_oid, &flag)) { |
1649 | 0 | ret = error("refname %s not found", oldrefname); |
1650 | 0 | goto out; |
1651 | 0 | } |
1652 | | |
1653 | 0 | if (flag & REF_ISSYMREF) { |
1654 | 0 | if (copy) |
1655 | 0 | ret = error("refname %s is a symbolic ref, copying it is not supported", |
1656 | 0 | oldrefname); |
1657 | 0 | else |
1658 | 0 | ret = error("refname %s is a symbolic ref, renaming it is not supported", |
1659 | 0 | oldrefname); |
1660 | 0 | goto out; |
1661 | 0 | } |
1662 | 0 | if (!refs_rename_ref_available(&refs->base, oldrefname, newrefname)) { |
1663 | 0 | ret = 1; |
1664 | 0 | goto out; |
1665 | 0 | } |
1666 | | |
1667 | 0 | if (!copy && log && rename(sb_oldref.buf, tmp_renamed_log.buf)) { |
1668 | 0 | ret = error("unable to move logfile logs/%s to logs/"TMP_RENAMED_LOG": %s", |
1669 | 0 | oldrefname, strerror(errno)); |
1670 | 0 | goto out; |
1671 | 0 | } |
1672 | | |
1673 | 0 | if (copy && log && copy_file(tmp_renamed_log.buf, sb_oldref.buf, 0644)) { |
1674 | 0 | ret = error("unable to copy logfile logs/%s to logs/"TMP_RENAMED_LOG": %s", |
1675 | 0 | oldrefname, strerror(errno)); |
1676 | 0 | goto out; |
1677 | 0 | } |
1678 | | |
1679 | 0 | if (!copy && refs_delete_ref(&refs->base, logmsg, oldrefname, |
1680 | 0 | &orig_oid, REF_NO_DEREF)) { |
1681 | 0 | error("unable to delete old %s", oldrefname); |
1682 | 0 | goto rollback; |
1683 | 0 | } |
1684 | | |
1685 | | /* |
1686 | | * Since we are doing a shallow lookup, oid is not the |
1687 | | * correct value to pass to delete_ref as old_oid. But that |
1688 | | * doesn't matter, because an old_oid check wouldn't add to |
1689 | | * the safety anyway; we want to delete the reference whatever |
1690 | | * its current value. |
1691 | | */ |
1692 | 0 | if (!copy && refs_resolve_ref_unsafe(&refs->base, newrefname, |
1693 | 0 | RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE, |
1694 | 0 | NULL, NULL) && |
1695 | 0 | refs_delete_ref(&refs->base, NULL, newrefname, |
1696 | 0 | NULL, REF_NO_DEREF)) { |
1697 | 0 | if (errno == EISDIR) { |
1698 | 0 | struct strbuf path = STRBUF_INIT; |
1699 | 0 | int result; |
1700 | |
1701 | 0 | files_ref_path(refs, &path, newrefname); |
1702 | 0 | result = remove_empty_directories(&path); |
1703 | 0 | strbuf_release(&path); |
1704 | |
1705 | 0 | if (result) { |
1706 | 0 | error("Directory not empty: %s", newrefname); |
1707 | 0 | goto rollback; |
1708 | 0 | } |
1709 | 0 | } else { |
1710 | 0 | error("unable to delete existing %s", newrefname); |
1711 | 0 | goto rollback; |
1712 | 0 | } |
1713 | 0 | } |
1714 | | |
1715 | 0 | if (log && rename_tmp_log(refs, newrefname)) |
1716 | 0 | goto rollback; |
1717 | | |
1718 | 0 | logmoved = log; |
1719 | |
1720 | 0 | lock = lock_ref_oid_basic(refs, newrefname, &err); |
1721 | 0 | if (!lock) { |
1722 | 0 | if (copy) |
1723 | 0 | error("unable to copy '%s' to '%s': %s", oldrefname, newrefname, err.buf); |
1724 | 0 | else |
1725 | 0 | error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf); |
1726 | 0 | strbuf_release(&err); |
1727 | 0 | goto rollback; |
1728 | 0 | } |
1729 | 0 | oidcpy(&lock->old_oid, &orig_oid); |
1730 | |
1731 | 0 | if (write_ref_to_lockfile(refs, lock, &orig_oid, 0, &err) || |
1732 | 0 | commit_ref_update(refs, lock, &orig_oid, logmsg, 0, &err)) { |
1733 | 0 | error("unable to write current sha1 into %s: %s", newrefname, err.buf); |
1734 | 0 | strbuf_release(&err); |
1735 | 0 | goto rollback; |
1736 | 0 | } |
1737 | | |
1738 | 0 | ret = 0; |
1739 | 0 | goto out; |
1740 | | |
1741 | 0 | rollback: |
1742 | 0 | lock = lock_ref_oid_basic(refs, oldrefname, &err); |
1743 | 0 | if (!lock) { |
1744 | 0 | error("unable to lock %s for rollback: %s", oldrefname, err.buf); |
1745 | 0 | strbuf_release(&err); |
1746 | 0 | goto rollbacklog; |
1747 | 0 | } |
1748 | | |
1749 | 0 | if (write_ref_to_lockfile(refs, lock, &orig_oid, 0, &err) || |
1750 | 0 | commit_ref_update(refs, lock, &orig_oid, NULL, REF_SKIP_CREATE_REFLOG, &err)) { |
1751 | 0 | error("unable to write current sha1 into %s: %s", oldrefname, err.buf); |
1752 | 0 | strbuf_release(&err); |
1753 | 0 | } |
1754 | |
1755 | 0 | rollbacklog: |
1756 | 0 | if (logmoved && rename(sb_newref.buf, sb_oldref.buf)) |
1757 | 0 | error("unable to restore logfile %s from %s: %s", |
1758 | 0 | oldrefname, newrefname, strerror(errno)); |
1759 | 0 | if (!logmoved && log && |
1760 | 0 | rename(tmp_renamed_log.buf, sb_oldref.buf)) |
1761 | 0 | error("unable to restore logfile %s from logs/"TMP_RENAMED_LOG": %s", |
1762 | 0 | oldrefname, strerror(errno)); |
1763 | 0 | ret = 1; |
1764 | 0 | out: |
1765 | 0 | strbuf_release(&sb_newref); |
1766 | 0 | strbuf_release(&sb_oldref); |
1767 | 0 | strbuf_release(&tmp_renamed_log); |
1768 | |
1769 | 0 | return ret; |
1770 | 0 | } |
1771 | | |
1772 | | static int files_rename_ref(struct ref_store *ref_store, |
1773 | | const char *oldrefname, const char *newrefname, |
1774 | | const char *logmsg) |
1775 | 0 | { |
1776 | 0 | return files_copy_or_rename_ref(ref_store, oldrefname, |
1777 | 0 | newrefname, logmsg, 0); |
1778 | 0 | } |
1779 | | |
1780 | | static int files_copy_ref(struct ref_store *ref_store, |
1781 | | const char *oldrefname, const char *newrefname, |
1782 | | const char *logmsg) |
1783 | 0 | { |
1784 | 0 | return files_copy_or_rename_ref(ref_store, oldrefname, |
1785 | 0 | newrefname, logmsg, 1); |
1786 | 0 | } |
1787 | | |
1788 | | static int close_ref_gently(struct ref_lock *lock) |
1789 | 0 | { |
1790 | 0 | if (close_lock_file_gently(&lock->lk)) |
1791 | 0 | return -1; |
1792 | 0 | return 0; |
1793 | 0 | } |
1794 | | |
1795 | | static int commit_ref(struct ref_lock *lock) |
1796 | 0 | { |
1797 | 0 | char *path = get_locked_file_path(&lock->lk); |
1798 | 0 | struct stat st; |
1799 | |
1800 | 0 | if (!lstat(path, &st) && S_ISDIR(st.st_mode)) { |
1801 | | /* |
1802 | | * There is a directory at the path we want to rename |
1803 | | * the lockfile to. Hopefully it is empty; try to |
1804 | | * delete it. |
1805 | | */ |
1806 | 0 | size_t len = strlen(path); |
1807 | 0 | struct strbuf sb_path = STRBUF_INIT; |
1808 | |
1809 | 0 | strbuf_attach(&sb_path, path, len, len); |
1810 | | |
1811 | | /* |
1812 | | * If this fails, commit_lock_file() will also fail |
1813 | | * and will report the problem. |
1814 | | */ |
1815 | 0 | remove_empty_directories(&sb_path); |
1816 | 0 | strbuf_release(&sb_path); |
1817 | 0 | } else { |
1818 | 0 | free(path); |
1819 | 0 | } |
1820 | |
1821 | 0 | if (commit_lock_file(&lock->lk)) |
1822 | 0 | return -1; |
1823 | 0 | return 0; |
1824 | 0 | } |
1825 | | |
1826 | | static int open_or_create_logfile(const char *path, void *cb) |
1827 | 0 | { |
1828 | 0 | int *fd = cb; |
1829 | |
1830 | 0 | *fd = open(path, O_APPEND | O_WRONLY | O_CREAT, 0666); |
1831 | 0 | return (*fd < 0) ? -1 : 0; |
1832 | 0 | } |
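open_or_create_logfile() is the callback handed to raceproof_create_file(); it opens the reflog for appending, creating it if necessary, and hands the descriptor back through the void * cookie. A standalone sketch of the same open/append step, with a hypothetical path:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* O_APPEND | O_WRONLY | O_CREAT mirrors how reflog files are opened above. */
	int fd = open("example-reflog", O_APPEND | O_WRONLY | O_CREAT, 0666);

	if (fd < 0) {
		fprintf(stderr, "open failed: %s\n", strerror(errno));
		return 1;
	}
	if (write(fd, "one entry\n", 10) != 10)
		perror("write");
	return close(fd) ? 1 : 0;
}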
1833 | | |
1834 | | /* |
1835 | | * Create a reflog for a ref. If force_create = 0, only create the |
1836 | | * reflog for certain refs (those for which should_autocreate_reflog |
1837 | | * returns non-zero). Otherwise, create it regardless of the reference |
1838 | | * name. If the logfile already existed or was created, return 0 and |
1839 | | * set *logfd to the file descriptor opened for appending to the file. |
1840 | | * If no logfile exists and we decided not to create one, return 0 and |
1841 | | * set *logfd to -1. On failure, fill in *err, set *logfd to -1, and |
1842 | | * return -1. |
1843 | | */ |
1844 | | static int log_ref_setup(struct files_ref_store *refs, |
1845 | | const char *refname, int force_create, |
1846 | | int *logfd, struct strbuf *err) |
1847 | 0 | { |
1848 | 0 | enum log_refs_config log_refs_cfg = refs->log_all_ref_updates; |
1849 | 0 | struct strbuf logfile_sb = STRBUF_INIT; |
1850 | 0 | char *logfile; |
1851 | |
1852 | 0 | if (log_refs_cfg == LOG_REFS_UNSET) |
1853 | 0 | log_refs_cfg = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL; |
1854 | |
1855 | 0 | files_reflog_path(refs, &logfile_sb, refname); |
1856 | 0 | logfile = strbuf_detach(&logfile_sb, NULL); |
1857 | |
1858 | 0 | if (force_create || should_autocreate_reflog(log_refs_cfg, refname)) { |
1859 | 0 | if (raceproof_create_file(logfile, open_or_create_logfile, logfd)) { |
1860 | 0 | if (errno == ENOENT) |
1861 | 0 | strbuf_addf(err, "unable to create directory for '%s': " |
1862 | 0 | "%s", logfile, strerror(errno)); |
1863 | 0 | else if (errno == EISDIR) |
1864 | 0 | strbuf_addf(err, "there are still logs under '%s'", |
1865 | 0 | logfile); |
1866 | 0 | else |
1867 | 0 | strbuf_addf(err, "unable to append to '%s': %s", |
1868 | 0 | logfile, strerror(errno)); |
1869 | |
1870 | 0 | goto error; |
1871 | 0 | } |
1872 | 0 | } else { |
1873 | 0 | *logfd = open(logfile, O_APPEND | O_WRONLY); |
1874 | 0 | if (*logfd < 0) { |
1875 | 0 | if (errno == ENOENT || errno == EISDIR) { |
1876 | | /* |
1877 | | * The logfile doesn't already exist, |
1878 | | * but that is not an error; it only |
1879 | | * means that we won't write log |
1880 | | * entries to it. |
1881 | | */ |
1882 | 0 | ; |
1883 | 0 | } else { |
1884 | 0 | strbuf_addf(err, "unable to append to '%s': %s", |
1885 | 0 | logfile, strerror(errno)); |
1886 | 0 | goto error; |
1887 | 0 | } |
1888 | 0 | } |
1889 | 0 | } |
1890 | | |
1891 | 0 | if (*logfd >= 0) |
1892 | 0 | adjust_shared_perm(the_repository, logfile); |
1893 | |
1894 | 0 | free(logfile); |
1895 | 0 | return 0; |
1896 | | |
1897 | 0 | error: |
1898 | 0 | free(logfile); |
1899 | 0 | return -1; |
1900 | 0 | } |
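log_ref_setup() resolves the reflog for a refname to a file underneath the repository's "logs" directory, so the history of refs/heads/main ends up in .git/logs/refs/heads/main. A minimal sketch of that path construction; the gitdir literal and refname are illustrative, while the real code goes through files_reflog_path() and respects the common directory.

#include <stdio.h>

int main(void)
{
	const char *gitdir = ".git";              /* normally discovered, not hard-coded */
	const char *refname = "refs/heads/main";  /* hypothetical ref */
	char path[4096];

	/* Reflogs live under "<gitdir>/logs/<refname>". */
	snprintf(path, sizeof(path), "%s/logs/%s", gitdir, refname);
	printf("reflog path: %s\n", path);
	return 0;
}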
1901 | | |
1902 | | static int files_create_reflog(struct ref_store *ref_store, const char *refname, |
1903 | | struct strbuf *err) |
1904 | 0 | { |
1905 | 0 | struct files_ref_store *refs = |
1906 | 0 | files_downcast(ref_store, REF_STORE_WRITE, "create_reflog"); |
1907 | 0 | int fd; |
1908 | |
1909 | 0 | if (log_ref_setup(refs, refname, 1, &fd, err)) |
1910 | 0 | return -1; |
1911 | | |
1912 | 0 | if (fd >= 0) |
1913 | 0 | close(fd); |
1914 | |
1915 | 0 | return 0; |
1916 | 0 | } |
1917 | | |
1918 | | static int log_ref_write_fd(int fd, const struct object_id *old_oid, |
1919 | | const struct object_id *new_oid, |
1920 | | const char *committer, const char *msg) |
1921 | 0 | { |
1922 | 0 | struct strbuf sb = STRBUF_INIT; |
1923 | 0 | int ret = 0; |
1924 | |
1925 | 0 | if (!committer) |
1926 | 0 | committer = git_committer_info(0); |
1927 | |
1928 | 0 | strbuf_addf(&sb, "%s %s %s", oid_to_hex(old_oid), oid_to_hex(new_oid), committer); |
1929 | 0 | if (msg && *msg) { |
1930 | 0 | strbuf_addch(&sb, '\t'); |
1931 | 0 | strbuf_addstr(&sb, msg); |
1932 | 0 | } |
1933 | 0 | strbuf_addch(&sb, '\n'); |
1934 | 0 | if (write_in_full(fd, sb.buf, sb.len) < 0) |
1935 | 0 | ret = -1; |
1936 | 0 | strbuf_release(&sb); |
1937 | 0 | return ret; |
1938 | 0 | } |
1939 | | |
1940 | | static int files_log_ref_write(struct files_ref_store *refs, |
1941 | | const char *refname, |
1942 | | const struct object_id *old_oid, |
1943 | | const struct object_id *new_oid, |
1944 | | const char *committer_info, const char *msg, |
1945 | | int flags, struct strbuf *err) |
1946 | 0 | { |
1947 | 0 | int logfd, result; |
1948 | |
1949 | 0 | if (flags & REF_SKIP_CREATE_REFLOG) |
1950 | 0 | return 0; |
1951 | | |
1952 | 0 | result = log_ref_setup(refs, refname, |
1953 | 0 | flags & REF_FORCE_CREATE_REFLOG, |
1954 | 0 | &logfd, err); |
1955 | |
1956 | 0 | if (result) |
1957 | 0 | return result; |
1958 | | |
1959 | 0 | if (logfd < 0) |
1960 | 0 | return 0; |
1961 | 0 | result = log_ref_write_fd(logfd, old_oid, new_oid, committer_info, msg); |
1962 | 0 | if (result) { |
1963 | 0 | struct strbuf sb = STRBUF_INIT; |
1964 | 0 | int save_errno = errno; |
1965 | |
1966 | 0 | files_reflog_path(refs, &sb, refname); |
1967 | 0 | strbuf_addf(err, "unable to append to '%s': %s", |
1968 | 0 | sb.buf, strerror(save_errno)); |
1969 | 0 | strbuf_release(&sb); |
1970 | 0 | close(logfd); |
1971 | 0 | return -1; |
1972 | 0 | } |
1973 | 0 | if (close(logfd)) { |
1974 | 0 | struct strbuf sb = STRBUF_INIT; |
1975 | 0 | int save_errno = errno; |
1976 | |
1977 | 0 | files_reflog_path(refs, &sb, refname); |
1978 | 0 | strbuf_addf(err, "unable to append to '%s': %s", |
1979 | 0 | sb.buf, strerror(save_errno)); |
1980 | 0 | strbuf_release(&sb); |
1981 | 0 | return -1; |
1982 | 0 | } |
1983 | 0 | return 0; |
1984 | 0 | } |
1985 | | |
1986 | | /* |
1987 | | * Write oid into the open lockfile, then close the lockfile. On |
1988 | | * errors, rollback the lockfile, fill in *err and return -1. |
1989 | | */ |
1990 | | static enum ref_transaction_error write_ref_to_lockfile(struct files_ref_store *refs, |
1991 | | struct ref_lock *lock, |
1992 | | const struct object_id *oid, |
1993 | | int skip_oid_verification, |
1994 | | struct strbuf *err) |
1995 | 0 | { |
1996 | 0 | static char term = '\n'; |
1997 | 0 | struct object *o; |
1998 | 0 | int fd; |
1999 | |
2000 | 0 | if (!skip_oid_verification) { |
2001 | 0 | o = parse_object(refs->base.repo, oid); |
2002 | 0 | if (!o) { |
2003 | 0 | strbuf_addf( |
2004 | 0 | err, |
2005 | 0 | "trying to write ref '%s' with nonexistent object %s", |
2006 | 0 | lock->ref_name, oid_to_hex(oid)); |
2007 | 0 | unlock_ref(lock); |
2008 | 0 | return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE; |
2009 | 0 | } |
2010 | 0 | if (o->type != OBJ_COMMIT && is_branch(lock->ref_name)) { |
2011 | 0 | strbuf_addf( |
2012 | 0 | err, |
2013 | 0 | "trying to write non-commit object %s to branch '%s'", |
2014 | 0 | oid_to_hex(oid), lock->ref_name); |
2015 | 0 | unlock_ref(lock); |
2016 | 0 | return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE; |
2017 | 0 | } |
2018 | 0 | } |
2019 | 0 | fd = get_lock_file_fd(&lock->lk); |
2020 | 0 | if (write_in_full(fd, oid_to_hex(oid), refs->base.repo->hash_algo->hexsz) < 0 || |
2021 | 0 | write_in_full(fd, &term, 1) < 0 || |
2022 | 0 | fsync_component(FSYNC_COMPONENT_REFERENCE, get_lock_file_fd(&lock->lk)) < 0 || |
2023 | 0 | close_ref_gently(lock) < 0) { |
2024 | 0 | strbuf_addf(err, |
2025 | 0 | "couldn't write '%s'", get_lock_file_path(&lock->lk)); |
2026 | 0 | unlock_ref(lock); |
2027 | 0 | return REF_TRANSACTION_ERROR_GENERIC; |
2028 | 0 | } |
2029 | 0 | return 0; |
2030 | 0 | } |
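The body written into the lockfile here is nothing more than the hexadecimal object name followed by a single newline; once commit_ref() renames the lockfile into place, that is the entire content of a loose ref file. A sketch that produces such a file; the object name and output path are made up for illustration.

#include <stdio.h>

int main(void)
{
	/* Hypothetical object name; a real one comes from the object database. */
	const char *hex_oid = "89e484a4fdbf1ae33a8a1a4e2a2e89f5ce63dea2";
	FILE *f = fopen("main.lock", "w");  /* stand-in for the real lockfile path */

	if (!f)
		return 1;
	/* The loose-ref format is simply "<hex-oid>\n". */
	fprintf(f, "%s\n", hex_oid);
	return fclose(f) ? 1 : 0;
}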
2031 | | |
2032 | | /* |
2033 | | * Commit a change to a loose reference that has already been written |
2034 | | * to the loose reference lockfile. Also update the reflogs if |
2035 | | * necessary, using the specified logmsg (which can be NULL).
2036 | | */ |
2037 | | static int commit_ref_update(struct files_ref_store *refs, |
2038 | | struct ref_lock *lock, |
2039 | | const struct object_id *oid, const char *logmsg, |
2040 | | int flags, |
2041 | | struct strbuf *err) |
2042 | 0 | { |
2043 | 0 | files_assert_main_repository(refs, "commit_ref_update"); |
2044 | |
2045 | 0 | clear_loose_ref_cache(refs); |
2046 | 0 | if (files_log_ref_write(refs, lock->ref_name, &lock->old_oid, oid, NULL, |
2047 | 0 | logmsg, flags, err)) { |
2048 | 0 | char *old_msg = strbuf_detach(err, NULL); |
2049 | 0 | strbuf_addf(err, "cannot update the ref '%s': %s", |
2050 | 0 | lock->ref_name, old_msg); |
2051 | 0 | free(old_msg); |
2052 | 0 | unlock_ref(lock); |
2053 | 0 | return -1; |
2054 | 0 | } |
2055 | | |
2056 | 0 | if (strcmp(lock->ref_name, "HEAD") != 0) { |
2057 | | /* |
2058 | | * Special hack: If a branch is updated directly and HEAD |
2059 | | * points to it (may happen on the remote side of a push |
2060 | | * for example) then logically the HEAD reflog should be |
2061 | | * updated too. |
2062 | | * A generic solution implies reverse symref information, |
2063 | | * but finding all symrefs pointing to the given branch |
2064 | | * would be rather costly for this rare event (the direct |
2065 | | * update of a branch) to be worth it. So let's cheat and |
2066 | | * check with HEAD only which should cover 99% of all usage |
2067 | | * scenarios (even 100% of the default ones). |
2068 | | */ |
2069 | 0 | int head_flag; |
2070 | 0 | const char *head_ref; |
2071 | |
2072 | 0 | head_ref = refs_resolve_ref_unsafe(&refs->base, "HEAD", |
2073 | 0 | RESOLVE_REF_READING, |
2074 | 0 | NULL, &head_flag); |
2075 | 0 | if (head_ref && (head_flag & REF_ISSYMREF) && |
2076 | 0 | !strcmp(head_ref, lock->ref_name)) { |
2077 | 0 | struct strbuf log_err = STRBUF_INIT; |
2078 | 0 | if (files_log_ref_write(refs, "HEAD", &lock->old_oid, |
2079 | 0 | oid, NULL, logmsg, flags, |
2080 | 0 | &log_err)) { |
2081 | 0 | error("%s", log_err.buf); |
2082 | 0 | strbuf_release(&log_err); |
2083 | 0 | } |
2084 | 0 | } |
2085 | 0 | } |
2086 | |
2087 | 0 | if (commit_ref(lock)) { |
2088 | 0 | strbuf_addf(err, "couldn't set '%s'", lock->ref_name); |
2089 | 0 | unlock_ref(lock); |
2090 | 0 | return -1; |
2091 | 0 | } |
2092 | | |
2093 | 0 | unlock_ref(lock); |
2094 | 0 | return 0; |
2095 | 0 | } |
2096 | | |
2097 | | #if defined(NO_SYMLINK_HEAD) || defined(WITH_BREAKING_CHANGES) |
2098 | | #define create_ref_symlink(a, b) (-1) |
2099 | | #else |
2100 | | static int create_ref_symlink(struct ref_lock *lock, const char *target) |
2101 | 0 | { |
2102 | 0 | static int warn_once = 1; |
2103 | 0 | char *ref_path; |
2104 | 0 | int ret = -1; |
2105 | |
2106 | 0 | ref_path = get_locked_file_path(&lock->lk); |
2107 | 0 | unlink(ref_path); |
2108 | 0 | ret = symlink(target, ref_path); |
2109 | 0 | free(ref_path); |
2110 | |
2111 | 0 | if (ret) |
2112 | 0 | fprintf(stderr, "no symlink - falling back to symbolic ref\n"); |
2113 | |
2114 | 0 | if (warn_once) |
2115 | 0 | warning(_("'core.preferSymlinkRefs=true' is nominated for removal.\n" |
2116 | 0 | "hint: The use of symbolic links for symbolic refs is deprecated\n" |
2117 | 0 | "hint: and will be removed in Git 3.0. The configuration that\n" |
2118 | 0 | "hint: tells Git to use them is thus going away. You can unset\n" |
2119 | 0 | "hint: it with:\n" |
2120 | 0 | "hint:\n" |
2121 | 0 | "hint:\tgit config unset core.preferSymlinkRefs\n" |
2122 | 0 | "hint:\n" |
2123 | 0 | "hint: Git will then use the textual symref format instead.")); |
2124 | 0 | warn_once = 0; |
2125 | |
2126 | 0 | return ret; |
2127 | 0 | } |
2128 | | #endif |
2129 | | |
2130 | | static int create_symref_lock(struct ref_lock *lock, const char *target, |
2131 | | struct strbuf *err) |
2132 | 0 | { |
2133 | 0 | if (!fdopen_lock_file(&lock->lk, "w")) { |
2134 | 0 | strbuf_addf(err, "unable to fdopen %s: %s", |
2135 | 0 | get_lock_file_path(&lock->lk), strerror(errno)); |
2136 | 0 | return -1; |
2137 | 0 | } |
2138 | | |
2139 | 0 | if (fprintf(get_lock_file_fp(&lock->lk), "ref: %s\n", target) < 0) { |
2140 | 0 | strbuf_addf(err, "unable to write to %s: %s", |
2141 | 0 | get_lock_file_path(&lock->lk), strerror(errno)); |
2142 | 0 | return -1; |
2143 | 0 | } |
2144 | | |
2145 | 0 | return 0; |
2146 | 0 | } |
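A symref stored textually has exactly the content written above: "ref: " followed by the target refname and a newline, which is what .git/HEAD normally holds. A small sketch that reads such a file back and distinguishes a symbolic ref from a detached object name; the file name is illustrative.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("HEAD", "r");  /* e.g. .git/HEAD in a real repository */

	if (!f || !fgets(buf, sizeof(buf), f))
		return 1;
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';

	/* A textual symref starts with "ref: "; anything else is a detached oid. */
	if (!strncmp(buf, "ref: ", 5))
		printf("symbolic ref pointing at %s\n", buf + 5);
	else
		printf("detached at %s\n", buf);
	return 0;
}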
2147 | | |
2148 | | static int files_reflog_exists(struct ref_store *ref_store, |
2149 | | const char *refname) |
2150 | 0 | { |
2151 | 0 | struct files_ref_store *refs = |
2152 | 0 | files_downcast(ref_store, REF_STORE_READ, "reflog_exists"); |
2153 | 0 | struct strbuf sb = STRBUF_INIT; |
2154 | 0 | struct stat st; |
2155 | 0 | int ret; |
2156 | |
2157 | 0 | files_reflog_path(refs, &sb, refname); |
2158 | 0 | ret = !lstat(sb.buf, &st) && S_ISREG(st.st_mode); |
2159 | 0 | strbuf_release(&sb); |
2160 | 0 | return ret; |
2161 | 0 | } |
2162 | | |
2163 | | static int files_delete_reflog(struct ref_store *ref_store, |
2164 | | const char *refname) |
2165 | 0 | { |
2166 | 0 | struct files_ref_store *refs = |
2167 | 0 | files_downcast(ref_store, REF_STORE_WRITE, "delete_reflog"); |
2168 | 0 | struct strbuf sb = STRBUF_INIT; |
2169 | 0 | int ret; |
2170 | |
2171 | 0 | files_reflog_path(refs, &sb, refname); |
2172 | 0 | ret = remove_path(sb.buf); |
2173 | 0 | strbuf_release(&sb); |
2174 | 0 | return ret; |
2175 | 0 | } |
2176 | | |
2177 | | static int show_one_reflog_ent(struct files_ref_store *refs, |
2178 | | const char *refname, |
2179 | | struct strbuf *sb, |
2180 | | each_reflog_ent_fn fn, void *cb_data) |
2181 | 0 | { |
2182 | 0 | struct object_id ooid, noid; |
2183 | 0 | char *email_end, *message; |
2184 | 0 | timestamp_t timestamp; |
2185 | 0 | int tz; |
2186 | 0 | const char *p = sb->buf; |
2187 | | |
2188 | | /* old SP new SP name <email> SP time TAB msg LF */ |
2189 | 0 | if (!sb->len || sb->buf[sb->len - 1] != '\n' || |
2190 | 0 | parse_oid_hex_algop(p, &ooid, &p, refs->base.repo->hash_algo) || *p++ != ' ' || |
2191 | 0 | parse_oid_hex_algop(p, &noid, &p, refs->base.repo->hash_algo) || *p++ != ' ' || |
2192 | 0 | !(email_end = strchr(p, '>')) || |
2193 | 0 | email_end[1] != ' ' || |
2194 | 0 | !(timestamp = parse_timestamp(email_end + 2, &message, 10)) || |
2195 | 0 | !message || message[0] != ' ' || |
2196 | 0 | (message[1] != '+' && message[1] != '-') || |
2197 | 0 | !isdigit(message[2]) || !isdigit(message[3]) || |
2198 | 0 | !isdigit(message[4]) || !isdigit(message[5])) |
2199 | 0 | return 0; /* corrupt? */ |
2200 | 0 | email_end[1] = '\0'; |
2201 | 0 | tz = strtol(message + 1, NULL, 10); |
2202 | 0 | if (message[6] != '\t') |
2203 | 0 | message += 6; |
2204 | 0 | else |
2205 | 0 | message += 7; |
2206 | 0 | return fn(refname, &ooid, &noid, p, timestamp, tz, message, cb_data); |
2207 | 0 | } |
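Each reflog entry parsed above is one line of the form "<old-oid> SP <new-oid> SP <ident> SP <timestamp> SP <tz> TAB <message> LF". The sketch below pulls the timestamp and message out of one such entry using the same '>' anchor; the oids, ident and message are made-up sample values.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* A hypothetical reflog line (shown here with SHA-1-sized object names). */
	const char *line =
		"0000000000000000000000000000000000000000 "
		"89e484a4fdbf1ae33a8a1a4e2a2e89f5ce63dea2 "
		"C O Mitter <committer@example.com> 1700000000 +0000\t"
		"branch: Created from HEAD";
	const char *email_end = strchr(line, '>');
	char *after_ts, *msg;
	long long timestamp;

	if (!email_end || email_end[1] != ' ')
		return 1;
	timestamp = strtoll(email_end + 2, &after_ts, 10);
	/* after_ts points at " +0000\t..."; the message follows the TAB. */
	msg = strchr(after_ts, '\t');
	printf("timestamp=%lld message=%s\n", timestamp, msg ? msg + 1 : "(none)");
	return 0;
}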
2208 | | |
2209 | | static char *find_beginning_of_line(char *bob, char *scan) |
2210 | 0 | { |
2211 | 0 | while (bob < scan && *(--scan) != '\n') |
2212 | 0 | ; /* keep scanning backwards */ |
2213 | | /* |
2214 | | * Return either beginning of the buffer, or LF at the end of |
2215 | | * the previous line. |
2216 | | */ |
2217 | 0 | return scan; |
2218 | 0 | } |
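find_beginning_of_line() walks backwards from scan until it reaches the previous newline or the start of the buffer; the reverse iterator below uses it to carve complete lines out of each block read from the end of the log. A tiny standalone check of the same scan on an in-memory buffer:

#include <stdio.h>

static char *find_beginning_of_line(char *bob, char *scan)
{
	while (bob < scan && *(--scan) != '\n')
		; /* keep scanning backwards */
	return scan;
}

int main(void)
{
	char buf[] = "first line\nsecond line\n";
	char *end = buf + sizeof(buf) - 2;  /* the final '\n' of the buffer */
	char *bp = find_beginning_of_line(buf, end);

	/* bp is the LF ending "first line", so the last line starts at bp + 1. */
	printf("last complete line starts with: %.6s\n", bp + 1);
	return 0;
}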
2219 | | |
2220 | | static int files_for_each_reflog_ent_reverse(struct ref_store *ref_store, |
2221 | | const char *refname, |
2222 | | each_reflog_ent_fn fn, |
2223 | | void *cb_data) |
2224 | 0 | { |
2225 | 0 | struct files_ref_store *refs = |
2226 | 0 | files_downcast(ref_store, REF_STORE_READ, |
2227 | 0 | "for_each_reflog_ent_reverse"); |
2228 | 0 | struct strbuf sb = STRBUF_INIT; |
2229 | 0 | FILE *logfp; |
2230 | 0 | long pos; |
2231 | 0 | int ret = 0, at_tail = 1; |
2232 | |
2233 | 0 | files_reflog_path(refs, &sb, refname); |
2234 | 0 | logfp = fopen(sb.buf, "r"); |
2235 | 0 | strbuf_release(&sb); |
2236 | 0 | if (!logfp) |
2237 | 0 | return -1; |
2238 | | |
2239 | | /* Jump to the end */ |
2240 | 0 | if (fseek(logfp, 0, SEEK_END) < 0) |
2241 | 0 | ret = error("cannot seek back reflog for %s: %s", |
2242 | 0 | refname, strerror(errno)); |
2243 | 0 | pos = ftell(logfp); |
2244 | 0 | while (!ret && 0 < pos) { |
2245 | 0 | int cnt; |
2246 | 0 | size_t nread; |
2247 | 0 | char buf[BUFSIZ]; |
2248 | 0 | char *endp, *scanp; |
2249 | | |
2250 | | /* Fill next block from the end */ |
2251 | 0 | cnt = (sizeof(buf) < pos) ? sizeof(buf) : pos; |
2252 | 0 | if (fseek(logfp, pos - cnt, SEEK_SET)) { |
2253 | 0 | ret = error("cannot seek back reflog for %s: %s", |
2254 | 0 | refname, strerror(errno)); |
2255 | 0 | break; |
2256 | 0 | } |
2257 | 0 | nread = fread(buf, cnt, 1, logfp); |
2258 | 0 | if (nread != 1) { |
2259 | 0 | ret = error("cannot read %d bytes from reflog for %s: %s", |
2260 | 0 | cnt, refname, strerror(errno)); |
2261 | 0 | break; |
2262 | 0 | } |
2263 | 0 | pos -= cnt; |
2264 | |
2265 | 0 | scanp = endp = buf + cnt; |
2266 | 0 | if (at_tail && scanp[-1] == '\n') |
2267 | | /* Looking at the final LF at the end of the file */ |
2268 | 0 | scanp--; |
2269 | 0 | at_tail = 0; |
2270 | |
2271 | 0 | while (buf < scanp) { |
2272 | | /* |
2273 | | * terminating LF of the previous line, or the beginning |
2274 | | * of the buffer. |
2275 | | */ |
2276 | 0 | char *bp; |
2277 | |
2278 | 0 | bp = find_beginning_of_line(buf, scanp); |
2279 | |
2280 | 0 | if (*bp == '\n') { |
2281 | | /* |
2282 | | * The newline is the end of the previous line, |
2283 | | * so we know we have a complete line starting
2284 | | * at (bp + 1). Prefix it onto any prior data |
2285 | | * we collected for the line and process it. |
2286 | | */ |
2287 | 0 | strbuf_splice(&sb, 0, 0, bp + 1, endp - (bp + 1)); |
2288 | 0 | scanp = bp; |
2289 | 0 | endp = bp + 1; |
2290 | 0 | ret = show_one_reflog_ent(refs, refname, &sb, fn, cb_data); |
2291 | 0 | strbuf_reset(&sb); |
2292 | 0 | if (ret) |
2293 | 0 | break; |
2294 | 0 | } else if (!pos) { |
2295 | | /* |
2296 | | * We are at the start of the buffer, and the |
2297 | | * start of the file; there is no previous |
2298 | | * line, and we have everything for this one. |
2299 | | * Process it, and we can end the loop. |
2300 | | */ |
2301 | 0 | strbuf_splice(&sb, 0, 0, buf, endp - buf); |
2302 | 0 | ret = show_one_reflog_ent(refs, refname, &sb, fn, cb_data); |
2303 | 0 | strbuf_reset(&sb); |
2304 | 0 | break; |
2305 | 0 | } |
2306 | | |
2307 | 0 | if (bp == buf) { |
2308 | | /* |
2309 | | * We are at the start of the buffer, and there |
2310 | | * is more file to read backwards. Which means |
2311 | | * we are in the middle of a line. Note that we |
2312 | | * may get here even if *bp was a newline; that |
2313 | | * just means we are at the exact end of the |
2314 | | * previous line, rather than some spot in the |
2315 | | * middle. |
2316 | | * |
2317 | | * Save away what we have to be combined with |
2318 | | * the data from the next read. |
2319 | | */ |
2320 | 0 | strbuf_splice(&sb, 0, 0, buf, endp - buf); |
2321 | 0 | break; |
2322 | 0 | } |
2323 | 0 | } |
2324 | |
2325 | 0 | } |
2326 | 0 | if (!ret && sb.len) |
2327 | 0 | BUG("reverse reflog parser had leftover data"); |
2328 | | |
2329 | 0 | fclose(logfp); |
2330 | 0 | strbuf_release(&sb); |
2331 | 0 | return ret; |
2332 | 0 | } |
2333 | | |
2334 | | static int files_for_each_reflog_ent(struct ref_store *ref_store, |
2335 | | const char *refname, |
2336 | | each_reflog_ent_fn fn, void *cb_data) |
2337 | 0 | { |
2338 | 0 | struct files_ref_store *refs = |
2339 | 0 | files_downcast(ref_store, REF_STORE_READ, |
2340 | 0 | "for_each_reflog_ent"); |
2341 | 0 | FILE *logfp; |
2342 | 0 | struct strbuf sb = STRBUF_INIT; |
2343 | 0 | int ret = 0; |
2344 | |
2345 | 0 | files_reflog_path(refs, &sb, refname); |
2346 | 0 | logfp = fopen(sb.buf, "r"); |
2347 | 0 | strbuf_release(&sb); |
2348 | 0 | if (!logfp) |
2349 | 0 | return -1; |
2350 | | |
2351 | 0 | while (!ret && !strbuf_getwholeline(&sb, logfp, '\n')) |
2352 | 0 | ret = show_one_reflog_ent(refs, refname, &sb, fn, cb_data); |
2353 | 0 | fclose(logfp); |
2354 | 0 | strbuf_release(&sb); |
2355 | 0 | return ret; |
2356 | 0 | } |
2357 | | |
2358 | | struct files_reflog_iterator { |
2359 | | struct ref_iterator base; |
2360 | | struct ref_store *ref_store; |
2361 | | struct dir_iterator *dir_iterator; |
2362 | | }; |
2363 | | |
2364 | | static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator) |
2365 | 0 | { |
2366 | 0 | struct files_reflog_iterator *iter = |
2367 | 0 | (struct files_reflog_iterator *)ref_iterator; |
2368 | 0 | struct dir_iterator *diter = iter->dir_iterator; |
2369 | 0 | int ok; |
2370 | |
2371 | 0 | while ((ok = dir_iterator_advance(diter)) == ITER_OK) { |
2372 | 0 | if (!S_ISREG(diter->st.st_mode)) |
2373 | 0 | continue; |
2374 | 0 | if (check_refname_format(diter->basename, |
2375 | 0 | REFNAME_ALLOW_ONELEVEL)) |
2376 | 0 | continue; |
2377 | | |
2378 | 0 | iter->base.ref.name = diter->relative_path; |
2379 | 0 | return ITER_OK; |
2380 | 0 | } |
2381 | | |
2382 | 0 | return ok; |
2383 | 0 | } |
2384 | | |
2385 | | static int files_reflog_iterator_seek(struct ref_iterator *ref_iterator UNUSED, |
2386 | | const char *refname UNUSED, |
2387 | | unsigned int flags UNUSED) |
2388 | 0 | { |
2389 | 0 | BUG("ref_iterator_seek() called for reflog_iterator"); |
2390 | 0 | } |
2391 | | |
2392 | | static void files_reflog_iterator_release(struct ref_iterator *ref_iterator) |
2393 | 0 | { |
2394 | 0 | struct files_reflog_iterator *iter = |
2395 | 0 | (struct files_reflog_iterator *)ref_iterator; |
2396 | 0 | dir_iterator_free(iter->dir_iterator); |
2397 | 0 | } |
2398 | | |
2399 | | static struct ref_iterator_vtable files_reflog_iterator_vtable = { |
2400 | | .advance = files_reflog_iterator_advance, |
2401 | | .seek = files_reflog_iterator_seek, |
2402 | | .release = files_reflog_iterator_release, |
2403 | | }; |
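Like the other ref backends, the reflog iterator is driven purely through a small vtable of function pointers, so generic callers never touch the concrete type. A self-contained sketch of the same pattern with illustrative names (it is not the real ref_iterator machinery):

#include <stdio.h>

/* Illustrative mini-iterator: yields the integers 0..limit-1. */
struct my_iterator;

struct my_iterator_vtable {
	int (*advance)(struct my_iterator *it);   /* 1 = more items, 0 = done */
	void (*release)(struct my_iterator *it);
};

struct my_iterator {
	const struct my_iterator_vtable *vtable;
	int current, limit;
};

static int counting_advance(struct my_iterator *it)
{
	return it->current++ < it->limit;
}

static void counting_release(struct my_iterator *it)
{
	it->current = it->limit; /* nothing to free in this toy example */
}

static const struct my_iterator_vtable counting_vtable = {
	.advance = counting_advance,
	.release = counting_release,
};

int main(void)
{
	struct my_iterator it = { .vtable = &counting_vtable, .current = 0, .limit = 3 };

	/* Callers only go through the vtable, never the concrete functions. */
	while (it.vtable->advance(&it))
		printf("item %d\n", it.current - 1);
	it.vtable->release(&it);
	return 0;
}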
2404 | | |
2405 | | static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store, |
2406 | | const char *gitdir) |
2407 | 0 | { |
2408 | 0 | struct dir_iterator *diter; |
2409 | 0 | struct files_reflog_iterator *iter; |
2410 | 0 | struct ref_iterator *ref_iterator; |
2411 | 0 | struct strbuf sb = STRBUF_INIT; |
2412 | |
2413 | 0 | strbuf_addf(&sb, "%s/logs", gitdir); |
2414 | |
2415 | 0 | diter = dir_iterator_begin(sb.buf, DIR_ITERATOR_SORTED); |
2416 | 0 | if (!diter) { |
2417 | 0 | strbuf_release(&sb); |
2418 | 0 | return empty_ref_iterator_begin(); |
2419 | 0 | } |
2420 | | |
2421 | 0 | CALLOC_ARRAY(iter, 1); |
2422 | 0 | ref_iterator = &iter->base; |
2423 | |
2424 | 0 | base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable); |
2425 | 0 | iter->dir_iterator = diter; |
2426 | 0 | iter->ref_store = ref_store; |
2427 | 0 | strbuf_release(&sb); |
2428 | |
2429 | 0 | return ref_iterator; |
2430 | 0 | } |
2431 | | |
2432 | | static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store) |
2433 | 0 | { |
2434 | 0 | struct files_ref_store *refs = |
2435 | 0 | files_downcast(ref_store, REF_STORE_READ, |
2436 | 0 | "reflog_iterator_begin"); |
2437 | |
2438 | 0 | if (!strcmp(refs->base.gitdir, refs->gitcommondir)) { |
2439 | 0 | return reflog_iterator_begin(ref_store, refs->gitcommondir); |
2440 | 0 | } else { |
2441 | 0 | return merge_ref_iterator_begin( |
2442 | 0 | reflog_iterator_begin(ref_store, refs->base.gitdir), |
2443 | 0 | reflog_iterator_begin(ref_store, refs->gitcommondir), |
2444 | 0 | ref_iterator_select, refs); |
2445 | 0 | } |
2446 | 0 | } |
2447 | | |
2448 | | /* |
2449 | | * If update is a direct update of head_ref (the reference pointed to |
2450 | | * by HEAD), then add an extra REF_LOG_ONLY update for HEAD. |
2451 | | */ |
2452 | | static enum ref_transaction_error split_head_update(struct ref_update *update, |
2453 | | struct ref_transaction *transaction, |
2454 | | const char *head_ref, |
2455 | | struct strbuf *err) |
2456 | 0 | { |
2457 | 0 | struct ref_update *new_update; |
2458 | |
2459 | 0 | if ((update->flags & REF_LOG_ONLY) || |
2460 | 0 | (update->flags & REF_SKIP_CREATE_REFLOG) || |
2461 | 0 | (update->flags & REF_IS_PRUNING) || |
2462 | 0 | (update->flags & REF_UPDATE_VIA_HEAD)) |
2463 | 0 | return 0; |
2464 | | |
2465 | 0 | if (strcmp(update->refname, head_ref)) |
2466 | 0 | return 0; |
2467 | | |
2468 | | /* |
2469 | | * First make sure that HEAD is not already in the |
2470 | | * transaction. This check is O(lg N) in the transaction |
2471 | | * size, but it happens at most once per transaction. |
2472 | | */ |
2473 | 0 | if (string_list_has_string(&transaction->refnames, "HEAD")) { |
2474 | | /* An entry already existed */ |
2475 | 0 | strbuf_addf(err, |
2476 | 0 | "multiple updates for 'HEAD' (including one " |
2477 | 0 | "via its referent '%s') are not allowed", |
2478 | 0 | update->refname); |
2479 | 0 | return REF_TRANSACTION_ERROR_NAME_CONFLICT; |
2480 | 0 | } |
2481 | | |
2482 | 0 | new_update = ref_transaction_add_update( |
2483 | 0 | transaction, "HEAD", |
2484 | 0 | update->flags | REF_LOG_ONLY | REF_NO_DEREF | REF_LOG_VIA_SPLIT, |
2485 | 0 | &update->new_oid, &update->old_oid, |
2486 | 0 | NULL, NULL, update->committer_info, update->msg); |
2487 | 0 | new_update->parent_update = update; |
2488 | | |
2489 | | /* |
2490 | | * Add "HEAD". This insertion is O(N) in the transaction |
2491 | | * size, but it happens at most once per transaction. |
2492 | | * Add new_update->refname instead of a literal "HEAD". |
2493 | | */ |
2494 | 0 | if (strcmp(new_update->refname, "HEAD")) |
2495 | 0 | BUG("%s unexpectedly not 'HEAD'", new_update->refname); |
2496 | | |
2497 | 0 | return 0; |
2498 | 0 | } |
2499 | | |
2500 | | /* |
2501 | | * update is for a symref that points at referent and doesn't have |
2502 | | * REF_NO_DEREF set. Split it into two updates: |
2503 | | * - The original update, but with REF_LOG_ONLY and REF_NO_DEREF set |
2504 | | * - A new, separate update for the referent reference |
2505 | | * Note that the new update will itself be subject to splitting when |
2506 | | * the iteration gets to it. |
2507 | | */ |
2508 | | static enum ref_transaction_error split_symref_update(struct ref_update *update, |
2509 | | const char *referent, |
2510 | | struct ref_transaction *transaction, |
2511 | | struct strbuf *err) |
2512 | 0 | { |
2513 | 0 | struct ref_update *new_update; |
2514 | 0 | unsigned int new_flags; |
2515 | | |
2516 | | /* |
2517 | | * First make sure that referent is not already in the |
2518 | | * transaction. This check is O(lg N) in the transaction |
2519 | | * size, but it happens at most once per symref in a |
2520 | | * transaction. |
2521 | | */ |
2522 | 0 | if (string_list_has_string(&transaction->refnames, referent)) { |
2523 | | /* An entry already exists */ |
2524 | 0 | strbuf_addf(err, |
2525 | 0 | "multiple updates for '%s' (including one " |
2526 | 0 | "via symref '%s') are not allowed", |
2527 | 0 | referent, update->refname); |
2528 | 0 | return REF_TRANSACTION_ERROR_NAME_CONFLICT; |
2529 | 0 | } |
2530 | | |
2531 | 0 | new_flags = update->flags; |
2532 | 0 | if (!strcmp(update->refname, "HEAD")) { |
2533 | | /* |
2534 | | * Record that the new update came via HEAD, so that |
2535 | | * when we process it, split_head_update() doesn't try |
2536 | | * to add another reflog update for HEAD. Note that |
2537 | | * this bit will be propagated if the new_update |
2538 | | * itself needs to be split. |
2539 | | */ |
2540 | 0 | new_flags |= REF_UPDATE_VIA_HEAD; |
2541 | 0 | } |
2542 | |
2543 | 0 | new_update = ref_transaction_add_update( |
2544 | 0 | transaction, referent, new_flags, |
2545 | 0 | update->new_target ? NULL : &update->new_oid, |
2546 | 0 | update->old_target ? NULL : &update->old_oid, |
2547 | 0 | update->new_target, update->old_target, NULL, |
2548 | 0 | update->msg); |
2549 | |
2550 | 0 | new_update->parent_update = update; |
2551 | | |
2552 | | /* |
2553 | | * Change the symbolic ref update to log only. Also, it |
2554 | | * doesn't need to check its old OID value, as that will be |
2555 | | * done when new_update is processed. |
2556 | | */ |
2557 | 0 | update->flags |= REF_LOG_ONLY | REF_NO_DEREF; |
2558 | |
2559 | 0 | return 0; |
2560 | 0 | } |
2561 | | |
2562 | | /* |
2563 | | * Check whether the REF_HAVE_OLD and old_oid values stored in update |
2564 | | * are consistent with oid, which is the reference's current value. If |
2565 | | * everything is OK, return 0; otherwise, write an error message to |
2566 | | * err and return -1. |
2567 | | */ |
2568 | | static enum ref_transaction_error check_old_oid(struct ref_update *update, |
2569 | | struct object_id *oid, |
2570 | | struct strbuf *referent, |
2571 | | struct strbuf *err) |
2572 | 0 | { |
2573 | 0 | if (update->flags & REF_LOG_ONLY || |
2574 | 0 | !(update->flags & REF_HAVE_OLD)) |
2575 | 0 | return 0; |
2576 | | |
2577 | 0 | if (oideq(oid, &update->old_oid)) { |
2578 | | /* |
2579 | | * Normally matching the expected old oid is enough. Either we |
2580 | | * found the ref at the expected state, or we are creating and |
2581 | | * expect the null oid (and likewise found nothing). |
2582 | | * |
2583 | | * But there is one exception for the null oid: if we found a |
2584 | | * symref pointing to nothing we'll also get the null oid. In |
2585 | | * regular recursive mode, that's good (we'll write to what the |
2586 | | * symref points to, which doesn't exist). But in no-deref |
2587 | | * mode, it means we'll clobber the symref, even though the |
2588 | | * caller asked for this to be a creation event. So flag |
2589 | | * that case to preserve the dangling symref. |
2590 | | */ |
2591 | 0 | if ((update->flags & REF_NO_DEREF) && referent->len && |
2592 | 0 | is_null_oid(oid)) { |
2593 | 0 | strbuf_addf(err, "cannot lock ref '%s': " |
2594 | 0 | "dangling symref already exists", |
2595 | 0 | ref_update_original_update_refname(update)); |
2596 | 0 | return REF_TRANSACTION_ERROR_CREATE_EXISTS; |
2597 | 0 | } |
2598 | 0 | return 0; |
2599 | 0 | } |
2600 | | |
2601 | 0 | if (is_null_oid(&update->old_oid)) { |
2602 | 0 | strbuf_addf(err, "cannot lock ref '%s': " |
2603 | 0 | "reference already exists", |
2604 | 0 | ref_update_original_update_refname(update)); |
2605 | 0 | return REF_TRANSACTION_ERROR_CREATE_EXISTS; |
2606 | 0 | } else if (is_null_oid(oid)) { |
2607 | 0 | strbuf_addf(err, "cannot lock ref '%s': " |
2608 | 0 | "reference is missing but expected %s", |
2609 | 0 | ref_update_original_update_refname(update), |
2610 | 0 | oid_to_hex(&update->old_oid)); |
2611 | 0 | return REF_TRANSACTION_ERROR_NONEXISTENT_REF; |
2612 | 0 | } |
2613 | | |
2614 | 0 | strbuf_addf(err, "cannot lock ref '%s': is at %s but expected %s", |
2615 | 0 | ref_update_original_update_refname(update), oid_to_hex(oid), |
2616 | 0 | oid_to_hex(&update->old_oid)); |
2617 | |
2618 | 0 | return REF_TRANSACTION_ERROR_INCORRECT_OLD_VALUE; |
2619 | 0 | } |
2620 | | |
2621 | | struct files_transaction_backend_data { |
2622 | | struct ref_transaction *packed_transaction; |
2623 | | int packed_refs_locked; |
2624 | | struct strmap ref_locks; |
2625 | | }; |
2626 | | |
2627 | | /* |
2628 | | * Prepare for carrying out update: |
2629 | | * - Lock the reference referred to by update. |
2630 | | * - Read the reference under lock. |
2631 | | * - Check that its old OID value (if specified) is correct, and in |
2632 | | * any case record it in update->lock->old_oid for later use when |
2633 | | * writing the reflog. |
2634 | | * - If it is a symref update without REF_NO_DEREF, split it up into a |
2635 | | * REF_LOG_ONLY update of the symref and add a separate update for |
2636 | | * the referent to transaction. |
2637 | | * - If it is an update of head_ref, add a corresponding REF_LOG_ONLY |
2638 | | * update of HEAD. |
2639 | | */ |
2640 | | static enum ref_transaction_error lock_ref_for_update(struct files_ref_store *refs, |
2641 | | struct ref_update *update, |
2642 | | size_t update_idx, |
2643 | | struct ref_transaction *transaction, |
2644 | | const char *head_ref, |
2645 | | struct string_list *refnames_to_check, |
2646 | | struct strbuf *err) |
2647 | 0 | { |
2648 | 0 | struct strbuf referent = STRBUF_INIT; |
2649 | 0 | int mustexist = ref_update_expects_existing_old_ref(update); |
2650 | 0 | struct files_transaction_backend_data *backend_data; |
2651 | 0 | enum ref_transaction_error ret = 0; |
2652 | 0 | struct ref_lock *lock; |
2653 | |
2654 | 0 | files_assert_main_repository(refs, "lock_ref_for_update"); |
2655 | |
2656 | 0 | backend_data = transaction->backend_data; |
2657 | |
2658 | 0 | if ((update->flags & REF_HAVE_NEW) && ref_update_has_null_new_value(update)) |
2659 | 0 | update->flags |= REF_DELETING; |
2660 | |
2661 | 0 | if (head_ref) { |
2662 | 0 | ret = split_head_update(update, transaction, head_ref, err); |
2663 | 0 | if (ret) |
2664 | 0 | goto out; |
2665 | 0 | } |
2666 | | |
2667 | 0 | lock = strmap_get(&backend_data->ref_locks, update->refname); |
2668 | 0 | if (lock) { |
2669 | 0 | lock->count++; |
2670 | 0 | } else { |
2671 | 0 | ret = lock_raw_ref(refs, transaction, update_idx, mustexist, |
2672 | 0 | refnames_to_check, &lock, &referent, err); |
2673 | 0 | if (ret) { |
2674 | 0 | char *reason; |
2675 | |
2676 | 0 | reason = strbuf_detach(err, NULL); |
2677 | 0 | strbuf_addf(err, "cannot lock ref '%s': %s", |
2678 | 0 | ref_update_original_update_refname(update), reason); |
2679 | 0 | free(reason); |
2680 | 0 | goto out; |
2681 | 0 | } |
2682 | | |
2683 | 0 | strmap_put(&backend_data->ref_locks, update->refname, lock); |
2684 | 0 | } |
2685 | | |
2686 | 0 | update->backend_data = lock; |
2687 | |
2688 | 0 | if (update->flags & REF_LOG_VIA_SPLIT) { |
2689 | 0 | struct ref_lock *parent_lock; |
2690 | |
2691 | 0 | if (!update->parent_update) |
2692 | 0 | BUG("split update without a parent"); |
2693 | | |
2694 | 0 | parent_lock = update->parent_update->backend_data; |
2695 | | |
2696 | | /* |
2697 | | * Check that "HEAD" didn't racily change since we have looked |
2698 | | * it up. If it did we must refuse to write the reflog entry. |
2699 | | * |
2700 | | * Note that this does not catch all races: if "HEAD" was |
2701 | | * racily changed to point to one of the refs that are part of the
2702 | | * transaction, then we would miss writing the split reflog
2703 | | * entry for "HEAD". |
2704 | | */ |
2705 | 0 | if (!(update->type & REF_ISSYMREF) || |
2706 | 0 | strcmp(update->parent_update->refname, referent.buf)) { |
2707 | 0 | strbuf_addstr(err, "HEAD has been racily updated"); |
2708 | 0 | ret = REF_TRANSACTION_ERROR_GENERIC; |
2709 | 0 | goto out; |
2710 | 0 | } |
2711 | | |
2712 | 0 | if (update->flags & REF_HAVE_OLD) { |
2713 | 0 | oidcpy(&lock->old_oid, &update->old_oid); |
2714 | 0 | } else { |
2715 | 0 | oidcpy(&lock->old_oid, &parent_lock->old_oid); |
2716 | 0 | } |
2717 | 0 | } else if (update->type & REF_ISSYMREF) { |
2718 | 0 | if (update->flags & REF_NO_DEREF) { |
2719 | | /* |
2720 | | * We won't be reading the referent as part of |
2721 | | * the transaction, so we have to read it here |
2722 | | * to record and possibly check old_oid: |
2723 | | */ |
2724 | 0 | if (!refs_resolve_ref_unsafe(&refs->base, |
2725 | 0 | referent.buf, 0, |
2726 | 0 | &lock->old_oid, NULL)) { |
2727 | 0 | if (update->flags & REF_HAVE_OLD) { |
2728 | 0 | strbuf_addf(err, "cannot lock ref '%s': " |
2729 | 0 | "error reading reference", |
2730 | 0 | ref_update_original_update_refname(update)); |
2731 | 0 | ret = REF_TRANSACTION_ERROR_GENERIC; |
2732 | 0 | goto out; |
2733 | 0 | } |
2734 | 0 | } |
2735 | | |
2736 | 0 | if (update->old_target) |
2737 | 0 | ret = ref_update_check_old_target(referent.buf, update, err); |
2738 | 0 | else |
2739 | 0 | ret = check_old_oid(update, &lock->old_oid, |
2740 | 0 | &referent, err); |
2741 | 0 | if (ret) |
2742 | 0 | goto out; |
2743 | 0 | } else { |
2744 | | /* |
2745 | | * Create a new update for the reference this |
2746 | | * symref is pointing at. Also, we will record |
2747 | | * and verify old_oid for this update as part |
2748 | | * of processing the split-off update, so we |
2749 | | * don't have to do it here. |
2750 | | */ |
2751 | 0 | ret = split_symref_update(update, referent.buf, |
2752 | 0 | transaction, err); |
2753 | 0 | if (ret) |
2754 | 0 | goto out; |
2755 | 0 | } |
2756 | 0 | } else { |
2757 | 0 | struct ref_update *parent_update; |
2758 | | |
2759 | | /* |
2760 | | * Even if the ref is a regular ref, if `old_target` is set, we |
2761 | | * fail with an error. |
2762 | | */ |
2763 | 0 | if (update->old_target) { |
2764 | 0 | strbuf_addf(err, _("cannot lock ref '%s': " |
2765 | 0 | "expected symref with target '%s': " |
2766 | 0 | "but is a regular ref"), |
2767 | 0 | ref_update_original_update_refname(update), |
2768 | 0 | update->old_target); |
2769 | 0 | ret = REF_TRANSACTION_ERROR_EXPECTED_SYMREF; |
2770 | 0 | goto out; |
2771 | 0 | } else { |
2772 | 0 | ret = check_old_oid(update, &lock->old_oid, |
2773 | 0 | &referent, err); |
2774 | 0 | if (ret) { |
2775 | 0 | goto out; |
2776 | 0 | } |
2777 | 0 | } |
2778 | | |
2779 | | /* |
2780 | | * If this update is happening indirectly because of a |
2781 | | * symref update, record the old OID in the parent |
2782 | | * update: |
2783 | | */ |
2784 | 0 | for (parent_update = update->parent_update; |
2785 | 0 | parent_update; |
2786 | 0 | parent_update = parent_update->parent_update) { |
2787 | 0 | struct ref_lock *parent_lock = parent_update->backend_data; |
2788 | 0 | oidcpy(&parent_lock->old_oid, &lock->old_oid); |
2789 | 0 | } |
2790 | 0 | } |
2791 | | |
2792 | 0 | if (update->new_target && !(update->flags & REF_LOG_ONLY)) { |
2793 | 0 | if (create_symref_lock(lock, update->new_target, err)) { |
2794 | 0 | ret = REF_TRANSACTION_ERROR_GENERIC; |
2795 | 0 | goto out; |
2796 | 0 | } |
2797 | | |
2798 | 0 | if (close_ref_gently(lock)) { |
2799 | 0 | strbuf_addf(err, "couldn't close '%s.lock'", |
2800 | 0 | update->refname); |
2801 | 0 | ret = REF_TRANSACTION_ERROR_GENERIC; |
2802 | 0 | goto out; |
2803 | 0 | } |
2804 | | |
2805 | | /* |
2806 | | * Once we have created the symref lock, the commit |
2807 | | * phase of the transaction only needs to commit the lock. |
2808 | | */ |
2809 | 0 | update->flags |= REF_NEEDS_COMMIT; |
2810 | 0 | } else if ((update->flags & REF_HAVE_NEW) && |
2811 | 0 | !(update->flags & REF_DELETING) && |
2812 | 0 | !(update->flags & REF_LOG_ONLY)) { |
2813 | 0 | if (!(update->type & REF_ISSYMREF) && |
2814 | 0 | oideq(&lock->old_oid, &update->new_oid)) { |
2815 | | /* |
2816 | | * The reference already has the desired |
2817 | | * value, so we don't need to write it. |
2818 | | */ |
2819 | 0 | } else { |
2820 | 0 | ret = write_ref_to_lockfile( |
2821 | 0 | refs, lock, &update->new_oid, |
2822 | 0 | update->flags & REF_SKIP_OID_VERIFICATION, |
2823 | 0 | err); |
2824 | 0 | if (ret) { |
2825 | 0 | char *write_err = strbuf_detach(err, NULL); |
2826 | | |
2827 | | /* |
2828 | | * The lock was freed upon failure of |
2829 | | * write_ref_to_lockfile(): |
2830 | | */ |
2831 | 0 | update->backend_data = NULL; |
2832 | 0 | strbuf_addf(err, |
2833 | 0 | "cannot update ref '%s': %s", |
2834 | 0 | update->refname, write_err); |
2835 | 0 | free(write_err); |
2836 | 0 | goto out; |
2837 | 0 | } else { |
2838 | 0 | update->flags |= REF_NEEDS_COMMIT; |
2839 | 0 | } |
2840 | 0 | } |
2841 | 0 | } |
2842 | 0 | if (!(update->flags & REF_NEEDS_COMMIT)) { |
2843 | | /* |
2844 | | * We didn't call write_ref_to_lockfile(), so |
2845 | | * the lockfile is still open. Close it to |
2846 | | * free up the file descriptor: |
2847 | | */ |
2848 | 0 | if (close_ref_gently(lock)) { |
2849 | 0 | strbuf_addf(err, "couldn't close '%s.lock'", |
2850 | 0 | update->refname); |
2851 | 0 | ret = REF_TRANSACTION_ERROR_GENERIC; |
2852 | 0 | goto out; |
2853 | 0 | } |
2854 | 0 | } |
2855 | | |
2856 | 0 | out: |
2857 | 0 | strbuf_release(&referent); |
2858 | 0 | return ret; |
2859 | 0 | } |
2860 | | |
2861 | | /* |
2862 | | * Unlock any references in `transaction` that are still locked, and |
2863 | | * mark the transaction closed. |
2864 | | */ |
2865 | | static void files_transaction_cleanup(struct files_ref_store *refs, |
2866 | | struct ref_transaction *transaction) |
2867 | 0 | { |
2868 | 0 | size_t i; |
2869 | 0 | struct files_transaction_backend_data *backend_data = |
2870 | 0 | transaction->backend_data; |
2871 | 0 | struct strbuf err = STRBUF_INIT; |
2872 | |
2873 | 0 | for (i = 0; i < transaction->nr; i++) { |
2874 | 0 | struct ref_update *update = transaction->updates[i]; |
2875 | 0 | struct ref_lock *lock = update->backend_data; |
2876 | |
2877 | 0 | if (lock) { |
2878 | 0 | unlock_ref(lock); |
2879 | 0 | try_remove_empty_parents(refs, update->refname, |
2880 | 0 | REMOVE_EMPTY_PARENTS_REF); |
2881 | 0 | update->backend_data = NULL; |
2882 | 0 | } |
2883 | 0 | } |
2884 | |
2885 | 0 | if (backend_data) { |
2886 | 0 | if (backend_data->packed_transaction && |
2887 | 0 | ref_transaction_abort(backend_data->packed_transaction, &err)) { |
2888 | 0 | error("error aborting transaction: %s", err.buf); |
2889 | 0 | strbuf_release(&err); |
2890 | 0 | } |
2891 | |
2892 | 0 | if (backend_data->packed_refs_locked) |
2893 | 0 | packed_refs_unlock(refs->packed_ref_store); |
2894 | |
2895 | 0 | strmap_clear(&backend_data->ref_locks, 0); |
2896 | |
2897 | 0 | free(backend_data); |
2898 | 0 | } |
2899 | |
2900 | 0 | transaction->state = REF_TRANSACTION_CLOSED; |
2901 | 0 | } |
2902 | | |
2903 | | static int files_transaction_prepare(struct ref_store *ref_store, |
2904 | | struct ref_transaction *transaction, |
2905 | | struct strbuf *err) |
2906 | 0 | { |
2907 | 0 | struct files_ref_store *refs = |
2908 | 0 | files_downcast(ref_store, REF_STORE_WRITE, |
2909 | 0 | "ref_transaction_prepare"); |
2910 | 0 | size_t i; |
2911 | 0 | int ret = 0; |
2912 | 0 | struct string_list refnames_to_check = STRING_LIST_INIT_DUP; |
2913 | 0 | char *head_ref = NULL; |
2914 | 0 | int head_type; |
2915 | 0 | struct files_transaction_backend_data *backend_data; |
2916 | 0 | struct ref_transaction *packed_transaction = NULL; |
2917 | |
2918 | 0 | assert(err); |
2919 | |
2920 | 0 | if (transaction->flags & REF_TRANSACTION_FLAG_INITIAL) |
2921 | 0 | goto cleanup; |
2922 | 0 | if (!transaction->nr) |
2923 | 0 | goto cleanup; |
2924 | | |
2925 | 0 | CALLOC_ARRAY(backend_data, 1); |
2926 | 0 | strmap_init(&backend_data->ref_locks); |
2927 | 0 | transaction->backend_data = backend_data; |
2928 | | |
2929 | | /* |
2930 | | * Fail if any of the updates use REF_IS_PRUNING without REF_NO_DEREF. |
2931 | | */ |
2932 | 0 | for (i = 0; i < transaction->nr; i++) { |
2933 | 0 | struct ref_update *update = transaction->updates[i]; |
2934 | |
2935 | 0 | if ((update->flags & REF_IS_PRUNING) && |
2936 | 0 | !(update->flags & REF_NO_DEREF)) |
2937 | 0 | BUG("REF_IS_PRUNING set without REF_NO_DEREF"); |
2938 | 0 | } |
2939 | | |
2940 | | /* |
2941 | | * Special hack: If a branch is updated directly and HEAD |
2942 | | * points to it (may happen on the remote side of a push |
2943 | | * for example) then logically the HEAD reflog should be |
2944 | | * updated too. |
2945 | | * |
2946 | | * A generic solution would require reverse symref lookups, |
2947 | | * but finding all symrefs pointing to a given branch would be |
2948 | | * rather costly for this rare event (the direct update of a |
2949 | | * branch) to be worth it. So let's cheat and check with HEAD |
2950 | | * only, which should cover 99% of all usage scenarios (even |
2951 | | * 100% of the default ones). |
2952 | | * |
2953 | | * So if HEAD is a symbolic reference, then record the name of |
2954 | | * the reference that it points to. If we see an update of |
2955 | | * head_ref within the transaction, then split_head_update() |
2956 | | * arranges for the reflog of HEAD to be updated, too. |
2957 | | */ |
2958 | 0 | head_ref = refs_resolve_refdup(ref_store, "HEAD", |
2959 | 0 | RESOLVE_REF_NO_RECURSE, |
2960 | 0 | NULL, &head_type); |
2961 | |
2962 | 0 | if (head_ref && !(head_type & REF_ISSYMREF)) { |
2963 | 0 | FREE_AND_NULL(head_ref); |
2964 | 0 | } |
2965 | | |
2966 | | /* |
2967 | | * Acquire all locks, verify old values if provided, check |
2968 | | * that new values are valid, and write new values to the |
2969 | | * lockfiles, ready to be activated. Only keep one lockfile |
2970 | | * open at a time to avoid running out of file descriptors. |
2971 | | * Note that lock_ref_for_update() might append more updates |
2972 | | * to the transaction. |
2973 | | */ |
2974 | 0 | for (i = 0; i < transaction->nr; i++) { |
2975 | 0 | struct ref_update *update = transaction->updates[i]; |
2976 | |
2977 | 0 | ret = lock_ref_for_update(refs, update, i, transaction, |
2978 | 0 | head_ref, &refnames_to_check, |
2979 | 0 | err); |
2980 | 0 | if (ret) { |
2981 | 0 | if (ref_transaction_maybe_set_rejected(transaction, i, |
2982 | 0 | ret, err)) { |
2983 | 0 | ret = 0; |
2984 | 0 | continue; |
2985 | 0 | } |
2986 | 0 | goto cleanup; |
2987 | 0 | } |
2988 | | |
2989 | 0 | if (update->flags & REF_DELETING && |
2990 | 0 | !(update->flags & REF_LOG_ONLY) && |
2991 | 0 | !(update->flags & REF_IS_PRUNING)) { |
2992 | | /* |
2993 | | * This reference has to be deleted from |
2994 | | * packed-refs if it exists there. |
2995 | | */ |
2996 | 0 | if (!packed_transaction) { |
2997 | 0 | packed_transaction = ref_store_transaction_begin( |
2998 | 0 | refs->packed_ref_store, |
2999 | 0 | transaction->flags, err); |
3000 | 0 | if (!packed_transaction) { |
3001 | 0 | ret = REF_TRANSACTION_ERROR_GENERIC; |
3002 | 0 | goto cleanup; |
3003 | 0 | } |
3004 | | |
3005 | 0 | backend_data->packed_transaction = |
3006 | 0 | packed_transaction; |
3007 | 0 | } |
3008 | | |
3009 | 0 | ref_transaction_add_update( |
3010 | 0 | packed_transaction, update->refname, |
3011 | 0 | REF_HAVE_NEW | REF_NO_DEREF, |
3012 | 0 | &update->new_oid, NULL, |
3013 | 0 | NULL, NULL, NULL, NULL); |
3014 | 0 | } |
3015 | 0 | } |
3016 | | |
3017 | | /* |
3018 | | * Verify that none of the loose references that we're about to write
3019 | | * conflict with any existing packed references. Ideally, we'd do this |
3020 | | * check after the packed-refs file is locked so that it cannot
3021 | | * change underneath our feet. But introducing such a lock now would |
3022 | | * probably do more harm than good as users rely on there not being a |
3023 | | * global lock with the "files" backend. |
3024 | | * |
3025 | | * Another alternative would be to do the check after the (optional) |
3026 | | * lock, but that would extend the time we spend in the globally-locked |
3027 | | * state. |
3028 | | * |
3029 | | * So instead, we accept the race for now. |
3030 | | */ |
3031 | 0 | if (refs_verify_refnames_available(refs->packed_ref_store, &refnames_to_check, |
3032 | 0 | &transaction->refnames, NULL, transaction, |
3033 | 0 | 0, err)) { |
3034 | 0 | ret = REF_TRANSACTION_ERROR_NAME_CONFLICT; |
3035 | 0 | goto cleanup; |
3036 | 0 | } |
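/*
 * Editor's note: a hedged illustration of the kind of name conflict the
 * check above guards against. Loose refs are files under "refs/", so
 * "refs/heads/foo" and "refs/heads/foo/bar" cannot both exist: one needs
 * "foo" as a file, the other needs it as a directory. The helper below is
 * a simplified, hypothetical prefix test, not the real
 * refs_verify_refnames_available() logic.
 */
#include <string.h>

static int refnames_conflict(const char *a, const char *b)
{
	size_t len_a = strlen(a), len_b = strlen(b);

	/* One name is a directory-style prefix of the other. */
	if (len_a < len_b)
		return !strncmp(a, b, len_a) && b[len_a] == '/';
	if (len_b < len_a)
		return !strncmp(a, b, len_b) && a[len_b] == '/';
	return 0;
}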
3037 | | |
3038 | 0 | if (packed_transaction) { |
3039 | 0 | if (packed_refs_lock(refs->packed_ref_store, 0, err)) { |
3040 | 0 | ret = REF_TRANSACTION_ERROR_GENERIC; |
3041 | 0 | goto cleanup; |
3042 | 0 | } |
3043 | 0 | backend_data->packed_refs_locked = 1; |
3044 | |
3045 | 0 | if (is_packed_transaction_needed(refs->packed_ref_store, |
3046 | 0 | packed_transaction)) { |
3047 | 0 | ret = ref_transaction_prepare(packed_transaction, err); |
3048 | | /* |
3049 | | * A failure during the prepare step aborts the packed
3050 | | * transaction itself but does not free it. Do that now, and
3051 | | * disconnect it from the files_transaction so it does not try to
3052 | | * abort us when we hit the cleanup code below.
3053 | | */ |
3054 | 0 | if (ret) { |
3055 | 0 | ref_transaction_free(packed_transaction); |
3056 | 0 | backend_data->packed_transaction = NULL; |
3057 | 0 | } |
3058 | 0 | } else { |
3059 | | /* |
3060 | | * We can skip rewriting the `packed-refs` |
3061 | | * file. But we do need to leave it locked, so |
3062 | | * that somebody else doesn't pack a reference |
3063 | | * that we are trying to delete. |
3064 | | * |
3065 | | * We need to disconnect our transaction from |
3066 | | * backend_data, since the abort (whether successful or |
3067 | | * not) will free it. |
3068 | | */ |
3069 | 0 | backend_data->packed_transaction = NULL; |
3070 | 0 | if (ref_transaction_abort(packed_transaction, err)) { |
3071 | 0 | ret = REF_TRANSACTION_ERROR_GENERIC; |
3072 | 0 | goto cleanup; |
3073 | 0 | } |
3074 | 0 | } |
3075 | 0 | } |
3076 | | |
3077 | 0 | cleanup: |
3078 | 0 | free(head_ref); |
3079 | 0 | string_list_clear(&refnames_to_check, 1); |
3080 | |
3081 | 0 | if (ret) |
3082 | 0 | files_transaction_cleanup(refs, transaction); |
3083 | 0 | else |
3084 | 0 | transaction->state = REF_TRANSACTION_PREPARED; |
3085 | |
3086 | 0 | return ret; |
3087 | 0 | } |
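/*
 * Editor's note: a rough POSIX sketch of the per-ref lifecycle that the
 * prepare/finish split above relies on. Prepare writes the new value into
 * "<ref>.lock"; finish later renames the lockfile into place (or removes
 * it on abort). This is an illustration under stated assumptions, not the
 * lockfile API used by this file; the function name is hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int write_loose_ref_sketch(const char *path, const char *hex_oid)
{
	char lock_path[4096];
	int fd;

	snprintf(lock_path, sizeof(lock_path), "%s.lock", path);

	/* O_EXCL makes the lockfile double as a mutual-exclusion token. */
	fd = open(lock_path, O_WRONLY | O_CREAT | O_EXCL, 0666);
	if (fd < 0)
		return -1;
	dprintf(fd, "%s\n", hex_oid);	/* loose refs store "<hex>\n" */
	if (close(fd) < 0 || rename(lock_path, path) < 0) {
		unlink(lock_path);
		return -1;
	}
	return 0;
}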
3088 | | |
3089 | | static int parse_and_write_reflog(struct files_ref_store *refs, |
3090 | | struct ref_update *update, |
3091 | | struct ref_lock *lock, |
3092 | | struct strbuf *err) |
3093 | 0 | { |
3094 | 0 | struct object_id *old_oid = &lock->old_oid; |
3095 | |
3096 | 0 | if (update->flags & REF_LOG_USE_PROVIDED_OIDS) { |
3097 | 0 | if (!(update->flags & REF_HAVE_OLD) || |
3098 | 0 | !(update->flags & REF_HAVE_NEW) || |
3099 | 0 | !(update->flags & REF_LOG_ONLY)) { |
3100 | 0 | strbuf_addf(err, _("trying to write reflog for '%s' " |
3101 | 0 | "with incomplete values"), update->refname); |
3102 | 0 | return REF_TRANSACTION_ERROR_GENERIC; |
3103 | 0 | } |
3104 | | |
3105 | 0 | old_oid = &update->old_oid; |
3106 | 0 | } |
3107 | | |
3108 | 0 | if (update->new_target) { |
3109 | | /* |
3110 | | * We want to get the resolved OID for the target, to ensure |
3111 | | * that the correct value is added to the reflog. |
3112 | | */ |
3113 | 0 | if (!refs_resolve_ref_unsafe(&refs->base, update->new_target, |
3114 | 0 | RESOLVE_REF_READING, |
3115 | 0 | &update->new_oid, NULL)) { |
3116 | | /* |
3117 | | * TODO: currently we skip creating reflogs for dangling |
3118 | | * symref updates. It would be nice to capture these as
3119 | | * zero-oid updates, however.
3120 | | */ |
3121 | 0 | return 0; |
3122 | 0 | } |
3123 | 0 | } |
3124 | | |
3125 | 0 | if (files_log_ref_write(refs, lock->ref_name, old_oid, |
3126 | 0 | &update->new_oid, update->committer_info, |
3127 | 0 | update->msg, update->flags, err)) { |
3128 | 0 | char *old_msg = strbuf_detach(err, NULL); |
3129 | |
3130 | 0 | strbuf_addf(err, "cannot update the ref '%s': %s", |
3131 | 0 | lock->ref_name, old_msg); |
3132 | 0 | free(old_msg); |
3133 | 0 | unlock_ref(lock); |
3134 | 0 | update->backend_data = NULL; |
3135 | 0 | return -1; |
3136 | 0 | } |
3137 | | |
3138 | 0 | return 0; |
3139 | 0 | } |
3140 | | |
3141 | | static int ref_present(const struct reference *ref, void *cb_data) |
3142 | 0 | { |
3143 | 0 | struct string_list *affected_refnames = cb_data; |
3144 | |
3145 | 0 | return string_list_has_string(affected_refnames, ref->name); |
3146 | 0 | } |
3147 | | |
3148 | | static int files_transaction_finish_initial(struct files_ref_store *refs, |
3149 | | struct ref_transaction *transaction, |
3150 | | struct strbuf *err) |
3151 | 0 | { |
3152 | 0 | size_t i; |
3153 | 0 | int ret = 0; |
3154 | 0 | struct string_list affected_refnames = STRING_LIST_INIT_NODUP; |
3155 | 0 | struct string_list refnames_to_check = STRING_LIST_INIT_NODUP; |
3156 | 0 | struct ref_transaction *packed_transaction = NULL; |
3157 | 0 | struct ref_transaction *loose_transaction = NULL; |
3158 | |
3159 | 0 | assert(err); |
3160 | |
3161 | 0 | if (transaction->state != REF_TRANSACTION_PREPARED) |
3162 | 0 | BUG("commit called for transaction that is not prepared"); |
3163 | | |
3164 | | /* |
3165 | | * It's really undefined to call this function in an active |
3166 | | * repository or when there are existing references: we are |
3167 | | * only locking and changing packed-refs, so (1) any |
3168 | | * simultaneous processes might try to change a reference at |
3169 | | * the same time we do, and (2) any existing loose versions of |
3170 | | * the references that we are setting would have precedence |
3171 | | * over our values. But some remote helpers create the remote |
3172 | | * "HEAD" and "master" branches before calling this function, |
3173 | | * so here we really only check that none of the references |
3174 | | * that we are creating already exists. |
3175 | | */ |
3176 | 0 | if (refs_for_each_rawref(&refs->base, ref_present, |
3177 | 0 | &transaction->refnames)) |
3178 | 0 | BUG("initial ref transaction called with existing refs"); |
3179 | | |
3180 | 0 | packed_transaction = ref_store_transaction_begin(refs->packed_ref_store, |
3181 | 0 | transaction->flags, err); |
3182 | 0 | if (!packed_transaction) { |
3183 | 0 | ret = REF_TRANSACTION_ERROR_GENERIC; |
3184 | 0 | goto cleanup; |
3185 | 0 | } |
3186 | | |
3187 | 0 | for (i = 0; i < transaction->nr; i++) { |
3188 | 0 | struct ref_update *update = transaction->updates[i]; |
3189 | |
3190 | 0 | if (!(update->flags & REF_LOG_ONLY) && |
3191 | 0 | (update->flags & REF_HAVE_OLD) && |
3192 | 0 | !is_null_oid(&update->old_oid)) |
3193 | 0 | BUG("initial ref transaction with old_sha1 set"); |
3194 | | |
3195 | 0 | string_list_append(&refnames_to_check, update->refname); |
3196 | | |
3197 | | /* |
3198 | | * The packed-refs file doesn't support symbolic refs, root refs, or reflogs,
3199 | | * so we have to queue these references via the loose transaction. |
3200 | | */ |
3201 | 0 | if (update->new_target || |
3202 | 0 | is_root_ref(update->refname) || |
3203 | 0 | (update->flags & REF_LOG_ONLY)) { |
3204 | 0 | if (!loose_transaction) { |
3205 | 0 | loose_transaction = ref_store_transaction_begin(&refs->base, 0, err); |
3206 | 0 | if (!loose_transaction) { |
3207 | 0 | ret = REF_TRANSACTION_ERROR_GENERIC; |
3208 | 0 | goto cleanup; |
3209 | 0 | } |
3210 | 0 | } |
3211 | | |
3212 | 0 | if (update->flags & REF_LOG_ONLY) |
3213 | 0 | ref_transaction_add_update(loose_transaction, update->refname, |
3214 | 0 | update->flags, &update->new_oid, |
3215 | 0 | &update->old_oid, NULL, NULL, |
3216 | 0 | update->committer_info, update->msg); |
3217 | 0 | else |
3218 | 0 | ref_transaction_add_update(loose_transaction, update->refname, |
3219 | 0 | update->flags & ~REF_HAVE_OLD, |
3220 | 0 | update->new_target ? NULL : &update->new_oid, NULL, |
3221 | 0 | update->new_target, NULL, update->committer_info, |
3222 | 0 | NULL); |
3223 | 0 | } else { |
3224 | 0 | ref_transaction_add_update(packed_transaction, update->refname, |
3225 | 0 | update->flags & ~REF_HAVE_OLD, |
3226 | 0 | &update->new_oid, &update->old_oid, |
3227 | 0 | NULL, NULL, update->committer_info, NULL); |
3228 | 0 | } |
3229 | 0 | } |
3230 | | |
3231 | 0 | if (packed_refs_lock(refs->packed_ref_store, 0, err)) { |
3232 | 0 | ret = REF_TRANSACTION_ERROR_GENERIC; |
3233 | 0 | goto cleanup; |
3234 | 0 | } |
3235 | | |
3236 | 0 | if (refs_verify_refnames_available(&refs->base, &refnames_to_check, |
3237 | 0 | &affected_refnames, NULL, transaction, |
3238 | 0 | 1, err)) { |
3239 | 0 | packed_refs_unlock(refs->packed_ref_store); |
3240 | 0 | ret = REF_TRANSACTION_ERROR_NAME_CONFLICT; |
3241 | 0 | goto cleanup; |
3242 | 0 | } |
3243 | | |
3244 | 0 | if (ref_transaction_commit(packed_transaction, err)) { |
3245 | 0 | ret = REF_TRANSACTION_ERROR_GENERIC; |
3246 | 0 | goto cleanup; |
3247 | 0 | } |
3248 | 0 | packed_refs_unlock(refs->packed_ref_store); |
3249 | |
3250 | 0 | if (loose_transaction) { |
3251 | 0 | if (ref_transaction_prepare(loose_transaction, err) || |
3252 | 0 | ref_transaction_commit(loose_transaction, err)) { |
3253 | 0 | ret = REF_TRANSACTION_ERROR_GENERIC; |
3254 | 0 | goto cleanup; |
3255 | 0 | } |
3256 | 0 | } |
3257 | | |
3258 | 0 | cleanup: |
3259 | 0 | if (loose_transaction) |
3260 | 0 | ref_transaction_free(loose_transaction); |
3261 | 0 | if (packed_transaction) |
3262 | 0 | ref_transaction_free(packed_transaction); |
3263 | 0 | transaction->state = REF_TRANSACTION_CLOSED; |
3264 | 0 | string_list_clear(&affected_refnames, 0); |
3265 | 0 | string_list_clear(&refnames_to_check, 0); |
3266 | 0 | return ret; |
3267 | 0 | } |
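/*
 * Editor's note: a condensed restatement of the routing decision made in
 * the loop above, using a hypothetical standalone struct so the snippet is
 * self-contained. Symbolic-ref updates, root refs and reflog-only updates
 * have to go through the loose transaction; everything else can be written
 * straight into packed-refs.
 */
struct initial_update_sketch {
	const char *refname;
	const char *new_target;		/* non-NULL for symref updates */
	unsigned int log_only : 1;	/* stands in for REF_LOG_ONLY */
	unsigned int is_root_ref : 1;	/* e.g. "HEAD", precomputed */
};

static int goes_to_loose_store(const struct initial_update_sketch *u)
{
	return u->new_target || u->is_root_ref || u->log_only;
}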
3268 | | |
3269 | | static int files_transaction_finish(struct ref_store *ref_store, |
3270 | | struct ref_transaction *transaction, |
3271 | | struct strbuf *err) |
3272 | 0 | { |
3273 | 0 | struct files_ref_store *refs = |
3274 | 0 | files_downcast(ref_store, 0, "ref_transaction_finish"); |
3275 | 0 | size_t i; |
3276 | 0 | int ret = 0; |
3277 | 0 | struct strbuf sb = STRBUF_INIT; |
3278 | 0 | struct files_transaction_backend_data *backend_data; |
3279 | 0 | struct ref_transaction *packed_transaction; |
3280 | | |
3281 | |
3282 | 0 | assert(err); |
3283 | |
3284 | 0 | if (transaction->flags & REF_TRANSACTION_FLAG_INITIAL) |
3285 | 0 | return files_transaction_finish_initial(refs, transaction, err); |
3286 | 0 | if (!transaction->nr) { |
3287 | 0 | transaction->state = REF_TRANSACTION_CLOSED; |
3288 | 0 | return 0; |
3289 | 0 | } |
3290 | | |
3291 | 0 | backend_data = transaction->backend_data; |
3292 | 0 | packed_transaction = backend_data->packed_transaction; |
3293 | | |
3294 | | /* Perform updates first so live commits remain referenced */ |
3295 | 0 | for (i = 0; i < transaction->nr; i++) { |
3296 | 0 | struct ref_update *update = transaction->updates[i]; |
3297 | 0 | struct ref_lock *lock = update->backend_data; |
3298 | |
3299 | 0 | if (update->rejection_err) |
3300 | 0 | continue; |
3301 | | |
3302 | 0 | if (update->flags & REF_NEEDS_COMMIT || |
3303 | 0 | update->flags & REF_LOG_ONLY) { |
3304 | 0 | if (parse_and_write_reflog(refs, update, lock, err)) { |
3305 | 0 | ret = REF_TRANSACTION_ERROR_GENERIC; |
3306 | 0 | goto cleanup; |
3307 | 0 | } |
3308 | 0 | } |
3309 | | |
3310 | | /* |
3311 | | * We try creating a symlink; if that succeeds, we continue to the
3312 | | * next update. If not, we try to create a regular symref.
3313 | | */ |
3314 | 0 | if (update->new_target && refs->prefer_symlink_refs) |
3315 | | /* |
3316 | | * By using the `NOT_CONSTANT()` trick, we can avoid
3317 | | * errors from `clang`'s `-Wunreachable` logic, which would
3318 | | * report that the `continue` statement is not reachable
3319 | | * when `NO_SYMLINK_HEAD` is `#define`d. |
3320 | | */ |
3321 | 0 | if (NOT_CONSTANT(!create_ref_symlink(lock, update->new_target))) |
3322 | 0 | continue; |
3323 | | |
3324 | 0 | if (update->flags & REF_NEEDS_COMMIT) { |
3325 | 0 | clear_loose_ref_cache(refs); |
3326 | 0 | if (commit_ref(lock)) { |
3327 | 0 | strbuf_addf(err, "couldn't set '%s'", lock->ref_name); |
3328 | 0 | unlock_ref(lock); |
3329 | 0 | update->backend_data = NULL; |
3330 | 0 | ret = REF_TRANSACTION_ERROR_GENERIC; |
3331 | 0 | goto cleanup; |
3332 | 0 | } |
3333 | 0 | } |
3334 | 0 | } |
3335 | | |
3336 | | /* |
3337 | | * Now that updates are safely completed, we can perform |
3338 | | * deletes. First delete the reflogs of any references that |
3339 | | * will be deleted, since (in the unexpected event of an |
3340 | | * error) leaving a reference without a reflog is less bad |
3341 | | * than leaving a reflog without a reference (the latter is a |
3342 | | * mildly invalid repository state): |
3343 | | */ |
3344 | 0 | for (i = 0; i < transaction->nr; i++) { |
3345 | 0 | struct ref_update *update = transaction->updates[i]; |
3346 | |
3347 | 0 | if (update->rejection_err) |
3348 | 0 | continue; |
3349 | | |
3350 | 0 | if (update->flags & REF_DELETING && |
3351 | 0 | !(update->flags & REF_LOG_ONLY) && |
3352 | 0 | !(update->flags & REF_IS_PRUNING)) { |
3353 | 0 | strbuf_reset(&sb); |
3354 | 0 | files_reflog_path(refs, &sb, update->refname); |
3355 | 0 | if (!unlink_or_warn(sb.buf)) |
3356 | 0 | try_remove_empty_parents(refs, update->refname, |
3357 | 0 | REMOVE_EMPTY_PARENTS_REFLOG); |
3358 | 0 | } |
3359 | 0 | } |
3360 | | |
3361 | | /* |
3362 | | * Perform deletes now that updates are safely completed. |
3363 | | * |
3364 | | * First delete any packed versions of the references, while |
3365 | | * retaining the packed-refs lock: |
3366 | | */ |
3367 | 0 | if (packed_transaction) { |
3368 | 0 | ret = ref_transaction_commit(packed_transaction, err); |
3369 | 0 | ref_transaction_free(packed_transaction); |
3370 | 0 | packed_transaction = NULL; |
3371 | 0 | backend_data->packed_transaction = NULL; |
3372 | 0 | if (ret) |
3373 | 0 | goto cleanup; |
3374 | 0 | } |
3375 | | |
3376 | | /* Now delete the loose versions of the references: */ |
3377 | 0 | for (i = 0; i < transaction->nr; i++) { |
3378 | 0 | struct ref_update *update = transaction->updates[i]; |
3379 | 0 | struct ref_lock *lock = update->backend_data; |
3380 | |
|
3381 | 0 | if (update->rejection_err) |
3382 | 0 | continue; |
3383 | | |
3384 | 0 | if (update->flags & REF_DELETING && |
3385 | 0 | !(update->flags & REF_LOG_ONLY)) { |
3386 | 0 | update->flags |= REF_DELETED_RMDIR; |
3387 | 0 | if (!(update->type & REF_ISPACKED) || |
3388 | 0 | update->type & REF_ISSYMREF) { |
3389 | | /* It is a loose reference. */ |
3390 | 0 | strbuf_reset(&sb); |
3391 | 0 | files_ref_path(refs, &sb, lock->ref_name); |
3392 | 0 | if (unlink_or_msg(sb.buf, err)) { |
3393 | 0 | ret = REF_TRANSACTION_ERROR_GENERIC; |
3394 | 0 | goto cleanup; |
3395 | 0 | } |
3396 | 0 | } |
3397 | 0 | } |
3398 | 0 | } |
3399 | | |
3400 | 0 | clear_loose_ref_cache(refs); |
3401 | |
3402 | 0 | cleanup: |
3403 | 0 | files_transaction_cleanup(refs, transaction); |
3404 | |
3405 | 0 | for (i = 0; i < transaction->nr; i++) { |
3406 | 0 | struct ref_update *update = transaction->updates[i]; |
3407 | |
3408 | 0 | if (update->flags & REF_DELETED_RMDIR) { |
3409 | | /* |
3410 | | * The reference was deleted. Delete any |
3411 | | * empty parent directories. (Note that this |
3412 | | * can only work because we have already |
3413 | | * removed the lockfile.) |
3414 | | */ |
3415 | 0 | try_remove_empty_parents(refs, update->refname, |
3416 | 0 | REMOVE_EMPTY_PARENTS_REF); |
3417 | 0 | } |
3418 | 0 | } |
3419 | |
3420 | 0 | strbuf_release(&sb); |
3421 | 0 | return ret; |
3422 | 0 | } |
3423 | | |
3424 | | static int files_transaction_abort(struct ref_store *ref_store, |
3425 | | struct ref_transaction *transaction, |
3426 | | struct strbuf *err UNUSED) |
3427 | 0 | { |
3428 | 0 | struct files_ref_store *refs = |
3429 | 0 | files_downcast(ref_store, 0, "ref_transaction_abort"); |
3430 | |
3431 | 0 | files_transaction_cleanup(refs, transaction); |
3432 | 0 | return 0; |
3433 | 0 | } |
3434 | | |
3435 | | struct expire_reflog_cb { |
3436 | | reflog_expiry_should_prune_fn *should_prune_fn; |
3437 | | void *policy_cb; |
3438 | | FILE *newlog; |
3439 | | struct object_id last_kept_oid; |
3440 | | unsigned int rewrite:1, |
3441 | | dry_run:1; |
3442 | | }; |
3443 | | |
3444 | | static int expire_reflog_ent(const char *refname UNUSED, |
3445 | | struct object_id *ooid, struct object_id *noid, |
3446 | | const char *email, timestamp_t timestamp, int tz, |
3447 | | const char *message, void *cb_data) |
3448 | 0 | { |
3449 | 0 | struct expire_reflog_cb *cb = cb_data; |
3450 | 0 | reflog_expiry_should_prune_fn *fn = cb->should_prune_fn; |
3451 | |
|
3452 | 0 | if (cb->rewrite) |
3453 | 0 | ooid = &cb->last_kept_oid; |
3454 | |
3455 | 0 | if (fn(ooid, noid, email, timestamp, tz, message, cb->policy_cb)) |
3456 | 0 | return 0; |
3457 | | |
3458 | 0 | if (cb->dry_run) |
3459 | 0 | return 0; /* --dry-run */ |
3460 | | |
3461 | 0 | fprintf(cb->newlog, "%s %s %s %"PRItime" %+05d\t%s", oid_to_hex(ooid), |
3462 | 0 | oid_to_hex(noid), email, timestamp, tz, message); |
3463 | 0 | oidcpy(&cb->last_kept_oid, noid); |
3464 | |
3465 | 0 | return 0; |
3466 | 0 | } |
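/*
 * Editor's note: the fprintf() above keeps the on-disk reflog entry layout
 * intact: "<old-oid> <new-oid> <ident> <timestamp> <tz>\t<message>". A
 * small standalone sketch that fills the same shape with made-up values:
 */
#include <stdio.h>

static void print_example_reflog_line(void)
{
	printf("%s %s %s %lu %+05d\t%s",
	       "0000000000000000000000000000000000000000",
	       "1111111111111111111111111111111111111111",
	       "A U Thor <author@example.com>",
	       (unsigned long)1700000000, 100,
	       "commit (initial): example\n");
}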
3467 | | |
3468 | | static int files_reflog_expire(struct ref_store *ref_store, |
3469 | | const char *refname, |
3470 | | unsigned int expire_flags, |
3471 | | reflog_expiry_prepare_fn prepare_fn, |
3472 | | reflog_expiry_should_prune_fn should_prune_fn, |
3473 | | reflog_expiry_cleanup_fn cleanup_fn, |
3474 | | void *policy_cb_data) |
3475 | 0 | { |
3476 | 0 | struct files_ref_store *refs = |
3477 | 0 | files_downcast(ref_store, REF_STORE_WRITE, "reflog_expire"); |
3478 | 0 | struct lock_file reflog_lock = LOCK_INIT; |
3479 | 0 | struct expire_reflog_cb cb; |
3480 | 0 | struct ref_lock *lock; |
3481 | 0 | struct strbuf log_file_sb = STRBUF_INIT; |
3482 | 0 | char *log_file; |
3483 | 0 | int status = 0; |
3484 | 0 | struct strbuf err = STRBUF_INIT; |
3485 | 0 | const struct object_id *oid; |
3486 | |
3487 | 0 | memset(&cb, 0, sizeof(cb)); |
3488 | 0 | cb.rewrite = !!(expire_flags & EXPIRE_REFLOGS_REWRITE); |
3489 | 0 | cb.dry_run = !!(expire_flags & EXPIRE_REFLOGS_DRY_RUN); |
3490 | 0 | cb.policy_cb = policy_cb_data; |
3491 | 0 | cb.should_prune_fn = should_prune_fn; |
3492 | | |
3493 | | /* |
3494 | | * The reflog file is locked by holding the lock on the |
3495 | | * reference itself, plus we might need to update the |
3496 | | * reference if --updateref was specified: |
3497 | | */ |
3498 | 0 | lock = lock_ref_oid_basic(refs, refname, &err); |
3499 | 0 | if (!lock) { |
3500 | 0 | error("cannot lock ref '%s': %s", refname, err.buf); |
3501 | 0 | strbuf_release(&err); |
3502 | 0 | return -1; |
3503 | 0 | } |
3504 | 0 | oid = &lock->old_oid; |
3505 | | |
3506 | | /* |
3507 | | * When refs are deleted, their reflog is deleted before the |
3508 | | * ref itself is deleted. This is because there is no separate |
3509 | | * lock for reflog; instead we take a lock on the ref with |
3510 | | * lock_ref_oid_basic(). |
3511 | | * |
3512 | | * If a race happens and the reflog doesn't exist after we've |
3513 | | * acquired the lock, that's OK. We've got nothing more to do;
3514 | | * we were asked to delete the reflog, but someone else
3515 | | * deleted it! The caller doesn't care who deleted it,
3516 | | * just that it is deleted. So we can return successfully. |
3517 | | */ |
3518 | 0 | if (!refs_reflog_exists(ref_store, refname)) { |
3519 | 0 | unlock_ref(lock); |
3520 | 0 | return 0; |
3521 | 0 | } |
3522 | | |
3523 | 0 | files_reflog_path(refs, &log_file_sb, refname); |
3524 | 0 | log_file = strbuf_detach(&log_file_sb, NULL); |
3525 | 0 | if (!cb.dry_run) { |
3526 | | /* |
3527 | | * Even though holding $GIT_DIR/logs/$reflog.lock has |
3528 | | * no locking implications, we use the lock_file |
3529 | | * machinery here anyway because it does a lot of the |
3530 | | * work we need, including cleaning up if the program |
3531 | | * exits unexpectedly. |
3532 | | */ |
3533 | 0 | if (hold_lock_file_for_update(&reflog_lock, log_file, 0) < 0) { |
3534 | 0 | struct strbuf err = STRBUF_INIT; |
3535 | 0 | unable_to_lock_message(log_file, errno, &err); |
3536 | 0 | error("%s", err.buf); |
3537 | 0 | strbuf_release(&err); |
3538 | 0 | goto failure; |
3539 | 0 | } |
3540 | 0 | cb.newlog = fdopen_lock_file(&reflog_lock, "w"); |
3541 | 0 | if (!cb.newlog) { |
3542 | 0 | error("cannot fdopen %s (%s)", |
3543 | 0 | get_lock_file_path(&reflog_lock), strerror(errno)); |
3544 | 0 | goto failure; |
3545 | 0 | } |
3546 | 0 | } |
3547 | | |
3548 | 0 | (*prepare_fn)(refname, oid, cb.policy_cb); |
3549 | 0 | refs_for_each_reflog_ent(ref_store, refname, expire_reflog_ent, &cb); |
3550 | 0 | (*cleanup_fn)(cb.policy_cb); |
3551 | |
3552 | 0 | if (!cb.dry_run) { |
3553 | | /* |
3554 | | * It doesn't make sense to adjust a reference pointed |
3555 | | * to by a symbolic ref based on expiring entries in |
3556 | | * the symbolic reference's reflog. Nor can we update |
3557 | | * a reference if there are no remaining reflog |
3558 | | * entries. |
3559 | | */ |
3560 | 0 | int update = 0; |
3561 | |
3562 | 0 | if ((expire_flags & EXPIRE_REFLOGS_UPDATE_REF) && |
3563 | 0 | !is_null_oid(&cb.last_kept_oid)) { |
3564 | 0 | int type; |
3565 | 0 | const char *ref; |
3566 | |
3567 | 0 | ref = refs_resolve_ref_unsafe(&refs->base, refname, |
3568 | 0 | RESOLVE_REF_NO_RECURSE, |
3569 | 0 | NULL, &type); |
3570 | 0 | update = !!(ref && !(type & REF_ISSYMREF)); |
3571 | 0 | } |
3572 | |
3573 | 0 | if (close_lock_file_gently(&reflog_lock)) { |
3574 | 0 | status |= error("couldn't write %s: %s", log_file, |
3575 | 0 | strerror(errno)); |
3576 | 0 | rollback_lock_file(&reflog_lock); |
3577 | 0 | } else if (update && |
3578 | 0 | (write_in_full(get_lock_file_fd(&lock->lk), |
3579 | 0 | oid_to_hex(&cb.last_kept_oid), refs->base.repo->hash_algo->hexsz) < 0 || |
3580 | 0 | write_str_in_full(get_lock_file_fd(&lock->lk), "\n") < 0 || |
3581 | 0 | close_ref_gently(lock) < 0)) { |
3582 | 0 | status |= error("couldn't write %s", |
3583 | 0 | get_lock_file_path(&lock->lk)); |
3584 | 0 | rollback_lock_file(&reflog_lock); |
3585 | 0 | } else if (commit_lock_file(&reflog_lock)) { |
3586 | 0 | status |= error("unable to write reflog '%s' (%s)", |
3587 | 0 | log_file, strerror(errno)); |
3588 | 0 | } else if (update && commit_ref(lock)) { |
3589 | 0 | status |= error("couldn't set %s", lock->ref_name); |
3590 | 0 | } |
3591 | 0 | } |
3592 | 0 | free(log_file); |
3593 | 0 | unlock_ref(lock); |
3594 | 0 | return status; |
3595 | | |
3596 | 0 | failure: |
3597 | 0 | rollback_lock_file(&reflog_lock); |
3598 | 0 | free(log_file); |
3599 | 0 | unlock_ref(lock); |
3600 | 0 | return -1; |
3601 | 0 | } |
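/*
 * Editor's note: the expiry policy lives entirely in the callbacks that
 * files_reflog_expire() receives; expire_reflog_ent() above drops an entry
 * whenever should_prune_fn returns non-zero. A hedged sketch of an
 * age-based policy, using local stand-in types rather than the real
 * reflog_expiry_should_prune_fn signature:
 */
#include <time.h>

struct expire_by_age_policy {
	time_t cutoff;	/* entries strictly older than this get pruned */
};

static int should_prune_by_age_sketch(time_t entry_timestamp, void *policy_cb)
{
	struct expire_by_age_policy *policy = policy_cb;

	return entry_timestamp < policy->cutoff;
}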
3602 | | |
3603 | | static int files_ref_store_create_on_disk(struct ref_store *ref_store, |
3604 | | int flags, |
3605 | | struct strbuf *err UNUSED) |
3606 | 0 | { |
3607 | 0 | struct files_ref_store *refs = |
3608 | 0 | files_downcast(ref_store, REF_STORE_WRITE, "create"); |
3609 | 0 | struct strbuf sb = STRBUF_INIT; |
3610 | | |
3611 | | /* |
3612 | | * We need to create a "refs" dir in any case so that older versions of |
3613 | | * Git can tell that this is a repository. This serves two main purposes: |
3614 | | * |
3615 | | * - Clients will know to stop walking the parent-directory chain when |
3616 | | * detecting the Git repository. Otherwise they may end up detecting |
3617 | | * a Git repository in a parent directory instead. |
3618 | | * |
3619 | | * - Instead of failing to detect a repository with unknown reference |
3620 | | * format altogether, old clients will print an error saying that |
3621 | | * they do not understand the reference format extension. |
3622 | | */ |
3623 | 0 | strbuf_addf(&sb, "%s/refs", ref_store->gitdir); |
3624 | 0 | safe_create_dir(the_repository, sb.buf, 1); |
3625 | 0 | adjust_shared_perm(the_repository, sb.buf); |
3626 | | |
3627 | | /* |
3628 | | * There is no need to create directories for common refs when creating |
3629 | | * a worktree ref store. |
3630 | | */ |
3631 | 0 | if (!(flags & REF_STORE_CREATE_ON_DISK_IS_WORKTREE)) { |
3632 | | /* |
3633 | | * Create .git/refs/{heads,tags} |
3634 | | */ |
3635 | 0 | strbuf_reset(&sb); |
3636 | 0 | files_ref_path(refs, &sb, "refs/heads"); |
3637 | 0 | safe_create_dir(the_repository, sb.buf, 1); |
3638 | |
3639 | 0 | strbuf_reset(&sb); |
3640 | 0 | files_ref_path(refs, &sb, "refs/tags"); |
3641 | 0 | safe_create_dir(the_repository, sb.buf, 1); |
3642 | 0 | } |
3643 | |
3644 | 0 | strbuf_release(&sb); |
3645 | 0 | return 0; |
3646 | 0 | } |
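/*
 * Editor's note: outside of a worktree store, the function above only has
 * to guarantee that "refs/", "refs/heads/" and "refs/tags/" exist under
 * the gitdir. A bare-bones POSIX equivalent, ignoring the shared-permission
 * adjustment and error reporting that the real code performs; the function
 * name is hypothetical.
 */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

static void create_ref_dirs_sketch(const char *gitdir)
{
	const char *dirs[] = { "refs", "refs/heads", "refs/tags" };
	char path[4096];

	for (size_t i = 0; i < sizeof(dirs) / sizeof(dirs[0]); i++) {
		snprintf(path, sizeof(path), "%s/%s", gitdir, dirs[i]);
		mkdir(path, 0777);	/* EEXIST is fine here, as above */
	}
}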
3647 | | |
3648 | | struct remove_one_root_ref_data { |
3649 | | const char *gitdir; |
3650 | | struct strbuf *err; |
3651 | | }; |
3652 | | |
3653 | | static int remove_one_root_ref(const char *refname, |
3654 | | void *cb_data) |
3655 | 0 | { |
3656 | 0 | struct remove_one_root_ref_data *data = cb_data; |
3657 | 0 | struct strbuf buf = STRBUF_INIT; |
3658 | 0 | int ret = 0; |
3659 | |
3660 | 0 | strbuf_addf(&buf, "%s/%s", data->gitdir, refname); |
3661 | |
3662 | 0 | ret = unlink(buf.buf); |
3663 | 0 | if (ret < 0) |
3664 | 0 | strbuf_addf(data->err, "could not delete %s: %s\n", |
3665 | 0 | refname, strerror(errno)); |
3666 | |
3667 | 0 | strbuf_release(&buf); |
3668 | 0 | return ret; |
3669 | 0 | } |
3670 | | |
3671 | | static int files_ref_store_remove_on_disk(struct ref_store *ref_store, |
3672 | | struct strbuf *err) |
3673 | 0 | { |
3674 | 0 | struct files_ref_store *refs = |
3675 | 0 | files_downcast(ref_store, REF_STORE_WRITE, "remove"); |
3676 | 0 | struct remove_one_root_ref_data data = { |
3677 | 0 | .gitdir = refs->base.gitdir, |
3678 | 0 | .err = err, |
3679 | 0 | }; |
3680 | 0 | struct strbuf sb = STRBUF_INIT; |
3681 | 0 | int ret = 0; |
3682 | |
3683 | 0 | strbuf_addf(&sb, "%s/refs", refs->base.gitdir); |
3684 | 0 | if (remove_dir_recursively(&sb, 0) < 0) { |
3685 | 0 | strbuf_addf(err, "could not delete refs: %s", |
3686 | 0 | strerror(errno)); |
3687 | 0 | ret = -1; |
3688 | 0 | } |
3689 | 0 | strbuf_reset(&sb); |
3690 | |
3691 | 0 | strbuf_addf(&sb, "%s/logs", refs->base.gitdir); |
3692 | 0 | if (remove_dir_recursively(&sb, 0) < 0) { |
3693 | 0 | strbuf_addf(err, "could not delete logs: %s", |
3694 | 0 | strerror(errno)); |
3695 | 0 | ret = -1; |
3696 | 0 | } |
3697 | 0 | strbuf_reset(&sb); |
3698 | |
3699 | 0 | if (for_each_root_ref(refs, remove_one_root_ref, &data) < 0) |
3700 | 0 | ret = -1; |
3701 | |
3702 | 0 | if (ref_store_remove_on_disk(refs->packed_ref_store, err) < 0) |
3703 | 0 | ret = -1; |
3704 | |
3705 | 0 | strbuf_release(&sb); |
3706 | 0 | return ret; |
3707 | 0 | } |
3708 | | |
3709 | | /* |
3710 | | * Refs and reflogs share a unified interface when scanning the whole
3711 | | * directory. This function type is used as the callback for each
3712 | | * regular file or symlink in the directory. |
3713 | | */ |
3714 | | typedef int (*files_fsck_refs_fn)(struct ref_store *ref_store, |
3715 | | struct fsck_options *o, |
3716 | | const char *refname, |
3717 | | const char *path, |
3718 | | int mode); |
3719 | | |
3720 | | static int files_fsck_symref_target(struct ref_store *ref_store, |
3721 | | struct fsck_options *o, |
3722 | | struct fsck_ref_report *report, |
3723 | | const char *refname, |
3724 | | struct strbuf *referent, |
3725 | | unsigned int symbolic_link) |
3726 | 0 | { |
3727 | 0 | char orig_last_byte; |
3728 | 0 | size_t orig_len; |
3729 | 0 | int ret = 0; |
3730 | |
|
3731 | 0 | orig_len = referent->len; |
3732 | 0 | orig_last_byte = referent->buf[orig_len - 1]; |
3733 | |
3734 | 0 | if (!symbolic_link) { |
3735 | 0 | strbuf_rtrim(referent); |
3736 | |
3737 | 0 | if (referent->len == orig_len || |
3738 | 0 | (referent->len < orig_len && orig_last_byte != '\n')) { |
3739 | 0 | ret |= fsck_report_ref(o, report, |
3740 | 0 | FSCK_MSG_REF_MISSING_NEWLINE, |
3741 | 0 | "misses LF at the end"); |
3742 | 0 | } |
3743 | |
3744 | 0 | if (referent->len != orig_len && referent->len != orig_len - 1) { |
3745 | 0 | ret |= fsck_report_ref(o, report, |
3746 | 0 | FSCK_MSG_TRAILING_REF_CONTENT, |
3747 | 0 | "has trailing whitespaces or newlines"); |
3748 | 0 | } |
3749 | 0 | } |
3750 | |
|
3751 | 0 | ret |= refs_fsck_symref(ref_store, o, report, refname, referent->buf); |
3752 | |
|
3753 | 0 | return ret ? -1 : 0; |
3754 | 0 | } |
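/*
 * Editor's note: a worked example of the length bookkeeping above, written
 * as a standalone, hypothetical classifier. "ref: refs/heads/main" (no LF)
 * trims to the same length and is reported as missing the newline;
 * "ref: refs/heads/main\n\n" loses two bytes to trimming and is reported
 * as having trailing newlines. This sketch collapses the two independent
 * reports above into a single return value for brevity.
 */
#include <string.h>

enum symref_tail { TAIL_OK, TAIL_MISSING_LF, TAIL_TRAILING_JUNK };

static enum symref_tail classify_symref_tail_sketch(const char *buf)
{
	size_t orig_len = strlen(buf);
	size_t len = orig_len;

	while (len && strchr(" \t\r\n", buf[len - 1]))
		len--;

	if (len == orig_len ||
	    (len < orig_len && buf[orig_len - 1] != '\n'))
		return TAIL_MISSING_LF;
	if (len != orig_len - 1)
		return TAIL_TRAILING_JUNK;
	return TAIL_OK;
}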
3755 | | |
3756 | | static int files_fsck_refs_content(struct ref_store *ref_store, |
3757 | | struct fsck_options *o, |
3758 | | const char *target_name, |
3759 | | const char *path, |
3760 | | int mode) |
3761 | 0 | { |
3762 | 0 | struct strbuf ref_content = STRBUF_INIT; |
3763 | 0 | struct strbuf abs_gitdir = STRBUF_INIT; |
3764 | 0 | struct strbuf referent = STRBUF_INIT; |
3765 | 0 | struct fsck_ref_report report = { 0 }; |
3766 | 0 | const char *trailing = NULL; |
3767 | 0 | unsigned int type = 0; |
3768 | 0 | int failure_errno = 0; |
3769 | 0 | struct object_id oid; |
3770 | 0 | int ret = 0; |
3771 | |
|
3772 | 0 | report.path = target_name; |
3773 | |
|
3774 | 0 | if (S_ISLNK(mode)) { |
3775 | 0 | const char *relative_referent_path = NULL; |
3776 | |
|
3777 | 0 | ret = fsck_report_ref(o, &report, |
3778 | 0 | FSCK_MSG_SYMLINK_REF, |
3779 | 0 | "use deprecated symbolic link for symref"); |
3780 | |
|
3781 | 0 | strbuf_add_absolute_path(&abs_gitdir, ref_store->repo->gitdir); |
3782 | 0 | strbuf_normalize_path(&abs_gitdir); |
3783 | 0 | if (!is_dir_sep(abs_gitdir.buf[abs_gitdir.len - 1])) |
3784 | 0 | strbuf_addch(&abs_gitdir, '/'); |
3785 | |
|
3786 | 0 | strbuf_add_real_path(&ref_content, path); |
3787 | 0 | skip_prefix(ref_content.buf, abs_gitdir.buf, |
3788 | 0 | &relative_referent_path); |
3789 | |
3790 | 0 | if (relative_referent_path) |
3791 | 0 | strbuf_addstr(&referent, relative_referent_path); |
3792 | 0 | else |
3793 | 0 | strbuf_addbuf(&referent, &ref_content); |
3794 | |
3795 | 0 | ret |= files_fsck_symref_target(ref_store, o, &report, |
3796 | 0 | target_name, &referent, 1); |
3797 | 0 | goto cleanup; |
3798 | 0 | } |
3799 | | |
3800 | 0 | if (strbuf_read_file(&ref_content, path, 0) < 0) { |
3801 | | /* |
3802 | | * The ref file could have been removed by a concurrent process. We should
3803 | | * ignore this error and continue to the next ref. |
3804 | | */ |
3805 | 0 | if (errno == ENOENT) |
3806 | 0 | goto cleanup; |
3807 | | |
3808 | 0 | ret = error_errno(_("cannot read ref file '%s'"), path); |
3809 | 0 | goto cleanup; |
3810 | 0 | } |
3811 | | |
3812 | 0 | if (parse_loose_ref_contents(ref_store->repo->hash_algo, |
3813 | 0 | ref_content.buf, &oid, &referent, |
3814 | 0 | &type, &trailing, &failure_errno)) { |
3815 | 0 | strbuf_rtrim(&ref_content); |
3816 | 0 | ret = fsck_report_ref(o, &report, |
3817 | 0 | FSCK_MSG_BAD_REF_CONTENT, |
3818 | 0 | "%s", ref_content.buf); |
3819 | 0 | goto cleanup; |
3820 | 0 | } |
3821 | | |
3822 | 0 | if (!(type & REF_ISSYMREF)) { |
3823 | 0 | if (!*trailing) { |
3824 | 0 | ret = fsck_report_ref(o, &report, |
3825 | 0 | FSCK_MSG_REF_MISSING_NEWLINE, |
3826 | 0 | "misses LF at the end"); |
3827 | 0 | goto cleanup; |
3828 | 0 | } |
3829 | 0 | if (*trailing != '\n' || *(trailing + 1)) { |
3830 | 0 | ret = fsck_report_ref(o, &report, |
3831 | 0 | FSCK_MSG_TRAILING_REF_CONTENT, |
3832 | 0 | "has trailing garbage: '%s'", trailing); |
3833 | 0 | goto cleanup; |
3834 | 0 | } |
3835 | | |
3836 | 0 | ret = refs_fsck_ref(ref_store, o, &report, target_name, &oid); |
3837 | 0 | } else { |
3838 | 0 | ret = files_fsck_symref_target(ref_store, o, &report, |
3839 | 0 | target_name, &referent, 0); |
3840 | 0 | goto cleanup; |
3841 | 0 | } |
3842 | | |
3843 | 0 | cleanup: |
3844 | 0 | strbuf_release(&ref_content); |
3845 | 0 | strbuf_release(&referent); |
3846 | 0 | strbuf_release(&abs_gitdir); |
3847 | 0 | return ret; |
3848 | 0 | } |
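/*
 * Editor's note: the parsing above accepts exactly two well-formed shapes
 * of a loose ref file, each terminated by a single LF. Hypothetical example
 * contents (SHA-1 shown; SHA-256 hashes are longer):
 */
static const char example_direct_ref[] =
	"3f786850e387550fdab836ed7e6dc881de23001b\n";
static const char example_symbolic_ref[] =
	"ref: refs/heads/main\n";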
3849 | | |
3850 | | static int files_fsck_refs_name(struct ref_store *ref_store UNUSED, |
3851 | | struct fsck_options *o, |
3852 | | const char *refname, |
3853 | | const char *path, |
3854 | | int mode UNUSED) |
3855 | 0 | { |
3856 | 0 | struct strbuf sb = STRBUF_INIT; |
3857 | 0 | const char *filename; |
3858 | 0 | int ret = 0; |
3859 | |
3860 | 0 | filename = basename((char *) path); |
3861 | | |
3862 | | /* |
3863 | | * Ignore files ending with ".lock", as they may be lock files.
3864 | | * However, do not allow bare ".lock" files.
3865 | | */ |
3866 | 0 | if (filename[0] != '.' && ends_with(filename, ".lock")) |
3867 | 0 | goto cleanup; |
3868 | | |
3869 | 0 | if (is_root_ref(refname)) |
3870 | 0 | goto cleanup; |
3871 | | |
3872 | 0 | if (check_refname_format(refname, 0)) { |
3873 | 0 | struct fsck_ref_report report = { 0 }; |
3874 | |
|
3875 | 0 | report.path = refname; |
3876 | 0 | ret = fsck_report_ref(o, &report, |
3877 | 0 | FSCK_MSG_BAD_REF_NAME, |
3878 | 0 | "invalid refname format"); |
3879 | 0 | } |
3880 | |
3881 | 0 | cleanup: |
3882 | 0 | strbuf_release(&sb); |
3883 | 0 | return ret; |
3884 | 0 | } |
3885 | | |
3886 | | static const files_fsck_refs_fn fsck_refs_fn[] = {
3887 | | files_fsck_refs_name, |
3888 | | files_fsck_refs_content, |
3889 | | NULL, |
3890 | | }; |
3891 | | |
3892 | | static int files_fsck_ref(struct ref_store *ref_store, |
3893 | | struct fsck_options *o, |
3894 | | const char *refname, |
3895 | | const char *path, |
3896 | | int mode) |
3897 | 0 | { |
3898 | 0 | int ret = 0; |
3899 | |
3900 | 0 | if (o->verbose) |
3901 | 0 | fprintf_ln(stderr, "Checking %s", refname); |
3902 | |
|
3903 | 0 | if (!S_ISREG(mode) && !S_ISLNK(mode)) { |
3904 | 0 | struct fsck_ref_report report = { .path = refname }; |
3905 | |
|
3906 | 0 | if (fsck_report_ref(o, &report, |
3907 | 0 | FSCK_MSG_BAD_REF_FILETYPE, |
3908 | 0 | "unexpected file type")) |
3909 | 0 | ret = -1; |
3910 | 0 | goto out; |
3911 | 0 | } |
3912 | | |
3913 | 0 | for (size_t i = 0; fsck_refs_fn[i]; i++) |
3914 | 0 | if (fsck_refs_fn[i](ref_store, o, refname, path, mode)) |
3915 | 0 | ret = -1; |
3916 | |
3917 | 0 | out: |
3918 | 0 | return ret; |
3919 | 0 | } |
3920 | | |
3921 | | static int files_fsck_refs_dir(struct ref_store *ref_store, |
3922 | | struct fsck_options *o, |
3923 | | struct worktree *wt) |
3924 | 0 | { |
3925 | 0 | struct strbuf refname = STRBUF_INIT; |
3926 | 0 | struct strbuf sb = STRBUF_INIT; |
3927 | 0 | struct dir_iterator *iter; |
3928 | 0 | int iter_status; |
3929 | 0 | int ret = 0; |
3930 | |
3931 | 0 | strbuf_addf(&sb, "%s/refs", ref_store->gitdir); |
3932 | |
3933 | 0 | iter = dir_iterator_begin(sb.buf, 0); |
3934 | 0 | if (!iter) { |
3935 | 0 | if (errno == ENOENT && !is_main_worktree(wt)) |
3936 | 0 | goto out; |
3937 | | |
3938 | 0 | ret = error_errno(_("cannot open directory %s"), sb.buf); |
3939 | 0 | goto out; |
3940 | 0 | } |
3941 | | |
3942 | 0 | while ((iter_status = dir_iterator_advance(iter)) == ITER_OK) { |
3943 | 0 | if (S_ISDIR(iter->st.st_mode)) |
3944 | 0 | continue; |
3945 | | |
3946 | 0 | strbuf_reset(&refname); |
3947 | 0 | if (!is_main_worktree(wt)) |
3948 | 0 | strbuf_addf(&refname, "worktrees/%s/", wt->id); |
3949 | 0 | strbuf_addf(&refname, "refs/%s", iter->relative_path); |
3950 | |
3951 | 0 | if (files_fsck_ref(ref_store, o, refname.buf, |
3952 | 0 | iter->path.buf, iter->st.st_mode) < 0) |
3953 | 0 | ret = -1; |
3954 | 0 | } |
3955 | |
3956 | 0 | if (iter_status != ITER_DONE) |
3957 | 0 | ret = error(_("failed to iterate over '%s'"), sb.buf); |
3958 | |
|
3959 | 0 | out: |
3960 | 0 | dir_iterator_free(iter); |
3961 | 0 | strbuf_release(&sb); |
3962 | 0 | strbuf_release(&refname); |
3963 | 0 | return ret; |
3964 | 0 | } |
3965 | | |
3966 | | struct files_fsck_root_ref_data { |
3967 | | struct files_ref_store *refs; |
3968 | | struct fsck_options *o; |
3969 | | struct worktree *wt; |
3970 | | struct strbuf refname; |
3971 | | struct strbuf path; |
3972 | | }; |
3973 | | |
3974 | | static int files_fsck_root_ref(const char *refname, void *cb_data) |
3975 | 0 | { |
3976 | 0 | struct files_fsck_root_ref_data *data = cb_data; |
3977 | 0 | struct stat st; |
3978 | |
3979 | 0 | strbuf_reset(&data->refname); |
3980 | 0 | if (!is_main_worktree(data->wt)) |
3981 | 0 | strbuf_addf(&data->refname, "worktrees/%s/", data->wt->id); |
3982 | 0 | strbuf_addstr(&data->refname, refname); |
3983 | |
3984 | 0 | strbuf_reset(&data->path); |
3985 | 0 | strbuf_addf(&data->path, "%s/%s", data->refs->gitcommondir, data->refname.buf); |
3986 | |
3987 | 0 | if (stat(data->path.buf, &st)) { |
3988 | 0 | if (errno == ENOENT) |
3989 | 0 | return 0; |
3990 | 0 | return error_errno("failed to read ref: '%s'", data->path.buf); |
3991 | 0 | } |
3992 | | |
3993 | 0 | return files_fsck_ref(&data->refs->base, data->o, data->refname.buf, |
3994 | 0 | data->path.buf, st.st_mode); |
3995 | 0 | } |
3996 | | |
3997 | | static int files_fsck(struct ref_store *ref_store, |
3998 | | struct fsck_options *o, |
3999 | | struct worktree *wt) |
4000 | 0 | { |
4001 | 0 | struct files_ref_store *refs = |
4002 | 0 | files_downcast(ref_store, REF_STORE_READ, "fsck"); |
4003 | 0 | struct files_fsck_root_ref_data data = { |
4004 | 0 | .refs = refs, |
4005 | 0 | .o = o, |
4006 | 0 | .wt = wt, |
4007 | 0 | .refname = STRBUF_INIT, |
4008 | 0 | .path = STRBUF_INIT, |
4009 | 0 | }; |
4010 | 0 | int ret = 0; |
4011 | |
4012 | 0 | if (files_fsck_refs_dir(ref_store, o, wt) < 0) |
4013 | 0 | ret = -1; |
4014 | |
4015 | 0 | if (for_each_root_ref(refs, files_fsck_root_ref, &data) < 0) |
4016 | 0 | ret = -1; |
4017 | |
4018 | 0 | if (refs->packed_ref_store->be->fsck(refs->packed_ref_store, o, wt) < 0) |
4019 | 0 | ret = -1; |
4020 | |
4021 | 0 | strbuf_release(&data.refname); |
4022 | 0 | strbuf_release(&data.path); |
4023 | 0 | return ret; |
4024 | 0 | } |
4025 | | |
4026 | | struct ref_storage_be refs_be_files = { |
4027 | | .name = "files", |
4028 | | .init = files_ref_store_init, |
4029 | | .release = files_ref_store_release, |
4030 | | .create_on_disk = files_ref_store_create_on_disk, |
4031 | | .remove_on_disk = files_ref_store_remove_on_disk, |
4032 | | |
4033 | | .transaction_prepare = files_transaction_prepare, |
4034 | | .transaction_finish = files_transaction_finish, |
4035 | | .transaction_abort = files_transaction_abort, |
4036 | | |
4037 | | .optimize = files_optimize, |
4038 | | .optimize_required = files_optimize_required, |
4039 | | .rename_ref = files_rename_ref, |
4040 | | .copy_ref = files_copy_ref, |
4041 | | |
4042 | | .iterator_begin = files_ref_iterator_begin, |
4043 | | .read_raw_ref = files_read_raw_ref, |
4044 | | .read_symbolic_ref = files_read_symbolic_ref, |
4045 | | |
4046 | | .reflog_iterator_begin = files_reflog_iterator_begin, |
4047 | | .for_each_reflog_ent = files_for_each_reflog_ent, |
4048 | | .for_each_reflog_ent_reverse = files_for_each_reflog_ent_reverse, |
4049 | | .reflog_exists = files_reflog_exists, |
4050 | | .create_reflog = files_create_reflog, |
4051 | | .delete_reflog = files_delete_reflog, |
4052 | | .reflog_expire = files_reflog_expire, |
4053 | | |
4054 | | .fsck = files_fsck, |
4055 | | }; |