Line | Count | Source (jump to first uncovered line) |
1 | | #include "cache.h" |
2 | | #include "strvec.h" |
3 | | #include "repository.h" |
4 | | #include "config.h" |
5 | | #include "dir.h" |
6 | | #include "tree.h" |
7 | | #include "tree-walk.h" |
8 | | #include "cache-tree.h" |
9 | | #include "unpack-trees.h" |
10 | | #include "progress.h" |
11 | | #include "refs.h" |
12 | | #include "attr.h" |
13 | | #include "split-index.h" |
14 | | #include "sparse-index.h" |
15 | | #include "submodule.h" |
16 | | #include "submodule-config.h" |
17 | | #include "fsmonitor.h" |
18 | | #include "object-store.h" |
19 | | #include "promisor-remote.h" |
20 | | #include "entry.h" |
21 | | #include "parallel-checkout.h" |
22 | | |
23 | | /* |
24 | | * Error messages expected by scripts out of plumbing commands such as |
25 | | * read-tree. Non-scripted Porcelain is not required to use these messages |
26 | | * and in fact are encouraged to reword them to better suit their particular |
27 | | * situation better. See how "git checkout" and "git merge" replaces |
28 | | * them using setup_unpack_trees_porcelain(), for example. |
29 | | */ |
/*
 * Default message templates, indexed by enum unpack_trees_error_types
 * (see the per-entry comments); ERRORMSG() falls back to this table
 * when no porcelain override was installed.
 */
static const char *unpack_plumbing_errors[NB_UNPACK_TREES_WARNING_TYPES] = {
	/* ERROR_WOULD_OVERWRITE */
	"Entry '%s' would be overwritten by merge. Cannot merge.",

	/* ERROR_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot merge.",

	/* ERROR_NOT_UPTODATE_DIR */
	"Updating '%s' would lose untracked files in it",

	/* ERROR_CWD_IN_THE_WAY */
	"Refusing to remove '%s' since it is the current working directory.",

	/* ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN */
	"Untracked working tree file '%s' would be overwritten by merge.",

	/* ERROR_WOULD_LOSE_UNTRACKED_REMOVED */
	"Untracked working tree file '%s' would be removed by merge.",

	/* ERROR_BIND_OVERLAP */
	"Entry '%s' overlaps with '%s'. Cannot bind.",

	/* ERROR_WOULD_LOSE_SUBMODULE */
	"Submodule '%s' cannot checkout new HEAD.",

	/* NB_UNPACK_TREES_ERROR_TYPES; just a meta value */
	"",

	/* WARNING_SPARSE_NOT_UPTODATE_FILE */
	"Path '%s' not uptodate; will not remove from working tree.",

	/* WARNING_SPARSE_UNMERGED_FILE */
	"Path '%s' unmerged; will not remove from working tree.",

	/* WARNING_SPARSE_ORPHANED_NOT_OVERWRITTEN */
	"Path '%s' already present; will not overwrite with sparse update.",
};
67 | | |
/*
 * Pick the caller-installed message for `type` if one was set (e.g. via
 * setup_unpack_trees_porcelain()); otherwise use the plumbing default
 * from unpack_plumbing_errors[].  `o` may be NULL.
 */
#define ERRORMSG(o,type) \
	( ((o) && (o)->msgs[(type)]) \
	  ? ((o)->msgs[(type)]) \
	  : (unpack_plumbing_errors[(type)]) )
72 | | |
static const char *super_prefixed(const char *path, const char *super_prefix)
{
	/*
	 * It is necessary and sufficient to have two static buffers
	 * here, as the return value of this function is fed to
	 * error() using the unpack_*_errors[] templates we see above.
	 */
	static struct strbuf buf[2] = {STRBUF_INIT, STRBUF_INIT};
	static int super_prefix_len = -1;	/* -1: not initialized yet */
	static unsigned idx = ARRAY_SIZE(buf) - 1;

	/* One-time setup: seed every buffer with the super-prefix. */
	if (super_prefix_len < 0) {
		if (!super_prefix) {
			super_prefix_len = 0;
		} else {
			int i;
			for (i = 0; i < ARRAY_SIZE(buf); i++)
				strbuf_addstr(&buf[i], super_prefix);
			super_prefix_len = buf[0].len;
		}
	}

	/* No prefix configured: the path is returned unchanged. */
	if (!super_prefix_len)
		return path;

	/* Rotate to the other buffer so two results can be alive at once. */
	if (++idx >= ARRAY_SIZE(buf))
		idx = 0;

	/* Truncate back to just the prefix, then append this path. */
	strbuf_setlen(&buf[idx], super_prefix_len);
	strbuf_addstr(&buf[idx], path);

	return buf[idx].buf;
}
106 | | |
/*
 * Install porcelain-friendly message templates into opts->msgs,
 * customized for the command in `cmd` ("checkout", "merge", or any
 * other name, which is substituted into a generic template).  The
 * strings built with strvec_pushf() are owned by opts->msgs_to_free
 * and must be released with clear_unpack_trees_porcelain().
 *
 * Note: "%%s" in the templates survives strvec_pushf() as a literal
 * "%s", to be filled in later with the list of offending paths.
 */
void setup_unpack_trees_porcelain(struct unpack_trees_options *opts,
				  const char *cmd)
{
	int i;
	const char **msgs = opts->msgs;
	const char *msg;

	strvec_init(&opts->msgs_to_free);

	if (!strcmp(cmd, "checkout"))
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("Your local changes to the following files would be overwritten by checkout:\n%%s"
			  "Please commit your changes or stash them before you switch branches.")
		      : _("Your local changes to the following files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("Your local changes to the following files would be overwritten by merge:\n%%s"
			  "Please commit your changes or stash them before you merge.")
		      : _("Your local changes to the following files would be overwritten by merge:\n%%s");
	else
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("Your local changes to the following files would be overwritten by %s:\n%%s"
			  "Please commit your changes or stash them before you %s.")
		      : _("Your local changes to the following files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] =
		strvec_pushf(&opts->msgs_to_free, msg, cmd, cmd);

	msgs[ERROR_NOT_UPTODATE_DIR] =
		_("Updating the following directories would lose untracked files in them:\n%s");

	msgs[ERROR_CWD_IN_THE_WAY] =
		_("Refusing to remove the current working directory:\n%s");

	if (!strcmp(cmd, "checkout"))
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("The following untracked working tree files would be removed by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be removed by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("The following untracked working tree files would be removed by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be removed by merge:\n%%s");
	else
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("The following untracked working tree files would be removed by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be removed by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] =
		strvec_pushf(&opts->msgs_to_free, msg, cmd, cmd);

	if (!strcmp(cmd, "checkout"))
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("The following untracked working tree files would be overwritten by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("The following untracked working tree files would be overwritten by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be overwritten by merge:\n%%s");
	else
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("The following untracked working tree files would be overwritten by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] =
		strvec_pushf(&opts->msgs_to_free, msg, cmd, cmd);

	/*
	 * Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we
	 * cannot easily display it as a list.
	 */
	msgs[ERROR_BIND_OVERLAP] = _("Entry '%s' overlaps with '%s'. Cannot bind.");

	msgs[ERROR_WOULD_LOSE_SUBMODULE] =
		_("Cannot update submodule:\n%s");

	msgs[WARNING_SPARSE_NOT_UPTODATE_FILE] =
		_("The following paths are not up to date and were left despite sparse patterns:\n%s");
	msgs[WARNING_SPARSE_UNMERGED_FILE] =
		_("The following paths are unmerged and were left despite sparse patterns:\n%s");
	msgs[WARNING_SPARSE_ORPHANED_NOT_OVERWRITTEN] =
		_("The following paths were already present and thus not updated despite sparse patterns:\n%s");

	opts->show_all_errors = 1;
	/* rejected paths may not have a static buffer */
	for (i = 0; i < ARRAY_SIZE(opts->unpack_rejects); i++)
		opts->unpack_rejects[i].strdup_strings = 1;
}
197 | | |
198 | | void clear_unpack_trees_porcelain(struct unpack_trees_options *opts) |
199 | 0 | { |
200 | 0 | strvec_clear(&opts->msgs_to_free); |
201 | 0 | memset(opts->msgs, 0, sizeof(opts->msgs)); |
202 | 0 | } |
203 | | |
204 | | static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce, |
205 | | unsigned int set, unsigned int clear) |
206 | 0 | { |
207 | 0 | clear |= CE_HASHED; |
208 | |
|
209 | 0 | if (set & CE_REMOVE) |
210 | 0 | set |= CE_WT_REMOVE; |
211 | |
|
212 | 0 | ce->ce_flags = (ce->ce_flags & ~clear) | set; |
213 | 0 | return add_index_entry(&o->result, ce, |
214 | 0 | ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE); |
215 | 0 | } |
216 | | |
/*
 * Convenience wrapper: duplicate `ce` into o->result's memory pool and
 * add the copy via do_add_entry() with the given flag adjustments.
 */
static void add_entry(struct unpack_trees_options *o,
		      const struct cache_entry *ce,
		      unsigned int set, unsigned int clear)
{
	do_add_entry(o, dup_cache_entry(ce, &o->result), set, clear);
}
223 | | |
224 | | /* |
225 | | * add error messages on path <path> |
226 | | * corresponding to the type <e> with the message <msg> |
227 | | * indicating if it should be display in porcelain or not |
228 | | */ |
/*
 * Record (or immediately report) that `path` was rejected for reason `e`.
 * Always returns -1 so callers can propagate the failure directly.
 * In quiet mode nothing is printed or recorded.
 */
static int add_rejected_path(struct unpack_trees_options *o,
			     enum unpack_trees_error_types e,
			     const char *path)
{
	if (o->quiet)
		return -1;

	/* Without show_all_errors, report the first rejection right away. */
	if (!o->show_all_errors)
		return error(ERRORMSG(o, e), super_prefixed(path,
							    o->super_prefix));

	/*
	 * Otherwise, insert in a list for future display by
	 * display_(error|warning)_msgs()
	 */
	string_list_append(&o->unpack_rejects[e], path);
	return -1;
}
247 | | |
248 | | /* |
249 | | * display all the error messages stored in a nice way |
250 | | */ |
static void display_error_msgs(struct unpack_trees_options *o)
{
	int e;
	unsigned error_displayed = 0;
	/* Only the error types (< NB_UNPACK_TREES_ERROR_TYPES) are handled here. */
	for (e = 0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) {
		struct string_list *rejects = &o->unpack_rejects[e];

		if (rejects->nr > 0) {
			int i;
			struct strbuf path = STRBUF_INIT;

			error_displayed = 1;
			/* One indented path per line, fed into the "%s" template. */
			for (i = 0; i < rejects->nr; i++)
				strbuf_addf(&path, "\t%s\n", rejects->items[i].string);
			error(ERRORMSG(o, e), super_prefixed(path.buf,
							     o->super_prefix));
			strbuf_release(&path);
		}
		string_list_clear(rejects, 0);
	}
	if (error_displayed)
		fprintf(stderr, _("Aborting\n"));
}
274 | | |
275 | | /* |
276 | | * display all the warning messages stored in a nice way |
277 | | */ |
static void display_warning_msgs(struct unpack_trees_options *o)
{
	int e;
	unsigned warning_displayed = 0;
	/* Warning types start just past the NB_UNPACK_TREES_ERROR_TYPES meta slot. */
	for (e = NB_UNPACK_TREES_ERROR_TYPES + 1;
	     e < NB_UNPACK_TREES_WARNING_TYPES; e++) {
		struct string_list *rejects = &o->unpack_rejects[e];

		if (rejects->nr > 0) {
			int i;
			struct strbuf path = STRBUF_INIT;

			warning_displayed = 1;
			/* One indented path per line, fed into the "%s" template. */
			for (i = 0; i < rejects->nr; i++)
				strbuf_addf(&path, "\t%s\n", rejects->items[i].string);
			warning(ERRORMSG(o, e), super_prefixed(path.buf,
							       o->super_prefix));
			strbuf_release(&path);
		}
		string_list_clear(rejects, 0);
	}
	if (warning_displayed)
		fprintf(stderr, _("After fixing the above paths, you may want to run `git sparse-checkout reapply`.\n"));
}
302 | | static int check_submodule_move_head(const struct cache_entry *ce, |
303 | | const char *old_id, |
304 | | const char *new_id, |
305 | | struct unpack_trees_options *o) |
306 | 0 | { |
307 | 0 | unsigned flags = SUBMODULE_MOVE_HEAD_DRY_RUN; |
308 | 0 | const struct submodule *sub = submodule_from_ce(ce); |
309 | |
|
310 | 0 | if (!sub) |
311 | 0 | return 0; |
312 | | |
313 | 0 | if (o->reset) |
314 | 0 | flags |= SUBMODULE_MOVE_HEAD_FORCE; |
315 | |
|
316 | 0 | if (submodule_move_head(ce->name, o->super_prefix, old_id, new_id, |
317 | 0 | flags)) |
318 | 0 | return add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name); |
319 | 0 | return 0; |
320 | 0 | } |
321 | | |
322 | | /* |
323 | | * Perform the loading of the repository's gitmodules file. This function is |
324 | | * used by 'check_update()' to perform loading of the gitmodules file in two |
325 | | * different situations: |
326 | | * (1) before removing entries from the working tree if the gitmodules file has |
327 | | * been marked for removal. This situation is specified by 'state' == NULL. |
328 | | * (2) before checking out entries to the working tree if the gitmodules file |
329 | | * has been marked for update. This situation is specified by 'state' != NULL. |
330 | | */ |
static void load_gitmodules_file(struct index_state *index,
				 struct checkout *state)
{
	int pos = index_name_pos(index, GITMODULES_FILE, strlen(GITMODULES_FILE));

	if (pos >= 0) {
		struct cache_entry *ce = index->cache[pos];
		/* Case (1): .gitmodules is about to be removed from the worktree. */
		if (!state && ce->ce_flags & CE_WT_REMOVE) {
			repo_read_gitmodules(the_repository, 0);
		/* Case (2): .gitmodules is about to be updated; check it out first. */
		} else if (state && (ce->ce_flags & CE_UPDATE)) {
			submodule_free(the_repository);
			checkout_entry(ce, state, NULL, NULL);
			repo_read_gitmodules(the_repository, 0);
		}
	}
}
347 | | |
348 | | static struct progress *get_progress(struct unpack_trees_options *o, |
349 | | struct index_state *index) |
350 | 0 | { |
351 | 0 | unsigned cnt = 0, total = 0; |
352 | |
|
353 | 0 | if (!o->update || !o->verbose_update) |
354 | 0 | return NULL; |
355 | | |
356 | 0 | for (; cnt < index->cache_nr; cnt++) { |
357 | 0 | const struct cache_entry *ce = index->cache[cnt]; |
358 | 0 | if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE)) |
359 | 0 | total++; |
360 | 0 | } |
361 | |
|
362 | 0 | return start_delayed_progress(_("Updating files"), total); |
363 | 0 | } |
364 | | |
365 | | static void setup_collided_checkout_detection(struct checkout *state, |
366 | | struct index_state *index) |
367 | 0 | { |
368 | 0 | int i; |
369 | |
|
370 | 0 | state->clone = 1; |
371 | 0 | for (i = 0; i < index->cache_nr; i++) |
372 | 0 | index->cache[i]->ce_flags &= ~CE_MATCHED; |
373 | 0 | } |
374 | | |
/*
 * Collect all index entries marked CE_MATCHED (set during checkout for
 * colliding paths), clear the flag, and print a sorted warning listing
 * them, e.g. for case-insensitive-filesystem collisions during clone.
 */
static void report_collided_checkout(struct index_state *index)
{
	struct string_list list = STRING_LIST_INIT_NODUP;
	int i;

	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (!(ce->ce_flags & CE_MATCHED))
			continue;

		string_list_append(&list, ce->name);
		ce->ce_flags &= ~CE_MATCHED;
	}

	/* Sort with filesystem path semantics before reporting. */
	list.cmp = fspathcmp;
	string_list_sort(&list);

	if (list.nr) {
		warning(_("the following paths have collided (e.g. case-sensitive paths\n"
			  "on a case-insensitive filesystem) and only one from the same\n"
			  "colliding group is in the working tree:\n"));

		for (i = 0; i < list.nr; i++)
			fprintf(stderr, "  '%s'\n", list.items[i].string);
	}

	string_list_clear(&list, 0);
}
404 | | |
405 | | static int must_checkout(const struct cache_entry *ce) |
406 | 0 | { |
407 | 0 | return ce->ce_flags & CE_UPDATE; |
408 | 0 | } |
409 | | |
/*
 * Apply the result of the tree unpacking to the working tree: remove
 * entries flagged CE_WT_REMOVE, then check out entries flagged
 * CE_UPDATE (possibly in parallel).  Returns 1 on any checkout error,
 * 0 on success.  In dry-run or no-update mode only the marked cache
 * entries are dropped from the index.
 */
static int check_updates(struct unpack_trees_options *o,
			 struct index_state *index)
{
	unsigned cnt = 0;
	int errs = 0;
	struct progress *progress;
	struct checkout state = CHECKOUT_INIT;
	int i, pc_workers, pc_threshold;

	trace_performance_enter();
	state.super_prefix = o->super_prefix;
	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;
	state.istate = index;
	clone_checkout_metadata(&state.meta, &o->meta, NULL);

	/* No worktree changes requested: just drop the marked entries. */
	if (!o->update || o->dry_run) {
		remove_marked_cache_entries(index, 0);
		trace_performance_leave("check_updates");
		return 0;
	}

	if (o->clone)
		setup_collided_checkout_detection(&state, index);

	progress = get_progress(o, index);

	/* Start with clean cache to avoid using any possibly outdated info. */
	invalidate_lstat_cache();

	git_attr_set_direction(GIT_ATTR_CHECKOUT);

	/* Reload .gitmodules before removals if it is itself being removed. */
	if (should_update_submodules())
		load_gitmodules_file(index, NULL);

	/* First pass: delete worktree files flagged for removal. */
	for (i = 0; i < index->cache_nr; i++) {
		const struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_WT_REMOVE) {
			display_progress(progress, ++cnt);
			unlink_entry(ce, o->super_prefix);
		}
	}

	remove_marked_cache_entries(index, 0);
	remove_scheduled_dirs();

	/* Reload .gitmodules before checkouts if it is being updated. */
	if (should_update_submodules())
		load_gitmodules_file(index, &state);

	if (has_promisor_remote())
		/*
		 * Prefetch the objects that are to be checked out in the loop
		 * below.
		 */
		prefetch_cache_entries(index, must_checkout);

	get_parallel_checkout_configs(&pc_workers, &pc_threshold);

	/* Second pass: write out entries flagged for update. */
	enable_delayed_checkout(&state);
	if (pc_workers > 1)
		init_parallel_checkout();
	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (must_checkout(ce)) {
			size_t last_pc_queue_size = pc_queue_size();

			if (ce->ce_flags & CE_WT_REMOVE)
				BUG("both update and delete flags are set on %s",
				    ce->name);
			ce->ce_flags &= ~CE_UPDATE;
			errs |= checkout_entry(ce, &state, NULL, NULL);

			/*
			 * Only count the entry now if it was not queued for a
			 * parallel worker (the workers report progress later).
			 */
			if (last_pc_queue_size == pc_queue_size())
				display_progress(progress, ++cnt);
		}
	}
	if (pc_workers > 1)
		errs |= run_parallel_checkout(&state, pc_workers, pc_threshold,
					      progress, &cnt);
	stop_progress(&progress);
	errs |= finish_delayed_checkout(&state, o->verbose_update);
	git_attr_set_direction(GIT_ATTR_CHECKIN);

	if (o->clone)
		report_collided_checkout(index);

	trace_performance_leave("check_updates");
	return errs != 0;
}
502 | | |
503 | | static int verify_uptodate_sparse(const struct cache_entry *ce, |
504 | | struct unpack_trees_options *o); |
505 | | static int verify_absent_sparse(const struct cache_entry *ce, |
506 | | enum unpack_trees_error_types, |
507 | | struct unpack_trees_options *o); |
508 | | |
/*
 * Reconcile the entry's CE_SKIP_WORKTREE state with the freshly
 * computed CE_NEW_SKIP_WORKTREE decision, adjusting CE_UPDATE /
 * CE_WT_REMOVE so the worktree is only touched inside the checkout
 * area.  Returns 0 on success, -1 if a safety check fails.
 */
static int apply_sparse_checkout(struct index_state *istate,
				 struct cache_entry *ce,
				 struct unpack_trees_options *o)
{
	int was_skip_worktree = ce_skip_worktree(ce);

	/* Transfer the new decision onto the real CE_SKIP_WORKTREE bit. */
	if (ce->ce_flags & CE_NEW_SKIP_WORKTREE)
		ce->ce_flags |= CE_SKIP_WORKTREE;
	else
		ce->ce_flags &= ~CE_SKIP_WORKTREE;
	if (was_skip_worktree != ce_skip_worktree(ce)) {
		ce->ce_flags |= CE_UPDATE_IN_BASE;
		mark_fsmonitor_invalid(istate, ce);
		istate->cache_changed |= CE_ENTRY_CHANGED;
	}

	/*
	 * if (!was_skip_worktree && !ce_skip_worktree()) {
	 *	This is perfectly normal. Move on;
	 * }
	 */

	/*
	 * Merge strategies may set CE_UPDATE|CE_REMOVE outside checkout
	 * area as a result of ce_skip_worktree() shortcuts in
	 * verify_absent() and verify_uptodate().
	 * Make sure they don't modify worktree if they are already
	 * outside checkout area
	 */
	if (was_skip_worktree && ce_skip_worktree(ce)) {
		ce->ce_flags &= ~CE_UPDATE;

		/*
		 * By default, when CE_REMOVE is on, CE_WT_REMOVE is also
		 * on to get that file removed from both index and worktree.
		 * If that file is already outside worktree area, don't
		 * bother remove it.
		 */
		if (ce->ce_flags & CE_REMOVE)
			ce->ce_flags &= ~CE_WT_REMOVE;
	}

	/* Entry is leaving the checkout area: schedule worktree removal. */
	if (!was_skip_worktree && ce_skip_worktree(ce)) {
		/*
		 * If CE_UPDATE is set, verify_uptodate() must be called already
		 * also stat info may have lost after merged_entry() so calling
		 * verify_uptodate() again may fail
		 */
		if (!(ce->ce_flags & CE_UPDATE) &&
		    verify_uptodate_sparse(ce, o)) {
			ce->ce_flags &= ~CE_SKIP_WORKTREE;
			return -1;
		}
		ce->ce_flags |= CE_WT_REMOVE;
		ce->ce_flags &= ~CE_UPDATE;
	}
	/* Entry is entering the checkout area: schedule a checkout. */
	if (was_skip_worktree && !ce_skip_worktree(ce)) {
		if (verify_absent_sparse(ce, WARNING_SPARSE_ORPHANED_NOT_OVERWRITTEN, o))
			return -1;
		ce->ce_flags |= CE_UPDATE;
	}
	return 0;
}
572 | | |
573 | | static int warn_conflicted_path(struct index_state *istate, |
574 | | int i, |
575 | | struct unpack_trees_options *o) |
576 | 0 | { |
577 | 0 | char *conflicting_path = istate->cache[i]->name; |
578 | 0 | int count = 0; |
579 | |
|
580 | 0 | add_rejected_path(o, WARNING_SPARSE_UNMERGED_FILE, conflicting_path); |
581 | | |
582 | | /* Find out how many higher stage entries are at same path */ |
583 | 0 | while ((++count) + i < istate->cache_nr && |
584 | 0 | !strcmp(conflicting_path, istate->cache[count + i]->name)) |
585 | 0 | ; /* do nothing */ |
586 | |
|
587 | 0 | return count; |
588 | 0 | } |
589 | | |
590 | | static inline int call_unpack_fn(const struct cache_entry * const *src, |
591 | | struct unpack_trees_options *o) |
592 | 89.0k | { |
593 | 89.0k | int ret = o->fn(src, o); |
594 | 89.0k | if (ret > 0) |
595 | 0 | ret = 0; |
596 | 89.0k | return ret; |
597 | 89.0k | } |
598 | | |
/*
 * Flag `ce` as consumed.  If it sits exactly at the current
 * cache_bottom, advance cache_bottom past the contiguous run of
 * already-unpacked entries so next_cache_entry() scans less.
 */
static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o)
{
	ce->ce_flags |= CE_UNPACKED;

	if (o->cache_bottom < o->src_index->cache_nr &&
	    o->src_index->cache[o->cache_bottom] == ce) {
		int bottom = o->cache_bottom;
		while (bottom < o->src_index->cache_nr &&
		       o->src_index->cache[bottom]->ce_flags & CE_UNPACKED)
			bottom++;
		o->cache_bottom = bottom;
	}
}
612 | | |
613 | | static void mark_all_ce_unused(struct index_state *index) |
614 | 31.7k | { |
615 | 31.7k | int i; |
616 | 256k | for (i = 0; i < index->cache_nr; i++) |
617 | 224k | index->cache[i]->ce_flags &= ~(CE_UNPACKED | CE_ADDED | CE_NEW_SKIP_WORKTREE); |
618 | 31.7k | } |
619 | | |
620 | | static int locate_in_src_index(const struct cache_entry *ce, |
621 | | struct unpack_trees_options *o) |
622 | 0 | { |
623 | 0 | struct index_state *index = o->src_index; |
624 | 0 | int len = ce_namelen(ce); |
625 | 0 | int pos = index_name_pos(index, ce->name, len); |
626 | 0 | if (pos < 0) |
627 | 0 | pos = -1 - pos; |
628 | 0 | return pos; |
629 | 0 | } |
630 | | |
631 | | /* |
632 | | * We call unpack_index_entry() with an unmerged cache entry |
633 | | * only in diff-index, and it wants a single callback. Skip |
634 | | * the other unmerged entry with the same name. |
635 | | */ |
static void mark_ce_used_same_name(struct cache_entry *ce,
				   struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos;

	/* Consume every consecutive entry whose name equals ce's. */
	for (pos = locate_in_src_index(ce, o); pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		mark_ce_used(next, o);
	}
}
651 | | |
652 | | static struct cache_entry *next_cache_entry(struct unpack_trees_options *o) |
653 | 57.3k | { |
654 | 57.3k | const struct index_state *index = o->src_index; |
655 | 57.3k | int pos = o->cache_bottom; |
656 | | |
657 | 57.3k | while (pos < index->cache_nr) { |
658 | 47.6k | struct cache_entry *ce = index->cache[pos]; |
659 | 47.6k | if (!(ce->ce_flags & CE_UNPACKED)) |
660 | 47.6k | return ce; |
661 | 0 | pos++; |
662 | 0 | } |
663 | 9.68k | return NULL; |
664 | 57.3k | } |
665 | | |
/*
 * For an unmerged path (which must NOT exist at stage 0), copy every
 * consecutive higher-stage entry with the same name into the result
 * index unchanged and mark it consumed.
 */
static void add_same_unmerged(const struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);

	/* A non-negative pos would mean a stage-0 entry exists: a caller bug. */
	if (0 <= pos)
		die("programming error in a caller of mark_ce_used_same_name");
	for (pos = -pos - 1; pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		add_entry(o, next, 0, 0);
		mark_ce_used(next, o);
	}
}
684 | | |
685 | | static int unpack_index_entry(struct cache_entry *ce, |
686 | | struct unpack_trees_options *o) |
687 | 19.4k | { |
688 | 19.4k | const struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, }; |
689 | 19.4k | int ret; |
690 | | |
691 | 19.4k | src[0] = ce; |
692 | | |
693 | 19.4k | mark_ce_used(ce, o); |
694 | 19.4k | if (ce_stage(ce)) { |
695 | 0 | if (o->skip_unmerged) { |
696 | 0 | add_entry(o, ce, 0, 0); |
697 | 0 | return 0; |
698 | 0 | } |
699 | 0 | } |
700 | 19.4k | ret = call_unpack_fn(src, o); |
701 | 19.4k | if (ce_stage(ce)) |
702 | 0 | mark_ce_used_same_name(ce, o); |
703 | 19.4k | return ret; |
704 | 19.4k | } |
705 | | |
706 | | static int find_cache_pos(struct traverse_info *, const char *p, size_t len); |
707 | | |
708 | | static void restore_cache_bottom(struct traverse_info *info, int bottom) |
709 | 0 | { |
710 | 0 | struct unpack_trees_options *o = info->data; |
711 | |
|
712 | 0 | if (o->diff_index_cached) |
713 | 0 | return; |
714 | 0 | o->cache_bottom = bottom; |
715 | 0 | } |
716 | | |
/*
 * Save the current cache_bottom (returned so the caller can later pass
 * it to restore_cache_bottom()) and reposition it for the subtree named
 * by `info`.  No-op (returns 0) for cached diff-index walks.
 */
static int switch_cache_bottom(struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int ret, pos;

	if (o->diff_index_cached)
		return 0;
	ret = o->cache_bottom;
	pos = find_cache_pos(info->prev, info->name, info->namelen);

	/*
	 * NOTE(review): find_cache_pos() appears to encode a position as
	 * -2 - pos when below -1, while -1 seems to mean "not found"
	 * (bottom jumps past the whole index) — confirm against its
	 * definition, which is not visible here.
	 */
	if (pos < -1)
		o->cache_bottom = -2 - pos;
	else if (pos < 0)
		o->cache_bottom = o->src_index->cache_nr;
	return ret;
}
733 | | |
734 | | static inline int are_same_oid(struct name_entry *name_j, struct name_entry *name_k) |
735 | 0 | { |
736 | 0 | return !is_null_oid(&name_j->oid) && !is_null_oid(&name_k->oid) && oideq(&name_j->oid, &name_k->oid); |
737 | 0 | } |
738 | | |
739 | | static int all_trees_same_as_cache_tree(int n, unsigned long dirmask, |
740 | | struct name_entry *names, |
741 | | struct traverse_info *info) |
742 | 0 | { |
743 | 0 | struct unpack_trees_options *o = info->data; |
744 | 0 | int i; |
745 | |
|
746 | 0 | if (!o->merge || dirmask != ((1 << n) - 1)) |
747 | 0 | return 0; |
748 | | |
749 | 0 | for (i = 1; i < n; i++) |
750 | 0 | if (!are_same_oid(names, names + i)) |
751 | 0 | return 0; |
752 | | |
753 | 0 | return cache_tree_matches_traversal(o->src_index->cache_tree, names, info); |
754 | 0 | } |
755 | | |
/*
 * Find the index position of the first entry under the directory named
 * by the current traversal path.  BUGs out if the index layout does not
 * match expectations (a non-sparse directory entry, or a position that
 * does not start the directory's run of entries).
 */
static int index_pos_by_traverse_info(struct name_entry *names,
				      struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	struct strbuf name = STRBUF_INIT;
	int pos;

	/* Build "path/to/dir/" — the trailing slash sorts before its contents. */
	strbuf_make_traverse_path(&name, info, names->path, names->pathlen);
	strbuf_addch(&name, '/');
	pos = index_name_pos(o->src_index, name.buf, name.len);
	if (pos >= 0) {
		/* Only a sparse-directory entry may match the name exactly. */
		if (!o->src_index->sparse_index ||
		    !(o->src_index->cache[pos]->ce_flags & CE_SKIP_WORKTREE))
			BUG("This is a directory and should not exist in index");
	} else {
		pos = -pos - 1;
	}
	/* pos must point at the first entry inside the directory. */
	if (pos >= o->src_index->cache_nr ||
	    !starts_with(o->src_index->cache[pos]->name, name.buf) ||
	    (pos > 0 && starts_with(o->src_index->cache[pos-1]->name, name.buf)))
		BUG("pos %d doesn't point to the first entry of %s in index",
		    pos, name.buf);
	strbuf_release(&name);
	return pos;
}
781 | | |
782 | | /* |
783 | | * Fast path if we detect that all trees are the same as cache-tree at this |
784 | | * path. We'll walk these trees in an iterative loop using cache-tree/index |
785 | | * instead of ODB since we already know what these trees contain. |
786 | | */ |
/*
 * Fast path if we detect that all trees are the same as cache-tree at this
 * path. We'll walk these trees in an iterative loop using cache-tree/index
 * instead of ODB since we already know what these trees contain.
 */
static int traverse_by_cache_tree(int pos, int nr_entries, int nr_names,
				  struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	struct cache_entry *tree_ce = NULL;	/* shared scratch entry for all tree slots */
	int ce_len = 0;				/* current allocation size of tree_ce */
	int i, d;

	if (!o->merge)
		BUG("We need cache-tree to do this optimization");

	/*
	 * Do what unpack_callback() and unpack_single_entry() normally
	 * do. But we walk all paths in an iterative loop instead.
	 *
	 * D/F conflicts and higher stage entries are not a concern
	 * because cache-tree would be invalidated and we would never
	 * get here in the first place.
	 */
	for (i = 0; i < nr_entries; i++) {
		int new_ce_len, len, rc;

		src[0] = o->src_index->cache[pos + i];

		len = ce_namelen(src[0]);
		new_ce_len = cache_entry_size(len);

		/* Grow the scratch entry geometrically when the name won't fit. */
		if (new_ce_len > ce_len) {
			new_ce_len <<= 1;
			tree_ce = xrealloc(tree_ce, new_ce_len);
			memset(tree_ce, 0, new_ce_len);
			ce_len = new_ce_len;

			tree_ce->ce_flags = create_ce_flags(0);

			/* Every tree slot points at the same scratch entry. */
			for (d = 1; d <= nr_names; d++)
				src[d] = tree_ce;
		}

		/* Impersonate the index entry in the tree slots. */
		tree_ce->ce_mode = src[0]->ce_mode;
		tree_ce->ce_namelen = len;
		oidcpy(&tree_ce->oid, &src[0]->oid);
		memcpy(tree_ce->name, src[0]->name, len + 1);

		rc = call_unpack_fn((const struct cache_entry * const *)src, o);
		if (rc < 0) {
			free(tree_ce);
			return rc;
		}

		mark_ce_used(src[0], o);
	}
	free(tree_ce);
	if (o->debug_unpack)
		printf("Unpacked %d entries from %s to %s using cache-tree\n",
		       nr_entries,
		       o->src_index->cache[pos]->name,
		       o->src_index->cache[pos + nr_entries - 1]->name);
	return 0;
}
848 | | |
static int traverse_trees_recursive(int n, unsigned long dirmask,
				    unsigned long df_conflicts,
				    struct name_entry *names,
				    struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int i, ret, bottom;
	int nr_buf = 0;	/* number of tree buffers we own and must free */
	struct tree_desc t[MAX_UNPACK_TREES];
	void *buf[MAX_UNPACK_TREES];
	struct traverse_info newinfo;
	struct name_entry *p;
	int nr_entries;

	/* Take the cache-tree fast path when every tree matches the index. */
	nr_entries = all_trees_same_as_cache_tree(n, dirmask, names, info);
	if (nr_entries > 0) {
		int pos = index_pos_by_traverse_info(names, info);

		if (!o->merge || df_conflicts)
			BUG("Wrong condition to get here buddy");

		/*
		 * All entries up to 'pos' must have been processed
		 * (i.e. marked CE_UNPACKED) at this point. But to be safe,
		 * save and restore cache_bottom anyway to not miss
		 * unprocessed entries before 'pos'.
		 */
		bottom = o->cache_bottom;
		ret = traverse_by_cache_tree(pos, nr_entries, n, info);
		o->cache_bottom = bottom;
		return ret;
	}

	/* Find the first name slot with a real entry for the child info. */
	p = names;
	while (!p->mode)
		p++;

	/* Set up traversal state for the subdirectory we descend into. */
	newinfo = *info;
	newinfo.prev = info;
	newinfo.pathspec = info->pathspec;
	newinfo.name = p->path;
	newinfo.namelen = p->pathlen;
	newinfo.mode = p->mode;
	newinfo.pathlen = st_add3(newinfo.pathlen, tree_entry_len(p), 1);
	newinfo.df_conflicts |= df_conflicts;

	/*
	 * Fetch the tree from the ODB for each peer directory in the
	 * n commits.
	 *
	 * For 2- and 3-way traversals, we try to avoid hitting the
	 * ODB twice for the same OID.  This should yield a nice speed
	 * up in checkouts and merges when the commits are similar.
	 *
	 * We don't bother doing the full O(n^2) search for larger n,
	 * because wider traversals don't happen that often and we
	 * avoid the search setup.
	 *
	 * When 2 peer OIDs are the same, we just copy the tree
	 * descriptor data.  This implicitly borrows the buffer
	 * data from the earlier cell.
	 */
	for (i = 0; i < n; i++, dirmask >>= 1) {
		if (i > 0 && are_same_oid(&names[i], &names[i - 1]))
			t[i] = t[i - 1];
		else if (i > 1 && are_same_oid(&names[i], &names[i - 2]))
			t[i] = t[i - 2];
		else {
			const struct object_id *oid = NULL;
			if (dirmask & 1)
				oid = &names[i].oid;
			/* Only freshly-read buffers go into buf[] for freeing. */
			buf[nr_buf++] = fill_tree_descriptor(the_repository, t + i, oid);
		}
	}

	bottom = switch_cache_bottom(&newinfo);
	ret = traverse_trees(o->src_index, n, t, &newinfo);
	restore_cache_bottom(&newinfo, bottom);

	for (i = 0; i < nr_buf; i++)
		free(buf[i]);

	return ret;
}
933 | | |
934 | | /* |
935 | | * Compare the traverse-path to the cache entry without actually |
936 | | * having to generate the textual representation of the traverse |
937 | | * path. |
938 | | * |
939 | | * NOTE! This *only* compares up to the size of the traverse path |
940 | | * itself - the caller needs to do the final check for the cache |
941 | | * entry having more data at the end! |
942 | | */ |
943 | | static int do_compare_entry_piecewise(const struct cache_entry *ce, |
944 | | const struct traverse_info *info, |
945 | | const char *name, size_t namelen, |
946 | | unsigned mode) |
947 | 0 | { |
948 | 0 | int pathlen, ce_len; |
949 | 0 | const char *ce_name; |
950 | |
|
951 | 0 | if (info->prev) { |
952 | 0 | int cmp = do_compare_entry_piecewise(ce, info->prev, |
953 | 0 | info->name, info->namelen, |
954 | 0 | info->mode); |
955 | 0 | if (cmp) |
956 | 0 | return cmp; |
957 | 0 | } |
958 | 0 | pathlen = info->pathlen; |
959 | 0 | ce_len = ce_namelen(ce); |
960 | | |
961 | | /* If ce_len < pathlen then we must have previously hit "name == directory" entry */ |
962 | 0 | if (ce_len < pathlen) |
963 | 0 | return -1; |
964 | | |
965 | 0 | ce_len -= pathlen; |
966 | 0 | ce_name = ce->name + pathlen; |
967 | |
|
968 | 0 | return df_name_compare(ce_name, ce_len, S_IFREG, name, namelen, mode); |
969 | 0 | } |
970 | | |
971 | | static int do_compare_entry(const struct cache_entry *ce, |
972 | | const struct traverse_info *info, |
973 | | const char *name, size_t namelen, |
974 | | unsigned mode) |
975 | 75.7k | { |
976 | 75.7k | int pathlen, ce_len; |
977 | 75.7k | const char *ce_name; |
978 | 75.7k | int cmp; |
979 | 75.7k | unsigned ce_mode; |
980 | | |
981 | | /* |
982 | | * If we have not precomputed the traverse path, it is quicker |
983 | | * to avoid doing so. But if we have precomputed it, |
984 | | * it is quicker to use the precomputed version. |
985 | | */ |
986 | 75.7k | if (!info->traverse_path) |
987 | 0 | return do_compare_entry_piecewise(ce, info, name, namelen, mode); |
988 | | |
989 | 75.7k | cmp = strncmp(ce->name, info->traverse_path, info->pathlen); |
990 | 75.7k | if (cmp) |
991 | 0 | return cmp; |
992 | | |
993 | 75.7k | pathlen = info->pathlen; |
994 | 75.7k | ce_len = ce_namelen(ce); |
995 | | |
996 | 75.7k | if (ce_len < pathlen) |
997 | 0 | return -1; |
998 | | |
999 | 75.7k | ce_len -= pathlen; |
1000 | 75.7k | ce_name = ce->name + pathlen; |
1001 | | |
1002 | 75.7k | ce_mode = S_ISSPARSEDIR(ce->ce_mode) ? S_IFDIR : S_IFREG; |
1003 | 75.7k | return df_name_compare(ce_name, ce_len, ce_mode, name, namelen, mode); |
1004 | 75.7k | } |
1005 | | |
1006 | | static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n) |
1007 | 75.7k | { |
1008 | 75.7k | int cmp = do_compare_entry(ce, info, n->path, n->pathlen, n->mode); |
1009 | 75.7k | if (cmp) |
1010 | 6.20k | return cmp; |
1011 | | |
1012 | | /* |
1013 | | * At this point, we know that we have a prefix match. If ce |
1014 | | * is a sparse directory, then allow an exact match. This only |
1015 | | * works when the input name is a directory, since ce->name |
1016 | | * ends in a directory separator. |
1017 | | */ |
1018 | 69.5k | if (S_ISSPARSEDIR(ce->ce_mode) && |
1019 | 69.5k | ce->ce_namelen == traverse_path_len(info, tree_entry_len(n)) + 1) |
1020 | 0 | return 0; |
1021 | | |
1022 | | /* |
1023 | | * Even if the beginning compared identically, the ce should |
1024 | | * compare as bigger than a directory leading up to it! |
1025 | | */ |
1026 | 69.5k | return ce_namelen(ce) > traverse_path_len(info, tree_entry_len(n)); |
1027 | 69.5k | } |
1028 | | |
1029 | | static int ce_in_traverse_path(const struct cache_entry *ce, |
1030 | | const struct traverse_info *info) |
1031 | 52.2k | { |
1032 | 52.2k | if (!info->prev) |
1033 | 52.2k | return 1; |
1034 | 0 | if (do_compare_entry(ce, info->prev, |
1035 | 0 | info->name, info->namelen, info->mode)) |
1036 | 0 | return 0; |
1037 | | /* |
1038 | | * If ce (blob) is the same name as the path (which is a tree |
1039 | | * we will be descending into), it won't be inside it. |
1040 | | */ |
1041 | 0 | return (info->pathlen < ce_namelen(ce)); |
1042 | 0 | } |
1043 | | |
1044 | | static struct cache_entry *create_ce_entry(const struct traverse_info *info, |
1045 | | const struct name_entry *n, |
1046 | | int stage, |
1047 | | struct index_state *istate, |
1048 | | int is_transient, |
1049 | | int is_sparse_directory) |
1050 | 69.5k | { |
1051 | 69.5k | size_t len = traverse_path_len(info, tree_entry_len(n)); |
1052 | 69.5k | size_t alloc_len = is_sparse_directory ? len + 1 : len; |
1053 | 69.5k | struct cache_entry *ce = |
1054 | 69.5k | is_transient ? |
1055 | 69.5k | make_empty_transient_cache_entry(alloc_len, NULL) : |
1056 | 69.5k | make_empty_cache_entry(istate, alloc_len); |
1057 | | |
1058 | 69.5k | ce->ce_mode = create_ce_mode(n->mode); |
1059 | 69.5k | ce->ce_flags = create_ce_flags(stage); |
1060 | 69.5k | ce->ce_namelen = len; |
1061 | 69.5k | oidcpy(&ce->oid, &n->oid); |
1062 | | /* len+1 because the cache_entry allocates space for NUL */ |
1063 | 69.5k | make_traverse_path(ce->name, len + 1, info, n->path, n->pathlen); |
1064 | | |
1065 | 69.5k | if (is_sparse_directory) { |
1066 | 0 | ce->name[len] = '/'; |
1067 | 0 | ce->name[len + 1] = '\0'; |
1068 | 0 | ce->ce_namelen++; |
1069 | 0 | ce->ce_flags |= CE_SKIP_WORKTREE; |
1070 | 0 | } |
1071 | | |
1072 | 69.5k | return ce; |
1073 | 69.5k | } |
1074 | | |
1075 | | /* |
1076 | | * Determine whether the path specified by 'p' should be unpacked as a new |
1077 | | * sparse directory in a sparse index. A new sparse directory 'A/': |
1078 | | * - must be outside the sparse cone. |
1079 | | * - must not already be in the index (i.e., no index entry with name 'A/' |
1080 | | * exists). |
1081 | | * - must not have any child entries in the index (i.e., no index entry |
1082 | | * 'A/<something>' exists). |
1083 | | * If 'p' meets the above requirements, return 1; otherwise, return 0. |
1084 | | */ |
1085 | | static int entry_is_new_sparse_dir(const struct traverse_info *info, |
1086 | | const struct name_entry *p) |
1087 | 0 | { |
1088 | 0 | int res, pos; |
1089 | 0 | struct strbuf dirpath = STRBUF_INIT; |
1090 | 0 | struct unpack_trees_options *o = info->data; |
1091 | |
|
1092 | 0 | if (!S_ISDIR(p->mode)) |
1093 | 0 | return 0; |
1094 | | |
1095 | | /* |
1096 | | * If the path is inside the sparse cone, it can't be a sparse directory. |
1097 | | */ |
1098 | 0 | strbuf_add(&dirpath, info->traverse_path, info->pathlen); |
1099 | 0 | strbuf_add(&dirpath, p->path, p->pathlen); |
1100 | 0 | strbuf_addch(&dirpath, '/'); |
1101 | 0 | if (path_in_cone_mode_sparse_checkout(dirpath.buf, o->src_index)) { |
1102 | 0 | res = 0; |
1103 | 0 | goto cleanup; |
1104 | 0 | } |
1105 | | |
1106 | 0 | pos = index_name_pos_sparse(o->src_index, dirpath.buf, dirpath.len); |
1107 | 0 | if (pos >= 0) { |
1108 | | /* Path is already in the index, not a new sparse dir */ |
1109 | 0 | res = 0; |
1110 | 0 | goto cleanup; |
1111 | 0 | } |
1112 | | |
1113 | | /* Where would this sparse dir be inserted into the index? */ |
1114 | 0 | pos = -pos - 1; |
1115 | 0 | if (pos >= o->src_index->cache_nr) { |
1116 | | /* |
1117 | | * Sparse dir would be inserted at the end of the index, so we |
1118 | | * know it has no child entries. |
1119 | | */ |
1120 | 0 | res = 1; |
1121 | 0 | goto cleanup; |
1122 | 0 | } |
1123 | | |
1124 | | /* |
1125 | | * If the dir has child entries in the index, the first would be at the |
1126 | | * position the sparse directory would be inserted. If the entry at this |
1127 | | * position is inside the dir, not a new sparse dir. |
1128 | | */ |
1129 | 0 | res = strncmp(o->src_index->cache[pos]->name, dirpath.buf, dirpath.len); |
1130 | |
|
1131 | 0 | cleanup: |
1132 | 0 | strbuf_release(&dirpath); |
1133 | 0 | return res; |
1134 | 0 | } |
1135 | | |
1136 | | /* |
1137 | | * Note that traverse_by_cache_tree() duplicates some logic in this function |
1138 | | * without actually calling it. If you change the logic here you may need to |
1139 | | * check and change there as well. |
1140 | | */ |
static int unpack_single_entry(int n, unsigned long mask,
			       unsigned long dirmask,
			       struct cache_entry **src,
			       const struct name_entry *names,
			       const struct traverse_info *info,
			       int *is_new_sparse_dir)
{
	int i;
	struct unpack_trees_options *o = info->data;
	unsigned long conflicts = info->df_conflicts | dirmask;
	const struct name_entry *p = names;

	*is_new_sparse_dir = 0;
	if (mask == dirmask && !src[0]) {
		/*
		 * If we're not in a sparse index, we can't unpack a directory
		 * without recursing into it, so we return.
		 */
		if (!o->src_index->sparse_index)
			return 0;

		/* Find first entry with a real name (we could use "mask" too) */
		while (!p->mode)
			p++;

		/*
		 * If the directory is completely missing from the index but
		 * would otherwise be a sparse directory, we should unpack it.
		 * If not, we'll return and continue recursively traversing the
		 * tree.
		 */
		*is_new_sparse_dir = entry_is_new_sparse_dir(info, p);
		if (!*is_new_sparse_dir)
			return 0;
	}

	/*
	 * When we are unpacking a sparse directory, then this isn't necessarily
	 * a directory-file conflict.
	 */
	if (mask == dirmask &&
	    (*is_new_sparse_dir || (src[0] && S_ISSPARSEDIR(src[0]->ce_mode))))
		conflicts = 0;

	/*
	 * Ok, we've filled in up to any potential index entry in src[0],
	 * now do the rest.
	 *
	 * Note: when o->merge is set, src[0] is the index entry and the
	 * tree entries are stored one slot later (src[i + 1]); hence the
	 * "i + o->merge" indexing below.
	 */
	for (i = 0; i < n; i++) {
		int stage;
		unsigned int bit = 1ul << i;
		if (conflicts & bit) {
			/* D/F conflict: use the shared sentinel entry. */
			src[i + o->merge] = o->df_conflict_entry;
			continue;
		}
		if (!(mask & bit))
			continue;
		/*
		 * Stage assignment relative to o->head_idx:
		 * trees before HEAD get stage 1 (common ancestor),
		 * HEAD itself stage 2 (ours), later trees stage 3 (theirs).
		 */
		if (!o->merge)
			stage = 0;
		else if (i + 1 < o->head_idx)
			stage = 1;
		else if (i + 1 > o->head_idx)
			stage = 3;
		else
			stage = 2;

		/*
		 * If the merge bit is set, then the cache entries are
		 * discarded in the following block.  In this case,
		 * construct "transient" cache_entries, as they are
		 * not stored in the index.  otherwise construct the
		 * cache entry from the index aware logic.
		 */
		src[i + o->merge] = create_ce_entry(info, names + i, stage,
						    &o->result, o->merge,
						    bit & dirmask);
	}

	if (o->merge) {
		/* Let the merge function decide; then free the transient entries. */
		int rc = call_unpack_fn((const struct cache_entry * const *)src,
					o);
		for (i = 0; i < n; i++) {
			struct cache_entry *ce = src[i + o->merge];
			if (ce != o->df_conflict_entry)
				discard_cache_entry(ce);
		}
		return rc;
	}

	/* Non-merge: add every real entry directly to the result index. */
	for (i = 0; i < n; i++)
		if (src[i] && src[i] != o->df_conflict_entry)
			if (do_add_entry(o, src[i], 0, 0))
				return -1;

	return 0;
}
1237 | | |
1238 | | static int unpack_failed(struct unpack_trees_options *o, const char *message) |
1239 | 12.4k | { |
1240 | 12.4k | discard_index(&o->result); |
1241 | 12.4k | if (!o->quiet && !o->exiting_early) { |
1242 | 0 | if (message) |
1243 | 0 | return error("%s", message); |
1244 | 0 | return -1; |
1245 | 0 | } |
1246 | 12.4k | return -1; |
1247 | 12.4k | } |
1248 | | |
1249 | | /* |
1250 | | * The tree traversal is looking at name p. If we have a matching entry, |
1251 | | * return it. If name p is a directory in the index, do not return |
1252 | | * anything, as we will want to match it when the traversal descends into |
1253 | | * the directory. |
1254 | | */ |
static int find_cache_pos(struct traverse_info *info,
			  const char *p, size_t p_len)
{
	/*
	 * Returns the index position of an exact match, the encoded value
	 * (-2 - pos) when the match is a directory whose return must be
	 * delayed, or -1 when nothing matches.
	 */
	int pos;
	struct unpack_trees_options *o = info->data;
	struct index_state *index = o->src_index;
	int pfxlen = info->pathlen;

	for (pos = o->cache_bottom; pos < index->cache_nr; pos++) {
		const struct cache_entry *ce = index->cache[pos];
		const char *ce_name, *ce_slash;
		int cmp, ce_len;

		if (ce->ce_flags & CE_UNPACKED) {
			/*
			 * cache_bottom entry is already unpacked, so
			 * we can never match it; don't check it
			 * again.
			 */
			if (pos == o->cache_bottom)
				++o->cache_bottom;
			continue;
		}
		if (!ce_in_traverse_path(ce, info)) {
			/*
			 * Check if we can skip future cache checks
			 * (because we're already past all possible
			 * entries in the traverse path).
			 */
			if (info->traverse_path) {
				if (strncmp(ce->name, info->traverse_path,
					    info->pathlen) > 0)
					break;
			}
			continue;
		}
		/* Compare only the first path component below the prefix. */
		ce_name = ce->name + pfxlen;
		ce_slash = strchr(ce_name, '/');
		if (ce_slash)
			ce_len = ce_slash - ce_name;
		else
			ce_len = ce_namelen(ce) - pfxlen;
		cmp = name_compare(p, p_len, ce_name, ce_len);
		/*
		 * Exact match; if we have a directory we need to
		 * delay returning it.
		 */
		if (!cmp)
			return ce_slash ? -2 - pos : pos;
		if (0 < cmp)
			continue; /* keep looking */
		/*
		 * ce_name sorts after p->path; could it be that we
		 * have files under p->path directory in the index?
		 * E.g.  ce_name == "t-i", and p->path == "t"; we may
		 * have "t/a" in the index.
		 */
		if (p_len < ce_len && !memcmp(ce_name, p, p_len) &&
		    ce_name[p_len] < '/')
			continue; /* keep looking */
		break;
	}
	return -1;
}
1319 | | |
1320 | | /* |
1321 | | * Given a sparse directory entry 'ce', compare ce->name to |
1322 | | * info->traverse_path + p->path + '/' if info->traverse_path |
1323 | | * is non-empty. |
1324 | | * |
1325 | | * Compare ce->name to p->path + '/' otherwise. Note that |
1326 | | * ce->name must end in a trailing '/' because it is a sparse |
1327 | | * directory entry. |
1328 | | */ |
1329 | | static int sparse_dir_matches_path(const struct cache_entry *ce, |
1330 | | struct traverse_info *info, |
1331 | | const struct name_entry *p) |
1332 | 0 | { |
1333 | 0 | assert(S_ISSPARSEDIR(ce->ce_mode)); |
1334 | 0 | assert(ce->name[ce->ce_namelen - 1] == '/'); |
1335 | | |
1336 | 0 | if (info->pathlen) |
1337 | 0 | return ce->ce_namelen == info->pathlen + p->pathlen + 1 && |
1338 | 0 | ce->name[info->pathlen - 1] == '/' && |
1339 | 0 | !strncmp(ce->name, info->traverse_path, info->pathlen) && |
1340 | 0 | !strncmp(ce->name + info->pathlen, p->path, p->pathlen); |
1341 | 0 | return ce->ce_namelen == p->pathlen + 1 && |
1342 | 0 | !strncmp(ce->name, p->path, p->pathlen); |
1343 | 0 | } |
1344 | | |
1345 | | static struct cache_entry *find_cache_entry(struct traverse_info *info, |
1346 | | const struct name_entry *p) |
1347 | 41.3k | { |
1348 | 41.3k | const char *path; |
1349 | 41.3k | int pos = find_cache_pos(info, p->path, p->pathlen); |
1350 | 41.3k | struct unpack_trees_options *o = info->data; |
1351 | | |
1352 | 41.3k | if (0 <= pos) |
1353 | 41.3k | return o->src_index->cache[pos]; |
1354 | | |
1355 | | /* |
1356 | | * Check for a sparse-directory entry named "path/". |
1357 | | * Due to the input p->path not having a trailing |
1358 | | * slash, the negative 'pos' value overshoots the |
1359 | | * expected position, hence "-2" instead of "-1". |
1360 | | */ |
1361 | 0 | pos = -pos - 2; |
1362 | |
|
1363 | 0 | if (pos < 0 || pos >= o->src_index->cache_nr) |
1364 | 0 | return NULL; |
1365 | | |
1366 | | /* |
1367 | | * Due to lexicographic sorting and sparse directory |
1368 | | * entries ending with a trailing slash, our path as a |
1369 | | * sparse directory (e.g "subdir/") and our path as a |
1370 | | * file (e.g. "subdir") might be separated by other |
1371 | | * paths (e.g. "subdir-"). |
1372 | | */ |
1373 | 0 | while (pos >= 0) { |
1374 | 0 | struct cache_entry *ce = o->src_index->cache[pos]; |
1375 | |
|
1376 | 0 | if (!skip_prefix(ce->name, info->traverse_path, &path) || |
1377 | 0 | strncmp(path, p->path, p->pathlen) || |
1378 | 0 | path[p->pathlen] != '/') |
1379 | 0 | return NULL; |
1380 | | |
1381 | 0 | if (S_ISSPARSEDIR(ce->ce_mode) && |
1382 | 0 | sparse_dir_matches_path(ce, info, p)) |
1383 | 0 | return ce; |
1384 | | |
1385 | 0 | pos--; |
1386 | 0 | } |
1387 | | |
1388 | 0 | return NULL; |
1389 | 0 | } |
1390 | | |
1391 | | static void debug_path(struct traverse_info *info) |
1392 | 0 | { |
1393 | 0 | if (info->prev) { |
1394 | 0 | debug_path(info->prev); |
1395 | 0 | if (*info->prev->name) |
1396 | 0 | putchar('/'); |
1397 | 0 | } |
1398 | 0 | printf("%s", info->name); |
1399 | 0 | } |
1400 | | |
1401 | | static void debug_name_entry(int i, struct name_entry *n) |
1402 | 0 | { |
1403 | 0 | printf("ent#%d %06o %s\n", i, |
1404 | 0 | n->path ? n->mode : 0, |
1405 | 0 | n->path ? n->path : "(missing)"); |
1406 | 0 | } |
1407 | | |
1408 | | static void debug_unpack_callback(int n, |
1409 | | unsigned long mask, |
1410 | | unsigned long dirmask, |
1411 | | struct name_entry *names, |
1412 | | struct traverse_info *info) |
1413 | 0 | { |
1414 | 0 | int i; |
1415 | 0 | printf("* unpack mask %lu, dirmask %lu, cnt %d ", |
1416 | 0 | mask, dirmask, n); |
1417 | 0 | debug_path(info); |
1418 | 0 | putchar('\n'); |
1419 | 0 | for (i = 0; i < n; i++) |
1420 | 0 | debug_name_entry(i, names + i); |
1421 | 0 | } |
1422 | | |
1423 | | /* |
1424 | | * Returns true if and only if the given cache_entry is a |
1425 | | * sparse-directory entry that matches the given name_entry |
1426 | | * from the tree walk at the given traverse_info. |
1427 | | */ |
1428 | | static int is_sparse_directory_entry(struct cache_entry *ce, |
1429 | | const struct name_entry *name, |
1430 | | struct traverse_info *info) |
1431 | 0 | { |
1432 | 0 | if (!ce || !name || !S_ISSPARSEDIR(ce->ce_mode)) |
1433 | 0 | return 0; |
1434 | | |
1435 | 0 | return sparse_dir_matches_path(ce, info, name); |
1436 | 0 | } |
1437 | | |
1438 | | static int unpack_sparse_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info) |
1439 | 0 | { |
1440 | 0 | struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, }; |
1441 | 0 | struct unpack_trees_options *o = info->data; |
1442 | 0 | int ret, is_new_sparse_dir; |
1443 | |
|
1444 | 0 | assert(o->merge); |
1445 | | |
1446 | | /* |
1447 | | * Unlike in 'unpack_callback', where src[0] is derived from the index when |
1448 | | * merging, src[0] is a transient cache entry derived from the first tree |
1449 | | * provided. Create the temporary entry as if it came from a non-sparse index. |
1450 | | */ |
1451 | 0 | if (!is_null_oid(&names[0].oid)) { |
1452 | 0 | src[0] = create_ce_entry(info, &names[0], 0, |
1453 | 0 | &o->result, 1, |
1454 | 0 | dirmask & (1ul << 0)); |
1455 | 0 | src[0]->ce_flags |= (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE); |
1456 | 0 | } |
1457 | | |
1458 | | /* |
1459 | | * 'unpack_single_entry' assumes that src[0] is derived directly from |
1460 | | * the index, rather than from an entry in 'names'. This is *not* true when |
1461 | | * merging a sparse directory, in which case names[0] is the "index" source |
1462 | | * entry. To match the expectations of 'unpack_single_entry', shift past the |
1463 | | * "index" tree (i.e., names[0]) and adjust 'names', 'n', 'mask', and |
1464 | | * 'dirmask' accordingly. |
1465 | | */ |
1466 | 0 | ret = unpack_single_entry(n - 1, mask >> 1, dirmask >> 1, src, names + 1, info, &is_new_sparse_dir); |
1467 | |
|
1468 | 0 | if (src[0]) |
1469 | 0 | discard_cache_entry(src[0]); |
1470 | |
|
1471 | 0 | return ret >= 0 ? mask : -1; |
1472 | 0 | } |
1473 | | |
1474 | | /* |
1475 | | * Note that traverse_by_cache_tree() duplicates some logic in this function |
1476 | | * without actually calling it. If you change the logic here you may need to |
1477 | | * check and change there as well. |
1478 | | */ |
static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	const struct name_entry *p = names;
	int is_new_sparse_dir;

	/* Find first entry with a real name (we could use "mask" too) */
	while (!p->mode)
		p++;

	if (o->debug_unpack)
		debug_unpack_callback(n, mask, dirmask, names, info);

	/* Are we supposed to look at the index too? */
	if (o->merge) {
		while (1) {
			int cmp;
			struct cache_entry *ce;

			if (o->diff_index_cached)
				ce = next_cache_entry(o);
			else
				ce = find_cache_entry(info, p);

			if (!ce)
				break;
			cmp = compare_entry(ce, info, p);
			if (cmp < 0) {
				/*
				 * The index entry sorts before the tree
				 * entry: unpack it on its own and look
				 * at the next index entry.
				 */
				if (unpack_index_entry(ce, o) < 0)
					return unpack_failed(o, NULL);
				continue;
			}
			if (!cmp) {
				if (ce_stage(ce)) {
					/*
					 * If we skip unmerged index
					 * entries, we'll skip this
					 * entry *and* the tree
					 * entries associated with it!
					 */
					if (o->skip_unmerged) {
						add_same_unmerged(ce, o);
						return mask;
					}
				}
				/* Matching index entry becomes src[0]. */
				src[0] = ce;
			}
			break;
		}
	}

	if (unpack_single_entry(n, mask, dirmask, src, names, info, &is_new_sparse_dir))
		return -1;

	if (o->merge && src[0]) {
		if (ce_stage(src[0]))
			mark_ce_used_same_name(src[0], o);
		else
			mark_ce_used(src[0], o);
	}

	/* Now handle any directories.. */
	if (dirmask) {
		/* special case: "diff-index --cached" looking at a tree */
		if (o->diff_index_cached &&
		    n == 1 && dirmask == 1 && S_ISDIR(names->mode)) {
			int matches;
			matches = cache_tree_matches_traversal(o->src_index->cache_tree,
							       names, info);
			/*
			 * Everything under the name matches; skip the
			 * entire hierarchy.  diff_index_cached codepath
			 * special cases D/F conflicts in such a way that
			 * it does not do any look-ahead, so this is safe.
			 */
			if (matches) {
				/*
				 * Only increment the cache_bottom if the
				 * directory isn't a sparse directory index
				 * entry (if it is, it was already incremented)
				 * in 'mark_ce_used()'
				 */
				if (!src[0] || !S_ISSPARSEDIR(src[0]->ce_mode))
					o->cache_bottom += matches;
				return mask;
			}
		}

		/*
		 * Recurse into the directory unless it is represented by a
		 * sparse directory entry (either existing or brand new).
		 */
		if (!is_sparse_directory_entry(src[0], p, info) &&
		    !is_new_sparse_dir &&
		    traverse_trees_recursive(n, dirmask, mask & ~dirmask,
					     names, info) < 0) {
			return -1;
		}

		return mask;
	}

	return mask;
}
1580 | | |
1581 | | static int clear_ce_flags_1(struct index_state *istate, |
1582 | | struct cache_entry **cache, int nr, |
1583 | | struct strbuf *prefix, |
1584 | | int select_mask, int clear_mask, |
1585 | | struct pattern_list *pl, |
1586 | | enum pattern_match_result default_match, |
1587 | | int progress_nr); |
1588 | | |
1589 | | /* Whole directory matching */ |
static int clear_ce_flags_dir(struct index_state *istate,
			      struct cache_entry **cache, int nr,
			      struct strbuf *prefix,
			      char *basename,
			      int select_mask, int clear_mask,
			      struct pattern_list *pl,
			      enum pattern_match_result default_match,
			      int progress_nr)
{
	struct cache_entry **cache_end;
	int dtype = DT_DIR;
	int rc;
	enum pattern_match_result ret, orig_ret;
	orig_ret = path_matches_pattern_list(prefix->buf, prefix->len,
					     basename, &dtype, pl, istate);

	/* Append the '/'; undone by the strbuf_setlen() at the bottom. */
	strbuf_addch(prefix, '/');

	/* If undecided, use matching result of parent dir in defval */
	if (orig_ret == UNDECIDED)
		ret = default_match;
	else
		ret = orig_ret;

	/* Find the first entry that no longer lives under this prefix. */
	for (cache_end = cache; cache_end != cache + nr; cache_end++) {
		struct cache_entry *ce = *cache_end;
		if (strncmp(ce->name, prefix->buf, prefix->len))
			break;
	}

	if (pl->use_cone_patterns && orig_ret == MATCHED_RECURSIVE) {
		/* Cone mode, whole dir matched: clear every entry directly. */
		struct cache_entry **ce = cache;
		rc = cache_end - cache;

		while (ce < cache_end) {
			(*ce)->ce_flags &= ~clear_mask;
			ce++;
		}
	} else if (pl->use_cone_patterns && orig_ret == NOT_MATCHED) {
		/* Cone mode, no match: skip the whole dir untouched. */
		rc = cache_end - cache;
	} else {
		/* Otherwise recurse entry by entry under this prefix. */
		rc = clear_ce_flags_1(istate, cache, cache_end - cache,
				      prefix,
				      select_mask, clear_mask,
				      pl, ret,
				      progress_nr);
	}

	/* Drop the trailing '/' added above. */
	strbuf_setlen(prefix, prefix->len - 1);
	return rc;
}
1641 | | |
1642 | | /* |
1643 | | * Traverse the index, find every entry that matches according to |
1644 | | * o->pl. Do "ce_flags &= ~clear_mask" on those entries. Return the |
1645 | | * number of traversed entries. |
1646 | | * |
1647 | | * If select_mask is non-zero, only entries whose ce_flags has on of |
1648 | | * those bits enabled are traversed. |
1649 | | * |
1650 | | * cache : pointer to an index entry |
1651 | | * prefix_len : an offset to its path |
1652 | | * |
1653 | | * The current path ("prefix") including the trailing '/' is |
1654 | | * cache[0]->name[0..(prefix_len-1)] |
1655 | | * Top level path has prefix_len zero. |
1656 | | */ |
1657 | | static int clear_ce_flags_1(struct index_state *istate, |
1658 | | struct cache_entry **cache, int nr, |
1659 | | struct strbuf *prefix, |
1660 | | int select_mask, int clear_mask, |
1661 | | struct pattern_list *pl, |
1662 | | enum pattern_match_result default_match, |
1663 | | int progress_nr) |
1664 | 0 | { |
1665 | 0 | struct cache_entry **cache_end = nr ? cache + nr : cache; |
1666 | | |
1667 | | /* |
1668 | | * Process all entries that have the given prefix and meet |
1669 | | * select_mask condition |
1670 | | */ |
1671 | 0 | while(cache != cache_end) { |
1672 | 0 | struct cache_entry *ce = *cache; |
1673 | 0 | const char *name, *slash; |
1674 | 0 | int len, dtype; |
1675 | 0 | enum pattern_match_result ret; |
1676 | |
|
1677 | 0 | display_progress(istate->progress, progress_nr); |
1678 | |
|
1679 | 0 | if (select_mask && !(ce->ce_flags & select_mask)) { |
1680 | 0 | cache++; |
1681 | 0 | progress_nr++; |
1682 | 0 | continue; |
1683 | 0 | } |
1684 | | |
1685 | 0 | if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len)) |
1686 | 0 | break; |
1687 | | |
1688 | 0 | name = ce->name + prefix->len; |
1689 | 0 | slash = strchr(name, '/'); |
1690 | | |
1691 | | /* If it's a directory, try whole directory match first */ |
1692 | 0 | if (slash) { |
1693 | 0 | int processed; |
1694 | |
|
1695 | 0 | len = slash - name; |
1696 | 0 | strbuf_add(prefix, name, len); |
1697 | |
|
1698 | 0 | processed = clear_ce_flags_dir(istate, cache, cache_end - cache, |
1699 | 0 | prefix, |
1700 | 0 | prefix->buf + prefix->len - len, |
1701 | 0 | select_mask, clear_mask, |
1702 | 0 | pl, default_match, |
1703 | 0 | progress_nr); |
1704 | | |
1705 | | /* clear_c_f_dir eats a whole dir already? */ |
1706 | 0 | if (processed) { |
1707 | 0 | cache += processed; |
1708 | 0 | progress_nr += processed; |
1709 | 0 | strbuf_setlen(prefix, prefix->len - len); |
1710 | 0 | continue; |
1711 | 0 | } |
1712 | | |
1713 | 0 | strbuf_addch(prefix, '/'); |
1714 | 0 | processed = clear_ce_flags_1(istate, cache, cache_end - cache, |
1715 | 0 | prefix, |
1716 | 0 | select_mask, clear_mask, pl, |
1717 | 0 | default_match, progress_nr); |
1718 | |
|
1719 | 0 | cache += processed; |
1720 | 0 | progress_nr += processed; |
1721 | |
|
1722 | 0 | strbuf_setlen(prefix, prefix->len - len - 1); |
1723 | 0 | continue; |
1724 | 0 | } |
1725 | | |
1726 | | /* Non-directory */ |
1727 | 0 | dtype = ce_to_dtype(ce); |
1728 | 0 | ret = path_matches_pattern_list(ce->name, |
1729 | 0 | ce_namelen(ce), |
1730 | 0 | name, &dtype, pl, istate); |
1731 | 0 | if (ret == UNDECIDED) |
1732 | 0 | ret = default_match; |
1733 | 0 | if (ret == MATCHED || ret == MATCHED_RECURSIVE) |
1734 | 0 | ce->ce_flags &= ~clear_mask; |
1735 | 0 | cache++; |
1736 | 0 | progress_nr++; |
1737 | 0 | } |
1738 | |
|
1739 | 0 | display_progress(istate->progress, progress_nr); |
1740 | 0 | return nr - (cache_end - cache); |
1741 | 0 | } |
1742 | | |
1743 | | static int clear_ce_flags(struct index_state *istate, |
1744 | | int select_mask, int clear_mask, |
1745 | | struct pattern_list *pl, |
1746 | | int show_progress) |
1747 | 0 | { |
1748 | 0 | static struct strbuf prefix = STRBUF_INIT; |
1749 | 0 | char label[100]; |
1750 | 0 | int rval; |
1751 | |
|
1752 | 0 | strbuf_reset(&prefix); |
1753 | 0 | if (show_progress) |
1754 | 0 | istate->progress = start_delayed_progress( |
1755 | 0 | _("Updating index flags"), |
1756 | 0 | istate->cache_nr); |
1757 | |
|
1758 | 0 | xsnprintf(label, sizeof(label), "clear_ce_flags(0x%08lx,0x%08lx)", |
1759 | 0 | (unsigned long)select_mask, (unsigned long)clear_mask); |
1760 | 0 | trace2_region_enter("unpack_trees", label, the_repository); |
1761 | 0 | rval = clear_ce_flags_1(istate, |
1762 | 0 | istate->cache, |
1763 | 0 | istate->cache_nr, |
1764 | 0 | &prefix, |
1765 | 0 | select_mask, clear_mask, |
1766 | 0 | pl, 0, 0); |
1767 | 0 | trace2_region_leave("unpack_trees", label, the_repository); |
1768 | |
|
1769 | 0 | stop_progress(&istate->progress); |
1770 | 0 | return rval; |
1771 | 0 | } |
1772 | | |
1773 | | /* |
1774 | | * Set/Clear CE_NEW_SKIP_WORKTREE according to $GIT_DIR/info/sparse-checkout |
1775 | | */ |
1776 | | static void mark_new_skip_worktree(struct pattern_list *pl, |
1777 | | struct index_state *istate, |
1778 | | int select_flag, int skip_wt_flag, |
1779 | | int show_progress) |
1780 | 0 | { |
1781 | 0 | int i; |
1782 | | |
1783 | | /* |
1784 | | * 1. Pretend the narrowest worktree: only unmerged entries |
1785 | | * are checked out |
1786 | | */ |
1787 | 0 | for (i = 0; i < istate->cache_nr; i++) { |
1788 | 0 | struct cache_entry *ce = istate->cache[i]; |
1789 | |
|
1790 | 0 | if (select_flag && !(ce->ce_flags & select_flag)) |
1791 | 0 | continue; |
1792 | | |
1793 | 0 | if (!ce_stage(ce) && !(ce->ce_flags & CE_CONFLICTED)) |
1794 | 0 | ce->ce_flags |= skip_wt_flag; |
1795 | 0 | else |
1796 | 0 | ce->ce_flags &= ~skip_wt_flag; |
1797 | 0 | } |
1798 | | |
1799 | | /* |
1800 | | * 2. Widen worktree according to sparse-checkout file. |
1801 | | * Matched entries will have skip_wt_flag cleared (i.e. "in") |
1802 | | */ |
1803 | 0 | clear_ce_flags(istate, select_flag, skip_wt_flag, pl, show_progress); |
1804 | 0 | } |
1805 | | |
1806 | | static void populate_from_existing_patterns(struct unpack_trees_options *o, |
1807 | | struct pattern_list *pl) |
1808 | 0 | { |
1809 | 0 | if (get_sparse_checkout_patterns(pl) < 0) |
1810 | 0 | o->skip_sparse_checkout = 1; |
1811 | 0 | else |
1812 | 0 | o->pl = pl; |
1813 | 0 | } |
1814 | | |
1815 | | static void update_sparsity_for_prefix(const char *prefix, |
1816 | | struct index_state *istate) |
1817 | 0 | { |
1818 | 0 | int prefix_len = strlen(prefix); |
1819 | 0 | struct strbuf ce_prefix = STRBUF_INIT; |
1820 | |
|
1821 | 0 | if (!istate->sparse_index) |
1822 | 0 | return; |
1823 | | |
1824 | 0 | while (prefix_len > 0 && prefix[prefix_len - 1] == '/') |
1825 | 0 | prefix_len--; |
1826 | |
|
1827 | 0 | if (prefix_len <= 0) |
1828 | 0 | BUG("Invalid prefix passed to update_sparsity_for_prefix"); |
1829 | | |
1830 | 0 | strbuf_grow(&ce_prefix, prefix_len + 1); |
1831 | 0 | strbuf_add(&ce_prefix, prefix, prefix_len); |
1832 | 0 | strbuf_addch(&ce_prefix, '/'); |
1833 | | |
1834 | | /* |
1835 | | * If the prefix points to a sparse directory or a path inside a sparse |
1836 | | * directory, the index should be expanded. This is accomplished in one |
1837 | | * of two ways: |
1838 | | * - if the prefix is inside a sparse directory, it will be expanded by |
1839 | | * the 'ensure_full_index(...)' call in 'index_name_pos(...)'. |
1840 | | * - if the prefix matches an existing sparse directory entry, |
1841 | | * 'index_name_pos(...)' will return its index position, triggering |
1842 | | * the 'ensure_full_index(...)' below. |
1843 | | */ |
1844 | 0 | if (!path_in_cone_mode_sparse_checkout(ce_prefix.buf, istate) && |
1845 | 0 | index_name_pos(istate, ce_prefix.buf, ce_prefix.len) >= 0) |
1846 | 0 | ensure_full_index(istate); |
1847 | |
|
1848 | 0 | strbuf_release(&ce_prefix); |
1849 | 0 | } |
1850 | | |
1851 | | static int verify_absent(const struct cache_entry *, |
1852 | | enum unpack_trees_error_types, |
1853 | | struct unpack_trees_options *); |
1854 | | /* |
1855 | | * N-way merge "len" trees. Returns 0 on success, -1 on failure to manipulate the |
1856 | | * resulting index, -2 on failure to reflect the changes to the work tree. |
1857 | | * |
1858 | | * CE_ADDED, CE_UNPACKED and CE_NEW_SKIP_WORKTREE are used internally |
1859 | | */ |
1860 | | int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o) |
1861 | 15.8k | { |
1862 | 15.8k | struct repository *repo = the_repository; |
1863 | 15.8k | int i, ret; |
1864 | 15.8k | static struct cache_entry *dfc; |
1865 | 15.8k | struct pattern_list pl; |
1866 | 15.8k | int free_pattern_list = 0; |
1867 | 15.8k | struct dir_struct dir = DIR_INIT; |
1868 | | |
1869 | 15.8k | if (o->reset == UNPACK_RESET_INVALID) |
1870 | 0 | BUG("o->reset had a value of 1; should be UNPACK_TREES_*_UNTRACKED"); |
1871 | | |
1872 | 15.8k | if (len > MAX_UNPACK_TREES) |
1873 | 0 | die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES); |
1874 | 15.8k | if (o->dir) |
1875 | 0 | BUG("o->dir is for internal use only"); |
1876 | | |
1877 | 15.8k | trace_performance_enter(); |
1878 | 15.8k | trace2_region_enter("unpack_trees", "unpack_trees", the_repository); |
1879 | | |
1880 | 15.8k | prepare_repo_settings(repo); |
1881 | 15.8k | if (repo->settings.command_requires_full_index) { |
1882 | 0 | ensure_full_index(o->src_index); |
1883 | 0 | if (o->dst_index) |
1884 | 0 | ensure_full_index(o->dst_index); |
1885 | 0 | } |
1886 | | |
1887 | 15.8k | if (o->reset == UNPACK_RESET_OVERWRITE_UNTRACKED && |
1888 | 15.8k | o->preserve_ignored) |
1889 | 0 | BUG("UNPACK_RESET_OVERWRITE_UNTRACKED incompatible with preserved ignored files"); |
1890 | | |
1891 | 15.8k | if (!o->preserve_ignored) { |
1892 | 15.8k | o->dir = &dir; |
1893 | 15.8k | o->dir->flags |= DIR_SHOW_IGNORED; |
1894 | 15.8k | setup_standard_excludes(o->dir); |
1895 | 15.8k | } |
1896 | | |
1897 | 15.8k | if (o->prefix) |
1898 | 0 | update_sparsity_for_prefix(o->prefix, o->src_index); |
1899 | | |
1900 | 15.8k | if (!core_apply_sparse_checkout || !o->update) |
1901 | 15.8k | o->skip_sparse_checkout = 1; |
1902 | 15.8k | if (!o->skip_sparse_checkout && !o->pl) { |
1903 | 0 | memset(&pl, 0, sizeof(pl)); |
1904 | 0 | free_pattern_list = 1; |
1905 | 0 | populate_from_existing_patterns(o, &pl); |
1906 | 0 | } |
1907 | | |
1908 | 15.8k | index_state_init(&o->result, o->src_index->repo); |
1909 | 15.8k | o->result.initialized = 1; |
1910 | 15.8k | o->result.timestamp.sec = o->src_index->timestamp.sec; |
1911 | 15.8k | o->result.timestamp.nsec = o->src_index->timestamp.nsec; |
1912 | 15.8k | o->result.version = o->src_index->version; |
1913 | 15.8k | if (!o->src_index->split_index) { |
1914 | 15.8k | o->result.split_index = NULL; |
1915 | 15.8k | } else if (o->src_index == o->dst_index) { |
1916 | | /* |
1917 | | * o->dst_index (and thus o->src_index) will be discarded |
1918 | | * and overwritten with o->result at the end of this function, |
1919 | | * so just use src_index's split_index to avoid having to |
1920 | | * create a new one. |
1921 | | */ |
1922 | 0 | o->result.split_index = o->src_index->split_index; |
1923 | 0 | o->result.split_index->refcount++; |
1924 | 0 | } else { |
1925 | 0 | o->result.split_index = init_split_index(&o->result); |
1926 | 0 | } |
1927 | 15.8k | oidcpy(&o->result.oid, &o->src_index->oid); |
1928 | 15.8k | o->merge_size = len; |
1929 | 15.8k | mark_all_ce_unused(o->src_index); |
1930 | | |
1931 | 15.8k | o->result.fsmonitor_last_update = |
1932 | 15.8k | xstrdup_or_null(o->src_index->fsmonitor_last_update); |
1933 | 15.8k | o->result.fsmonitor_has_run_once = o->src_index->fsmonitor_has_run_once; |
1934 | | |
1935 | 15.8k | if (!o->src_index->initialized && |
1936 | 15.8k | !repo->settings.command_requires_full_index && |
1937 | 15.8k | is_sparse_index_allowed(&o->result, 0)) |
1938 | 0 | o->result.sparse_index = 1; |
1939 | | |
1940 | | /* |
1941 | | * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries |
1942 | | */ |
1943 | 15.8k | if (!o->skip_sparse_checkout) |
1944 | 0 | mark_new_skip_worktree(o->pl, o->src_index, 0, |
1945 | 0 | CE_NEW_SKIP_WORKTREE, o->verbose_update); |
1946 | | |
1947 | 15.8k | if (!dfc) |
1948 | 1 | dfc = xcalloc(1, cache_entry_size(0)); |
1949 | 15.8k | o->df_conflict_entry = dfc; |
1950 | | |
1951 | 15.8k | if (len) { |
1952 | 15.8k | const char *prefix = o->prefix ? o->prefix : ""; |
1953 | 15.8k | struct traverse_info info; |
1954 | | |
1955 | 15.8k | setup_traverse_info(&info, prefix); |
1956 | 15.8k | info.fn = unpack_callback; |
1957 | 15.8k | info.data = o; |
1958 | 15.8k | info.show_all_errors = o->show_all_errors; |
1959 | 15.8k | info.pathspec = o->pathspec; |
1960 | | |
1961 | 15.8k | if (o->prefix) { |
1962 | | /* |
1963 | | * Unpack existing index entries that sort before the |
1964 | | * prefix the tree is spliced into. Note that o->merge |
1965 | | * is always true in this case. |
1966 | | */ |
1967 | 0 | while (1) { |
1968 | 0 | struct cache_entry *ce = next_cache_entry(o); |
1969 | 0 | if (!ce) |
1970 | 0 | break; |
1971 | 0 | if (ce_in_traverse_path(ce, &info)) |
1972 | 0 | break; |
1973 | 0 | if (unpack_index_entry(ce, o) < 0) |
1974 | 0 | goto return_failed; |
1975 | 0 | } |
1976 | 0 | } |
1977 | | |
1978 | 15.8k | trace_performance_enter(); |
1979 | 15.8k | trace2_region_enter("unpack_trees", "traverse_trees", the_repository); |
1980 | 15.8k | ret = traverse_trees(o->src_index, len, t, &info); |
1981 | 15.8k | trace2_region_leave("unpack_trees", "traverse_trees", the_repository); |
1982 | 15.8k | trace_performance_leave("traverse_trees"); |
1983 | 15.8k | if (ret < 0) |
1984 | 6.20k | goto return_failed; |
1985 | 15.8k | } |
1986 | | |
1987 | | /* Any left-over entries in the index? */ |
1988 | 9.68k | if (o->merge) { |
1989 | 22.9k | while (1) { |
1990 | 22.9k | struct cache_entry *ce = next_cache_entry(o); |
1991 | 22.9k | if (!ce) |
1992 | 9.68k | break; |
1993 | 13.2k | if (unpack_index_entry(ce, o) < 0) |
1994 | 0 | goto return_failed; |
1995 | 13.2k | } |
1996 | 9.68k | } |
1997 | 9.68k | mark_all_ce_unused(o->src_index); |
1998 | | |
1999 | 9.68k | if (o->trivial_merges_only && o->nontrivial_merge) { |
2000 | 0 | ret = unpack_failed(o, "Merge requires file-level merging"); |
2001 | 0 | goto done; |
2002 | 0 | } |
2003 | | |
2004 | 9.68k | if (!o->skip_sparse_checkout) { |
2005 | | /* |
2006 | | * Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #1 |
2007 | | * If they will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE |
2008 | | * so apply_sparse_checkout() won't attempt to remove it from worktree |
2009 | | */ |
2010 | 0 | mark_new_skip_worktree(o->pl, &o->result, |
2011 | 0 | CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE, |
2012 | 0 | o->verbose_update); |
2013 | |
|
2014 | 0 | ret = 0; |
2015 | 0 | for (i = 0; i < o->result.cache_nr; i++) { |
2016 | 0 | struct cache_entry *ce = o->result.cache[i]; |
2017 | | |
2018 | | /* |
2019 | | * Entries marked with CE_ADDED in merged_entry() do not have |
2020 | | * verify_absent() check (the check is effectively disabled |
2021 | | * because CE_NEW_SKIP_WORKTREE is set unconditionally). |
2022 | | * |
2023 | | * Do the real check now because we have had |
2024 | | * correct CE_NEW_SKIP_WORKTREE |
2025 | | */ |
2026 | 0 | if (ce->ce_flags & CE_ADDED && |
2027 | 0 | verify_absent(ce, WARNING_SPARSE_ORPHANED_NOT_OVERWRITTEN, o)) |
2028 | 0 | ret = 1; |
2029 | |
|
2030 | 0 | if (apply_sparse_checkout(&o->result, ce, o)) |
2031 | 0 | ret = 1; |
2032 | 0 | } |
2033 | 0 | if (ret == 1) { |
2034 | | /* |
2035 | | * Inability to sparsify or de-sparsify individual |
2036 | | * paths is not an error, but just a warning. |
2037 | | */ |
2038 | 0 | if (o->show_all_errors) |
2039 | 0 | display_warning_msgs(o); |
2040 | 0 | ret = 0; |
2041 | 0 | } |
2042 | 0 | } |
2043 | | |
2044 | 9.68k | ret = check_updates(o, &o->result) ? (-2) : 0; |
2045 | 9.68k | if (o->dst_index) { |
2046 | 0 | move_index_extensions(&o->result, o->src_index); |
2047 | 0 | if (!ret) { |
2048 | 0 | if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0)) |
2049 | 0 | cache_tree_verify(the_repository, &o->result); |
2050 | 0 | if (!o->skip_cache_tree_update && |
2051 | 0 | !cache_tree_fully_valid(o->result.cache_tree)) |
2052 | 0 | cache_tree_update(&o->result, |
2053 | 0 | WRITE_TREE_SILENT | |
2054 | 0 | WRITE_TREE_REPAIR); |
2055 | 0 | } |
2056 | |
|
2057 | 0 | o->result.updated_workdir = 1; |
2058 | 0 | discard_index(o->dst_index); |
2059 | 0 | *o->dst_index = o->result; |
2060 | 9.68k | } else { |
2061 | 9.68k | discard_index(&o->result); |
2062 | 9.68k | } |
2063 | 9.68k | o->src_index = NULL; |
2064 | | |
2065 | 15.8k | done: |
2066 | 15.8k | if (free_pattern_list) |
2067 | 0 | clear_pattern_list(&pl); |
2068 | 15.8k | if (o->dir) { |
2069 | 15.8k | dir_clear(o->dir); |
2070 | 15.8k | o->dir = NULL; |
2071 | 15.8k | } |
2072 | 15.8k | trace2_region_leave("unpack_trees", "unpack_trees", the_repository); |
2073 | 15.8k | trace_performance_leave("unpack_trees"); |
2074 | 15.8k | return ret; |
2075 | | |
2076 | 6.20k | return_failed: |
2077 | 6.20k | if (o->show_all_errors) |
2078 | 0 | display_error_msgs(o); |
2079 | 6.20k | mark_all_ce_unused(o->src_index); |
2080 | 6.20k | ret = unpack_failed(o, NULL); |
2081 | 6.20k | if (o->exiting_early) |
2082 | 6.20k | ret = 0; |
2083 | 6.20k | goto done; |
2084 | 9.68k | } |
2085 | | |
2086 | | /* |
2087 | | * Update SKIP_WORKTREE bits according to sparsity patterns, and update |
2088 | | * working directory to match. |
2089 | | * |
2090 | | * CE_NEW_SKIP_WORKTREE is used internally. |
2091 | | */ |
2092 | | enum update_sparsity_result update_sparsity(struct unpack_trees_options *o) |
2093 | 0 | { |
2094 | 0 | enum update_sparsity_result ret = UPDATE_SPARSITY_SUCCESS; |
2095 | 0 | struct pattern_list pl; |
2096 | 0 | int i; |
2097 | 0 | unsigned old_show_all_errors; |
2098 | 0 | int free_pattern_list = 0; |
2099 | |
|
2100 | 0 | old_show_all_errors = o->show_all_errors; |
2101 | 0 | o->show_all_errors = 1; |
2102 | | |
2103 | | /* Sanity checks */ |
2104 | 0 | if (!o->update || o->index_only || o->skip_sparse_checkout) |
2105 | 0 | BUG("update_sparsity() is for reflecting sparsity patterns in working directory"); |
2106 | 0 | if (o->src_index != o->dst_index || o->fn) |
2107 | 0 | BUG("update_sparsity() called wrong"); |
2108 | | |
2109 | 0 | trace_performance_enter(); |
2110 | | |
2111 | | /* If we weren't given patterns, use the recorded ones */ |
2112 | 0 | if (!o->pl) { |
2113 | 0 | memset(&pl, 0, sizeof(pl)); |
2114 | 0 | free_pattern_list = 1; |
2115 | 0 | populate_from_existing_patterns(o, &pl); |
2116 | 0 | if (o->skip_sparse_checkout) |
2117 | 0 | goto skip_sparse_checkout; |
2118 | 0 | } |
2119 | | |
2120 | | /* Expand sparse directories as needed */ |
2121 | 0 | expand_index(o->src_index, o->pl); |
2122 | | |
2123 | | /* Set NEW_SKIP_WORKTREE on existing entries. */ |
2124 | 0 | mark_all_ce_unused(o->src_index); |
2125 | 0 | mark_new_skip_worktree(o->pl, o->src_index, 0, |
2126 | 0 | CE_NEW_SKIP_WORKTREE, o->verbose_update); |
2127 | | |
2128 | | /* Then loop over entries and update/remove as needed */ |
2129 | 0 | ret = UPDATE_SPARSITY_SUCCESS; |
2130 | 0 | for (i = 0; i < o->src_index->cache_nr; i++) { |
2131 | 0 | struct cache_entry *ce = o->src_index->cache[i]; |
2132 | | |
2133 | |
|
2134 | 0 | if (ce_stage(ce)) { |
2135 | | /* -1 because for loop will increment by 1 */ |
2136 | 0 | i += warn_conflicted_path(o->src_index, i, o) - 1; |
2137 | 0 | ret = UPDATE_SPARSITY_WARNINGS; |
2138 | 0 | continue; |
2139 | 0 | } |
2140 | | |
2141 | 0 | if (apply_sparse_checkout(o->src_index, ce, o)) |
2142 | 0 | ret = UPDATE_SPARSITY_WARNINGS; |
2143 | 0 | } |
2144 | |
|
2145 | 0 | skip_sparse_checkout: |
2146 | 0 | if (check_updates(o, o->src_index)) |
2147 | 0 | ret = UPDATE_SPARSITY_WORKTREE_UPDATE_FAILURES; |
2148 | |
|
2149 | 0 | display_warning_msgs(o); |
2150 | 0 | o->show_all_errors = old_show_all_errors; |
2151 | 0 | if (free_pattern_list) |
2152 | 0 | clear_pattern_list(&pl); |
2153 | 0 | trace_performance_leave("update_sparsity"); |
2154 | 0 | return ret; |
2155 | 0 | } |
2156 | | |
2157 | | /* Here come the merge functions */ |
2158 | | |
/* Report 'ce' as a path that the merge would overwrite. */
static int reject_merge(const struct cache_entry *ce,
			struct unpack_trees_options *o)
{
	return add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);
}
2164 | | |
2165 | | static int same(const struct cache_entry *a, const struct cache_entry *b) |
2166 | 0 | { |
2167 | 0 | if (!!a != !!b) |
2168 | 0 | return 0; |
2169 | 0 | if (!a && !b) |
2170 | 0 | return 1; |
2171 | 0 | if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED) |
2172 | 0 | return 0; |
2173 | 0 | return a->ce_mode == b->ce_mode && |
2174 | 0 | oideq(&a->oid, &b->oid); |
2175 | 0 | } |
2176 | | |
2177 | | |
2178 | | /* |
2179 | | * When a CE gets turned into an unmerged entry, we |
2180 | | * want it to be up-to-date |
2181 | | */ |
2182 | | static int verify_uptodate_1(const struct cache_entry *ce, |
2183 | | struct unpack_trees_options *o, |
2184 | | enum unpack_trees_error_types error_type) |
2185 | 0 | { |
2186 | 0 | struct stat st; |
2187 | |
|
2188 | 0 | if (o->index_only) |
2189 | 0 | return 0; |
2190 | | |
2191 | | /* |
2192 | | * CE_VALID and CE_SKIP_WORKTREE cheat, we better check again |
2193 | | * if this entry is truly up-to-date because this file may be |
2194 | | * overwritten. |
2195 | | */ |
2196 | 0 | if ((ce->ce_flags & CE_VALID) || ce_skip_worktree(ce)) |
2197 | 0 | ; /* keep checking */ |
2198 | 0 | else if (o->reset || ce_uptodate(ce)) |
2199 | 0 | return 0; |
2200 | | |
2201 | 0 | if (!lstat(ce->name, &st)) { |
2202 | 0 | int flags = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE; |
2203 | 0 | unsigned changed = ie_match_stat(o->src_index, ce, &st, flags); |
2204 | |
|
2205 | 0 | if (submodule_from_ce(ce)) { |
2206 | 0 | int r = check_submodule_move_head(ce, |
2207 | 0 | "HEAD", oid_to_hex(&ce->oid), o); |
2208 | 0 | if (r) |
2209 | 0 | return add_rejected_path(o, error_type, ce->name); |
2210 | 0 | return 0; |
2211 | 0 | } |
2212 | | |
2213 | 0 | if (!changed) |
2214 | 0 | return 0; |
2215 | | /* |
2216 | | * Historic default policy was to allow submodule to be out |
2217 | | * of sync wrt the superproject index. If the submodule was |
2218 | | * not considered interesting above, we don't care here. |
2219 | | */ |
2220 | 0 | if (S_ISGITLINK(ce->ce_mode)) |
2221 | 0 | return 0; |
2222 | | |
2223 | 0 | errno = 0; |
2224 | 0 | } |
2225 | 0 | if (errno == ENOENT) |
2226 | 0 | return 0; |
2227 | 0 | return add_rejected_path(o, error_type, ce->name); |
2228 | 0 | } |
2229 | | |
2230 | | int verify_uptodate(const struct cache_entry *ce, |
2231 | | struct unpack_trees_options *o) |
2232 | 0 | { |
2233 | 0 | if (!o->skip_sparse_checkout && |
2234 | 0 | (ce->ce_flags & CE_SKIP_WORKTREE) && |
2235 | 0 | (ce->ce_flags & CE_NEW_SKIP_WORKTREE)) |
2236 | 0 | return 0; |
2237 | 0 | return verify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE); |
2238 | 0 | } |
2239 | | |
/* Like verify_uptodate(), but failures are reported as sparse warnings. */
static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o)
{
	return verify_uptodate_1(ce, o, WARNING_SPARSE_NOT_UPTODATE_FILE);
}
2245 | | |
2246 | | /* |
2247 | | * TODO: We should actually invalidate o->result, not src_index [1]. |
2248 | | * But since cache tree and untracked cache both are not copied to |
2249 | | * o->result until unpacking is complete, we invalidate them on |
2250 | | * src_index instead with the assumption that they will be copied to |
2251 | | * dst_index at the end. |
2252 | | * |
2253 | | * [1] src_index->cache_tree is also used in unpack_callback() so if |
2254 | | * we invalidate o->result, we need to update it to use |
2255 | | * o->result.cache_tree as well. |
2256 | | */ |
2257 | | static void invalidate_ce_path(const struct cache_entry *ce, |
2258 | | struct unpack_trees_options *o) |
2259 | 0 | { |
2260 | 0 | if (!ce) |
2261 | 0 | return; |
2262 | 0 | cache_tree_invalidate_path(o->src_index, ce->name); |
2263 | 0 | untracked_cache_invalidate_path(o->src_index, ce->name, 1); |
2264 | 0 | } |
2265 | | |
2266 | | /* |
2267 | | * Check that checking out ce->sha1 in subdir ce->name is not |
2268 | | * going to overwrite any working files. |
2269 | | */ |
2270 | | static int verify_clean_submodule(const char *old_sha1, |
2271 | | const struct cache_entry *ce, |
2272 | | struct unpack_trees_options *o) |
2273 | 0 | { |
2274 | 0 | if (!submodule_from_ce(ce)) |
2275 | 0 | return 0; |
2276 | | |
2277 | 0 | return check_submodule_move_head(ce, old_sha1, |
2278 | 0 | oid_to_hex(&ce->oid), o); |
2279 | 0 | } |
2280 | | |
/*
 * We are about to extract the blob "ce->name" where a directory of the
 * same name currently exists; make sure nothing of value is lost.
 * Returns the number of index entries under that directory that were
 * scheduled for removal, or a negative value on rejection/error.
 */
static int verify_clean_subdirectory(const struct cache_entry *ce,
				     struct unpack_trees_options *o)
{
	/*
	 * we are about to extract "ce->name"; we would not want to lose
	 * anything in the existing directory there.
	 */
	int namelen;
	int i;
	struct dir_struct d;
	char *pathbuf;
	int cnt = 0;

	if (S_ISGITLINK(ce->ce_mode)) {
		struct object_id oid;
		int sub_head = resolve_gitlink_ref(ce->name, "HEAD", &oid);
		/*
		 * If we are not going to update the submodule, then
		 * we don't care.
		 */
		if (!sub_head && oideq(&oid, &ce->oid))
			return 0;
		return verify_clean_submodule(sub_head ? NULL : oid_to_hex(&oid),
					      ce, o);
	}

	/*
	 * First let's make sure we do not have a local modification
	 * in that directory.
	 */
	namelen = ce_namelen(ce);
	/* Scan the (sorted) index entries that live under ce->name/. */
	for (i = locate_in_src_index(ce, o);
	     i < o->src_index->cache_nr;
	     i++) {
		struct cache_entry *ce2 = o->src_index->cache[i];
		int len = ce_namelen(ce2);
		if (len < namelen ||
		    strncmp(ce->name, ce2->name, namelen) ||
		    ce2->name[namelen] != '/')
			break;
		/*
		 * ce2->name is an entry in the subdirectory to be
		 * removed.
		 */
		if (!ce_stage(ce2)) {
			if (verify_uptodate(ce2, o))
				return -1;
			add_entry(o, ce2, CE_REMOVE, 0);
			invalidate_ce_path(ce, o);
			mark_ce_used(ce2, o);
		}
		cnt++;
	}

	/* Do not lose a locally present file that is not ignored. */
	pathbuf = xstrfmt("%.*s/", namelen, ce->name);

	memset(&d, 0, sizeof(d));
	if (o->dir)
		d.exclude_per_dir = o->dir->exclude_per_dir;
	/* Non-zero means untracked, non-ignored files exist in the dir. */
	i = read_directory(&d, o->src_index, pathbuf, namelen+1, NULL);
	dir_clear(&d);
	free(pathbuf);
	if (i)
		return add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);

	/* Do not lose startup_info->original_cwd */
	if (startup_info->original_cwd &&
	    !strcmp(startup_info->original_cwd, ce->name))
		return add_rejected_path(o, ERROR_CWD_IN_THE_WAY, ce->name);

	return cnt;
}
2354 | | |
2355 | | /* |
2356 | | * This gets called when there was no index entry for the tree entry 'dst', |
2357 | | * but we found a file in the working tree that 'lstat()' said was fine, |
2358 | | * and we're on a case-insensitive filesystem. |
2359 | | * |
2360 | | * See if we can find a case-insensitive match in the index that also |
2361 | | * matches the stat information, and assume it's that other file! |
2362 | | */ |
2363 | | static int icase_exists(struct unpack_trees_options *o, const char *name, int len, struct stat *st) |
2364 | 0 | { |
2365 | 0 | const struct cache_entry *src; |
2366 | |
|
2367 | 0 | src = index_file_exists(o->src_index, name, len, 1); |
2368 | 0 | return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE); |
2369 | 0 | } |
2370 | | |
/*
 * How strict verify_absent_1()/check_ok_to_remove() should be:
 * COMPLETELY_ABSENT rejects any file in the way, while
 * ABSENT_ANY_DIRECTORY only objects when a directory is in the way.
 */
enum absent_checking_type {
	COMPLETELY_ABSENT,
	ABSENT_ANY_DIRECTORY
};
2375 | | |
/*
 * Decide whether the working-tree path 'name' (which lstat()ed to *st)
 * may be removed/overwritten. Returns 0 when removal is OK, negative on
 * hard rejection, otherwise the result of recording a rejected path.
 */
static int check_ok_to_remove(const char *name, int len, int dtype,
			      const struct cache_entry *ce, struct stat *st,
			      enum unpack_trees_error_types error_type,
			      enum absent_checking_type absent_type,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *result;

	/*
	 * It may be that the 'lstat()' succeeded even though
	 * target 'ce' was absent, because there is an old
	 * entry that is different only in case..
	 *
	 * Ignore that lstat() if it matches.
	 */
	if (ignore_case && icase_exists(o, name, len, st))
		return 0;

	if (o->dir &&
	    is_excluded(o->dir, o->src_index, name, &dtype))
		/*
		 * ce->name is explicitly excluded, so it is Ok to
		 * overwrite it.
		 */
		return 0;
	if (S_ISDIR(st->st_mode)) {
		/*
		 * We are checking out path "foo" and
		 * found "foo/." in the working tree.
		 * This is tricky -- if we have modified
		 * files that are in "foo/" we would lose
		 * them.
		 */
		if (verify_clean_subdirectory(ce, o) < 0)
			return -1;
		return 0;
	}

	/* If we only care about directories, then we can remove */
	if (absent_type == ABSENT_ANY_DIRECTORY)
		return 0;

	/*
	 * The previous round may already have decided to
	 * delete this path, which is in a subdirectory that
	 * is being replaced with a blob.
	 */
	result = index_file_exists(&o->result, name, len, 0);
	if (result) {
		if (result->ce_flags & CE_REMOVE)
			return 0;
	}

	return add_rejected_path(o, error_type, name);
}
2431 | | |
2432 | | /* |
2433 | | * We do not want to remove or overwrite a working tree file that |
2434 | | * is not tracked, unless it is ignored. |
2435 | | */ |
2436 | | static int verify_absent_1(const struct cache_entry *ce, |
2437 | | enum unpack_trees_error_types error_type, |
2438 | | enum absent_checking_type absent_type, |
2439 | | struct unpack_trees_options *o) |
2440 | 0 | { |
2441 | 0 | int len; |
2442 | 0 | struct stat st; |
2443 | |
|
2444 | 0 | if (o->index_only || !o->update) |
2445 | 0 | return 0; |
2446 | | |
2447 | 0 | if (o->reset == UNPACK_RESET_OVERWRITE_UNTRACKED) { |
2448 | | /* Avoid nuking startup_info->original_cwd... */ |
2449 | 0 | if (startup_info->original_cwd && |
2450 | 0 | !strcmp(startup_info->original_cwd, ce->name)) |
2451 | 0 | return add_rejected_path(o, ERROR_CWD_IN_THE_WAY, |
2452 | 0 | ce->name); |
2453 | | /* ...but nuke anything else. */ |
2454 | 0 | return 0; |
2455 | 0 | } |
2456 | | |
2457 | 0 | len = check_leading_path(ce->name, ce_namelen(ce), 0); |
2458 | 0 | if (!len) |
2459 | 0 | return 0; |
2460 | 0 | else if (len > 0) { |
2461 | 0 | char *path; |
2462 | 0 | int ret; |
2463 | |
|
2464 | 0 | path = xmemdupz(ce->name, len); |
2465 | 0 | if (lstat(path, &st)) |
2466 | 0 | ret = error_errno("cannot stat '%s'", path); |
2467 | 0 | else { |
2468 | 0 | if (submodule_from_ce(ce)) |
2469 | 0 | ret = check_submodule_move_head(ce, |
2470 | 0 | oid_to_hex(&ce->oid), |
2471 | 0 | NULL, o); |
2472 | 0 | else |
2473 | 0 | ret = check_ok_to_remove(path, len, DT_UNKNOWN, NULL, |
2474 | 0 | &st, error_type, |
2475 | 0 | absent_type, o); |
2476 | 0 | } |
2477 | 0 | free(path); |
2478 | 0 | return ret; |
2479 | 0 | } else if (lstat(ce->name, &st)) { |
2480 | 0 | if (errno != ENOENT) |
2481 | 0 | return error_errno("cannot stat '%s'", ce->name); |
2482 | 0 | return 0; |
2483 | 0 | } else { |
2484 | 0 | if (submodule_from_ce(ce)) |
2485 | 0 | return check_submodule_move_head(ce, oid_to_hex(&ce->oid), |
2486 | 0 | NULL, o); |
2487 | | |
2488 | 0 | return check_ok_to_remove(ce->name, ce_namelen(ce), |
2489 | 0 | ce_to_dtype(ce), ce, &st, |
2490 | 0 | error_type, absent_type, o); |
2491 | 0 | } |
2492 | 0 | } |
2493 | | |
2494 | | static int verify_absent(const struct cache_entry *ce, |
2495 | | enum unpack_trees_error_types error_type, |
2496 | | struct unpack_trees_options *o) |
2497 | 0 | { |
2498 | 0 | if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE)) |
2499 | 0 | return 0; |
2500 | 0 | return verify_absent_1(ce, error_type, COMPLETELY_ABSENT, o); |
2501 | 0 | } |
2502 | | |
2503 | | static int verify_absent_if_directory(const struct cache_entry *ce, |
2504 | | enum unpack_trees_error_types error_type, |
2505 | | struct unpack_trees_options *o) |
2506 | 0 | { |
2507 | 0 | if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE)) |
2508 | 0 | return 0; |
2509 | 0 | return verify_absent_1(ce, error_type, ABSENT_ANY_DIRECTORY, o); |
2510 | 0 | } |
2511 | | |
2512 | | static int verify_absent_sparse(const struct cache_entry *ce, |
2513 | | enum unpack_trees_error_types error_type, |
2514 | | struct unpack_trees_options *o) |
2515 | 0 | { |
2516 | 0 | return verify_absent_1(ce, error_type, COMPLETELY_ABSENT, o); |
2517 | 0 | } |
2518 | | |
2519 | | static int merged_entry(const struct cache_entry *ce, |
2520 | | const struct cache_entry *old, |
2521 | | struct unpack_trees_options *o) |
2522 | 0 | { |
2523 | 0 | int update = CE_UPDATE; |
2524 | 0 | struct cache_entry *merge = dup_cache_entry(ce, &o->result); |
2525 | |
|
2526 | 0 | if (!old) { |
2527 | | /* |
2528 | | * New index entries. In sparse checkout, the following |
2529 | | * verify_absent() will be delayed until after |
2530 | | * traverse_trees() finishes in unpack_trees(), then: |
2531 | | * |
2532 | | * - CE_NEW_SKIP_WORKTREE will be computed correctly |
2533 | | * - verify_absent() be called again, this time with |
2534 | | * correct CE_NEW_SKIP_WORKTREE |
2535 | | * |
2536 | | * verify_absent() call here does nothing in sparse |
2537 | | * checkout (i.e. o->skip_sparse_checkout == 0) |
2538 | | */ |
2539 | 0 | update |= CE_ADDED; |
2540 | 0 | merge->ce_flags |= CE_NEW_SKIP_WORKTREE; |
2541 | |
|
2542 | 0 | if (verify_absent(merge, |
2543 | 0 | ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) { |
2544 | 0 | discard_cache_entry(merge); |
2545 | 0 | return -1; |
2546 | 0 | } |
2547 | 0 | invalidate_ce_path(merge, o); |
2548 | |
|
2549 | 0 | if (submodule_from_ce(ce) && file_exists(ce->name)) { |
2550 | 0 | int ret = check_submodule_move_head(ce, NULL, |
2551 | 0 | oid_to_hex(&ce->oid), |
2552 | 0 | o); |
2553 | 0 | if (ret) |
2554 | 0 | return ret; |
2555 | 0 | } |
2556 | |
|
2557 | 0 | } else if (!(old->ce_flags & CE_CONFLICTED)) { |
2558 | | /* |
2559 | | * See if we can re-use the old CE directly? |
2560 | | * That way we get the uptodate stat info. |
2561 | | * |
2562 | | * This also removes the UPDATE flag on a match; otherwise |
2563 | | * we will end up overwriting local changes in the work tree. |
2564 | | */ |
2565 | 0 | if (same(old, merge)) { |
2566 | 0 | copy_cache_entry(merge, old); |
2567 | 0 | update = 0; |
2568 | 0 | } else { |
2569 | 0 | if (verify_uptodate(old, o)) { |
2570 | 0 | discard_cache_entry(merge); |
2571 | 0 | return -1; |
2572 | 0 | } |
2573 | | /* Migrate old flags over */ |
2574 | 0 | update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE); |
2575 | 0 | invalidate_ce_path(old, o); |
2576 | 0 | } |
2577 | | |
2578 | 0 | if (submodule_from_ce(ce) && file_exists(ce->name)) { |
2579 | 0 | int ret = check_submodule_move_head(ce, oid_to_hex(&old->oid), |
2580 | 0 | oid_to_hex(&ce->oid), |
2581 | 0 | o); |
2582 | 0 | if (ret) |
2583 | 0 | return ret; |
2584 | 0 | } |
2585 | 0 | } else { |
2586 | | /* |
2587 | | * Previously unmerged entry left as an existence |
2588 | | * marker by read_index_unmerged(); |
2589 | | */ |
2590 | 0 | if (verify_absent_if_directory(merge, |
2591 | 0 | ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) { |
2592 | 0 | discard_cache_entry(merge); |
2593 | 0 | return -1; |
2594 | 0 | } |
2595 | | |
2596 | 0 | invalidate_ce_path(old, o); |
2597 | 0 | } |
2598 | | |
2599 | 0 | if (do_add_entry(o, merge, update, CE_STAGEMASK) < 0) |
2600 | 0 | return -1; |
2601 | 0 | return 1; |
2602 | 0 | } |
2603 | | |
2604 | | static int merged_sparse_dir(const struct cache_entry * const *src, int n, |
2605 | | struct unpack_trees_options *o) |
2606 | 0 | { |
2607 | 0 | struct tree_desc t[MAX_UNPACK_TREES + 1]; |
2608 | 0 | void * tree_bufs[MAX_UNPACK_TREES + 1]; |
2609 | 0 | struct traverse_info info; |
2610 | 0 | int i, ret; |
2611 | | |
2612 | | /* |
2613 | | * Create the tree traversal information for traversing into *only* the |
2614 | | * sparse directory. |
2615 | | */ |
2616 | 0 | setup_traverse_info(&info, src[0]->name); |
2617 | 0 | info.fn = unpack_sparse_callback; |
2618 | 0 | info.data = o; |
2619 | 0 | info.show_all_errors = o->show_all_errors; |
2620 | 0 | info.pathspec = o->pathspec; |
2621 | | |
2622 | | /* Get the tree descriptors of the sparse directory in each of the merging trees */ |
2623 | 0 | for (i = 0; i < n; i++) |
2624 | 0 | tree_bufs[i] = fill_tree_descriptor(o->src_index->repo, &t[i], |
2625 | 0 | src[i] && !is_null_oid(&src[i]->oid) ? &src[i]->oid : NULL); |
2626 | |
|
2627 | 0 | ret = traverse_trees(o->src_index, n, t, &info); |
2628 | |
|
2629 | 0 | for (i = 0; i < n; i++) |
2630 | 0 | free(tree_bufs[i]); |
2631 | |
|
2632 | 0 | return ret; |
2633 | 0 | } |
2634 | | |
2635 | | static int deleted_entry(const struct cache_entry *ce, |
2636 | | const struct cache_entry *old, |
2637 | | struct unpack_trees_options *o) |
2638 | 0 | { |
2639 | | /* Did it exist in the index? */ |
2640 | 0 | if (!old) { |
2641 | 0 | if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o)) |
2642 | 0 | return -1; |
2643 | 0 | return 0; |
2644 | 0 | } else if (verify_absent_if_directory(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o)) { |
2645 | 0 | return -1; |
2646 | 0 | } |
2647 | | |
2648 | 0 | if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o)) |
2649 | 0 | return -1; |
2650 | 0 | add_entry(o, ce, CE_REMOVE, 0); |
2651 | 0 | invalidate_ce_path(ce, o); |
2652 | 0 | return 1; |
2653 | 0 | } |
2654 | | |
/* Carry "ce" into the result unchanged. */
static int keep_entry(const struct cache_entry *ce,
		      struct unpack_trees_options *o)
{
	int stage = ce_stage(ce);

	add_entry(o, ce, 0, 0);
	/* Higher-stage (conflicted) entries invalidate cached tree data. */
	if (stage)
		invalidate_ce_path(ce, o);
	return 1;
}
2663 | | |
2664 | | #if DBRT_DEBUG |
2665 | | static void show_stage_entry(FILE *o, |
2666 | | const char *label, const struct cache_entry *ce) |
2667 | | { |
2668 | | if (!ce) |
2669 | | fprintf(o, "%s (missing)\n", label); |
2670 | | else |
2671 | | fprintf(o, "%s%06o %s %d\t%s\n", |
2672 | | label, |
2673 | | ce->ce_mode, |
2674 | | oid_to_hex(&ce->oid), |
2675 | | ce_stage(ce), |
2676 | | ce->name); |
2677 | | } |
2678 | | #endif |
2679 | | |
/*
 * Three-way merge callback for traverse_trees().
 *
 * stages[0] is the current index entry, stages[o->head_idx] is HEAD,
 * stages[o->head_idx + 1] is the remote side, and stages[1..head_idx-1]
 * are the merge-base ancestors.  The "#N" case numbers sprinkled below
 * refer to the merge cases enumerated in t/t1000-read-tree-m-3way.sh.
 *
 * Returns the number of entries kept/merged, 0 when the path drops out,
 * or -1 on error.
 */
int threeway_merge(const struct cache_entry * const *stages,
		   struct unpack_trees_options *o)
{
	const struct cache_entry *index;
	const struct cache_entry *head;
	const struct cache_entry *remote = stages[o->head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;

	/* df_conflict_entry placeholders mark a directory/file conflict */
	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	/* Scan the ancestor stages for presence/absence of this path. */
	for (i = 1; i < o->head_idx; i++) {
		if (!stages[i] || stages[i] == o->df_conflict_entry)
			any_anc_missing = 1;
		else
			no_anc_exists = 0;
	}

	index = stages[0];
	head = stages[o->head_idx];

	if (head == o->df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == o->df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	/*
	 * First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		/* Record which ancestor (if any) each side matches. */
		for (i = 1; i < o->head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/*
	 * We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head)) {
			/* Sparse directories may still merge file-by-file. */
			if (S_ISSPARSEDIR(index->ce_mode))
				return merged_sparse_dir(stages, 4, o);
			else
				return reject_merge(index, o);
		}
		return merged_entry(remote, index, o);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head)) {
		if (S_ISSPARSEDIR(index->ce_mode))
			return merged_sparse_dir(stages, 4, o);
		else
			return reject_merge(index, o);
	}

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index, o);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index, o);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/*
	 * Under the "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (o->aggressive) {
		int head_deleted = !head;
		int remote_deleted = !remote;
		const struct cache_entry *ce = NULL;

		/* Pick any stage as representative for worktree checks. */
		if (index)
			ce = index;
		else if (head)
			ce = head;
		else if (remote)
			ce = remote;
		else {
			for (i = 1; i < o->head_idx; i++) {
				if (stages[i] && stages[i] != o->df_conflict_entry) {
					ce = stages[i];
					break;
				}
			}
		}

		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index, o);
			if (ce && !head_deleted) {
				if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
					return -1;
			}
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index, o);

	}

	/* Handle "no merge" cases (see t/t1000-read-tree-m-3way.sh) */
	if (index) {
		/*
		 * If we've reached the "no merge" cases and we're merging
		 * a sparse directory, we may have an "edit/edit" conflict that
		 * can be resolved by individually merging directory contents.
		 */
		if (S_ISSPARSEDIR(index->ce_mode))
			return merged_sparse_dir(stages, 4, o);

		/*
		 * If we're not merging a sparse directory, ensure the index is
		 * up-to-date to avoid files getting overwritten with conflict
		 * resolution files
		 */
		if (verify_uptodate(index, o))
			return -1;
	}

	o->nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #10, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		/* Keep the first real ancestor stage as the base entry. */
		for (i = 1; i < o->head_idx; i++) {
			if (stages[i] && stages[i] != o->df_conflict_entry) {
				keep_entry(stages[i], o);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head, o); }
	if (remote) { count += keep_entry(remote, o); }
	return count;
}
2861 | | |
2862 | | /* |
2863 | | * Two-way merge. |
2864 | | * |
2865 | | * The rule is to "carry forward" what is in the index without losing |
2866 | | * information across a "fast-forward", favoring a successful merge |
2867 | | * over a merge failure when it makes sense. For details of the |
2868 | | * "carry forward" rule, please see <Documentation/git-read-tree.txt>. |
2869 | | * |
2870 | | */ |
/*
 * Two-way merge callback: src[0] is the current index entry, src[1] the
 * entry in the "old" tree and src[2] the entry in the "new" tree.  The
 * numeric comments below refer to the carry-forward cases described in
 * Documentation/git-read-tree.txt.
 *
 * Returns >= 0 on success (entries kept/merged/deleted), -1 on error.
 */
int twoway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *current = src[0];
	const struct cache_entry *oldtree = src[1];
	const struct cache_entry *newtree = src[2];

	if (o->merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     o->merge_size);

	/* Normalize directory/file conflict placeholders to "absent". */
	if (oldtree == o->df_conflict_entry)
		oldtree = NULL;
	if (newtree == o->df_conflict_entry)
		newtree = NULL;

	if (current) {
		if (current->ce_flags & CE_CONFLICTED) {
			/* Unmerged index entry: only resolvable when the
			 * trees agree, or when resetting. */
			if (same(oldtree, newtree) || o->reset) {
				if (!newtree)
					return deleted_entry(current, current, o);
				else
					return merged_entry(newtree, current, o);
			}
			return reject_merge(current, o);
		} else if ((!oldtree && !newtree) || /* 4 and 5 */
			 (!oldtree && newtree &&
			  same(current, newtree)) || /* 6 and 7 */
			 (oldtree && newtree &&
			  same(oldtree, newtree)) || /* 14 and 15 */
			 (oldtree && newtree &&
			  !same(oldtree, newtree) && /* 18 and 19 */
			  same(current, newtree))) {
			return keep_entry(current, o);
		} else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current, o);
		} else if (oldtree && newtree &&
			 same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current, o);
		} else if (current && !oldtree && newtree &&
			   S_ISSPARSEDIR(current->ce_mode) != S_ISSPARSEDIR(newtree->ce_mode) &&
			   ce_stage(current) == 0) {
			/*
			 * This case is a directory/file conflict across the sparse-index
			 * boundary. When we are changing from one path to another via
			 * 'git checkout', then we want to replace one entry with another
			 * via merged_entry(). If there are staged changes, then we should
			 * reject the merge instead.
			 */
			return merged_entry(newtree, current, o);
		} else if (S_ISSPARSEDIR(current->ce_mode)) {
			/*
			 * The sparse directories differ, but we don't know whether that's
			 * because of two different files in the directory being modified
			 * (can be trivially merged) or if there is a real file conflict.
			 * Merge the sparse directory by OID to compare file-by-file.
			 */
			return merged_sparse_dir(src, 3, o);
		} else
			return reject_merge(current, o);
	}
	else if (newtree) {
		if (oldtree && !o->initial_checkout) {
			/*
			 * deletion of the path was staged;
			 */
			if (same(oldtree, newtree))
				return 1;
			return reject_merge(oldtree, o);
		}
		return merged_entry(newtree, current, o);
	}
	/* Path gone from the new tree and not in the index: delete it. */
	return deleted_entry(oldtree, current, o);
}
2947 | | |
2948 | | /* |
2949 | | * Bind merge. |
2950 | | * |
2951 | | * Keep the index entries at stage0, collapse stage1 but make sure |
2952 | | * stage0 does not have anything there. |
2953 | | */ |
2954 | | int bind_merge(const struct cache_entry * const *src, |
2955 | | struct unpack_trees_options *o) |
2956 | 0 | { |
2957 | 0 | const struct cache_entry *old = src[0]; |
2958 | 0 | const struct cache_entry *a = src[1]; |
2959 | |
|
2960 | 0 | if (o->merge_size != 1) |
2961 | 0 | return error("Cannot do a bind merge of %d trees", |
2962 | 0 | o->merge_size); |
2963 | 0 | if (a && old) |
2964 | 0 | return o->quiet ? -1 : |
2965 | 0 | error(ERRORMSG(o, ERROR_BIND_OVERLAP), |
2966 | 0 | super_prefixed(a->name, o->super_prefix), |
2967 | 0 | super_prefixed(old->name, o->super_prefix)); |
2968 | 0 | if (!a) |
2969 | 0 | return keep_entry(old, o); |
2970 | 0 | else |
2971 | 0 | return merged_entry(a, NULL, o); |
2972 | 0 | } |
2973 | | |
2974 | | /* |
2975 | | * One-way merge. |
2976 | | * |
2977 | | * The rule is: |
2978 | | * - take the stat information from stage0, take the data from stage1 |
2979 | | */ |
int oneway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];	/* stage0: current index */
	const struct cache_entry *a = src[1];	/* stage1: the tree taken */

	if (o->merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     o->merge_size);

	/* Path absent in the tree: drop it from the index. */
	if (!a || a == o->df_conflict_entry)
		return deleted_entry(old, old, o);

	if (old && same(old, a)) {
		int update = 0;
		/*
		 * On "reset", refresh the stat info for entries whose cached
		 * stat data is stale (skipped when fsmonitor already vouches
		 * for the entry being up to date).
		 */
		if (o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old) &&
			!(old->ce_flags & CE_FSMONITOR_VALID)) {
			struct stat st;
			if (lstat(old->name, &st) ||
			    ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE))
				update |= CE_UPDATE;
		}
		/* Submodules may need a checkout even when the oid matches. */
		if (o->update && S_ISGITLINK(old->ce_mode) &&
		    should_update_submodules() && !verify_uptodate(old, o))
			update |= CE_UPDATE;
		/* Keep the old entry (with its stat info); flag for update. */
		add_entry(o, old, update, CE_STAGEMASK);
		return 0;
	}
	return merged_entry(a, old, o);
}
3010 | | |
3011 | | /* |
3012 | | * Merge worktree and untracked entries in a stash entry. |
3013 | | * |
3014 | | * Ignore all index entries. Collapse remaining trees but make sure that they |
3015 | | * don't have any conflicting files. |
3016 | | */ |
3017 | | int stash_worktree_untracked_merge(const struct cache_entry * const *src, |
3018 | | struct unpack_trees_options *o) |
3019 | 0 | { |
3020 | 0 | const struct cache_entry *worktree = src[1]; |
3021 | 0 | const struct cache_entry *untracked = src[2]; |
3022 | |
|
3023 | 0 | if (o->merge_size != 2) |
3024 | 0 | BUG("invalid merge_size: %d", o->merge_size); |
3025 | | |
3026 | 0 | if (worktree && untracked) |
3027 | 0 | return error(_("worktree and untracked commit have duplicate entries: %s"), |
3028 | 0 | super_prefixed(worktree->name, o->super_prefix)); |
3029 | | |
3030 | 0 | return merged_entry(worktree ? worktree : untracked, NULL, o); |
3031 | 0 | } |