Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Helper functions for tree diff generation |
3 | | */ |
4 | | #include "git-compat-util.h" |
5 | | #include "diff.h" |
6 | | #include "diffcore.h" |
7 | | #include "hash.h" |
8 | | #include "tree.h" |
9 | | #include "tree-walk.h" |
10 | | #include "environment.h" |
11 | | #include "repository.h" |
12 | | |
13 | | /* |
14 | | * Some mode bits are also used internally for computations. |
15 | | * |
16 | | * They *must* not overlap with any valid modes, and they *must* not be emitted |
17 | | * to outside world - i.e. appear on disk or network. In other words, it's just |
18 | | * temporary fields, which we internally use, but they have to stay in-house. |
19 | | * |
20 | | * ( such approach is valid, as standard S_IF* fits into 16 bits, and in Git |
21 | | * codebase mode is `unsigned int` which is assumed to be at least 32 bits ) |
22 | | */ |
23 | | |
24 | 339k | #define S_DIFFTREE_IFXMIN_NEQ 0x80000000 |
25 | | |
26 | | /* |
27 | | * internal mode marker, saying a tree entry != entry of tp[imin] |
28 | | * (see ll_diff_tree_paths for what it means there) |
29 | | * |
30 | | * we will update/use/emit entry for diff only with it unset. |
31 | | */ |
32 | 248k | #define S_IFXMIN_NEQ S_DIFFTREE_IFXMIN_NEQ |
33 | | |
/*
 * Allocate an array of `nr` elements into pointer `x`.
 *
 * For tiny arrays (nr <= 2, the common case: a plain diff has exactly one
 * parent) the storage comes from the stack via xalloca, avoiding heap
 * traffic on the hot path; larger arrays fall back to ALLOC_ARRAY.
 */
#define FAST_ARRAY_ALLOC(x, nr) do { \
	if ((nr) <= 2) \
		(x) = xalloca((nr) * sizeof(*(x))); \
	else \
		ALLOC_ARRAY((x), nr); \
} while(0)
/*
 * Release storage obtained with FAST_ARRAY_ALLOC; `nr` must be the same
 * value passed at allocation time so the stack/heap choice matches.
 */
#define FAST_ARRAY_FREE(x, nr) do { \
	if ((nr) <= 2) \
		xalloca_free((x)); \
	else \
		free((x)); \
} while(0)
46 | | |
47 | | static struct combine_diff_path *ll_diff_tree_paths( |
48 | | struct combine_diff_path *p, const struct object_id *oid, |
49 | | const struct object_id **parents_oid, int nparent, |
50 | | struct strbuf *base, struct diff_options *opt, |
51 | | int depth); |
52 | | static void ll_diff_tree_oid(const struct object_id *old_oid, |
53 | | const struct object_id *new_oid, |
54 | | struct strbuf *base, struct diff_options *opt); |
55 | | |
56 | | /* |
57 | | * Compare two tree entries, taking into account only path/S_ISDIR(mode), |
58 | | * but not their sha1's. |
59 | | * |
60 | | * NOTE files and directories *always* compare differently, even when having |
61 | | * the same name - thanks to base_name_compare(). |
62 | | * |
63 | | * NOTE empty (=invalid) descriptor(s) take part in comparison as +infty, |
64 | | * so that they sort *after* valid tree entries. |
65 | | * |
66 | | * Due to this convention, if trees are scanned in sorted order, all |
67 | | * non-empty descriptors will be processed first. |
68 | | */ |
69 | | static int tree_entry_pathcmp(struct tree_desc *t1, struct tree_desc *t2) |
70 | 90.7k | { |
71 | 90.7k | struct name_entry *e1, *e2; |
72 | 90.7k | int cmp; |
73 | | |
74 | | /* empty descriptors sort after valid tree entries */ |
75 | 90.7k | if (!t1->size) |
76 | 0 | return t2->size ? 1 : 0; |
77 | 90.7k | else if (!t2->size) |
78 | 2.92k | return -1; |
79 | | |
80 | 87.8k | e1 = &t1->entry; |
81 | 87.8k | e2 = &t2->entry; |
82 | 87.8k | cmp = base_name_compare(e1->path, tree_entry_len(e1), e1->mode, |
83 | 87.8k | e2->path, tree_entry_len(e2), e2->mode); |
84 | 87.8k | return cmp; |
85 | 90.7k | } |
86 | | |
87 | | |
88 | | /* |
89 | | * convert path -> opt->diff_*() callbacks |
90 | | * |
91 | | * emits diff to first parent only, and tells diff tree-walker that we are done |
92 | | * with p and it can be freed. |
93 | | */ |
94 | | static int emit_diff_first_parent_only(struct diff_options *opt, struct combine_diff_path *p) |
95 | 11.9k | { |
96 | 11.9k | struct combine_diff_parent *p0 = &p->parent[0]; |
97 | 11.9k | if (p->mode && p0->mode) { |
98 | 0 | opt->change(opt, p0->mode, p->mode, &p0->oid, &p->oid, |
99 | 0 | 1, 1, p->path, 0, 0); |
100 | 0 | } |
101 | 11.9k | else { |
102 | 11.9k | const struct object_id *oid; |
103 | 11.9k | unsigned int mode; |
104 | 11.9k | int addremove; |
105 | | |
106 | 11.9k | if (p->mode) { |
107 | 11.9k | addremove = '+'; |
108 | 11.9k | oid = &p->oid; |
109 | 11.9k | mode = p->mode; |
110 | 11.9k | } else { |
111 | 0 | addremove = '-'; |
112 | 0 | oid = &p0->oid; |
113 | 0 | mode = p0->mode; |
114 | 0 | } |
115 | | |
116 | 11.9k | opt->add_remove(opt, addremove, mode, oid, 1, p->path, 0); |
117 | 11.9k | } |
118 | | |
119 | 11.9k | return 0; /* we are done with p */ |
120 | 11.9k | } |
121 | | |
122 | | |
123 | | /* |
124 | | * Make a new combine_diff_path from path/mode/sha1 |
125 | | * and append it to paths list tail. |
126 | | * |
127 | | * Memory for created elements could be reused: |
128 | | * |
129 | | * - if last->next == NULL, the memory is allocated; |
130 | | * |
131 | | * - if last->next != NULL, it is assumed that p=last->next was returned |
132 | | * earlier by this function, and p->next was *not* modified. |
133 | | * The memory is then reused from p. |
134 | | * |
135 | | * so for clients, |
136 | | * |
137 | | * - if you do need to keep the element |
138 | | * |
139 | | * p = path_appendnew(p, ...); |
140 | | * process(p); |
141 | | * p->next = NULL; |
142 | | * |
143 | | * - if you don't need to keep the element after processing |
144 | | * |
145 | | * pprev = p; |
146 | | * p = path_appendnew(p, ...); |
147 | | * process(p); |
148 | | * p = pprev; |
149 | | * ; don't forget to free tail->next in the end |
150 | | * |
151 | | * p->parent[] remains uninitialized. |
152 | | */ |
static struct combine_diff_path *path_appendnew(struct combine_diff_path *last,
	int nparent, const struct strbuf *base, const char *path, int pathlen,
	unsigned mode, const struct object_id *oid)
{
	struct combine_diff_path *p;
	/* total path length = base prefix + this entry's name */
	size_t len = st_add(base->len, pathlen);
	size_t alloclen = combine_diff_path_size(nparent, len);

	/* if last->next is !NULL - it is a pre-allocated memory, we can reuse */
	p = last->next;
	if (p && (alloclen > (intptr_t)p->next)) {
		/*
		 * the pre-allocated chunk records its own size in .next (see
		 * below); it is too small for this entry, so discard it.
		 */
		FREE_AND_NULL(p);
	}

	if (!p) {
		p = xmalloc(alloclen);

		/*
		 * until we go to it next round, .next holds how many bytes we
		 * allocated (for faster realloc - we don't need copying old data).
		 */
		p->next = (struct combine_diff_path *)(intptr_t)alloclen;
	}

	last->next = p;

	/* path storage lives in the same allocation, right after parent[nparent] */
	p->path = (char *)&(p->parent[nparent]);
	memcpy(p->path, base->buf, base->len);
	memcpy(p->path + base->len, path, pathlen);
	p->path[len] = 0;
	p->mode = mode;
	/* a NULL oid means "path absent on this side"; store the null oid */
	oidcpy(&p->oid, oid ? oid : null_oid());

	return p;
}
188 | | |
189 | | /* |
190 | | * new path should be added to combine diff |
191 | | * |
192 | | * 3 cases on how/when it should be called and behaves: |
193 | | * |
194 | | * t, !tp -> path added, all parents lack it |
195 | | * !t, tp -> path removed from all parents |
196 | | * t, tp -> path modified/added |
197 | | * (M for tp[i]=tp[imin], A otherwise) |
198 | | */ |
static struct combine_diff_path *emit_path(struct combine_diff_path *p,
	struct strbuf *base, struct diff_options *opt, int nparent,
	struct tree_desc *t, struct tree_desc *tp,
	int imin, int depth)
{
	unsigned short mode;
	const char *path;
	const struct object_id *oid;
	int pathlen;
	int old_baselen = base->len;
	int i, isdir, recurse = 0, emitthis = 1;

	/* at least something has to be valid */
	assert(t || tp);

	if (t) {
		/* path present in resulting tree */
		oid = tree_entry_extract(t, &path, &mode);
		pathlen = tree_entry_len(&t->entry);
		isdir = S_ISDIR(mode);
	} else {
		/*
		 * a path was removed - take path from imin parent. Also take
		 * mode from that parent, to decide on recursion(1).
		 *
		 * 1) all modes for tp[i]=tp[imin] should be the same wrt
		 *    S_ISDIR, thanks to base_name_compare().
		 */
		tree_entry_extract(&tp[imin], &path, &mode);
		pathlen = tree_entry_len(&tp[imin].entry);

		isdir = S_ISDIR(mode);
		oid = NULL;
		mode = 0;	/* removed: no mode on the result side */
	}

	if (opt->flags.recursive && isdir) {
		recurse = 1;
		/* only emit the tree entry itself if the caller asked for it */
		emitthis = opt->flags.tree_in_recursive;
	}

	if (emitthis) {
		int keep;
		struct combine_diff_path *pprev = p;
		p = path_appendnew(p, nparent, base, path, pathlen, mode, oid);

		/* fill in per-parent status/mode/oid for the new element */
		for (i = 0; i < nparent; ++i) {
			/*
			 * tp[i] is valid, if present and if tp[i]==tp[imin] -
			 * otherwise, we should ignore it.
			 */
			int tpi_valid = tp && !(tp[i].entry.mode & S_IFXMIN_NEQ);

			const struct object_id *oid_i;
			unsigned mode_i;

			p->parent[i].status =
				!t ? DIFF_STATUS_DELETED :
					tpi_valid ?
						DIFF_STATUS_MODIFIED :
						DIFF_STATUS_ADDED;

			if (tpi_valid) {
				oid_i = &tp[i].entry.oid;
				mode_i = tp[i].entry.mode;
			}
			else {
				oid_i = null_oid();
				mode_i = 0;
			}

			p->parent[i].mode = mode_i;
			oidcpy(&p->parent[i].oid, oid_i);
		}

		keep = 1;
		if (opt->pathchange)
			keep = opt->pathchange(opt, p);

		/*
		 * If a path was filtered or consumed - we don't need to add it
		 * to the list and can reuse its memory, leaving it as
		 * pre-allocated element on the tail.
		 *
		 * On the other hand, if path needs to be kept, we need to
		 * correct its .next to NULL, as it was pre-initialized to how
		 * much memory was allocated.
		 *
		 * see path_appendnew() for details.
		 */
		if (!keep)
			p = pprev;
		else
			p->next = NULL;
	}

	if (recurse) {
		const struct object_id **parents_oid;

		FAST_ARRAY_ALLOC(parents_oid, nparent);
		for (i = 0; i < nparent; ++i) {
			/* same rule as in emitthis */
			int tpi_valid = tp && !(tp[i].entry.mode & S_IFXMIN_NEQ);

			parents_oid[i] = tpi_valid ? &tp[i].entry.oid : NULL;
		}

		/* descend: extend base with "path/" and diff the subtrees */
		strbuf_add(base, path, pathlen);
		strbuf_addch(base, '/');
		p = ll_diff_tree_paths(p, oid, parents_oid, nparent, base, opt,
				       depth + 1);
		FAST_ARRAY_FREE(parents_oid, nparent);
	}

	/* restore base to what the caller passed in */
	strbuf_setlen(base, old_baselen);
	return p;
}
316 | | |
317 | | static void skip_uninteresting(struct tree_desc *t, struct strbuf *base, |
318 | | struct diff_options *opt) |
319 | 0 | { |
320 | 0 | enum interesting match; |
321 | |
|
322 | 0 | while (t->size) { |
323 | 0 | match = tree_entry_interesting(opt->repo->index, &t->entry, |
324 | 0 | base, &opt->pathspec); |
325 | 0 | if (match) { |
326 | 0 | if (match == all_entries_not_interesting) |
327 | 0 | t->size = 0; |
328 | 0 | break; |
329 | 0 | } |
330 | 0 | update_tree_entry(t); |
331 | 0 | } |
332 | 0 | } |
333 | | |
334 | | |
335 | | /* |
336 | | * generate paths for combined diff D(sha1,parents_oid[]) |
337 | | * |
338 | | * Resulting paths are appended to combine_diff_path linked list, and also, are |
339 | | * emitted on the go via opt->pathchange() callback, so it is possible to |
340 | | * process the result as batch or incrementally. |
341 | | * |
342 | | * The paths are generated scanning new tree and all parents trees |
343 | | * simultaneously, similarly to what diff_tree() was doing for 2 trees. |
344 | | * The theory behind such scan is as follows: |
345 | | * |
346 | | * |
347 | | * D(T,P1...Pn) calculation scheme |
348 | | * ------------------------------- |
349 | | * |
350 | | * D(T,P1...Pn) = D(T,P1) ^ ... ^ D(T,Pn) (regarding resulting paths set) |
351 | | * |
352 | | * D(T,Pj) - diff between T..Pj |
353 | | * D(T,P1...Pn) - combined diff from T to parents P1,...,Pn |
354 | | * |
355 | | * |
356 | | * We start from all trees, which are sorted, and compare their entries in |
357 | | * lock-step: |
358 | | * |
359 | | * T P1 Pn |
360 | | * - - - |
361 | | * |t| |p1| |pn| |
362 | | * |-| |--| ... |--| imin = argmin(p1...pn) |
363 | | * | | | | | | |
364 | | * |-| |--| |--| |
365 | | * |.| |. | |. | |
366 | | * . . . |
367 | | * . . . |
368 | | * |
369 | | * at any time there could be 3 cases: |
370 | | * |
371 | | * 1) t < p[imin]; |
372 | | * 2) t > p[imin]; |
373 | | * 3) t = p[imin]. |
374 | | * |
375 | | * Schematic deduction of what every case means, and what to do, follows: |
376 | | * |
377 | | * 1) t < p[imin] -> ∀j t ∉ Pj -> "+t" ∈ D(T,Pj) -> D += "+t"; t↓ |
378 | | * |
379 | | * 2) t > p[imin] |
380 | | * |
381 | | * 2.1) ∃j: pj > p[imin] -> "-p[imin]" ∉ D(T,Pj) -> D += ø; ∀ pi=p[imin] pi↓ |
382 | | * 2.2) ∀i pi = p[imin] -> pi ∉ T -> "-pi" ∈ D(T,Pi) -> D += "-p[imin]"; ∀i pi↓ |
383 | | * |
384 | | * 3) t = p[imin] |
385 | | * |
386 | | * 3.1) ∃j: pj > p[imin] -> "+t" ∈ D(T,Pj) -> only pi=p[imin] remains to investigate |
387 | | * 3.2) pi = p[imin] -> investigate δ(t,pi) |
388 | | * | |
389 | | * | |
390 | | * v |
391 | | * |
392 | | * 3.1+3.2) looking at δ(t,pi) ∀i: pi=p[imin] - if all != ø -> |
393 | | * |
394 | | * ⎧δ(t,pi) - if pi=p[imin] |
395 | | * -> D += ⎨ |
396 | | * ⎩"+t" - if pi>p[imin] |
397 | | * |
398 | | * |
399 | | * in any case t↓ ∀ pi=p[imin] pi↓ |
400 | | * |
401 | | * |
402 | | * ~~~~~~~~ |
403 | | * |
404 | | * NOTE |
405 | | * |
406 | | * Usual diff D(A,B) is by definition the same as combined diff D(A,[B]), |
407 | | * so this diff paths generator can, and is used, for plain diffs |
408 | | * generation too. |
409 | | * |
410 | | * Please keep attention to the common D(A,[B]) case when working on the |
411 | | * code, in order not to slow it down. |
412 | | * |
413 | | * NOTE |
414 | | * nparent must be > 0. |
415 | | */ |
416 | | |
417 | | |
418 | | /* ∀ pi=p[imin] pi↓ */ |
419 | | static inline void update_tp_entries(struct tree_desc *tp, int nparent) |
420 | 78.8k | { |
421 | 78.8k | int i; |
422 | 157k | for (i = 0; i < nparent; ++i) |
423 | 78.8k | if (!(tp[i].entry.mode & S_IFXMIN_NEQ)) |
424 | 78.8k | update_tree_entry(&tp[i]); |
425 | 78.8k | } |
426 | | |
static struct combine_diff_path *ll_diff_tree_paths(
	struct combine_diff_path *p, const struct object_id *oid,
	const struct object_id **parents_oid, int nparent,
	struct strbuf *base, struct diff_options *opt,
	int depth)
{
	struct tree_desc t, *tp;
	void *ttree, **tptree;
	int i;

	/* refuse to recurse into pathologically (or maliciously) deep trees */
	if (depth > max_allowed_tree_depth)
		die("exceeded maximum allowed tree depth");

	FAST_ARRAY_ALLOC(tp, nparent);
	FAST_ARRAY_ALLOC(tptree, nparent);

	/*
	 * load parents first, as they are probably already cached.
	 *
	 * ( log_tree_diff() parses commit->parent before calling here via
	 *   diff_tree_oid(parent, commit) )
	 */
	for (i = 0; i < nparent; ++i)
		tptree[i] = fill_tree_descriptor(opt->repo, &tp[i], parents_oid[i]);
	ttree = fill_tree_descriptor(opt->repo, &t, oid);

	/* Enable recursion indefinitely */
	opt->pathspec.recursive = opt->flags.recursive;

	/* lock-step scan over t and all tp[] (see the scheme comment above) */
	for (;;) {
		int imin, cmp;

		if (diff_can_quit_early(opt))
			break;

		/* bail out once the caller's change limit is exceeded */
		if (opt->max_changes && diff_queued_diff.nr > opt->max_changes)
			break;

		/* with a pathspec, fast-forward all trees past uninteresting entries */
		if (opt->pathspec.nr) {
			skip_uninteresting(&t, base, opt);
			for (i = 0; i < nparent; i++)
				skip_uninteresting(&tp[i], base, opt);
		}

		/* comparing is finished when all trees are done */
		if (!t.size) {
			int done = 1;
			for (i = 0; i < nparent; ++i)
				if (tp[i].size) {
					done = 0;
					break;
				}
			if (done)
				break;
		}

		/*
		 * lookup imin = argmin(p1...pn),
		 * mark entries whether they =p[imin] along the way
		 */
		imin = 0;
		tp[0].entry.mode &= ~S_IFXMIN_NEQ;

		for (i = 1; i < nparent; ++i) {
			cmp = tree_entry_pathcmp(&tp[i], &tp[imin]);
			if (cmp < 0) {
				imin = i;
				tp[i].entry.mode &= ~S_IFXMIN_NEQ;
			}
			else if (cmp == 0) {
				tp[i].entry.mode &= ~S_IFXMIN_NEQ;
			}
			else {
				tp[i].entry.mode |= S_IFXMIN_NEQ;
			}
		}

		/* fixup markings for entries before imin */
		for (i = 0; i < imin; ++i)
			tp[i].entry.mode |= S_IFXMIN_NEQ;	/* pi > p[imin] */


		/* compare t vs p[imin] */
		cmp = tree_entry_pathcmp(&t, &tp[imin]);

		/* t = p[imin] */
		if (cmp == 0) {
			/* are either pi > p[imin] or diff(t,pi) != ø ? */
			if (!opt->flags.find_copies_harder) {
				for (i = 0; i < nparent; ++i) {
					/* p[i] > p[imin] */
					if (tp[i].entry.mode & S_IFXMIN_NEQ)
						continue;

					/* diff(t,pi) != ø */
					if (!oideq(&t.entry.oid, &tp[i].entry.oid) ||
					    (t.entry.mode != tp[i].entry.mode))
						continue;

					/* t identical to some pi=p[imin]: nothing to emit */
					goto skip_emit_t_tp;
				}
			}

			/* D += {δ(t,pi) if pi=p[imin];  "+t" if pi > p[imin]} */
			p = emit_path(p, base, opt, nparent,
					&t, tp, imin, depth);

		skip_emit_t_tp:
			/* t↓,  ∀ pi=p[imin]  pi↓ */
			update_tree_entry(&t);
			update_tp_entries(tp, nparent);
		}

		/* t < p[imin] */
		else if (cmp < 0) {
			/* D += "+t" */
			p = emit_path(p, base, opt, nparent,
					&t, /*tp=*/NULL, -1, depth);

			/* t↓ */
			update_tree_entry(&t);
		}

		/* t > p[imin] */
		else {
			/* ∀i pi=p[imin] -> D += "-p[imin]" */
			if (!opt->flags.find_copies_harder) {
				for (i = 0; i < nparent; ++i)
					if (tp[i].entry.mode & S_IFXMIN_NEQ)
						goto skip_emit_tp;
			}

			p = emit_path(p, base, opt, nparent,
					/*t=*/NULL, tp, imin, depth);

		skip_emit_tp:
			/* ∀ pi=p[imin]  pi↓ */
			update_tp_entries(tp, nparent);
		}
	}

	free(ttree);
	for (i = nparent-1; i >= 0; i--)
		free(tptree[i]);
	FAST_ARRAY_FREE(tptree, nparent);
	FAST_ARRAY_FREE(tp, nparent);

	return p;
}
577 | | |
578 | | struct combine_diff_path *diff_tree_paths( |
579 | | struct combine_diff_path *p, const struct object_id *oid, |
580 | | const struct object_id **parents_oid, int nparent, |
581 | | struct strbuf *base, struct diff_options *opt) |
582 | 13.2k | { |
583 | 13.2k | p = ll_diff_tree_paths(p, oid, parents_oid, nparent, base, opt, 0); |
584 | | |
585 | | /* |
586 | | * free pre-allocated last element, if any |
587 | | * (see path_appendnew() for details about why) |
588 | | */ |
589 | 13.2k | FREE_AND_NULL(p->next); |
590 | | |
591 | 13.2k | return p; |
592 | 13.2k | } |
593 | | |
594 | | /* |
595 | | * Does it look like the resulting diff might be due to a rename? |
596 | | * - single entry |
597 | | * - not a valid previous file |
598 | | */ |
599 | | static inline int diff_might_be_rename(void) |
600 | 0 | { |
601 | 0 | return diff_queued_diff.nr == 1 && |
602 | 0 | !DIFF_FILE_VALID(diff_queued_diff.queue[0]->one); |
603 | 0 | } |
604 | | |
static void try_to_follow_renames(const struct object_id *old_oid,
				  const struct object_id *new_oid,
				  struct strbuf *base, struct diff_options *opt)
{
	/*
	 * Re-run the two-tree diff with rename/copy detection to see
	 * whether the single creation event in diff_queued_diff is better
	 * explained as a rename/copy.  On success the queued filepair is
	 * replaced and opt->pathspec is retargeted at the source path, so
	 * that older commits are followed under the previous name.
	 */
	struct diff_options diff_opts;
	struct diff_queue_struct *q = &diff_queued_diff;
	struct diff_filepair *choice;
	int i;

	/*
	 * follow-rename code is very specific, we need exactly one
	 * path. Magic that matches more than one path is not
	 * supported.
	 */
	GUARD_PATHSPEC(&opt->pathspec, PATHSPEC_FROMTOP | PATHSPEC_LITERAL);
#if 0
	/*
	 * We should reject wildcards as well. Unfortunately we
	 * haven't got a reliable way to detect that 'foo\*bar' in
	 * fact has no wildcards. nowildcard_len is merely a hint for
	 * optimization. Let it slip for now until wildmatch is taught
	 * about dry-run mode and returns wildcard info.
	 */
	if (opt->pathspec.has_wildcard)
		BUG("wildcards are not supported");
#endif

	/* Remove the file creation entry from the diff queue, and remember it */
	choice = q->queue[0];
	q->nr = 0;

	/* run an unrestricted recursive diff with copy/rename detection on */
	repo_diff_setup(opt->repo, &diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.flags.find_copies_harder = 1;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
	diff_opts.single_follow = opt->pathspec.items[0].match;
	diff_opts.break_opt = opt->break_opt;
	diff_opts.rename_score = opt->rename_score;
	diff_setup_done(&diff_opts);
	ll_diff_tree_oid(old_oid, new_oid, base, &diff_opts);
	diffcore_std(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);

	/* Go through the new set of filepairing, and see if we find a more interesting one */
	opt->found_follow = 0;
	for (i = 0; i < q->nr; i++) {
		struct diff_filepair *p = q->queue[i];

		/*
		 * Found a source? Not only do we use that for the new
		 * diff_queued_diff, we will also use that as the path in
		 * the future!
		 */
		if ((p->status == 'R' || p->status == 'C') &&
		    !strcmp(p->two->path, opt->pathspec.items[0].match)) {
			const char *path[2];

			/* Switch the file-pairs around */
			q->queue[i] = choice;
			choice = p;

			/* Update the path we use from now on.. */
			path[0] = p->one->path;
			path[1] = NULL;
			clear_pathspec(&opt->pathspec);
			parse_pathspec(&opt->pathspec,
				       PATHSPEC_ALL_MAGIC & ~PATHSPEC_LITERAL,
				       PATHSPEC_LITERAL_PATH, "", path);

			/*
			 * The caller expects us to return a set of vanilla
			 * filepairs to let a later call to diffcore_std()
			 * it makes to sort the renames out (among other
			 * things), but we already have found renames
			 * ourselves; signal diffcore_std() not to muck with
			 * rename information.
			 */
			opt->found_follow = 1;
			break;
		}
	}

	/*
	 * Then, discard all the non-relevant file pairs...
	 */
	for (i = 0; i < q->nr; i++) {
		struct diff_filepair *p = q->queue[i];
		diff_free_filepair(p);
	}

	/*
	 * .. and re-instate the one we want (which might be either the
	 * original one, or the rename/copy we found)
	 */
	q->queue[0] = choice;
	q->nr = 1;
}
702 | | |
703 | | static void ll_diff_tree_oid(const struct object_id *old_oid, |
704 | | const struct object_id *new_oid, |
705 | | struct strbuf *base, struct diff_options *opt) |
706 | 13.2k | { |
707 | 13.2k | struct combine_diff_path phead, *p; |
708 | 13.2k | pathchange_fn_t pathchange_old = opt->pathchange; |
709 | | |
710 | 13.2k | phead.next = NULL; |
711 | 13.2k | opt->pathchange = emit_diff_first_parent_only; |
712 | 13.2k | diff_tree_paths(&phead, new_oid, &old_oid, 1, base, opt); |
713 | | |
714 | 13.2k | for (p = phead.next; p;) { |
715 | 0 | struct combine_diff_path *pprev = p; |
716 | 0 | p = p->next; |
717 | 0 | free(pprev); |
718 | 0 | } |
719 | | |
720 | 13.2k | opt->pathchange = pathchange_old; |
721 | 13.2k | } |
722 | | |
723 | | void diff_tree_oid(const struct object_id *old_oid, |
724 | | const struct object_id *new_oid, |
725 | | const char *base_str, struct diff_options *opt) |
726 | 13.2k | { |
727 | 13.2k | struct strbuf base; |
728 | | |
729 | 13.2k | strbuf_init(&base, PATH_MAX); |
730 | 13.2k | strbuf_addstr(&base, base_str); |
731 | | |
732 | 13.2k | ll_diff_tree_oid(old_oid, new_oid, &base, opt); |
733 | 13.2k | if (!*base_str && opt->flags.follow_renames && diff_might_be_rename()) |
734 | 0 | try_to_follow_renames(old_oid, new_oid, &base, opt); |
735 | | |
736 | 13.2k | strbuf_release(&base); |
737 | 13.2k | } |
738 | | |
void diff_root_tree_oid(const struct object_id *new_oid,
			const char *base,
			struct diff_options *opt)
{
	/*
	 * Diff a root commit's tree against nothing: passing NULL for the
	 * old side makes every path come out as an addition.
	 */
	diff_tree_oid(NULL, new_oid, base, opt);
}