Line | Count | Source
1 | | #define USE_THE_REPOSITORY_VARIABLE |
2 | | |
3 | | #include "git-compat-util.h" |
4 | | #include "hex.h" |
5 | | #include "repository.h" |
6 | | #include "tempfile.h" |
7 | | #include "lockfile.h" |
8 | | #include "object-store-ll.h" |
9 | | #include "commit.h" |
10 | | #include "tag.h" |
11 | | #include "pkt-line.h" |
12 | | #include "refs.h" |
13 | | #include "oid-array.h" |
14 | | #include "path.h" |
15 | | #include "diff.h" |
16 | | #include "revision.h" |
17 | | #include "commit-slab.h" |
18 | | #include "list-objects.h" |
19 | | #include "commit-reach.h" |
20 | | #include "shallow.h" |
21 | | #include "statinfo.h" |
22 | | #include "trace.h" |
23 | | |
24 | | void set_alternate_shallow_file(struct repository *r, const char *path, int override) |
25 | 0 | { |
26 | 0 | if (r->parsed_objects->is_shallow != -1) |
27 | 0 | BUG("is_repository_shallow must not be called before set_alternate_shallow_file"); |
28 | 0 | if (r->parsed_objects->alternate_shallow_file && !override) |
29 | 0 | return; |
30 | 0 | free(r->parsed_objects->alternate_shallow_file); |
31 | 0 | r->parsed_objects->alternate_shallow_file = xstrdup_or_null(path); |
32 | 0 | } |
33 | | |
34 | | int register_shallow(struct repository *r, const struct object_id *oid) |
35 | 0 | { |
36 | 0 | struct commit_graft *graft = |
37 | 0 | xmalloc(sizeof(struct commit_graft)); |
38 | 0 | struct commit *commit = lookup_commit(r, oid); |
39 | |
40 | 0 | oidcpy(&graft->oid, oid); |
41 | 0 | graft->nr_parent = -1; |
42 | 0 | if (commit && commit->object.parsed) { |
43 | 0 | free_commit_list(commit->parents); |
44 | 0 | commit->parents = NULL; |
45 | 0 | } |
46 | 0 | return register_commit_graft(r, graft, 0); |
47 | 0 | } |
48 | | |
49 | | int unregister_shallow(const struct object_id *oid) |
50 | 0 | { |
51 | 0 | int pos = commit_graft_pos(the_repository, oid); |
52 | 0 | if (pos < 0) |
53 | 0 | return -1; |
54 | 0 | if (pos + 1 < the_repository->parsed_objects->grafts_nr) |
55 | 0 | MOVE_ARRAY(the_repository->parsed_objects->grafts + pos, |
56 | 0 | the_repository->parsed_objects->grafts + pos + 1, |
57 | 0 | the_repository->parsed_objects->grafts_nr - pos - 1); |
58 | 0 | the_repository->parsed_objects->grafts_nr--; |
59 | 0 | return 0; |
60 | 0 | } |
61 | | |
62 | | int is_repository_shallow(struct repository *r) |
63 | 0 | { |
64 | 0 | FILE *fp; |
65 | 0 | char buf[1024]; |
66 | 0 | const char *path = r->parsed_objects->alternate_shallow_file; |
67 | |
68 | 0 | if (r->parsed_objects->is_shallow >= 0) |
69 | 0 | return r->parsed_objects->is_shallow; |
70 | | |
71 | 0 | if (!path) |
72 | 0 | path = git_path_shallow(r); |
73 | | /* |
74 | | * fetch-pack sets '--shallow-file ""' as an indicator that no |
75 | | * shallow file should be used. We could just open it and it |
76 | | * will likely fail. But let's do an explicit check instead. |
77 | | */ |
78 | 0 | if (!*path || (fp = fopen(path, "r")) == NULL) { |
79 | 0 | stat_validity_clear(r->parsed_objects->shallow_stat); |
80 | 0 | r->parsed_objects->is_shallow = 0; |
81 | 0 | return r->parsed_objects->is_shallow; |
82 | 0 | } |
83 | 0 | stat_validity_update(r->parsed_objects->shallow_stat, fileno(fp)); |
84 | 0 | r->parsed_objects->is_shallow = 1; |
85 | |
86 | 0 | while (fgets(buf, sizeof(buf), fp)) { |
87 | 0 | struct object_id oid; |
88 | 0 | if (get_oid_hex(buf, &oid)) |
89 | 0 | die("bad shallow line: %s", buf); |
90 | 0 | register_shallow(r, &oid); |
91 | 0 | } |
92 | 0 | fclose(fp); |
93 | 0 | return r->parsed_objects->is_shallow; |
94 | 0 | } |
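The shallow file parsed above is plain text: one full object ID in lowercase hex per line, nothing else. is_repository_shallow() reads it at most once, registers every entry as a parentless graft, and caches the answer in is_shallow. A minimal, hypothetical call site (assuming code compiled inside git.git, with USE_THE_REPOSITORY_VARIABLE defined as in this file) could look like:

    /* Hypothetical sketch, not part of shallow.c: refuse to run an
     * operation that cannot cope with a cut-off history. */
    if (is_repository_shallow(the_repository))
        die("this operation does not work in a shallow repository");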
95 | | |
96 | | static void reset_repository_shallow(struct repository *r) |
97 | 0 | { |
98 | 0 | r->parsed_objects->is_shallow = -1; |
99 | 0 | stat_validity_clear(r->parsed_objects->shallow_stat); |
100 | 0 | reset_commit_grafts(r); |
101 | 0 | } |
102 | | |
103 | | int commit_shallow_file(struct repository *r, struct shallow_lock *lk) |
104 | 0 | { |
105 | 0 | int res = commit_lock_file(&lk->lock); |
106 | 0 | reset_repository_shallow(r); |
107 | | |
108 | | /* |
109 | | * Update in-memory data structures with the new shallow information, |
110 | | * including unparsing all commits that now have grafts. |
111 | | */ |
112 | 0 | is_repository_shallow(r); |
113 | |
114 | 0 | return res; |
115 | 0 | } |
116 | | |
117 | | void rollback_shallow_file(struct repository *r, struct shallow_lock *lk) |
118 | 0 | { |
119 | 0 | rollback_lock_file(&lk->lock); |
120 | 0 | reset_repository_shallow(r); |
121 | 0 | } |
122 | | |
123 | | /* |
124 | | * TODO: use "int" elemtype instead of "int *" when/if commit-slab |
125 | | * supports a "valid" flag. |
126 | | */ |
127 | | define_commit_slab(commit_depth, int *); |
128 | | static void free_depth_in_slab(int **ptr) |
129 | 0 | { |
130 | 0 | FREE_AND_NULL(*ptr); |
131 | 0 | } |
132 | | struct commit_list *get_shallow_commits(struct object_array *heads, int depth, |
133 | | int shallow_flag, int not_shallow_flag) |
134 | 0 | { |
135 | 0 | int i = 0, cur_depth = 0; |
136 | 0 | struct commit_list *result = NULL; |
137 | 0 | struct object_array stack = OBJECT_ARRAY_INIT; |
138 | 0 | struct commit *commit = NULL; |
139 | 0 | struct commit_graft *graft; |
140 | 0 | struct commit_depth depths; |
141 | |
142 | 0 | init_commit_depth(&depths); |
143 | 0 | while (commit || i < heads->nr || stack.nr) { |
144 | 0 | struct commit_list *p; |
145 | 0 | if (!commit) { |
146 | 0 | if (i < heads->nr) { |
147 | 0 | int **depth_slot; |
148 | 0 | commit = (struct commit *) |
149 | 0 | deref_tag(the_repository, |
150 | 0 | heads->objects[i++].item, |
151 | 0 | NULL, 0); |
152 | 0 | if (!commit || commit->object.type != OBJ_COMMIT) { |
153 | 0 | commit = NULL; |
154 | 0 | continue; |
155 | 0 | } |
156 | 0 | depth_slot = commit_depth_at(&depths, commit); |
157 | 0 | if (!*depth_slot) |
158 | 0 | *depth_slot = xmalloc(sizeof(int)); |
159 | 0 | **depth_slot = 0; |
160 | 0 | cur_depth = 0; |
161 | 0 | } else { |
162 | 0 | commit = (struct commit *) |
163 | 0 | object_array_pop(&stack); |
164 | 0 | cur_depth = **commit_depth_at(&depths, commit); |
165 | 0 | } |
166 | 0 | } |
167 | 0 | parse_commit_or_die(commit); |
168 | 0 | cur_depth++; |
169 | 0 | if ((depth != INFINITE_DEPTH && cur_depth >= depth) || |
170 | 0 | (is_repository_shallow(the_repository) && !commit->parents && |
171 | 0 | (graft = lookup_commit_graft(the_repository, &commit->object.oid)) != NULL && |
172 | 0 | graft->nr_parent < 0)) { |
173 | 0 | commit_list_insert(commit, &result); |
174 | 0 | commit->object.flags |= shallow_flag; |
175 | 0 | commit = NULL; |
176 | 0 | continue; |
177 | 0 | } |
178 | 0 | commit->object.flags |= not_shallow_flag; |
179 | 0 | for (p = commit->parents, commit = NULL; p; p = p->next) { |
180 | 0 | int **depth_slot = commit_depth_at(&depths, p->item); |
181 | 0 | if (!*depth_slot) { |
182 | 0 | *depth_slot = xmalloc(sizeof(int)); |
183 | 0 | **depth_slot = cur_depth; |
184 | 0 | } else { |
185 | 0 | if (cur_depth >= **depth_slot) |
186 | 0 | continue; |
187 | 0 | **depth_slot = cur_depth; |
188 | 0 | } |
189 | 0 | if (p->next) |
190 | 0 | add_object_array(&p->item->object, |
191 | 0 | NULL, &stack); |
192 | 0 | else { |
193 | 0 | commit = p->item; |
194 | 0 | cur_depth = **commit_depth_at(&depths, commit); |
195 | 0 | } |
196 | 0 | } |
197 | 0 | } |
198 | 0 | deep_clear_commit_depth(&depths, free_depth_in_slab); |
199 | |
200 | 0 | return result; |
201 | 0 | } |
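get_shallow_commits() walks down from each requested tip, tracking per-commit depth in a commit slab, and returns the commits that sit at the requested cut-off (or at an already-registered shallow graft); those are the commits that would become the new shallow boundary. A hedged sketch of a call site, where the flag bits and the heads array are hypothetical, not taken from Git:

    /* Sketch only: flag bits are caller-chosen object flags. */
    #define MY_SHALLOW     (1u << 16)
    #define MY_NOT_SHALLOW (1u << 17)

    struct object_array heads = OBJECT_ARRAY_INIT;
    /* ... add the requested tips with add_object_array() ... */
    struct commit_list *border =
        get_shallow_commits(&heads, 2 /* --depth=2 */,
                            MY_SHALLOW, MY_NOT_SHALLOW);
    /* border lists the commits whose parents get cut off */
    free_commit_list(border);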
202 | | |
203 | | static void show_commit(struct commit *commit, void *data) |
204 | 0 | { |
205 | 0 | commit_list_insert(commit, data); |
206 | 0 | } |
207 | | |
208 | | /* |
209 | | * Given rev-list arguments, run rev-list. All reachable commits |
210 | | * except border ones are marked with not_shallow_flag. Border commits |
211 | | * are marked with shallow_flag. The list of border/shallow commits |
212 | | * is also returned.
213 | | */ |
214 | | struct commit_list *get_shallow_commits_by_rev_list(int ac, const char **av, |
215 | | int shallow_flag, |
216 | | int not_shallow_flag) |
217 | 0 | { |
218 | 0 | struct commit_list *result = NULL, *p; |
219 | 0 | struct commit_list *not_shallow_list = NULL; |
220 | 0 | struct rev_info revs; |
221 | 0 | int both_flags = shallow_flag | not_shallow_flag; |
222 | | |
223 | | /* |
224 | | * SHALLOW (excluded) and NOT_SHALLOW (included) should not be |
225 | | * set at this point. But better be safe than sorry. |
226 | | */ |
227 | 0 | clear_object_flags(both_flags); |
228 | |
229 | 0 | is_repository_shallow(the_repository); /* make sure shallows are read */ |
230 | |
231 | 0 | repo_init_revisions(the_repository, &revs, NULL); |
232 | 0 | save_commit_buffer = 0; |
233 | 0 | setup_revisions(ac, av, &revs, NULL); |
234 | |
235 | 0 | if (prepare_revision_walk(&revs)) |
236 | 0 | die("revision walk setup failed"); |
237 | 0 | traverse_commit_list(&revs, show_commit, NULL, ¬_shallow_list); |
238 | |
239 | 0 | if (!not_shallow_list) |
240 | 0 | die("no commits selected for shallow requests"); |
241 | | |
242 | | /* Mark all reachable commits as NOT_SHALLOW */ |
243 | 0 | for (p = not_shallow_list; p; p = p->next) |
244 | 0 | p->item->object.flags |= not_shallow_flag; |
245 | | |
246 | | /* |
247 | | * mark border commits SHALLOW + NOT_SHALLOW. |
248 | | * We cannot clear NOT_SHALLOW right now. Imagine border |
249 | | * commit A is processed first, then commit B, whose parent is |
250 | | * A, later. If NOT_SHALLOW on A is cleared at step 1, B |
251 | | * itself is considered border at step 2, which is incorrect. |
252 | | */ |
253 | 0 | for (p = not_shallow_list; p; p = p->next) { |
254 | 0 | struct commit *c = p->item; |
255 | 0 | struct commit_list *parent; |
256 | |
257 | 0 | if (repo_parse_commit(the_repository, c)) |
258 | 0 | die("unable to parse commit %s", |
259 | 0 | oid_to_hex(&c->object.oid)); |
260 | | |
261 | 0 | for (parent = c->parents; parent; parent = parent->next) |
262 | 0 | if (!(parent->item->object.flags & not_shallow_flag)) { |
263 | 0 | c->object.flags |= shallow_flag; |
264 | 0 | commit_list_insert(c, &result); |
265 | 0 | break; |
266 | 0 | } |
267 | 0 | } |
268 | 0 | free_commit_list(not_shallow_list); |
269 | | |
270 | | /* |
271 | | * Now we can clean up NOT_SHALLOW on border commits. Having |
272 | | * both flags set can confuse the caller. |
273 | | */ |
274 | 0 | for (p = result; p; p = p->next) { |
275 | 0 | struct object *o = &p->item->object; |
276 | 0 | if ((o->flags & both_flags) == both_flags) |
277 | 0 | o->flags &= ~not_shallow_flag; |
278 | 0 | } |
279 | 0 | release_revisions(&revs); |
280 | 0 | return result; |
281 | 0 | } |
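Unlike the depth-based walk, this variant lets rev-list arguments define the kept history, which is how date- or rev-based cut-offs (--shallow-since, --shallow-exclude) are turned into a concrete shallow boundary on the serving side. A hedged sketch of how a caller might drive it; the argument strings are purely illustrative and reuse the hypothetical flag bits from the sketch above:

    /* Sketch only: keep commits newer than a fixed timestamp. */
    const char *av[] = { "rev-list", "--max-age=1700000000", "--all" };
    struct commit_list *border =
        get_shallow_commits_by_rev_list(3, av, MY_SHALLOW, MY_NOT_SHALLOW);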
282 | | |
283 | | static void check_shallow_file_for_update(struct repository *r) |
284 | 0 | { |
285 | 0 | if (r->parsed_objects->is_shallow == -1) |
286 | 0 | BUG("shallow must be initialized by now"); |
287 | | |
288 | 0 | if (!stat_validity_check(r->parsed_objects->shallow_stat, |
289 | 0 | git_path_shallow(r))) |
290 | 0 | die("shallow file has changed since we read it"); |
291 | 0 | } |
292 | | |
293 | 0 | #define SEEN_ONLY 1 |
294 | 0 | #define VERBOSE 2 |
295 | 0 | #define QUICK 4 |
296 | | |
297 | | struct write_shallow_data { |
298 | | struct strbuf *out; |
299 | | int use_pack_protocol; |
300 | | int count; |
301 | | unsigned flags; |
302 | | }; |
303 | | |
304 | | static int write_one_shallow(const struct commit_graft *graft, void *cb_data) |
305 | 0 | { |
306 | 0 | struct write_shallow_data *data = cb_data; |
307 | 0 | const char *hex = oid_to_hex(&graft->oid); |
308 | 0 | if (graft->nr_parent != -1) |
309 | 0 | return 0; |
310 | 0 | if (data->flags & QUICK) { |
311 | 0 | if (!repo_has_object_file(the_repository, &graft->oid)) |
312 | 0 | return 0; |
313 | 0 | } else if (data->flags & SEEN_ONLY) { |
314 | 0 | struct commit *c = lookup_commit(the_repository, &graft->oid); |
315 | 0 | if (!c || !(c->object.flags & SEEN)) { |
316 | 0 | if (data->flags & VERBOSE) |
317 | 0 | printf("Removing %s from .git/shallow\n", |
318 | 0 | oid_to_hex(&c->object.oid)); |
319 | 0 | return 0; |
320 | 0 | } |
321 | 0 | } |
322 | 0 | data->count++; |
323 | 0 | if (data->use_pack_protocol) |
324 | 0 | packet_buf_write(data->out, "shallow %s", hex); |
325 | 0 | else { |
326 | 0 | strbuf_addstr(data->out, hex); |
327 | 0 | strbuf_addch(data->out, '\n'); |
328 | 0 | } |
329 | 0 | return 0; |
330 | 0 | } |
331 | | |
332 | | static int write_shallow_commits_1(struct strbuf *out, int use_pack_protocol, |
333 | | const struct oid_array *extra, |
334 | | unsigned flags) |
335 | 0 | { |
336 | 0 | struct write_shallow_data data; |
337 | 0 | int i; |
338 | 0 | data.out = out; |
339 | 0 | data.use_pack_protocol = use_pack_protocol; |
340 | 0 | data.count = 0; |
341 | 0 | data.flags = flags; |
342 | 0 | for_each_commit_graft(write_one_shallow, &data); |
343 | 0 | if (!extra) |
344 | 0 | return data.count; |
345 | 0 | for (i = 0; i < extra->nr; i++) { |
346 | 0 | strbuf_addstr(out, oid_to_hex(extra->oid + i)); |
347 | 0 | strbuf_addch(out, '\n'); |
348 | 0 | data.count++; |
349 | 0 | } |
350 | 0 | return data.count; |
351 | 0 | } |
352 | | |
353 | | int write_shallow_commits(struct strbuf *out, int use_pack_protocol, |
354 | | const struct oid_array *extra) |
355 | 0 | { |
356 | 0 | return write_shallow_commits_1(out, use_pack_protocol, extra, 0); |
357 | 0 | } |
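write_shallow_commits() serializes the currently registered shallow grafts (plus any extra object IDs) either as plain "<hex>\n" lines or, when use_pack_protocol is set, as "shallow <hex>" pkt-lines. A small sketch that dumps the current shallow set in the plain format used for .git/shallow:

    /* Sketch only: no pack protocol framing, no extra entries. */
    struct strbuf sb = STRBUF_INIT;
    if (write_shallow_commits(&sb, 0, NULL))
        fputs(sb.buf, stdout);
    strbuf_release(&sb);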
358 | | |
359 | | const char *setup_temporary_shallow(const struct oid_array *extra) |
360 | 0 | { |
361 | 0 | struct tempfile *temp; |
362 | 0 | struct strbuf sb = STRBUF_INIT; |
363 | |
364 | 0 | if (write_shallow_commits(&sb, 0, extra)) { |
365 | 0 | temp = xmks_tempfile(git_path("shallow_XXXXXX")); |
366 | |
367 | 0 | if (write_in_full(temp->fd, sb.buf, sb.len) < 0 || |
368 | 0 | close_tempfile_gently(temp) < 0) |
369 | 0 | die_errno("failed to write to %s", |
370 | 0 | get_tempfile_path(temp)); |
371 | 0 | strbuf_release(&sb); |
372 | 0 | return get_tempfile_path(temp); |
373 | 0 | } |
374 | | /* |
375 | | * is_repository_shallow() sees empty string as "no shallow |
376 | | * file". |
377 | | */ |
378 | 0 | return ""; |
379 | 0 | } |
380 | | |
381 | | void setup_alternate_shallow(struct shallow_lock *shallow_lock, |
382 | | const char **alternate_shallow_file, |
383 | | const struct oid_array *extra) |
384 | 0 | { |
385 | 0 | struct strbuf sb = STRBUF_INIT; |
386 | 0 | int fd; |
387 | |
|
388 | 0 | fd = hold_lock_file_for_update(&shallow_lock->lock, |
389 | 0 | git_path_shallow(the_repository), |
390 | 0 | LOCK_DIE_ON_ERROR); |
391 | 0 | check_shallow_file_for_update(the_repository); |
392 | 0 | if (write_shallow_commits(&sb, 0, extra)) { |
393 | 0 | if (write_in_full(fd, sb.buf, sb.len) < 0) |
394 | 0 | die_errno("failed to write to %s", |
395 | 0 | get_lock_file_path(&shallow_lock->lock)); |
396 | 0 | *alternate_shallow_file = get_lock_file_path(&shallow_lock->lock); |
397 | 0 | } else |
398 | | /* |
399 | | * is_repository_shallow() sees empty string as "no |
400 | | * shallow file". |
401 | | */ |
402 | 0 | *alternate_shallow_file = ""; |
403 | 0 | strbuf_release(&sb); |
404 | 0 | } |
405 | | |
406 | | static int advertise_shallow_grafts_cb(const struct commit_graft *graft, void *cb) |
407 | 0 | { |
408 | 0 | int fd = *(int *)cb; |
409 | 0 | if (graft->nr_parent == -1) |
410 | 0 | packet_write_fmt(fd, "shallow %s\n", oid_to_hex(&graft->oid)); |
411 | 0 | return 0; |
412 | 0 | } |
413 | | |
414 | | void advertise_shallow_grafts(int fd) |
415 | 0 | { |
416 | 0 | if (!is_repository_shallow(the_repository)) |
417 | 0 | return; |
418 | 0 | for_each_commit_graft(advertise_shallow_grafts_cb, &fd); |
419 | 0 | } |
420 | | |
421 | | /* |
422 | | * mark_reachable_objects() should have been run prior to this and all |
423 | | * reachable commits marked as "SEEN", except when quick_prune is non-zero, |
424 | | * in which case lines are excised from the shallow file if they refer to |
425 | | * commits that do not exist (any longer). |
426 | | */ |
427 | | void prune_shallow(unsigned options) |
428 | 0 | { |
429 | 0 | struct shallow_lock shallow_lock = SHALLOW_LOCK_INIT; |
430 | 0 | struct strbuf sb = STRBUF_INIT; |
431 | 0 | unsigned flags = SEEN_ONLY; |
432 | 0 | int fd; |
433 | |
434 | 0 | if (options & PRUNE_QUICK) |
435 | 0 | flags |= QUICK; |
436 | |
437 | 0 | if (options & PRUNE_SHOW_ONLY) { |
438 | 0 | flags |= VERBOSE; |
439 | 0 | write_shallow_commits_1(&sb, 0, NULL, flags); |
440 | 0 | strbuf_release(&sb); |
441 | 0 | return; |
442 | 0 | } |
443 | 0 | fd = hold_lock_file_for_update(&shallow_lock.lock, |
444 | 0 | git_path_shallow(the_repository), |
445 | 0 | LOCK_DIE_ON_ERROR); |
446 | 0 | check_shallow_file_for_update(the_repository); |
447 | 0 | if (write_shallow_commits_1(&sb, 0, NULL, flags)) { |
448 | 0 | if (write_in_full(fd, sb.buf, sb.len) < 0) |
449 | 0 | die_errno("failed to write to %s", |
450 | 0 | get_lock_file_path(&shallow_lock.lock)); |
451 | 0 | commit_shallow_file(the_repository, &shallow_lock); |
452 | 0 | } else { |
453 | 0 | unlink(git_path_shallow(the_repository)); |
454 | 0 | rollback_shallow_file(the_repository, &shallow_lock); |
455 | 0 | } |
456 | 0 | strbuf_release(&sb); |
457 | 0 | } |
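prune_shallow() rewrites .git/shallow so it only keeps useful entries, or removes the file entirely when nothing is left, which is why the comment above requires mark_reachable_objects() to have flagged reachable commits as SEEN first. A hedged sketch of a caller in the spirit of git prune; the show_only variable is hypothetical:

    /* Sketch only: dry-run or really rewrite the shallow file. */
    if (is_repository_shallow(the_repository))
        prune_shallow(show_only ? PRUNE_SHOW_ONLY : 0);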
458 | | |
459 | | struct trace_key trace_shallow = TRACE_KEY_INIT(SHALLOW); |
460 | | |
461 | | /* |
462 | | * Step 1, split sender shallow commits into "ours" and "theirs" |
463 | | * Step 2, clean "ours" based on .git/shallow |
464 | | */ |
465 | | void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa) |
466 | 0 | { |
467 | 0 | int i; |
468 | 0 | trace_printf_key(&trace_shallow, "shallow: prepare_shallow_info\n"); |
469 | 0 | memset(info, 0, sizeof(*info)); |
470 | 0 | info->shallow = sa; |
471 | 0 | if (!sa) |
472 | 0 | return; |
473 | 0 | ALLOC_ARRAY(info->ours, sa->nr); |
474 | 0 | ALLOC_ARRAY(info->theirs, sa->nr); |
475 | 0 | for (i = 0; i < sa->nr; i++) { |
476 | 0 | if (repo_has_object_file(the_repository, sa->oid + i)) { |
477 | 0 | struct commit_graft *graft; |
478 | 0 | graft = lookup_commit_graft(the_repository, |
479 | 0 | &sa->oid[i]); |
480 | 0 | if (graft && graft->nr_parent < 0) |
481 | 0 | continue; |
482 | 0 | info->ours[info->nr_ours++] = i; |
483 | 0 | } else |
484 | 0 | info->theirs[info->nr_theirs++] = i; |
485 | 0 | } |
486 | 0 | } |
487 | | |
488 | | void clear_shallow_info(struct shallow_info *info) |
489 | 0 | { |
490 | 0 | free(info->ours); |
491 | 0 | free(info->theirs); |
492 | 0 | } |
493 | | |
494 | | /* Step 4, remove non-existent ones in "theirs" after getting the pack */ |
495 | | |
496 | | void remove_nonexistent_theirs_shallow(struct shallow_info *info) |
497 | 0 | { |
498 | 0 | struct object_id *oid = info->shallow->oid; |
499 | 0 | int i, dst; |
500 | 0 | trace_printf_key(&trace_shallow, "shallow: remove_nonexistent_theirs_shallow\n"); |
501 | 0 | for (i = dst = 0; i < info->nr_theirs; i++) { |
502 | 0 | if (i != dst) |
503 | 0 | info->theirs[dst] = info->theirs[i]; |
504 | 0 | if (repo_has_object_file(the_repository, oid + info->theirs[i])) |
505 | 0 | dst++; |
506 | 0 | } |
507 | 0 | info->nr_theirs = dst; |
508 | 0 | } |
509 | | |
510 | | define_commit_slab(ref_bitmap, uint32_t *); |
511 | | |
512 | 0 | #define POOL_SIZE (512 * 1024) |
513 | | |
514 | | struct paint_info { |
515 | | struct ref_bitmap ref_bitmap; |
516 | | unsigned nr_bits; |
517 | | char **pools; |
518 | | char *free, *end; |
519 | | unsigned pool_count; |
520 | | }; |
521 | | |
522 | | static uint32_t *paint_alloc(struct paint_info *info) |
523 | 0 | { |
524 | 0 | unsigned nr = DIV_ROUND_UP(info->nr_bits, 32); |
525 | 0 | unsigned size = nr * sizeof(uint32_t); |
526 | 0 | void *p; |
527 | 0 | if (!info->pool_count || size > info->end - info->free) { |
528 | 0 | if (size > POOL_SIZE) |
529 | 0 | BUG("pool size too small for %d in paint_alloc()", |
530 | 0 | size); |
531 | 0 | info->pool_count++; |
532 | 0 | REALLOC_ARRAY(info->pools, info->pool_count); |
533 | 0 | info->free = xmalloc(POOL_SIZE); |
534 | 0 | info->pools[info->pool_count - 1] = info->free; |
535 | 0 | info->end = info->free + POOL_SIZE; |
536 | 0 | } |
537 | 0 | p = info->free; |
538 | 0 | info->free += size; |
539 | 0 | return p; |
540 | 0 | } |
541 | | |
542 | | /* |
543 | | * Given a commit SHA-1, walk down to parents until either SEEN, |
544 | | * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for |
545 | | * all walked commits. |
546 | | */ |
547 | | static void paint_down(struct paint_info *info, const struct object_id *oid, |
548 | | unsigned int id) |
549 | 0 | { |
550 | 0 | unsigned int i, nr; |
551 | 0 | struct commit_list *head = NULL; |
552 | 0 | int bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32); |
553 | 0 | size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr); |
554 | 0 | struct commit *c = lookup_commit_reference_gently(the_repository, oid, |
555 | 0 | 1); |
556 | 0 | uint32_t *tmp; /* to be freed before return */ |
557 | 0 | uint32_t *bitmap; |
558 | |
|
559 | 0 | if (!c) |
560 | 0 | return; |
561 | | |
562 | 0 | tmp = xmalloc(bitmap_size); |
563 | 0 | bitmap = paint_alloc(info); |
564 | 0 | memset(bitmap, 0, bitmap_size); |
565 | 0 | bitmap[id / 32] |= (1U << (id % 32)); |
566 | 0 | commit_list_insert(c, &head); |
567 | 0 | while (head) { |
568 | 0 | struct commit_list *p; |
569 | 0 | struct commit *c = pop_commit(&head); |
570 | 0 | uint32_t **refs = ref_bitmap_at(&info->ref_bitmap, c); |
571 | | |
572 | | /* XXX check "UNINTERESTING" from pack bitmaps if available */ |
573 | 0 | if (c->object.flags & (SEEN | UNINTERESTING)) |
574 | 0 | continue; |
575 | 0 | else |
576 | 0 | c->object.flags |= SEEN; |
577 | | |
578 | 0 | if (!*refs) |
579 | 0 | *refs = bitmap; |
580 | 0 | else { |
581 | 0 | memcpy(tmp, *refs, bitmap_size); |
582 | 0 | for (i = 0; i < bitmap_nr; i++) |
583 | 0 | tmp[i] |= bitmap[i]; |
584 | 0 | if (memcmp(tmp, *refs, bitmap_size)) { |
585 | 0 | *refs = paint_alloc(info); |
586 | 0 | memcpy(*refs, tmp, bitmap_size); |
587 | 0 | } |
588 | 0 | } |
589 | |
590 | 0 | if (c->object.flags & BOTTOM) |
591 | 0 | continue; |
592 | | |
593 | 0 | if (repo_parse_commit(the_repository, c)) |
594 | 0 | die("unable to parse commit %s", |
595 | 0 | oid_to_hex(&c->object.oid)); |
596 | | |
597 | 0 | for (p = c->parents; p; p = p->next) { |
598 | 0 | if (p->item->object.flags & SEEN) |
599 | 0 | continue; |
600 | 0 | commit_list_insert(p->item, &head); |
601 | 0 | } |
602 | 0 | } |
603 | | |
604 | 0 | nr = get_max_object_index(); |
605 | 0 | for (i = 0; i < nr; i++) { |
606 | 0 | struct object *o = get_indexed_object(i); |
607 | 0 | if (o && o->type == OBJ_COMMIT) |
608 | 0 | o->flags &= ~SEEN; |
609 | 0 | } |
610 | |
611 | 0 | free(tmp); |
612 | 0 | } |
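The ref bitmaps used above pack one bit per ref: ref id lands in 32-bit word id / 32 at bit id % 32, and each bitmap holds DIV_ROUND_UP(nr_bits, 32) words. A standalone illustration of that layout in plain C, independent of Git's types:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t bitmap[2] = { 0, 0 };            /* room for 64 refs */
        unsigned id = 37;                         /* hypothetical ref index */

        bitmap[id / 32] |= (1U << (id % 32));     /* mark ref 37 */
        int set = !!(bitmap[id / 32] & (1U << (id % 32)));

        printf("word 1 = 0x%08x, set = %d\n", bitmap[1], set);
        return 0;   /* prints: word 1 = 0x00000020, set = 1 */
    }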
613 | | |
614 | | static int mark_uninteresting(const char *refname UNUSED, |
615 | | const char *referent UNUSED, |
616 | | const struct object_id *oid, |
617 | | int flags UNUSED, |
618 | | void *cb_data UNUSED) |
619 | 0 | { |
620 | 0 | struct commit *commit = lookup_commit_reference_gently(the_repository, |
621 | 0 | oid, 1); |
622 | 0 | if (!commit) |
623 | 0 | return 0; |
624 | 0 | commit->object.flags |= UNINTERESTING; |
625 | 0 | mark_parents_uninteresting(NULL, commit); |
626 | 0 | return 0; |
627 | 0 | } |
628 | | |
629 | | static void post_assign_shallow(struct shallow_info *info, |
630 | | struct ref_bitmap *ref_bitmap, |
631 | | int *ref_status); |
632 | | /* |
633 | | * Step 6(+7), associate shallow commits with new refs |
634 | | * |
635 | | * info->ref must be initialized before calling this function. |
636 | | * |
637 | | * If used is not NULL, it's an array of info->shallow->nr |
638 | | * bitmaps. The n-th bit is set in the m-th bitmap if ref[n] needs the
639 | | * m-th shallow commit from info->shallow. |
640 | | * |
641 | | * If used is NULL, "ours" and "theirs" are updated. And if ref_status |
642 | | * is not NULL it's an array of ref->nr ints. ref_status[i] is true if |
643 | | * the ref needs some shallow commits from either info->ours or |
644 | | * info->theirs. |
645 | | */ |
646 | | void assign_shallow_commits_to_refs(struct shallow_info *info, |
647 | | uint32_t **used, int *ref_status) |
648 | 0 | { |
649 | 0 | struct object_id *oid = info->shallow->oid; |
650 | 0 | struct oid_array *ref = info->ref; |
651 | 0 | unsigned int i, nr; |
652 | 0 | int *shallow, nr_shallow = 0; |
653 | 0 | struct paint_info pi; |
654 | |
655 | 0 | trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n"); |
656 | 0 | ALLOC_ARRAY(shallow, info->nr_ours + info->nr_theirs); |
657 | 0 | for (i = 0; i < info->nr_ours; i++) |
658 | 0 | shallow[nr_shallow++] = info->ours[i]; |
659 | 0 | for (i = 0; i < info->nr_theirs; i++) |
660 | 0 | shallow[nr_shallow++] = info->theirs[i]; |
661 | | |
662 | | /* |
663 | | * Prepare the commit graph to track what refs can reach what |
664 | | * (new) shallow commits. |
665 | | */ |
666 | 0 | nr = get_max_object_index(); |
667 | 0 | for (i = 0; i < nr; i++) { |
668 | 0 | struct object *o = get_indexed_object(i); |
669 | 0 | if (!o || o->type != OBJ_COMMIT) |
670 | 0 | continue; |
671 | | |
672 | 0 | o->flags &= ~(UNINTERESTING | BOTTOM | SEEN); |
673 | 0 | } |
674 | |
675 | 0 | memset(&pi, 0, sizeof(pi)); |
676 | 0 | init_ref_bitmap(&pi.ref_bitmap); |
677 | 0 | pi.nr_bits = ref->nr; |
678 | | |
679 | | /* |
680 | | * "--not --all" to cut short the traversal if new refs |
681 | | * connect to old refs. If not (e.g. force ref updates) it'll |
682 | | * have to go down to the current shallow commits. |
683 | | */ |
684 | 0 | refs_head_ref(get_main_ref_store(the_repository), mark_uninteresting, |
685 | 0 | NULL); |
686 | 0 | refs_for_each_ref(get_main_ref_store(the_repository), |
687 | 0 | mark_uninteresting, NULL); |
688 | | |
689 | | /* Mark potential bottoms so we won't go out of bound */ |
690 | 0 | for (i = 0; i < nr_shallow; i++) { |
691 | 0 | struct commit *c = lookup_commit(the_repository, |
692 | 0 | &oid[shallow[i]]); |
693 | 0 | c->object.flags |= BOTTOM; |
694 | 0 | } |
695 | |
696 | 0 | for (i = 0; i < ref->nr; i++) |
697 | 0 | paint_down(&pi, ref->oid + i, i); |
698 | |
699 | 0 | if (used) { |
700 | 0 | int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t); |
701 | 0 | memset(used, 0, sizeof(*used) * info->shallow->nr); |
702 | 0 | for (i = 0; i < nr_shallow; i++) { |
703 | 0 | const struct commit *c = lookup_commit(the_repository, |
704 | 0 | &oid[shallow[i]]); |
705 | 0 | uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c); |
706 | 0 | if (*map) |
707 | 0 | used[shallow[i]] = xmemdupz(*map, bitmap_size); |
708 | 0 | } |
709 | | /* |
710 | | * unreachable shallow commits are not removed from |
711 | | * "ours" and "theirs". The user is supposed to run |
712 | | * step 7 on every ref separately and not trust "ours" |
713 | | * and "theirs" any more. |
714 | | */ |
715 | 0 | } else |
716 | 0 | post_assign_shallow(info, &pi.ref_bitmap, ref_status); |
717 | |
718 | 0 | clear_ref_bitmap(&pi.ref_bitmap); |
719 | 0 | for (i = 0; i < pi.pool_count; i++) |
720 | 0 | free(pi.pools[i]); |
721 | 0 | free(pi.pools); |
722 | 0 | free(shallow); |
723 | 0 | } |
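When the caller passes a used array, the result is one bitmap per shallow commit, and the n-th bit of the m-th bitmap says that ref n needs the m-th shallow commit. A small hypothetical helper (not from Git) showing how such a result could be queried afterwards:

    /* Sketch only: does ref n depend on the m-th entry of info->shallow,
     * according to the bitmaps filled in by the function above? */
    static int ref_needs_shallow(uint32_t **used, int m, int n)
    {
        return used[m] && (used[m][n / 32] & (1U << (n % 32)));
    }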
724 | | |
725 | | struct commit_array { |
726 | | struct commit **commits; |
727 | | int nr, alloc; |
728 | | }; |
729 | | |
730 | | static int add_ref(const char *refname UNUSED, |
731 | | const char *referent UNUSED, |
732 | | const struct object_id *oid, |
733 | | int flags UNUSED, |
734 | | void *cb_data) |
735 | 0 | { |
736 | 0 | struct commit_array *ca = cb_data; |
737 | 0 | ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc); |
738 | 0 | ca->commits[ca->nr] = lookup_commit_reference_gently(the_repository, |
739 | 0 | oid, 1); |
740 | 0 | if (ca->commits[ca->nr]) |
741 | 0 | ca->nr++; |
742 | 0 | return 0; |
743 | 0 | } |
744 | | |
745 | | static void update_refstatus(int *ref_status, int nr, uint32_t *bitmap) |
746 | 0 | { |
747 | 0 | unsigned int i; |
748 | 0 | if (!ref_status) |
749 | 0 | return; |
750 | 0 | for (i = 0; i < nr; i++) |
751 | 0 | if (bitmap[i / 32] & (1U << (i % 32))) |
752 | 0 | ref_status[i]++; |
753 | 0 | } |
754 | | |
755 | | /* |
756 | | * Step 7, reachability test on "ours" at commit level |
757 | | */ |
758 | | static void post_assign_shallow(struct shallow_info *info, |
759 | | struct ref_bitmap *ref_bitmap, |
760 | | int *ref_status) |
761 | 0 | { |
762 | 0 | struct object_id *oid = info->shallow->oid; |
763 | 0 | struct commit *c; |
764 | 0 | uint32_t **bitmap; |
765 | 0 | int dst, i, j; |
766 | 0 | int bitmap_nr = DIV_ROUND_UP(info->ref->nr, 32); |
767 | 0 | struct commit_array ca; |
768 | |
769 | 0 | trace_printf_key(&trace_shallow, "shallow: post_assign_shallow\n"); |
770 | 0 | if (ref_status) |
771 | 0 | memset(ref_status, 0, sizeof(*ref_status) * info->ref->nr); |
772 | | |
773 | | /* Remove unreachable shallow commits from "theirs" */ |
774 | 0 | for (i = dst = 0; i < info->nr_theirs; i++) { |
775 | 0 | if (i != dst) |
776 | 0 | info->theirs[dst] = info->theirs[i]; |
777 | 0 | c = lookup_commit(the_repository, &oid[info->theirs[i]]); |
778 | 0 | bitmap = ref_bitmap_at(ref_bitmap, c); |
779 | 0 | if (!*bitmap) |
780 | 0 | continue; |
781 | 0 | for (j = 0; j < bitmap_nr; j++) |
782 | 0 | if (bitmap[0][j]) { |
783 | 0 | update_refstatus(ref_status, info->ref->nr, *bitmap); |
784 | 0 | dst++; |
785 | 0 | break; |
786 | 0 | } |
787 | 0 | } |
788 | 0 | info->nr_theirs = dst; |
789 | |
790 | 0 | memset(&ca, 0, sizeof(ca)); |
791 | 0 | refs_head_ref(get_main_ref_store(the_repository), add_ref, &ca); |
792 | 0 | refs_for_each_ref(get_main_ref_store(the_repository), add_ref, &ca); |
793 | | |
794 | | /* Remove unreachable shallow commits from "ours" */ |
795 | 0 | for (i = dst = 0; i < info->nr_ours; i++) { |
796 | 0 | if (i != dst) |
797 | 0 | info->ours[dst] = info->ours[i]; |
798 | 0 | c = lookup_commit(the_repository, &oid[info->ours[i]]); |
799 | 0 | bitmap = ref_bitmap_at(ref_bitmap, c); |
800 | 0 | if (!*bitmap) |
801 | 0 | continue; |
802 | 0 | for (j = 0; j < bitmap_nr; j++) |
803 | 0 | if (bitmap[0][j]) { |
804 | | /* Step 7, reachability test at commit level */ |
805 | 0 | int ret = repo_in_merge_bases_many(the_repository, c, ca.nr, ca.commits, 1); |
806 | 0 | if (ret < 0) |
807 | 0 | exit(128); |
808 | 0 | if (!ret) { |
809 | 0 | update_refstatus(ref_status, info->ref->nr, *bitmap); |
810 | 0 | dst++; |
811 | 0 | break; |
812 | 0 | } |
813 | 0 | } |
814 | 0 | } |
815 | 0 | info->nr_ours = dst; |
816 | |
817 | 0 | free(ca.commits); |
818 | 0 | } |
819 | | |
820 | | /* (Delayed) step 7, reachability test at commit level */ |
821 | | int delayed_reachability_test(struct shallow_info *si, int c) |
822 | 0 | { |
823 | 0 | if (si->need_reachability_test[c]) { |
824 | 0 | struct commit *commit = lookup_commit(the_repository, |
825 | 0 | &si->shallow->oid[c]); |
826 | |
827 | 0 | if (!si->commits) { |
828 | 0 | struct commit_array ca; |
829 | |
830 | 0 | memset(&ca, 0, sizeof(ca)); |
831 | 0 | refs_head_ref(get_main_ref_store(the_repository), |
832 | 0 | add_ref, &ca); |
833 | 0 | refs_for_each_ref(get_main_ref_store(the_repository), |
834 | 0 | add_ref, &ca); |
835 | 0 | si->commits = ca.commits; |
836 | 0 | si->nr_commits = ca.nr; |
837 | 0 | } |
838 | |
839 | 0 | si->reachable[c] = repo_in_merge_bases_many(the_repository, |
840 | 0 | commit, |
841 | 0 | si->nr_commits, |
842 | 0 | si->commits, |
843 | 0 | 1); |
844 | 0 | if (si->reachable[c] < 0) |
845 | 0 | exit(128); |
846 | 0 | si->need_reachability_test[c] = 0; |
847 | 0 | } |
848 | 0 | return si->reachable[c]; |
849 | 0 | } |