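/*
 * shallow.c: support for shallow repositories. A shallow clone's
 * history is cut off at "shallow" commits, recorded one hex object ID
 * per line in .git/shallow and represented in memory as commit grafts
 * with nr_parent == -1. This file reads and rewrites that file and
 * computes shallow boundaries for fetch/clone negotiation.
 */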
#define USE_THE_REPOSITORY_VARIABLE

#include "git-compat-util.h"
#include "hex.h"
#include "repository.h"
#include "tempfile.h"
#include "lockfile.h"
#include "odb.h"
#include "commit.h"
#include "tag.h"
#include "pkt-line.h"
#include "refs.h"
#include "oid-array.h"
#include "path.h"
#include "diff.h"
#include "revision.h"
#include "commit-slab.h"
#include "list-objects.h"
#include "commit-reach.h"
#include "shallow.h"
#include "statinfo.h"
#include "trace.h"

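/*
 * Point this repository at an alternate shallow file (fetch-pack
 * uses this via '--shallow-file'). Must be called before the shallow
 * state is first read; passing override replaces an earlier choice.
 */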
void set_alternate_shallow_file(struct repository *r, const char *path, int override)
{
	if (r->parsed_objects->is_shallow != -1)
		BUG("is_repository_shallow must not be called before set_alternate_shallow_file");
	if (r->parsed_objects->alternate_shallow_file && !override)
		return;
	free(r->parsed_objects->alternate_shallow_file);
	r->parsed_objects->alternate_shallow_file = xstrdup_or_null(path);
}

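/*
 * Record a commit as a shallow boundary: install a graft with
 * nr_parent == -1 and drop any parents already parsed for it, so the
 * commit appears parentless from now on.
 */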
int register_shallow(struct repository *r, const struct object_id *oid)
{
	struct commit_graft *graft =
		xmalloc(sizeof(struct commit_graft));
	struct commit *commit = lookup_commit(r, oid);

	oidcpy(&graft->oid, oid);
	graft->nr_parent = -1;
	if (commit && commit->object.parsed) {
		free_commit_list(commit->parents);
		commit->parents = NULL;
	}
	return register_commit_graft(r, graft, 0);
}

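/*
 * Drop the graft for the given commit from the in-memory graft list,
 * shifting the remaining entries down. Returns -1 if no graft was
 * registered for it.
 */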
int unregister_shallow(const struct object_id *oid)
{
	int pos = commit_graft_pos(the_repository, oid);
	if (pos < 0)
		return -1;
	free(the_repository->parsed_objects->grafts[pos]);
	if (pos + 1 < the_repository->parsed_objects->grafts_nr)
		MOVE_ARRAY(the_repository->parsed_objects->grafts + pos,
			   the_repository->parsed_objects->grafts + pos + 1,
			   the_repository->parsed_objects->grafts_nr - pos - 1);
	the_repository->parsed_objects->grafts_nr--;
	return 0;
}

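/*
 * Lazily read the shallow file (or the alternate set above), register
 * every listed commit as shallow, and cache the answer in
 * parsed_objects->is_shallow; later calls return the cached value.
 */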
int is_repository_shallow(struct repository *r)
{
	FILE *fp;
	char buf[1024];
	const char *path = r->parsed_objects->alternate_shallow_file;

	if (r->parsed_objects->is_shallow >= 0)
		return r->parsed_objects->is_shallow;

	if (!path)
		path = git_path_shallow(r);
	/*
	 * fetch-pack sets '--shallow-file ""' as an indicator that no
	 * shallow file should be used. We could just open it and it
	 * will likely fail. But let's do an explicit check instead.
	 */
	if (!*path || (fp = fopen(path, "r")) == NULL) {
		stat_validity_clear(r->parsed_objects->shallow_stat);
		r->parsed_objects->is_shallow = 0;
		return r->parsed_objects->is_shallow;
	}
	stat_validity_update(r->parsed_objects->shallow_stat, fileno(fp));
	r->parsed_objects->is_shallow = 1;

	while (fgets(buf, sizeof(buf), fp)) {
		struct object_id oid;
		if (get_oid_hex(buf, &oid))
			die("bad shallow line: %s", buf);
		register_shallow(r, &oid);
	}
	fclose(fp);
	return r->parsed_objects->is_shallow;
}

static void reset_repository_shallow(struct repository *r)
{
	r->parsed_objects->is_shallow = -1;
	stat_validity_clear(r->parsed_objects->shallow_stat);
	parsed_object_pool_reset_commit_grafts(r->parsed_objects);
}

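/*
 * Commit or roll back a locked rewrite of the shallow file. Both
 * invalidate the cached in-memory shallow state so that it is re-read
 * from disk on the next query.
 */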
int commit_shallow_file(struct repository *r, struct shallow_lock *lk)
{
	int res = commit_lock_file(&lk->lock);
	reset_repository_shallow(r);

	/*
	 * Update in-memory data structures with the new shallow information,
	 * including unparsing all commits that now have grafts.
	 */
	is_repository_shallow(r);

	return res;
}

void rollback_shallow_file(struct repository *r, struct shallow_lock *lk)
{
	rollback_lock_file(&lk->lock);
	reset_repository_shallow(r);
}

/*
 * TODO: use "int" elemtype instead of "int *" when/if commit-slab
 * supports a "valid" flag.
 */
define_commit_slab(commit_depth, int *);
static void free_depth_in_slab(int **ptr)
{
	FREE_AND_NULL(*ptr);
}
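/*
 * Compute the shallow boundary for the given heads with an iterative
 * depth-first walk, tracking each commit's minimum depth in a
 * commit-slab. Commits at the cut-off depth (or already grafted as
 * shallow) are flagged with shallow_flag and collected in the
 * returned list; everything nearer is flagged not_shallow_flag.
 *
 * A hedged usage sketch (not from this file; SHALLOW/NOT_SHALLOW
 * stand in for object flag bits owned by the caller):
 *
 *	struct commit_list *shallows =
 *		get_shallow_commits(&heads, depth, SHALLOW, NOT_SHALLOW);
 */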
struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
					int shallow_flag, int not_shallow_flag)
{
	size_t i = 0;
	int cur_depth = 0;
	struct commit_list *result = NULL;
	struct object_array stack = OBJECT_ARRAY_INIT;
	struct commit *commit = NULL;
	struct commit_graft *graft;
	struct commit_depth depths;

	init_commit_depth(&depths);
	while (commit || i < heads->nr || stack.nr) {
		struct commit_list *p;
		if (!commit) {
			if (i < heads->nr) {
				int **depth_slot;
				commit = (struct commit *)
					deref_tag(the_repository,
						  heads->objects[i++].item,
						  NULL, 0);
				if (!commit || commit->object.type != OBJ_COMMIT) {
					commit = NULL;
					continue;
				}
				depth_slot = commit_depth_at(&depths, commit);
				if (!*depth_slot)
					*depth_slot = xmalloc(sizeof(int));
				**depth_slot = 0;
				cur_depth = 0;
			} else {
				commit = (struct commit *)
					object_array_pop(&stack);
				cur_depth = **commit_depth_at(&depths, commit);
			}
		}
		parse_commit_or_die(commit);
		cur_depth++;
		if ((depth != INFINITE_DEPTH && cur_depth >= depth) ||
		    (is_repository_shallow(the_repository) && !commit->parents &&
		     (graft = lookup_commit_graft(the_repository, &commit->object.oid)) != NULL &&
		     graft->nr_parent < 0)) {
			commit_list_insert(commit, &result);
			commit->object.flags |= shallow_flag;
			commit = NULL;
			continue;
		}
		commit->object.flags |= not_shallow_flag;
		for (p = commit->parents, commit = NULL; p; p = p->next) {
			int **depth_slot = commit_depth_at(&depths, p->item);
			if (!*depth_slot) {
				*depth_slot = xmalloc(sizeof(int));
				**depth_slot = cur_depth;
			} else {
				if (cur_depth >= **depth_slot)
					continue;
				**depth_slot = cur_depth;
			}
			if (p->next)
				add_object_array(&p->item->object,
						 NULL, &stack);
			else {
				commit = p->item;
				cur_depth = **commit_depth_at(&depths, commit);
			}
		}
	}
	deep_clear_commit_depth(&depths, free_depth_in_slab);

	return result;
}

static void show_commit(struct commit *commit, void *data)
{
	commit_list_insert(commit, data);
}

/*
 * Given rev-list arguments, run rev-list. All reachable commits
 * except border ones are marked with not_shallow_flag. Border commits
 * are marked with shallow_flag. The list of border/shallow commits
 * is also returned.
 */
struct commit_list *get_shallow_commits_by_rev_list(struct strvec *argv,
						    int shallow_flag,
						    int not_shallow_flag)
{
	struct commit_list *result = NULL, *p;
	struct commit_list *not_shallow_list = NULL;
	struct rev_info revs;
	int both_flags = shallow_flag | not_shallow_flag;

	/*
	 * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
	 * set at this point. But better be safe than sorry.
	 */
	clear_object_flags(the_repository, both_flags);

	is_repository_shallow(the_repository); /* make sure shallows are read */

	repo_init_revisions(the_repository, &revs, NULL);
	save_commit_buffer = 0;
	setup_revisions_from_strvec(argv, &revs, NULL);

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	traverse_commit_list(&revs, show_commit, NULL, &not_shallow_list);

	if (!not_shallow_list)
		die("no commits selected for shallow requests");

	/* Mark all reachable commits as NOT_SHALLOW */
	for (p = not_shallow_list; p; p = p->next)
		p->item->object.flags |= not_shallow_flag;

	/*
	 * mark border commits SHALLOW + NOT_SHALLOW.
	 * We cannot clear NOT_SHALLOW right now. Imagine border
	 * commit A is processed first, then commit B, whose parent is
	 * A, later. If NOT_SHALLOW on A is cleared at step 1, B
	 * itself is considered border at step 2, which is incorrect.
	 */
	for (p = not_shallow_list; p; p = p->next) {
		struct commit *c = p->item;
		struct commit_list *parent;

		if (repo_parse_commit(the_repository, c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		for (parent = c->parents; parent; parent = parent->next)
			if (!(parent->item->object.flags & not_shallow_flag)) {
				c->object.flags |= shallow_flag;
				commit_list_insert(c, &result);
				break;
			}
	}
	free_commit_list(not_shallow_list);

	/*
	 * Now we can clean up NOT_SHALLOW on border commits. Having
	 * both flags set can confuse the caller.
	 */
	for (p = result; p; p = p->next) {
		struct object *o = &p->item->object;
		if ((o->flags & both_flags) == both_flags)
			o->flags &= ~not_shallow_flag;
	}
	release_revisions(&revs);
	return result;
}

static void check_shallow_file_for_update(struct repository *r)
{
	if (r->parsed_objects->is_shallow == -1)
		BUG("shallow must be initialized by now");

	if (!stat_validity_check(r->parsed_objects->shallow_stat,
				 git_path_shallow(r)))
		die("shallow file has changed since we read it");
}

#define SEEN_ONLY 1
#define VERBOSE 2
#define QUICK 4

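/*
 * Serialize the current shallow grafts (plus optional extra object
 * IDs) into a buffer, one hex ID per line, or as "shallow <hex>"
 * pkt-lines when use_pack_protocol is set. The flags filter what is
 * written: QUICK skips IDs whose objects no longer exist, SEEN_ONLY
 * keeps only commits flagged SEEN, and VERBOSE reports what gets
 * dropped.
 */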
struct write_shallow_data {
	struct strbuf *out;
	int use_pack_protocol;
	int count;
	unsigned flags;
};

static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
{
	struct write_shallow_data *data = cb_data;
	const char *hex = oid_to_hex(&graft->oid);
	if (graft->nr_parent != -1)
		return 0;
	if (data->flags & QUICK) {
		if (!odb_has_object(the_repository->objects, &graft->oid,
				    HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
			return 0;
	} else if (data->flags & SEEN_ONLY) {
		struct commit *c = lookup_commit(the_repository, &graft->oid);
		if (!c || !(c->object.flags & SEEN)) {
			if (data->flags & VERBOSE)
				printf("Removing %s from .git/shallow\n",
				       hex); /* use hex, not c: c may be NULL here */
			return 0;
		}
	}
	data->count++;
	if (data->use_pack_protocol)
		packet_buf_write(data->out, "shallow %s", hex);
	else {
		strbuf_addstr(data->out, hex);
		strbuf_addch(data->out, '\n');
	}
	return 0;
}

static int write_shallow_commits_1(struct strbuf *out, int use_pack_protocol,
				   const struct oid_array *extra,
				   unsigned flags)
{
	struct write_shallow_data data = {
		.out = out,
		.use_pack_protocol = use_pack_protocol,
		.flags = flags,
	};

	for_each_commit_graft(write_one_shallow, &data);
	if (!extra)
		return data.count;
	for (size_t i = 0; i < extra->nr; i++) {
		strbuf_addstr(out, oid_to_hex(extra->oid + i));
		strbuf_addch(out, '\n');
		data.count++;
	}
	return data.count;
}

int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
			  const struct oid_array *extra)
{
	return write_shallow_commits_1(out, use_pack_protocol, extra, 0);
}

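/*
 * Write the current shallow list (plus extras) to a temporary file
 * and return its path, suitable for handing to a subprocess via
 * '--shallow-file'. Returns "" when there is nothing shallow, which
 * is_repository_shallow() treats as "no shallow file".
 */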
const char *setup_temporary_shallow(const struct oid_array *extra)
{
	struct tempfile *temp;
	struct strbuf sb = STRBUF_INIT;

	if (write_shallow_commits(&sb, 0, extra)) {
		char *path = repo_git_path(the_repository, "shallow_XXXXXX");
		temp = xmks_tempfile(path);
		free(path);

		if (write_in_full(temp->fd, sb.buf, sb.len) < 0 ||
		    close_tempfile_gently(temp) < 0)
			die_errno("failed to write to %s",
				  get_tempfile_path(temp));
		strbuf_release(&sb);
		return get_tempfile_path(temp);
	}
	/*
	 * is_repository_shallow() sees empty string as "no shallow
	 * file".
	 */
	return "";
}

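/*
 * Take the lock on the shallow file, write the current shallow list
 * (plus extras) into the lockfile, and point *alternate_shallow_file
 * at it; the caller is expected to later commit or roll back the lock
 * via commit_shallow_file()/rollback_shallow_file().
 */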
void setup_alternate_shallow(struct shallow_lock *shallow_lock,
			     const char **alternate_shallow_file,
			     const struct oid_array *extra)
{
	struct strbuf sb = STRBUF_INIT;
	int fd;

	fd = hold_lock_file_for_update(&shallow_lock->lock,
				       git_path_shallow(the_repository),
				       LOCK_DIE_ON_ERROR);
	check_shallow_file_for_update(the_repository);
	if (write_shallow_commits(&sb, 0, extra)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(&shallow_lock->lock));
		*alternate_shallow_file = get_lock_file_path(&shallow_lock->lock);
	} else
		/*
		 * is_repository_shallow() sees empty string as "no
		 * shallow file".
		 */
		*alternate_shallow_file = "";
	strbuf_release(&sb);
}

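/*
 * Advertise each shallow boundary to the other side as a
 * "shallow <hex>" pkt-line written to the given fd.
 */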
static int advertise_shallow_grafts_cb(const struct commit_graft *graft, void *cb)
{
	int fd = *(int *)cb;
	if (graft->nr_parent == -1)
		packet_write_fmt(fd, "shallow %s\n", oid_to_hex(&graft->oid));
	return 0;
}

void advertise_shallow_grafts(int fd)
{
	if (!is_repository_shallow(the_repository))
		return;
	for_each_commit_graft(advertise_shallow_grafts_cb, &fd);
}

/*
 * mark_reachable_objects() should have been run prior to this and all
 * reachable commits marked as "SEEN", except when quick_prune is non-zero,
 * in which case lines are excised from the shallow file if they refer to
 * commits that do not exist (any longer).
 */
void prune_shallow(unsigned options)
{
	struct shallow_lock shallow_lock = SHALLOW_LOCK_INIT;
	struct strbuf sb = STRBUF_INIT;
	unsigned flags = SEEN_ONLY;
	int fd;

	if (options & PRUNE_QUICK)
		flags |= QUICK;

	if (options & PRUNE_SHOW_ONLY) {
		flags |= VERBOSE;
		write_shallow_commits_1(&sb, 0, NULL, flags);
		strbuf_release(&sb);
		return;
	}
	fd = hold_lock_file_for_update(&shallow_lock.lock,
				       git_path_shallow(the_repository),
				       LOCK_DIE_ON_ERROR);
	check_shallow_file_for_update(the_repository);
	if (write_shallow_commits_1(&sb, 0, NULL, flags)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(&shallow_lock.lock));
		commit_shallow_file(the_repository, &shallow_lock);
	} else {
		unlink(git_path_shallow(the_repository));
		rollback_shallow_file(the_repository, &shallow_lock);
	}
	strbuf_release(&sb);
}

struct trace_key trace_shallow = TRACE_KEY_INIT(SHALLOW);

/*
 * Step 1, split sender shallow commits into "ours" and "theirs"
 * Step 2, clean "ours" based on .git/shallow
 */
void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa)
{
	trace_printf_key(&trace_shallow, "shallow: prepare_shallow_info\n");
	memset(info, 0, sizeof(*info));
	info->shallow = sa;
	if (!sa)
		return;
	ALLOC_ARRAY(info->ours, sa->nr);
	ALLOC_ARRAY(info->theirs, sa->nr);
	for (size_t i = 0; i < sa->nr; i++) {
		if (odb_has_object(the_repository->objects, sa->oid + i,
				   HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR)) {
			struct commit_graft *graft;
			graft = lookup_commit_graft(the_repository,
						    &sa->oid[i]);
			if (graft && graft->nr_parent < 0)
				continue;
			info->ours[info->nr_ours++] = i;
		} else
			info->theirs[info->nr_theirs++] = i;
	}
}

void clear_shallow_info(struct shallow_info *info)
{
	if (info->used_shallow) {
		for (size_t i = 0; i < info->shallow->nr; i++)
			free(info->used_shallow[i]);
		free(info->used_shallow);
	}

	free(info->need_reachability_test);
	free(info->reachable);
	free(info->shallow_ref);
	free(info->ours);
	free(info->theirs);
}

/* Step 4, remove non-existent ones in "theirs" after getting the pack */

void remove_nonexistent_theirs_shallow(struct shallow_info *info)
{
	struct object_id *oid = info->shallow->oid;
	size_t i, dst;
	trace_printf_key(&trace_shallow, "shallow: remove_nonexistent_theirs_shallow\n");
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		if (odb_has_object(the_repository->objects, oid + info->theirs[i],
				   HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
			dst++;
	}
	info->nr_theirs = dst;
}

define_commit_slab(ref_bitmap, uint32_t *);

#define POOL_SIZE (512 * 1024)

struct paint_info {
	struct ref_bitmap ref_bitmap;
	unsigned nr_bits;
	char **pools;
	char *free, *end;
	unsigned pool_count;
};

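/*
 * Carve one bitmap of DIV_ROUND_UP(nr_bits, 32) words out of the
 * current POOL_SIZE pool, allocating a fresh pool when the current
 * one runs out. The pools are freed wholesale at the end of
 * assign_shallow_commits_to_refs(), so individual bitmaps are never
 * freed on their own.
 */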
static uint32_t *paint_alloc(struct paint_info *info)
{
	unsigned nr = DIV_ROUND_UP(info->nr_bits, 32);
	unsigned size = nr * sizeof(uint32_t);
	void *p;
	if (!info->pool_count || info->end < info->free + size) {
		if (size > POOL_SIZE)
			BUG("pool size too small for %d in paint_alloc()",
			    size);
		info->pool_count++;
		REALLOC_ARRAY(info->pools, info->pool_count);
		info->free = xmalloc(POOL_SIZE);
		info->pools[info->pool_count - 1] = info->free;
		info->end = info->free + POOL_SIZE;
	}
	p = info->free;
	info->free += size;
	return p;
}

/*
 * Given a commit SHA-1, walk down to parents until either SEEN,
 * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for
 * all walked commits.
 */
static void paint_down(struct paint_info *info, const struct object_id *oid,
		       unsigned int id)
{
	unsigned int i, nr;
	struct commit_list *head = NULL;
	size_t bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32);
	size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr);
	struct commit *c = lookup_commit_reference_gently(the_repository, oid,
							  1);
	uint32_t *tmp; /* to be freed before return */
	uint32_t *bitmap;

	if (!c)
		return;

	tmp = xmalloc(bitmap_size);
	bitmap = paint_alloc(info);
	memset(bitmap, 0, bitmap_size);
	bitmap[id / 32] |= (1U << (id % 32));
	commit_list_insert(c, &head);
	while (head) {
		struct commit_list *p;
		struct commit *c = pop_commit(&head);
		uint32_t **refs = ref_bitmap_at(&info->ref_bitmap, c);

		/* XXX check "UNINTERESTING" from pack bitmaps if available */
		if (c->object.flags & (SEEN | UNINTERESTING))
			continue;
		else
			c->object.flags |= SEEN;

		if (!*refs)
			*refs = bitmap;
		else {
			memcpy(tmp, *refs, bitmap_size);
			for (i = 0; i < bitmap_nr; i++)
				tmp[i] |= bitmap[i];
			if (memcmp(tmp, *refs, bitmap_size)) {
				*refs = paint_alloc(info);
				memcpy(*refs, tmp, bitmap_size);
			}
		}

		if (c->object.flags & BOTTOM)
			continue;

		if (repo_parse_commit(the_repository, c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		for (p = c->parents; p; p = p->next) {
			if (p->item->object.flags & SEEN)
				continue;
			commit_list_insert(p->item, &head);
		}
	}

	nr = get_max_object_index(the_repository);
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(the_repository, i);
		if (o && o->type == OBJ_COMMIT)
			o->flags &= ~SEEN;
	}

	free(tmp);
}

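/*
 * Ref iteration callback: mark the commit a ref points at (and, via
 * mark_parents_uninteresting(), its ancestry) UNINTERESTING, so that
 * paint_down() can cut the walk short where new refs connect to
 * existing history.
 */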
static int mark_uninteresting(const struct reference *ref, void *cb_data UNUSED)
{
	struct commit *commit = lookup_commit_reference_gently(the_repository,
							       ref->oid, 1);
	if (!commit)
		return 0;
	commit->object.flags |= UNINTERESTING;
	mark_parents_uninteresting(NULL, commit);
	return 0;
}

static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status);
/*
 * Step 6(+7), associate shallow commits with new refs
 *
 * info->ref must be initialized before calling this function.
 *
 * If used is not NULL, it's an array of info->shallow->nr
 * bitmaps. The n-th bit is set in the m-th bitmap if ref[n] needs the
 * m-th shallow commit from info->shallow.
 *
 * If used is NULL, "ours" and "theirs" are updated. And if ref_status
 * is not NULL it's an array of ref->nr ints. ref_status[i] is true if
 * ref[i] needs some shallow commits from either info->ours or
 * info->theirs.
 */
void assign_shallow_commits_to_refs(struct shallow_info *info,
				    uint32_t **used, int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct oid_array *ref = info->ref;
	unsigned int i, nr;
	size_t *shallow, nr_shallow = 0;
	struct paint_info pi;

	trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n");
	ALLOC_ARRAY(shallow, info->nr_ours + info->nr_theirs);
	for (i = 0; i < info->nr_ours; i++)
		shallow[nr_shallow++] = info->ours[i];
	for (i = 0; i < info->nr_theirs; i++)
		shallow[nr_shallow++] = info->theirs[i];

	/*
	 * Prepare the commit graph to track what refs can reach what
	 * (new) shallow commits.
	 */
	nr = get_max_object_index(the_repository);
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(the_repository, i);
		if (!o || o->type != OBJ_COMMIT)
			continue;

		o->flags &= ~(UNINTERESTING | BOTTOM | SEEN);
	}

	memset(&pi, 0, sizeof(pi));
	init_ref_bitmap(&pi.ref_bitmap);
	pi.nr_bits = ref->nr;

	/*
	 * "--not --all" to cut short the traversal if new refs
	 * connect to old refs. If not (e.g. force ref updates) it'll
	 * have to go down to the current shallow commits.
	 */
	refs_head_ref(get_main_ref_store(the_repository), mark_uninteresting,
		      NULL);
	refs_for_each_ref(get_main_ref_store(the_repository),
			  mark_uninteresting, NULL);

	/* Mark potential bottoms so we won't go out of bound */
	for (i = 0; i < nr_shallow; i++) {
		struct commit *c = lookup_commit(the_repository,
						 &oid[shallow[i]]);
		c->object.flags |= BOTTOM;
	}

	for (i = 0; i < ref->nr; i++)
		paint_down(&pi, ref->oid + i, i);

	if (used) {
		int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t);
		MEMZERO_ARRAY(used, info->shallow->nr);
		for (i = 0; i < nr_shallow; i++) {
			const struct commit *c = lookup_commit(the_repository,
							       &oid[shallow[i]]);
			uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c);
			if (*map)
				used[shallow[i]] = xmemdupz(*map, bitmap_size);
		}
		/*
		 * unreachable shallow commits are not removed from
		 * "ours" and "theirs". The user is supposed to run
		 * step 7 on every ref separately and not trust "ours"
		 * and "theirs" any more.
		 */
	} else
		post_assign_shallow(info, &pi.ref_bitmap, ref_status);

	clear_ref_bitmap(&pi.ref_bitmap);
	for (i = 0; i < pi.pool_count; i++)
		free(pi.pools[i]);
	free(pi.pools);
	free(shallow);
}

struct commit_array {
	struct commit **commits;
	size_t nr, alloc;
};

static int add_ref(const struct reference *ref, void *cb_data)
{
	struct commit_array *ca = cb_data;
	ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
	ca->commits[ca->nr] = lookup_commit_reference_gently(the_repository,
							     ref->oid, 1);
	if (ca->commits[ca->nr])
		ca->nr++;
	return 0;
}

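/*
 * Bump ref_status[i] for every ref i whose bit is set in the given
 * bitmap, i.e. every ref that still depends on the shallow commit
 * this bitmap belongs to.
 */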
static void update_refstatus(int *ref_status, size_t nr, uint32_t *bitmap)
{
	if (!ref_status)
		return;
	for (size_t i = 0; i < nr; i++)
		if (bitmap[i / 32] & (1U << (i % 32)))
			ref_status[i]++;
}

/*
 * Step 7, reachability test on "ours" at commit level
 */
static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct commit *c;
	uint32_t **bitmap;
	size_t dst, i, j;
	size_t bitmap_nr = DIV_ROUND_UP(info->ref->nr, 32);
	struct commit_array ca;

	trace_printf_key(&trace_shallow, "shallow: post_assign_shallow\n");
	if (ref_status)
		MEMZERO_ARRAY(ref_status, info->ref->nr);

	/* Remove unreachable shallow commits from "theirs" */
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		c = lookup_commit(the_repository, &oid[info->theirs[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j]) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_theirs = dst;

	memset(&ca, 0, sizeof(ca));
	refs_head_ref(get_main_ref_store(the_repository), add_ref, &ca);
	refs_for_each_ref(get_main_ref_store(the_repository), add_ref, &ca);

	/* Remove unreachable shallow commits from "ours" */
	for (i = dst = 0; i < info->nr_ours; i++) {
		if (i != dst)
			info->ours[dst] = info->ours[i];
		c = lookup_commit(the_repository, &oid[info->ours[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j]) {
				/* Step 7, reachability test at commit level */
				int ret = repo_in_merge_bases_many(the_repository, c, ca.nr, ca.commits, 1);
				if (ret < 0)
					exit(128);
				if (!ret) {
					update_refstatus(ref_status, info->ref->nr, *bitmap);
					dst++;
					break;
				}
			}
	}
	info->nr_ours = dst;

	free(ca.commits);
}

/* (Delayed) step 7, reachability test at commit level */
int delayed_reachability_test(struct shallow_info *si, int c)
{
	if (si->need_reachability_test[c]) {
		struct commit *commit = lookup_commit(the_repository,
						      &si->shallow->oid[c]);

		if (!si->commits) {
			struct commit_array ca;

			memset(&ca, 0, sizeof(ca));
			refs_head_ref(get_main_ref_store(the_repository),
				      add_ref, &ca);
			refs_for_each_ref(get_main_ref_store(the_repository),
					  add_ref, &ca);
			si->commits = ca.commits;
			si->nr_commits = ca.nr;
		}

		si->reachable[c] = repo_in_merge_bases_many(the_repository,
							    commit,
							    si->nr_commits,
							    si->commits,
							    1);
		if (si->reachable[c] < 0)
			exit(128);
		si->need_reachability_test[c] = 0;
	}
	return si->reachable[c];
}