Line | Count | Source |
1 | | #define USE_THE_REPOSITORY_VARIABLE |
2 | | |
3 | | #include "git-compat-util.h" |
4 | | #include "gettext.h" |
5 | | #include "pack-revindex.h" |
6 | | #include "object-file.h" |
7 | | #include "object-store-ll.h" |
8 | | #include "packfile.h" |
9 | | #include "strbuf.h" |
10 | | #include "trace2.h" |
11 | | #include "parse.h" |
12 | | #include "midx.h" |
13 | | #include "csum-file.h" |
14 | | |
15 | | struct revindex_entry { |
16 | | off_t offset; |
17 | | unsigned int nr; |
18 | | }; |
19 | | |
20 | | /* |
21 | | * The pack index for an existing pack gives us easy access to the offset in |
22 | | * the corresponding pack file where each object's data starts, but the entries |
23 | | * do not store the size of the compressed representation (the uncompressed |
24 | | * size is easily available by examining the pack entry header). It is |
25 | | * also rather expensive to find the sha1 for an object given its offset. |
26 | | * |
27 | | * The pack index file is sorted by object name mapping to offset; |
28 | | * this revindex array is a list of offset/index_nr pairs |
29 | | * ordered by offset, so if you know the offset of an object, the next offset |
30 | | * is where its packed representation ends, and the index_nr can be used to |
31 | | * get the object sha1 from the main index. |
32 | | */ |
33 | | |
34 | | /* |
35 | | * This is a least-significant-digit radix sort. |
36 | | * |
37 | | * It sorts each of the "n" items in "entries" by its offset field. The "max" |
38 | | * parameter must be at least as large as the largest offset in the array, |
39 | | * and lets us quit the sort early. |
40 | | */ |
41 | | static void sort_revindex(struct revindex_entry *entries, unsigned n, off_t max) |
42 | 0 | { |
43 | | /* |
44 | | * We use a "digit" size of 16 bits. That keeps our memory |
45 | | * usage reasonable, and we can generally (for a 4G or smaller |
46 | | * packfile) quit after two rounds of radix-sorting. |
47 | | */ |
48 | 0 | #define DIGIT_SIZE (16) |
49 | 0 | #define BUCKETS (1 << DIGIT_SIZE) |
50 | | /* |
51 | | * We want to know the bucket that a[i] will go into when we are using |
52 | | * the digit that is N bits from the (least significant) end. |
53 | | */ |
54 | 0 | #define BUCKET_FOR(a, i, bits) (((a)[(i)].offset >> (bits)) & (BUCKETS-1)) |
55 | | |
56 | | /* |
57 | | * We need O(n) temporary storage. Rather than do an extra copy of the |
58 | | * partial results into "entries", we sort back and forth between the |
59 | | * real array and temporary storage. In each iteration of the loop, we |
60 | | * keep track of them with alias pointers, always sorting from "from" |
61 | | * to "to". |
62 | | */ |
63 | 0 | struct revindex_entry *tmp, *from, *to; |
64 | 0 | int bits; |
65 | 0 | unsigned *pos; |
66 | | |
67 | 0 | ALLOC_ARRAY(pos, BUCKETS); |
68 | 0 | ALLOC_ARRAY(tmp, n); |
69 | 0 | from = entries; |
70 | 0 | to = tmp; |
71 | | |
72 | | /* |
73 | | * If (max >> bits) is zero, then we know that the radix digit we are |
74 | | * on (and any higher) will be zero for all entries, and our loop will |
75 | | * be a no-op, as everybody lands in the same zero-th bucket. |
76 | | */ |
77 | 0 | for (bits = 0; max >> bits; bits += DIGIT_SIZE) { |
78 | 0 | unsigned i; |
79 | | |
80 | 0 | memset(pos, 0, BUCKETS * sizeof(*pos)); |
81 | | |
82 | | /* |
83 | | * We want pos[i] to store the index of the last element that |
84 | | * will go in bucket "i" (actually one past the last element). |
85 | | * To do this, we first count the items that will go in each |
86 | | * bucket, which gives us a relative offset from the last |
87 | | * bucket. We can then cumulatively add the index from the |
88 | | * previous bucket to get the true index. |
89 | | */ |
90 | 0 | for (i = 0; i < n; i++) |
91 | 0 | pos[BUCKET_FOR(from, i, bits)]++; |
92 | 0 | for (i = 1; i < BUCKETS; i++) |
93 | 0 | pos[i] += pos[i-1]; |
94 | | |
95 | | /* |
96 | | * Now we can drop the elements into their correct buckets (in |
97 | | * our temporary array). We iterate the pos counter backwards |
98 | | * to avoid using an extra index to count up. And since we are |
99 | | * going backwards there, we must also go backwards through the |
100 | | * array itself, to keep the sort stable. |
101 | | * |
102 | | * Note that we use an unsigned iterator to make sure we can |
103 | | * handle 2^32-1 objects, even on a 32-bit system. But this |
104 | | * means we cannot use the more obvious "i >= 0" loop condition |
105 | | * for counting backwards, and must instead check for |
106 | | * wrap-around with UINT_MAX. |
107 | | */ |
108 | 0 | for (i = n - 1; i != UINT_MAX; i--) |
109 | 0 | to[--pos[BUCKET_FOR(from, i, bits)]] = from[i]; |
110 | | |
111 | | /* |
112 | | * Now "to" contains the most sorted list, so we swap "from" and |
113 | | * "to" for the next iteration. |
114 | | */ |
115 | 0 | SWAP(from, to); |
116 | 0 | } |
117 | | |
118 | | /* |
119 | | * If we ended with our data in the original array, great. If not, |
120 | | * we have to move it back from the temporary storage. |
121 | | */ |
122 | 0 | if (from != entries) |
123 | 0 | COPY_ARRAY(entries, tmp, n); |
124 | 0 | free(tmp); |
125 | 0 | free(pos); |
126 | | |
127 | 0 | #undef BUCKET_FOR |
128 | 0 | #undef BUCKETS |
129 | 0 | #undef DIGIT_SIZE |
130 | 0 | } |
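| | |
| | /* |
| | * Hypothetical sketch (for illustration only; example_sort_revindex is not a |
| | * real helper in this file): sort_revindex() needs "max" only as an upper |
| | * bound on the offsets, so a pack smaller than 4 GiB is fully ordered after |
| | * two 16-bit passes. |
| | */ |
| | static void example_sort_revindex(void) |
| | { |
| |         struct revindex_entry e[] = { |
| |                 { .offset = 0x20000, .nr = 0 }, |
| |                 { .offset = 0x00100, .nr = 1 }, |
| |                 { .offset = 0x10000, .nr = 2 }, |
| |         }; |
| | |
| |         sort_revindex(e, 3, 0x20000); |
| |         /* e[] now holds offsets 0x00100, 0x10000, 0x20000 (nr 1, 2, 0). */ |
| | } |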
131 | | |
132 | | /* |
133 | | * Ordered list of offsets of objects in the pack. |
134 | | */ |
135 | | static void create_pack_revindex(struct packed_git *p) |
136 | 0 | { |
137 | 0 | const unsigned num_ent = p->num_objects; |
138 | 0 | unsigned i; |
139 | 0 | const char *index = p->index_data; |
140 | 0 | const unsigned hashsz = the_hash_algo->rawsz; |
141 | | |
142 | 0 | ALLOC_ARRAY(p->revindex, num_ent + 1); |
143 | 0 | index += 4 * 256; |
144 | | |
145 | 0 | if (p->index_version > 1) { |
146 | 0 | const uint32_t *off_32 = |
147 | 0 | (uint32_t *)(index + 8 + (size_t)p->num_objects * (hashsz + 4)); |
148 | 0 | const uint32_t *off_64 = off_32 + p->num_objects; |
149 | 0 | for (i = 0; i < num_ent; i++) { |
150 | 0 | const uint32_t off = ntohl(*off_32++); |
151 | 0 | if (!(off & 0x80000000)) { |
152 | 0 | p->revindex[i].offset = off; |
153 | 0 | } else { |
154 | 0 | p->revindex[i].offset = get_be64(off_64); |
155 | 0 | off_64 += 2; |
156 | 0 | } |
157 | 0 | p->revindex[i].nr = i; |
158 | 0 | } |
159 | 0 | } else { |
160 | 0 | for (i = 0; i < num_ent; i++) { |
161 | 0 | const uint32_t hl = *((uint32_t *)(index + (hashsz + 4) * i)); |
162 | 0 | p->revindex[i].offset = ntohl(hl); |
163 | 0 | p->revindex[i].nr = i; |
164 | 0 | } |
165 | 0 | } |
166 | | |
167 | | /* |
168 | | * This knows the pack format -- the hash trailer |
169 | | * follows immediately after the last object data. |
170 | | */ |
171 | 0 | p->revindex[num_ent].offset = p->pack_size - hashsz; |
172 | 0 | p->revindex[num_ent].nr = -1; |
173 | 0 | sort_revindex(p->revindex, num_ent, p->pack_size); |
174 | 0 | } |
175 | | |
176 | | static int create_pack_revindex_in_memory(struct packed_git *p) |
177 | 0 | { |
178 | 0 | if (git_env_bool(GIT_TEST_REV_INDEX_DIE_IN_MEMORY, 0)) |
179 | 0 | die("dying as requested by '%s'", |
180 | 0 | GIT_TEST_REV_INDEX_DIE_IN_MEMORY); |
181 | 0 | if (open_pack_index(p)) |
182 | 0 | return -1; |
183 | 0 | create_pack_revindex(p); |
184 | 0 | return 0; |
185 | 0 | } |
186 | | |
187 | | static char *pack_revindex_filename(struct packed_git *p) |
188 | 0 | { |
189 | 0 | size_t len; |
190 | 0 | if (!strip_suffix(p->pack_name, ".pack", &len)) |
191 | 0 | BUG("pack_name does not end in .pack"); |
192 | 0 | return xstrfmt("%.*s.rev", (int)len, p->pack_name); |
193 | 0 | } |
194 | | |
195 | 0 | #define RIDX_HEADER_SIZE (12) |
196 | 0 | #define RIDX_MIN_SIZE (RIDX_HEADER_SIZE + (2 * the_hash_algo->rawsz)) |
197 | | |
198 | | struct revindex_header { |
199 | | uint32_t signature; |
200 | | uint32_t version; |
201 | | uint32_t hash_id; |
202 | | }; |
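| | |
| | /* |
| | * On-disk ".rev" layout, as implied by RIDX_MIN_SIZE and the checks in |
| | * load_revindex_from_disk() below: the 12-byte header above ("RIDX" |
| | * signature, version, hash id), then one network-order uint32_t per object |
| | * giving that object's position in the pack .idx (the table is in pack |
| | * order), then a trailer of two hashes (per the pack format documentation, |
| | * the corresponding pack's checksum followed by a checksum of the .rev |
| | * file itself). |
| | */ |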
203 | | |
204 | | static int load_revindex_from_disk(char *revindex_name, |
205 | | uint32_t num_objects, |
206 | | const uint32_t **data_p, size_t *len_p) |
207 | 0 | { |
208 | 0 | int fd, ret = 0; |
209 | 0 | struct stat st; |
210 | 0 | void *data = NULL; |
211 | 0 | size_t revindex_size; |
212 | 0 | struct revindex_header *hdr; |
213 | | |
214 | 0 | if (git_env_bool(GIT_TEST_REV_INDEX_DIE_ON_DISK, 0)) |
215 | 0 | die("dying as requested by '%s'", GIT_TEST_REV_INDEX_DIE_ON_DISK); |
216 | | |
217 | 0 | fd = git_open(revindex_name); |
218 | | |
219 | 0 | if (fd < 0) { |
220 | | /* "No file" means return 1. */ |
221 | 0 | ret = 1; |
222 | 0 | goto cleanup; |
223 | 0 | } |
224 | 0 | if (fstat(fd, &st)) { |
225 | 0 | ret = error_errno(_("failed to read %s"), revindex_name); |
226 | 0 | goto cleanup; |
227 | 0 | } |
228 | | |
229 | 0 | revindex_size = xsize_t(st.st_size); |
230 | | |
231 | 0 | if (revindex_size < RIDX_MIN_SIZE) { |
232 | 0 | ret = error(_("reverse-index file %s is too small"), revindex_name); |
233 | 0 | goto cleanup; |
234 | 0 | } |
235 | | |
236 | 0 | if (revindex_size - RIDX_MIN_SIZE != st_mult(sizeof(uint32_t), num_objects)) { |
237 | 0 | ret = error(_("reverse-index file %s is corrupt"), revindex_name); |
238 | 0 | goto cleanup; |
239 | 0 | } |
240 | | |
241 | 0 | data = xmmap(NULL, revindex_size, PROT_READ, MAP_PRIVATE, fd, 0); |
242 | 0 | hdr = data; |
243 | | |
244 | 0 | if (ntohl(hdr->signature) != RIDX_SIGNATURE) { |
245 | 0 | ret = error(_("reverse-index file %s has unknown signature"), revindex_name); |
246 | 0 | goto cleanup; |
247 | 0 | } |
248 | 0 | if (ntohl(hdr->version) != 1) { |
249 | 0 | ret = error(_("reverse-index file %s has unsupported version %"PRIu32), |
250 | 0 | revindex_name, ntohl(hdr->version)); |
251 | 0 | goto cleanup; |
252 | 0 | } |
253 | 0 | if (!(ntohl(hdr->hash_id) == 1 || ntohl(hdr->hash_id) == 2)) { |
254 | 0 | ret = error(_("reverse-index file %s has unsupported hash id %"PRIu32), |
255 | 0 | revindex_name, ntohl(hdr->hash_id)); |
256 | 0 | goto cleanup; |
257 | 0 | } |
258 | | |
259 | 0 | cleanup: |
260 | 0 | if (ret) { |
261 | 0 | if (data) |
262 | 0 | munmap(data, revindex_size); |
263 | 0 | } else { |
264 | 0 | *len_p = revindex_size; |
265 | 0 | *data_p = (const uint32_t *)data; |
266 | 0 | } |
267 | | |
268 | 0 | if (fd >= 0) |
269 | 0 | close(fd); |
270 | 0 | return ret; |
271 | 0 | } |
272 | | |
273 | | int load_pack_revindex_from_disk(struct packed_git *p) |
274 | 0 | { |
275 | 0 | char *revindex_name; |
276 | 0 | int ret; |
277 | 0 | if (open_pack_index(p)) |
278 | 0 | return -1; |
279 | | |
280 | 0 | revindex_name = pack_revindex_filename(p); |
281 | | |
282 | 0 | ret = load_revindex_from_disk(revindex_name, |
283 | 0 | p->num_objects, |
284 | 0 | &p->revindex_map, |
285 | 0 | &p->revindex_size); |
286 | 0 | if (ret) |
287 | 0 | goto cleanup; |
288 | | |
289 | 0 | p->revindex_data = (const uint32_t *)((const char *)p->revindex_map + RIDX_HEADER_SIZE); |
290 | | |
291 | 0 | cleanup: |
292 | 0 | free(revindex_name); |
293 | 0 | return ret; |
294 | 0 | } |
295 | | |
296 | | int load_pack_revindex(struct repository *r, struct packed_git *p) |
297 | 0 | { |
298 | 0 | if (p->revindex || p->revindex_data) |
299 | 0 | return 0; |
300 | | |
301 | 0 | prepare_repo_settings(r); |
302 | | |
303 | 0 | if (r->settings.pack_read_reverse_index && |
304 | 0 | !load_pack_revindex_from_disk(p)) |
305 | 0 | return 0; |
306 | 0 | else if (!create_pack_revindex_in_memory(p)) |
307 | 0 | return 0; |
308 | 0 | return -1; |
309 | 0 | } |
310 | | |
311 | | /* |
312 | | * verify_pack_revindex verifies that the on-disk rev-index for the given |
313 | | * pack-file is the same that would be created if written from scratch. |
314 | | * |
315 | | * A negative number is returned on error. |
316 | | */ |
317 | | int verify_pack_revindex(struct packed_git *p) |
318 | 0 | { |
319 | 0 | int res = 0; |
320 | | |
321 | | /* Do not bother checking if not initialized. */ |
322 | 0 | if (!p->revindex_map || !p->revindex_data) |
323 | 0 | return res; |
324 | | |
325 | 0 | if (!hashfile_checksum_valid((const unsigned char *)p->revindex_map, p->revindex_size)) { |
326 | 0 | error(_("invalid checksum")); |
327 | 0 | res = -1; |
328 | 0 | } |
329 | | |
330 | | /* This may fail due to a broken .idx. */ |
331 | 0 | if (create_pack_revindex_in_memory(p)) |
332 | 0 | return res; |
333 | | |
334 | 0 | for (size_t i = 0; i < p->num_objects; i++) { |
335 | 0 | uint32_t nr = p->revindex[i].nr; |
336 | 0 | uint32_t rev_val = get_be32(p->revindex_data + i); |
337 | | |
338 | 0 | if (nr != rev_val) { |
339 | 0 | error(_("invalid rev-index position at %"PRIu64": %"PRIu32" != %"PRIu32""), |
340 | 0 | (uint64_t)i, nr, rev_val); |
341 | 0 | res = -1; |
342 | 0 | } |
343 | 0 | } |
344 | | |
345 | 0 | return res; |
346 | 0 | } |
347 | | |
348 | | static int can_use_midx_ridx_chunk(struct multi_pack_index *m) |
349 | 0 | { |
350 | 0 | if (!m->chunk_revindex) |
351 | 0 | return 0; |
352 | 0 | if (m->chunk_revindex_len != st_mult(sizeof(uint32_t), m->num_objects)) { |
353 | 0 | error(_("multi-pack-index reverse-index chunk is the wrong size")); |
354 | 0 | return 0; |
355 | 0 | } |
356 | 0 | return 1; |
357 | 0 | } |
358 | | |
359 | | int load_midx_revindex(struct multi_pack_index *m) |
360 | 0 | { |
361 | 0 | struct strbuf revindex_name = STRBUF_INIT; |
362 | 0 | int ret; |
363 | | |
364 | 0 | if (m->revindex_data) |
365 | 0 | return 0; |
366 | | |
367 | 0 | if (can_use_midx_ridx_chunk(m)) { |
368 | | /* |
369 | | * If the MIDX `m` has a `RIDX` chunk, then use its contents for |
370 | | * the reverse index instead of trying to load a separate `.rev` |
371 | | * file. |
372 | | * |
373 | | * Note that we do *not* set `m->revindex_map` here, since we do |
374 | | * not want to accidentally call munmap() in the middle of the |
375 | | * MIDX. |
376 | | */ |
377 | 0 | trace2_data_string("load_midx_revindex", the_repository, |
378 | 0 | "source", "midx"); |
379 | 0 | m->revindex_data = (const uint32_t *)m->chunk_revindex; |
380 | 0 | return 0; |
381 | 0 | } |
382 | | |
383 | 0 | trace2_data_string("load_midx_revindex", the_repository, |
384 | 0 | "source", "rev"); |
385 | | |
386 | 0 | get_midx_filename_ext(&revindex_name, m->object_dir, |
387 | 0 | get_midx_checksum(m), MIDX_EXT_REV); |
388 | | |
389 | 0 | ret = load_revindex_from_disk(revindex_name.buf, |
390 | 0 | m->num_objects, |
391 | 0 | &m->revindex_map, |
392 | 0 | &m->revindex_len); |
393 | 0 | if (ret) |
394 | 0 | goto cleanup; |
395 | | |
396 | 0 | m->revindex_data = (const uint32_t *)((const char *)m->revindex_map + RIDX_HEADER_SIZE); |
397 | | |
398 | 0 | cleanup: |
399 | 0 | strbuf_release(&revindex_name); |
400 | 0 | return ret; |
401 | 0 | } |
402 | | |
403 | | int close_midx_revindex(struct multi_pack_index *m) |
404 | 0 | { |
405 | 0 | if (!m || !m->revindex_map) |
406 | 0 | return 0; |
407 | | |
408 | 0 | munmap((void*)m->revindex_map, m->revindex_len); |
409 | | |
410 | 0 | m->revindex_map = NULL; |
411 | 0 | m->revindex_data = NULL; |
412 | 0 | m->revindex_len = 0; |
413 | | |
414 | 0 | return 0; |
415 | 0 | } |
416 | | |
417 | | int offset_to_pack_pos(struct packed_git *p, off_t ofs, uint32_t *pos) |
418 | 0 | { |
419 | 0 | unsigned lo, hi; |
420 | | |
421 | 0 | if (load_pack_revindex(the_repository, p) < 0) |
422 | 0 | return -1; |
423 | | |
424 | 0 | lo = 0; |
425 | 0 | hi = p->num_objects + 1; |
426 | | |
427 | 0 | do { |
428 | 0 | const unsigned mi = lo + (hi - lo) / 2; |
429 | 0 | off_t got = pack_pos_to_offset(p, mi); |
430 | | |
431 | 0 | if (got == ofs) { |
432 | 0 | *pos = mi; |
433 | 0 | return 0; |
434 | 0 | } else if (ofs < got) |
435 | 0 | hi = mi; |
436 | 0 | else |
437 | 0 | lo = mi + 1; |
438 | 0 | } while (lo < hi); |
439 | | |
440 | 0 | error("bad offset for revindex"); |
441 | 0 | return -1; |
442 | 0 | } |
443 | | |
444 | | uint32_t pack_pos_to_index(struct packed_git *p, uint32_t pos) |
445 | 0 | { |
446 | 0 | if (!(p->revindex || p->revindex_data)) |
447 | 0 | BUG("pack_pos_to_index: reverse index not yet loaded"); |
448 | 0 | if (p->num_objects <= pos) |
449 | 0 | BUG("pack_pos_to_index: out-of-bounds object at %"PRIu32, pos); |
450 | | |
451 | 0 | if (p->revindex) |
452 | 0 | return p->revindex[pos].nr; |
453 | 0 | else |
454 | 0 | return get_be32(p->revindex_data + pos); |
455 | 0 | } |
456 | | |
457 | | off_t pack_pos_to_offset(struct packed_git *p, uint32_t pos) |
458 | 0 | { |
459 | 0 | if (!(p->revindex || p->revindex_data)) |
460 | 0 | BUG("pack_pos_to_offset: reverse index not yet loaded"); |
461 | 0 | if (p->num_objects < pos) |
462 | 0 | BUG("pack_pos_to_offset: out-of-bounds object at %"PRIu32, pos); |
463 | | |
464 | 0 | if (p->revindex) |
465 | 0 | return p->revindex[pos].offset; |
466 | 0 | else if (pos == p->num_objects) |
467 | 0 | return p->pack_size - the_hash_algo->rawsz; |
468 | 0 | else |
469 | 0 | return nth_packed_object_offset(p, pack_pos_to_index(p, pos)); |
470 | 0 | } |
471 | | |
472 | | uint32_t pack_pos_to_midx(struct multi_pack_index *m, uint32_t pos) |
473 | 0 | { |
474 | 0 | if (!m->revindex_data) |
475 | 0 | BUG("pack_pos_to_midx: reverse index not yet loaded"); |
476 | 0 | if (m->num_objects <= pos) |
477 | 0 | BUG("pack_pos_to_midx: out-of-bounds object at %"PRIu32, pos); |
478 | 0 | return get_be32(m->revindex_data + pos); |
479 | 0 | } |
480 | | |
481 | | struct midx_pack_key { |
482 | | uint32_t pack; |
483 | | off_t offset; |
484 | | |
485 | | uint32_t preferred_pack; |
486 | | struct multi_pack_index *midx; |
487 | | }; |
488 | | |
489 | | static int midx_pack_order_cmp(const void *va, const void *vb) |
490 | 0 | { |
491 | 0 | const struct midx_pack_key *key = va; |
492 | 0 | struct multi_pack_index *midx = key->midx; |
493 | | |
494 | 0 | uint32_t versus = pack_pos_to_midx(midx, (uint32_t*)vb - (const uint32_t *)midx->revindex_data); |
495 | 0 | uint32_t versus_pack = nth_midxed_pack_int_id(midx, versus); |
496 | 0 | off_t versus_offset; |
497 | | |
498 | 0 | uint32_t key_preferred = key->pack == key->preferred_pack; |
499 | 0 | uint32_t versus_preferred = versus_pack == key->preferred_pack; |
500 | | |
501 | | /* |
502 | | * First, compare the preferred-ness, noting that the preferred pack |
503 | | * comes first. |
504 | | */ |
505 | 0 | if (key_preferred && !versus_preferred) |
506 | 0 | return -1; |
507 | 0 | else if (!key_preferred && versus_preferred) |
508 | 0 | return 1; |
509 | | |
510 | | /* Then, break ties first by comparing the pack IDs. */ |
511 | 0 | if (key->pack < versus_pack) |
512 | 0 | return -1; |
513 | 0 | else if (key->pack > versus_pack) |
514 | 0 | return 1; |
515 | | |
516 | | /* Finally, break ties by comparing offsets within a pack. */ |
517 | 0 | versus_offset = nth_midxed_offset(midx, versus); |
518 | 0 | if (key->offset < versus_offset) |
519 | 0 | return -1; |
520 | 0 | else if (key->offset > versus_offset) |
521 | 0 | return 1; |
522 | | |
523 | 0 | return 0; |
524 | 0 | } |
525 | | |
526 | | static int midx_key_to_pack_pos(struct multi_pack_index *m, |
527 | | struct midx_pack_key *key, |
528 | | uint32_t *pos) |
529 | 0 | { |
530 | 0 | uint32_t *found; |
531 | | |
532 | 0 | if (key->pack >= m->num_packs) |
533 | 0 | BUG("MIDX pack lookup out of bounds (%"PRIu32" >= %"PRIu32")", |
534 | 0 | key->pack, m->num_packs); |
535 | | /* |
536 | | * The preferred pack sorts first, so determine its identifier by |
537 | | * looking at the first object in pseudo-pack order. |
538 | | * |
539 | | * Note that if no --preferred-pack is explicitly given when writing a |
540 | | * multi-pack index, then whichever pack has the lowest identifier |
541 | | * implicitly is preferred (and includes all its objects, since ties are |
542 | | * broken first by pack identifier). |
543 | | */ |
544 | 0 | if (midx_preferred_pack(key->midx, &key->preferred_pack) < 0) |
545 | 0 | return error(_("could not determine preferred pack")); |
546 | | |
547 | 0 | found = bsearch(key, m->revindex_data, m->num_objects, |
548 | 0 | sizeof(*m->revindex_data), |
549 | 0 | midx_pack_order_cmp); |
550 | | |
551 | 0 | if (!found) |
552 | 0 | return -1; |
553 | | |
554 | 0 | *pos = found - m->revindex_data; |
555 | 0 | return 0; |
556 | 0 | } |
557 | | |
558 | | int midx_to_pack_pos(struct multi_pack_index *m, uint32_t at, uint32_t *pos) |
559 | 0 | { |
560 | 0 | struct midx_pack_key key; |
561 | | |
562 | 0 | if (!m->revindex_data) |
563 | 0 | BUG("midx_to_pack_pos: reverse index not yet loaded"); |
564 | 0 | if (m->num_objects <= at) |
565 | 0 | BUG("midx_to_pack_pos: out-of-bounds object at %"PRIu32, at); |
566 | | |
567 | 0 | key.pack = nth_midxed_pack_int_id(m, at); |
568 | 0 | key.offset = nth_midxed_offset(m, at); |
569 | 0 | key.midx = m; |
570 | | |
571 | 0 | return midx_key_to_pack_pos(m, &key, pos); |
572 | 0 | } |
573 | | |
574 | | int midx_pair_to_pack_pos(struct multi_pack_index *m, uint32_t pack_int_id, |
575 | | off_t ofs, uint32_t *pos) |
576 | 0 | { |
577 | 0 | struct midx_pack_key key = { |
578 | 0 | .pack = pack_int_id, |
579 | 0 | .offset = ofs, |
580 | 0 | .midx = m, |
581 | 0 | }; |
582 | 0 | return midx_key_to_pack_pos(m, &key, pos); |
583 | 0 | } |
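| | |
| | /* |
| | * Hypothetical sketch (for illustration only; example_packed_size is not a |
| | * real helper in this file): use the reverse index to recover an object's |
| | * .idx position and the size of its on-disk representation from its offset, |
| | * as described in the comment at the top of the file. |
| | */ |
| | static int example_packed_size(struct packed_git *p, off_t offset, |
| |                                uint32_t *index_nr, off_t *disk_size) |
| | { |
| |         uint32_t pos; |
| | |
| |         if (offset_to_pack_pos(p, offset, &pos) < 0) |
| |                 return -1; |
| | |
| |         *index_nr = pack_pos_to_index(p, pos); |
| |         /* |
| |          * The next entry (or the trailing hash, for the last object) |
| |          * starts where this object's packed data ends. |
| |          */ |
| |         *disk_size = pack_pos_to_offset(p, pos + 1) - offset; |
| |         return 0; |
| | } |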