/src/git/reftable/stack.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright 2020 Google LLC |
3 | | * |
4 | | * Use of this source code is governed by a BSD-style |
5 | | * license that can be found in the LICENSE file or at |
6 | | * https://developers.google.com/open-source/licenses/bsd |
7 | | */ |
8 | | |
9 | | #include "stack.h" |
10 | | |
11 | | #include "system.h" |
12 | | #include "constants.h" |
13 | | #include "merged.h" |
14 | | #include "reftable-error.h" |
15 | | #include "reftable-record.h" |
16 | | #include "reftable-merged.h" |
17 | | #include "table.h" |
18 | | #include "writer.h" |
19 | | |
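 | | /* Format the path "<reftable_dir>/<name>" into `dest`. Returns 0 on |
 | |  * success or a negative reftable error code when growing the buffer |
 | |  * fails. */ |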
20 | | static int stack_filename(struct reftable_buf *dest, struct reftable_stack *st, |
21 | | const char *name) |
22 | 0 | { |
23 | 0 | int err; |
24 | 0 | reftable_buf_reset(dest); |
25 | 0 | if ((err = reftable_buf_addstr(dest, st->reftable_dir)) < 0 || |
26 | 0 | (err = reftable_buf_addstr(dest, "/")) < 0 || |
27 | 0 | (err = reftable_buf_addstr(dest, name)) < 0) |
28 | 0 | return err; |
29 | 0 | return 0; |
30 | 0 | } |
31 | | |
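 | | /* Sync `fd` to disk, preferring the caller-provided fsync callback |
 | |  * over plain fsync(2) when one is configured. */ |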
32 | | static int stack_fsync(const struct reftable_write_options *opts, int fd) |
33 | 0 | { |
34 | 0 | if (opts->fsync) |
35 | 0 | return opts->fsync(fd); |
36 | 0 | return fsync(fd); |
37 | 0 | } |
38 | | |
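 | | /* Write all `size` bytes of `data` to `fd`, retrying on short writes |
 | |  * and transient EAGAIN/EINTR errors. Returns the number of bytes |
 | |  * written, or REFTABLE_IO_ERROR on failure. */ |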
39 | | static ssize_t reftable_write_data(int fd, const void *data, size_t size) |
40 | 0 | { |
41 | 0 | size_t total_written = 0; |
42 | 0 | const char *p = data; |
43 | |
44 | 0 | while (total_written < size) { |
45 | 0 | ssize_t bytes_written = write(fd, p, size - total_written); |
46 | 0 | if (bytes_written < 0 && (errno == EAGAIN || errno == EINTR)) |
47 | 0 | continue; |
48 | 0 | if (bytes_written < 0) |
49 | 0 | return REFTABLE_IO_ERROR; |
50 | | |
51 | 0 | total_written += bytes_written; |
52 | 0 | p += bytes_written; |
53 | 0 | } |
54 | | |
55 | 0 | return total_written; |
56 | 0 | } |
57 | | |
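 | | /* Adapter that lets the reftable writer emit its output to a plain |
 | |  * file descriptor while honoring the configured fsync behavior. */ |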
58 | | struct fd_writer { |
59 | | const struct reftable_write_options *opts; |
60 | | int fd; |
61 | | }; |
62 | | |
63 | | static ssize_t fd_writer_write(void *arg, const void *data, size_t sz) |
64 | 0 | { |
65 | 0 | struct fd_writer *writer = arg; |
66 | 0 | return reftable_write_data(writer->fd, data, sz); |
67 | 0 | } |
68 | | |
69 | | static int fd_writer_flush(void *arg) |
70 | 0 | { |
71 | 0 | struct fd_writer *writer = arg; |
72 | 0 | return stack_fsync(writer->opts, writer->fd); |
73 | 0 | } |
74 | | |
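 | | /* Read the whole file behind `fd` into memory and parse it into a |
 | |  * NULL-terminated array of lines, returned via `namesp`. */ |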
75 | | static int fd_read_lines(int fd, char ***namesp) |
76 | 0 | { |
77 | 0 | char *buf = NULL; |
78 | 0 | int err = 0; |
79 | 0 | off_t size; |
80 | |
81 | 0 | size = lseek(fd, 0, SEEK_END); |
82 | 0 | if (size < 0) { |
83 | 0 | err = REFTABLE_IO_ERROR; |
84 | 0 | goto done; |
85 | 0 | } |
86 | | |
87 | 0 | err = lseek(fd, 0, SEEK_SET); |
88 | 0 | if (err < 0) { |
89 | 0 | err = REFTABLE_IO_ERROR; |
90 | 0 | goto done; |
91 | 0 | } |
92 | | |
93 | 0 | REFTABLE_ALLOC_ARRAY(buf, size + 1); |
94 | 0 | if (!buf) { |
95 | 0 | err = REFTABLE_OUT_OF_MEMORY_ERROR; |
96 | 0 | goto done; |
97 | 0 | } |
98 | | |
99 | 0 | for (off_t total_read = 0; total_read < size; ) { |
100 | 0 | ssize_t bytes_read = read(fd, buf + total_read, size - total_read); |
101 | 0 | if (bytes_read < 0 && (errno == EAGAIN || errno == EINTR)) |
102 | 0 | continue; |
103 | 0 | if (bytes_read < 0 || !bytes_read) { |
104 | 0 | err = REFTABLE_IO_ERROR; |
105 | 0 | goto done; |
106 | 0 | } |
107 | | |
108 | 0 | total_read += bytes_read; |
109 | 0 | } |
110 | 0 | buf[size] = 0; |
111 | |
112 | 0 | err = parse_names(buf, size, namesp); |
113 | 0 | done: |
114 | 0 | reftable_free(buf); |
115 | 0 | return err; |
116 | 0 | } |
117 | | |
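 | | /* Like fd_read_lines(), but opens `filename` first. A missing file |
 | |  * is not an error and yields an empty list of names. */ |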
118 | | int read_lines(const char *filename, char ***namesp) |
119 | 0 | { |
120 | 0 | int fd = open(filename, O_RDONLY); |
121 | 0 | int err = 0; |
122 | 0 | if (fd < 0) { |
123 | 0 | if (errno == ENOENT) { |
124 | 0 | REFTABLE_CALLOC_ARRAY(*namesp, 1); |
125 | 0 | if (!*namesp) |
126 | 0 | return REFTABLE_OUT_OF_MEMORY_ERROR; |
127 | 0 | return 0; |
128 | 0 | } |
129 | | |
130 | 0 | return REFTABLE_IO_ERROR; |
131 | 0 | } |
132 | 0 | err = fd_read_lines(fd, namesp); |
133 | 0 | close(fd); |
134 | 0 | return err; |
135 | 0 | } |
136 | | |
137 | | int reftable_stack_init_ref_iterator(struct reftable_stack *st, |
138 | | struct reftable_iterator *it) |
139 | 0 | { |
140 | 0 | return merged_table_init_iter(reftable_stack_merged_table(st), |
141 | 0 | it, REFTABLE_BLOCK_TYPE_REF); |
142 | 0 | } |
143 | | |
144 | | int reftable_stack_init_log_iterator(struct reftable_stack *st, |
145 | | struct reftable_iterator *it) |
146 | 0 | { |
147 | 0 | return merged_table_init_iter(reftable_stack_merged_table(st), |
148 | 0 | it, REFTABLE_BLOCK_TYPE_LOG); |
149 | 0 | } |
150 | | |
151 | | struct reftable_merged_table * |
152 | | reftable_stack_merged_table(struct reftable_stack *st) |
153 | 0 | { |
154 | 0 | return st->merged; |
155 | 0 | } |
156 | | |
157 | | static int has_name(char **names, const char *name) |
158 | 0 | { |
159 | 0 | while (*names) { |
160 | 0 | if (!strcmp(*names, name)) |
161 | 0 | return 1; |
162 | 0 | names++; |
163 | 0 | } |
164 | 0 | return 0; |
165 | 0 | } |
166 | | |
167 | | /* Close and free the stack */ |
168 | | void reftable_stack_destroy(struct reftable_stack *st) |
169 | 0 | { |
170 | 0 | char **names = NULL; |
171 | 0 | int err = 0; |
172 | |
173 | 0 | if (!st) |
174 | 0 | return; |
175 | | |
176 | 0 | if (st->merged) { |
177 | 0 | reftable_merged_table_free(st->merged); |
178 | 0 | st->merged = NULL; |
179 | 0 | } |
180 | |
181 | 0 | err = read_lines(st->list_file, &names); |
182 | 0 | if (err < 0) { |
183 | 0 | REFTABLE_FREE_AND_NULL(names); |
184 | 0 | } |
185 | |
186 | 0 | if (st->tables) { |
187 | 0 | struct reftable_buf filename = REFTABLE_BUF_INIT; |
188 | |
189 | 0 | for (size_t i = 0; i < st->tables_len; i++) { |
190 | 0 | const char *name = reftable_table_name(st->tables[i]); |
191 | 0 | int try_unlinking = 1; |
192 | |
193 | 0 | reftable_buf_reset(&filename); |
194 | 0 | if (names && !has_name(names, name)) { |
195 | 0 | if (stack_filename(&filename, st, name) < 0) |
196 | 0 | try_unlinking = 0; |
197 | 0 | } |
198 | 0 | reftable_table_decref(st->tables[i]); |
199 | |
200 | 0 | if (try_unlinking && filename.len) { |
201 | | /* On Windows, can only unlink after closing. */ |
202 | 0 | unlink(filename.buf); |
203 | 0 | } |
204 | 0 | } |
205 | |
206 | 0 | reftable_buf_release(&filename); |
207 | 0 | st->tables_len = 0; |
208 | 0 | REFTABLE_FREE_AND_NULL(st->tables); |
209 | 0 | } |
210 | |
211 | 0 | if (st->list_fd >= 0) { |
212 | 0 | close(st->list_fd); |
213 | 0 | st->list_fd = -1; |
214 | 0 | } |
215 | |
216 | 0 | REFTABLE_FREE_AND_NULL(st->list_file); |
217 | 0 | REFTABLE_FREE_AND_NULL(st->reftable_dir); |
218 | 0 | reftable_free(st); |
219 | 0 | free_names(names); |
220 | 0 | } |
221 | | |
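 | | /* Create a shallow copy of the stack's table array. Refcounts are |
 | |  * not bumped, so the copies share ownership with the original. */ |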
222 | | static struct reftable_table **stack_copy_tables(struct reftable_stack *st, |
223 | | size_t cur_len) |
224 | 0 | { |
225 | 0 | struct reftable_table **cur = reftable_calloc(cur_len, sizeof(*cur)); |
226 | 0 | if (!cur) |
227 | 0 | return NULL; |
228 | 0 | for (size_t i = 0; i < cur_len; i++) |
229 | 0 | cur[i] = st->tables[i]; |
230 | 0 | return cur; |
231 | 0 | } |
232 | | |
233 | | static int reftable_stack_reload_once(struct reftable_stack *st, |
234 | | const char **names, |
235 | | int reuse_open) |
236 | 0 | { |
237 | 0 | size_t cur_len = !st->merged ? 0 : st->merged->tables_len; |
238 | 0 | struct reftable_table **cur = NULL; |
239 | 0 | struct reftable_table **reused = NULL; |
240 | 0 | struct reftable_table **new_tables = NULL; |
241 | 0 | size_t reused_len = 0, reused_alloc = 0, names_len; |
242 | 0 | size_t new_tables_len = 0; |
243 | 0 | struct reftable_merged_table *new_merged = NULL; |
244 | 0 | struct reftable_buf table_path = REFTABLE_BUF_INIT; |
245 | 0 | int err = 0; |
246 | 0 | size_t i; |
247 | |
248 | 0 | if (cur_len) { |
249 | 0 | cur = stack_copy_tables(st, cur_len); |
250 | 0 | if (!cur) { |
251 | 0 | err = REFTABLE_OUT_OF_MEMORY_ERROR; |
252 | 0 | goto done; |
253 | 0 | } |
254 | 0 | } |
255 | | |
256 | 0 | names_len = names_length(names); |
257 | |
258 | 0 | if (names_len) { |
259 | 0 | new_tables = reftable_calloc(names_len, sizeof(*new_tables)); |
260 | 0 | if (!new_tables) { |
261 | 0 | err = REFTABLE_OUT_OF_MEMORY_ERROR; |
262 | 0 | goto done; |
263 | 0 | } |
264 | 0 | } |
265 | | |
266 | 0 | while (*names) { |
267 | 0 | struct reftable_table *table = NULL; |
268 | 0 | const char *name = *names++; |
269 | | |
270 | | /* this is linear; we assume compaction keeps the number of |
271 | | tables under control so this is not quadratic. */ |
272 | 0 | for (i = 0; reuse_open && i < cur_len; i++) { |
273 | 0 | if (cur[i] && 0 == strcmp(cur[i]->name, name)) { |
274 | 0 | table = cur[i]; |
275 | 0 | cur[i] = NULL; |
276 | | |
277 | | /* |
278 | | * When reloading the stack fails, we end up |
279 | | * releasing all new tables. This also |
280 | | * includes the reused tables, even though |
281 | | * they are still in use by the old stack. We |
282 | | * thus need to keep them alive here, which we |
283 | | * do by bumping their refcount. |
284 | | */ |
285 | 0 | REFTABLE_ALLOC_GROW_OR_NULL(reused, |
286 | 0 | reused_len + 1, |
287 | 0 | reused_alloc); |
288 | 0 | if (!reused) { |
289 | 0 | err = REFTABLE_OUT_OF_MEMORY_ERROR; |
290 | 0 | goto done; |
291 | 0 | } |
292 | 0 | reused[reused_len++] = table; |
293 | 0 | reftable_table_incref(table); |
294 | 0 | break; |
295 | 0 | } |
296 | 0 | } |
297 | | |
298 | 0 | if (!table) { |
299 | 0 | struct reftable_block_source src = { NULL }; |
300 | |
301 | 0 | err = stack_filename(&table_path, st, name); |
302 | 0 | if (err < 0) |
303 | 0 | goto done; |
304 | | |
305 | 0 | err = reftable_block_source_from_file(&src, |
306 | 0 | table_path.buf); |
307 | 0 | if (err < 0) |
308 | 0 | goto done; |
309 | | |
310 | 0 | err = reftable_table_new(&table, &src, name); |
311 | 0 | if (err < 0) |
312 | 0 | goto done; |
313 | 0 | } |
314 | | |
315 | 0 | new_tables[new_tables_len] = table; |
316 | 0 | new_tables_len++; |
317 | 0 | } |
318 | | |
319 | | /* success! */ |
320 | 0 | err = reftable_merged_table_new(&new_merged, new_tables, |
321 | 0 | new_tables_len, st->opts.hash_id); |
322 | 0 | if (err < 0) |
323 | 0 | goto done; |
324 | | |
325 | | /* |
326 | | * Close the old, non-reused tables and proactively try to unlink |
327 | | * them. This is done for systems like Windows, where the underlying |
328 | | * file of such an open table wouldn't have been possible to be |
329 | | * unlinked by the compacting process. |
330 | | */ |
331 | 0 | for (i = 0; i < cur_len; i++) { |
332 | 0 | if (cur[i]) { |
333 | 0 | const char *name = reftable_table_name(cur[i]); |
334 | |
335 | 0 | err = stack_filename(&table_path, st, name); |
336 | 0 | if (err < 0) |
337 | 0 | goto done; |
338 | | |
339 | 0 | reftable_table_decref(cur[i]); |
340 | 0 | unlink(table_path.buf); |
341 | 0 | } |
342 | 0 | } |
343 | | |
344 | | /* Update the stack to point to the new tables. */ |
345 | 0 | if (st->merged) |
346 | 0 | reftable_merged_table_free(st->merged); |
347 | 0 | new_merged->suppress_deletions = 1; |
348 | 0 | st->merged = new_merged; |
349 | |
350 | 0 | if (st->tables) |
351 | 0 | reftable_free(st->tables); |
352 | 0 | st->tables = new_tables; |
353 | 0 | st->tables_len = new_tables_len; |
354 | 0 | new_tables = NULL; |
355 | 0 | new_tables_len = 0; |
356 | | |
357 | | /* |
358 | | * Decrement the refcount of reused tables again. This only needs to |
359 | | * happen on the successful case, because on the unsuccessful one we |
360 | | * decrement their refcount via `new_tables`. |
361 | | */ |
362 | 0 | for (i = 0; i < reused_len; i++) |
363 | 0 | reftable_table_decref(reused[i]); |
364 | |
365 | 0 | done: |
366 | 0 | for (i = 0; i < new_tables_len; i++) |
367 | 0 | reftable_table_decref(new_tables[i]); |
368 | 0 | reftable_free(new_tables); |
369 | 0 | reftable_free(reused); |
370 | 0 | reftable_free(cur); |
371 | 0 | reftable_buf_release(&table_path); |
372 | 0 | return err; |
373 | 0 | } |
374 | | |
375 | | /* Return negative if a is before b. */ |
376 | | static int tv_cmp(struct timeval *a, struct timeval *b) |
377 | 0 | { |
378 | 0 | time_t diff = a->tv_sec - b->tv_sec; |
379 | 0 | int udiff = a->tv_usec - b->tv_usec; |
380 | |
381 | 0 | if (diff != 0) |
382 | 0 | return diff; |
383 | | |
384 | 0 | return udiff; |
385 | 0 | } |
386 | | |
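 | | /* Reload the stack from "tables.list", retrying with randomized |
 | |  * backoff for roughly three seconds in case a concurrent writer |
 | |  * unlinks tables from under us. With `reuse_open` set, tables that |
 | |  * are already open are reused instead of being reopened. */ |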
387 | | static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st, |
388 | | int reuse_open) |
389 | 0 | { |
390 | 0 | char **names = NULL, **names_after = NULL; |
391 | 0 | struct timeval deadline; |
392 | 0 | int64_t delay = 0; |
393 | 0 | int tries = 0, err; |
394 | 0 | int fd = -1; |
395 | |
396 | 0 | err = gettimeofday(&deadline, NULL); |
397 | 0 | if (err < 0) |
398 | 0 | goto out; |
399 | 0 | deadline.tv_sec += 3; |
400 | |
401 | 0 | while (1) { |
402 | 0 | struct timeval now; |
403 | |
404 | 0 | err = gettimeofday(&now, NULL); |
405 | 0 | if (err < 0) |
406 | 0 | goto out; |
407 | | |
408 | | /* |
409 | | * Only look at deadlines after the first few times. This |
410 | | * simplifies debugging in GDB. |
411 | | */ |
412 | 0 | tries++; |
413 | 0 | if (tries > 3 && tv_cmp(&now, &deadline) >= 0) |
414 | 0 | goto out; |
415 | | |
416 | 0 | fd = open(st->list_file, O_RDONLY); |
417 | 0 | if (fd < 0) { |
418 | 0 | if (errno != ENOENT) { |
419 | 0 | err = REFTABLE_IO_ERROR; |
420 | 0 | goto out; |
421 | 0 | } |
422 | | |
423 | 0 | REFTABLE_CALLOC_ARRAY(names, 1); |
424 | 0 | if (!names) { |
425 | 0 | err = REFTABLE_OUT_OF_MEMORY_ERROR; |
426 | 0 | goto out; |
427 | 0 | } |
428 | 0 | } else { |
429 | 0 | err = fd_read_lines(fd, &names); |
430 | 0 | if (err < 0) |
431 | 0 | goto out; |
432 | 0 | } |
433 | | |
434 | 0 | err = reftable_stack_reload_once(st, (const char **) names, reuse_open); |
435 | 0 | if (!err) |
436 | 0 | break; |
437 | 0 | if (err != REFTABLE_NOT_EXIST_ERROR) |
438 | 0 | goto out; |
439 | | |
440 | | /* |
441 | | * REFTABLE_NOT_EXIST_ERROR can be caused by a concurrent |
442 | | * writer. Check if there was one by checking if the name list |
443 | | * changed. |
444 | | */ |
445 | 0 | err = read_lines(st->list_file, &names_after); |
446 | 0 | if (err < 0) |
447 | 0 | goto out; |
448 | 0 | if (names_equal((const char **) names_after, |
449 | 0 | (const char **) names)) { |
450 | 0 | err = REFTABLE_NOT_EXIST_ERROR; |
451 | 0 | goto out; |
452 | 0 | } |
453 | | |
454 | 0 | free_names(names); |
455 | 0 | names = NULL; |
456 | 0 | free_names(names_after); |
457 | 0 | names_after = NULL; |
458 | 0 | close(fd); |
459 | 0 | fd = -1; |
460 | |
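 | | /* Randomized exponential backoff: grow the delay by a factor in |
 | |  * [1, 2] plus one extra millisecond, then sleep via poll(2). */ |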
461 | 0 | delay = delay + (delay * reftable_rand()) / UINT32_MAX + 1; |
462 | 0 | poll(NULL, 0, delay); |
463 | 0 | } |
464 | | |
465 | 0 | out: |
466 | | /* |
467 | | * Invalidate the stat cache. It is sufficient to only close the file |
468 | | * descriptor and keep the cached stat info because we never use the |
469 | | * latter when the former is negative. |
470 | | */ |
471 | 0 | if (st->list_fd >= 0) { |
472 | 0 | close(st->list_fd); |
473 | 0 | st->list_fd = -1; |
474 | 0 | } |
475 | | |
476 | | /* |
477 | | * Cache stat information in case it provides a useful signal to us. |
478 | | * According to POSIX, "The st_ino and st_dev fields taken together |
479 | | * uniquely identify the file within the system." That being said, |
480 | | * Windows is not POSIX compliant and we do not have these fields |
481 | | * available. So the information we have there is insufficient to |
482 | | * determine whether two file descriptors point to the same file. |
483 | | * |
484 | | * While we could fall back to using other signals like the file's |
485 | | * mtime, those are not sufficient to avoid races. We thus refrain from |
486 | | * using the stat cache on such systems and fall back to the secondary |
487 | | * caching mechanism, which is to check whether contents of the file |
488 | | * have changed. |
489 | | * |
490 | | * On other systems which are POSIX compliant we must keep the file |
491 | | * descriptor open. This is to avoid a race condition where two |
492 | | * processes access the reftable stack at the same point in time: |
493 | | * |
494 | | * 1. A reads the reftable stack and caches its stat info. |
495 | | * |
496 | | * 2. B updates the stack, appending a new table to "tables.list". |
497 | | * This will both use a new inode and result in a different file |
498 | | * size, thus invalidating A's cache in theory. |
499 | | * |
500 | | * 3. B decides to auto-compact the stack and merges two tables. The |
501 | | * file size now matches what A has cached again. Furthermore, the |
502 | | * filesystem may decide to recycle the inode number of the file |
503 | | * we have replaced in (2) because it is not in use anymore. |
504 | | * |
505 | | * 4. A reloads the reftable stack. Neither the inode number nor the |
506 | | * file size changed. If the timestamps did not change either then |
507 | | * we think the cached copy of our stack is up-to-date. |
508 | | * |
509 | | * By keeping the file descriptor open the inode number cannot be |
510 | | * recycled, mitigating the race. |
511 | | */ |
512 | 0 | if (!err && fd >= 0 && !fstat(fd, &st->list_st) && |
513 | 0 | st->list_st.st_dev && st->list_st.st_ino) { |
514 | 0 | st->list_fd = fd; |
515 | 0 | fd = -1; |
516 | 0 | } |
517 | |
518 | 0 | if (fd >= 0) |
519 | 0 | close(fd); |
520 | 0 | free_names(names); |
521 | 0 | free_names(names_after); |
522 | |
523 | 0 | if (st->opts.on_reload) |
524 | 0 | st->opts.on_reload(st->opts.on_reload_payload); |
525 | |
526 | 0 | return err; |
527 | 0 | } |
528 | | |
529 | | int reftable_new_stack(struct reftable_stack **dest, const char *dir, |
530 | | const struct reftable_write_options *_opts) |
531 | 0 | { |
532 | 0 | struct reftable_buf list_file_name = REFTABLE_BUF_INIT; |
533 | 0 | struct reftable_write_options opts = { 0 }; |
534 | 0 | struct reftable_stack *p; |
535 | 0 | int err; |
536 | |
537 | 0 | p = reftable_calloc(1, sizeof(*p)); |
538 | 0 | if (!p) { |
539 | 0 | err = REFTABLE_OUT_OF_MEMORY_ERROR; |
540 | 0 | goto out; |
541 | 0 | } |
542 | | |
543 | 0 | if (_opts) |
544 | 0 | opts = *_opts; |
545 | 0 | if (opts.hash_id == 0) |
546 | 0 | opts.hash_id = REFTABLE_HASH_SHA1; |
547 | |
548 | 0 | *dest = NULL; |
549 | |
550 | 0 | reftable_buf_reset(&list_file_name); |
551 | 0 | if ((err = reftable_buf_addstr(&list_file_name, dir)) < 0 || |
552 | 0 | (err = reftable_buf_addstr(&list_file_name, "/tables.list")) < 0) |
553 | 0 | goto out; |
554 | | |
555 | 0 | p->list_file = reftable_buf_detach(&list_file_name); |
556 | 0 | p->list_fd = -1; |
557 | 0 | p->opts = opts; |
558 | 0 | p->reftable_dir = reftable_strdup(dir); |
559 | 0 | if (!p->reftable_dir) { |
560 | 0 | err = REFTABLE_OUT_OF_MEMORY_ERROR; |
561 | 0 | goto out; |
562 | 0 | } |
563 | | |
564 | 0 | err = reftable_stack_reload_maybe_reuse(p, 1); |
565 | 0 | if (err < 0) |
566 | 0 | goto out; |
567 | | |
568 | 0 | *dest = p; |
569 | 0 | err = 0; |
570 | |
571 | 0 | out: |
572 | 0 | if (err < 0) |
573 | 0 | reftable_stack_destroy(p); |
574 | 0 | return err; |
575 | 0 | } |
576 | | |
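 | | /* |
 | |  * A minimal lifecycle sketch for the stack API above (hypothetical |
 | |  * `dir` and `err` variables; error handling elided): |
 | |  * |
 | |  *   struct reftable_stack *st; |
 | |  *   err = reftable_new_stack(&st, dir, NULL); |
 | |  *   if (!err) { |
 | |  *           ... reftable_stack_read_ref(), iterators, additions ... |
 | |  *           reftable_stack_destroy(st); |
 | |  *   } |
 | |  */ |
 | |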
577 | | /* |
578 | | * Check whether the given stack is up-to-date with what we have in memory. |
579 | | * Returns 0 if so, 1 if the stack is out-of-date or a negative error code |
580 | | * otherwise. |
581 | | */ |
582 | | static int stack_uptodate(struct reftable_stack *st) |
583 | 0 | { |
584 | 0 | char **names = NULL; |
585 | 0 | int err; |
586 | | |
587 | | /* |
588 | | * When we have cached stat information available then we use it to |
589 | | * verify whether the file has been rewritten. |
590 | | * |
591 | | * Note that we explicitly do not want to use `stat_validity_check()` |
592 | | * and friends here because they may end up not comparing the `st_dev` |
593 | | * and `st_ino` fields. These functions thus cannot guarantee that we |
594 | | * indeed still have the same file. |
595 | | */ |
596 | 0 | if (st->list_fd >= 0) { |
597 | 0 | struct stat list_st; |
598 | |
599 | 0 | if (stat(st->list_file, &list_st) < 0) { |
600 | | /* |
601 | | * It's fine for "tables.list" to not exist. In that |
602 | | * case, we have to refresh when the loaded stack has |
603 | | * any tables. |
604 | | */ |
605 | 0 | if (errno == ENOENT) |
606 | 0 | return !!st->tables_len; |
607 | 0 | return REFTABLE_IO_ERROR; |
608 | 0 | } |
609 | | |
610 | | /* |
611 | | * When "tables.list" refers to the same file we can assume |
612 | | * that it didn't change. This is because we always use |
613 | | * rename(3P) to update the file and never write to it |
614 | | * directly. |
615 | | */ |
616 | 0 | if (st->list_st.st_dev == list_st.st_dev && |
617 | 0 | st->list_st.st_ino == list_st.st_ino) |
618 | 0 | return 0; |
619 | 0 | } |
620 | | |
621 | 0 | err = read_lines(st->list_file, &names); |
622 | 0 | if (err < 0) |
623 | 0 | return err; |
624 | | |
625 | 0 | for (size_t i = 0; i < st->tables_len; i++) { |
626 | 0 | if (!names[i]) { |
627 | 0 | err = 1; |
628 | 0 | goto done; |
629 | 0 | } |
630 | | |
631 | 0 | if (strcmp(st->tables[i]->name, names[i])) { |
632 | 0 | err = 1; |
633 | 0 | goto done; |
634 | 0 | } |
635 | 0 | } |
636 | | |
637 | 0 | if (names[st->merged->tables_len]) { |
638 | 0 | err = 1; |
639 | 0 | goto done; |
640 | 0 | } |
641 | | |
642 | 0 | done: |
643 | 0 | free_names(names); |
644 | 0 | return err; |
645 | 0 | } |
646 | | |
647 | | int reftable_stack_reload(struct reftable_stack *st) |
648 | 0 | { |
649 | 0 | int err = stack_uptodate(st); |
650 | 0 | if (err > 0) |
651 | 0 | return reftable_stack_reload_maybe_reuse(st, 1); |
652 | 0 | return err; |
653 | 0 | } |
654 | | |
655 | | struct reftable_addition { |
656 | | struct reftable_flock tables_list_lock; |
657 | | struct reftable_stack *stack; |
658 | | |
659 | | char **new_tables; |
660 | | size_t new_tables_len, new_tables_cap; |
661 | | uint64_t next_update_index; |
662 | | }; |
663 | | |
664 | | static void reftable_addition_close(struct reftable_addition *add) |
665 | 0 | { |
666 | 0 | struct reftable_buf nm = REFTABLE_BUF_INIT; |
667 | 0 | size_t i; |
668 | |
669 | 0 | for (i = 0; i < add->new_tables_len; i++) { |
670 | 0 | if (!stack_filename(&nm, add->stack, add->new_tables[i])) |
671 | 0 | unlink(nm.buf); |
672 | 0 | reftable_free(add->new_tables[i]); |
673 | 0 | add->new_tables[i] = NULL; |
674 | 0 | } |
675 | 0 | reftable_free(add->new_tables); |
676 | 0 | add->new_tables = NULL; |
677 | 0 | add->new_tables_len = 0; |
678 | 0 | add->new_tables_cap = 0; |
679 | |
680 | 0 | flock_release(&add->tables_list_lock); |
681 | 0 | reftable_buf_release(&nm); |
682 | 0 | } |
683 | | |
684 | | static int reftable_stack_init_addition(struct reftable_addition *add, |
685 | | struct reftable_stack *st, |
686 | | unsigned int flags) |
687 | 0 | { |
688 | 0 | struct reftable_buf lock_file_name = REFTABLE_BUF_INIT; |
689 | 0 | int err; |
690 | |
691 | 0 | memset(add, 0, sizeof(*add)); |
692 | 0 | add->stack = st; |
693 | |
694 | 0 | err = flock_acquire(&add->tables_list_lock, st->list_file, |
695 | 0 | st->opts.lock_timeout_ms); |
696 | 0 | if (err < 0) |
697 | 0 | goto done; |
698 | | |
699 | 0 | if (st->opts.default_permissions) { |
700 | 0 | if (chmod(add->tables_list_lock.path, |
701 | 0 | st->opts.default_permissions) < 0) { |
702 | 0 | err = REFTABLE_IO_ERROR; |
703 | 0 | goto done; |
704 | 0 | } |
705 | 0 | } |
706 | | |
707 | 0 | err = stack_uptodate(st); |
708 | 0 | if (err < 0) |
709 | 0 | goto done; |
710 | 0 | if (err > 0 && flags & REFTABLE_STACK_NEW_ADDITION_RELOAD) { |
711 | 0 | err = reftable_stack_reload_maybe_reuse(add->stack, 1); |
712 | 0 | if (err) |
713 | 0 | goto done; |
714 | 0 | } |
715 | 0 | if (err > 0) { |
716 | 0 | err = REFTABLE_OUTDATED_ERROR; |
717 | 0 | goto done; |
718 | 0 | } |
719 | | |
720 | 0 | add->next_update_index = reftable_stack_next_update_index(st); |
721 | 0 | done: |
722 | 0 | if (err) |
723 | 0 | reftable_addition_close(add); |
724 | 0 | reftable_buf_release(&lock_file_name); |
725 | 0 | return err; |
726 | 0 | } |
727 | | |
728 | | static int stack_try_add(struct reftable_stack *st, |
729 | | int (*write_table)(struct reftable_writer *wr, |
730 | | void *arg), |
731 | | void *arg, unsigned flags) |
732 | 0 | { |
733 | 0 | struct reftable_addition add; |
734 | 0 | int err; |
735 | |
736 | 0 | err = reftable_stack_init_addition(&add, st, flags); |
737 | 0 | if (err < 0) |
738 | 0 | goto done; |
739 | | |
740 | 0 | err = reftable_addition_add(&add, write_table, arg); |
741 | 0 | if (err < 0) |
742 | 0 | goto done; |
743 | | |
744 | 0 | err = reftable_addition_commit(&add); |
745 | 0 | done: |
746 | 0 | reftable_addition_close(&add); |
747 | 0 | return err; |
748 | 0 | } |
749 | | |
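 | | /* Add a single new table to the stack in one transaction. When the |
 | |  * stack turns out to be outdated it is reloaded, and the |
 | |  * REFTABLE_OUTDATED_ERROR is propagated so the caller can retry. */ |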
750 | | int reftable_stack_add(struct reftable_stack *st, |
751 | | int (*write)(struct reftable_writer *wr, void *arg), |
752 | | void *arg, unsigned flags) |
753 | 0 | { |
754 | 0 | int err = stack_try_add(st, write, arg, flags); |
755 | 0 | if (err < 0) { |
756 | 0 | if (err == REFTABLE_OUTDATED_ERROR) { |
757 | | /* Ignore error return, we want to propagate |
758 | | REFTABLE_OUTDATED_ERROR. |
759 | | */ |
760 | 0 | reftable_stack_reload(st); |
761 | 0 | } |
762 | 0 | return err; |
763 | 0 | } |
764 | | |
765 | 0 | return 0; |
766 | 0 | } |
767 | | |
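 | | /* Format a table name that encodes the covered update-index range |
 | |  * plus a random suffix to keep concurrent writers from colliding. */ |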
768 | | static int format_name(struct reftable_buf *dest, uint64_t min, uint64_t max) |
769 | 0 | { |
770 | 0 | char buf[100]; |
771 | 0 | uint32_t rnd = reftable_rand(); |
772 | 0 | snprintf(buf, sizeof(buf), "0x%012" PRIx64 "-0x%012" PRIx64 "-%08x", |
773 | 0 | min, max, rnd); |
774 | 0 | reftable_buf_reset(dest); |
775 | 0 | return reftable_buf_addstr(dest, buf); |
776 | 0 | } |
777 | | |
778 | | void reftable_addition_destroy(struct reftable_addition *add) |
779 | 0 | { |
780 | 0 | if (!add) { |
781 | 0 | return; |
782 | 0 | } |
783 | 0 | reftable_addition_close(add); |
784 | 0 | reftable_free(add); |
785 | 0 | } |
786 | | |
787 | | int reftable_addition_commit(struct reftable_addition *add) |
788 | 0 | { |
789 | 0 | struct reftable_buf table_list = REFTABLE_BUF_INIT; |
790 | 0 | int err = 0; |
791 | 0 | size_t i; |
792 | |
793 | 0 | if (add->new_tables_len == 0) |
794 | 0 | goto done; |
795 | | |
796 | 0 | for (i = 0; i < add->stack->merged->tables_len; i++) { |
797 | 0 | if ((err = reftable_buf_addstr(&table_list, add->stack->tables[i]->name)) < 0 || |
798 | 0 | (err = reftable_buf_addstr(&table_list, "\n")) < 0) |
799 | 0 | goto done; |
800 | 0 | } |
801 | 0 | for (i = 0; i < add->new_tables_len; i++) { |
802 | 0 | if ((err = reftable_buf_addstr(&table_list, add->new_tables[i])) < 0 || |
803 | 0 | (err = reftable_buf_addstr(&table_list, "\n")) < 0) |
804 | 0 | goto done; |
805 | 0 | } |
806 | | |
807 | 0 | err = reftable_write_data(add->tables_list_lock.fd, |
808 | 0 | table_list.buf, table_list.len); |
809 | 0 | reftable_buf_release(&table_list); |
810 | 0 | if (err < 0) { |
811 | 0 | err = REFTABLE_IO_ERROR; |
812 | 0 | goto done; |
813 | 0 | } |
814 | | |
815 | 0 | err = stack_fsync(&add->stack->opts, add->tables_list_lock.fd); |
816 | 0 | if (err < 0) { |
817 | 0 | err = REFTABLE_IO_ERROR; |
818 | 0 | goto done; |
819 | 0 | } |
820 | | |
821 | 0 | err = flock_commit(&add->tables_list_lock); |
822 | 0 | if (err < 0) { |
823 | 0 | err = REFTABLE_IO_ERROR; |
824 | 0 | goto done; |
825 | 0 | } |
826 | | |
827 | | /* success, no more state to clean up. */ |
828 | 0 | for (i = 0; i < add->new_tables_len; i++) |
829 | 0 | reftable_free(add->new_tables[i]); |
830 | 0 | reftable_free(add->new_tables); |
831 | 0 | add->new_tables = NULL; |
832 | 0 | add->new_tables_len = 0; |
833 | 0 | add->new_tables_cap = 0; |
834 | |
835 | 0 | err = reftable_stack_reload_maybe_reuse(add->stack, 1); |
836 | 0 | if (err) |
837 | 0 | goto done; |
838 | | |
839 | 0 | if (!add->stack->opts.disable_auto_compact) { |
840 | | /* |
841 | | * Auto-compact the stack to keep the number of tables in |
842 | | * control. It is possible that a concurrent writer is already |
843 | | * trying to compact parts of the stack, which would lead to a |
844 | | * `REFTABLE_LOCK_ERROR` because parts of the stack are locked |
845 | | * already. Similarly, the stack may have been rewritten by a |
846 | | * concurrent writer, which causes `REFTABLE_OUTDATED_ERROR`. |
847 | | * Both of these errors are benign, so we simply ignore them. |
848 | | */ |
849 | 0 | err = reftable_stack_auto_compact(add->stack); |
850 | 0 | if (err < 0 && err != REFTABLE_LOCK_ERROR && |
851 | 0 | err != REFTABLE_OUTDATED_ERROR) |
852 | 0 | goto done; |
853 | 0 | err = 0; |
854 | 0 | } |
855 | | |
856 | 0 | done: |
857 | 0 | reftable_addition_close(add); |
858 | 0 | return err; |
859 | 0 | } |
860 | | |
861 | | int reftable_stack_new_addition(struct reftable_addition **dest, |
862 | | struct reftable_stack *st, |
863 | | unsigned int flags) |
864 | 0 | { |
865 | 0 | int err; |
866 | |
867 | 0 | REFTABLE_CALLOC_ARRAY(*dest, 1); |
868 | 0 | if (!*dest) |
869 | 0 | return REFTABLE_OUT_OF_MEMORY_ERROR; |
870 | | |
871 | 0 | err = reftable_stack_init_addition(*dest, st, flags); |
872 | 0 | if (err) { |
873 | 0 | reftable_free(*dest); |
874 | 0 | *dest = NULL; |
875 | 0 | } |
876 | |
877 | 0 | return err; |
878 | 0 | } |
879 | | |
880 | | int reftable_addition_add(struct reftable_addition *add, |
881 | | int (*write_table)(struct reftable_writer *wr, |
882 | | void *arg), |
883 | | void *arg) |
884 | 0 | { |
885 | 0 | struct reftable_buf temp_tab_file_name = REFTABLE_BUF_INIT; |
886 | 0 | struct reftable_buf tab_file_name = REFTABLE_BUF_INIT; |
887 | 0 | struct reftable_buf next_name = REFTABLE_BUF_INIT; |
888 | 0 | struct reftable_writer *wr = NULL; |
889 | 0 | struct reftable_tmpfile tab_file = REFTABLE_TMPFILE_INIT; |
890 | 0 | struct fd_writer writer = { |
891 | 0 | .opts = &add->stack->opts, |
892 | 0 | }; |
893 | 0 | int err = 0; |
894 | |
895 | 0 | reftable_buf_reset(&next_name); |
896 | |
897 | 0 | err = format_name(&next_name, add->next_update_index, add->next_update_index); |
898 | 0 | if (err < 0) |
899 | 0 | goto done; |
900 | | |
901 | 0 | err = stack_filename(&temp_tab_file_name, add->stack, next_name.buf); |
902 | 0 | if (err < 0) |
903 | 0 | goto done; |
904 | | |
905 | 0 | err = reftable_buf_addstr(&temp_tab_file_name, ".temp.XXXXXX"); |
906 | 0 | if (err < 0) |
907 | 0 | goto done; |
908 | | |
909 | 0 | err = tmpfile_from_pattern(&tab_file, temp_tab_file_name.buf); |
910 | 0 | if (err < 0) |
911 | 0 | goto done; |
912 | 0 | if (add->stack->opts.default_permissions) { |
913 | 0 | if (chmod(tab_file.path, |
914 | 0 | add->stack->opts.default_permissions)) { |
915 | 0 | err = REFTABLE_IO_ERROR; |
916 | 0 | goto done; |
917 | 0 | } |
918 | 0 | } |
919 | | |
920 | 0 | writer.fd = tab_file.fd; |
921 | 0 | err = reftable_writer_new(&wr, fd_writer_write, fd_writer_flush, |
922 | 0 | &writer, &add->stack->opts); |
923 | 0 | if (err < 0) |
924 | 0 | goto done; |
925 | | |
926 | 0 | err = write_table(wr, arg); |
927 | 0 | if (err < 0) |
928 | 0 | goto done; |
929 | | |
930 | 0 | err = reftable_writer_close(wr); |
931 | 0 | if (err == REFTABLE_EMPTY_TABLE_ERROR) { |
932 | 0 | err = 0; |
933 | 0 | goto done; |
934 | 0 | } |
935 | 0 | if (err < 0) |
936 | 0 | goto done; |
937 | | |
938 | 0 | err = tmpfile_close(&tab_file); |
939 | 0 | if (err < 0) |
940 | 0 | goto done; |
941 | | |
942 | 0 | if (wr->min_update_index < add->next_update_index) { |
943 | 0 | err = REFTABLE_API_ERROR; |
944 | 0 | goto done; |
945 | 0 | } |
946 | | |
947 | 0 | err = format_name(&next_name, wr->min_update_index, wr->max_update_index); |
948 | 0 | if (err < 0) |
949 | 0 | goto done; |
950 | | |
951 | 0 | err = reftable_buf_addstr(&next_name, ".ref"); |
952 | 0 | if (err < 0) |
953 | 0 | goto done; |
954 | | |
955 | 0 | err = stack_filename(&tab_file_name, add->stack, next_name.buf); |
956 | 0 | if (err < 0) |
957 | 0 | goto done; |
958 | | |
959 | | /* |
960 | | On Windows, this relies on rand() picking a unique destination name. |
961 | | Maybe we should do a retry loop as well? |
962 | | */ |
963 | 0 | err = tmpfile_rename(&tab_file, tab_file_name.buf); |
964 | 0 | if (err < 0) |
965 | 0 | goto done; |
966 | | |
967 | 0 | REFTABLE_ALLOC_GROW_OR_NULL(add->new_tables, add->new_tables_len + 1, |
968 | 0 | add->new_tables_cap); |
969 | 0 | if (!add->new_tables) { |
970 | 0 | err = REFTABLE_OUT_OF_MEMORY_ERROR; |
971 | 0 | goto done; |
972 | 0 | } |
973 | 0 | add->new_tables[add->new_tables_len++] = reftable_buf_detach(&next_name); |
974 | |
975 | 0 | done: |
976 | 0 | tmpfile_delete(&tab_file); |
977 | 0 | reftable_buf_release(&temp_tab_file_name); |
978 | 0 | reftable_buf_release(&tab_file_name); |
979 | 0 | reftable_buf_release(&next_name); |
980 | 0 | reftable_writer_free(wr); |
981 | 0 | return err; |
982 | 0 | } |
983 | | |
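 | | /* |
 | |  * Sketch of a typical transaction using the addition API above |
 | |  * (hypothetical `write_refs` callback and `payload`; error handling |
 | |  * elided): |
 | |  * |
 | |  *   struct reftable_addition *add = NULL; |
 | |  *   err = reftable_stack_new_addition(&add, st, 0); |
 | |  *   if (!err) |
 | |  *           err = reftable_addition_add(add, write_refs, payload); |
 | |  *   if (!err) |
 | |  *           err = reftable_addition_commit(add); |
 | |  *   reftable_addition_destroy(add); |
 | |  */ |
 | |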
984 | | uint64_t reftable_stack_next_update_index(struct reftable_stack *st) |
985 | 0 | { |
986 | 0 | int sz = st->merged->tables_len; |
987 | 0 | if (sz > 0) |
988 | 0 | return reftable_table_max_update_index(st->tables[sz - 1]) + |
989 | 0 | 1; |
990 | 0 | return 1; |
991 | 0 | } |
992 | | |
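 | | /* Write the merged contents of tables [first, last] into `wr`. |
 | |  * Tombstones are dropped when the compacted slice includes the base |
 | |  * of the stack, and log records are expired according to `config`. */ |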
993 | | static int stack_write_compact(struct reftable_stack *st, |
994 | | struct reftable_writer *wr, |
995 | | size_t first, size_t last, |
996 | | struct reftable_log_expiry_config *config) |
997 | 0 | { |
998 | 0 | struct reftable_merged_table *mt = NULL; |
999 | 0 | struct reftable_iterator it = { NULL }; |
1000 | 0 | struct reftable_ref_record ref = { NULL }; |
1001 | 0 | struct reftable_log_record log = { NULL }; |
1002 | 0 | size_t subtabs_len = last - first + 1; |
1003 | 0 | uint64_t entries = 0; |
1004 | 0 | int err = 0; |
1005 | |
1006 | 0 | for (size_t i = first; i <= last; i++) |
1007 | 0 | st->stats.bytes += st->tables[i]->size; |
1008 | 0 | err = reftable_writer_set_limits(wr, st->tables[first]->min_update_index, |
1009 | 0 | st->tables[last]->max_update_index); |
1010 | 0 | if (err < 0) |
1011 | 0 | goto done; |
1012 | | |
1013 | 0 | err = reftable_merged_table_new(&mt, st->tables + first, subtabs_len, |
1014 | 0 | st->opts.hash_id); |
1015 | 0 | if (err < 0) |
1016 | 0 | goto done; |
1017 | | |
1018 | 0 | err = merged_table_init_iter(mt, &it, REFTABLE_BLOCK_TYPE_REF); |
1019 | 0 | if (err < 0) |
1020 | 0 | goto done; |
1021 | | |
1022 | 0 | err = reftable_iterator_seek_ref(&it, ""); |
1023 | 0 | if (err < 0) |
1024 | 0 | goto done; |
1025 | | |
1026 | 0 | while (1) { |
1027 | 0 | err = reftable_iterator_next_ref(&it, &ref); |
1028 | 0 | if (err > 0) { |
1029 | 0 | err = 0; |
1030 | 0 | break; |
1031 | 0 | } |
1032 | 0 | if (err < 0) |
1033 | 0 | goto done; |
1034 | | |
1035 | 0 | if (first == 0 && reftable_ref_record_is_deletion(&ref)) { |
1036 | 0 | continue; |
1037 | 0 | } |
1038 | | |
1039 | 0 | err = reftable_writer_add_ref(wr, &ref); |
1040 | 0 | if (err < 0) |
1041 | 0 | goto done; |
1042 | 0 | entries++; |
1043 | 0 | } |
1044 | 0 | reftable_iterator_destroy(&it); |
1045 | |
1046 | 0 | err = merged_table_init_iter(mt, &it, REFTABLE_BLOCK_TYPE_LOG); |
1047 | 0 | if (err < 0) |
1048 | 0 | goto done; |
1049 | | |
1050 | 0 | err = reftable_iterator_seek_log(&it, ""); |
1051 | 0 | if (err < 0) |
1052 | 0 | goto done; |
1053 | | |
1054 | 0 | while (1) { |
1055 | 0 | err = reftable_iterator_next_log(&it, &log); |
1056 | 0 | if (err > 0) { |
1057 | 0 | err = 0; |
1058 | 0 | break; |
1059 | 0 | } |
1060 | 0 | if (err < 0) |
1061 | 0 | goto done; |
1062 | 0 | if (first == 0 && reftable_log_record_is_deletion(&log)) { |
1063 | 0 | continue; |
1064 | 0 | } |
1065 | | |
1066 | 0 | if (config && config->min_update_index > 0 && |
1067 | 0 | log.update_index < config->min_update_index) { |
1068 | 0 | continue; |
1069 | 0 | } |
1070 | | |
1071 | 0 | if (config && config->time > 0 && |
1072 | 0 | log.value.update.time < config->time) { |
1073 | 0 | continue; |
1074 | 0 | } |
1075 | | |
1076 | 0 | err = reftable_writer_add_log(wr, &log); |
1077 | 0 | if (err < 0) |
1078 | 0 | goto done; |
1079 | 0 | entries++; |
1080 | 0 | } |
1081 | | |
1082 | 0 | done: |
1083 | 0 | reftable_iterator_destroy(&it); |
1084 | 0 | if (mt) |
1085 | 0 | reftable_merged_table_free(mt); |
1086 | 0 | reftable_ref_record_release(&ref); |
1087 | 0 | reftable_log_record_release(&log); |
1088 | 0 | st->stats.entries_written += entries; |
1089 | 0 | return err; |
1090 | 0 | } |
1091 | | |
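 | | /* Compact the already-locked tables [first, last] into a temporary |
 | |  * file in the reftable directory, handing it back via `tab_file_out`. */ |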
1092 | | static int stack_compact_locked(struct reftable_stack *st, |
1093 | | size_t first, size_t last, |
1094 | | struct reftable_log_expiry_config *config, |
1095 | | struct reftable_tmpfile *tab_file_out) |
1096 | 0 | { |
1097 | 0 | struct reftable_buf next_name = REFTABLE_BUF_INIT; |
1098 | 0 | struct reftable_buf tab_file_path = REFTABLE_BUF_INIT; |
1099 | 0 | struct reftable_writer *wr = NULL; |
1100 | 0 | struct fd_writer writer = { |
1101 | 0 | .opts = &st->opts, |
1102 | 0 | }; |
1103 | 0 | struct reftable_tmpfile tab_file = REFTABLE_TMPFILE_INIT; |
1104 | 0 | int err = 0; |
1105 | |
1106 | 0 | err = format_name(&next_name, reftable_table_min_update_index(st->tables[first]), |
1107 | 0 | reftable_table_max_update_index(st->tables[last])); |
1108 | 0 | if (err < 0) |
1109 | 0 | goto done; |
1110 | | |
1111 | 0 | err = stack_filename(&tab_file_path, st, next_name.buf); |
1112 | 0 | if (err < 0) |
1113 | 0 | goto done; |
1114 | | |
1115 | 0 | err = reftable_buf_addstr(&tab_file_path, ".temp.XXXXXX"); |
1116 | 0 | if (err < 0) |
1117 | 0 | goto done; |
1118 | | |
1119 | 0 | err = tmpfile_from_pattern(&tab_file, tab_file_path.buf); |
1120 | 0 | if (err < 0) |
1121 | 0 | goto done; |
1122 | | |
1123 | 0 | if (st->opts.default_permissions && |
1124 | 0 | chmod(tab_file.path, st->opts.default_permissions) < 0) { |
1125 | 0 | err = REFTABLE_IO_ERROR; |
1126 | 0 | goto done; |
1127 | 0 | } |
1128 | | |
1129 | 0 | writer.fd = tab_file.fd; |
1130 | 0 | err = reftable_writer_new(&wr, fd_writer_write, fd_writer_flush, |
1131 | 0 | &writer, &st->opts); |
1132 | 0 | if (err < 0) |
1133 | 0 | goto done; |
1134 | | |
1135 | 0 | err = stack_write_compact(st, wr, first, last, config); |
1136 | 0 | if (err < 0) |
1137 | 0 | goto done; |
1138 | | |
1139 | 0 | err = reftable_writer_close(wr); |
1140 | 0 | if (err < 0) |
1141 | 0 | goto done; |
1142 | | |
1143 | 0 | err = tmpfile_close(&tab_file); |
1144 | 0 | if (err < 0) |
1145 | 0 | goto done; |
1146 | | |
1147 | 0 | *tab_file_out = tab_file; |
1148 | 0 | tab_file = REFTABLE_TMPFILE_INIT; |
1149 | |
1150 | 0 | done: |
1151 | 0 | tmpfile_delete(&tab_file); |
1152 | 0 | reftable_writer_free(wr); |
1153 | 0 | reftable_buf_release(&next_name); |
1154 | 0 | reftable_buf_release(&tab_file_path); |
1155 | 0 | return err; |
1156 | 0 | } |
1157 | | |
1158 | | enum stack_compact_range_flags { |
1159 | | /* |
1160 | | * Perform a best-effort compaction. That is, even if we cannot lock |
1161 | | * all tables in the specified range, we will try to compact the |
1162 | | * remaining slice. |
1163 | | */ |
1164 | | STACK_COMPACT_RANGE_BEST_EFFORT = (1 << 0), |
1165 | | }; |
1166 | | |
1167 | | /* |
1168 | | * Compact all tables in the range `[first, last]` into a single new table. |
1169 | | * |
1170 | | * This function returns `0` on success or a code `< 0` on failure. When the |
1171 | | * stack or any of the tables in the specified range are already locked then |
1172 | | * this function returns `REFTABLE_LOCK_ERROR`. This is a benign error that |
1173 | | * callers can either ignore, or they may choose to retry compaction after some |
1174 | | * amount of time. |
1175 | | */ |
1176 | | static int stack_compact_range(struct reftable_stack *st, |
1177 | | size_t first, size_t last, |
1178 | | struct reftable_log_expiry_config *expiry, |
1179 | | unsigned int flags) |
1180 | 0 | { |
1181 | 0 | struct reftable_buf tables_list_buf = REFTABLE_BUF_INIT; |
1182 | 0 | struct reftable_buf new_table_name = REFTABLE_BUF_INIT; |
1183 | 0 | struct reftable_buf new_table_path = REFTABLE_BUF_INIT; |
1184 | 0 | struct reftable_buf table_name = REFTABLE_BUF_INIT; |
1185 | 0 | struct reftable_flock tables_list_lock = REFTABLE_FLOCK_INIT; |
1186 | 0 | struct reftable_flock *table_locks = NULL; |
1187 | 0 | struct reftable_tmpfile new_table = REFTABLE_TMPFILE_INIT; |
1188 | 0 | int is_empty_table = 0, err = 0; |
1189 | 0 | size_t first_to_replace, last_to_replace; |
1190 | 0 | size_t i, nlocks = 0; |
1191 | 0 | char **names = NULL; |
1192 | |
1193 | 0 | if (first > last || (!expiry && first == last)) { |
1194 | 0 | err = 0; |
1195 | 0 | goto done; |
1196 | 0 | } |
1197 | | |
1198 | 0 | st->stats.attempts++; |
1199 | | |
1200 | | /* |
1201 | | * Hold the lock so that we can read "tables.list" and lock all tables |
1202 | | * which are part of the user-specified range. |
1203 | | */ |
1204 | 0 | err = flock_acquire(&tables_list_lock, st->list_file, st->opts.lock_timeout_ms); |
1205 | 0 | if (err < 0) |
1206 | 0 | goto done; |
1207 | | |
1208 | | /* |
1209 | | * Check whether the stack is up-to-date. We unfortunately cannot |
1210 | | * handle the situation gracefully in case it's _not_ up-to-date |
1211 | | * because the range of tables that the user has requested us to |
1212 | | * compact may have been changed. So instead we abort. |
1213 | | * |
1214 | | * We could in theory improve the situation by having the caller not |
1215 | | * pass in a range, but instead the list of tables to compact. If so, |
1216 | | * we could check that relevant tables still exist. But for now it's |
1217 | | * good enough to just abort. |
1218 | | */ |
1219 | 0 | err = stack_uptodate(st); |
1220 | 0 | if (err < 0) |
1221 | 0 | goto done; |
1222 | 0 | if (err > 0) { |
1223 | 0 | err = REFTABLE_OUTDATED_ERROR; |
1224 | 0 | goto done; |
1225 | 0 | } |
1226 | | |
1227 | | /* |
1228 | | * Lock all tables in the user-provided range. This is the slice of our |
1229 | | * stack which we'll compact. |
1230 | | * |
1231 | | * Note that we lock tables in reverse order from last to first. The |
1232 | | * intent behind this is to allow a newer process to perform best |
1233 | | * effort compaction of tables that it has added in the case where an |
1234 | | * older process is still busy compacting tables which are preexisting |
1235 | | * from the point of view of the newer process. |
1236 | | */ |
1237 | 0 | REFTABLE_ALLOC_ARRAY(table_locks, last - first + 1); |
1238 | 0 | if (!table_locks) { |
1239 | 0 | err = REFTABLE_OUT_OF_MEMORY_ERROR; |
1240 | 0 | goto done; |
1241 | 0 | } |
1242 | 0 | for (i = 0; i < last - first + 1; i++) |
1243 | 0 | table_locks[i] = REFTABLE_FLOCK_INIT; |
1244 | |
1245 | 0 | for (i = last + 1; i > first; i--) { |
1246 | 0 | err = stack_filename(&table_name, st, reftable_table_name(st->tables[i - 1])); |
1247 | 0 | if (err < 0) |
1248 | 0 | goto done; |
1249 | | |
1250 | 0 | err = flock_acquire(&table_locks[nlocks], table_name.buf, 0); |
1251 | 0 | if (err < 0) { |
1252 | | /* |
1253 | | * When the table is locked already we may do a |
1254 | | * best-effort compaction and compact only the tables |
1255 | | * that we have managed to lock so far. This of course |
1256 | | * requires that we have been able to lock at least two |
1257 | | * tables, otherwise there would be nothing to compact. |
1258 | | * In that case, we return a lock error to our caller. |
1259 | | */ |
1260 | 0 | if (err == REFTABLE_LOCK_ERROR && last - (i - 1) >= 2 && |
1261 | 0 | flags & STACK_COMPACT_RANGE_BEST_EFFORT) { |
1262 | 0 | err = 0; |
1263 | | /* |
1264 | | * The subtraction is to offset the index, the |
1265 | | * addition is to only compact up to the table |
1266 | | * of the preceding iteration. They obviously |
1267 | | * cancel each other out, but that may be |
1268 | | * non-obvious when it was omitted. |
1269 | | */ |
1270 | 0 | first = (i - 1) + 1; |
1271 | 0 | break; |
1272 | 0 | } |
1273 | | |
1274 | 0 | goto done; |
1275 | 0 | } |
1276 | | |
1277 | | /* |
1278 | | * We need to close the lockfiles as we might otherwise easily |
1279 | | * run into file descriptor exhaustion when we compact a lot |
1280 | | * of tables. |
1281 | | */ |
1282 | 0 | err = flock_close(&table_locks[nlocks++]); |
1283 | 0 | if (err < 0) |
1284 | 0 | goto done; |
1285 | 0 | } |
1286 | | |
1287 | | /* |
1288 | | * We have locked all tables in our range and can thus release the |
1289 | | * "tables.list" lock while compacting the locked tables. This allows |
1290 | | * concurrent updates to the stack to proceed. |
1291 | | */ |
1292 | 0 | err = flock_release(&tables_list_lock); |
1293 | 0 | if (err < 0) { |
1294 | 0 | err = REFTABLE_IO_ERROR; |
1295 | 0 | goto done; |
1296 | 0 | } |
1297 | | |
1298 | | /* |
1299 | | * Compact the now-locked tables into a new table. Note that compacting |
1300 | | * these tables may end up with an empty new table in case tombstones |
1301 | | * end up cancelling out all refs in that range. |
1302 | | */ |
1303 | 0 | err = stack_compact_locked(st, first, last, expiry, &new_table); |
1304 | 0 | if (err < 0) { |
1305 | 0 | if (err != REFTABLE_EMPTY_TABLE_ERROR) |
1306 | 0 | goto done; |
1307 | 0 | is_empty_table = 1; |
1308 | 0 | } |
1309 | | |
1310 | | /* |
1311 | | * Now that we have written the new, compacted table we need to re-lock |
1312 | | * "tables.list". We'll then replace the compacted range of tables with |
1313 | | * the new table. |
1314 | | */ |
1315 | 0 | err = flock_acquire(&tables_list_lock, st->list_file, st->opts.lock_timeout_ms); |
1316 | 0 | if (err < 0) |
1317 | 0 | goto done; |
1318 | | |
1319 | 0 | if (st->opts.default_permissions) { |
1320 | 0 | if (chmod(tables_list_lock.path, |
1321 | 0 | st->opts.default_permissions) < 0) { |
1322 | 0 | err = REFTABLE_IO_ERROR; |
1323 | 0 | goto done; |
1324 | 0 | } |
1325 | 0 | } |
1326 | | |
1327 | | /* |
1328 | | * As we have unlocked the stack while compacting our slice of tables |
1329 | | * it may have happened that a concurrently running process has updated |
1330 | | * the stack while we were compacting. In that case, we need to check |
1331 | | * whether the tables that we have just compacted still exist in the |
1332 | | * stack in the exact same order as we have compacted them. |
1333 | | * |
1334 | | * If they do exist, then it is fine to continue and replace those |
1335 | | * tables with our compacted version. If they don't, then we need to |
1336 | | * abort. |
1337 | | */ |
1338 | 0 | err = stack_uptodate(st); |
1339 | 0 | if (err < 0) |
1340 | 0 | goto done; |
1341 | 0 | if (err > 0) { |
1342 | 0 | ssize_t new_offset = -1; |
1343 | 0 | int fd; |
1344 | |
1345 | 0 | fd = open(st->list_file, O_RDONLY); |
1346 | 0 | if (fd < 0) { |
1347 | 0 | err = REFTABLE_IO_ERROR; |
1348 | 0 | goto done; |
1349 | 0 | } |
1350 | | |
1351 | 0 | err = fd_read_lines(fd, &names); |
1352 | 0 | close(fd); |
1353 | 0 | if (err < 0) |
1354 | 0 | goto done; |
1355 | | |
1356 | | /* |
1357 | | * Search for the offset of the first table that we have |
1358 | | * compacted in the updated "tables.list" file. |
1359 | | */ |
1360 | 0 | for (size_t i = 0; names[i]; i++) { |
1361 | 0 | if (strcmp(names[i], st->tables[first]->name)) |
1362 | 0 | continue; |
1363 | | |
1364 | | /* |
1365 | | * We have found the first entry. Verify that all the |
1366 | | * subsequent tables we have compacted still exist in |
1367 | | * the modified stack in the exact same order as we |
1368 | | * have compacted them. |
1369 | | */ |
1370 | 0 | for (size_t j = 1; j < last - first + 1; j++) { |
1371 | 0 | const char *old = first + j < st->merged->tables_len ? |
1372 | 0 | st->tables[first + j]->name : NULL; |
1373 | 0 | const char *new = names[i + j]; |
1374 | | |
1375 | | /* |
1376 | | * If some entries are missing or the tables |
1377 | | * have changed then we need to bail out. Again, this |
1378 | | * shouldn't ever happen because we have locked the |
1379 | | * tables we are compacting. |
1380 | | */ |
1381 | 0 | if (!old || !new || strcmp(old, new)) { |
1382 | 0 | err = REFTABLE_OUTDATED_ERROR; |
1383 | 0 | goto done; |
1384 | 0 | } |
1385 | 0 | } |
1386 | | |
1387 | 0 | new_offset = i; |
1388 | 0 | break; |
1389 | 0 | } |
1390 | | |
1391 | | /* |
1392 | | * In case we didn't find our compacted tables in the stack we |
1393 | | * need to bail out. In theory, this should never happen |
1394 | | * because we locked the tables we are compacting. |
1395 | | */ |
1396 | 0 | if (new_offset < 0) { |
1397 | 0 | err = REFTABLE_OUTDATED_ERROR; |
1398 | 0 | goto done; |
1399 | 0 | } |
1400 | | |
1401 | | /* |
1402 | | * We have found the new range that we want to replace, so |
1403 | | * let's update the range of tables that we want to replace. |
1404 | | */ |
1405 | 0 | first_to_replace = new_offset; |
1406 | 0 | last_to_replace = last + (new_offset - first); |
1407 | 0 | } else { |
1408 | | /* |
1409 | | * `fd_read_lines()` uses a `NULL` sentinel to indicate that |
1410 | | * the array is at its end. As we use `free_names()` to free |
1411 | | * the array, we need to include this sentinel value here and |
1412 | | * thus have to allocate `tables_len + 1` many entries. |
1413 | | */ |
1414 | 0 | REFTABLE_CALLOC_ARRAY(names, st->merged->tables_len + 1); |
1415 | 0 | if (!names) { |
1416 | 0 | err = REFTABLE_OUT_OF_MEMORY_ERROR; |
1417 | 0 | goto done; |
1418 | 0 | } |
1419 | | |
1420 | 0 | for (size_t i = 0; i < st->merged->tables_len; i++) { |
1421 | 0 | names[i] = reftable_strdup(st->tables[i]->name); |
1422 | 0 | if (!names[i]) { |
1423 | 0 | err = REFTABLE_OUT_OF_MEMORY_ERROR; |
1424 | 0 | goto done; |
1425 | 0 | } |
1426 | 0 | } |
1427 | 0 | first_to_replace = first; |
1428 | 0 | last_to_replace = last; |
1429 | 0 | } |
1430 | | |
1431 | | /* |
1432 | | * If the resulting compacted table is not empty, then we need to move |
1433 | | * it into place now. |
1434 | | */ |
1435 | 0 | if (!is_empty_table) { |
1436 | 0 | err = format_name(&new_table_name, st->tables[first]->min_update_index, |
1437 | 0 | st->tables[last]->max_update_index); |
1438 | 0 | if (err < 0) |
1439 | 0 | goto done; |
1440 | | |
1441 | 0 | err = reftable_buf_addstr(&new_table_name, ".ref"); |
1442 | 0 | if (err < 0) |
1443 | 0 | goto done; |
1444 | | |
1445 | 0 | err = stack_filename(&new_table_path, st, new_table_name.buf); |
1446 | 0 | if (err < 0) |
1447 | 0 | goto done; |
1448 | | |
1449 | 0 | err = tmpfile_rename(&new_table, new_table_path.buf); |
1450 | 0 | if (err < 0) |
1451 | 0 | goto done; |
1452 | 0 | } |
1453 | | |
1454 | | /* |
1455 | | * Write the new "tables.list" contents with the compacted table we |
1456 | | * have just written. In case the compacted table became empty we |
1457 | | * simply skip writing it. |
1458 | | */ |
1459 | 0 | for (i = 0; i < first_to_replace; i++) { |
1460 | 0 | if ((err = reftable_buf_addstr(&tables_list_buf, names[i])) < 0 || |
1461 | 0 | (err = reftable_buf_addstr(&tables_list_buf, "\n")) < 0) |
1462 | 0 | goto done; |
1463 | 0 | } |
1464 | 0 | if (!is_empty_table) { |
1465 | 0 | if ((err = reftable_buf_addstr(&tables_list_buf, new_table_name.buf)) < 0 || |
1466 | 0 | (err = reftable_buf_addstr(&tables_list_buf, "\n")) < 0) |
1467 | 0 | goto done; |
1468 | 0 | } |
1469 | 0 | for (i = last_to_replace + 1; names[i]; i++) { |
1470 | 0 | if ((err = reftable_buf_addstr(&tables_list_buf, names[i])) < 0 || |
1471 | 0 | (err = reftable_buf_addstr(&tables_list_buf, "\n")) < 0) |
1472 | 0 | goto done; |
1473 | 0 | } |
1474 | | |
1475 | 0 | err = reftable_write_data(tables_list_lock.fd, |
1476 | 0 | tables_list_buf.buf, tables_list_buf.len); |
1477 | 0 | if (err < 0) { |
1478 | 0 | err = REFTABLE_IO_ERROR; |
1479 | 0 | unlink(new_table_path.buf); |
1480 | 0 | goto done; |
1481 | 0 | } |
1482 | | |
1483 | 0 | err = stack_fsync(&st->opts, tables_list_lock.fd); |
1484 | 0 | if (err < 0) { |
1485 | 0 | err = REFTABLE_IO_ERROR; |
1486 | 0 | unlink(new_table_path.buf); |
1487 | 0 | goto done; |
1488 | 0 | } |
1489 | | |
1490 | 0 | err = flock_commit(&tables_list_lock); |
1491 | 0 | if (err < 0) { |
1492 | 0 | err = REFTABLE_IO_ERROR; |
1493 | 0 | unlink(new_table_path.buf); |
1494 | 0 | goto done; |
1495 | 0 | } |
1496 | | |
1497 | | /* |
1498 | | * Reload the stack before deleting the compacted tables. We can only |
1499 | | * delete the files after we closed them on Windows, so this needs to |
1500 | | * happen first. |
1501 | | */ |
1502 | 0 | err = reftable_stack_reload_maybe_reuse(st, first < last); |
1503 | 0 | if (err < 0) |
1504 | 0 | goto done; |
1505 | | |
1506 | | /* |
1507 | | * Delete the old tables. They may still be in use by concurrent |
1508 | | * readers, so it is expected that unlinking tables may fail. |
1509 | | */ |
1510 | 0 | for (i = 0; i < nlocks; i++) { |
1511 | 0 | struct reftable_flock *table_lock = &table_locks[i]; |
1512 | |
1513 | 0 | reftable_buf_reset(&table_name); |
1514 | 0 | err = reftable_buf_add(&table_name, table_lock->path, |
1515 | 0 | strlen(table_lock->path) - strlen(".lock")); |
1516 | 0 | if (err) |
1517 | 0 | continue; |
1518 | | |
1519 | 0 | unlink(table_name.buf); |
1520 | 0 | } |
1521 | |
1522 | 0 | done: |
1523 | 0 | flock_release(&tables_list_lock); |
1524 | 0 | for (i = 0; table_locks && i < nlocks; i++) |
1525 | 0 | flock_release(&table_locks[i]); |
1526 | 0 | reftable_free(table_locks); |
1527 | |
1528 | 0 | tmpfile_delete(&new_table); |
1529 | 0 | reftable_buf_release(&new_table_name); |
1530 | 0 | reftable_buf_release(&new_table_path); |
1531 | 0 | reftable_buf_release(&tables_list_buf); |
1532 | 0 | reftable_buf_release(&table_name); |
1533 | 0 | free_names(names); |
1534 | |
1535 | 0 | if (err == REFTABLE_LOCK_ERROR) |
1536 | 0 | st->stats.failures++; |
1537 | |
1538 | 0 | return err; |
1539 | 0 | } |
1540 | | |
1541 | | int reftable_stack_compact_all(struct reftable_stack *st, |
1542 | | struct reftable_log_expiry_config *config) |
1543 | 0 | { |
1544 | 0 | size_t last = st->merged->tables_len ? st->merged->tables_len - 1 : 0; |
1545 | 0 | return stack_compact_range(st, 0, last, config, 0); |
1546 | 0 | } |
1547 | | |
1548 | | static int segment_size(struct segment *s) |
1549 | 0 | { |
1550 | 0 | return s->end - s->start; |
1551 | 0 | } |
1552 | | |
1553 | | struct segment suggest_compaction_segment(uint64_t *sizes, size_t n, |
1554 | | uint8_t factor) |
1555 | 0 | { |
1556 | 0 | struct segment seg = { 0 }; |
1557 | 0 | uint64_t bytes; |
1558 | 0 | size_t i; |
1559 | |
1560 | 0 | if (!factor) |
1561 | 0 | factor = DEFAULT_GEOMETRIC_FACTOR; |
1562 | | |
1563 | | /* |
1564 | | * If there are no tables or only a single one then we don't have to |
1565 | | * compact anything. The sequence is geometric by definition already. |
1566 | | */ |
1567 | 0 | if (n <= 1) |
1568 | 0 | return seg; |
1569 | | |
1570 | | /* |
1571 | | * Find the ending table of the compaction segment needed to restore the |
1572 | | * geometric sequence. Note that the segment end is exclusive. |
1573 | | * |
1574 | | * To do so, we iterate backwards starting from the most recent table |
1575 | | * until a valid segment end is found. If the preceding table is smaller |
1576 | | * than the current table multiplied by the geometric factor (default 2), the |
1577 | | * compaction segment end has been identified. |
1578 | | * |
1579 | | * Tables after the ending point are not added to the byte count because |
1580 | | * they are already valid members of the geometric sequence. Due to the |
1581 | | * properties of a geometric sequence, it is not possible for the sum of |
1582 | | * these tables to exceed the value of the ending point table. |
1583 | | * |
1584 | | * Example table size sequence requiring no compaction: |
1585 | | * 64, 32, 16, 8, 4, 2, 1 |
1586 | | * |
1587 | | * Example table size sequence where compaction segment end is set to |
1588 | | * the last table. Since the segment end is exclusive, the last table is |
1589 | | * excluded during subsequent compaction and the table with size 3 is |
1590 | | * the final table included: |
1591 | | * 64, 32, 16, 8, 4, 3, 1 |
1592 | | */ |
1593 | 0 | for (i = n - 1; i > 0; i--) { |
1594 | 0 | if (sizes[i - 1] < sizes[i] * factor) { |
1595 | 0 | seg.end = i + 1; |
1596 | 0 | bytes = sizes[i]; |
1597 | 0 | break; |
1598 | 0 | } |
1599 | 0 | } |
1600 | | |
1601 | | /* |
1602 | | * Find the starting table of the compaction segment by iterating |
1603 | | * through the remaining tables and keeping track of the accumulated |
1604 | | * size of all tables seen from the segment end table. The previous |
1605 | | * table is compared to the accumulated size because the tables from the |
1606 | | * segment end are merged backwards recursively. |
1607 | | * |
1608 | | * Note that we keep iterating even after we have found the first |
1609 | | * starting point. This is because there may be tables in the stack |
1610 | | * preceding that first starting point which violate the geometric |
1611 | | * sequence. |
1612 | | * |
1613 | | * Example compaction segment start set to table with size 32: |
1614 | | * 128, 32, 16, 8, 4, 3, 1 |
1615 | | */ |
1616 | 0 | for (; i > 0; i--) { |
1617 | 0 | uint64_t curr = bytes; |
1618 | 0 | bytes += sizes[i - 1]; |
1619 | |
1620 | 0 | if (sizes[i - 1] < curr * factor) { |
1621 | 0 | seg.start = i - 1; |
1622 | 0 | seg.bytes = bytes; |
1623 | 0 | } |
1624 | 0 | } |
1625 | |
1626 | 0 | return seg; |
1627 | 0 | } |
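
The two example sequences in the comments above can be checked mechanically. The harness below is a hypothetical test, not part of stack.c; it assumes struct segment from stack.h ({ size_t start, end; uint64_t bytes; }) and relies on the default geometric factor of 2 (factor argument 0).

#include <assert.h>
#include <stdint.h>

static void check_suggested_segments(void)
{
	/* Already geometric: the segment stays empty, nothing to compact. */
	uint64_t ok[] = { 64, 32, 16, 8, 4, 2, 1 };
	struct segment seg = suggest_compaction_segment(ok, 7, 0);
	assert(seg.start == 0 && seg.end == 0);

	/*
	 * 4 < 3 * 2 is the first violation found walking backwards, so the
	 * exclusive end lands just past the table of size 3 (end == 6).
	 * Walking left, every earlier table is smaller than twice the
	 * accumulated size, so the start expands to the first table.
	 */
	uint64_t tail[] = { 64, 32, 16, 8, 4, 3, 1 };
	seg = suggest_compaction_segment(tail, 7, 0);
	assert(seg.start == 0 && seg.end == 6 && seg.bytes == 127);

	/* The comment's second example: the start lands on the size-32 table. */
	uint64_t mid[] = { 128, 32, 16, 8, 4, 3, 1 };
	seg = suggest_compaction_segment(mid, 7, 0);
	assert(seg.start == 1 && seg.end == 6 && seg.bytes == 63);
}
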
1628 | | |
1629 | | static int stack_segments_for_compaction(struct reftable_stack *st, |
1630 | | struct segment *seg) |
1631 | 0 | { |
1632 | 0 | int version = (st->opts.hash_id == REFTABLE_HASH_SHA1) ? 1 : 2; |
1633 | 0 | int overhead = header_size(version) - 1; |
1634 | 0 | uint64_t *sizes; |
1635 | |
1636 | 0 | REFTABLE_CALLOC_ARRAY(sizes, st->merged->tables_len); |
1637 | 0 | if (!sizes) |
1638 | 0 | return REFTABLE_OUT_OF_MEMORY_ERROR; |
1639 | | |
1640 | 0 | for (size_t i = 0; i < st->merged->tables_len; i++) |
1641 | 0 | sizes[i] = st->tables[i]->size - overhead; |
1642 | |
1643 | 0 | *seg = suggest_compaction_segment(sizes, st->merged->tables_len, |
1644 | 0 | st->opts.auto_compaction_factor); |
1645 | 0 | reftable_free(sizes); |
1646 | |
1647 | 0 | return 0; |
1648 | 0 | } |
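
The overhead subtraction above normalizes file sizes before the geometric check. Per the reftable format, header_size() is 24 bytes for version 1 (SHA-1) and 28 for version 2 (which adds a hash ID field), so each table contributes its file size minus 23 or 27 bytes. Presumably this keeps fixed per-table overhead, which compaction collapses into a single header, from skewing the comparison of payload sizes. The figures below are an illustrative sketch, not measurements.

#include <stddef.h>
#include <stdint.h>

/* Worked example of the normalization, assuming a SHA-1 (version 1) stack. */
static void overhead_example(void)
{
	int overhead = 24 - 1; /* header_size(1) - 1, as computed above */
	uint64_t file_sizes[] = { 4096 + 23, 2048 + 23, 1024 + 23 }; /* made up */
	uint64_t sizes[3];

	for (size_t i = 0; i < 3; i++)
		sizes[i] = file_sizes[i] - overhead; /* 4096, 2048, 1024 */

	/* The adjusted sequence is geometric (factor 2): no compaction needed. */
}
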
1649 | | |
1650 | | static int update_segment_if_compaction_required(struct reftable_stack *st, |
1651 | | struct segment *seg, |
1652 | | bool use_geometric, |
1653 | | bool *required) |
1654 | 0 | { |
1655 | 0 | int err; |
1656 | |
1657 | 0 | if (st->merged->tables_len < 2) { |
1658 | 0 | *required = false; |
1659 | 0 | return 0; |
1660 | 0 | } |
1661 | | |
1662 | 0 | if (!use_geometric) { |
1663 | 0 | *required = true; |
1664 | 0 | return 0; |
1665 | 0 | } |
1666 | | |
1667 | 0 | err = stack_segments_for_compaction(st, seg); |
1668 | 0 | if (err) |
1669 | 0 | return err; |
1670 | | |
1671 | 0 | *required = segment_size(seg) > 0; |
1672 | 0 | return 0; |
1673 | 0 | } |
1674 | | |
1675 | | int reftable_stack_compaction_required(struct reftable_stack *st, |
1676 | | bool use_heuristics, |
1677 | | bool *required) |
1678 | 0 | { |
1679 | 0 | struct segment seg; |
1680 | 0 | return update_segment_if_compaction_required(st, &seg, use_heuristics, |
1681 | 0 | required); |
1682 | 0 | } |
1683 | | |
1684 | | int reftable_stack_auto_compact(struct reftable_stack *st) |
1685 | 0 | { |
1686 | 0 | struct segment seg; |
1687 | 0 | bool required; |
1688 | 0 | int err; |
1689 | |
1690 | 0 | err = update_segment_if_compaction_required(st, &seg, true, &required); |
1691 | 0 | if (err) |
1692 | 0 | return err; |
1693 | | |
1694 | 0 | if (required) |
1695 | 0 | return stack_compact_range(st, seg.start, seg.end - 1, |
1696 | 0 | NULL, STACK_COMPACT_RANGE_BEST_EFFORT); |
1697 | | |
1698 | 0 | return 0; |
1699 | 0 | } |
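
Auto-compaction is meant to be called opportunistically after writes: if the suggested segment is empty, the stack is already geometric and the call is a no-op. The sketch below shows one plausible write path; it is hypothetical and assumes the callback-based reftable_stack_add() from reftable-stack.h.

/*
 * Hypothetical write path: append a new table, then restore the
 * geometric shape. Losing the compaction lock to a concurrent writer
 * is expected under contention and not worth propagating.
 */
static int add_and_maybe_compact(struct reftable_stack *st,
				 int (*write_table)(struct reftable_writer *wr,
						    void *arg),
				 void *arg)
{
	int err = reftable_stack_add(st, write_table, arg);
	if (err)
		return err;

	err = reftable_stack_auto_compact(st);
	if (err == REFTABLE_LOCK_ERROR)
		err = 0; /* another process is compacting; retry later */
	return err;
}
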
1700 | | |
1701 | | struct reftable_compaction_stats * |
1702 | | reftable_stack_compaction_stats(struct reftable_stack *st) |
1703 | 0 | { |
1704 | 0 | return &st->stats; |
1705 | 0 | } |
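
The failures counter bumped on REFTABLE_LOCK_ERROR near the top of this section is visible to callers through this getter. A hypothetical tracing hook, assuming the int attempts/failures fields declared for struct reftable_compaction_stats in reftable-stack.h:

#include <stdio.h>

static void trace_compaction_contention(struct reftable_stack *st)
{
	struct reftable_compaction_stats *stats =
		reftable_stack_compaction_stats(st);
	fprintf(stderr, "reftable: %d of %d compaction attempts lost a lock race\n",
		stats->failures, stats->attempts);
}
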
1706 | | |
1707 | | int reftable_stack_read_ref(struct reftable_stack *st, const char *refname, |
1708 | | struct reftable_ref_record *ref) |
1709 | 0 | { |
1710 | 0 | struct reftable_iterator it = { 0 }; |
1711 | 0 | int ret; |
1712 | |
1713 | 0 | ret = reftable_merged_table_init_ref_iterator(st->merged, &it); |
1714 | 0 | if (ret) |
1715 | 0 | goto out; |
1716 | | |
1717 | 0 | ret = reftable_iterator_seek_ref(&it, refname); |
1718 | 0 | if (ret) |
1719 | 0 | goto out; |
1720 | | |
1721 | 0 | ret = reftable_iterator_next_ref(&it, ref); |
1722 | 0 | if (ret) |
1723 | 0 | goto out; |
1724 | | |
1725 | 0 | if (strcmp(ref->refname, refname) || |
1726 | 0 | reftable_ref_record_is_deletion(ref)) { |
1727 | 0 | reftable_ref_record_release(ref); |
1728 | 0 | ret = 1; |
1729 | 0 | goto out; |
1730 | 0 | } |
1731 | | |
1732 | 0 | out: |
1733 | 0 | reftable_iterator_destroy(&it); |
1734 | 0 | return ret; |
1735 | 0 | } |
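
Note the return convention: 0 means the ref was found, 1 means it does not exist (the seek landed on a different refname or on a deletion tombstone), and negative values are errors. A hypothetical lookup built on top of it, using the record accessors from reftable-record.h:

#include <stdio.h>

static int print_ref(struct reftable_stack *st, const char *refname)
{
	struct reftable_ref_record ref = { 0 };
	int err = reftable_stack_read_ref(st, refname, &ref);
	if (err > 0)
		return 0; /* no such ref */
	if (err < 0)
		return err;

	if (ref.value_type == REFTABLE_REF_SYMREF)
		printf("%s -> %s\n", ref.refname, ref.value.symref);
	else
		printf("%s: direct ref\n", ref.refname); /* value.val1 holds the OID */

	reftable_ref_record_release(&ref);
	return 0;
}
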
1736 | | |
1737 | | int reftable_stack_read_log(struct reftable_stack *st, const char *refname, |
1738 | | struct reftable_log_record *log) |
1739 | 0 | { |
1740 | 0 | struct reftable_iterator it = {0}; |
1741 | 0 | int err; |
1742 | |
1743 | 0 | err = reftable_stack_init_log_iterator(st, &it); |
1744 | 0 | if (err) |
1745 | 0 | goto done; |
1746 | | |
1747 | 0 | err = reftable_iterator_seek_log(&it, refname); |
1748 | 0 | if (err) |
1749 | 0 | goto done; |
1750 | | |
1751 | 0 | err = reftable_iterator_next_log(&it, log); |
1752 | 0 | if (err) |
1753 | 0 | goto done; |
1754 | | |
1755 | 0 | if (strcmp(log->refname, refname) || |
1756 | 0 | reftable_log_record_is_deletion(log)) { |
1757 | 0 | err = 1; |
1758 | 0 | goto done; |
1759 | 0 | } |
1760 | | |
1761 | 0 | done: |
1762 | 0 | if (err) { |
1763 | 0 | reftable_log_record_release(log); |
1764 | 0 | } |
1765 | 0 | reftable_iterator_destroy(&it); |
1766 | 0 | return err; |
1767 | 0 | } |
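
The log lookup follows the same convention, except that the record is released on any non-zero result before returning. A short hypothetical example printing the most recent reflog message for a ref; field names follow struct reftable_log_record in reftable-record.h:

#include <inttypes.h>
#include <stdio.h>

static int print_latest_log(struct reftable_stack *st, const char *refname)
{
	struct reftable_log_record log = { 0 };
	int err = reftable_stack_read_log(st, refname, &log);
	if (err)
		return err > 0 ? 0 : err; /* 1 means no log entry exists */

	printf("%s@{%" PRIu64 "}: %s\n", log.refname, log.update_index,
	       log.value.update.message ? log.value.update.message : "");
	reftable_log_record_release(&log);
	return 0;
}
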
1768 | | |
1769 | | static int is_table_name(const char *s) |
1770 | 0 | { |
1771 | 0 | const char *dot = strrchr(s, '.'); |
1772 | 0 | return dot && !strcmp(dot, ".ref"); |
1773 | 0 | } |
1774 | | |
1775 | | static void remove_maybe_stale_table(struct reftable_stack *st, uint64_t max, |
1776 | | const char *name) |
1777 | 0 | { |
1778 | 0 | int err = 0; |
1779 | 0 | uint64_t update_idx = 0; |
1780 | 0 | struct reftable_block_source src = { NULL }; |
1781 | 0 | struct reftable_table *table = NULL; |
1782 | 0 | struct reftable_buf table_path = REFTABLE_BUF_INIT; |
1783 | |
1784 | 0 | err = stack_filename(&table_path, st, name); |
1785 | 0 | if (err < 0) |
1786 | 0 | goto done; |
1787 | | |
1788 | 0 | err = reftable_block_source_from_file(&src, table_path.buf); |
1789 | 0 | if (err < 0) |
1790 | 0 | goto done; |
1791 | | |
1792 | 0 | err = reftable_table_new(&table, &src, name); |
1793 | 0 | if (err < 0) |
1794 | 0 | goto done; |
1795 | | |
1796 | 0 | update_idx = reftable_table_max_update_index(table); |
1797 | 0 | reftable_table_decref(table); |
1798 | |
1799 | 0 | if (update_idx <= max) { |
1800 | 0 | unlink(table_path.buf); |
1801 | 0 | } |
1802 | 0 | done: |
1803 | 0 | reftable_buf_release(&table_path); |
1804 | 0 | } |
1805 | | |
1806 | | static int reftable_stack_clean_locked(struct reftable_stack *st) |
1807 | 0 | { |
1808 | 0 | uint64_t max = reftable_merged_table_max_update_index( |
1809 | 0 | reftable_stack_merged_table(st)); |
1810 | 0 | DIR *dir = opendir(st->reftable_dir); |
1811 | 0 | struct dirent *d = NULL; |
1812 | 0 | if (!dir) { |
1813 | 0 | return REFTABLE_IO_ERROR; |
1814 | 0 | } |
1815 | | |
1816 | 0 | while ((d = readdir(dir))) { |
1817 | 0 | int found = 0; |
1818 | 0 | if (!is_table_name(d->d_name)) |
1819 | 0 | continue; |
1820 | | |
1821 | 0 | for (size_t i = 0; !found && i < st->tables_len; i++) |
1822 | 0 | found = !strcmp(reftable_table_name(st->tables[i]), d->d_name); |
1823 | 0 | if (found) |
1824 | 0 | continue; |
1825 | | |
1826 | 0 | remove_maybe_stale_table(st, max, d->d_name); |
1827 | 0 | } |
1828 | |
1829 | 0 | closedir(dir); |
1830 | 0 | return 0; |
1831 | 0 | } |
1832 | | |
1833 | | int reftable_stack_clean(struct reftable_stack *st) |
1834 | 0 | { |
1835 | 0 | struct reftable_addition *add = NULL; |
1836 | 0 | int err = reftable_stack_new_addition(&add, st, 0); |
1837 | 0 | if (err < 0) { |
1838 | 0 | goto done; |
1839 | 0 | } |
1840 | | |
1841 | 0 | err = reftable_stack_reload(st); |
1842 | 0 | if (err < 0) { |
1843 | 0 | goto done; |
1844 | 0 | } |
1845 | | |
1846 | 0 | err = reftable_stack_clean_locked(st); |
1847 | |
1848 | 0 | done: |
1849 | 0 | reftable_addition_destroy(add); |
1850 | 0 | return err; |
1851 | 0 | } |
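
The cleanup path above is deliberately conservative: it takes the addition lock so no writer can race it, reloads to see the freshest tables.list, and then unlinks only *.ref files that are not referenced by the stack and whose maximum update index is already covered by it, i.e. leftovers from interrupted compactions or additions. A hypothetical maintenance hook follows; the decision to swallow lock contention is an assumption about caller policy.

#include <stdio.h>

static void gc_reftables(struct reftable_stack *st)
{
	int err = reftable_stack_clean(st);
	if (err == REFTABLE_LOCK_ERROR)
		return; /* a writer holds the lock; a later run will retry */
	if (err < 0)
		fprintf(stderr, "reftable cleanup failed: %d\n", err);
}
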
1852 | | |
1853 | | enum reftable_hash reftable_stack_hash_id(struct reftable_stack *st) |
1854 | 0 | { |
1855 | 0 | return reftable_merged_table_hash_id(st->merged); |
1856 | 0 | } |