/src/git/reftable/writer.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright 2020 Google LLC |
3 | | * |
4 | | * Use of this source code is governed by a BSD-style |
5 | | * license that can be found in the LICENSE file or at |
6 | | * https://developers.google.com/open-source/licenses/bsd |
7 | | */ |
8 | | |
9 | | #include "writer.h" |
10 | | |
11 | | #include "system.h" |
12 | | |
13 | | #include "block.h" |
14 | | #include "constants.h" |
15 | | #include "record.h" |
16 | | #include "tree.h" |
17 | | #include "reftable-error.h" |
18 | | |
19 | | /* finishes a block and writes it to storage */ |
20 | | static int writer_flush_block(struct reftable_writer *w); |
21 | | |
22 | | /* deallocates memory related to the index */ |
23 | | static void writer_clear_index(struct reftable_writer *w); |
24 | | |
25 | | /* finishes writing an 'r' (refs) or 'g' (reflogs) section */ |
26 | | static int writer_finish_public_section(struct reftable_writer *w); |
27 | | |
28 | | static struct reftable_block_stats * |
29 | | writer_reftable_block_stats(struct reftable_writer *w, uint8_t typ) |
30 | 0 | { |
31 | 0 | switch (typ) { |
32 | 0 | case 'r': |
33 | 0 | return &w->stats.ref_stats; |
34 | 0 | case 'o': |
35 | 0 | return &w->stats.obj_stats; |
36 | 0 | case 'i': |
37 | 0 | return &w->stats.idx_stats; |
38 | 0 | case 'g': |
39 | 0 | return &w->stats.log_stats; |
40 | 0 | } |
41 | 0 | abort(); |
42 | 0 | return NULL; |
43 | 0 | } |
44 | | |
45 | | /* write data, queuing the padding for the next write. Returns negative for |
46 | | * error. */ |
47 | | static int padded_write(struct reftable_writer *w, uint8_t *data, size_t len, |
48 | | int padding) |
49 | 0 | { |
50 | 0 | int n = 0; |
51 | 0 | if (w->pending_padding > 0) { |
52 | 0 | uint8_t *zeroed; |
53 | 0 | int n; |
54 | |
55 | 0 | zeroed = reftable_calloc(w->pending_padding, sizeof(*zeroed)); |
56 | 0 | if (!zeroed) |
57 | 0 | return -1; |
58 | | |
59 | 0 | n = w->write(w->write_arg, zeroed, w->pending_padding); |
60 | 0 | if (n < 0) { |
61 | 0 | reftable_free(zeroed); |
62 | 0 | return n; |
63 | 0 | } |
64 | | |
65 | 0 | w->pending_padding = 0; |
66 | 0 | reftable_free(zeroed); |
67 | 0 | } |
68 | | |
69 | 0 | w->pending_padding = padding; |
70 | 0 | n = w->write(w->write_arg, data, len); |
71 | 0 | if (n < 0) |
72 | 0 | return n; |
73 | 0 | n += padding; |
74 | 0 | return 0; |
75 | 0 | } |
76 | | |
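The deferred-padding scheme above means padding bytes are never written eagerly: padded_write() records them in pending_padding and emits the zeroes only once the next block arrives, which is what later allows reftable_writer_close() to drop the final block's padding before the footer. A minimal sketch of the same pattern over a plain file descriptor; padded_out and its helper are illustrative, not part of the reftable API:

    #include <unistd.h>

    struct padded_out {
            int fd;
            size_t pending; /* zero bytes owed before the next payload */
    };

    static int padded_out_write(struct padded_out *out, const void *data,
                                size_t len, size_t padding)
    {
            /* Settle the padding owed by the previous block first. */
            while (out->pending > 0) {
                    static const char zeroes[64];
                    size_t chunk = out->pending < sizeof(zeroes) ?
                                   out->pending : sizeof(zeroes);
                    if (write(out->fd, zeroes, chunk) < 0)
                            return -1;
                    out->pending -= chunk;
            }

            /* Queue this block's padding; it is dropped if nothing follows. */
            out->pending = padding;
            return write(out->fd, data, len) < 0 ? -1 : 0;
    }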
77 | | static void options_set_defaults(struct reftable_write_options *opts) |
78 | 0 | { |
79 | 0 | if (opts->restart_interval == 0) { |
80 | 0 | opts->restart_interval = 16; |
81 | 0 | } |
82 | |
83 | 0 | if (opts->hash_id == 0) { |
84 | 0 | opts->hash_id = REFTABLE_HASH_SHA1; |
85 | 0 | } |
86 | 0 | if (opts->block_size == 0) { |
87 | 0 | opts->block_size = DEFAULT_BLOCK_SIZE; |
88 | 0 | } |
89 | 0 | } |
90 | | |
91 | | static int writer_version(struct reftable_writer *w) |
92 | 0 | { |
93 | 0 | return (w->opts.hash_id == 0 || w->opts.hash_id == REFTABLE_HASH_SHA1) ? |
94 | 0 | 1 : |
95 | 0 | 2; |
96 | 0 | } |
97 | | |
98 | | static int writer_write_header(struct reftable_writer *w, uint8_t *dest) |
99 | 0 | { |
100 | 0 | memcpy(dest, "REFT", 4); |
101 | |
102 | 0 | dest[4] = writer_version(w); |
103 | |
104 | 0 | reftable_put_be24(dest + 5, w->opts.block_size); |
105 | 0 | reftable_put_be64(dest + 8, w->min_update_index); |
106 | 0 | reftable_put_be64(dest + 16, w->max_update_index); |
107 | 0 | if (writer_version(w) == 2) { |
108 | 0 | uint32_t hash_id; |
109 | |
110 | 0 | switch (w->opts.hash_id) { |
111 | 0 | case REFTABLE_HASH_SHA1: |
112 | 0 | hash_id = REFTABLE_FORMAT_ID_SHA1; |
113 | 0 | break; |
114 | 0 | case REFTABLE_HASH_SHA256: |
115 | 0 | hash_id = REFTABLE_FORMAT_ID_SHA256; |
116 | 0 | break; |
117 | 0 | default: |
118 | 0 | return -1; |
119 | 0 | } |
120 | | |
121 | 0 | reftable_put_be32(dest + 24, hash_id); |
122 | 0 | } |
123 | | |
124 | 0 | return header_size(writer_version(w)); |
125 | 0 | } |
126 | | |
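As written above, a version-1 header is 24 bytes: the magic "REFT", one version byte, a 24-bit big-endian block size, and two 64-bit big-endian update indices; version 2 appends a 4-byte hash format ID for 28 bytes total. A hedged decoding sketch of the version-1 layout (not the library's own reader; reft_header_v1 and parse_header_v1 are invented names):

    #include <stdint.h>
    #include <string.h>

    struct reft_header_v1 {
            char magic[4];             /* "REFT", offset 0               */
            uint8_t version;           /* offset 4                       */
            uint32_t block_size;       /* offsets 5-7, big-endian uint24 */
            uint64_t min_update_index; /* offsets 8-15, big-endian       */
            uint64_t max_update_index; /* offsets 16-23, big-endian      */
    };

    static void parse_header_v1(const uint8_t *p, struct reft_header_v1 *h)
    {
            memcpy(h->magic, p, 4);
            h->version = p[4];
            h->block_size = (uint32_t)p[5] << 16 | (uint32_t)p[6] << 8 | p[7];
            h->min_update_index = 0;
            h->max_update_index = 0;
            for (int i = 0; i < 8; i++) {
                    h->min_update_index = h->min_update_index << 8 | p[8 + i];
                    h->max_update_index = h->max_update_index << 8 | p[16 + i];
            }
    }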
127 | | static int writer_reinit_block_writer(struct reftable_writer *w, uint8_t typ) |
128 | 0 | { |
129 | 0 | int block_start = 0, ret; |
130 | |
131 | 0 | if (w->next == 0) |
132 | 0 | block_start = header_size(writer_version(w)); |
133 | |
134 | 0 | reftable_buf_reset(&w->last_key); |
135 | 0 | ret = block_writer_init(&w->block_writer_data, typ, w->block, |
136 | 0 | w->opts.block_size, block_start, |
137 | 0 | hash_size(w->opts.hash_id)); |
138 | 0 | if (ret < 0) |
139 | 0 | return ret; |
140 | | |
141 | 0 | w->block_writer = &w->block_writer_data; |
142 | 0 | w->block_writer->restart_interval = w->opts.restart_interval; |
143 | |
144 | 0 | return 0; |
145 | 0 | } |
146 | | |
147 | | int reftable_writer_new(struct reftable_writer **out, |
148 | | ssize_t (*writer_func)(void *, const void *, size_t), |
149 | | int (*flush_func)(void *), |
150 | | void *writer_arg, const struct reftable_write_options *_opts) |
151 | 0 | { |
152 | 0 | struct reftable_write_options opts = {0}; |
153 | 0 | struct reftable_writer *wp; |
154 | |
155 | 0 | wp = reftable_calloc(1, sizeof(*wp)); |
156 | 0 | if (!wp) |
157 | 0 | return REFTABLE_OUT_OF_MEMORY_ERROR; |
158 | | |
159 | 0 | if (_opts) |
160 | 0 | opts = *_opts; |
161 | 0 | options_set_defaults(&opts); |
162 | 0 | if (opts.block_size >= (1 << 24)) |
163 | 0 | return REFTABLE_API_ERROR; |
164 | | |
165 | 0 | reftable_buf_init(&wp->block_writer_data.last_key); |
166 | 0 | reftable_buf_init(&wp->last_key); |
167 | 0 | reftable_buf_init(&wp->scratch); |
168 | 0 | REFTABLE_CALLOC_ARRAY(wp->block, opts.block_size); |
169 | 0 | if (!wp->block) { |
170 | 0 | reftable_free(wp); |
171 | 0 | return REFTABLE_OUT_OF_MEMORY_ERROR; |
172 | 0 | } |
173 | 0 | wp->write = writer_func; |
174 | 0 | wp->write_arg = writer_arg; |
175 | 0 | wp->opts = opts; |
176 | 0 | wp->flush = flush_func; |
177 | 0 | writer_reinit_block_writer(wp, REFTABLE_BLOCK_TYPE_REF); |
178 | |
179 | 0 | *out = wp; |
180 | |
181 | 0 | return 0; |
182 | 0 | } |
183 | | |
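The writer is storage-agnostic: it only needs the write and flush callbacks whose signatures appear in the declaration above. A hypothetical usage sketch backed by a file descriptor; fd_write and fd_flush are illustrative helpers, not library functions:

    #include <unistd.h>

    static ssize_t fd_write(void *arg, const void *data, size_t len)
    {
            return write(*(int *)arg, data, len);
    }

    static int fd_flush(void *arg)
    {
            return fsync(*(int *)arg);
    }

    /*
     * struct reftable_writer *w;
     * int err = reftable_writer_new(&w, fd_write, fd_flush, &fd, NULL);
     *
     * Passing NULL options picks up the defaults applied by
     * options_set_defaults().
     */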
184 | | int reftable_writer_set_limits(struct reftable_writer *w, uint64_t min, |
185 | | uint64_t max) |
186 | 0 | { |
187 | | /* |
188 | | * Set the min/max update index limits for the reftable writer. |
189 | | * This must be called before adding any records, since: |
190 | | * - The 'next' field gets set after writing the first block. |
191 | | * - The 'last_key' field updates with each new record (but resets |
192 | | * after sections). |
193 | | * Returns REFTABLE_API_ERROR if called after writing has begun. |
194 | | */ |
195 | 0 | if (w->next || w->last_key.len) |
196 | 0 | return REFTABLE_API_ERROR; |
197 | | |
198 | 0 | w->min_update_index = min; |
199 | 0 | w->max_update_index = max; |
200 | |
201 | 0 | return 0; |
202 | 0 | } |
203 | | |
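The `w->next || w->last_key.len` guard makes the call order strict: limits first, records second. A short sketch under that assumption (the update index 42 is made up):

    /* Fails with REFTABLE_API_ERROR if any record was added first. */
    static int write_single_ref(struct reftable_writer *w,
                                struct reftable_ref_record *ref)
    {
            int err = reftable_writer_set_limits(w, 42, 42);
            if (err < 0)
                    return err;
            /* ref->update_index must be 42 to pass the add_ref checks. */
            return reftable_writer_add_ref(w, ref);
    }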
204 | | static void writer_release(struct reftable_writer *w) |
205 | 0 | { |
206 | 0 | if (w) { |
207 | 0 | reftable_free(w->block); |
208 | 0 | w->block = NULL; |
209 | 0 | block_writer_release(&w->block_writer_data); |
210 | 0 | w->block_writer = NULL; |
211 | 0 | writer_clear_index(w); |
212 | 0 | reftable_buf_release(&w->last_key); |
213 | 0 | reftable_buf_release(&w->scratch); |
214 | 0 | } |
215 | 0 | } |
216 | | |
217 | | void reftable_writer_free(struct reftable_writer *w) |
218 | 0 | { |
219 | 0 | writer_release(w); |
220 | 0 | reftable_free(w); |
221 | 0 | } |
222 | | |
223 | | struct obj_index_tree_node { |
224 | | struct reftable_buf hash; |
225 | | uint64_t *offsets; |
226 | | size_t offset_len; |
227 | | size_t offset_cap; |
228 | | }; |
229 | | |
230 | | #define OBJ_INDEX_TREE_NODE_INIT \ |
231 | 0 | { \ |
232 | 0 | .hash = REFTABLE_BUF_INIT \ |
233 | 0 | } |
234 | | |
235 | | static int obj_index_tree_node_compare(const void *a, const void *b) |
236 | 0 | { |
237 | 0 | return reftable_buf_cmp(&((const struct obj_index_tree_node *)a)->hash, |
238 | 0 | &((const struct obj_index_tree_node *)b)->hash); |
239 | 0 | } |
240 | | |
241 | | static int writer_index_hash(struct reftable_writer *w, struct reftable_buf *hash) |
242 | 0 | { |
243 | 0 | uint64_t off = w->next; |
244 | 0 | struct obj_index_tree_node want = { .hash = *hash }; |
245 | 0 | struct obj_index_tree_node *key; |
246 | 0 | struct tree_node *node; |
247 | |
248 | 0 | node = tree_search(w->obj_index_tree, &want, &obj_index_tree_node_compare); |
249 | 0 | if (!node) { |
250 | 0 | struct obj_index_tree_node empty = OBJ_INDEX_TREE_NODE_INIT; |
251 | 0 | int err; |
252 | |
253 | 0 | key = reftable_malloc(sizeof(*key)); |
254 | 0 | if (!key) |
255 | 0 | return REFTABLE_OUT_OF_MEMORY_ERROR; |
256 | | |
257 | 0 | *key = empty; |
258 | |
259 | 0 | reftable_buf_reset(&key->hash); |
260 | 0 | err = reftable_buf_add(&key->hash, hash->buf, hash->len); |
261 | 0 | if (err < 0) { |
262 | 0 | reftable_free(key); |
263 | 0 | return err; |
264 | 0 | } |
265 | 0 | tree_insert(&w->obj_index_tree, key, |
266 | 0 | &obj_index_tree_node_compare); |
267 | 0 | } else { |
268 | 0 | key = node->key; |
269 | 0 | } |
270 | | |
271 | 0 | if (key->offset_len > 0 && key->offsets[key->offset_len - 1] == off) |
272 | 0 | return 0; |
273 | | |
274 | 0 | REFTABLE_ALLOC_GROW_OR_NULL(key->offsets, key->offset_len + 1, |
275 | 0 | key->offset_cap); |
276 | 0 | if (!key->offsets) |
277 | 0 | return REFTABLE_OUT_OF_MEMORY_ERROR; |
278 | 0 | key->offsets[key->offset_len++] = off; |
279 | |
280 | 0 | return 0; |
281 | 0 | } |
282 | | |
283 | | static int writer_add_record(struct reftable_writer *w, |
284 | | struct reftable_record *rec) |
285 | 0 | { |
286 | 0 | int err; |
287 | |
288 | 0 | err = reftable_record_key(rec, &w->scratch); |
289 | 0 | if (err < 0) |
290 | 0 | goto done; |
291 | | |
292 | 0 | if (reftable_buf_cmp(&w->last_key, &w->scratch) >= 0) { |
293 | 0 | err = REFTABLE_API_ERROR; |
294 | 0 | goto done; |
295 | 0 | } |
296 | | |
297 | 0 | reftable_buf_reset(&w->last_key); |
298 | 0 | err = reftable_buf_add(&w->last_key, w->scratch.buf, w->scratch.len); |
299 | 0 | if (err < 0) |
300 | 0 | goto done; |
301 | | |
302 | 0 | if (!w->block_writer) { |
303 | 0 | err = writer_reinit_block_writer(w, reftable_record_type(rec)); |
304 | 0 | if (err < 0) |
305 | 0 | goto done; |
306 | 0 | } |
307 | | |
308 | 0 | if (block_writer_type(w->block_writer) != reftable_record_type(rec)) |
309 | 0 | return REFTABLE_API_ERROR; |
310 | | |
311 | | /* |
312 | | * Try to add the record to the writer. If this succeeds then we're |
313 | | * done. Otherwise the block writer may have hit the block size limit |
314 | | * and needs to be flushed. |
315 | | */ |
316 | 0 | err = block_writer_add(w->block_writer, rec); |
317 | 0 | if (err == 0) |
318 | 0 | goto done; |
319 | | |
320 | 0 | if (err != REFTABLE_ENTRY_TOO_BIG_ERROR) |
321 | 0 | goto done; |
322 | | /* |
323 | | * The current block is full, so we need to flush and reinitialize the |
324 | | * writer to start writing the next block. |
325 | | */ |
326 | 0 | err = writer_flush_block(w); |
327 | 0 | if (err < 0) |
328 | 0 | goto done; |
329 | 0 | err = writer_reinit_block_writer(w, reftable_record_type(rec)); |
330 | 0 | if (err < 0) |
331 | 0 | goto done; |
332 | | |
333 | | /* |
334 | | * Try to add the record to the writer again. If this still fails then |
335 | | * the record does not fit into the block size. |
336 | | */ |
337 | 0 | err = block_writer_add(w->block_writer, rec); |
338 | 0 | if (err) |
339 | 0 | goto done; |
340 | | |
341 | 0 | done: |
342 | 0 | return err; |
343 | 0 | } |
344 | | |
345 | | int reftable_writer_add_ref(struct reftable_writer *w, |
346 | | struct reftable_ref_record *ref) |
347 | 0 | { |
348 | 0 | struct reftable_record rec = { |
349 | 0 | .type = REFTABLE_BLOCK_TYPE_REF, |
350 | 0 | .u = { |
351 | 0 | .ref = *ref |
352 | 0 | }, |
353 | 0 | }; |
354 | 0 | int err; |
355 | |
356 | 0 | if (!ref->refname || |
357 | 0 | ref->update_index < w->min_update_index || |
358 | 0 | ref->update_index > w->max_update_index) |
359 | 0 | return REFTABLE_API_ERROR; |
360 | | |
361 | 0 | rec.u.ref.update_index -= w->min_update_index; |
362 | |
363 | 0 | err = writer_add_record(w, &rec); |
364 | 0 | if (err < 0) |
365 | 0 | goto out; |
366 | | |
367 | 0 | if (!w->opts.skip_index_objects && reftable_ref_record_val1(ref)) { |
368 | 0 | reftable_buf_reset(&w->scratch); |
369 | 0 | err = reftable_buf_add(&w->scratch, (char *)reftable_ref_record_val1(ref), |
370 | 0 | hash_size(w->opts.hash_id)); |
371 | 0 | if (err < 0) |
372 | 0 | goto out; |
373 | | |
374 | 0 | err = writer_index_hash(w, &w->scratch); |
375 | 0 | if (err < 0) |
376 | 0 | goto out; |
377 | 0 | } |
378 | | |
379 | 0 | if (!w->opts.skip_index_objects && reftable_ref_record_val2(ref)) { |
380 | 0 | reftable_buf_reset(&w->scratch); |
381 | 0 | err = reftable_buf_add(&w->scratch, reftable_ref_record_val2(ref), |
382 | 0 | hash_size(w->opts.hash_id)); |
383 | 0 | if (err < 0) |
384 | 0 | goto out; |
385 | | |
386 | 0 | err = writer_index_hash(w, &w->scratch); |
387 | 0 | if (err < 0) |
388 | 0 | goto out; |
389 | 0 | } |
390 | | |
391 | 0 | err = 0; |
392 | |
393 | 0 | out: |
394 | 0 | return err; |
395 | 0 | } |
396 | | |
397 | | int reftable_writer_add_refs(struct reftable_writer *w, |
398 | | struct reftable_ref_record *refs, size_t n) |
399 | 0 | { |
400 | 0 | int err = 0; |
401 | |
402 | 0 | if (n) |
403 | 0 | qsort(refs, n, sizeof(*refs), reftable_ref_record_compare_name); |
404 | |
405 | 0 | for (size_t i = 0; err == 0 && i < n; i++) |
406 | 0 | err = reftable_writer_add_ref(w, &refs[i]); |
407 | |
408 | 0 | return err; |
409 | 0 | } |
410 | | |
411 | | static int reftable_writer_add_log_verbatim(struct reftable_writer *w, |
412 | | struct reftable_log_record *log) |
413 | 0 | { |
414 | 0 | struct reftable_record rec = { |
415 | 0 | .type = REFTABLE_BLOCK_TYPE_LOG, |
416 | 0 | .u = { |
417 | 0 | .log = *log, |
418 | 0 | }, |
419 | 0 | }; |
420 | 0 | if (w->block_writer && |
421 | 0 | block_writer_type(w->block_writer) == REFTABLE_BLOCK_TYPE_REF) { |
422 | 0 | int err = writer_finish_public_section(w); |
423 | 0 | if (err < 0) |
424 | 0 | return err; |
425 | 0 | } |
426 | | |
427 | 0 | w->next -= w->pending_padding; |
428 | 0 | w->pending_padding = 0; |
429 | 0 | return writer_add_record(w, &rec); |
430 | 0 | } |
431 | | |
432 | | int reftable_writer_add_log(struct reftable_writer *w, |
433 | | struct reftable_log_record *log) |
434 | 0 | { |
435 | 0 | char *input_log_message = NULL; |
436 | 0 | struct reftable_buf cleaned_message = REFTABLE_BUF_INIT; |
437 | 0 | int err = 0; |
438 | |
439 | 0 | if (log->value_type == REFTABLE_LOG_DELETION) |
440 | 0 | return reftable_writer_add_log_verbatim(w, log); |
441 | | |
442 | | /* |
443 | | * Verify only the upper limit of the update_index. Each reflog entry |
444 | | * is tied to a specific update_index. Entries in the reflog can be |
445 | | * replaced by adding a new entry with the same update_index, |
446 | | * effectively canceling the old one. |
447 | | * |
448 | | * Consequently, reflog updates may include update_index values lower |
449 | | * than the writer's min_update_index. |
450 | | */ |
451 | 0 | if (log->update_index > w->max_update_index) |
452 | 0 | return REFTABLE_API_ERROR; |
453 | | |
454 | 0 | if (!log->refname) |
455 | 0 | return REFTABLE_API_ERROR; |
456 | | |
457 | 0 | input_log_message = log->value.update.message; |
458 | 0 | if (!w->opts.exact_log_message && log->value.update.message) { |
459 | 0 | err = reftable_buf_addstr(&cleaned_message, log->value.update.message); |
460 | 0 | if (err < 0) |
461 | 0 | goto done; |
462 | | |
463 | 0 | while (cleaned_message.len && |
464 | 0 | cleaned_message.buf[cleaned_message.len - 1] == '\n') { |
465 | 0 | err = reftable_buf_setlen(&cleaned_message, |
466 | 0 | cleaned_message.len - 1); |
467 | 0 | if (err < 0) |
468 | 0 | goto done; |
469 | 0 | } |
470 | 0 | if (strchr(cleaned_message.buf, '\n')) { |
471 | | /* multiple lines not allowed. */ |
472 | 0 | err = REFTABLE_API_ERROR; |
473 | 0 | goto done; |
474 | 0 | } |
475 | | |
476 | 0 | err = reftable_buf_addstr(&cleaned_message, "\n"); |
477 | 0 | if (err < 0) |
478 | 0 | goto done; |
479 | | |
480 | 0 | log->value.update.message = cleaned_message.buf; |
481 | 0 | } |
482 | | |
483 | 0 | err = reftable_writer_add_log_verbatim(w, log); |
484 | 0 | log->value.update.message = input_log_message; |
485 | 0 | done: |
486 | 0 | reftable_buf_release(&cleaned_message); |
487 | 0 | return err; |
488 | 0 | } |
489 | | |
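Unless exact_log_message is set, the function above normalizes update messages: trailing newlines are folded away, an embedded newline is rejected, and exactly one trailing newline is re-added. Worked examples with illustrative inputs:

    /*
     * "commit: initial\n\n"  ->  "commit: initial\n"  (trailing LFs folded)
     * "commit: initial"      ->  "commit: initial\n"  (single LF appended)
     * "one\ntwo"             ->  REFTABLE_API_ERROR   (embedded LF rejected)
     */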
490 | | int reftable_writer_add_logs(struct reftable_writer *w, |
491 | | struct reftable_log_record *logs, size_t n) |
492 | 0 | { |
493 | 0 | int err = 0; |
494 | |
495 | 0 | if (n) |
496 | 0 | qsort(logs, n, sizeof(*logs), reftable_log_record_compare_key); |
497 | |
498 | 0 | for (size_t i = 0; err == 0 && i < n; i++) |
499 | 0 | err = reftable_writer_add_log(w, &logs[i]); |
500 | |
501 | 0 | return err; |
502 | 0 | } |
503 | | |
504 | | static int writer_finish_section(struct reftable_writer *w) |
505 | 0 | { |
506 | 0 | struct reftable_block_stats *bstats = NULL; |
507 | 0 | uint8_t typ = block_writer_type(w->block_writer); |
508 | 0 | uint64_t index_start = 0; |
509 | 0 | int max_level = 0; |
510 | 0 | size_t threshold = w->opts.unpadded ? 1 : 3; |
511 | 0 | int before_blocks = w->stats.idx_stats.blocks; |
512 | 0 | int err; |
513 | |
514 | 0 | err = writer_flush_block(w); |
515 | 0 | if (err < 0) |
516 | 0 | return err; |
517 | | |
518 | | /* |
519 | | * When the section we are about to index has a lot of blocks then the |
520 | | * index itself may span across multiple blocks, as well. This would |
521 | | * require a linear scan over index blocks only to find the desired |
522 | | * indexed block, which is inefficient. Instead, we write a multi-level |
523 | | * index where index records of level N+1 will refer to index blocks of |
524 | | * level N. This isn't constant time, either, but at least logarithmic. |
525 | | * |
526 | | * This loop handles writing this multi-level index. Note that we write |
527 | | * the lowest-level index pointing to the indexed blocks first. We then |
528 | | * continue writing additional index levels until the current level has |
529 | | * fewer blocks than the threshold so that the highest level will be at |
530 | | * the end of the index section. |
531 | | * |
532 | | * Readers are thus required to start reading the index section from |
533 | | * its end, which is why we set `index_start` to the beginning of the |
534 | | * last index section. |
535 | | */ |
536 | 0 | while (w->index_len > threshold) { |
537 | 0 | struct reftable_index_record *idx = NULL; |
538 | 0 | size_t i, idx_len; |
539 | |
540 | 0 | max_level++; |
541 | 0 | index_start = w->next; |
542 | 0 | err = writer_reinit_block_writer(w, REFTABLE_BLOCK_TYPE_INDEX); |
543 | 0 | if (err < 0) |
544 | 0 | return err; |
545 | | |
546 | 0 | idx = w->index; |
547 | 0 | idx_len = w->index_len; |
548 | |
549 | 0 | w->index = NULL; |
550 | 0 | w->index_len = 0; |
551 | 0 | w->index_cap = 0; |
552 | 0 | for (i = 0; i < idx_len; i++) { |
553 | 0 | struct reftable_record rec = { |
554 | 0 | .type = REFTABLE_BLOCK_TYPE_INDEX, |
555 | 0 | .u = { |
556 | 0 | .idx = idx[i], |
557 | 0 | }, |
558 | 0 | }; |
559 | |
560 | 0 | err = writer_add_record(w, &rec); |
561 | 0 | if (err < 0) |
562 | 0 | return err; |
563 | 0 | } |
564 | | |
565 | 0 | err = writer_flush_block(w); |
566 | 0 | if (err < 0) |
567 | 0 | return err; |
568 | | |
569 | 0 | for (i = 0; i < idx_len; i++) |
570 | 0 | reftable_buf_release(&idx[i].last_key); |
571 | 0 | reftable_free(idx); |
572 | 0 | } |
573 | | |
574 | | /* |
575 | | * The index may still contain a number of index blocks lower than the |
576 | | * threshold. Clear it so that these entries don't leak into the next |
577 | | * index section. |
578 | | */ |
579 | 0 | writer_clear_index(w); |
580 | |
581 | 0 | bstats = writer_reftable_block_stats(w, typ); |
582 | 0 | bstats->index_blocks = w->stats.idx_stats.blocks - before_blocks; |
583 | 0 | bstats->index_offset = index_start; |
584 | 0 | bstats->max_index_level = max_level; |
585 | | |
586 | | /* Reinit last_key, as the next section can start with any key. */ |
587 | 0 | reftable_buf_reset(&w->last_key); |
588 | |
589 | 0 | return 0; |
590 | 0 | } |
591 | | |
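A worked example for the index loop above, using the default padded threshold of 3 and invented block counts: flushing a ref section of 200 blocks leaves 200 index records, so a level-1 index is written. If those records span 12 index blocks, the loop runs again and writes a level-2 index of 12 records; once that level fits into a single block, w->index_len is 1, the `index_len > threshold` condition fails, and the loop exits with max_index_level at 2 and index_start pointing at the topmost level, which is where readers begin.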
592 | | struct common_prefix_arg { |
593 | | struct reftable_buf *last; |
594 | | size_t max; |
595 | | }; |
596 | | |
597 | | static void update_common(void *void_arg, void *key) |
598 | 0 | { |
599 | 0 | struct common_prefix_arg *arg = void_arg; |
600 | 0 | struct obj_index_tree_node *entry = key; |
601 | 0 | if (arg->last) { |
602 | 0 | size_t n = common_prefix_size(&entry->hash, arg->last); |
603 | 0 | if (n > arg->max) |
604 | 0 | arg->max = n; |
605 | 0 | } |
606 | 0 | arg->last = &entry->hash; |
607 | 0 | } |
608 | | |
609 | | struct write_record_arg { |
610 | | struct reftable_writer *w; |
611 | | int err; |
612 | | }; |
613 | | |
614 | | static void write_object_record(void *void_arg, void *key) |
615 | 0 | { |
616 | 0 | struct write_record_arg *arg = void_arg; |
617 | 0 | struct obj_index_tree_node *entry = key; |
618 | 0 | struct reftable_record |
619 | 0 | rec = { .type = REFTABLE_BLOCK_TYPE_OBJ, |
620 | 0 | .u.obj = { |
621 | 0 | .hash_prefix = (uint8_t *)entry->hash.buf, |
622 | 0 | .hash_prefix_len = arg->w->stats.object_id_len, |
623 | 0 | .offsets = entry->offsets, |
624 | 0 | .offset_len = entry->offset_len, |
625 | 0 | } }; |
626 | 0 | if (arg->err < 0) |
627 | 0 | goto done; |
628 | | |
629 | | /* |
630 | | * Try to add the record to the writer. If this succeeds then we're |
631 | | * done. Otherwise the block writer may have hit the block size limit |
632 | | * and needs to be flushed. |
633 | | */ |
634 | 0 | arg->err = block_writer_add(arg->w->block_writer, &rec); |
635 | 0 | if (arg->err == 0) |
636 | 0 | goto done; |
637 | | |
638 | 0 | if (arg->err != REFTABLE_ENTRY_TOO_BIG_ERROR) |
639 | 0 | goto done; |
640 | | |
641 | | /* |
642 | | * The current block is full, so we need to flush and reinitialize the |
643 | | * writer to start writing the next block. |
644 | | */ |
645 | 0 | arg->err = writer_flush_block(arg->w); |
646 | 0 | if (arg->err < 0) |
647 | 0 | goto done; |
648 | | |
649 | 0 | arg->err = writer_reinit_block_writer(arg->w, REFTABLE_BLOCK_TYPE_OBJ); |
650 | 0 | if (arg->err < 0) |
651 | 0 | goto done; |
652 | | |
653 | | /* |
654 | | * If this still fails then we may need to reset record's offset |
655 | | * length to reduce the data size to be written. |
656 | | */ |
657 | 0 | arg->err = block_writer_add(arg->w->block_writer, &rec); |
658 | 0 | if (arg->err == 0) |
659 | 0 | goto done; |
660 | | |
661 | 0 | if (arg->err != REFTABLE_ENTRY_TOO_BIG_ERROR) |
662 | 0 | goto done; |
663 | | |
664 | 0 | rec.u.obj.offset_len = 0; |
665 | 0 | arg->err = block_writer_add(arg->w->block_writer, &rec); |
666 | | |
667 | | /* Should be able to write into a fresh block. */ |
668 | 0 | assert(arg->err == 0); |
669 | |
670 | 0 | done:; |
671 | 0 | } |
672 | | |
673 | | static void object_record_free(void *void_arg REFTABLE_UNUSED, void *key) |
674 | 0 | { |
675 | 0 | struct obj_index_tree_node *entry = key; |
676 | |
677 | 0 | REFTABLE_FREE_AND_NULL(entry->offsets); |
678 | 0 | reftable_buf_release(&entry->hash); |
679 | 0 | reftable_free(entry); |
680 | 0 | } |
681 | | |
682 | | static int writer_dump_object_index(struct reftable_writer *w) |
683 | 0 | { |
684 | 0 | struct write_record_arg closure = { .w = w }; |
685 | 0 | struct common_prefix_arg common = { |
686 | 0 | .max = 1, /* obj_id_len should be >= 2. */ |
687 | 0 | }; |
688 | 0 | int err; |
689 | |
690 | 0 | if (w->obj_index_tree) |
691 | 0 | infix_walk(w->obj_index_tree, &update_common, &common); |
692 | 0 | w->stats.object_id_len = common.max + 1; |
693 | |
694 | 0 | err = writer_reinit_block_writer(w, REFTABLE_BLOCK_TYPE_OBJ); |
695 | 0 | if (err < 0) |
696 | 0 | return err; |
697 | | |
698 | 0 | if (w->obj_index_tree) |
699 | 0 | infix_walk(w->obj_index_tree, &write_object_record, &closure); |
700 | |
701 | 0 | if (closure.err < 0) |
702 | 0 | return closure.err; |
703 | 0 | return writer_finish_section(w); |
704 | 0 | } |
705 | | |
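writer_dump_object_index() derives the stored abbreviation length from the in-order walk above: one byte more than the longest prefix shared by adjacent hashes, with a floor of 2 implied by `.max = 1`. A small sketch of the prefix computation with made-up hash bytes; common_prefix_bytes is illustrative, unlike the library's common_prefix_size():

    #include <stddef.h>

    /* Length in bytes of the common prefix of two n-byte binary hashes. */
    static size_t common_prefix_bytes(const unsigned char *a,
                                      const unsigned char *b, size_t n)
    {
            size_t i;
            for (i = 0; i < n && a[i] == b[i]; i++)
                    ;
            return i;
    }

    /*
     * For sorted hashes starting 0xab10..., 0xab57..., 0xcd02... the
     * longest adjacent prefix is one byte (0xab), so object_id_len
     * becomes 1 + 1 = 2: two leading bytes identify every entry.
     */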
706 | | static int writer_finish_public_section(struct reftable_writer *w) |
707 | 0 | { |
708 | 0 | uint8_t typ = 0; |
709 | 0 | int err = 0; |
710 | |
711 | 0 | if (!w->block_writer) |
712 | 0 | return 0; |
713 | | |
714 | 0 | typ = block_writer_type(w->block_writer); |
715 | 0 | err = writer_finish_section(w); |
716 | 0 | if (err < 0) |
717 | 0 | return err; |
718 | 0 | if (typ == REFTABLE_BLOCK_TYPE_REF && !w->opts.skip_index_objects && |
719 | 0 | w->stats.ref_stats.index_blocks > 0) { |
720 | 0 | err = writer_dump_object_index(w); |
721 | 0 | if (err < 0) |
722 | 0 | return err; |
723 | 0 | } |
724 | | |
725 | 0 | if (w->obj_index_tree) { |
726 | 0 | infix_walk(w->obj_index_tree, &object_record_free, NULL); |
727 | 0 | tree_free(w->obj_index_tree); |
728 | 0 | w->obj_index_tree = NULL; |
729 | 0 | } |
730 | |
731 | 0 | w->block_writer = NULL; |
732 | 0 | return 0; |
733 | 0 | } |
734 | | |
735 | | int reftable_writer_close(struct reftable_writer *w) |
736 | 0 | { |
737 | 0 | uint8_t footer[72]; |
738 | 0 | uint8_t *p = footer; |
739 | 0 | int err = writer_finish_public_section(w); |
740 | 0 | int empty_table = w->next == 0; |
741 | 0 | if (err != 0) |
742 | 0 | goto done; |
743 | 0 | w->pending_padding = 0; |
744 | 0 | if (empty_table) { |
745 | | /* Empty tables need a header anyway. */ |
746 | 0 | uint8_t header[28]; |
747 | 0 | int n = writer_write_header(w, header); |
748 | 0 | err = padded_write(w, header, n, 0); |
749 | 0 | if (err < 0) |
750 | 0 | goto done; |
751 | 0 | } |
752 | | |
753 | 0 | p += writer_write_header(w, footer); |
754 | 0 | reftable_put_be64(p, w->stats.ref_stats.index_offset); |
755 | 0 | p += 8; |
756 | 0 | reftable_put_be64(p, (w->stats.obj_stats.offset) << 5 | w->stats.object_id_len); |
757 | 0 | p += 8; |
758 | 0 | reftable_put_be64(p, w->stats.obj_stats.index_offset); |
759 | 0 | p += 8; |
760 | |
761 | 0 | reftable_put_be64(p, w->stats.log_stats.offset); |
762 | 0 | p += 8; |
763 | 0 | reftable_put_be64(p, w->stats.log_stats.index_offset); |
764 | 0 | p += 8; |
765 | |
766 | 0 | reftable_put_be32(p, crc32(0, footer, p - footer)); |
767 | 0 | p += 4; |
768 | |
769 | 0 | err = w->flush(w->write_arg); |
770 | 0 | if (err < 0) { |
771 | 0 | err = REFTABLE_IO_ERROR; |
772 | 0 | goto done; |
773 | 0 | } |
774 | | |
775 | 0 | err = padded_write(w, footer, footer_size(writer_version(w)), 0); |
776 | 0 | if (err < 0) |
777 | 0 | goto done; |
778 | | |
779 | 0 | if (empty_table) { |
780 | 0 | err = REFTABLE_EMPTY_TABLE_ERROR; |
781 | 0 | goto done; |
782 | 0 | } |
783 | | |
784 | 0 | done: |
785 | 0 | writer_release(w); |
786 | 0 | return err; |
787 | 0 | } |
788 | | |
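The footer emitted above is a repeat of the header followed by five big-endian 64-bit fields and a CRC-32 over everything before it: 68 bytes for a version-1 table, 72 for version 2. The version-1 offsets, summarized as a hedged sketch of the writes above:

    /*
     * bytes  0..23  copy of the 24-byte file header
     * bytes 24..31  ref index offset                 (be64)
     * bytes 32..39  obj offset << 5 | object_id_len  (be64)
     * bytes 40..47  obj index offset                 (be64)
     * bytes 48..55  log offset                       (be64)
     * bytes 56..63  log index offset                 (be64)
     * bytes 64..67  CRC-32 of bytes 0..63
     */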
789 | | static void writer_clear_index(struct reftable_writer *w) |
790 | 0 | { |
791 | 0 | for (size_t i = 0; w->index && i < w->index_len; i++) |
792 | 0 | reftable_buf_release(&w->index[i].last_key); |
793 | 0 | REFTABLE_FREE_AND_NULL(w->index); |
794 | 0 | w->index_len = 0; |
795 | 0 | w->index_cap = 0; |
796 | 0 | } |
797 | | |
798 | | static int writer_flush_nonempty_block(struct reftable_writer *w) |
799 | 0 | { |
800 | 0 | struct reftable_index_record index_record = { |
801 | 0 | .last_key = REFTABLE_BUF_INIT, |
802 | 0 | }; |
803 | 0 | uint8_t typ = block_writer_type(w->block_writer); |
804 | 0 | struct reftable_block_stats *bstats; |
805 | 0 | int raw_bytes, padding = 0, err; |
806 | 0 | uint64_t block_typ_off; |
807 | | |
808 | | /* |
809 | | * Finish the current block. This will cause the block writer to emit |
810 | | * restart points and potentially compress records in case we are |
811 | | * writing a log block. |
812 | | * |
813 | | * Note that this is still happening in memory. |
814 | | */ |
815 | 0 | raw_bytes = block_writer_finish(w->block_writer); |
816 | 0 | if (raw_bytes < 0) |
817 | 0 | return raw_bytes; |
818 | | |
819 | | /* |
820 | | * By default, all records except for log records are padded to the |
821 | | * block size. |
822 | | */ |
823 | 0 | if (!w->opts.unpadded && typ != REFTABLE_BLOCK_TYPE_LOG) |
824 | 0 | padding = w->opts.block_size - raw_bytes; |
825 | |
|
826 | 0 | bstats = writer_reftable_block_stats(w, typ); |
827 | 0 | block_typ_off = (bstats->blocks == 0) ? w->next : 0; |
828 | 0 | if (block_typ_off > 0) |
829 | 0 | bstats->offset = block_typ_off; |
830 | 0 | bstats->entries += w->block_writer->entries; |
831 | 0 | bstats->restarts += w->block_writer->restart_len; |
832 | 0 | bstats->blocks++; |
833 | 0 | w->stats.blocks++; |
834 | | |
835 | | /* |
836 | | * If this is the first block we're writing to the table then we need |
837 | | * to also write the reftable header. |
838 | | */ |
839 | 0 | if (!w->next) |
840 | 0 | writer_write_header(w, w->block); |
841 | |
|
842 | 0 | err = padded_write(w, w->block, raw_bytes, padding); |
843 | 0 | if (err < 0) |
844 | 0 | return err; |
845 | | |
846 | | /* |
847 | | * Add an index record for every block that we're writing. If we end up |
848 | | * having more than a threshold of index records we will end up writing |
849 | | * an index section in `writer_finish_section()`. Each index record |
850 | | * contains the last record key of the block it is indexing as well as |
851 | | * the offset of that block. |
852 | | * |
853 | | * Note that this also applies when flushing index blocks, in which |
854 | | * case we will end up with a multi-level index. |
855 | | */ |
856 | 0 | REFTABLE_ALLOC_GROW_OR_NULL(w->index, w->index_len + 1, w->index_cap); |
857 | 0 | if (!w->index) |
858 | 0 | return REFTABLE_OUT_OF_MEMORY_ERROR; |
859 | | |
860 | 0 | index_record.offset = w->next; |
861 | 0 | reftable_buf_reset(&index_record.last_key); |
862 | 0 | err = reftable_buf_add(&index_record.last_key, w->block_writer->last_key.buf, |
863 | 0 | w->block_writer->last_key.len); |
864 | 0 | if (err < 0) |
865 | 0 | return err; |
866 | 0 | w->index[w->index_len] = index_record; |
867 | 0 | w->index_len++; |
868 | |
869 | 0 | w->next += padding + raw_bytes; |
870 | 0 | w->block_writer = NULL; |
871 | |
872 | 0 | return 0; |
873 | 0 | } |
874 | | |
875 | | static int writer_flush_block(struct reftable_writer *w) |
876 | 0 | { |
877 | 0 | if (!w->block_writer) |
878 | 0 | return 0; |
879 | 0 | if (w->block_writer->entries == 0) |
880 | 0 | return 0; |
881 | 0 | return writer_flush_nonempty_block(w); |
882 | 0 | } |
883 | | |
884 | | const struct reftable_stats *reftable_writer_stats(struct reftable_writer *w) |
885 | 0 | { |
886 | 0 | return &w->stats; |
887 | 0 | } |