/*
Copyright 2020 Google LLC

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
*/

#include "iter.h"

#include "system.h"

#include "block.h"
#include "constants.h"
#include "reader.h"
#include "reftable-error.h"

/*
 * Generic dispatch helpers: forward seek/next to the concrete iterator
 * implementation through its vtable.
 */
int iterator_seek(struct reftable_iterator *it, struct reftable_record *want)
{
	return it->ops->seek(it->iter_arg, want);
}

int iterator_next(struct reftable_iterator *it, struct reftable_record *rec)
{
	return it->ops->next(it->iter_arg, rec);
}

/*
 * The empty iterator yields no records: seeking is a no-op, and the first
 * call to next() immediately reports the end of iteration.
 */
static int empty_iterator_seek(void *arg UNUSED, struct reftable_record *want UNUSED)
{
	return 0;
}

static int empty_iterator_next(void *arg UNUSED, struct reftable_record *rec UNUSED)
{
	return 1;
}

static void empty_iterator_close(void *arg UNUSED)
{
}

static struct reftable_iterator_vtable empty_vtable = {
	.seek = &empty_iterator_seek,
	.next = &empty_iterator_next,
	.close = &empty_iterator_close,
};

void iterator_set_empty(struct reftable_iterator *it)
{
	assert(!it->ops);
	it->iter_arg = NULL;
	it->ops = &empty_vtable;
}
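/*
 * Illustrative sketch (not part of this file): a caller with nothing to
 * iterate over can still hand out a valid iterator. Its first next() call
 * reports end of iteration, and destroying it is safe because the close
 * callback is a no-op and iter_arg stays NULL:
 *
 *	struct reftable_iterator it = { 0 };
 *	iterator_set_empty(&it);
 *	// reftable_iterator_next_ref(&it, &ref) now returns 1 right away.
 *	reftable_iterator_destroy(&it);
 */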

/*
 * The filtering ref iterator wraps another ref iterator and only passes
 * through refs whose value (direct or peeled) matches the object ID bytes
 * stored in fri->oid.
 */
static void filtering_ref_iterator_close(void *iter_arg)
{
	struct filtering_ref_iterator *fri = iter_arg;
	strbuf_release(&fri->oid);
	reftable_iterator_destroy(&fri->it);
}

static int filtering_ref_iterator_seek(void *iter_arg,
				       struct reftable_record *want)
{
	struct filtering_ref_iterator *fri = iter_arg;
	return iterator_seek(&fri->it, want);
}

static int filtering_ref_iterator_next(void *iter_arg,
				       struct reftable_record *rec)
{
	struct filtering_ref_iterator *fri = iter_arg;
	struct reftable_ref_record *ref = &rec->u.ref;
	int err = 0;
	while (1) {
		err = reftable_iterator_next_ref(&fri->it, ref);
		if (err != 0) {
			break;
		}

		if (ref->value_type == REFTABLE_REF_VAL2 &&
		    (!memcmp(fri->oid.buf, ref->value.val2.target_value,
			     fri->oid.len) ||
		     !memcmp(fri->oid.buf, ref->value.val2.value,
			     fri->oid.len)))
			return 0;

		if (ref->value_type == REFTABLE_REF_VAL1 &&
		    !memcmp(fri->oid.buf, ref->value.val1, fri->oid.len)) {
			return 0;
		}
	}

	reftable_ref_record_release(ref);
	return err;
}

static struct reftable_iterator_vtable filtering_ref_iterator_vtable = {
	.seek = &filtering_ref_iterator_seek,
	.next = &filtering_ref_iterator_next,
	.close = &filtering_ref_iterator_close,
};

void iterator_from_filtering_ref_iterator(struct reftable_iterator *it,
					  struct filtering_ref_iterator *fri)
{
	assert(!it->ops);
	it->iter_arg = fri;
	it->ops = &filtering_ref_iterator_vtable;
}
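/*
 * Illustrative sketch (the actual call site lives in the reader code, and
 * the field names come from "iter.h"): the caller fills in the object ID
 * to match and the inner iterator, then installs the vtable:
 *
 *	struct filtering_ref_iterator *fri = ...;   // allocated by the caller
 *	strbuf_add(&fri->oid, oid, oid_len);        // object ID to match
 *	... seed fri->it with a ref iterator over the table ...
 *	iterator_from_filtering_ref_iterator(dest, fri);
 */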

/*
 * The indexed table ref iterator walks a list of block offsets (obtained
 * from the table's object index) and yields the refs in those blocks that
 * point at the wanted object ID.
 */
static void indexed_table_ref_iter_close(void *p)
{
	struct indexed_table_ref_iter *it = p;
	block_iter_close(&it->cur);
	reftable_block_done(&it->block_reader.block);
	reftable_free(it->offsets);
	strbuf_release(&it->oid);
}

static int indexed_table_ref_iter_next_block(struct indexed_table_ref_iter *it)
{
	uint64_t off;
	int err = 0;
	if (it->offset_idx == it->offset_len) {
		it->is_finished = 1;
		return 1;
	}

	reftable_block_done(&it->block_reader.block);

	off = it->offsets[it->offset_idx++];
	err = reader_init_block_reader(it->r, &it->block_reader, off,
				       BLOCK_TYPE_REF);
	if (err < 0) {
		return err;
	}
	if (err > 0) {
		/* indexed block does not exist. */
		return REFTABLE_FORMAT_ERROR;
	}
	block_iter_seek_start(&it->cur, &it->block_reader);
	return 0;
}

static int indexed_table_ref_iter_seek(void *p UNUSED,
				       struct reftable_record *want UNUSED)
{
	BUG("seeking indexed table is not supported");
	return -1;
}

static int indexed_table_ref_iter_next(void *p, struct reftable_record *rec)
{
	struct indexed_table_ref_iter *it = p;
	struct reftable_ref_record *ref = &rec->u.ref;

	while (1) {
		int err = block_iter_next(&it->cur, rec);
		if (err < 0) {
			return err;
		}

		if (err > 0) {
			err = indexed_table_ref_iter_next_block(it);
			if (err < 0) {
				return err;
			}

			if (it->is_finished) {
				return 1;
			}
			continue;
		}
		/* BUG: value_type is not checked, so the val2 fields are
		   inspected even for records that are not REFTABLE_REF_VAL2. */
		if (!memcmp(it->oid.buf, ref->value.val2.target_value,
			    it->oid.len) ||
		    !memcmp(it->oid.buf, ref->value.val2.value, it->oid.len)) {
			return 0;
		}
	}
}

int new_indexed_table_ref_iter(struct indexed_table_ref_iter **dest,
			       struct reftable_reader *r, uint8_t *oid,
			       int oid_len, uint64_t *offsets, int offset_len)
{
	struct indexed_table_ref_iter empty = INDEXED_TABLE_REF_ITER_INIT;
	struct indexed_table_ref_iter *itr = reftable_calloc(1, sizeof(*itr));
	int err = 0;

	*itr = empty;
	itr->r = r;
	strbuf_add(&itr->oid, oid, oid_len);

	/* The iterator takes ownership of the offsets array; it is freed in
	   indexed_table_ref_iter_close(). */
	itr->offsets = offsets;
	itr->offset_len = offset_len;

	err = indexed_table_ref_iter_next_block(itr);
	if (err < 0) {
		reftable_free(itr);
	} else {
		*dest = itr;
	}
	return err;
}

static struct reftable_iterator_vtable indexed_table_ref_iter_vtable = {
	.seek = &indexed_table_ref_iter_seek,
	.next = &indexed_table_ref_iter_next,
	.close = &indexed_table_ref_iter_close,
};

void iterator_from_indexed_table_ref_iter(struct reftable_iterator *it,
					  struct indexed_table_ref_iter *itr)
{
	assert(!it->ops);
	it->iter_arg = itr;
	it->ops = &indexed_table_ref_iter_vtable;
}
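/*
 * Illustrative sketch (the actual caller is the object-index lookup in the
 * reader code): the two functions above combine to turn a list of candidate
 * block offsets into a generic iterator:
 *
 *	struct indexed_table_ref_iter *itr = NULL;
 *	int err = new_indexed_table_ref_iter(&itr, r, oid, oid_len,
 *					     offsets, offset_len);
 *	if (err < 0)
 *		return err;
 *	iterator_from_indexed_table_ref_iter(dest, itr);
 */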

/*
 * Public interface: the ref/log-specific entry points below wrap the
 * generic record-based seek/next machinery.
 */
void reftable_iterator_destroy(struct reftable_iterator *it)
{
	if (!it->ops)
		return;
	it->ops->close(it->iter_arg);
	it->ops = NULL;
	FREE_AND_NULL(it->iter_arg);
}

int reftable_iterator_seek_ref(struct reftable_iterator *it,
			       const char *name)
{
	struct reftable_record want = {
		.type = BLOCK_TYPE_REF,
		.u.ref = {
			.refname = (char *)name,
		},
	};
	return it->ops->seek(it->iter_arg, &want);
}

int reftable_iterator_next_ref(struct reftable_iterator *it,
			       struct reftable_ref_record *ref)
{
	struct reftable_record rec = {
		.type = BLOCK_TYPE_REF,
		.u = {
			.ref = *ref
		},
	};
	int err = iterator_next(it, &rec);
	*ref = rec.u.ref;
	return err;
}
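/*
 * Illustrative usage sketch (return conventions as documented in
 * reftable-iterator.h: 0 produced a record, > 0 means the iterator is
 * exhausted, < 0 is an error):
 *
 *	struct reftable_ref_record ref = { 0 };
 *	int err = reftable_iterator_seek_ref(&it, "refs/heads/main");
 *	while (!err) {
 *		err = reftable_iterator_next_ref(&it, &ref);
 *		if (err)
 *			break;
 *		... inspect ref ...
 *	}
 *	reftable_ref_record_release(&ref);
 *	reftable_iterator_destroy(&it);
 */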

int reftable_iterator_seek_log_at(struct reftable_iterator *it,
				  const char *name, uint64_t update_index)
{
	struct reftable_record want = {
		.type = BLOCK_TYPE_LOG,
		.u.log = {
			.refname = (char *)name,
			.update_index = update_index,
		},
	};
	return it->ops->seek(it->iter_arg, &want);
}

/*
 * Log records are keyed so that larger update indices sort first, so
 * seeking with the maximum update_index positions the iterator at the
 * newest log entry for the given ref.
 */
int reftable_iterator_seek_log(struct reftable_iterator *it,
			       const char *name)
{
	return reftable_iterator_seek_log_at(it, name, ~((uint64_t) 0));
}

int reftable_iterator_next_log(struct reftable_iterator *it,
			       struct reftable_log_record *log)
{
	struct reftable_record rec = {
		.type = BLOCK_TYPE_LOG,
		.u = {
			.log = *log,
		},
	};
	int err = iterator_next(it, &rec);
	*log = rec.u.log;
	return err;
}
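/*
 * Illustrative usage sketch, mirroring the ref case above: walk the log
 * entries of a single ref from newest to oldest and stop once the refname
 * changes.
 *
 *	struct reftable_log_record log = { 0 };
 *	int err = reftable_iterator_seek_log(&it, "refs/heads/main");
 *	while (!err) {
 *		err = reftable_iterator_next_log(&it, &log);
 *		if (err || strcmp(log.refname, "refs/heads/main"))
 *			break;
 *		... inspect log.value.update ...
 *	}
 *	reftable_log_record_release(&log);
 *	reftable_iterator_destroy(&it);
 */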