1 : // Copyright 2011 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package sstable
6 :
7 : import (
8 : "bytes"
9 : "context"
10 : "fmt"
11 :
12 : "github.com/cockroachdb/pebble/internal/base"
13 : "github.com/cockroachdb/pebble/internal/invariants"
14 : "github.com/cockroachdb/pebble/internal/treeprinter"
15 : "github.com/cockroachdb/pebble/objstorage/objstorageprovider/objiotracing"
16 : "github.com/cockroachdb/pebble/sstable/block"
17 : "github.com/cockroachdb/pebble/sstable/rowblk"
18 : )
19 :
20 : type twoLevelIterator struct {
21 : singleLevelIterator
22 : topLevelIndex rowblk.Iter
23 : }
24 :
25 : // twoLevelIterator implements the base.InternalIterator interface.
26 : var _ base.InternalIterator = (*twoLevelIterator)(nil)
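
// A sketch of the structure this iterator navigates (illustrative layout,
// not taken from a real table). Each top-level separator is an inclusive
// upper bound on every key reachable through its entry:
//
//	topLevelIndex:      [ sep c ][ sep g ][ sep k ]
//	                        |        |        |
//	second-level        keys<=c  keys<=g  keys<=k    (iterated by i.index)
//	index blocks:           |        |        |
//	data blocks:        loaded on demand into i.data
//
// topLevelIndex positions among second-level index blocks; the embedded
// singleLevelIterator's i.index and i.data position within one of them.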
27 :
28 : // loadIndex loads the index block at the current top level index position and
29 : // leaves i.index unpositioned. If unsuccessful, it sets i.err to any error
30 : // encountered, which may be nil if we have simply exhausted the entire table.
31 : // This is used for two-level indexes.
32 1 : func (i *twoLevelIterator) loadIndex(dir int8) loadBlockResult {
33 1 : // Ensure the index data block iterators are invalidated even if loading of
34 1 : // the index fails.
35 1 : i.data.Invalidate()
36 1 : i.index.Invalidate()
37 1 : if !i.topLevelIndex.Valid() {
38 0 : return loadBlockFailed
39 0 : }
40 1 : v := i.topLevelIndex.Value()
41 1 : bhp, err := decodeBlockHandleWithProperties(v.InPlaceValue())
42 1 : if err != nil {
43 0 : i.err = base.CorruptionErrorf("pebble/table: corrupt top level index entry (%v)", err)
44 0 : return loadBlockFailed
45 0 : }
46 1 : if i.bpfs != nil {
47 1 : intersects, err := i.bpfs.intersects(bhp.Props)
48 1 : if err != nil {
49 0 : i.err = errCorruptIndexEntry(err)
50 0 : return loadBlockFailed
51 0 : }
52 1 : if intersects == blockMaybeExcluded {
53 1 : intersects = i.resolveMaybeExcluded(dir)
54 1 : }
55 1 : if intersects == blockExcluded {
56 1 : return loadBlockIrrelevant
57 1 : }
58 : // blockIntersects
59 : }
60 1 : ctx := objiotracing.WithBlockType(i.ctx, objiotracing.MetadataBlock)
61 1 : indexBlock, err := i.reader.readBlock(
62 1 : ctx, bhp.Handle, nil /* transform */, i.indexFilterRH, i.stats, &i.iterStats, i.bufferPool)
63 1 : if err == nil {
64 1 : err = i.index.InitHandle(i.cmp, i.reader.Split, indexBlock, i.transforms)
65 1 : }
66 1 : if err != nil {
67 0 : i.err = err
68 0 : return loadBlockFailed
69 0 : }
70 1 : return loadBlockOK
71 : }
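
// The loadBlockResult values above impose a common pattern on every
// positioning method in this file. A minimal sketch of that contract
// (illustrative only; the real callers also manage boundsCmp and bound
// state):
//
//	switch i.loadIndex(+1) {
//	case loadBlockFailed:
//		return nil // i.err holds any error (nil if the table is exhausted)
//	case loadBlockIrrelevant:
//		// The block was excluded by block-property filters. Check the
//		// iteration bounds, then continue via skipForward/skipBackward.
//	case loadBlockOK:
//		// i.index is initialized; position the singleLevelIterator in it.
//	}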
72 :
73 : // resolveMaybeExcluded is invoked when the block-property filterer has found
74 : // that an index block is excluded according to its properties but only if its
75 : // bounds fall within the filter's current bounds. This function consults the
76 : // appropriate bound, depending on the iteration direction, and returns either
77 : // `blockIntersects` or `blockExcluded`.
78 1 : func (i *twoLevelIterator) resolveMaybeExcluded(dir int8) intersectsResult {
79 1 : // This iterator is configured with a bound-limited block property filter.
80 1 : // The bpf determined this entire index block could be excluded from
81 1 : // iteration based on the property encoded in the block handle. However, we
82 1 : // still need to determine if the index block is wholly contained within the
83 1 : // filter's key bounds.
84 1 : //
85 1 : // External guarantees ensure all its data blocks' keys are ≥ the filter's
86 1 : // lower bound during forward iteration, and that all its data blocks' keys
87 1 : // are < the filter's upper bound during backward iteration. We only need to
88 1 : // determine if the opposite bound is also met.
89 1 : //
90 1 : // The index separator in topLevelIndex.Key() provides an inclusive
91 1 : // upper-bound for the index block's keys, guaranteeing that all its keys
92 1 : // are ≤ topLevelIndex.Key(). For forward iteration, this is all we need.
93 1 : if dir > 0 {
94 1 : // Forward iteration.
95 1 : if i.bpfs.boundLimitedFilter.KeyIsWithinUpperBound(i.topLevelIndex.Key().UserKey) {
96 1 : return blockExcluded
97 1 : }
98 1 : return blockIntersects
99 : }
100 :
101 : // Reverse iteration.
102 : //
103 : // Because we're iterating in the reverse direction, we don't yet have
104 : // enough context available to determine if the block is wholly contained
105 : // within its bounds. This case arises only during backward iteration,
106 : // because of the way the index is structured.
107 : //
108 : // Consider a bound-limited bpf limited to the bounds [b,d), loading the
109 : // block with separator `c`. During reverse iteration, the guarantee that
110 : // all the block's keys are < `d` is externally provided, but no guarantee
111 : // is made on the bpf's lower bound. The separator `c` only provides an
112 : // inclusive upper bound on the block's keys, indicating that the
113 : // corresponding block handle points to a block containing only keys ≤ `c`.
114 : //
115 : // To establish a lower bound, we step the top-level index backwards to read
116 : // the previous block's separator, which provides an inclusive lower bound
117 : // on the original index block's keys. Afterwards, we step forward to
118 : // restore our top-level index position.
119 1 : if peekKey := i.topLevelIndex.Prev(); peekKey == nil {
120 1 : // The original block points to the first index block of this table. If
121 1 : // we knew the lower bound for the entire table, it could provide a
122 1 : // lower bound, but the code refactoring necessary to read it doesn't
123 1 : // seem worth the payoff. We fall through to loading the block.
124 1 : } else if i.bpfs.boundLimitedFilter.KeyIsWithinLowerBound(peekKey.K.UserKey) {
125 1 : // The lower-bound on the original index block falls within the filter's
126 1 : // bounds, and we can skip the block (after restoring our current
127 1 : // top-level index position).
128 1 : _ = i.topLevelIndex.Next()
129 1 : return blockExcluded
130 1 : }
131 1 : _ = i.topLevelIndex.Next()
132 1 : return blockIntersects
133 : }
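
// A worked instance of the reverse-direction case above (illustrative): with
// filter bounds [b,d) and top-level separators b, c, f, suppose backward
// iteration is about to load the block with separator c:
//
//	Prev()                   -> separator b, so all of block c's keys are > b
//	KeyIsWithinLowerBound(b) -> true (b >= lower bound b), so blockExcluded
//	Next()                   -> restore the top-level position to separator c
//
// Had Prev() stepped off the front of the top-level index, no lower bound
// could be established and the block would be treated as blockIntersects.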
134 :
135 : // newTwoLevelIterator reads the top-level index block and creates and
136 : // initializes a two level iterator.
137 : //
138 : // Note that lower, upper are iterator bounds and are separate from virtual
139 : // sstable bounds. If the virtualState passed in is not nil, then virtual
140 : // sstable bounds will be enforced.
141 : func newTwoLevelIterator(
142 : ctx context.Context,
143 : r *Reader,
144 : v *virtualState,
145 : transforms IterTransforms,
146 : lower, upper []byte,
147 : filterer *BlockPropertiesFilterer,
148 : filterBlockSizeLimit FilterBlockSizeLimit,
149 : stats *base.InternalIteratorStats,
150 : categoryAndQoS CategoryAndQoS,
151 : statsCollector *CategoryStatsCollector,
152 : rp ReaderProvider,
153 : bufferPool *block.BufferPool,
154 1 : ) (*twoLevelIterator, error) {
155 1 : if r.err != nil {
156 0 : return nil, r.err
157 0 : }
158 1 : i := twoLevelIterPool.Get().(*twoLevelIterator)
159 1 : i.singleLevelIterator.init(ctx, r, v, transforms, lower, upper, filterer, filterBlockSizeLimit,
160 1 : stats, categoryAndQoS, statsCollector, rp, bufferPool)
161 1 :
162 1 : topLevelIndexH, err := r.readIndex(ctx, i.indexFilterRH, stats, &i.iterStats)
163 1 : if err == nil {
164 1 : err = i.topLevelIndex.InitHandle(i.cmp, i.reader.Split, topLevelIndexH, transforms)
165 1 : }
166 1 : if err != nil {
167 0 : _ = i.Close()
168 0 : return nil, err
169 0 : }
170 1 : return i, nil
171 : }
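
// A hypothetical construction call (sketch only; in-tree callers reach this
// through the Reader's iterator constructors, and AlwaysUseFilterBlock is
// assumed to be the package's permissive FilterBlockSizeLimit value):
//
//	it, err := newTwoLevelIterator(
//		ctx, r, nil /* virtualState */, IterTransforms{},
//		lower, upper, nil /* filterer */, AlwaysUseFilterBlock,
//		stats, categoryAndQoS, statsCollector, rp, bufferPool)
//	if err != nil {
//		return err
//	}
//	defer it.Close() // resets the iterator and returns it to twoLevelIterPool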
172 :
173 0 : func (i *twoLevelIterator) String() string {
174 0 : return i.singleLevelIterator.String()
175 0 : }
176 :
177 : // DebugTree is part of the InternalIterator interface.
178 0 : func (i *twoLevelIterator) DebugTree(tp treeprinter.Node) {
179 0 : tp.Childf("%T(%p) fileNum=%s", i, i, i.String())
180 0 : }
181 :
182 : // SeekGE implements internalIterator.SeekGE, as documented in the pebble
183 : // package. Note that SeekGE only checks the upper bound. It is up to the
184 : // caller to ensure that key is greater than or equal to the lower bound.
185 1 : func (i *twoLevelIterator) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV {
186 1 : if i.vState != nil {
187 1 : // Callers of SeekGE don't know about virtual sstable bounds, so we may
188 1 : // have to internally restrict the bounds.
189 1 : //
190 1 : // TODO(bananabrick): We can optimize away this check for the level iter
191 1 : // if necessary.
192 1 : if i.cmp(key, i.lower) < 0 {
193 1 : key = i.lower
194 1 : }
195 : }
196 :
197 1 : err := i.err
198 1 : i.err = nil // clear cached iteration error
199 1 :
200 1 : // The twoLevelIterator could already be exhausted. Utilize that when
201 1 : // trySeekUsingNext is true. See the comment about data-exhausted, PGDE, and
202 1 : // bounds-exhausted near the top of the file.
203 1 : if flags.TrySeekUsingNext() &&
204 1 : (i.exhaustedBounds == +1 || (i.data.IsDataInvalidated() && i.index.IsDataInvalidated())) &&
205 1 : err == nil {
206 1 : // Already exhausted, so return nil.
207 1 : return nil
208 1 : }
209 :
210 : // SeekGE performs various step-instead-of-seeking optimizations: e.g. enabled
211 : // by trySeekUsingNext, or by monotonically increasing bounds (i.boundsCmp).
212 :
213 : // We fall into the slow path if i.index.IsDataInvalidated() even if the
214 : // top-level iterator is already positioned correctly and all other
215 : // conditions are met. An alternative structure could reuse topLevelIndex's
216 : // current position and reload the index block to which it points. Arguably,
217 : // an index block load is expensive and the index block may still be earlier
218 : // than the index block containing the sought key, resulting in a wasteful
219 : // block load.
220 :
221 1 : var dontSeekWithinSingleLevelIter bool
222 1 : if i.topLevelIndex.IsDataInvalidated() || !i.topLevelIndex.Valid() || i.index.IsDataInvalidated() || err != nil ||
223 1 : (i.boundsCmp <= 0 && !flags.TrySeekUsingNext()) || i.cmp(key, i.topLevelIndex.Key().UserKey) > 0 {
224 1 : // Slow-path: need to position the topLevelIndex.
225 1 :
226 1 : // The previous exhausted state of singleLevelIterator is no longer
227 1 : // relevant, since we may be moving to a different index block.
228 1 : i.exhaustedBounds = 0
229 1 : flags = flags.DisableTrySeekUsingNext()
230 1 : var ikv *base.InternalKV
231 1 : if ikv = i.topLevelIndex.SeekGE(key, flags); ikv == nil {
232 1 : i.data.Invalidate()
233 1 : i.index.Invalidate()
234 1 : return nil
235 1 : }
236 :
237 1 : result := i.loadIndex(+1)
238 1 : if result == loadBlockFailed {
239 0 : i.boundsCmp = 0
240 0 : return nil
241 0 : }
242 1 : if result == loadBlockIrrelevant {
243 1 : // Enforce the upper bound here since we don't want to bother moving
244 1 : // to the next entry in the top level index if the upper bound is
245 1 : // already exceeded. Note that the next entry starts with keys >=
246 1 : // ikv.K.UserKey since even though this is the block separator, the
247 1 : // same user key can span multiple index blocks. If upper is
248 1 : // exclusive we use >= below, else we use >.
249 1 : if i.upper != nil {
250 1 : cmp := i.cmp(ikv.K.UserKey, i.upper)
251 1 : if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 {
252 1 : i.exhaustedBounds = +1
253 1 : }
254 : }
255 : // Fall through to skipForward.
256 1 : dontSeekWithinSingleLevelIter = true
257 1 : // Clear boundsCmp.
258 1 : //
259 1 : // In the typical cases where dontSeekWithinSingleLevelIter=false,
260 1 : // the singleLevelIterator.SeekGE call will clear boundsCmp.
261 1 : // However, in this case where dontSeekWithinSingleLevelIter=true,
262 1 : // we never seek on the single-level iterator. This call will fall
263 1 : // through to skipForward, which may improperly leave boundsCmp=+1
264 1 : // unless we clear it here.
265 1 : i.boundsCmp = 0
266 : }
267 1 : } else {
268 1 : // INVARIANT: err == nil.
269 1 : //
270 1 : // Else fast-path: There are two possible cases, from
271 1 : // (i.boundsCmp > 0 || flags.TrySeekUsingNext()):
272 1 : //
273 1 : // 1) The bounds have moved forward (i.boundsCmp > 0) and this SeekGE is
274 1 : // respecting the lower bound (guaranteed by Iterator). We know that the
275 1 : // iterator must already be positioned within or just outside the previous
276 1 : // bounds. Therefore, the topLevelIndex iter cannot be positioned at an
277 1 : // entry ahead of the seek position (though it can be positioned behind).
278 1 : // The !i.cmp(key, i.topLevelIndex.Key().UserKey) > 0 confirms that it is
279 1 : // not behind. Since it is not ahead and not behind it must be at the
280 1 : // right position.
281 1 : //
282 1 : // 2) This SeekGE will land on a key that is greater than the key we are
283 1 : // currently at (guaranteed by trySeekUsingNext), but since i.cmp(key,
284 1 : // i.topLevelIndex.Key().UserKey) <= 0, we are at the correct lower level
285 1 : // index block. No need to reset the state of singleLevelIterator.
286 1 : //
287 1 : // Note that cases 1 and 2 never overlap, and one of them must be true.
288 1 : // This invariant checking is important enough that we do not gate it
289 1 : // behind invariants.Enabled.
290 1 : if i.boundsCmp > 0 == flags.TrySeekUsingNext() {
291 0 : panic(fmt.Sprintf("inconsistency in optimization case 1 %t and case 2 %t",
292 0 : i.boundsCmp > 0, flags.TrySeekUsingNext()))
293 : }
294 :
295 1 : if !flags.TrySeekUsingNext() {
296 1 : // Case 1. Bounds have changed so the previous exhausted bounds state is
297 1 : // irrelevant.
298 1 : // WARNING-data-exhausted: this is safe to do only because the monotonic
299 1 : // bounds optimizations only work when !data-exhausted. If they also
300 1 : // worked with data-exhausted, it would be unclear whether
301 1 : // data-exhausted is actually true. See the comment at the top of the
302 1 : // file.
303 1 : i.exhaustedBounds = 0
304 1 : }
305 : // Else flags.TrySeekUsingNext(). The i.exhaustedBounds is important to
306 : // preserve for singleLevelIterator, and twoLevelIterator.skipForward. See
307 : // bug https://github.com/cockroachdb/pebble/issues/2036.
308 : }
309 :
310 1 : if !dontSeekWithinSingleLevelIter {
311 1 : // Note that while trySeekUsingNext could be false here, singleLevelIterator
312 1 : // could do its own boundsCmp-based optimization to seek using next.
313 1 : if ikv := i.singleLevelIterator.SeekGE(key, flags); ikv != nil {
314 1 : return ikv
315 1 : }
316 : }
317 1 : return i.skipForward()
318 : }
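
// The branch above condensed into a decision rule (a restatement, not new
// behavior): the slow path re-seeks topLevelIndex if any of the following
// hold; otherwise only the singleLevelIterator needs to move:
//
//	slow path <- topLevelIndex invalidated or unpositioned, or
//	             i.index invalidated, or a cached error, or
//	             no optimization applies (boundsCmp <= 0 && !TrySeekUsingNext), or
//	             key > topLevelIndex.Key().UserKey (current block is behind)
//	fast path <- otherwise: topLevelIndex already points at the correct
//	             second-level index block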
319 :
320 : // SeekPrefixGE implements internalIterator.SeekPrefixGE, as documented in the
321 : // pebble package. Note that SeekPrefixGE only checks the upper bound. It is up
322 : // to the caller to ensure that key is greater than or equal to the lower bound.
323 : func (i *twoLevelIterator) SeekPrefixGE(
324 : prefix, key []byte, flags base.SeekGEFlags,
325 1 : ) *base.InternalKV {
326 1 : if i.vState != nil {
327 1 : // Callers of SeekPrefixGE don't know about virtual sstable bounds, so we may
328 1 : // have to internally restrict the bounds.
329 1 : //
330 1 : // TODO(bananabrick): We can optimize away this check for the level iter
331 1 : // if necessary.
332 1 : if i.cmp(key, i.lower) < 0 {
333 1 : key = i.lower
334 1 : }
335 : }
336 :
337 : // NOTE: prefix is only used for bloom filter checking and not later work in
338 : // this method. Hence, we can use the existing iterator position if the last
339 : // SeekPrefixGE did not fail bloom filter matching.
340 :
341 1 : err := i.err
342 1 : i.err = nil // clear cached iteration error
343 1 :
344 1 : useFilter := shouldUseFilterBlock(i.reader, i.filterBlockSizeLimit)
345 1 : // The twoLevelIterator could already be exhausted. Utilize that when
346 1 : // trySeekUsingNext is true. See the comment about data-exhausted, PGDE, and
347 1 : // bounds-exhausted near the top of the file.
348 1 : filterUsedAndDidNotMatch := useFilter && !i.lastBloomFilterMatched
349 1 : if flags.TrySeekUsingNext() && !filterUsedAndDidNotMatch &&
350 1 : (i.exhaustedBounds == +1 || (i.data.IsDataInvalidated() && i.index.IsDataInvalidated())) &&
351 1 : err == nil {
352 1 : // Already exhausted, so return nil.
353 1 : return nil
354 1 : }
355 :
356 : // Check prefix bloom filter.
357 1 : if useFilter {
358 1 : if !i.lastBloomFilterMatched {
359 1 : // Iterator is not positioned based on last seek.
360 1 : flags = flags.DisableTrySeekUsingNext()
361 1 : }
362 1 : i.lastBloomFilterMatched = false
363 1 : var mayContain bool
364 1 : mayContain, i.err = i.bloomFilterMayContain(prefix)
365 1 : if i.err != nil || !mayContain {
366 1 : // In the i.err == nil case, this invalidation may not be necessary for
367 1 : // correctness, and may be a place to optimize later by reusing the
368 1 : // already loaded block. It was necessary in earlier versions of the code
369 1 : // since the caller was allowed to call Next when SeekPrefixGE returned
370 1 : // nil. This is no longer allowed.
371 1 : i.data.Invalidate()
372 1 : return nil
373 1 : }
374 1 : i.lastBloomFilterMatched = true
375 : }
376 :
377 : // Bloom filter matches.
378 :
379 : // SeekPrefixGE performs various step-instead-of-seeking optimizations: e.g.
380 : // enabled by trySeekUsingNext, or by monotonically increasing bounds
381 : // (i.boundsCmp).
382 :
383 : // We fall into the slow path if i.index.IsDataInvalidated() even if the
384 : // top-level iterator is already positioned correctly and all other
385 : // conditions are met. An alternative structure could reuse topLevelIndex's
386 : // current position and reload the index block to which it points. Arguably,
387 : // an index block load is expensive and the index block may still be earlier
388 : // than the index block containing the sought key, resulting in a wasteful
389 : // block load.
390 :
391 1 : var dontSeekWithinSingleLevelIter bool
392 1 : if i.topLevelIndex.IsDataInvalidated() || !i.topLevelIndex.Valid() || i.index.IsDataInvalidated() || err != nil ||
393 1 : (i.boundsCmp <= 0 && !flags.TrySeekUsingNext()) || i.cmp(key, i.topLevelIndex.Key().UserKey) > 0 {
394 1 : // Slow-path: need to position the topLevelIndex.
395 1 :
396 1 : // The previous exhausted state of singleLevelIterator is no longer
397 1 : // relevant, since we may be moving to a different index block.
398 1 : i.exhaustedBounds = 0
399 1 : flags = flags.DisableTrySeekUsingNext()
400 1 : var ikv *base.InternalKV
401 1 : if ikv = i.topLevelIndex.SeekGE(key, flags); ikv == nil {
402 1 : i.data.Invalidate()
403 1 : i.index.Invalidate()
404 1 : return nil
405 1 : }
406 :
407 1 : result := i.loadIndex(+1)
408 1 : if result == loadBlockFailed {
409 0 : i.boundsCmp = 0
410 0 : return nil
411 0 : }
412 1 : if result == loadBlockIrrelevant {
413 1 : // Enforce the upper bound here since we don't want to bother moving
414 1 : // to the next entry in the top level index if the upper bound is
415 1 : // already exceeded. Note that the next entry starts with keys >=
416 1 : // ikv.K.UserKey since even though this is the block separator, the
417 1 : // same user key can span multiple index blocks. If upper is
418 1 : // exclusive we use >= below, else we use >.
419 1 : if i.upper != nil {
420 1 : cmp := i.cmp(ikv.K.UserKey, i.upper)
421 1 : if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 {
422 1 : i.exhaustedBounds = +1
423 1 : }
424 : }
425 : // Fall through to skipForward.
426 1 : dontSeekWithinSingleLevelIter = true
427 1 : // Clear boundsCmp.
428 1 : //
429 1 : // In the typical cases where dontSeekWithinSingleLevelIter=false,
430 1 : // the singleLevelIterator.SeekPrefixGE call will clear boundsCmp.
431 1 : // However, in this case where dontSeekWithinSingleLevelIter=true,
432 1 : // we never seek on the single-level iterator. This call will fall
433 1 : // through to skipForward, which may improperly leave boundsCmp=+1
434 1 : // unless we clear it here.
435 1 : i.boundsCmp = 0
436 : }
437 1 : } else {
438 1 : // INVARIANT: err == nil.
439 1 : //
440 1 : // Else fast-path: There are two possible cases, from
441 1 : // (i.boundsCmp > 0 || flags.TrySeekUsingNext()):
442 1 : //
443 1 : // 1) The bounds have moved forward (i.boundsCmp > 0) and this
444 1 : // SeekPrefixGE is respecting the lower bound (guaranteed by Iterator). We
445 1 : // know that the iterator must already be positioned within or just
446 1 : // outside the previous bounds. Therefore, the topLevelIndex iter cannot
447 1 : // be positioned at an entry ahead of the seek position (though it can be
448 1 : // positioned behind). The !i.cmp(key, i.topLevelIndex.Key().UserKey) > 0
449 1 : // confirms that it is not behind. Since it is not ahead and not behind it
450 1 : // must be at the right position.
451 1 : //
452 1 : // 2) This SeekPrefixGE will land on a key that is greater than the key we
453 1 : // are currently at (guaranteed by trySeekUsingNext), but since i.cmp(key,
454 1 : // i.topLevelIndex.Key().UserKey) <= 0, we are at the correct lower level
455 1 : // index block. No need to reset the state of singleLevelIterator.
456 1 : //
457 1 : // Note that cases 1 and 2 never overlap, and one of them must be true.
458 1 : // This invariant checking is important enough that we do not gate it
459 1 : // behind invariants.Enabled.
460 1 : if i.boundsCmp > 0 == flags.TrySeekUsingNext() {
461 0 : panic(fmt.Sprintf("inconsistency in optimization case 1 %t and case 2 %t",
462 0 : i.boundsCmp > 0, flags.TrySeekUsingNext()))
463 : }
464 :
465 1 : if !flags.TrySeekUsingNext() {
466 1 : // Case 1. Bounds have changed so the previous exhausted bounds state is
467 1 : // irrelevant.
468 1 : // WARNING-data-exhausted: this is safe to do only because the monotonic
469 1 : // bounds optimizations only work when !data-exhausted. If they also
470 1 : // worked with data-exhausted, it would be unclear whether
471 1 : // data-exhausted is actually true. See the comment at the top of the
472 1 : // file.
473 1 : i.exhaustedBounds = 0
474 1 : }
475 : // Else flags.TrySeekUsingNext(). The i.exhaustedBounds is important to
476 : // preserve for singleLevelIterator, and twoLevelIterator.skipForward. See
477 : // bug https://github.com/cockroachdb/pebble/issues/2036.
478 : }
479 :
480 1 : if !dontSeekWithinSingleLevelIter {
481 1 : if ikv := i.singleLevelIterator.seekPrefixGE(
482 1 : prefix, key, flags, NeverUseFilterBlock); ikv != nil {
483 1 : return ikv
484 1 : }
485 : }
486 : // NB: skipForward checks whether exhaustedBounds is already +1.
487 1 : return i.skipForward()
488 : }
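
// The bloom-filter gate above, reduced to its essentials (a sketch; the
// surrounding method also manages lastBloomFilterMatched and seek flags):
//
//	if useFilter {
//		mayContain, err := i.bloomFilterMayContain(prefix)
//		if err != nil || !mayContain {
//			return nil // the prefix is definitely absent, or the read failed
//		}
//	}
//
// A false positive only costs the seek that follows; a negative answer skips
// every index and data block load for this table.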
489 :
490 : // virtualLast should only be called if i.vState != nil.
491 1 : func (i *twoLevelIterator) virtualLast() *base.InternalKV {
492 1 : if i.vState == nil {
493 0 : panic("pebble: invalid call to virtualLast")
494 : }
495 1 : if !i.endKeyInclusive {
496 1 : // Trivial case.
497 1 : return i.SeekLT(i.upper, base.SeekLTFlagsNone)
498 1 : }
499 1 : return i.virtualLastSeekLE()
500 : }
501 :
502 : // virtualLastSeekLE implements a SeekLE() that can be used as part
503 : // of reverse-iteration calls such as a Last() on a virtual sstable. It does
504 : // a SeekLE on the upper bound of the file/iterator.
505 1 : func (i *twoLevelIterator) virtualLastSeekLE() *base.InternalKV {
506 1 : // Callers of SeekLE don't know about virtual sstable bounds, so we may
507 1 : // have to internally restrict the bounds.
508 1 : //
509 1 : // TODO(bananabrick): We can optimize this check away for the level iter
510 1 : // if necessary.
511 1 : if !i.endKeyInclusive {
512 0 : panic("unexpected virtualLastSeekLE with exclusive upper bounds")
513 : }
514 1 : key := i.upper
515 1 : // Need to position the topLevelIndex.
516 1 : //
517 1 : // The previous exhausted state of singleLevelIterator is no longer
518 1 : // relevant, since we may be moving to a different index block.
519 1 : i.exhaustedBounds = 0
520 1 : // Seek optimization only applies until iterator is first positioned with a
521 1 : // SeekGE or SeekLT after SetBounds.
522 1 : i.boundsCmp = 0
523 1 : ikv := i.topLevelIndex.SeekGE(key, base.SeekGEFlagsNone)
524 1 : // We can have multiple internal keys with the same user key as the seek
525 1 : // key. In that case, we want the last (greatest) internal key.
526 1 : for ikv != nil && bytes.Equal(ikv.K.UserKey, key) {
527 1 : ikv = i.topLevelIndex.Next()
528 1 : }
529 1 : if ikv == nil {
530 1 : return i.skipBackward()
531 1 : }
532 1 : result := i.loadIndex(-1)
533 1 : if result == loadBlockFailed {
534 0 : i.boundsCmp = 0
535 0 : return nil
536 0 : }
537 1 : if result == loadBlockIrrelevant {
538 1 : // Load the previous block.
539 1 : return i.skipBackward()
540 1 : }
541 1 : if ikv := i.singleLevelIterator.virtualLastSeekLE(); ikv != nil {
542 1 : return ikv
543 1 : }
544 1 : return i.skipBackward()
545 : }
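
// An illustrative walk-through of the SeekLE above: with inclusive upper
// bound d and top-level separators b, d, f:
//
//	topLevelIndex.SeekGE(d)      -> entry d
//	Next() while separator == d  -> entry f, since the block after d may
//	                                still begin with internal keys for user
//	                                key d (a user key can span blocks)
//	loadIndex(-1) + singleLevelIterator.virtualLastSeekLE()
//	                             -> last internal key with user key <= d
//
// If nothing in that block qualifies, skipBackward retreats block by block.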
546 :
547 : // SeekLT implements internalIterator.SeekLT, as documented in the pebble
548 : // package. Note that SeekLT only checks the lower bound. It is up to the
549 : // caller to ensure that key is less than the upper bound.
550 1 : func (i *twoLevelIterator) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV {
551 1 : if i.vState != nil {
552 1 : // Might have to fix upper bound since virtual sstable bounds are not
553 1 : // known to callers of SeekLT.
554 1 : //
555 1 : // TODO(bananabrick): We can optimize away this check for the level iter
556 1 : // if necessary.
557 1 : cmp := i.cmp(key, i.upper)
558 1 : // key == i.upper is fine. We'll do the right thing and return the
559 1 : // first internal key with user key < key.
560 1 : if cmp > 0 {
561 1 : return i.virtualLast()
562 1 : }
563 : }
564 :
565 1 : i.exhaustedBounds = 0
566 1 : i.err = nil // clear cached iteration error
567 1 : // Seek optimization only applies until iterator is first positioned after SetBounds.
568 1 : i.boundsCmp = 0
569 1 :
570 1 : var result loadBlockResult
571 1 : var ikv *base.InternalKV
572 1 : // NB: Unlike SeekGE, we don't have a fast-path here since we don't know
573 1 : // whether the topLevelIndex is positioned after the position that would
574 1 : // be returned by doing i.topLevelIndex.SeekGE(). To know this we would
575 1 : // need to know the index key preceding the current one.
576 1 : // NB: If a bound-limited block property filter is configured, it's
577 1 : // externally ensured that the filter is disabled (through returning
578 1 : // Intersects=false irrespective of the block props provided) during seeks.
579 1 : if ikv = i.topLevelIndex.SeekGE(key, base.SeekGEFlagsNone); ikv == nil {
580 1 : if ikv = i.topLevelIndex.Last(); ikv == nil {
581 0 : i.data.Invalidate()
582 0 : i.index.Invalidate()
583 0 : return nil
584 0 : }
585 :
586 1 : result = i.loadIndex(-1)
587 1 : if result == loadBlockFailed {
588 0 : return nil
589 0 : }
590 1 : if result == loadBlockOK {
591 1 : if ikv := i.singleLevelIterator.lastInternal(); ikv != nil {
592 1 : return i.maybeVerifyKey(ikv)
593 1 : }
594 : // Fall through to skipBackward since the singleLevelIterator did
595 : // not have any blocks that satisfy the block interval
596 : // constraints, or the lower bound was reached.
597 : }
598 : // Else loadBlockIrrelevant, so fall through.
599 1 : } else {
600 1 : result = i.loadIndex(-1)
601 1 : if result == loadBlockFailed {
602 0 : return nil
603 0 : }
604 1 : if result == loadBlockOK {
605 1 : if ikv := i.singleLevelIterator.SeekLT(key, flags); ikv != nil {
606 1 : return i.maybeVerifyKey(ikv)
607 1 : }
608 : // Fall through to skipBackward since the singleLevelIterator did
609 : // not have any blocks that satisfy the block interval
610 : // constraint, or the lower bound was reached.
611 : }
612 : // Else loadBlockIrrelevant, so fall through.
613 : }
614 1 : if result == loadBlockIrrelevant {
615 1 : // Enforce the lower bound here since we don't want to bother moving to
616 1 : // the previous entry in the top level index if the lower bound is already
617 1 : // exceeded. Note that the previous entry starts with keys <=
618 1 : // ikv.K.UserKey since even though this is the current block's
619 1 : // separator, the same user key can span multiple index blocks.
620 1 : if i.lower != nil && i.cmp(ikv.K.UserKey, i.lower) < 0 {
621 1 : i.exhaustedBounds = -1
622 1 : }
623 : }
624 : // NB: skipBackward checks whether exhaustedBounds is already -1.
625 1 : return i.skipBackward()
626 : }
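
// The top-level positioning above in brief (a restatement): SeekLT finds the
// rightmost block that can contain keys < key by seeking forward first:
//
//	entry := i.topLevelIndex.SeekGE(key) // first separator >= key
//	if entry == nil {
//		entry = i.topLevelIndex.Last() // every separator is < key
//	}
//	// Load that block and position within it (SeekLT or lastInternal);
//	// if it yields nothing, skipBackward continues to earlier blocks.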
627 :
628 : // First implements internalIterator.First, as documented in the pebble
629 : // package. Note that First only checks the upper bound. It is up to the caller
630 : // to ensure that key is greater than or equal to the lower bound (e.g. via a
631 : // call to SeekGE(lower)).
632 1 : func (i *twoLevelIterator) First() *base.InternalKV {
633 1 : // If we have a lower bound, use SeekGE. Note that in general this is not
634 1 : // supported usage, except when the lower bound is there because the table is
635 1 : // virtual.
636 1 : if i.lower != nil {
637 1 : return i.SeekGE(i.lower, base.SeekGEFlagsNone)
638 1 : }
639 1 : i.exhaustedBounds = 0
640 1 : i.err = nil // clear cached iteration error
641 1 : // Seek optimization only applies until iterator is first positioned after SetBounds.
642 1 : i.boundsCmp = 0
643 1 :
644 1 : var ikv *base.InternalKV
645 1 : if ikv = i.topLevelIndex.First(); ikv == nil {
646 0 : return nil
647 0 : }
648 :
649 1 : result := i.loadIndex(+1)
650 1 : if result == loadBlockFailed {
651 0 : return nil
652 0 : }
653 1 : if result == loadBlockOK {
654 1 : if ikv := i.singleLevelIterator.First(); ikv != nil {
655 1 : return ikv
656 1 : }
657 : // Else fall through to skipForward.
658 1 : } else {
659 1 : // result == loadBlockIrrelevant. Enforce the upper bound here since
660 1 : // we don't want to bother moving to the next entry in the top level
661 1 : // index if the upper bound is already exceeded. Note that the next entry
662 1 : // starts with keys >= ikv.K.UserKey since even though this is the
663 1 : // block separator, the same user key can span multiple index blocks.
664 1 : // If upper is exclusive we use >= below, else we use >.
665 1 : if i.upper != nil {
666 1 : cmp := i.cmp(ikv.K.UserKey, i.upper)
667 1 : if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 {
668 1 : i.exhaustedBounds = +1
669 1 : }
670 : }
671 : }
672 : // NB: skipForward checks whether exhaustedBounds is already +1.
673 1 : return i.skipForward()
674 : }
675 :
676 : // Last implements internalIterator.Last, as documented in the pebble
677 : // package. Note that Last only checks the lower bound. It is up to the caller
678 : // to ensure that key is less than the upper bound (e.g. via a call to
679 : // SeekLT(upper))
680 1 : func (i *twoLevelIterator) Last() *base.InternalKV {
681 1 : if i.vState != nil {
682 1 : if i.endKeyInclusive {
683 1 : return i.virtualLast()
684 1 : }
685 1 : return i.SeekLT(i.upper, base.SeekLTFlagsNone)
686 : }
687 :
688 1 : if i.upper != nil {
689 0 : panic("twoLevelIterator.Last() used despite upper bound")
690 : }
691 1 : i.exhaustedBounds = 0
692 1 : i.err = nil // clear cached iteration error
693 1 : // Seek optimization only applies until iterator is first positioned after SetBounds.
694 1 : i.boundsCmp = 0
695 1 :
696 1 : var ikv *base.InternalKV
697 1 : if ikv = i.topLevelIndex.Last(); ikv == nil {
698 0 : return nil
699 0 : }
700 :
701 1 : result := i.loadIndex(-1)
702 1 : if result == loadBlockFailed {
703 0 : return nil
704 0 : }
705 1 : if result == loadBlockOK {
706 1 : if ikv := i.singleLevelIterator.Last(); ikv != nil {
707 1 : return ikv
708 1 : }
709 : // Else fall through to skipBackward.
710 1 : } else {
711 1 : // result == loadBlockIrrelevant. Enforce the lower bound here since
712 1 : // we don't want to bother moving to the previous entry in the top level
713 1 : // index if the lower bound is already exceeded. Note that the previous
714 1 : // entry starts with keys <= ikv.K.UserKey since even though
715 1 : // this is the current block's separator, the same user key can span
716 1 : // multiple index blocks.
717 1 : if i.lower != nil && i.cmp(ikv.K.UserKey, i.lower) < 0 {
718 1 : i.exhaustedBounds = -1
719 1 : }
720 : }
721 : // NB: skipBackward checks whether exhaustedBounds is already -1.
722 1 : return i.skipBackward()
723 : }
724 :
725 : // Next implements internalIterator.Next, as documented in the pebble
726 : // package.
727 : // Note: twoLevelCompactionIterator.Next mirrors the implementation of
728 : // twoLevelIterator.Next due to performance. Keep the two in sync.
729 1 : func (i *twoLevelIterator) Next() *base.InternalKV {
730 1 : // Seek optimization only applies until iterator is first positioned after SetBounds.
731 1 : i.boundsCmp = 0
732 1 : if i.err != nil {
733 0 : // TODO(jackson): Can this case be turned into a panic? Once an error is
734 0 : // encountered, the iterator must be re-seeked.
735 0 : return nil
736 0 : }
737 1 : if ikv := i.singleLevelIterator.Next(); ikv != nil {
738 1 : return ikv
739 1 : }
740 1 : return i.skipForward()
741 : }
742 :
743 : // NextPrefix implements (base.InternalIterator).NextPrefix.
744 1 : func (i *twoLevelIterator) NextPrefix(succKey []byte) *base.InternalKV {
745 1 : if i.exhaustedBounds == +1 {
746 0 : panic("Next called even though exhausted upper bound")
747 : }
748 : // Seek optimization only applies until iterator is first positioned after SetBounds.
749 1 : i.boundsCmp = 0
750 1 : if i.err != nil {
751 0 : // TODO(jackson): Can this case be turned into a panic? Once an error is
752 0 : // encountered, the iterator must be re-seeked.
753 0 : return nil
754 0 : }
755 1 : if ikv := i.singleLevelIterator.NextPrefix(succKey); ikv != nil {
756 1 : return ikv
757 1 : }
758 : // key == nil
759 1 : if i.err != nil {
760 0 : return nil
761 0 : }
762 :
763 : // Did not find prefix in the existing second-level index block. This is the
764 : // slow-path where we seek the iterator.
765 1 : var ikv *base.InternalKV
766 1 : if ikv = i.topLevelIndex.SeekGE(succKey, base.SeekGEFlagsNone); ikv == nil {
767 1 : i.data.Invalidate()
768 1 : i.index.Invalidate()
769 1 : return nil
770 1 : }
771 1 : result := i.loadIndex(+1)
772 1 : if result == loadBlockFailed {
773 0 : return nil
774 0 : }
775 1 : if result == loadBlockIrrelevant {
776 0 : // Enforce the upper bound here since we don't want to bother moving to the
777 0 : // next entry in the top level index if the upper bound is already exceeded.
778 0 : // Note that the next entry starts with keys >= ikv.K.UserKey
779 0 : // since even though this is the block separator, the same user key can
780 0 : // span multiple index blocks. If upper is exclusive we use >= below,
781 0 : // else we use >.
782 0 : if i.upper != nil {
783 0 : cmp := i.cmp(ikv.K.UserKey, i.upper)
784 0 : if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 {
785 0 : i.exhaustedBounds = +1
786 0 : }
787 : }
788 1 : } else if kv := i.singleLevelIterator.SeekGE(succKey, base.SeekGEFlagsNone); kv != nil {
789 1 : return i.maybeVerifyKey(kv)
790 1 : }
791 1 : return i.skipForward()
792 : }
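
// NextPrefix above is two-phase (a restatement): a cheap attempt inside the
// current second-level index block, then a full seek on succKey:
//
//	if kv := i.singleLevelIterator.NextPrefix(succKey); kv != nil {
//		return kv // boundary found without consulting the top level
//	}
//	// Slow path: i.topLevelIndex.SeekGE(succKey), loadIndex(+1), then
//	// singleLevelIterator.SeekGE(succKey) inside the new block.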
793 :
794 : // Prev implements internalIterator.Prev, as documented in the pebble
795 : // package.
796 1 : func (i *twoLevelIterator) Prev() *base.InternalKV {
797 1 : // Seek optimization only applies until iterator is first positioned after SetBounds.
798 1 : i.boundsCmp = 0
799 1 : if i.err != nil {
800 0 : return nil
801 0 : }
802 1 : if kv := i.singleLevelIterator.Prev(); kv != nil {
803 1 : return kv
804 1 : }
805 1 : return i.skipBackward()
806 : }
807 :
808 1 : func (i *twoLevelIterator) skipForward() *base.InternalKV {
809 1 : for {
810 1 : if i.err != nil || i.exhaustedBounds > 0 {
811 1 : return nil
812 1 : }
813 :
814 : // It is possible that skipBackward went too far and the virtual table lower
815 : // bound is after the first key in the block we are about to load, in which
816 : // case we must use SeekGE below. The keys in the block we are about to load
817 : // start right after the topLevelIndex key (before we Next).
818 : //
819 : // An example of how this can happen:
820 : //
821 : // Second-level index block 1 - contains keys a@1, c@1
822 : // Second-level index block 2 - contains keys e@1, g@1
823 : // Second-level index block 3 - contains keys i@2, k@2
824 : //
825 : // The virtual table lower bound is f. We have a range key masking filter
826 : // that filters keys with @1 suffix. We are positioned inside block 3 then
827 : // we Prev(). Block 2 is entirely filtered out, which makes us move to
828 : // block 1. Now the range key masking filter gets an update (via
829 : // SpanChanged) and it no longer filters out any keys. At this point if a
830 : // Next happens, we will load block 2 but it would not be legal to return
831 : // "e@1" which is outside the virtual bounds.
832 : //
833 : // The core of the problem is that skipBackward doesn't know it can stop
834 : // at block 2, because it doesn't know what keys are at the start of that
835 : // block. This is why we don't have this problem in the opposite
836 : // direction: skipForward will never go beyond the last relevant block
837 : // because it looks at the separator key which is an upper bound for the
838 : // block.
839 : //
840 : // Note that this is only a problem with virtual tables; we make no
841 : // guarantees wrt an iterator lower bound when we iterate forward. But we
842 : // must never return keys that are not inside the virtual table.
843 1 : useSeek := i.vState != nil &&
844 1 : (!i.topLevelIndex.Valid() || base.InternalCompare(i.cmp, *i.topLevelIndex.Key(), i.vState.lower) < 0)
845 1 :
846 1 : i.exhaustedBounds = 0
847 1 : topLevelKey := i.topLevelIndex.Next()
848 1 : if topLevelKey == nil {
849 1 : i.data.Invalidate()
850 1 : i.index.Invalidate()
851 1 : return nil
852 1 : }
853 1 : result := i.loadIndex(+1)
854 1 : if result == loadBlockFailed {
855 0 : return nil
856 0 : }
857 1 : if result == loadBlockOK {
858 1 : var ikv *base.InternalKV
859 1 : if useSeek {
860 1 : ikv = i.singleLevelIterator.SeekGE(i.lower, base.SeekGEFlagsNone)
861 1 : } else {
862 1 : ikv = i.singleLevelIterator.firstInternal()
863 1 : }
864 1 : if ikv != nil {
865 1 : return i.maybeVerifyKey(ikv)
866 1 : }
867 : // Next iteration will return if singleLevelIterator set
868 : // exhaustedBounds = +1.
869 1 : } else {
870 1 : // result == loadBlockIrrelevant. Enforce the upper bound here since
871 1 : // we don't want to bother moving to the next entry in the top level
872 1 : // index if the upper bound is already exceeded. Note that the next
873 1 : // entry starts with keys >= topLevelKey.K.UserKey since even
874 1 : // though this is the block separator, the same user key can span
875 1 : // multiple index blocks. If upper is exclusive we use >= below,
876 1 : // else we use >.
877 1 : if i.upper != nil {
878 1 : cmp := i.cmp(topLevelKey.K.UserKey, i.upper)
879 1 : if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 {
880 1 : i.exhaustedBounds = +1
881 1 : // Next iteration will return.
882 1 : }
883 : }
884 : }
885 : }
886 : }
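
// The useSeek guard above, restated: it is computed before stepping the
// top-level index forward, and it decides how to enter the next block:
//
//	useSeek := i.vState != nil &&
//		(!i.topLevelIndex.Valid() ||
//			base.InternalCompare(i.cmp, *i.topLevelIndex.Key(), i.vState.lower) < 0)
//	// useSeek  -> singleLevelIterator.SeekGE(i.lower, ...): the block may
//	//             begin before the virtual lower bound
//	// !useSeek -> singleLevelIterator.firstInternal(): the block's first
//	//             key is known to be inside the virtual bounds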
887 :
888 1 : func (i *twoLevelIterator) skipBackward() *base.InternalKV {
889 1 : for {
890 1 : if i.err != nil || i.exhaustedBounds < 0 {
891 1 : return nil
892 1 : }
893 1 : i.exhaustedBounds = 0
894 1 : topLevelKey := i.topLevelIndex.Prev()
895 1 : if topLevelKey == nil {
896 1 : i.data.Invalidate()
897 1 : i.index.Invalidate()
898 1 : return nil
899 1 : }
900 1 : result := i.loadIndex(-1)
901 1 : if result == loadBlockFailed {
902 0 : return nil
903 0 : }
904 1 : if result == loadBlockOK {
905 1 : ikv := i.singleLevelIterator.lastInternal()
906 1 : if ikv != nil {
907 1 : return i.maybeVerifyKey(ikv)
908 1 : }
909 :
910 : // Next iteration will return if singleLevelIterator set
911 : // exhaustedBounds = -1.
912 1 : } else {
913 1 : // result == loadBlockIrrelevant. Enforce the lower bound here since
914 1 : // we don't want to bother moving to the previous entry in the top
915 1 : // level index if the lower bound is already exceeded. Note that the
916 1 : // previous entry starts with keys <= topLevelKey.K.UserKey since
917 1 : // even though this is the current block's separator, the same user
918 1 : // key can span multiple index blocks.
919 1 : if i.lower != nil && i.cmp(topLevelKey.K.UserKey, i.lower) < 0 {
920 1 : i.exhaustedBounds = -1
921 1 : // Next iteration will return.
922 1 : }
923 : }
924 : }
925 : }
926 :
927 : // Close implements internalIterator.Close, as documented in the pebble
928 : // package.
929 1 : func (i *twoLevelIterator) Close() error {
930 1 : if invariants.Enabled && i.inPool {
931 0 : panic("Close called on interator in pool")
932 : }
933 1 : i.iterStats.close()
934 1 : var err error
935 1 : if i.closeHook != nil {
936 1 : err = firstError(err, i.closeHook(i))
937 1 : }
938 1 : err = firstError(err, i.data.Close())
939 1 : err = firstError(err, i.index.Close())
940 1 : err = firstError(err, i.topLevelIndex.Close())
941 1 : if i.indexFilterRH != nil {
942 1 : err = firstError(err, i.indexFilterRH.Close())
943 1 : i.indexFilterRH = nil
944 1 : }
945 1 : if i.dataRH != nil {
946 1 : err = firstError(err, i.dataRH.Close())
947 1 : i.dataRH = nil
948 1 : }
949 1 : err = firstError(err, i.err)
950 1 : if i.bpfs != nil {
951 1 : releaseBlockPropertiesFilterer(i.bpfs)
952 1 : }
953 1 : if i.vbReader != nil {
954 1 : i.vbReader.close()
955 1 : }
956 1 : if i.vbRH != nil {
957 1 : err = firstError(err, i.vbRH.Close())
958 1 : i.vbRH = nil
959 1 : }
960 1 : *i = twoLevelIterator{
961 1 : singleLevelIterator: i.singleLevelIterator.resetForReuse(),
962 1 : topLevelIndex: i.topLevelIndex.ResetForReuse(),
963 1 : }
964 1 : twoLevelIterPool.Put(i)
965 1 : return err
966 : }
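
// Close above funnels every closer through firstError, which keeps the
// earliest non-nil error and drops later ones (a sketch of the idiom):
//
//	var err error
//	err = firstError(err, i.data.Close())  // recorded if non-nil
//	err = firstError(err, i.index.Close()) // ignored if err is already set
//	err = firstError(err, i.topLevelIndex.Close())
//
// Zeroing *i before twoLevelIterPool.Put drops references the pooled value
// would otherwise retain; the inPool flag checked at the top of Close guards
// against using an iterator after it has been pooled.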
967 :
968 : // Note: twoLevelCompactionIterator and compactionIterator are very similar but
969 : // were separated due to performance.
970 : type twoLevelCompactionIterator struct {
971 : *twoLevelIterator
972 : }
973 :
974 : // twoLevelCompactionIterator implements the base.InternalIterator interface.
975 : var _ base.InternalIterator = (*twoLevelCompactionIterator)(nil)
976 :
977 1 : func (i *twoLevelCompactionIterator) Close() error {
978 1 : return i.twoLevelIterator.Close()
979 1 : }
980 :
981 0 : func (i *twoLevelCompactionIterator) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV {
982 0 : panic("pebble: SeekGE unimplemented")
983 : }
984 :
985 : func (i *twoLevelCompactionIterator) SeekPrefixGE(
986 : prefix, key []byte, flags base.SeekGEFlags,
987 0 : ) *base.InternalKV {
988 0 : panic("pebble: SeekPrefixGE unimplemented")
989 : }
990 :
991 0 : func (i *twoLevelCompactionIterator) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV {
992 0 : panic("pebble: SeekLT unimplemented")
993 : }
994 :
995 1 : func (i *twoLevelCompactionIterator) First() *base.InternalKV {
996 1 : i.err = nil // clear cached iteration error
997 1 : return i.skipForward(i.twoLevelIterator.First())
998 1 : }
999 :
1000 0 : func (i *twoLevelCompactionIterator) Last() *base.InternalKV {
1001 0 : panic("pebble: Last unimplemented")
1002 : }
1003 :
1004 : // Note: twoLevelCompactionIterator.Next mirrors the implementation of
1005 : // twoLevelIterator.Next due to performance. Keep the two in sync.
1006 1 : func (i *twoLevelCompactionIterator) Next() *base.InternalKV {
1007 1 : if i.err != nil {
1008 0 : return nil
1009 0 : }
1010 1 : return i.skipForward(i.singleLevelIterator.Next())
1011 : }
1012 :
1013 0 : func (i *twoLevelCompactionIterator) NextPrefix(succKey []byte) *base.InternalKV {
1014 0 : panic("pebble: NextPrefix unimplemented")
1015 : }
1016 :
1017 0 : func (i *twoLevelCompactionIterator) Prev() *base.InternalKV {
1018 0 : panic("pebble: Prev unimplemented")
1019 : }
1020 :
1021 1 : func (i *twoLevelCompactionIterator) skipForward(kv *base.InternalKV) *base.InternalKV {
1022 1 : if kv == nil {
1023 1 : for {
1024 1 : if key := i.topLevelIndex.Next(); key == nil {
1025 1 : break
1026 : }
1027 1 : result := i.loadIndex(+1)
1028 1 : if result != loadBlockOK {
1029 0 : if i.err != nil {
1030 0 : break
1031 : }
1032 0 : switch result {
1033 0 : case loadBlockFailed:
1034 0 : // We checked that i.topLevelIndex was at a valid entry, so
1035 0 : // loadBlockFailed could not have happened due to i.topLevelIndex
1036 0 : // being exhausted, and must be due to an error.
1037 0 : panic("loadBlock should not have failed with no error")
1038 0 : case loadBlockIrrelevant:
1039 0 : panic("compactionIter should not be using block intervals for skipping")
1040 0 : default:
1041 0 : panic(fmt.Sprintf("unexpected case %d", result))
1042 : }
1043 : }
1044 : // result == loadBlockOK
1045 1 : if kv = i.singleLevelIterator.First(); kv != nil {
1046 1 : break
1047 : }
1048 : }
1049 : }
1050 :
1051 : // We have an upper bound when the table is virtual.
1052 1 : if i.upper != nil && kv != nil {
1053 1 : cmp := i.cmp(kv.K.UserKey, i.upper)
1054 1 : if cmp > 0 || (!i.endKeyInclusive && cmp == 0) {
1055 0 : return nil
1056 0 : }
1057 : }
1058 :
1059 1 : return kv
1060 : }
|