Line data Source code
1 : // Copyright 2011 The LevelDB-Go and Pebble Authors. All rights reserved. Use
2 : // of this source code is governed by a BSD-style license that can be found in
3 : // the LICENSE file.
4 :
5 : package sstable
6 :
7 : import (
8 : "bytes"
9 : "context"
10 : "fmt"
11 : "sync"
12 : "unsafe"
13 :
14 : "github.com/cockroachdb/pebble/internal/base"
15 : "github.com/cockroachdb/pebble/internal/invariants"
16 : "github.com/cockroachdb/pebble/internal/treeprinter"
17 : "github.com/cockroachdb/pebble/objstorage"
18 : "github.com/cockroachdb/pebble/objstorage/objstorageprovider"
19 : "github.com/cockroachdb/pebble/objstorage/objstorageprovider/objiotracing"
20 : "github.com/cockroachdb/pebble/sstable/block"
21 : "github.com/cockroachdb/pebble/sstable/rowblk"
22 : )
23 :
24 : // singleLevelIterator iterates over an entire table of data. To seek for a given
25 : // key, it first looks in the index for the block that contains that key, and then
26 : // looks inside that block.
27 : //
28 : // singleLevelIterator is parameterized by the type of the data block iterator.
29 : // The type parameters are designed to allow the singleLevelIterator to embed
30 : // the data block iterator struct within itself, avoiding an extra allocation
31 : // and pointer indirection. The complication comes from the fact that we want to
32 : // implement the interface on a pointer receiver but embed the non-pointer type
33 : // within the struct. The D type parameter is the non-pointer data block
34 : // iterator type, and the PD type parameter is the *D type that actually
35 : // implements the DataBlockIterator constraint.
36 : //
37 : // Unfortunately, uses of the [data] field must explicitly cast &data to the PD
38 : // type in order to access its interface methods. This pattern is taken from the
39 : // Go generics proposal:
40 : // https://go.googlesource.com/proposal/+/refs/heads/master/design/43651-type-parameters.md#pointer-method-example
41 : type singleLevelIterator[D any, PD block.DataBlockIterator[D]] struct {
42 : ctx context.Context
43 : cmp Compare
44 : // Global lower/upper bound for the iterator.
45 : lower []byte
46 : upper []byte
47 : bpfs *BlockPropertiesFilterer
48 : // Per-block lower/upper bound. Nil if the bound does not apply to the block
49 : // because we determined the block lies completely within the bound.
50 : blockLower []byte
51 : blockUpper []byte
52 : reader *Reader
53 : // vState will be set iff the iterator is constructed for virtual sstable
54 : // iteration.
55 : vState *virtualState
56 : // endKeyInclusive is set to force the iterator to treat the upper field as
57 : // inclusive while iterating instead of exclusive.
58 : endKeyInclusive bool
59 : index rowblk.Iter
60 : indexFilterRH objstorage.ReadHandle
61 : indexFilterRHPrealloc objstorageprovider.PreallocatedReadHandle
62 : data D
63 : dataRH objstorage.ReadHandle
64 : dataRHPrealloc objstorageprovider.PreallocatedReadHandle
65 : // dataBH refers to the last data block that the iterator considered
66 : // loading. It may not actually have loaded the block, due to an error or
67 : // because it was considered irrelevant.
68 : dataBH block.Handle
69 : vbReader *valueBlockReader
70 : // vbRH is the read handle for value blocks, which are in a different
71 : // part of the sstable than data blocks.
72 : vbRH objstorage.ReadHandle
73 : vbRHPrealloc objstorageprovider.PreallocatedReadHandle
74 : err error
75 : closeHook func(i Iterator) error
76 : // stats and iterStats are slightly different. stats is a shared struct
77 : // supplied from the outside, and represents stats for the whole iterator
78 : // tree and can be reset from the outside (e.g. when the pebble.Iterator is
79 : // being reused). It is currently only provided when the iterator tree is
80 : // rooted at pebble.Iterator. iterStats is this sstable iterator's private
81 : // stats that are reported to a CategoryStatsCollector when this iterator is
82 : // closed. More paths are instrumented with this as the
83 : // CategoryStatsCollector needed for this is provided by the
84 : // tableCacheContainer (which is more universally used).
85 : stats *base.InternalIteratorStats
86 : iterStats iterStatsAccumulator
87 : bufferPool *block.BufferPool
88 :
89 : // boundsCmp and positionedUsingLatestBounds are for optimizing iteration
90 : // that uses multiple adjacent bounds. The seek after setting a new bound
91 : // can use the fact that the iterator is either within the previous bounds
92 : // or exactly one key before or after the bounds. If the new bounds is
93 : // after/before the previous bounds, and we are already positioned at a
94 : // block that is relevant for the new bounds, we can try to first position
95 : // using Next/Prev (repeatedly) instead of doing a more expensive seek.
96 : //
97 : // When there are wide files at higher levels that match the bounds
98 : // but don't have any data for the bound, we will already be
99 : // positioned at the key beyond the bounds and won't need to do much
100 : // work -- given that most data is in L6, such files are likely to
101 : // dominate the performance of the mergingIter, and may be the main
102 : // benefit of this performance optimization (of course it also helps
103 : // when the file that has the data has successive seeks that stay in
104 : // the same block).
105 : //
106 : // Specifically, boundsCmp captures the relationship between the previous
107 : // and current bounds, if the iterator had been positioned after setting
108 : // the previous bounds. If it was not positioned, i.e., Seek/First/Last
109 : // were not called, we don't know where it is positioned and cannot
110 : // optimize.
111 : //
112 : // Example: Bounds moving forward, and iterator exhausted in forward direction.
113 : // bounds = [f, h), ^ shows block iterator position
114 : // file contents [ a b c d e f g h i j k ]
115 : // ^
116 : // new bounds = [j, k). Since positionedUsingLatestBounds=true, boundsCmp is
117 : // set to +1. SeekGE(j) can use next (the optimization also requires that j
118 : // is within the block; that requirement is not for correctness, but to limit
119 : // the optimization to cases where it will actually be an optimization).
120 : //
121 : // Example: Bounds moving forward.
122 : // bounds = [f, h), ^ shows block iterator position
123 : // file contents [ a b c d e f g h i j k ]
124 : // ^
125 : // new bounds = [j, k). Since positionedUsingLatestBounds=true, boundsCmp is
126 : // set to +1. SeekGE(j) can use next.
127 : //
128 : // Example: Bounds moving forward, but iterator not positioned using previous
129 : // bounds.
130 : // bounds = [f, h), ^ shows block iterator position
131 : // file contents [ a b c d e f g h i j k ]
132 : // ^
133 : // new bounds = [i, j). Iterator is at j since it was never positioned using
134 : // [f, h). So positionedUsingLatestBounds=false, and boundsCmp is set to 0.
135 : // SeekGE(i) will not use next.
136 : //
137 : // Example: Bounds moving forward and sparse file
138 : // bounds = [f, h), ^ shows block iterator position
139 : // file contents [ a z ]
140 : // ^
141 : // new bounds = [j, k). Since positionedUsingLatestBounds=true, boundsCmp is
142 : // set to +1. SeekGE(j) notices that the iterator is already past j and does
143 : // not need to do anything.
144 : //
145 : // Similar examples can be constructed for backward iteration.
146 : //
147 : // This notion of exactly one key before or after the bounds is not quite
148 : // true when block properties are used to ignore blocks. In that case we
149 : // can't stop precisely at the first block that is past the bounds since
150 : // we are using the index entries to enforce the bounds.
151 : //
152 : // e.g. 3 blocks with keys [b, c] [f, g], [i, j, k] with index entries d,
153 : // h, l. And let the lower bound be k, and we are reverse iterating. If
154 : // the block [i, j, k] is ignored due to the block interval annotations we
155 : // do need to move the index to block [f, g] since the index entry for the
156 : // [i, j, k] block is l which is not less than the lower bound of k. So we
157 : // have passed the entries i, j.
158 : //
159 : // This behavior is harmless since the block property filters are fixed
160 : // for the lifetime of the iterator so i, j are irrelevant. In addition,
161 : // the current code will not load the [f, g] block, so the seek
162 : // optimization that attempts to use Next/Prev does not apply anyway.
163 : boundsCmp int
164 : positionedUsingLatestBounds bool
165 :
166 : // exhaustedBounds represents whether the iterator is exhausted for
167 : // iteration by reaching the upper or lower bound. +1 when the upper
168 : // bound is exhausted, -1 when the lower bound is exhausted, and 0 when
169 : // neither. exhaustedBounds is also used for the TrySeekUsingNext
170 : // optimization in twoLevelIterator and singleLevelIterator. Care should be
171 : // taken in setting this in twoLevelIterator before calling into
172 : // singleLevelIterator, given that these two iterators share this field.
173 : exhaustedBounds int8
174 :
175 : // useFilterBlock controls whether the bloom filter block in this sstable, if
176 : // present, should be used for prefix seeks or not. In some cases it is
177 : // beneficial to skip a filter block even if it exists (e.g. if the
178 : // probability of a match is high).
179 : useFilterBlock bool
180 : lastBloomFilterMatched bool
181 :
182 : transforms IterTransforms
183 :
184 : // inPool is set to true before putting the iterator in the reusable pool;
185 : // used to detect double-close.
186 : inPool bool
187 : // pool is the pool from which the iterator was allocated and to which the
188 : // iterator should be returned on Close. Because the iterator is
189 : // parameterized by the type of the data block iterator, pools must be
190 : // specific to the type of the data block iterator.
191 : //
192 : // If the iterator is embedded within a twoLevelIterator, pool is nil and
193 : // the twoLevelIterator.pool field may be non-nil.
194 : pool *sync.Pool
195 : }
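
// The doc comment on singleLevelIterator above describes the D/PD
// pointer-method pattern. Below is a minimal, hypothetical sketch of that
// pattern (exampleResettable, exampleCounter, and exampleContainer are
// illustrative names only, not part of this package): the container embeds the
// non-pointer type D by value, and its methods convert &inner to PD in order
// to reach the pointer-receiver methods declared by the constraint, avoiding a
// separate allocation and pointer indirection.
type exampleResettable[D any] interface {
	*D
	Reset()
}

type exampleCounter struct{ n int }

func (c *exampleCounter) Reset() { c.n = 0 }

type exampleContainer[D any, PD exampleResettable[D]] struct {
	inner D // stored by value; no extra allocation
}

func (c *exampleContainer[D, PD]) Reset() {
	// &c.inner must be explicitly converted to PD to call the constraint's
	// pointer-receiver method.
	PD(&c.inner).Reset()
}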
196 :
197 : // singleLevelIterator implements the base.InternalIterator interface.
198 : var _ base.InternalIterator = (*singleLevelIterator[rowblk.Iter, *rowblk.Iter])(nil)
199 :
200 : // newRowBlockSingleLevelIterator reads the index block and creates and
201 : // initializes a singleLevelIterator over an sstable with row-oriented data
202 : // blocks.
203 : //
204 : // Note that lower, upper are iterator bounds and are separate from virtual
205 : // sstable bounds. If the virtualState passed in is not nil, then virtual
206 : // sstable bounds will be enforced.
207 : func newRowBlockSingleLevelIterator(
208 : ctx context.Context,
209 : r *Reader,
210 : v *virtualState,
211 : transforms IterTransforms,
212 : lower, upper []byte,
213 : filterer *BlockPropertiesFilterer,
214 : filterBlockSizeLimit FilterBlockSizeLimit,
215 : stats *base.InternalIteratorStats,
216 : categoryAndQoS CategoryAndQoS,
217 : statsCollector *CategoryStatsCollector,
218 : rp ReaderProvider,
219 : bufferPool *block.BufferPool,
220 1 : ) (*singleLevelIterator[rowblk.Iter, *rowblk.Iter], error) {
221 1 : if r.err != nil {
222 0 : return nil, r.err
223 0 : }
224 : // TODO(jackson): When we have a columnar-block sstable format, assert that
225 : // the table format is row-oriented.
226 1 : i := singleLevelIterRowBlockPool.Get().(*singleLevelIterator[rowblk.Iter, *rowblk.Iter])
227 1 : useFilterBlock := shouldUseFilterBlock(r, filterBlockSizeLimit)
228 1 : i.init(
229 1 : ctx, r, v, transforms, lower, upper, filterer, useFilterBlock,
230 1 : stats, categoryAndQoS, statsCollector, bufferPool,
231 1 : )
232 1 : if r.tableFormat >= TableFormatPebblev3 {
233 1 : if r.Properties.NumValueBlocks > 0 {
234 1 : // NB: we cannot avoid this ~248 byte allocation, since valueBlockReader
235 1 : // can outlive the singleLevelIterator due to be being embedded in a
236 1 : // LazyValue. This consumes ~2% in microbenchmark CPU profiles, but we
237 1 : // should only optimize this if it shows up as significant in end-to-end
238 1 : // CockroachDB benchmarks, since it is tricky to do so. One possibility
239 1 : // is that if many sstable iterators only get positioned at latest
240 1 : // versions of keys, and therefore never expose a LazyValue that is
241 1 : // separated to their callers, they can put this valueBlockReader into a
242 1 : // sync.Pool.
243 1 : i.vbReader = &valueBlockReader{
244 1 : bpOpen: i,
245 1 : rp: rp,
246 1 : vbih: r.valueBIH,
247 1 : stats: stats,
248 1 : }
249 1 : (&i.data).SetGetLazyValuer(i.vbReader)
250 1 : i.vbRH = objstorageprovider.UsePreallocatedReadHandle(r.readable, objstorage.NoReadBefore, &i.vbRHPrealloc)
251 1 : }
252 1 : i.data.SetHasValuePrefix(true)
253 : }
254 :
255 1 : indexH, err := r.readIndex(ctx, i.indexFilterRH, stats, &i.iterStats)
256 1 : if err == nil {
257 1 : err = i.index.InitHandle(i.cmp, r.Split, indexH, transforms)
258 1 : }
259 1 : if err != nil {
260 0 : _ = i.Close()
261 0 : return nil, err
262 0 : }
263 1 : return i, nil
264 : }
265 :
266 : // init initializes the singleLevelIterator struct. It does not read the index.
267 : func (i *singleLevelIterator[D, PD]) init(
268 : ctx context.Context,
269 : r *Reader,
270 : v *virtualState,
271 : transforms IterTransforms,
272 : lower, upper []byte,
273 : filterer *BlockPropertiesFilterer,
274 : useFilterBlock bool,
275 : stats *base.InternalIteratorStats,
276 : categoryAndQoS CategoryAndQoS,
277 : statsCollector *CategoryStatsCollector,
278 : bufferPool *block.BufferPool,
279 1 : ) {
280 1 : i.inPool = false
281 1 : i.ctx = ctx
282 1 : i.lower = lower
283 1 : i.upper = upper
284 1 : i.bpfs = filterer
285 1 : i.useFilterBlock = useFilterBlock
286 1 : i.reader = r
287 1 : i.cmp = r.Compare
288 1 : i.stats = stats
289 1 : i.transforms = transforms
290 1 : i.bufferPool = bufferPool
291 1 : if v != nil {
292 1 : i.vState = v
293 1 : i.endKeyInclusive, i.lower, i.upper = v.constrainBounds(lower, upper, false /* endInclusive */)
294 1 : }
295 :
296 1 : i.iterStats.init(categoryAndQoS, statsCollector)
297 1 :
298 1 : i.indexFilterRH = objstorageprovider.UsePreallocatedReadHandle(
299 1 : r.readable, objstorage.ReadBeforeForIndexAndFilter, &i.indexFilterRHPrealloc)
300 1 : i.dataRH = objstorageprovider.UsePreallocatedReadHandle(
301 1 : r.readable, objstorage.NoReadBefore, &i.dataRHPrealloc)
302 : }
303 :
304 : // Helper function to check if keys returned from the iterator are within virtual bounds.
305 1 : func (i *singleLevelIterator[D, PD]) maybeVerifyKey(kv *base.InternalKV) *base.InternalKV {
306 1 : if invariants.Enabled && kv != nil && i.vState != nil {
307 1 : key := kv.K.UserKey
308 1 : v := i.vState
309 1 : lc := i.cmp(key, v.lower.UserKey)
310 1 : uc := i.cmp(key, v.upper.UserKey)
311 1 : if lc < 0 || uc > 0 || (uc == 0 && v.upper.IsExclusiveSentinel()) {
312 0 : panic(fmt.Sprintf("key %q out of singleLeveliterator virtual bounds %s %s", key, v.lower.UserKey, v.upper.UserKey))
313 : }
314 : }
315 1 : return kv
316 : }
317 :
318 : // SetupForCompaction sets up the singleLevelIterator for use with compactionIter.
319 : // Currently, it skips readahead ramp-up. It should be called after init is called.
320 1 : func (i *singleLevelIterator[D, PD]) SetupForCompaction() {
321 1 : i.dataRH.SetupForCompaction()
322 1 : if i.vbRH != nil {
323 1 : i.vbRH.SetupForCompaction()
324 1 : }
325 : }
326 :
327 1 : func (i *singleLevelIterator[D, PD]) resetForReuse() singleLevelIterator[D, PD] {
328 1 : return singleLevelIterator[D, PD]{
329 1 : index: i.index.ResetForReuse(),
330 1 : data: PD(&i.data).ResetForReuse(),
331 1 : pool: i.pool,
332 1 : inPool: true,
333 1 : }
334 1 : }
335 :
336 1 : func (i *singleLevelIterator[D, PD]) initBounds() {
337 1 : // Trim the iteration bounds for the current block. We don't have to check
338 1 : // the bounds on each iteration if the block is entirely contained within the
339 1 : // iteration bounds.
340 1 : i.blockLower = i.lower
341 1 : if i.blockLower != nil {
342 1 : kv := PD(&i.data).First()
343 1 : // TODO(radu): this should be <= 0
344 1 : if kv != nil && i.cmp(i.blockLower, kv.K.UserKey) < 0 {
345 1 : // The lower-bound is less than the first key in the block. No need
346 1 : // to check the lower-bound again for this block.
347 1 : i.blockLower = nil
348 1 : }
349 : }
350 1 : i.blockUpper = i.upper
351 1 : // TODO(radu): this should be >= 0 if blockUpper is inclusive.
352 1 : if i.blockUpper != nil && i.cmp(i.blockUpper, i.index.Key().UserKey) > 0 {
353 1 : // The upper-bound is greater than the index key which itself is greater
354 1 : // than or equal to every key in the block. No need to check the
355 1 : // upper-bound again for this block. Even if blockUpper is inclusive
356 1 : // because of upper being inclusive, we can still safely set blockUpper
357 1 : // to nil here.
358 1 : i.blockUpper = nil
359 1 : }
360 : }
361 :
362 1 : func (i *singleLevelIterator[D, PD]) initBoundsForAlreadyLoadedBlock() {
363 1 : // TODO(radu): determine automatically if we need to call First or not and
364 1 : // unify this function with initBounds().
365 1 : if PD(&i.data).FirstUserKey() == nil {
366 0 : panic("initBoundsForAlreadyLoadedBlock must not be called on empty or corrupted block")
367 : }
368 1 : i.blockLower = i.lower
369 1 : if i.blockLower != nil {
370 1 : firstUserKey := PD(&i.data).FirstUserKey()
371 1 : // TODO(radu): this should be <= 0
372 1 : if firstUserKey != nil && i.cmp(i.blockLower, firstUserKey) < 0 {
373 1 : // The lower-bound is less than the first key in the block. No need
374 1 : // to check the lower-bound again for this block.
375 1 : i.blockLower = nil
376 1 : }
377 : }
378 1 : i.blockUpper = i.upper
379 1 : // TODO(radu): this should be >= 0 if blockUpper is inclusive.
380 1 : if i.blockUpper != nil && i.cmp(i.blockUpper, i.index.Key().UserKey) > 0 {
381 1 : // The upper-bound is greater than the index key which itself is greater
382 1 : // than or equal to every key in the block. No need to check the
383 1 : // upper-bound again for this block.
384 1 : i.blockUpper = nil
385 1 : }
386 : }
387 :
388 : // Deterministic disabling (in testing mode) of the bounds-based optimization
389 : // that avoids seeking. Uses the iterator pointer, since we want diversity in
390 : // iterator behavior for the same SetBounds call. Used for tests.
391 1 : func testingDisableBoundsOpt(bound []byte, ptr uintptr) bool {
392 1 : if !invariants.Enabled || ensureBoundsOptDeterminism {
393 0 : return false
394 0 : }
395 : // Fibonacci hash https://probablydance.com/2018/06/16/fibonacci-hashing-the-optimization-that-the-world-forgot-or-a-better-alternative-to-integer-modulo/
396 1 : simpleHash := (11400714819323198485 * uint64(ptr)) >> 63
397 1 : return bound[len(bound)-1]&byte(1) == 0 && simpleHash == 0
398 : }
399 :
400 : // ensureBoundsOptDeterminism provides a facility for disabling the randomized
401 : // suppression of the bounds optimization performed by testingDisableBoundsOpt,
402 : // for tests that require deterministic iterator behavior. Some unit tests
403 : // examine internal iterator state and require this behavior to be deterministic.
404 : var ensureBoundsOptDeterminism bool
405 :
406 : // SetBoundsWithSyntheticPrefix indicates whether this iterator requires keys
407 : // passed to its SetBounds() method by a prefix rewriting wrapper to be *not*
408 : // rewritten to be in terms of this iterator's content, but instead be passed
409 : // as-is, i.e. with the synthetic prefix still on them.
410 : //
411 : // This allows an optimization when this iterator is passing these bounds on to
412 : // a vState to additionally constrain them. In said vState, passed bounds are
413 : // combined with the vState bounds which are in terms of the rewritten prefix.
414 : // If the caller rewrote bounds to be in terms of content prefix and SetBounds
415 : // passed those to vState, the vState would need to *un*rewrite them back to the
416 : // synthetic prefix in order to combine them with the vState bounds. Thus, if
417 : // this iterator knows bounds will be passed to vState, it can signal that it
418 : // this iterator knows bounds will be passed to vState, it can signal that
419 0 : func (i singleLevelIterator[P, PD]) SetBoundsWithSyntheticPrefix() bool {
420 0 : return i.vState != nil
421 0 : }
422 :
423 : // SetBounds implements internalIterator.SetBounds, as documented in the pebble
424 : // package. Note that the upper field is exclusive.
425 1 : func (i *singleLevelIterator[P, PD]) SetBounds(lower, upper []byte) {
426 1 : i.boundsCmp = 0
427 1 : if i.vState != nil {
428 1 : // If the reader is constructed for a virtual sstable, then we must
429 1 : // constrain the bounds of the reader. For physical sstables, the bounds
430 1 : // can be wider than the actual sstable's bounds because we won't
431 1 : // accidentally expose additional keys as there are no additional keys.
432 1 : i.endKeyInclusive, lower, upper = i.vState.constrainBounds(
433 1 : lower, upper, false,
434 1 : )
435 1 : } else {
436 1 : // TODO(bananabrick): Figure out the logic here to enable the boundsCmp
437 1 : // optimization for virtual sstables.
438 1 : if i.positionedUsingLatestBounds {
439 1 : if i.upper != nil && lower != nil && i.cmp(i.upper, lower) <= 0 {
440 1 : i.boundsCmp = +1
441 1 : if testingDisableBoundsOpt(lower, uintptr(unsafe.Pointer(i))) {
442 1 : i.boundsCmp = 0
443 1 : }
444 1 : } else if i.lower != nil && upper != nil && i.cmp(upper, i.lower) <= 0 {
445 1 : i.boundsCmp = -1
446 1 : if testingDisableBoundsOpt(upper, uintptr(unsafe.Pointer(i))) {
447 1 : i.boundsCmp = 0
448 1 : }
449 : }
450 : }
451 : }
452 :
453 1 : i.positionedUsingLatestBounds = false
454 1 : i.lower = lower
455 1 : i.upper = upper
456 1 : i.blockLower = nil
457 1 : i.blockUpper = nil
458 : }
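
// boundsCmpUsageSketch is a hypothetical caller (illustrative only, not part
// of the real API surface) showing the monotonically-advancing-bounds pattern
// that the boundsCmp/positionedUsingLatestBounds optimization targets. Once
// the iterator has been positioned, moving both bounds strictly forward allows
// the next SeekGE to try Next() within the already-loaded block instead of
// re-seeking the index (for a non-virtual table, and modulo the randomized
// test-only suppression in testingDisableBoundsOpt).
func boundsCmpUsageSketch(it base.InternalIterator) {
	it.SetBounds([]byte("f"), []byte("h"))
	for kv := it.SeekGE([]byte("f"), base.SeekGEFlagsNone); kv != nil; kv = it.Next() {
		_ = kv // consume keys in [f, h)
	}
	// The previous upper bound ("h") is <= the new lower bound ("j"), so
	// SetBounds records boundsCmp=+1 and the SeekGE below may step forward
	// within the current block rather than seeking the index.
	it.SetBounds([]byte("j"), []byte("k"))
	if kv := it.SeekGE([]byte("j"), base.SeekGEFlagsNone); kv != nil {
		_ = kv
	}
}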
459 :
460 0 : func (i *singleLevelIterator[P, PD]) SetContext(ctx context.Context) {
461 0 : i.ctx = ctx
462 0 : }
463 :
464 : // loadBlock loads the block at the current index position and leaves i.data
465 : // unpositioned. If unsuccessful, it sets i.err to any error encountered, which
466 : // may be nil if we have simply exhausted the entire table.
467 1 : func (i *singleLevelIterator[P, PD]) loadBlock(dir int8) loadBlockResult {
468 1 : if !i.index.Valid() {
469 0 : // Ensure the data block iterator is invalidated even if loading of the block
470 0 : // fails.
471 0 : PD(&i.data).Invalidate()
472 0 : return loadBlockFailed
473 0 : }
474 : // Load the next block.
475 1 : v := i.index.Value()
476 1 : bhp, err := decodeBlockHandleWithProperties(v.InPlaceValue())
477 1 : if i.dataBH == bhp.Handle && PD(&i.data).Valid() {
478 1 : // We're already at the data block we want to load. Reset bounds in case
479 1 : // they changed since the last seek, but don't reload the block from cache
480 1 : // or disk.
481 1 : //
482 1 : // It's safe to leave i.data in its original state here, as all callers to
483 1 : // loadBlock make an absolute positioning call (i.e. a seek, first, or last)
484 1 : // to `i.data` right after loadBlock returns loadBlockOK.
485 1 : i.initBounds()
486 1 : return loadBlockOK
487 1 : }
488 : // Ensure the data block iterator is invalidated even if loading of the block
489 : // fails.
490 1 : PD(&i.data).Invalidate()
491 1 : i.dataBH = bhp.Handle
492 1 : if err != nil {
493 0 : i.err = errCorruptIndexEntry(err)
494 0 : return loadBlockFailed
495 0 : }
496 1 : if i.bpfs != nil {
497 1 : intersects, err := i.bpfs.intersects(bhp.Props)
498 1 : if err != nil {
499 0 : i.err = errCorruptIndexEntry(err)
500 0 : return loadBlockFailed
501 0 : }
502 1 : if intersects == blockMaybeExcluded {
503 1 : intersects = i.resolveMaybeExcluded(dir)
504 1 : }
505 1 : if intersects == blockExcluded {
506 1 : return loadBlockIrrelevant
507 1 : }
508 : // blockIntersects
509 : }
510 1 : ctx := objiotracing.WithBlockType(i.ctx, objiotracing.DataBlock)
511 1 : block, err := i.reader.readBlock(
512 1 : ctx, i.dataBH, nil /* transform */, i.dataRH, i.stats, &i.iterStats, i.bufferPool)
513 1 : if err != nil {
514 0 : i.err = err
515 0 : return loadBlockFailed
516 0 : }
517 1 : i.err = PD(&i.data).InitHandle(i.cmp, i.reader.Split, block, i.transforms)
518 1 : if i.err != nil {
519 0 : // The block is partially loaded, and we don't want it to appear valid.
520 0 : PD(&i.data).Invalidate()
521 0 : return loadBlockFailed
522 0 : }
523 1 : i.initBounds()
524 1 : return loadBlockOK
525 : }
526 :
527 : // readBlockForVBR implements the blockProviderWhenOpen interface for use by
528 : // the valueBlockReader.
529 : func (i *singleLevelIterator[D, PD]) readBlockForVBR(
530 : h block.Handle, stats *base.InternalIteratorStats,
531 1 : ) (block.BufferHandle, error) {
532 1 : ctx := objiotracing.WithBlockType(i.ctx, objiotracing.ValueBlock)
533 1 : return i.reader.readBlock(ctx, h, nil, i.vbRH, stats, &i.iterStats, i.bufferPool)
534 1 : }
535 :
536 : // resolveMaybeExcluded is invoked when the block-property filterer has found
537 : // that a block is excluded according to its properties but only if its bounds
538 : // fall within the filter's current bounds. This function consults the
539 : // appropriate bound, depending on the iteration direction, and returns either
540 : // `blockIntersects` or `blockExcluded`.
541 1 : func (i *singleLevelIterator[D, PD]) resolveMaybeExcluded(dir int8) intersectsResult {
542 1 : // TODO(jackson): We could first try comparing to top-level index block's
543 1 : // key, and if within bounds avoid per-data block key comparisons.
544 1 :
545 1 : // This iterator is configured with a bound-limited block property
546 1 : // filter. The bpf determined this block could be excluded from
547 1 : // iteration based on the property encoded in the block handle.
548 1 : // However, we still need to determine if the block is wholly
549 1 : // contained within the filter's key bounds.
550 1 : //
551 1 : // External guarantees ensure all the block's keys are ≥ the
552 1 : // filter's lower bound during forward iteration, and that all the
553 1 : // block's keys are < the filter's upper bound during backward
554 1 : // iteration. We only need to determine if the opposite bound is
555 1 : // also met.
556 1 : //
557 1 : // The index separator in index.Key() provides an inclusive
558 1 : // upper-bound for the data block's keys, guaranteeing that all its
559 1 : // keys are ≤ index.Key(). For forward iteration, this is all we
560 1 : // need.
561 1 : if dir > 0 {
562 1 : // Forward iteration.
563 1 : if i.bpfs.boundLimitedFilter.KeyIsWithinUpperBound(i.index.Key().UserKey) {
564 1 : return blockExcluded
565 1 : }
566 1 : return blockIntersects
567 : }
568 :
569 : // Reverse iteration.
570 : //
571 : // Because we're iterating in the reverse direction, we don't yet have
572 : // enough context available to determine if the block is wholly contained
573 : // within its bounds. This case arises only during backward iteration,
574 : // because of the way the index is structured.
575 : //
576 : // Consider a bound-limited bpf limited to the bounds [b,d), loading the
577 : // block with separator `c`. During reverse iteration, the guarantee that
578 : // all the block's keys are < `d` is externally provided, but no guarantee
579 : // is made on the bpf's lower bound. The separator `c` only provides an
580 : // inclusive upper bound on the block's keys, indicating that the
581 : // corresponding block handle points to a block containing only keys ≤ `c`.
582 : //
583 : // To establish a lower bound, we step the index backwards to read the
584 : // previous block's separator, which provides an inclusive lower bound on
585 : // the original block's keys. Afterwards, we step forward to restore our
586 : // index position.
587 1 : if peekKV := i.index.Prev(); peekKV == nil {
588 1 : // The original block points to the first block of this index block. If
589 1 : // there's a two-level index, it could potentially provide a lower
590 1 : // bound, but the code refactoring necessary to read it doesn't seem
591 1 : // worth the payoff. We fall through to loading the block.
592 1 : } else if i.bpfs.boundLimitedFilter.KeyIsWithinLowerBound(peekKV.K.UserKey) {
593 1 : // The lower-bound on the original block falls within the filter's
594 1 : // bounds, and we can skip the block (after restoring our current index
595 1 : // position).
596 1 : _ = i.index.Next()
597 1 : return blockExcluded
598 1 : }
599 1 : _ = i.index.Next()
600 1 : return blockIntersects
601 : }
602 :
603 : // The number of times to call Next/Prev in a block before giving up and seeking.
604 : // The value of 4 is arbitrary.
605 : // TODO(sumeer): experiment with dynamic adjustment based on the history of
606 : // seeks for a particular iterator.
607 : const numStepsBeforeSeek = 4
608 :
609 : func (i *singleLevelIterator[D, PD]) trySeekGEUsingNextWithinBlock(
610 : key []byte,
611 1 : ) (kv *base.InternalKV, done bool) {
612 1 : kv = PD(&i.data).KV()
613 1 : for j := 0; j < numStepsBeforeSeek; j++ {
614 1 : curKeyCmp := i.cmp(kv.K.UserKey, key)
615 1 : if curKeyCmp >= 0 {
616 1 : if i.blockUpper != nil {
617 1 : cmp := i.cmp(kv.K.UserKey, i.blockUpper)
618 1 : if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 {
619 1 : i.exhaustedBounds = +1
620 1 : return nil, true
621 1 : }
622 : }
623 1 : return kv, true
624 : }
625 1 : kv = PD(&i.data).Next()
626 1 : if kv == nil {
627 1 : break
628 : }
629 : }
630 1 : return kv, false
631 : }
632 :
633 : func (i *singleLevelIterator[D, PD]) trySeekLTUsingPrevWithinBlock(
634 : key []byte,
635 1 : ) (kv *base.InternalKV, done bool) {
636 1 : kv = PD(&i.data).KV()
637 1 : for j := 0; j < numStepsBeforeSeek; j++ {
638 1 : curKeyCmp := i.cmp(kv.K.UserKey, key)
639 1 : if curKeyCmp < 0 {
640 1 : if i.blockLower != nil && i.cmp(kv.K.UserKey, i.blockLower) < 0 {
641 1 : i.exhaustedBounds = -1
642 1 : return nil, true
643 1 : }
644 1 : return kv, true
645 : }
646 1 : kv = PD(&i.data).Prev()
647 1 : if kv == nil {
648 1 : break
649 : }
650 : }
651 1 : return kv, false
652 : }
653 :
654 : // SeekGE implements internalIterator.SeekGE, as documented in the pebble
655 : // package. Note that SeekGE only checks the upper bound. It is up to the
656 : // caller to ensure that key is greater than or equal to the lower bound.
657 1 : func (i *singleLevelIterator[D, PD]) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV {
658 1 : if i.vState != nil {
659 1 : // Callers of SeekGE don't know about virtual sstable bounds, so we may
660 1 : // have to internally restrict the bounds.
661 1 : //
662 1 : // TODO(bananabrick): We can optimize this check away for the level iter
663 1 : // if necessary.
664 1 : if i.cmp(key, i.lower) < 0 {
665 1 : key = i.lower
666 1 : }
667 : }
668 :
669 1 : if flags.TrySeekUsingNext() {
670 1 : // The i.exhaustedBounds comparison indicates that the upper bound was
671 1 : // reached. The i.data.isDataInvalidated() indicates that the sstable was
672 1 : // exhausted.
673 1 : if (i.exhaustedBounds == +1 || PD(&i.data).IsDataInvalidated()) && i.err == nil {
674 1 : // Already exhausted, so return nil.
675 1 : return nil
676 1 : }
677 1 : if i.err != nil {
678 0 : // The current iterator position cannot be used.
679 0 : flags = flags.DisableTrySeekUsingNext()
680 0 : }
681 : // INVARIANT: flags.TrySeekUsingNext() => i.err == nil &&
682 : // !i.exhaustedBounds==+1 && !i.data.isDataInvalidated(). That is,
683 : // data-exhausted and bounds-exhausted, as defined earlier, are both
684 : // false. Ths makes it safe to clear out i.exhaustedBounds and i.err
685 : // false. This makes it safe to clear out i.exhaustedBounds and i.err
686 : }
687 :
688 1 : i.exhaustedBounds = 0
689 1 : i.err = nil // clear cached iteration error
690 1 : boundsCmp := i.boundsCmp
691 1 : // Seek optimization only applies until iterator is first positioned after SetBounds.
692 1 : i.boundsCmp = 0
693 1 : i.positionedUsingLatestBounds = true
694 1 : return i.seekGEHelper(key, boundsCmp, flags)
695 : }
696 :
697 : // seekGEHelper contains the common functionality for SeekGE and SeekPrefixGE.
698 : func (i *singleLevelIterator[D, PD]) seekGEHelper(
699 : key []byte, boundsCmp int, flags base.SeekGEFlags,
700 1 : ) *base.InternalKV {
701 1 : // Invariant: trySeekUsingNext => !i.data.isDataInvalidated() && i.exhaustedBounds != +1
702 1 :
703 1 : // SeekGE performs various step-instead-of-seeking optimizations: e.g. enabled
704 1 : // by trySeekUsingNext, or by monotonically increasing bounds (i.boundsCmp).
705 1 :
706 1 : var dontSeekWithinBlock bool
707 1 : if !PD(&i.data).IsDataInvalidated() && PD(&i.data).Valid() && i.index.Valid() &&
708 1 : boundsCmp > 0 && i.cmp(key, i.index.Key().UserKey) <= 0 {
709 1 : // Fast-path: The bounds have moved forward and this SeekGE is
710 1 : // respecting the lower bound (guaranteed by Iterator). We know that
711 1 : // the iterator must already be positioned within or just outside the
712 1 : // previous bounds. Therefore it cannot be positioned at a block (or
713 1 : // the position within that block) that is ahead of the seek position.
714 1 : // However it can be positioned at an earlier block. This fast-path to
715 1 : // use Next() on the block is only applied when we are already at the
716 1 : // block that the slow-path (the else-clause) would load -- this is
717 1 : // the motivation for the i.cmp(key, i.index.Key().UserKey) <= 0
718 1 : // predicate.
719 1 : i.initBoundsForAlreadyLoadedBlock()
720 1 : kv, done := i.trySeekGEUsingNextWithinBlock(key)
721 1 : if done {
722 1 : return kv
723 1 : }
724 1 : if kv == nil {
725 1 : // Done with this block.
726 1 : dontSeekWithinBlock = true
727 1 : }
728 1 : } else {
729 1 : // Cannot use bounds monotonicity. But may be able to optimize if
730 1 : // caller claimed externally known invariant represented by
731 1 : // flags.TrySeekUsingNext().
732 1 : if flags.TrySeekUsingNext() {
733 1 : // seekPrefixGE or SeekGE has already ensured
734 1 : // !i.data.isDataInvalidated() && i.exhaustedBounds != +1
735 1 : curr := PD(&i.data).KV()
736 1 : less := i.cmp(curr.K.UserKey, key) < 0
737 1 : // We could be more sophisticated and confirm that the seek
738 1 : // position is within the current block before applying this
739 1 : // optimization. But there may be some benefit even if it is in
740 1 : // the next block, since we can avoid seeking i.index.
741 1 : for j := 0; less && j < numStepsBeforeSeek; j++ {
742 1 : curr = i.Next()
743 1 : if curr == nil {
744 1 : return nil
745 1 : }
746 1 : less = i.cmp(curr.K.UserKey, key) < 0
747 : }
748 1 : if !less {
749 1 : if i.blockUpper != nil {
750 1 : cmp := i.cmp(curr.K.UserKey, i.blockUpper)
751 1 : if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 {
752 0 : i.exhaustedBounds = +1
753 0 : return nil
754 0 : }
755 : }
756 1 : return curr
757 : }
758 : }
759 :
760 : // Slow-path.
761 :
762 1 : var ikv *base.InternalKV
763 1 : if ikv = i.index.SeekGE(key, flags.DisableTrySeekUsingNext()); ikv == nil {
764 1 : // The target key is greater than any key in the index block.
765 1 : // Invalidate the block iterator so that a subsequent call to Prev()
766 1 : // will return the last key in the table.
767 1 : PD(&i.data).Invalidate()
768 1 : return nil
769 1 : }
770 1 : result := i.loadBlock(+1)
771 1 : if result == loadBlockFailed {
772 0 : return nil
773 0 : }
774 1 : if result == loadBlockIrrelevant {
775 1 : // Enforce the upper bound here since we don't want to bother moving
776 1 : // to the next block if upper bound is already exceeded. Note that
777 1 : // the next block starts with keys >= ikey.UserKey since even
778 1 : // though this is the block separator, the same user key can span
779 1 : // multiple blocks. If upper is exclusive we use >= below, else
780 1 : // we use >.
781 1 : if i.upper != nil {
782 1 : cmp := i.cmp(ikv.K.UserKey, i.upper)
783 1 : if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 {
784 1 : i.exhaustedBounds = +1
785 1 : return nil
786 1 : }
787 : }
788 : // Want to skip to the next block.
789 1 : dontSeekWithinBlock = true
790 : }
791 : }
792 1 : if !dontSeekWithinBlock {
793 1 : if ikv := PD(&i.data).SeekGE(key, flags.DisableTrySeekUsingNext()); ikv != nil {
794 1 : if i.blockUpper != nil {
795 1 : cmp := i.cmp(ikv.K.UserKey, i.blockUpper)
796 1 : if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 {
797 1 : i.exhaustedBounds = +1
798 1 : return nil
799 1 : }
800 : }
801 1 : return ikv
802 : }
803 : }
804 1 : return i.skipForward()
805 : }
806 :
807 : // SeekPrefixGE implements internalIterator.SeekPrefixGE, as documented in the
808 : // pebble package. Note that SeekPrefixGE only checks the upper bound. It is up
809 : // to the caller to ensure that key is greater than or equal to the lower bound.
810 : func (i *singleLevelIterator[D, PD]) SeekPrefixGE(
811 : prefix, key []byte, flags base.SeekGEFlags,
812 1 : ) *base.InternalKV {
813 1 : if i.vState != nil {
814 1 : // Callers of SeekPrefixGE aren't aware of virtual sstable bounds, so
815 1 : // we may have to internally restrict the bounds.
816 1 : //
817 1 : // TODO(bananabrick): We can optimize away this check for the level iter
818 1 : // if necessary.
819 1 : if i.cmp(key, i.lower) < 0 {
820 1 : key = i.lower
821 1 : }
822 : }
823 1 : return i.seekPrefixGE(prefix, key, flags)
824 : }
825 :
826 : func (i *singleLevelIterator[D, PD]) seekPrefixGE(
827 : prefix, key []byte, flags base.SeekGEFlags,
828 1 : ) (kv *base.InternalKV) {
829 1 : // NOTE: prefix is only used for bloom filter checking and not later work in
830 1 : // this method. Hence, we can use the existing iterator position if the last
831 1 : // SeekPrefixGE did not fail bloom filter matching.
832 1 :
833 1 : err := i.err
834 1 : i.err = nil // clear cached iteration error
835 1 : if i.useFilterBlock {
836 1 : if !i.lastBloomFilterMatched {
837 1 : // Iterator is not positioned based on last seek.
838 1 : flags = flags.DisableTrySeekUsingNext()
839 1 : }
840 1 : i.lastBloomFilterMatched = false
841 1 : // Check prefix bloom filter.
842 1 : var mayContain bool
843 1 : mayContain, i.err = i.bloomFilterMayContain(prefix)
844 1 : if i.err != nil || !mayContain {
845 1 : // In the i.err == nil case, this invalidation may not be necessary for
846 1 : // correctness, and may be a place to optimize later by reusing the
847 1 : // already loaded block. It was necessary in earlier versions of the code
848 1 : // since the caller was allowed to call Next when SeekPrefixGE returned
849 1 : // nil. This is no longer allowed.
850 1 : PD(&i.data).Invalidate()
851 1 : return nil
852 1 : }
853 1 : i.lastBloomFilterMatched = true
854 : }
855 1 : if flags.TrySeekUsingNext() {
856 1 : // The i.exhaustedBounds comparison indicates that the upper bound was
857 1 : // reached. The i.data.isDataInvalidated() indicates that the sstable was
858 1 : // exhausted.
859 1 : if (i.exhaustedBounds == +1 || PD(&i.data).IsDataInvalidated()) && err == nil {
860 1 : // Already exhausted, so return nil.
861 1 : return nil
862 1 : }
863 1 : if err != nil {
864 0 : // The current iterator position cannot be used.
865 0 : flags = flags.DisableTrySeekUsingNext()
866 0 : }
867 : // INVARIANT: flags.TrySeekUsingNext() => err == nil &&
868 : // !i.exhaustedBounds==+1 && !i.data.isDataInvalidated(). That is,
869 : // data-exhausted and bounds-exhausted, as defined earlier, are both
870 : // false. Ths makes it safe to clear out i.exhaustedBounds and i.err
871 : // false. This makes it safe to clear out i.exhaustedBounds and i.err
872 : }
873 : // Bloom filter matches, or skipped, so this method will position the
874 : // iterator.
875 1 : i.exhaustedBounds = 0
876 1 : boundsCmp := i.boundsCmp
877 1 : // Seek optimization only applies until iterator is first positioned after SetBounds.
878 1 : i.boundsCmp = 0
879 1 : i.positionedUsingLatestBounds = true
880 1 : return i.maybeVerifyKey(i.seekGEHelper(key, boundsCmp, flags))
881 : }
882 :
883 : // shouldUseFilterBlock returns whether we should use the filter block, based on
884 : // its length and the size limit.
885 1 : func shouldUseFilterBlock(reader *Reader, filterBlockSizeLimit FilterBlockSizeLimit) bool {
886 1 : return reader.tableFilter != nil && reader.filterBH.Length <= uint64(filterBlockSizeLimit)
887 1 : }
888 :
889 1 : func (i *singleLevelIterator[D, PD]) bloomFilterMayContain(prefix []byte) (bool, error) {
890 1 : // Check prefix bloom filter.
891 1 : prefixToCheck := prefix
892 1 : if i.transforms.SyntheticPrefix.IsSet() {
893 1 : // We have to remove the synthetic prefix.
894 1 : var ok bool
895 1 : prefixToCheck, ok = bytes.CutPrefix(prefix, i.transforms.SyntheticPrefix)
896 1 : if !ok {
897 1 : // This prefix will not be found inside this table.
898 1 : return false, nil
899 1 : }
900 : }
901 :
902 1 : dataH, err := i.reader.readFilter(i.ctx, i.indexFilterRH, i.stats, &i.iterStats)
903 1 : if err != nil {
904 0 : return false, err
905 0 : }
906 1 : defer dataH.Release()
907 1 : return i.reader.tableFilter.mayContain(dataH.Get(), prefixToCheck), nil
908 : }
909 :
910 : // virtualLast should only be called if i.vState != nil.
911 1 : func (i *singleLevelIterator[D, PD]) virtualLast() *base.InternalKV {
912 1 : if i.vState == nil {
913 0 : panic("pebble: invalid call to virtualLast")
914 : }
915 :
916 1 : if !i.endKeyInclusive {
917 1 : // Trivial case.
918 1 : return i.SeekLT(i.upper, base.SeekLTFlagsNone)
919 1 : }
920 1 : return i.virtualLastSeekLE()
921 : }
922 :
923 : // virtualLastSeekLE is called by virtualLast to do a SeekLE as part of a
924 : // virtualLast. Consider generalizing this into a SeekLE() if there are other
925 : // uses of this method in the future. Does a SeekLE on the upper bound of the
926 : // file/iterator.
927 1 : func (i *singleLevelIterator[D, PD]) virtualLastSeekLE() *base.InternalKV {
928 1 : // Callers of SeekLE don't know about virtual sstable bounds, so we may
929 1 : // have to internally restrict the bounds.
930 1 : //
931 1 : // TODO(bananabrick): We can optimize this check away for the level iter
932 1 : // if necessary.
933 1 : if !i.endKeyInclusive {
934 0 : panic("unexpected virtualLastSeekLE with exclusive upper bounds")
935 : }
936 1 : key := i.upper
937 1 :
938 1 : i.exhaustedBounds = 0
939 1 : i.err = nil // clear cached iteration error
940 1 : // Seek optimization only applies until iterator is first positioned with a
941 1 : // SeekGE or SeekLT after SetBounds.
942 1 : i.boundsCmp = 0
943 1 : i.positionedUsingLatestBounds = true
944 1 :
945 1 : ikv := i.index.SeekGE(key, base.SeekGEFlagsNone)
946 1 : // We can have multiple internal keys with the same user key as the seek
947 1 : // key. In that case, we want the last (greatest) internal key.
948 1 : //
949 1 : // INVARIANT: One of two cases:
950 1 : // A. ikey == nil. There is no data block with index key >= key. So all keys
951 1 : // in the last data block are < key.
952 1 : // B. ikey.userkey >= key. This data block may have some keys > key.
953 1 : //
954 1 : // Subcases of B:
955 1 : // B1. ikey.userkey == key. This is when loop iteration happens.
956 1 : // Since ikey.UserKey >= largest data key in the block, the largest data
957 1 : // key in this block is <= key.
958 1 : // B2. ikey.userkey > key. Loop iteration will not happen.
959 1 : //
960 1 : // NB: We can avoid this Next()ing if we just implement a blockIter.SeekLE().
961 1 : // This might be challenging to do correctly, so impose regular operations
962 1 : // for now.
963 1 : for ikv != nil && bytes.Equal(ikv.K.UserKey, key) {
964 1 : ikv = i.index.Next()
965 1 : }
966 1 : if ikv == nil {
967 1 : // Cases A or B1 where B1 exhausted all blocks. In both cases the last block
968 1 : // has all keys <= key. skipBackward enforces the lower bound.
969 1 : return i.skipBackward()
970 1 : }
971 : // Case B. We are here because we were originally in case B2, or we were in B1
972 : // and we arrived at a block where ikey.UserKey > key. Either way, ikey.UserKey
973 : // > key. So there could be keys in the block > key. But the block preceding
974 : // this block cannot have any keys > key, otherwise it would have been the
975 : // result of the original index.SeekGE.
976 1 : result := i.loadBlock(-1)
977 1 : if result == loadBlockFailed {
978 0 : return nil
979 0 : }
980 1 : if result == loadBlockIrrelevant {
981 1 : // Want to skip to the previous block.
982 1 : return i.skipBackward()
983 1 : }
984 1 : ikv = PD(&i.data).SeekGE(key, base.SeekGEFlagsNone)
985 1 : // Go to the last user key that matches key, and then Prev() on the data
986 1 : // block.
987 1 : for ikv != nil && bytes.Equal(ikv.K.UserKey, key) {
988 1 : ikv = PD(&i.data).Next()
989 1 : }
990 1 : ikv = PD(&i.data).Prev()
991 1 : if ikv != nil {
992 1 : // Enforce the lower bound here, as we could have gone past it. This happens
993 1 : // if keys between `i.blockLower` and `key` are obsolete, for instance. Even
994 1 : // though i.blockLower (which is either nil or equal to i.lower) is <= key,
995 1 : // all internal keys in the user key interval [i.blockLower, key] could be
996 1 : // obsolete (due to a RANGEDEL which will not be observed here). And
997 1 : // i.data.Prev will skip all these obsolete keys, and could land on a key
998 1 : // below the lower bound, requiring the lower bound check.
999 1 : if i.blockLower != nil && i.cmp(ikv.K.UserKey, i.blockLower) < 0 {
1000 1 : i.exhaustedBounds = -1
1001 1 : return nil
1002 1 : }
1003 1 : return ikv
1004 : }
1005 1 : return i.skipBackward()
1006 : }
1007 :
1008 : // SeekLT implements internalIterator.SeekLT, as documented in the pebble
1009 : // package. Note that SeekLT only checks the lower bound. It is up to the
1010 : // caller to ensure that key is less than or equal to the upper bound.
1011 1 : func (i *singleLevelIterator[D, PD]) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV {
1012 1 : if i.vState != nil {
1013 1 : // Might have to fix upper bound since virtual sstable bounds are not
1014 1 : // known to callers of SeekLT.
1015 1 : //
1016 1 : // TODO(bananabrick): We can optimize away this check for the level iter
1017 1 : // if necessary.
1018 1 : cmp := i.cmp(key, i.upper)
1019 1 : // key == i.upper is fine. We'll do the right thing and return the
1020 1 : // first internal key with user key < key.
1021 1 : if cmp > 0 {
1022 1 : // Return the last key in the virtual sstable.
1023 1 : return i.maybeVerifyKey(i.virtualLast())
1024 1 : }
1025 : }
1026 :
1027 1 : i.exhaustedBounds = 0
1028 1 : i.err = nil // clear cached iteration error
1029 1 : boundsCmp := i.boundsCmp
1030 1 : // Seek optimization only applies until iterator is first positioned after SetBounds.
1031 1 : i.boundsCmp = 0
1032 1 :
1033 1 : // Seeking operations perform various step-instead-of-seeking optimizations:
1034 1 : // eg by considering monotonically increasing bounds (i.boundsCmp).
1035 1 :
1036 1 : i.positionedUsingLatestBounds = true
1037 1 :
1038 1 : var dontSeekWithinBlock bool
1039 1 : if !PD(&i.data).IsDataInvalidated() && PD(&i.data).Valid() && i.index.Valid() &&
1040 1 : boundsCmp < 0 && i.cmp(PD(&i.data).FirstUserKey(), key) < 0 {
1041 1 : // Fast-path: The bounds have moved backward, and this SeekLT is
1042 1 : // respecting the upper bound (guaranteed by Iterator). We know that
1043 1 : // the iterator must already be positioned within or just outside the
1044 1 : // previous bounds. Therefore it cannot be positioned at a block (or
1045 1 : // the position within that block) that is behind the seek position.
1046 1 : // However it can be positioned at a later block. This fast-path to
1047 1 : // use Prev() on the block is only applied when we are already at the
1048 1 : // block that can satisfy this seek -- this is the motivation for the
1049 1 : // i.cmp(i.data.firstKey.UserKey, key) < 0 predicate.
1050 1 : i.initBoundsForAlreadyLoadedBlock()
1051 1 : ikv, done := i.trySeekLTUsingPrevWithinBlock(key)
1052 1 : if done {
1053 1 : return ikv
1054 1 : }
1055 1 : if ikv == nil {
1056 1 : // Done with this block.
1057 1 : dontSeekWithinBlock = true
1058 1 : }
1059 1 : } else {
1060 1 : // Slow-path.
1061 1 : var ikv *base.InternalKV
1062 1 :
1063 1 : // NB: If a bound-limited block property filter is configured, it's
1064 1 : // externally ensured that the filter is disabled (through returning
1065 1 : // Intersects=false irrespective of the block props provided) during
1066 1 : // seeks.
1067 1 : if ikv = i.index.SeekGE(key, base.SeekGEFlagsNone); ikv == nil {
1068 1 : ikv = i.index.Last()
1069 1 : if ikv == nil {
1070 0 : return nil
1071 0 : }
1072 : }
1073 : // INVARIANT: ikey != nil.
1074 1 : result := i.loadBlock(-1)
1075 1 : if result == loadBlockFailed {
1076 0 : return nil
1077 0 : }
1078 1 : if result == loadBlockIrrelevant {
1079 1 : // Enforce the lower bound here since we don't want to bother moving
1080 1 : // to the previous block if lower bound is already exceeded. Note
1081 1 : // that the previous block starts with keys <= ikey.UserKey since
1082 1 : // even though this is the current block's separator, the same
1083 1 : // user key can span multiple blocks.
1084 1 : if i.lower != nil && i.cmp(ikv.K.UserKey, i.lower) < 0 {
1085 1 : i.exhaustedBounds = -1
1086 1 : return nil
1087 1 : }
1088 : // Want to skip to the previous block.
1089 1 : dontSeekWithinBlock = true
1090 : }
1091 : }
1092 1 : if !dontSeekWithinBlock {
1093 1 : if ikv := PD(&i.data).SeekLT(key, flags); ikv != nil {
1094 1 : if i.blockLower != nil && i.cmp(ikv.K.UserKey, i.blockLower) < 0 {
1095 1 : i.exhaustedBounds = -1
1096 1 : return nil
1097 1 : }
1098 1 : return ikv
1099 : }
1100 : }
1101 : // The index contains separator keys which may lie between
1102 : // user-keys. Consider the user-keys:
1103 : //
1104 : // complete
1105 : // ---- new block ---
1106 : // complexion
1107 : //
1108 : // If these two keys end one block and start the next, the index key may
1109 : // be chosen as "compleu". The SeekGE in the index block will then point
1110 : // us to the block containing "complexion". If this happens, we want the
1111 : // last key from the previous data block.
1112 1 : return i.maybeVerifyKey(i.skipBackward())
1113 : }
1114 :
1115 : // First implements internalIterator.First, as documented in the pebble
1116 : // package. Note that First only checks the upper bound. It is up to the caller
1117 : // to ensure that key is greater than or equal to the lower bound (e.g. via a
1118 : // call to SeekGE(lower)).
1119 1 : func (i *singleLevelIterator[D, PD]) First() *base.InternalKV {
1120 1 : // If we have a lower bound, use SeekGE. Note that in general this is not
1121 1 : // supported usage, except when the lower bound is there because the table is
1122 1 : // virtual.
1123 1 : if i.lower != nil {
1124 1 : return i.SeekGE(i.lower, base.SeekGEFlagsNone)
1125 1 : }
1126 :
1127 1 : i.positionedUsingLatestBounds = true
1128 1 :
1129 1 : return i.firstInternal()
1130 : }
1131 :
1132 : // firstInternal is a helper used for absolute positioning in a single-level
1133 : // index file, or for positioning in the second-level index in a two-level
1134 : // index file. For the latter, one cannot make any claims about absolute
1135 : // positioning.
1136 1 : func (i *singleLevelIterator[D, PD]) firstInternal() *base.InternalKV {
1137 1 : i.exhaustedBounds = 0
1138 1 : i.err = nil // clear cached iteration error
1139 1 : // Seek optimization only applies until iterator is first positioned after SetBounds.
1140 1 : i.boundsCmp = 0
1141 1 :
1142 1 : var kv *base.InternalKV
1143 1 : if kv = i.index.First(); kv == nil {
1144 0 : PD(&i.data).Invalidate()
1145 0 : return nil
1146 0 : }
1147 1 : result := i.loadBlock(+1)
1148 1 : if result == loadBlockFailed {
1149 0 : return nil
1150 0 : }
1151 1 : if result == loadBlockOK {
1152 1 : if kv := PD(&i.data).First(); kv != nil {
1153 1 : if i.blockUpper != nil {
1154 1 : cmp := i.cmp(kv.K.UserKey, i.blockUpper)
1155 1 : if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 {
1156 1 : i.exhaustedBounds = +1
1157 1 : return nil
1158 1 : }
1159 : }
1160 1 : return kv
1161 : }
1162 : // Else fall through to skipForward.
1163 1 : } else {
1164 1 : // result == loadBlockIrrelevant. Enforce the upper bound here since we
1165 1 : // don't want to bother moving to the next block if upper bound is
1166 1 : // already exceeded. Note that the next block starts with keys >=
1167 1 : // ikey.UserKey since even though this is the block separator, the
1168 1 : // same user key can span multiple blocks. If upper is exclusive we
1169 1 : // use >= below, else we use >.
1170 1 : if i.upper != nil {
1171 1 : cmp := i.cmp(kv.K.UserKey, i.upper)
1172 1 : if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 {
1173 1 : i.exhaustedBounds = +1
1174 1 : return nil
1175 1 : }
1176 : }
1177 : // Else fall through to skipForward.
1178 : }
1179 :
1180 1 : return i.skipForward()
1181 : }
1182 :
1183 : // Last implements internalIterator.Last, as documented in the pebble
1184 : // package. Note that Last only checks the lower bound. It is up to the caller
1185 : // to ensure that key is less than the upper bound (e.g. via a call to
1186 : // SeekLT(upper)).
1187 1 : func (i *singleLevelIterator[D, PD]) Last() *base.InternalKV {
1188 1 : if i.vState != nil {
1189 1 : return i.maybeVerifyKey(i.virtualLast())
1190 1 : }
1191 :
1192 1 : if i.upper != nil {
1193 0 : panic("singleLevelIterator.Last() used despite upper bound")
1194 : }
1195 1 : i.positionedUsingLatestBounds = true
1196 1 : return i.lastInternal()
1197 : }
1198 :
1199 : // lastInternal is a helper used for absolute positioning in a single-level
1200 : // index file, or for positioning in the second-level index in a two-level
1201 : // index file. For the latter, one cannot make any claims about absolute
1202 : // positioning.
1203 1 : func (i *singleLevelIterator[D, PD]) lastInternal() *base.InternalKV {
1204 1 : i.exhaustedBounds = 0
1205 1 : i.err = nil // clear cached iteration error
1206 1 : // Seek optimization only applies until iterator is first positioned after SetBounds.
1207 1 : i.boundsCmp = 0
1208 1 :
1209 1 : var ikv *base.InternalKV
1210 1 : if ikv = i.index.Last(); ikv == nil {
1211 0 : PD(&i.data).Invalidate()
1212 0 : return nil
1213 0 : }
1214 1 : result := i.loadBlock(-1)
1215 1 : if result == loadBlockFailed {
1216 0 : return nil
1217 0 : }
1218 1 : if result == loadBlockOK {
1219 1 : if ikv := PD(&i.data).Last(); ikv != nil {
1220 1 : if i.blockLower != nil && i.cmp(ikv.K.UserKey, i.blockLower) < 0 {
1221 1 : i.exhaustedBounds = -1
1222 1 : return nil
1223 1 : }
1224 1 : return ikv
1225 : }
1226 : // Else fall through to skipBackward.
1227 1 : } else {
1228 1 : // result == loadBlockIrrelevant. Enforce the lower bound here since we
1229 1 : // don't want to bother moving to the previous block if lower bound is
1230 1 : // already exceeded. Note that the previous block starts with keys <=
1231 1 : // key.UserKey since even though this is the current block's
1232 1 : // separator, the same user key can span multiple blocks.
1233 1 : if i.lower != nil && i.cmp(ikv.K.UserKey, i.lower) < 0 {
1234 1 : i.exhaustedBounds = -1
1235 1 : return nil
1236 1 : }
1237 : }
1238 :
1239 1 : return i.skipBackward()
1240 : }
1241 :
1242 : // Next implements internalIterator.Next, as documented in the pebble
1243 : // package.
1244 : // Note: compactionIterator.Next mirrors the implementation of Iterator.Next
1245 : // due to performance. Keep the two in sync.
1246 1 : func (i *singleLevelIterator[D, PD]) Next() *base.InternalKV {
1247 1 : if i.exhaustedBounds == +1 {
1248 0 : panic("Next called even though exhausted upper bound")
1249 : }
1250 1 : i.exhaustedBounds = 0
1251 1 : // Seek optimization only applies until iterator is first positioned after SetBounds.
1252 1 : i.boundsCmp = 0
1253 1 :
1254 1 : if i.err != nil {
1255 0 : // TODO(jackson): Can this case be turned into a panic? Once an error is
1256 0 : // encountered, the iterator must be re-seeked.
1257 0 : return nil
1258 0 : }
1259 1 : if kv := PD(&i.data).Next(); kv != nil {
1260 1 : if i.blockUpper != nil {
1261 1 : cmp := i.cmp(kv.K.UserKey, i.blockUpper)
1262 1 : if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 {
1263 1 : i.exhaustedBounds = +1
1264 1 : return nil
1265 1 : }
1266 : }
1267 1 : return kv
1268 : }
1269 1 : return i.skipForward()
1270 : }
1271 :
1272 : // NextPrefix implements (base.InternalIterator).NextPrefix.
1273 1 : func (i *singleLevelIterator[D, PD]) NextPrefix(succKey []byte) *base.InternalKV {
1274 1 : if i.exhaustedBounds == +1 {
1275 0 : panic("NextPrefix called even though exhausted upper bound")
1276 : }
1277 1 : i.exhaustedBounds = 0
1278 1 : // Seek optimization only applies until iterator is first positioned after SetBounds.
1279 1 : i.boundsCmp = 0
1280 1 : if i.err != nil {
1281 0 : // TODO(jackson): Can this case be turned into a panic? Once an error is
1282 0 : // encountered, the iterator must be re-seeked.
1283 0 : return nil
1284 0 : }
1285 1 : if kv := PD(&i.data).NextPrefix(succKey); kv != nil {
1286 1 : if i.blockUpper != nil {
1287 1 : cmp := i.cmp(kv.K.UserKey, i.blockUpper)
1288 1 : if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 {
1289 1 : i.exhaustedBounds = +1
1290 1 : return nil
1291 1 : }
1292 : }
1293 1 : return kv
1294 : }
1295 : 	// Did not find prefix in the existing data block. This is the slow path
1296 : // where we effectively seek the iterator.
1297 1 : var ikv *base.InternalKV
1298 1 : // The key is likely to be in the next data block, so try one step.
1299 1 : if ikv = i.index.Next(); ikv == nil {
1300 1 : // The target key is greater than any key in the index block.
1301 1 : // Invalidate the block iterator so that a subsequent call to Prev()
1302 1 : // will return the last key in the table.
1303 1 : PD(&i.data).Invalidate()
1304 1 : return nil
1305 1 : }
1306 1 : if i.cmp(succKey, ikv.K.UserKey) > 0 {
1307 1 : // Not in the next data block, so seek the index.
1308 1 : if ikv = i.index.SeekGE(succKey, base.SeekGEFlagsNone); ikv == nil {
1309 1 : // The target key is greater than any key in the index block.
1310 1 : // Invalidate the block iterator so that a subsequent call to Prev()
1311 1 : // will return the last key in the table.
1312 1 : PD(&i.data).Invalidate()
1313 1 : return nil
1314 1 : }
1315 : }
1316 1 : result := i.loadBlock(+1)
1317 1 : if result == loadBlockFailed {
1318 0 : return nil
1319 0 : }
1320 1 : if result == loadBlockIrrelevant {
1321 1 : 		// Enforce the upper bound here since we don't want to bother moving
1322 1 : 		// to the next block if the upper bound is already exceeded. Note
1323 1 : 		// that the next block starts with keys >= ikv.K.UserKey since even
1324 1 : 		// though this is the block separator, the same user key can span
1325 1 : 		// multiple blocks. If upper is exclusive we use >= below, else we
1326 1 : 		// use >.
1327 1 : if i.upper != nil {
1328 1 : cmp := i.cmp(ikv.K.UserKey, i.upper)
1329 1 : if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 {
1330 0 : i.exhaustedBounds = +1
1331 0 : return nil
1332 0 : }
1333 : }
1334 1 : } else if kv := PD(&i.data).SeekGE(succKey, base.SeekGEFlagsNone); kv != nil {
1335 1 : if i.blockUpper != nil {
1336 1 : cmp := i.cmp(kv.K.UserKey, i.blockUpper)
1337 1 : if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 {
1338 1 : i.exhaustedBounds = +1
1339 1 : return nil
1340 1 : }
1341 : }
1342 1 : return i.maybeVerifyKey(kv)
1343 : }
1344 :
1345 1 : return i.skipForward()
1346 : }
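// Editorial note: NextPrefix's slow path above first tries a single
// index.Next() before falling back to a full SeekGE, betting that succKey
// usually lives in the adjacent block. Below is a minimal sketch of the same
// "step once, then seek" pattern over a sorted string slice; advanceTo is an
// illustrative name, not Pebble's API.
//
//	package main
//
//	import (
//		"fmt"
//		"sort"
//	)
//
//	// advanceTo positions idx at the first element >= target, trying a single
//	// forward step before falling back to a binary search. It assumes the
//	// caller must advance, i.e. target > keys[idx].
//	func advanceTo(keys []string, idx int, target string) int {
//		if next := idx + 1; next < len(keys) && keys[next] >= target {
//			return next // common case: target is in the very next slot
//		}
//		// Slow path: seek from scratch.
//		return sort.SearchStrings(keys, target)
//	}
//
//	func main() {
//		keys := []string{"a", "c", "e", "g"}
//		fmt.Println(advanceTo(keys, 0, "b")) // 1: found by the single step
//		fmt.Println(advanceTo(keys, 0, "f")) // 3: found by the fallback seek
//	}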
1347 :
1348 : // Prev implements internalIterator.Prev, as documented in the pebble
1349 : // package.
1350 1 : func (i *singleLevelIterator[D, PD]) Prev() *base.InternalKV {
1351 1 : if i.exhaustedBounds == -1 {
1352 0 : panic("Prev called even though exhausted lower bound")
1353 : }
1354 1 : i.exhaustedBounds = 0
1355 1 : 	// Seek optimization only applies until the iterator is first positioned after SetBounds.
1356 1 : i.boundsCmp = 0
1357 1 :
1358 1 : if i.err != nil {
1359 0 : return nil
1360 0 : }
1361 1 : if kv := PD(&i.data).Prev(); kv != nil {
1362 1 : if i.blockLower != nil && i.cmp(kv.K.UserKey, i.blockLower) < 0 {
1363 1 : i.exhaustedBounds = -1
1364 1 : return nil
1365 1 : }
1366 1 : return kv
1367 : }
1368 1 : return i.skipBackward()
1369 : }
1370 :
1371 1 : func (i *singleLevelIterator[D, PD]) skipForward() *base.InternalKV {
1372 1 : for {
1373 1 : indexKey := i.index.Next()
1374 1 : if indexKey == nil {
1375 1 : PD(&i.data).Invalidate()
1376 1 : break
1377 : }
1378 1 : result := i.loadBlock(+1)
1379 1 : if result != loadBlockOK {
1380 1 : if i.err != nil {
1381 0 : break
1382 : }
1383 1 : if result == loadBlockFailed {
1384 0 : // We checked that i.index was at a valid entry, so
1385 0 : // loadBlockFailed could not have happened due to i.index
1386 0 : // being exhausted, and must be due to an error.
1387 0 : panic("loadBlock should not have failed with no error")
1388 : }
1389 : 			// result == loadBlockIrrelevant. Enforce the upper bound here
1390 : 			// since we don't want to bother moving to the next block if the
1391 : 			// upper bound is already exceeded. Note that the next block starts
1392 : 			// with keys >= indexKey.K.UserKey since even though this is the
1393 : 			// block separator, the same user key can span multiple blocks. If
1394 : 			// upper is exclusive we use >= below, else we use >.
1395 1 : if i.upper != nil {
1396 1 : cmp := i.cmp(indexKey.K.UserKey, i.upper)
1397 1 : if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 {
1398 1 : i.exhaustedBounds = +1
1399 1 : return nil
1400 1 : }
1401 : }
1402 1 : continue
1403 : }
1404 1 : var kv *base.InternalKV
1405 1 : // It is possible that skipBackward went too far and the virtual table lower
1406 1 : // bound is after the first key in the block we are about to load, in which
1407 1 : // case we must use SeekGE.
1408 1 : //
1409 1 : // An example of how this can happen:
1410 1 : //
1411 1 : // Data block 1 - contains keys a@1, c@1
1412 1 : // Data block 2 - contains keys e@1, g@1
1413 1 : // Data block 3 - contains keys i@2, k@2
1414 1 : //
1415 1 : // The virtual table lower bound is f. We have a range key masking filter
1416 1 : // that filters keys with @1 suffix. We are positioned inside block 3 then
1417 1 : 		// that filters keys with @1 suffix. We are positioned inside block 3, then
1418 1 : // block 1. Now the range key masking filter gets an update (via
1419 1 : // SpanChanged) and it no longer filters out any keys. At this point if a
1420 1 : // Next happens, we will load block 2 but it would not be legal to return
1421 1 : // "e@1" which is outside the virtual bounds.
1422 1 : //
1423 1 : // The core of the problem is that skipBackward doesn't know it can stop
1424 1 : // at block 2, because it doesn't know what keys are at the start of that
1425 1 : // block. This is why we don't have this problem in the opposite
1426 1 : // direction: skipForward will never go beyond the last relevant block
1427 1 : // because it looks at the separator key which is an upper bound for the
1428 1 : // block.
1429 1 : //
1430 1 : // Note that this is only a problem with virtual tables; we make no
1431 1 : // guarantees wrt an iterator lower bound when we iterate forward. But we
1432 1 : // must never return keys that are not inside the virtual table.
1433 1 : if i.vState != nil && i.blockLower != nil {
1434 1 : kv = PD(&i.data).SeekGE(i.lower, base.SeekGEFlagsNone)
1435 1 : } else {
1436 1 : kv = PD(&i.data).First()
1437 1 : }
1438 1 : if kv != nil {
1439 1 : if i.blockUpper != nil {
1440 1 : cmp := i.cmp(kv.K.UserKey, i.blockUpper)
1441 1 : if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 {
1442 1 : i.exhaustedBounds = +1
1443 1 : return nil
1444 1 : }
1445 : }
1446 1 : return i.maybeVerifyKey(kv)
1447 : }
1448 : }
1449 1 : return nil
1450 : }
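// Editorial note: the long comment above explains why, for virtual tables,
// skipForward must position with SeekGE(lower) rather than First after loading
// a block that may begin before the virtual lower bound. Below is a toy,
// self-contained model of that clamping using the blocks from that example;
// blocks are plain slices, keys compare lexicographically, and firstInBlock is
// an illustrative name, not this package's API.
//
//	package main
//
//	import (
//		"fmt"
//		"sort"
//	)
//
//	// firstInBlock returns the first key of block that is >= lower, or "" if
//	// the whole block lies below lower. With lower == "", it behaves like
//	// First.
//	func firstInBlock(block []string, lower string) string {
//		i := sort.SearchStrings(block, lower)
//		if i == len(block) {
//			return ""
//		}
//		return block[i]
//	}
//
//	func main() {
//		block2 := []string{"e@1", "g@1"}
//		// Without clamping we would surface "e@1", which lies below the
//		// virtual table lower bound "f".
//		fmt.Println(firstInBlock(block2, ""))  // e@1
//		fmt.Println(firstInBlock(block2, "f")) // g@1
//	}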
1451 :
1452 1 : func (i *singleLevelIterator[D, PD]) skipBackward() *base.InternalKV {
1453 1 : for {
1454 1 : indexKey := i.index.Prev()
1455 1 : if indexKey == nil {
1456 1 : PD(&i.data).Invalidate()
1457 1 : break
1458 : }
1459 1 : result := i.loadBlock(-1)
1460 1 : if result != loadBlockOK {
1461 1 : if i.err != nil {
1462 0 : break
1463 : }
1464 1 : if result == loadBlockFailed {
1465 0 : // We checked that i.index was at a valid entry, so
1466 0 : 				// loadBlockFailed could not have happened due to i.index
1467 0 : // being exhausted, and must be due to an error.
1468 0 : panic("loadBlock should not have failed with no error")
1469 : }
1470 : 			// result == loadBlockIrrelevant. Enforce the lower bound here
1471 : 			// since we don't want to bother moving to the previous block if
1472 : 			// the lower bound is already exceeded. Note that the previous
1473 : 			// block starts with keys <= indexKey.K.UserKey since even though
1474 : 			// this is the current block's separator, the same user key can span multiple blocks.
1475 1 : if i.lower != nil && i.cmp(indexKey.K.UserKey, i.lower) < 0 {
1476 1 : i.exhaustedBounds = -1
1477 1 : return nil
1478 1 : }
1479 1 : continue
1480 : }
1481 1 : kv := PD(&i.data).Last()
1482 1 : if kv == nil {
1483 1 : 			// The block iterator could have hidden some obsolete points, so it
1484 1 : 			// isn't safe to assume there are no more keys if we keep skipping
1485 1 : 			// backwards. Check the previous block, but check the lower bound
1486 1 : 			// before doing that.
1487 1 : if i.lower != nil && i.cmp(indexKey.K.UserKey, i.lower) < 0 {
1488 1 : i.exhaustedBounds = -1
1489 1 : return nil
1490 1 : }
1491 1 : continue
1492 : }
1493 1 : if i.blockLower != nil && i.cmp(kv.K.UserKey, i.blockLower) < 0 {
1494 1 : i.exhaustedBounds = -1
1495 1 : return nil
1496 1 : }
1497 1 : return i.maybeVerifyKey(kv)
1498 : }
1499 1 : return nil
1500 : }
1501 :
1502 : // Error implements internalIterator.Error, as documented in the pebble
1503 : // package.
1504 1 : func (i *singleLevelIterator[D, PD]) Error() error {
1505 1 : if err := PD(&i.data).Error(); err != nil {
1506 0 : return err
1507 0 : }
1508 1 : return i.err
1509 : }
1510 :
1511 : // SetCloseHook sets a function that will be called when the iterator is
1512 : // closed.
1513 1 : func (i *singleLevelIterator[D, PD]) SetCloseHook(fn func(i Iterator) error) {
1514 1 : i.closeHook = fn
1515 1 : }
1516 :
1517 1 : func firstError(err0, err1 error) error {
1518 1 : if err0 != nil {
1519 0 : return err0
1520 0 : }
1521 1 : return err1
1522 : }
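// Editorial note: firstError keeps the first non-nil error while still letting
// every cleanup step run, which is why closeInternal below chains each Close
// through it. A minimal, self-contained usage sketch (the error values are
// illustrative):
//
//	package main
//
//	import (
//		"errors"
//		"fmt"
//	)
//
//	func firstError(err0, err1 error) error {
//		if err0 != nil {
//			return err0
//		}
//		return err1
//	}
//
//	func main() {
//		var err error
//		err = firstError(err, nil)                        // still nil
//		err = firstError(err, errors.New("data: close"))  // first failure wins
//		err = firstError(err, errors.New("index: close")) // ignored, but this step still ran
//		fmt.Println(err) // data: close
//	}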
1523 :
1524 : // Close implements internalIterator.Close, as documented in the pebble
1525 : // package.
1526 1 : func (i *singleLevelIterator[D, PD]) Close() error {
1527 1 : err := i.closeInternal()
1528 1 : pool := i.pool
1529 1 : *i = i.resetForReuse()
1530 1 : if pool != nil {
1531 1 : pool.Put(i)
1532 1 : }
1533 1 : return err
1534 : }
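// Editorial note: Close above resets the iterator to a reusable state and
// returns it to a pool so a later iterator construction can skip an
// allocation. Below is a minimal sketch of that reset-then-Put pattern using
// sync.Pool; the iter type, newIter, and their fields are illustrative, not
// this package's types.
//
//	package main
//
//	import (
//		"fmt"
//		"sync"
//	)
//
//	type iter struct {
//		err  error
//		pool *sync.Pool
//	}
//
//	var iterPool = sync.Pool{New: func() any { return &iter{} }}
//
//	func newIter() *iter {
//		it := iterPool.Get().(*iter)
//		it.pool = &iterPool
//		return it
//	}
//
//	// Close zeroes the struct (so no stale state leaks into the next user) and
//	// hands it back to the pool, mirroring the resetForReuse + Put sequence.
//	func (it *iter) Close() error {
//		err := it.err
//		pool := it.pool
//		*it = iter{}
//		if pool != nil {
//			pool.Put(it)
//		}
//		return err
//	}
//
//	func main() {
//		it := newIter()
//		fmt.Println(it.Close()) // <nil>
//	}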
1535 :
1536 1 : func (i *singleLevelIterator[D, PD]) closeInternal() error {
1537 1 : if invariants.Enabled && i.inPool {
1538 0 : 		panic("Close called on iterator in pool")
1539 : }
1540 1 : i.iterStats.close()
1541 1 : var err error
1542 1 : if i.closeHook != nil {
1543 1 : err = firstError(err, i.closeHook(i))
1544 1 : }
1545 1 : err = firstError(err, PD(&i.data).Close())
1546 1 : err = firstError(err, i.index.Close())
1547 1 : if i.indexFilterRH != nil {
1548 1 : err = firstError(err, i.indexFilterRH.Close())
1549 1 : i.indexFilterRH = nil
1550 1 : }
1551 1 : if i.dataRH != nil {
1552 1 : err = firstError(err, i.dataRH.Close())
1553 1 : i.dataRH = nil
1554 1 : }
1555 1 : err = firstError(err, i.err)
1556 1 : if i.bpfs != nil {
1557 1 : releaseBlockPropertiesFilterer(i.bpfs)
1558 1 : }
1559 1 : if i.vbReader != nil {
1560 1 : i.vbReader.close()
1561 1 : }
1562 1 : if i.vbRH != nil {
1563 1 : err = firstError(err, i.vbRH.Close())
1564 1 : i.vbRH = nil
1565 1 : }
1566 1 : return err
1567 : }
1568 :
1569 0 : func (i *singleLevelIterator[D, PD]) String() string {
1570 0 : if i.vState != nil {
1571 0 : return i.vState.fileNum.String()
1572 0 : }
1573 0 : return i.reader.cacheOpts.FileNum.String()
1574 : }
1575 :
1576 : // DebugTree is part of the InternalIterator interface.
1577 0 : func (i *singleLevelIterator[D, PD]) DebugTree(tp treeprinter.Node) {
1578 0 : tp.Childf("%T(%p) fileNum=%s", i, i, i.String())
1579 0 : }
|